
Kernel: Make kernel memory regions be non-executable by default

From now on, you'll have to request executable memory specifically
if you want some.
Andreas Kling 2019-12-25 22:41:34 +01:00
parent 0b7a2e0a5a
commit 9e55bcb7da
7 changed files with 27 additions and 31 deletions
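
The diff below threads an explicit access mask through every kernel region allocation. A minimal sketch of the calling convention before and after this commit, using hypothetical call sites (the Region::Access flag definitions themselves are outside this diff):

    // Before this commit: every kernel region was implicitly mapped
    // readable, writable, and executable (PROT_READ | PROT_WRITE | PROT_EXEC).
    auto region = MM.allocate_kernel_region(PAGE_SIZE, "Example");

    // After this commit: the caller states exactly what it needs, and
    // executable memory must be requested explicitly.
    auto rw = MM.allocate_kernel_region(PAGE_SIZE, "Example",
        Region::Access::Read | Region::Access::Write);
    auto rwx = MM.allocate_kernel_region(PAGE_SIZE, "Example code",
        Region::Access::Read | Region::Access::Write | Region::Access::Execute);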

Changed file 1 of 7:

@@ -17,16 +17,16 @@
 class KBufferImpl : public RefCounted<KBufferImpl> {
 public:
-    static NonnullRefPtr<KBufferImpl> create_with_size(size_t size)
+    static NonnullRefPtr<KBufferImpl> create_with_size(size_t size, u8 access)
     {
-        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), "KBuffer", false, false);
+        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), "KBuffer", access, false, false);
         ASSERT(region);
         return adopt(*new KBufferImpl(region.release_nonnull(), size));
     }

-    static NonnullRefPtr<KBufferImpl> copy(const void* data, size_t size)
+    static NonnullRefPtr<KBufferImpl> copy(const void* data, size_t size, u8 access)
     {
-        auto buffer = create_with_size(size);
+        auto buffer = create_with_size(size, access);
         memcpy(buffer->data(), data, size);
         return buffer;
     }

@@ -55,14 +55,14 @@ private:
 class KBuffer {
 public:
-    static KBuffer create_with_size(size_t size)
+    static KBuffer create_with_size(size_t size, u8 access = Region::Access::Read | Region::Access::Write)
     {
-        return KBuffer(KBufferImpl::create_with_size(size));
+        return KBuffer(KBufferImpl::create_with_size(size, access));
     }

-    static KBuffer copy(const void* data, size_t size)
+    static KBuffer copy(const void* data, size_t size, u8 access = Region::Access::Read | Region::Access::Write)
     {
-        return KBuffer(KBufferImpl::copy(data, size));
+        return KBuffer(KBufferImpl::copy(data, size, access));
     }

     u8* data() { return m_impl->data(); }

@@ -74,8 +74,8 @@ public:
     const KBufferImpl& impl() const { return m_impl; }

-    KBuffer(const ByteBuffer& buffer)
-        : m_impl(KBufferImpl::copy(buffer.data(), buffer.size()))
+    KBuffer(const ByteBuffer& buffer, u8 access = Region::Access::Read | Region::Access::Write)
+        : m_impl(KBufferImpl::copy(buffer.data(), buffer.size(), access))
     {
     }
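
Because the new access parameters default to Region::Access::Read | Region::Access::Write, existing KBuffer callers compile unchanged and now get non-executable memory; only callers that genuinely need executable buffers must opt in. A short usage sketch with hypothetical sizes and data:

    auto scratch = KBuffer::create_with_size(4 * PAGE_SIZE);  // Read | Write by default
    auto snapshot = KBuffer::copy(source_ptr, source_size);   // Read | Write by default
    auto code_buf = KBuffer::create_with_size(PAGE_SIZE,      // explicit executable opt-in
        Region::Access::Read | Region::Access::Write | Region::Access::Execute);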

Changed file 2 of 7:

@@ -17,7 +17,7 @@ KBuffer KBufferBuilder::build()
 }

 KBufferBuilder::KBufferBuilder()
-    : m_buffer(KBuffer::create_with_size(1048576 * 4))
+    : m_buffer(KBuffer::create_with_size(4 * MB, Region::Access::Read | Region::Access::Write))
 {
 }

Changed file 3 of 7:

@@ -1019,7 +1019,7 @@ void create_signal_trampolines()
     InterruptDisabler disabler;

     // NOTE: We leak this region.
-    auto* trampoline_region = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Signal trampolines").leak_ptr();
+    auto* trampoline_region = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Signal trampolines", Region::Access::Read | Region::Access::Write | Region::Access::Execute).leak_ptr();
     g_return_to_ring3_from_signal_trampoline = trampoline_region->vaddr();

     u8* trampoline = (u8*)asm_signal_trampoline;

@@ -1035,15 +1035,11 @@ void create_signal_trampolines()
 void create_kernel_info_page()
 {
-    auto* info_page_region_for_userspace = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Kernel info page").leak_ptr();
-    auto* info_page_region_for_kernel = MM.allocate_kernel_region_with_vmobject(info_page_region_for_userspace->vmobject(), PAGE_SIZE, "Kernel info page").leak_ptr();
+    auto* info_page_region_for_userspace = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Kernel info page", Region::Access::Read).leak_ptr();
+    auto* info_page_region_for_kernel = MM.allocate_kernel_region_with_vmobject(info_page_region_for_userspace->vmobject(), PAGE_SIZE, "Kernel info page", Region::Access::Read | Region::Access::Write).leak_ptr();
     s_info_page_address_for_userspace = info_page_region_for_userspace->vaddr();
     s_info_page_address_for_kernel = info_page_region_for_kernel->vaddr();
     memset(s_info_page_address_for_kernel.as_ptr(), 0, PAGE_SIZE);
-    info_page_region_for_userspace->set_writable(false);
-    info_page_region_for_userspace->remap();
 }

 int Process::sys$restore_signal_mask(u32 mask)
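
Two things stand out in this file. The signal trampoline region is the one place that legitimately requests Execute, since the kernel copies trampoline code into it for userspace to run. And the kernel info page now shows the dual-mapping pattern the new API enables: one VMObject mapped read-only for userspace and read/write for the kernel, which makes the old set_writable(false) / remap() fixup unnecessary. A condensed sketch of that pattern, using the names from the diff:

    // One physical page, two views with different permissions.
    auto* user_view = MM.allocate_user_accessible_kernel_region(PAGE_SIZE,
        "Kernel info page", Region::Access::Read).leak_ptr();
    auto* kernel_view = MM.allocate_kernel_region_with_vmobject(user_view->vmobject(),
        PAGE_SIZE, "Kernel info page", Region::Access::Read | Region::Access::Write).leak_ptr();

    // The kernel writes through its read/write view; userspace can only read.
    memset(kernel_view->vaddr().as_ptr(), 0, PAGE_SIZE);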

Changed file 4 of 7:

@@ -84,7 +84,7 @@ Thread::Thread(Process& process)
     m_tss.cr3 = m_process.page_directory().cr3();

     if (m_process.is_ring0()) {
-        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), false, true);
+        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), Region::Access::Read | Region::Access::Write, false, true);
         m_kernel_stack_region->set_stack(true);
         m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
         m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;

@@ -92,7 +92,7 @@ Thread::Thread(Process& process)
         kprintf("Allocated ring0 stack @ %p - %p\n", m_kernel_stack_base, m_kernel_stack_top);
     } else {
         // Ring3 processes need a separate stack for Ring0.
-        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), false, true);
+        m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid), Region::Access::Read | Region::Access::Write, false, true);
         m_kernel_stack_region->set_stack(true);
         m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
         m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;

Changed file 5 of 7:

@@ -334,7 +334,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return region->handle_fault(fault);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, bool user_accessible, bool should_commit)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit)
 {
     InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));

@@ -342,9 +342,9 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     ASSERT(range.is_valid());
     OwnPtr<Region> region;
     if (user_accessible)
-        region = Region::create_user_accessible(range, name, PROT_READ | PROT_WRITE | PROT_EXEC);
+        region = Region::create_user_accessible(range, name, access);
     else
-        region = Region::create_kernel_only(range, name, PROT_READ | PROT_WRITE | PROT_EXEC);
+        region = Region::create_kernel_only(range, name, access);
     region->map(kernel_page_directory());
     // FIXME: It would be cool if these could zero-fill on demand instead.
     if (should_commit)

@@ -352,18 +352,18 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     return region;
 }

-OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name)
+OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access)
 {
-    return allocate_kernel_region(size, name, true);
+    return allocate_kernel_region(size, name, access, true);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access)
 {
     InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     ASSERT(range.is_valid());
-    auto region = make<Region>(range, vmobject, 0, name, PROT_READ | PROT_WRITE | PROT_EXEC);
+    auto region = make<Region>(range, vmobject, 0, name, access);
     region->map(kernel_page_directory());
     return region;
 }
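
The access mask travels as a u8 and is combined with bitwise OR, which implies Region::Access is a set of power-of-two flags. The definitions live in Region.h, which is not part of this diff; a plausible reconstruction, stated here only as an assumption:

    // Assumed shape of the flags (not shown in this commit):
    enum Access : u8 {
        Read = 1,
        Write = 2,
        Execute = 4,
    };
    // e.g. Read | Write == 3, Read | Write | Execute == 7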

Changed file 6 of 7:

@@ -60,9 +60,9 @@ public:
     void map_for_kernel(VirtualAddress, PhysicalAddress, bool cache_disabled = false);

-    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, bool user_accessible = false, bool should_commit = true);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name);
-    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name);
+    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access);
+    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access);

     unsigned user_physical_pages() const { return m_user_physical_pages; }
     unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }

Changed file 7 of 7:

@@ -107,7 +107,7 @@ String ELFLoader::symbolicate(u32 address, u32* out_offset) const
     SortedSymbol* sorted_symbols = nullptr;
 #ifdef KERNEL
     if (!m_sorted_symbols_region) {
-        m_sorted_symbols_region = MM.allocate_kernel_region(PAGE_ROUND_UP(m_image.symbol_count() * sizeof(SortedSymbol)), "Sorted symbols");
+        m_sorted_symbols_region = MM.allocate_kernel_region(PAGE_ROUND_UP(m_image.symbol_count() * sizeof(SortedSymbol)), "Sorted symbols", Region::Access::Read | Region::Access::Write);
         sorted_symbols = (SortedSymbol*)m_sorted_symbols_region->vaddr().as_ptr();
         size_t index = 0;
         m_image.for_each_symbol([&](auto& symbol) {