
Kernel: Change Region allocation helpers

We can now create a cacheable Region: when map() is called on a cacheable
Region, the page-table entries covering its virtual memory range are left
with caching enabled; for a non-cacheable Region, the cache-disable bit is
set on each entry instead.
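
(A minimal sketch of the mechanism, distilled from the
Region::map_individual_page_impl() hunk below; apply_cache_policy() is a
hypothetical helper for illustration, while PageTableEntry::set_cache_disabled()
and Region::is_cacheable() are the real calls this commit adds:)

    // Sketch: how a Region's cacheability reaches the hardware page tables.
    // A cacheable Region leaves the PTE's cache-disable (PCD) bit clear;
    // a non-cacheable one (e.g. an MMIO mapping) sets it.
    void apply_cache_policy(PageTableEntry& pte, const Region& region)
    {
        pte.set_cache_disabled(!region.is_cacheable());
    }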

In addition, kernel components can now create a Region that is mapped at a
specific physical address, using the new
allocate_kernel_region(PhysicalAddress, ...) overload.
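
For example, a driver could use the new overload to map device memory uncached
(a sketch: the physical address, size, and region name are made up for
illustration; the signature is the one added to the MemoryManager header below):

    // Map 16 pages of hypothetical MMIO at physical 0xe0000000 into the kernel.
    // For this overload, user_accessible and cacheable both default to false,
    // so the mapping comes out kernel-only and cache-disabled -- the usual
    // choice for device memory.
    auto mmio_region = MM.allocate_kernel_region(
        PhysicalAddress(0xe0000000),
        PAGE_SIZE * 16,
        "Example MMIO",
        Region::Access::Read | Region::Access::Write);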
Author: Liav A (2020-01-09 23:29:31 +02:00), committed by Andreas Kling
parent b913e30011
commit d2b41010c5
7 changed files with 77 additions and 56 deletions

Kernel/Process.cpp

@@ -1313,9 +1313,8 @@ extern "C" void asm_signal_trampoline_end(void);
 void create_signal_trampolines()
 {
     InterruptDisabler disabler;
     // NOTE: We leak this region.
-    auto* trampoline_region = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Signal trampolines", Region::Access::Read | Region::Access::Write | Region::Access::Execute).leak_ptr();
+    auto* trampoline_region = MM.allocate_user_accessible_kernel_region(PAGE_SIZE, "Signal trampolines", Region::Access::Read | Region::Access::Write | Region::Access::Execute, false).leak_ptr();
     g_return_to_ring3_from_signal_trampoline = trampoline_region->vaddr();
     u8* trampoline = (u8*)asm_signal_trampoline;

Kernel/VM/MemoryManager.cpp

@@ -321,7 +321,7 @@ void MemoryManager::create_identity_mapping(PageDirectory& page_directory, Virtu
         pte.set_user_allowed(false);
         pte.set_present(true);
         pte.set_writable(true);
-        page_directory.flush(pte_address);
+        flush_tlb(pte_address);
     }
 }
@@ -394,7 +394,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return region->handle_fault(fault);
 }
 
-OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit, bool cacheable)
 {
     InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
@@ -402,28 +402,47 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     ASSERT(range.is_valid());
     OwnPtr<Region> region;
     if (user_accessible)
-        region = Region::create_user_accessible(range, name, access);
+        region = Region::create_user_accessible(range, name, access, cacheable);
     else
-        region = Region::create_kernel_only(range, name, access);
-    region->map(kernel_page_directory());
+        region = Region::create_kernel_only(range, name, access, cacheable);
+    region->set_page_directory(kernel_page_directory());
     // FIXME: It would be cool if these could zero-fill on demand instead.
     if (should_commit)
         region->commit();
     return region;
 }
 
-OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access)
-{
-    return allocate_kernel_region(size, name, access, true);
-}
-
-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
 {
     InterruptDisabler disabler;
     ASSERT(!(size % PAGE_SIZE));
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     ASSERT(range.is_valid());
-    auto region = make<Region>(range, vmobject, 0, name, access);
+    OwnPtr<Region> region;
+    if (user_accessible)
+        region = Region::create_user_accessible(range, AnonymousVMObject::create_for_physical_range(paddr, size), 0, name, access, cacheable);
+    else
+        region = Region::create_kernel_only(range, AnonymousVMObject::create_for_physical_range(paddr, size), 0, name, access, cacheable);
+    region->map(kernel_page_directory());
+    return region;
+}
+
+OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
+{
+    return allocate_kernel_region(size, name, access, true, true, cacheable);
+}
+
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+{
+    InterruptDisabler disabler;
+    ASSERT(!(size % PAGE_SIZE));
+    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
+    ASSERT(range.is_valid());
+    OwnPtr<Region> region;
+    if (user_accessible)
+        region = Region::create_user_accessible(range, vmobject, 0, name, access, cacheable);
+    else
+        region = Region::create_kernel_only(range, vmobject, 0, name, access, cacheable);
     region->map(kernel_page_directory());
     return region;
 }
@@ -573,6 +592,9 @@ void MemoryManager::flush_entire_tlb()
 void MemoryManager::flush_tlb(VirtualAddress vaddr)
 {
+#ifdef MM_DEBUG
+    dbgprintf("MM: Flush page V%p\n", vaddr.get());
+#endif
     asm volatile("invlpg %0"
         :
         : "m"(*(char*)vaddr.get())

Kernel/VM/MemoryManager.h

@@ -62,9 +62,10 @@ public:
     void map_for_kernel(VirtualAddress, PhysicalAddress, bool cache_disabled = false);
 
-    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access);
-    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access);
+    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
+    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = false);
+    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = false);
 
     unsigned user_physical_pages() const { return m_user_physical_pages; }
     unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }

Kernel/VM/PageDirectory.cpp

@@ -74,15 +74,4 @@ PageDirectory::~PageDirectory()
 #endif
     InterruptDisabler disabler;
     cr3_map().remove(cr3());
 }
-
-void PageDirectory::flush(VirtualAddress vaddr)
-{
-#ifdef MM_DEBUG
-    dbgprintf("MM: Flush page V%p\n", vaddr.get());
-#endif
-    if (!current)
-        return;
-    if (this == &MM.kernel_page_directory() || &current->process().page_directory() == this)
-        MM.flush_tlb(vaddr);
-}

Kernel/VM/PageDirectory.h

@@ -24,8 +24,6 @@ public:
     u32 cr3() const { return m_directory_table->paddr().get(); }
     PageDirectoryPointerTable& table() { return *reinterpret_cast<PageDirectoryPointerTable*>(cr3()); }
 
-    void flush(VirtualAddress);
-
     RangeAllocator& range_allocator() { return m_range_allocator; }
 
     Process* process() { return m_process; }

Kernel/VM/Region.cpp

@@ -9,30 +9,33 @@
 //#define MM_DEBUG
 //#define PAGE_FAULT_DEBUG
 
-Region::Region(const Range& range, const String& name, u8 access)
+Region::Region(const Range& range, const String& name, u8 access, bool cacheable)
     : m_range(range)
     , m_vmobject(AnonymousVMObject::create_with_size(size()))
     , m_name(name)
     , m_access(access)
+    , m_cacheable(cacheable)
 {
     MM.register_region(*this);
 }
 
-Region::Region(const Range& range, NonnullRefPtr<Inode> inode, const String& name, u8 access)
+Region::Region(const Range& range, NonnullRefPtr<Inode> inode, const String& name, u8 access, bool cacheable)
     : m_range(range)
     , m_vmobject(InodeVMObject::create_with_inode(*inode))
     , m_name(name)
     , m_access(access)
+    , m_cacheable(cacheable)
 {
     MM.register_region(*this);
 }
 
-Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access)
+Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable)
     : m_range(range)
     , m_offset_in_vmobject(offset_in_vmobject)
     , m_vmobject(move(vmobject))
     , m_name(name)
     , m_access(access)
+    , m_cacheable(cacheable)
 {
     MM.register_region(*this);
 }
@@ -164,37 +167,37 @@ size_t Region::amount_shared() const
     return bytes;
 }
 
-NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, const StringView& name, u8 access)
+NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, name, access);
+    auto region = make<Region>(range, name, access, cacheable);
     region->m_user_accessible = true;
     return region;
 }
 
-NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access)
+NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access);
+    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable);
     region->m_user_accessible = true;
     return region;
 }
 
-NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<Inode> inode, const StringView& name, u8 access)
+NonnullOwnPtr<Region> Region::create_user_accessible(const Range& range, NonnullRefPtr<Inode> inode, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, move(inode), name, access);
+    auto region = make<Region>(range, move(inode), name, access, cacheable);
     region->m_user_accessible = true;
     return region;
 }
 
-NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, const StringView& name, u8 access)
+NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, name, access);
+    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable);
     region->m_user_accessible = false;
     return region;
 }
 
-NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access)
+NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, const StringView& name, u8 access, bool cacheable)
 {
-    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access);
+    auto region = make<Region>(range, name, access, cacheable);
     region->m_user_accessible = false;
     return region;
 }
@@ -228,6 +231,7 @@ void Region::map_individual_page_impl(size_t page_index)
         pte.set_physical_page_base(0);
         pte.set_present(false);
     } else {
+        pte.set_cache_disabled(!m_cacheable);
         pte.set_physical_page_base(physical_page->paddr().get());
         pte.set_present(is_readable());
         if (should_cow(page_index))
@@ -237,11 +241,11 @@ void Region::map_individual_page_impl(size_t page_index)
         if (g_cpu_supports_nx)
             pte.set_execute_disabled(!is_executable());
         pte.set_user_allowed(is_user_accessible());
-    }
-    m_page_directory->flush(page_vaddr);
 #ifdef MM_DEBUG
     dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
 #endif
+    }
+    MM.flush_tlb(page_vaddr);
 }
 
 void Region::remap_page(size_t page_index)
@@ -263,7 +267,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
         pte.set_present(false);
         pte.set_writable(false);
         pte.set_user_allowed(false);
-        m_page_directory->flush(vaddr);
+        MM.flush_tlb(vaddr);
 #ifdef MM_DEBUG
         auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
         dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr.get(), physical_page ? physical_page->paddr().get() : 0);
@@ -274,11 +278,16 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
     m_page_directory = nullptr;
 }
 
-void Region::map(PageDirectory& page_directory)
+void Region::set_page_directory(PageDirectory& page_directory)
 {
     ASSERT(!m_page_directory || m_page_directory == &page_directory);
     InterruptDisabler disabler;
     m_page_directory = page_directory;
+}
+
+void Region::map(PageDirectory& page_directory)
+{
+    set_page_directory(page_directory);
+    InterruptDisabler disabler;
 #ifdef MM_DEBUG
     dbgprintf("MM: Region::map() will map VMO pages %u - %u (VMO page count: %u)\n", first_page_index(), last_page_index(), vmobject().page_count());
 #endif

Kernel/VM/Region.h

@@ -26,11 +26,11 @@ public:
         Execute = 4,
     };
 
-    static NonnullOwnPtr<Region> create_user_accessible(const Range&, const StringView& name, u8 access);
-    static NonnullOwnPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access);
-    static NonnullOwnPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<Inode>, const StringView& name, u8 access);
-    static NonnullOwnPtr<Region> create_kernel_only(const Range&, const StringView& name, u8 access);
-    static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access);
+    static NonnullOwnPtr<Region> create_user_accessible(const Range&, const StringView& name, u8 access, bool cacheable = true);
+    static NonnullOwnPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable = true);
+    static NonnullOwnPtr<Region> create_user_accessible(const Range&, NonnullRefPtr<Inode>, const StringView& name, u8 access, bool cacheable = true);
+    static NonnullOwnPtr<Region> create_kernel_only(const Range&, const StringView& name, u8 access, bool cacheable = true);
+    static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable = true);
 
     ~Region();
@@ -40,6 +40,7 @@ public:
     bool is_readable() const { return m_access & Access::Read; }
     bool is_writable() const { return m_access & Access::Write; }
     bool is_executable() const { return m_access & Access::Execute; }
+    bool is_cacheable() const { return m_cacheable; }
     const String& name() const { return m_name; }
     unsigned access() const { return m_access; }
@@ -115,6 +116,7 @@ public:
     void set_writable(bool b) { set_access_bit(Access::Write, b); }
     void set_executable(bool b) { set_access_bit(Access::Execute, b); }
 
+    void set_page_directory(PageDirectory&);
     void map(PageDirectory&);
     enum class ShouldDeallocateVirtualMemoryRange {
         No,
@@ -130,9 +132,9 @@ public:
     Region* m_prev { nullptr };
 
     // NOTE: These are public so we can make<> them.
-    Region(const Range&, const String&, u8 access);
-    Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access);
-    Region(const Range&, NonnullRefPtr<Inode>, const String&, u8 access);
+    Region(const Range&, const String&, u8 access, bool cacheable);
+    Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access, bool cacheable);
+    Region(const Range&, NonnullRefPtr<Inode>, const String&, u8 access, bool cacheable);
 
 private:
     Bitmap& ensure_cow_map() const;
@@ -159,6 +161,7 @@ private:
     u8 m_access { 0 };
     bool m_shared { false };
    bool m_user_accessible { false };
+    bool m_cacheable { false };
     bool m_stack { false };
     bool m_mmap { false };
     mutable OwnPtr<Bitmap> m_cow_map;