
Kernel: Move region map/unmap operations into the Region class

The more Region can take care of itself, the better.
Andreas Kling 2019-11-03 20:37:03 +01:00
parent 9e03f3ce20
commit 2cfc43c982
5 changed files with 42 additions and 39 deletions
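In one sentence: call sites stop going through the MemoryManager singleton and ask the Region itself. A minimal before/after sketch of the pattern, using only identifiers from the diff below:

    // Before: the MemoryManager singleton drives mapping and unmapping.
    MM.map_region(*this, m_regions.last());
    MM.unmap_region(*old_region, false);

    // After: the Region maps and unmaps itself, and the bare bool
    // becomes a named enum at the call site.
    m_regions.last().map(*this);
    old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);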

Kernel/Process.cpp

@@ -122,7 +122,7 @@ Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String
     if (!range.is_valid())
         return nullptr;
     m_regions.append(Region::create_user_accessible(range, name, prot_to_region_access_flags(prot)));
-    MM.map_region(*this, m_regions.last());
+    m_regions.last().map(*this);
     if (commit)
         m_regions.last().commit();
     return &m_regions.last();
@@ -134,7 +134,7 @@ Region* Process::allocate_file_backed_region(VirtualAddress vaddr, size_t size,
     if (!range.is_valid())
         return nullptr;
     m_regions.append(Region::create_user_accessible(range, inode, name, prot_to_region_access_flags(prot)));
-    MM.map_region(*this, m_regions.last());
+    m_regions.last().map(*this);
     return &m_regions.last();
 }
@@ -145,7 +145,7 @@ Region* Process::allocate_region_with_vmo(VirtualAddress vaddr, size_t size, Non
         return nullptr;
     offset_in_vmo &= PAGE_MASK;
     m_regions.append(Region::create_user_accessible(range, move(vmo), offset_in_vmo, name, prot_to_region_access_flags(prot)));
-    MM.map_region(*this, m_regions.last());
+    m_regions.last().map(*this);
     return &m_regions.last();
 }
@@ -259,7 +259,7 @@ int Process::sys$munmap(void* addr, size_t size)
     }
 
     // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-    MM.unmap_region(*old_region, false);
+    old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
     deallocate_region(*old_region);
 
     // Instead we give back the unwanted VM manually.
@@ -267,7 +267,7 @@ int Process::sys$munmap(void* addr, size_t size)
 
     // And finally we map the new region(s).
     for (auto* new_region : new_regions) {
-        MM.map_region(*this, *new_region);
+        new_region->map(*this);
     }
     return 0;
 }
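For context, the two sys$munmap hunks above are the splitting case: the old region's page table entries are cleared while its virtual range is deliberately kept reserved, the Region object is destroyed, the unwanted slice of the range is handed back by hand, and the surviving sub-regions are mapped again. A hedged sketch of the whole sequence (the diff elides the manual give-back line; range_to_unmap is an assumed local, and the range_allocator() call mirrors the one visible in Region::unmap() further down):

    // Sketch only, not the verbatim kernel code:
    old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No); // clear PTEs, keep the range reserved
    deallocate_region(*old_region);                                    // drop the Region object
    page_directory().range_allocator().deallocate(range_to_unmap);     // give back only the unwanted VM (assumed name)
    for (auto* new_region : new_regions)                               // remap what survives the split
        new_region->map(*this);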
@@ -313,7 +313,7 @@ Process* Process::fork(RegisterDump& regs)
         dbg() << "fork: cloning Region{" << &region << "} '" << region.name() << "' @ " << region.vaddr();
 #endif
         child->m_regions.append(region.clone());
-        MM.map_region(*child, child->m_regions.last());
+        child->m_regions.last().map(*child);
         if (&region == m_master_tls_region)
             child->m_master_tls_region = &child->m_regions.last();

Kernel/VM/MemoryManager.cpp

@@ -713,35 +713,6 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region&
     }
 }
 
-bool MemoryManager::unmap_region(Region& region, bool deallocate_range)
-{
-    ASSERT(region.page_directory());
-    InterruptDisabler disabler;
-    for (size_t i = 0; i < region.page_count(); ++i) {
-        auto vaddr = region.vaddr().offset(i * PAGE_SIZE);
-        auto& pte = ensure_pte(*region.page_directory(), vaddr);
-        pte.set_physical_page_base(0);
-        pte.set_present(false);
-        pte.set_writable(false);
-        pte.set_user_allowed(false);
-        region.page_directory()->flush(vaddr);
-#ifdef MM_DEBUG
-        auto& physical_page = region.vmobject().physical_pages()[region.first_page_index() + i];
-        dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
-#endif
-    }
-    if (deallocate_range)
-        region.page_directory()->range_allocator().deallocate(region.range());
-    region.release_page_directory();
-    return true;
-}
-
-bool MemoryManager::map_region(Process& process, Region& region)
-{
-    map_region_at_address(process.page_directory(), region, region.vaddr());
-    return true;
-}
-
 bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr) const
 {
     auto* region = region_from_vaddr(process, vaddr);

Kernel/VM/MemoryManager.h

@@ -47,9 +47,6 @@ public:
     PageFaultResponse handle_page_fault(const PageFault&);
 
-    bool map_region(Process&, Region&);
-    bool unmap_region(Region&, bool deallocate_range = true);
-
     void populate_page_directory(PageDirectory&);
 
     void enter_process_paging_scope(Process&);

Kernel/VM/Region.cpp

@@ -41,7 +41,7 @@ Region::~Region()
     // find the address<->region mappings in an invalid state there.
     InterruptDisabler disabler;
     if (m_page_directory) {
-        MM.unmap_region(*this);
+        unmap(ShouldDeallocateVirtualMemoryRange::Yes);
         ASSERT(!m_page_directory);
     }
     MM.unregister_region(*this);
@@ -192,3 +192,31 @@ void Region::remap_page(size_t index)
 #endif
 }
+
+void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
+{
+    InterruptDisabler disabler;
+    ASSERT(page_directory());
+    for (size_t i = 0; i < page_count(); ++i) {
+        auto vaddr = this->vaddr().offset(i * PAGE_SIZE);
+        auto& pte = MM.ensure_pte(*page_directory(), vaddr);
+        pte.set_physical_page_base(0);
+        pte.set_present(false);
+        pte.set_writable(false);
+        pte.set_user_allowed(false);
+        page_directory()->flush(vaddr);
+#ifdef MM_DEBUG
+        auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
+        dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
+#endif
+    }
+    if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
+        page_directory()->range_allocator().deallocate(range());
+    release_page_directory();
+}
+
+void Region::map(Process& process)
+{
+    MM.map_region_at_address(process.page_directory(), *this, vaddr());
+}

Kernel/VM/Region.h

@@ -114,6 +114,13 @@ public:
         m_access &= ~Access::Write;
     }
 
+    void map(Process&);
+    enum class ShouldDeallocateVirtualMemoryRange {
+        No,
+        Yes,
+    };
+    void unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);
+
     void remap_page(size_t index);
 
     // For InlineLinkedListNode
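The default argument makes ShouldDeallocateVirtualMemoryRange::Yes the common case (the ~Region() destructor above spells it out explicitly); only the sys$munmap splitting path opts out. A small usage sketch, where region and old_region are placeholder variables:

    region.unmap();  // ShouldDeallocateVirtualMemoryRange::Yes: PTEs cleared, range returned to the allocator
    old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No); // PTEs cleared, range kept for manual handling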