Kernel: Move region map/unmap operations into the Region class
The more Region can take care of itself, the better.
commit 2cfc43c982
parent 9e03f3ce20
5 changed files with 42 additions and 39 deletions
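At a call site, the change reads like this (a minimal before/after sketch distilled from the hunks below; the surrounding allocation code is elided):

    // Before: MemoryManager performs the mapping/unmapping on the region's behalf.
    MM.map_region(*this, m_regions.last());
    MM.unmap_region(*old_region, false); // bare bool: don't deallocate the VM range

    // After: the Region maps and unmaps itself, with an explicit enum instead of the bool flag.
    m_regions.last().map(*this);
    old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);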
@@ -122,7 +122,7 @@ Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String
     if (!range.is_valid())
         return nullptr;
     m_regions.append(Region::create_user_accessible(range, name, prot_to_region_access_flags(prot)));
-    MM.map_region(*this, m_regions.last());
+    m_regions.last().map(*this);
     if (commit)
         m_regions.last().commit();
     return &m_regions.last();
@@ -134,7 +134,7 @@ Region* Process::allocate_file_backed_region(VirtualAddress vaddr, size_t size,
     if (!range.is_valid())
         return nullptr;
     m_regions.append(Region::create_user_accessible(range, inode, name, prot_to_region_access_flags(prot)));
-    MM.map_region(*this, m_regions.last());
+    m_regions.last().map(*this);
     return &m_regions.last();
 }
 
@@ -145,7 +145,7 @@ Region* Process::allocate_region_with_vmo(VirtualAddress vaddr, size_t size, Non
         return nullptr;
     offset_in_vmo &= PAGE_MASK;
     m_regions.append(Region::create_user_accessible(range, move(vmo), offset_in_vmo, name, prot_to_region_access_flags(prot)));
-    MM.map_region(*this, m_regions.last());
+    m_regions.last().map(*this);
     return &m_regions.last();
 }
 
@@ -259,7 +259,7 @@ int Process::sys$munmap(void* addr, size_t size)
     }
 
     // We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
-    MM.unmap_region(*old_region, false);
+    old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
     deallocate_region(*old_region);
 
     // Instead we give back the unwanted VM manually.
@@ -267,7 +267,7 @@ int Process::sys$munmap(void* addr, size_t size)
 
     // And finally we map the new region(s).
     for (auto* new_region : new_regions) {
-        MM.map_region(*this, *new_region);
+        new_region->map(*this);
     }
     return 0;
 }
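Taken together, the two sys$munmap hunks describe a three-step partial-unmap sequence. A consolidated sketch; the one line that falls between the hunks is not shown above and is reconstructed here as an assumption (the range_to_unmap name in particular is guessed):

    // 1. Unmap the old region, explicitly keeping its virtual range allocated.
    old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
    deallocate_region(*old_region);

    // 2. Give back only the unwanted part of the range by hand (assumed line, not in the hunks).
    page_directory().range_allocator().deallocate(range_to_unmap);

    // 3. Map the region(s) that remain after the split.
    for (auto* new_region : new_regions) {
        new_region->map(*this);
    }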
@@ -313,7 +313,7 @@ Process* Process::fork(RegisterDump& regs)
         dbg() << "fork: cloning Region{" << &region << "} '" << region.name() << "' @ " << region.vaddr();
 #endif
         child->m_regions.append(region.clone());
-        MM.map_region(*child, child->m_regions.last());
+        child->m_regions.last().map(*child);
 
         if (&region == m_master_tls_region)
             child->m_master_tls_region = &child->m_regions.last();
@@ -713,35 +713,6 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region&
     }
 }
 
-bool MemoryManager::unmap_region(Region& region, bool deallocate_range)
-{
-    ASSERT(region.page_directory());
-    InterruptDisabler disabler;
-    for (size_t i = 0; i < region.page_count(); ++i) {
-        auto vaddr = region.vaddr().offset(i * PAGE_SIZE);
-        auto& pte = ensure_pte(*region.page_directory(), vaddr);
-        pte.set_physical_page_base(0);
-        pte.set_present(false);
-        pte.set_writable(false);
-        pte.set_user_allowed(false);
-        region.page_directory()->flush(vaddr);
-#ifdef MM_DEBUG
-        auto& physical_page = region.vmobject().physical_pages()[region.first_page_index() + i];
-        dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
-#endif
-    }
-    if (deallocate_range)
-        region.page_directory()->range_allocator().deallocate(region.range());
-    region.release_page_directory();
-    return true;
-}
-
-bool MemoryManager::map_region(Process& process, Region& region)
-{
-    map_region_at_address(process.page_directory(), region, region.vaddr());
-    return true;
-}
-
 bool MemoryManager::validate_user_read(const Process& process, VirtualAddress vaddr) const
 {
     auto* region = region_from_vaddr(process, vaddr);
@@ -47,9 +47,6 @@ public:
 
     PageFaultResponse handle_page_fault(const PageFault&);
 
-    bool map_region(Process&, Region&);
-    bool unmap_region(Region&, bool deallocate_range = true);
-
     void populate_page_directory(PageDirectory&);
 
     void enter_process_paging_scope(Process&);
@@ -41,7 +41,7 @@ Region::~Region()
     // find the address<->region mappings in an invalid state there.
     InterruptDisabler disabler;
     if (m_page_directory) {
-        MM.unmap_region(*this);
+        unmap(ShouldDeallocateVirtualMemoryRange::Yes);
         ASSERT(!m_page_directory);
     }
     MM.unregister_region(*this);
@@ -192,3 +192,31 @@ void Region::remap_page(size_t index)
 #endif
 }
 
+
+void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
+{
+    InterruptDisabler disabler;
+    ASSERT(page_directory());
+    for (size_t i = 0; i < page_count(); ++i) {
+        auto vaddr = this->vaddr().offset(i * PAGE_SIZE);
+        auto& pte = MM.ensure_pte(*page_directory(), vaddr);
+        pte.set_physical_page_base(0);
+        pte.set_present(false);
+        pte.set_writable(false);
+        pte.set_user_allowed(false);
+        page_directory()->flush(vaddr);
+#ifdef MM_DEBUG
+        auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
+        dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr, physical_page ? physical_page->paddr().get() : 0);
+#endif
+    }
+    if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
+        page_directory()->range_allocator().deallocate(range());
+    release_page_directory();
+}
+
+void Region::map(Process& process)
+{
+    MM.map_region_at_address(process.page_directory(), *this, vaddr());
+}
+
@@ -114,6 +114,13 @@ public:
         m_access &= ~Access::Write;
     }
 
+    void map(Process&);
+    enum class ShouldDeallocateVirtualMemoryRange {
+        No,
+        Yes,
+    };
+    void unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);
+
     void remap_page(size_t index);
 
     // For InlineLinkedListNode
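Note the default argument on unmap(): the old MM.unmap_region(Region&, bool deallocate_range = true) defaulted to deallocating the virtual range, and the new API keeps that default, so the destructor's unmap(ShouldDeallocateVirtualMemoryRange::Yes) matches the old MM.unmap_region(*this) and only sys$munmap has to opt out. A minimal usage sketch (region stands for any mapped Region):

    region.unmap();                                               // deallocates the VM range (the ~Region() case)
    region.unmap(Region::ShouldDeallocateVirtualMemoryRange::No); // keeps the range allocated (the sys$munmap case)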