Kernel: Don't expose a region's page directory to the outside world
Now that region manages its own mapping/unmapping, there's no need for the outside world to be able to grab at its page directory.
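In effect, this makes the PageDirectory pointer a private implementation detail of Region: all mapping work flows through Region's own methods, and internal code reaches for m_page_directory directly. A minimal sketch of the resulting class shape, assuming a simplified Region (the real class holds a RefPtr<PageDirectory> and many more members; only m_page_directory, remap_page, unmap, and ShouldDeallocateVirtualMemoryRange appear in the hunks below):

#include <cstddef>

class PageDirectory;

class Region {
public:
    enum class ShouldDeallocateVirtualMemoryRange { No, Yes };

    // Mapping and unmapping are driven entirely through Region's interface.
    void remap_page(size_t index);
    void unmap(ShouldDeallocateVirtualMemoryRange);

    // Removed by this commit -- the raw directory is no longer reachable:
    // PageDirectory* page_directory() { return m_page_directory.ptr(); }

private:
    // Internal methods use the member directly (see the Region.cpp hunks).
    PageDirectory* m_page_directory { nullptr };
};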
Parent: 6ed9cc4717
Commit: 0e8f1d7cb6

3 changed files with 8 additions and 11 deletions
@@ -365,7 +365,6 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 
 bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
 {
-    ASSERT(region.page_directory());
     ASSERT(region.vmobject().is_inode());
 
     auto& vmobject = region.vmobject();
@@ -175,10 +175,10 @@ Bitmap& Region::ensure_cow_map() const
 
 void Region::remap_page(size_t index)
 {
-    ASSERT(page_directory());
+    ASSERT(m_page_directory);
     InterruptDisabler disabler;
     auto page_vaddr = vaddr().offset(index * PAGE_SIZE);
-    auto& pte = MM.ensure_pte(*page_directory(), page_vaddr);
+    auto& pte = MM.ensure_pte(*m_page_directory, page_vaddr);
     auto& physical_page = vmobject().physical_pages()[first_page_index() + index];
     ASSERT(physical_page);
     pte.set_physical_page_base(physical_page->paddr().get());
@@ -188,31 +188,31 @@ void Region::remap_page(size_t index)
     else
         pte.set_writable(is_writable());
     pte.set_user_allowed(is_user_accessible());
-    page_directory()->flush(page_vaddr);
+    m_page_directory->flush(page_vaddr);
 #ifdef MM_DEBUG
-    dbg() << "MM: >> region.remap_page (PD=" << page_directory()->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
+    dbg() << "MM: >> region.remap_page (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << physical_page->paddr() << " (@" << physical_page.ptr() << ")";
 #endif
 }
 
 void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
 {
     InterruptDisabler disabler;
-    ASSERT(page_directory());
+    ASSERT(m_page_directory);
     for (size_t i = 0; i < page_count(); ++i) {
         auto vaddr = this->vaddr().offset(i * PAGE_SIZE);
-        auto& pte = MM.ensure_pte(*page_directory(), vaddr);
+        auto& pte = MM.ensure_pte(*m_page_directory, vaddr);
         pte.set_physical_page_base(0);
         pte.set_present(false);
         pte.set_writable(false);
         pte.set_user_allowed(false);
-        page_directory()->flush(vaddr);
+        m_page_directory->flush(vaddr);
 #ifdef MM_DEBUG
         auto& physical_page = vmobject().physical_pages()[first_page_index() + i];
         dbgprintf("MM: >> Unmapped V%p => P%p <<\n", vaddr.get(), physical_page ? physical_page->paddr().get() : 0);
 #endif
     }
     if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes)
-        page_directory()->range_allocator().deallocate(range());
+        m_page_directory->range_allocator().deallocate(range());
     m_page_directory = nullptr;
 }
 
@@ -89,8 +89,6 @@ public:
     size_t amount_resident() const;
     size_t amount_shared() const;
 
-    PageDirectory* page_directory() { return m_page_directory.ptr(); }
-
     bool should_cow(size_t page_index) const;
     void set_should_cow(size_t page_index, bool);
 
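With the accessor gone from the header, outside code such as MemoryManager can no longer reach into a region's directory; the deleted ASSERT in page_in_from_inode (first hunk) is the external user this removes. A hedged caller-side sketch, compiling against the simplified Region above (evict_region is a hypothetical helper, not from the SerenityOS sources):

// There is no region.page_directory() anymore, so a caller cannot flush
// or walk the directory itself; it asks the region to unmap instead.
void evict_region(Region& region)
{
    region.unmap(Region::ShouldDeallocateVirtualMemoryRange::Yes);
}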