Mirror of https://github.com/RGBCube/serenity, synced 2025-07-25 15:07:45 +00:00
Kernel: Handle committing pages in regions more gracefully
Sometimes the underlying physical page is present, but we may be unable to allocate the page table needed to map it. Bubble such mapping errors up so that callers can handle them more appropriately.
commit bf268a0185 (parent 1ece93c805)
4 changed files with 58 additions and 31 deletions
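The core of the change is an API shape change: ensure_pte() used to return a PageTableEntry& and therefore had no way to signal failure; it now returns a PageTableEntry*, with nullptr meaning the backing page table could not be allocated, and each layer above propagates that result. A minimal standalone sketch of the pattern, using stand-in types and helper names rather than the kernel's real ones:

#include <cstddef>
#include <cstdio>

struct PageTableEntry {
    bool present = false;
};

// Stand-in for allocate_user_physical_page(): a tiny fixed pool that can be
// exhausted, standing in for real memory pressure.
static PageTableEntry* try_allocate_page_table()
{
    static PageTableEntry pool[1];
    static std::size_t next = 0;
    return next < 1 ? &pool[next++] : nullptr;
}

// Before this commit: returned PageTableEntry& and could not report failure.
// After: returns nullptr when the backing page table cannot be allocated.
static PageTableEntry* ensure_pte()
{
    auto* pte = try_allocate_page_table();
    if (!pte)
        return nullptr; // bubble the failure up instead of crashing here
    return pte;
}

// Callers null-check and propagate the error as a bool.
static bool map_page()
{
    auto* pte = ensure_pte();
    if (!pte)
        return false;
    pte->present = true;
    return true;
}

int main()
{
    std::printf("first map: %s\n", map_page() ? "ok" : "failed");
    std::printf("second map: %s\n", map_page() ? "ok" : "failed"); // pool exhausted
}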
MemoryManager implementation:

@@ -89,18 +89,18 @@ void MemoryManager::protect_kernel_image()
 {
     // Disable writing to the kernel text and rodata segments.
     for (size_t i = (FlatPtr)&start_of_kernel_text; i < (FlatPtr)&start_of_kernel_data; i += PAGE_SIZE) {
-        auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
+        auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
         pte.set_writable(false);
     }
 
     if (Processor::current().has_feature(CPUFeature::NX)) {
         // Disable execution of the kernel data and bss segments, as well as the kernel heap.
         for (size_t i = (FlatPtr)&start_of_kernel_data; i < (FlatPtr)&end_of_kernel_bss; i += PAGE_SIZE) {
-            auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
+            auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
             pte.set_execute_disabled(true);
         }
         for (size_t i = FlatPtr(kmalloc_start); i < FlatPtr(kmalloc_end); i += PAGE_SIZE) {
-            auto& pte = ensure_pte(kernel_page_directory(), VirtualAddress(i));
+            auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
             pte.set_execute_disabled(true);
         }
     }
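Note that protect_kernel_image() simply dereferences the returned pointer: these boot-time kernel-image mappings are treated as must-succeed, so a null result would crash, which is presumably acceptable that early. Fallible runtime call sites null-check instead. A hedged sketch of the two call styles, again with stand-in types:

#include <cstdio>

struct PageTableEntry {
    bool writable = true;
    void set_writable(bool w) { writable = w; }
};

// Stand-in ensure_pte(); always succeeds in this sketch.
static PageTableEntry* ensure_pte()
{
    static PageTableEntry entry;
    return &entry;
}

// Must-succeed call site (boot-time kernel mappings): dereference directly,
// accepting a crash if the pointer were ever null.
static void protect_kernel_mapping()
{
    auto& pte = *ensure_pte();
    pte.set_writable(false);
}

// May-fail call site (runtime region mappings): check and propagate.
static bool map_region_page()
{
    auto* pte = ensure_pte();
    if (!pte)
        return false;
    pte->set_writable(true);
    return true;
}

int main()
{
    protect_kernel_mapping();
    std::printf("region page mapped: %s\n", map_region_page() ? "yes" : "no");
}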
@@ -209,7 +209,7 @@ PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
     return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
 }
 
-PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
+PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(s_mm_lock.own_lock());
@@ -225,6 +225,10 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 #endif
     bool did_purge = false;
     auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
+    if (!page_table) {
+        dbg() << "MM: Unable to allocate page table to map " << vaddr;
+        return nullptr;
+    }
     if (did_purge) {
         // If any memory had to be purged, ensure_pte may have been called as part
         // of the purging process. So we need to re-map the pd in this case to ensure
@@ -247,7 +251,7 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
         ASSERT(result == AK::HashSetResult::InsertedNewEntry);
     }
 
-    return quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
+    return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
 }
 
 void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release)
MemoryManager header:

@@ -195,7 +195,7 @@ private:
     PageTableEntry* quickmap_pt(PhysicalAddress);
 
     PageTableEntry* pte(const PageDirectory&, VirtualAddress);
-    PageTableEntry& ensure_pte(PageDirectory&, VirtualAddress);
+    PageTableEntry* ensure_pte(PageDirectory&, VirtualAddress);
     void release_pte(PageDirectory&, VirtualAddress, bool);
 
     RefPtr<PageDirectory> m_kernel_page_directory;
Region implementation:

@@ -227,38 +227,46 @@ Bitmap& Region::ensure_cow_map() const
     return *m_cow_map;
 }
 
-void Region::map_individual_page_impl(size_t page_index)
+bool Region::map_individual_page_impl(size_t page_index)
 {
     auto page_vaddr = vaddr_from_page_index(page_index);
-    auto& pte = MM.ensure_pte(*m_page_directory, page_vaddr);
+    auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
+    if (!pte) {
+#ifdef MM_DEBUG
+        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << " " << name() << " cannot create PTE for " << page_vaddr;
+#endif
+        return false;
+    }
     auto* page = physical_page(page_index);
     if (!page || (!is_readable() && !is_writable())) {
-        pte.clear();
+        pte->clear();
     } else {
-        pte.set_cache_disabled(!m_cacheable);
-        pte.set_physical_page_base(page->paddr().get());
-        pte.set_present(true);
+        pte->set_cache_disabled(!m_cacheable);
+        pte->set_physical_page_base(page->paddr().get());
+        pte->set_present(true);
         if (should_cow(page_index))
-            pte.set_writable(false);
+            pte->set_writable(false);
         else
-            pte.set_writable(is_writable());
+            pte->set_writable(is_writable());
         if (Processor::current().has_feature(CPUFeature::NX))
-            pte.set_execute_disabled(!is_executable());
-        pte.set_user_allowed(is_user_accessible());
+            pte->set_execute_disabled(!is_executable());
+        pte->set_user_allowed(is_user_accessible());
 #ifdef MM_DEBUG
-        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte.raw() << "{" << &pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
+        dbg() << "MM: >> region map (PD=" << m_page_directory->cr3() << ", PTE=" << (void*)pte->raw() << "{" << pte << "}) " << name() << " " << page_vaddr << " => " << page->paddr() << " (@" << page << ")";
 #endif
     }
+    return true;
 }
 
-void Region::remap_page(size_t page_index, bool with_flush)
+bool Region::remap_page(size_t page_index, bool with_flush)
 {
     ASSERT(m_page_directory);
     ScopedSpinLock lock(s_mm_lock);
     ASSERT(physical_page(page_index));
-    map_individual_page_impl(page_index);
+    bool success = map_individual_page_impl(page_index);
     if (with_flush)
         MM.flush_tlb(vaddr_from_page_index(page_index));
+    return success;
 }
 
 void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
@@ -291,16 +299,24 @@ void Region::set_page_directory(PageDirectory& page_directory)
     m_page_directory = page_directory;
 }
 
-void Region::map(PageDirectory& page_directory)
+bool Region::map(PageDirectory& page_directory)
 {
     ScopedSpinLock lock(s_mm_lock);
     set_page_directory(page_directory);
 #ifdef MM_DEBUG
     dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
 #endif
-    for (size_t page_index = 0; page_index < page_count(); ++page_index)
-        map_individual_page_impl(page_index);
-    MM.flush_tlb(vaddr(), page_count());
+    size_t page_index = 0;
+    while (page_index < page_count()) {
+        if (!map_individual_page_impl(page_index))
+            break;
+        ++page_index;
+    }
+    if (page_index > 0) {
+        MM.flush_tlb(vaddr(), page_index);
+        return page_index == page_count();
+    }
+    return false;
 }
 
 void Region::remap()
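The new Region::map() loop is the interesting control-flow change: it maps pages until the first failure, flushes the TLB only for the prefix that was actually mapped, and reports success only if every page was mapped. A simplified, self-contained sketch of that loop (the helper names here are illustrative, not the kernel's):

#include <cstddef>
#include <cstdio>

constexpr std::size_t total_pages = 8;

// Stand-in for map_individual_page_impl(): pretend the page-table allocation
// fails when mapping the sixth page.
static bool map_one_page(std::size_t page_index)
{
    return page_index != 5;
}

static void flush_tlb(std::size_t mapped_pages)
{
    std::printf("flushing TLB for %zu page(s)\n", mapped_pages);
}

// Mirrors the new Region::map() control flow: stop at the first failure,
// flush only the pages that were actually mapped, succeed only if all mapped.
static bool map_region()
{
    std::size_t page_index = 0;
    while (page_index < total_pages) {
        if (!map_one_page(page_index))
            break;
        ++page_index;
    }
    if (page_index > 0) {
        flush_tlb(page_index);
        return page_index == total_pages;
    }
    return false;
}

int main()
{
    std::printf("map_region: %s\n", map_region() ? "ok" : "failed");
}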
@@ -371,7 +387,8 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
         dbg() << "MM: zero_page() but page already present. Fine with me!";
 #endif
-        remap_page(page_index_in_region);
+        if (!remap_page(page_index_in_region))
+            return PageFaultResponse::OutOfMemory;
         return PageFaultResponse::Continue;
     }
 
@@ -389,7 +406,10 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     dbg() << " >> ZERO " << page->paddr();
 #endif
     page_slot = move(page);
-    remap_page(page_index_in_region);
+    if (!remap_page(page_index_in_region)) {
+        klog() << "MM: handle_zero_fault was unable to allocate a page table to map " << page_slot;
+        return PageFaultResponse::OutOfMemory;
+    }
     return PageFaultResponse::Continue;
 }
 
@@ -402,7 +422,8 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
         dbg() << " >> It's a COW page but nobody is sharing it anymore. Remap r/w";
 #endif
         set_should_cow(page_index_in_region, false);
-        remap_page(page_index_in_region);
+        if (!remap_page(page_index_in_region))
+            return PageFaultResponse::OutOfMemory;
         return PageFaultResponse::Continue;
     }
 
@@ -428,7 +449,8 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
     page_slot = move(page);
     MM.unquickmap_page();
     set_should_cow(page_index_in_region, false);
-    remap_page(page_index_in_region);
+    if (!remap_page(page_index_in_region))
+        return PageFaultResponse::OutOfMemory;
     return PageFaultResponse::Continue;
 }
 
@@ -452,7 +474,8 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
         dbg() << ("MM: page_in_from_inode() but page already present. Fine with me!");
 #endif
-        remap_page(page_index_in_region);
+        if (!remap_page(page_index_in_region))
+            return PageFaultResponse::OutOfMemory;
         return PageFaultResponse::Continue;
     }
 
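Each fault handler now consumes remap_page()'s bool and converts a failure into PageFaultResponse::OutOfMemory instead of ignoring it. A minimal sketch of that translation (the enum mirrors the diff; everything else is a stand-in):

#include <cstddef>
#include <cstdio>

enum class PageFaultResponse {
    Continue,
    OutOfMemory,
};

// Stand-in remap_page(); simulate a failed page-table allocation.
static bool remap_page(std::size_t)
{
    return false;
}

// Mirrors the updated handlers: a failed remap becomes an out-of-memory
// response instead of being silently dropped.
static PageFaultResponse handle_zero_fault(std::size_t page_index_in_region)
{
    if (!remap_page(page_index_in_region))
        return PageFaultResponse::OutOfMemory;
    return PageFaultResponse::Continue;
}

int main()
{
    bool oom = handle_zero_fault(0) == PageFaultResponse::OutOfMemory;
    std::printf("fault response: %s\n", oom ? "OutOfMemory" : "Continue");
}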
Region header:

@@ -171,7 +171,7 @@ public:
     void set_executable(bool b) { set_access_bit(Access::Execute, b); }
 
     void set_page_directory(PageDirectory&);
-    void map(PageDirectory&);
+    bool map(PageDirectory&);
     enum class ShouldDeallocateVirtualMemoryRange {
         No,
         Yes,
@@ -201,13 +201,13 @@ private:
     }
 
     bool commit(size_t page_index);
-    void remap_page(size_t index, bool with_flush = true);
+    bool remap_page(size_t index, bool with_flush = true);
 
     PageFaultResponse handle_cow_fault(size_t page_index);
     PageFaultResponse handle_inode_fault(size_t page_index);
     PageFaultResponse handle_zero_fault(size_t page_index);
 
-    void map_individual_page_impl(size_t page_index);
+    bool map_individual_page_impl(size_t page_index);
 
     RefPtr<PageDirectory> m_page_directory;
     Range m_range;