
Kernel: Convert MemoryManager::allocate_user_physical_page to ErrorOr

This allows us to use the TRY macro at the call sites, instead of clunky null checks.
Idan Horowitz 2022-01-28 16:36:53 +02:00
parent bd5b56cab0
commit 5146315a15
6 changed files with 20 additions and 23 deletions
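
For context on the pattern this commit adopts in every hunk below: AK's ErrorOr<T> carries either a value or an Error, and TRY(...) either unwraps the value or early-returns the error to the caller. Here is a minimal, self-contained toy sketch of that idea in standard C++ (plus the GNU statement-expression extension that GCC and Clang support); it is not the kernel's actual AK implementation, and every name in it is illustrative.

#include <cerrno>
#include <cstdio>
#include <utility>
#include <variant>

// Toy error type; AK::Error is richer, but an errno code suffices here.
struct Error {
    int code;
};

// Toy stand-in for AK::ErrorOr<T>: holds either a T or an Error.
template<typename T>
class ErrorOr {
public:
    ErrorOr(T value)
        : m_state(std::move(value))
    {
    }
    ErrorOr(Error error)
        : m_state(error)
    {
    }
    bool is_error() const { return std::holds_alternative<Error>(m_state); }
    Error release_error() { return std::get<Error>(m_state); }
    T release_value() { return std::move(std::get<T>(m_state)); }

private:
    std::variant<T, Error> m_state;
};

// Toy TRY: evaluate a fallible expression, propagate failure, yield the value.
// Like AK's TRY, it uses a GNU statement expression (GCC/Clang only).
#define TRY(expression)                     \
    ({                                      \
        auto _result = (expression);        \
        if (_result.is_error())             \
            return _result.release_error(); \
        _result.release_value();            \
    })

static ErrorOr<int> allocate_page_id(bool succeed)
{
    if (!succeed)
        return Error { ENOMEM };
    return 42;
}

// The call-site shape this commit enables: one TRY line instead of an
// allocate-then-null-check pair.
static ErrorOr<int> do_work(bool succeed)
{
    int page_id = TRY(allocate_page_id(succeed));
    return page_id * 2;
}

int main()
{
    auto ok = do_work(true);
    std::printf("value: %d\n", ok.release_value()); // prints 84

    auto failed = do_work(false);
    std::printf("is_error: %d\n", failed.is_error()); // prints 1
    return 0;
}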


@@ -175,7 +175,7 @@ ErrorOr<void> FramebufferDevice::create_framebuffer()
     m_buffer_size = calculate_framebuffer_size(info.rect.width, info.rect.height);
     auto region_name = TRY(KString::formatted("VirtGPU FrameBuffer #{}", m_scanout.value()));
     m_framebuffer = TRY(MM.allocate_kernel_region(m_buffer_size * 2, region_name->view(), Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow));
-    auto write_sink_page = MM.allocate_user_physical_page(Memory::MemoryManager::ShouldZeroFill::No).release_nonnull();
+    auto write_sink_page = TRY(MM.allocate_user_physical_page(Memory::MemoryManager::ShouldZeroFill::No));
     auto num_needed_pages = m_framebuffer->vmobject().page_count();
     NonnullRefPtrVector<Memory::PhysicalPage> pages;
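
One behavioral nuance in this hunk: the old call site chained .release_nonnull() straight onto the returned RefPtr with no null check, and since release_nonnull() VERIFYs non-nullness, an out-of-memory allocation became a kernel panic; with TRY, the same failure now propagates as ENOMEM through create_framebuffer()'s ErrorOr<void>. A tiny sketch of the old failure mode, using standard-library stand-ins rather than kernel types:

#include <cassert>
#include <memory>

struct PhysicalPage { };

// Stand-in for MM.allocate_user_physical_page() before this commit: a
// RefPtr-like handle that may be null when allocation fails.
static std::shared_ptr<PhysicalPage> allocate(bool succeed)
{
    return succeed ? std::make_shared<PhysicalPage>() : nullptr;
}

int main()
{
    auto page = allocate(true);
    // Morally what RefPtr::release_nonnull() verifies: had allocate()
    // returned null, this assertion (a VERIFY in the kernel) would abort
    // instead of reporting ENOMEM to the caller.
    assert(page != nullptr);
    return 0;
}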


@@ -322,11 +322,12 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
         page = m_shared_committed_cow_pages->take_one();
     } else {
         dbgln_if(PAGE_FAULT_DEBUG, " >> It's a COW page and it's time to COW!");
-        page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
-        if (page.is_null()) {
+        auto page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
+        if (page_or_error.is_error()) {
             dmesgln("MM: handle_cow_fault was unable to allocate a physical page");
             return PageFaultResponse::OutOfMemory;
         }
+        page = page_or_error.release_value();
     }
     dbgln_if(PAGE_FAULT_DEBUG, " >> COW {} <- {}", page->paddr(), page_slot->paddr());
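
Note why this call site cannot simply use TRY: TRY early-returns the Error itself, but handle_cow_fault() returns a PageFaultResponse, so the error has to be translated manually via is_error()/release_value(). A small self-contained sketch of that domain-translation pattern (toy types again, not kernel code):

#include <cerrno>
#include <variant>

// Compact toy: an ErrorOr is just "value or errno-carrying Error" here.
struct Error {
    int code;
};
template<typename T>
using ErrorOr = std::variant<T, Error>;

enum class PageFaultResponse {
    Continue,
    OutOfMemory,
};

static ErrorOr<int> allocate_page_id()
{
    return Error { ENOMEM }; // always fail, to exercise the error path
}

// TRY would `return` an Error here, which does not convert to
// PageFaultResponse -- hence the explicit unpack seen in the hunk above.
static PageFaultResponse handle_fault()
{
    auto page_or_error = allocate_page_id();
    if (std::holds_alternative<Error>(page_or_error))
        return PageFaultResponse::OutOfMemory; // translate error domains
    int page_id = std::get<int>(page_or_error);
    (void)page_id; // ...map the page, fill it, etc.
    return PageFaultResponse::Continue;
}

int main()
{
    return handle_fault() == PageFaultResponse::OutOfMemory ? 0 : 1;
}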


@@ -572,11 +572,12 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
     return &quickmap_pt(PhysicalAddress(pde.page_table_base()))[page_table_index];
     bool did_purge = false;
-    auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
-    if (!page_table) {
+    auto page_table_or_error = allocate_user_physical_page(ShouldZeroFill::Yes, &did_purge);
+    if (page_table_or_error.is_error()) {
         dbgln("MM: Unable to allocate page table to map {}", vaddr);
         return nullptr;
     }
+    auto page_table = page_table_or_error.release_value();
     if (did_purge) {
         // If any memory had to be purged, ensure_pte may have been called as part
         // of the purging process. So we need to re-map the pd in this case to ensure
@@ -892,7 +893,7 @@ NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page
     return page.release_nonnull();
 }

-RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
+ErrorOr<NonnullRefPtr<PhysicalPage>> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
 {
     SpinlockLocker lock(s_mm_lock);
     auto page = find_free_user_physical_page(false);
@@ -918,7 +919,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
         });
         if (!page) {
             dmesgln("MM: no user physical pages available");
-            return {};
+            return ENOMEM;
         }
     }
@@ -930,7 +931,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
     if (did_purge)
         *did_purge = purged_pages;
-    return page;
+    return page.release_nonnull();
 }

 ErrorOr<NonnullRefPtrVector<PhysicalPage>> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
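
Two details of the new implementation are worth spelling out. First, `return ENOMEM;` compiles because AK's Error is (by design) constructible from an errno code, so an ErrorOr<NonnullRefPtr<PhysicalPage>> can be formed directly from ENOMEM. Second, the allocator still works with a nullable RefPtr internally, so the final `page.release_nonnull()` performs the RefPtr-to-NonnullRefPtr conversion exactly once, at the point where non-nullness has just been verified. A toy of the implicit-conversion half (assumed shapes, not AK's real definitions):

#include <cerrno>
#include <cstdio>
#include <variant>

// Non-explicit construction from an errno code is what allows kernel code
// to write `return ENOMEM;` inside an ErrorOr-returning function.
struct Error {
    Error(int errno_code)
        : code(errno_code)
    {
    }
    int code;
};

struct Page {
    int id;
};

template<typename T>
using ErrorOr = std::variant<T, Error>;

static ErrorOr<Page> allocate(bool succeed)
{
    if (!succeed)
        return ENOMEM; // int -> Error -> ErrorOr, all implicit
    return Page { 42 };
}

int main()
{
    auto result = allocate(false);
    if (std::holds_alternative<Error>(result))
        std::printf("failed: errno %d\n", std::get<Error>(result).code);
    return 0;
}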


@@ -171,7 +171,7 @@ public:
     void uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count);
     NonnullRefPtr<PhysicalPage> allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill = ShouldZeroFill::Yes);
-    RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
+    ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes, bool* did_purge = nullptr);
     ErrorOr<NonnullRefPtr<PhysicalPage>> allocate_supervisor_physical_page();
     ErrorOr<NonnullRefPtrVector<PhysicalPage>> allocate_contiguous_supervisor_physical_pages(size_t size);
     void deallocate_physical_page(PhysicalAddress);


@@ -61,19 +61,13 @@ ErrorOr<NonnullRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace(Vi
     SpinlockLocker lock(s_mm_lock);
 #if ARCH(X86_64)
-    directory->m_pml4t = MM.allocate_user_physical_page();
-    if (!directory->m_pml4t)
-        return ENOMEM;
+    directory->m_pml4t = TRY(MM.allocate_user_physical_page());
 #endif
-    directory->m_directory_table = MM.allocate_user_physical_page();
-    if (!directory->m_directory_table)
-        return ENOMEM;
+    directory->m_directory_table = TRY(MM.allocate_user_physical_page());
     auto kernel_pd_index = (kernel_mapping_base >> 30) & 0x1ffu;
     for (size_t i = 0; i < kernel_pd_index; i++) {
-        directory->m_directory_pages[i] = MM.allocate_user_physical_page();
-        if (!directory->m_directory_pages[i])
-            return ENOMEM;
+        directory->m_directory_pages[i] = TRY(MM.allocate_user_physical_page());
     }

     // Share the top 1 GiB of kernel-only mappings (>=kernel_mapping_base)
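
This hunk shows the payoff most clearly: three allocate/null-check/return-ENOMEM triples collapse into three TRY calls, shrinking the function from 19 lines to 13 (per the hunk header). For reference, AK's TRY is built on a GNU statement expression roughly like the following paraphrase; consult AK/Try.h in the repository for the authoritative definition:

// Paraphrase of AK's TRY (see AK/Try.h). A statement expression evaluates
// its last statement as the value of the whole expression, so TRY(expr)
// either yields expr's unwrapped value or returns the error to the caller.
// Statement expressions are a GNU extension supported by GCC and Clang.
#define TRY(expression)                               \
    ({                                                \
        auto _temporary_result = (expression);        \
        if (_temporary_result.is_error())             \
            return _temporary_result.release_error(); \
        _temporary_result.release_value();            \
    })

Substituted for the toy TRY in the first sketch above, this paraphrase compiles unchanged, since the toy ErrorOr already exposes is_error(), release_error(), and release_value().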


@@ -407,11 +407,12 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
         page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
         dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED COMMITTED {}", page_slot->paddr());
     } else {
-        page_slot = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
-        if (page_slot.is_null()) {
+        auto page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+        if (page_or_error.is_error()) {
             dmesgln("MM: handle_zero_fault was unable to allocate a physical page");
             return PageFaultResponse::OutOfMemory;
         }
+        page_slot = page_or_error.release_value();
         dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED {}", page_slot->paddr());
     }
@@ -495,12 +496,12 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
         return PageFaultResponse::Continue;
     }

-    vmobject_physical_page_entry = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
-    if (vmobject_physical_page_entry.is_null()) {
+    auto vmobject_physical_page_or_error = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
+    if (vmobject_physical_page_or_error.is_error()) {
         dmesgln("MM: handle_inode_fault was unable to allocate a physical page");
         return PageFaultResponse::OutOfMemory;
     }
+    vmobject_physical_page_entry = vmobject_physical_page_or_error.release_value();

     {
         SpinlockLocker mm_locker(s_mm_lock);