
Kernel: Don't relock MM lock for every page when remapping region

Make sure that callers already hold the MM lock, and we don't have to
worry about reacquiring it every time.
Andreas Kling 2022-01-10 15:08:44 +01:00
parent d7475449cc
commit bdbff9df24

@@ -177,6 +177,8 @@ void Region::set_should_cow(size_t page_index, bool cow)
 bool Region::map_individual_page_impl(size_t page_index)
 {
     VERIFY(m_page_directory->get_lock().is_locked_by_current_processor());
+    VERIFY(s_mm_lock.is_locked_by_current_processor());
+
     auto page_vaddr = vaddr_from_page_index(page_index);
 
     bool user_allowed = page_vaddr.get() >= USER_RANGE_BASE && is_user_address(page_vaddr);
@@ -184,9 +186,6 @@ bool Region::map_individual_page_impl(size_t page_index)
         PANIC("About to map mmap'ed page at a kernel address");
     }
 
-    // NOTE: We have to take the MM lock for PTE's to stay valid while we use them.
-    SpinlockLocker mm_locker(s_mm_lock);
-
     auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
     if (!pte)
         return false;
@@ -215,6 +214,7 @@ bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
     if (!translate_vmobject_page(page_index))
         return true; // not an error, region doesn't map this page
     SpinlockLocker page_lock(m_page_directory->get_lock());
+    SpinlockLocker lock(s_mm_lock);
     VERIFY(physical_page(page_index));
     bool success = map_individual_page_impl(page_index);
     if (with_flush)
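
The change follows a common kernel locking pattern: hoist a lock acquisition out of a per-item helper into the caller, and have the helper merely assert that the lock is already held. Below is a minimal, self-contained sketch of that pattern, not SerenityOS code: s_lock, s_lock_held, map_one_page, and remap_pages are hypothetical stand-ins for s_mm_lock, Region::map_individual_page_impl, and Region::do_remap_vmobject_page, and a tracking flag substitutes for the kernel's is_locked_by_current_processor() check, which std::mutex does not offer.

#include <cassert>
#include <cstddef>
#include <mutex>
#include <vector>

static std::mutex s_lock;        // stand-in for the global s_mm_lock
static bool s_lock_held = false; // std::mutex can't report its holder, so track it by hand

// Per-page helper: asserts that the caller already holds the lock,
// analogous to VERIFY(s_mm_lock.is_locked_by_current_processor()).
// Before this commit, the equivalent helper acquired and released the
// lock itself, once per page.
static bool map_one_page(std::vector<bool>& pages, size_t index)
{
    assert(s_lock_held);
    if (index >= pages.size())
        return false;
    pages[index] = true; // pretend this writes a PTE
    return true;
}

// Caller: takes the lock once for the whole remap loop.
static void remap_pages(std::vector<bool>& pages)
{
    std::lock_guard locker(s_lock);
    s_lock_held = true;
    for (size_t i = 0; i < pages.size(); ++i)
        map_one_page(pages, i);
    s_lock_held = false;
}

int main()
{
    std::vector<bool> pages(8, false);
    remap_pages(pages); // one lock acquisition instead of eight
    return 0;
}

The payoff is that remapping a region of N pages now costs one acquisition of the MM lock instead of N, while the assertion in the helper still catches any caller that forgets the new contract.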