mirror of
https://github.com/RGBCube/serenity
synced 2025-05-28 11:45:11 +00:00
Kernel: Fix inconsistent lock acquisition order in kmalloc
We always want to grab the page directory lock before the MM lock. This fixes a deadlock I encountered when building DOOM with make -j4.
This commit is contained in:
parent
27c1135d30
commit
abb84b9fcd
1 changed file with 2 additions and 2 deletions
|
@@ -332,8 +332,8 @@ struct KmallocGlobalData {

     auto cpu_supports_nx = Processor::current().has_nx();

-    SpinlockLocker mm_locker(Memory::s_mm_lock);
     SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
+    SpinlockLocker mm_locker(Memory::s_mm_lock);

     for (auto vaddr = new_subheap_base; !physical_pages.is_empty(); vaddr = vaddr.offset(PAGE_SIZE)) {
         // FIXME: We currently leak physical memory when mapping it into the kmalloc heap.
|
@@ -365,8 +365,8 @@ struct KmallocGlobalData {

     // Make sure the entire kmalloc VM range is backed by page tables.
     // This avoids having to deal with lazy page table allocation during heap expansion.
-    SpinlockLocker mm_locker(Memory::s_mm_lock);
     SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
+    SpinlockLocker mm_locker(Memory::s_mm_lock);
     for (auto vaddr = reserved_region->range().base(); vaddr < reserved_region->range().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
         MM.ensure_pte(MM.kernel_page_directory(), vaddr);
     }
|
|
Loading…
Add table
Add a link
Reference in a new issue