mirror of
https://github.com/RGBCube/serenity
synced 2025-07-03 05:32:13 +00:00
Kernel: Stop taking MM lock while using PD/PT quickmaps
This is no longer required as these quickmaps are now per-CPU. :^)
This commit is contained in:
parent
a838fdfd88
commit
c8375c51ff
3 changed files with 0 additions and 12 deletions
@@ -334,7 +334,6 @@ struct KmallocGlobalData {
     auto cpu_supports_nx = Processor::current().has_nx();

     SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
-    SpinlockLocker mm_locker(Memory::s_mm_lock);

     for (auto vaddr = new_subheap_base; !physical_pages.is_empty(); vaddr = vaddr.offset(PAGE_SIZE)) {
         // FIXME: We currently leak physical memory when mapping it into the kmalloc heap.
@@ -367,7 +366,6 @@ struct KmallocGlobalData {
     // Make sure the entire kmalloc VM range is backed by page tables.
     // This avoids having to deal with lazy page table allocation during heap expansion.
     SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
-    SpinlockLocker mm_locker(Memory::s_mm_lock);
     for (auto vaddr = reserved_region->range().base(); vaddr < reserved_region->range().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
         MM.ensure_pte(MM.kernel_page_directory(), vaddr);
     }
|
Loading…
Add table
Add a link
Reference in a new issue