
Kernel: Use intrusive RegionTree solution for kernel regions as well

This patch ports MemoryManager to RegionTree as well. The biggest
difference between this and the userspace code is that kernel regions
are owned by extant OwnPtr<Region> objects spread around the kernel,
while userspace regions are owned by the AddressSpace itself.
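To make that ownership split concrete, here is a simplified sketch; these are not the actual class definitions, and the member names are illustrative only:

    // Simplified sketch of the two ownership models; names are illustrative.
    class AddressSpace {
        // Userspace: the address space itself owns (and frees) its regions
        // through the intrusive tree.
        Memory::RegionTree m_region_tree;
    };

    class SomeKernelSubsystem {
        // Kernelspace: the subsystem holds the owning OwnPtr, while the
        // Region is additionally linked into the kernel's RegionTree via
        // its intrusive tree node.
        OwnPtr<Memory::Region> m_region;
    };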

For kernelspace, there are a couple of situations where we need to make
large VM reservations that never get backed by regular VMObjects
(for example the kernel image reservation, or the big kmalloc range).
Since we can't make a VM reservation without a Region object anymore,
this patch adds a way to create unbacked Region objects that can be
used for this exact purpose. They have no internal VMObject.
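A minimal sketch of what such a factory might look like follows. The name create_unbacked() and its body are assumptions for illustration; only allocate_unbacked_anywhere() is actually visible in the diff below.

    // Hypothetical sketch: constructing a Region with no VMObject attached.
    // The real patch may differ; treat this as an illustration only.
    ErrorOr<NonnullOwnPtr<Region>> Region::create_unbacked()
    {
        // No VMObject is wired up; the Region merely describes a reserved
        // virtual address range.
        return adopt_nonnull_own_or_enomem(new (nothrow) Region);
    }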
Andreas Kling 2022-04-03 13:28:16 +02:00
parent ffe2e77eba
commit e8f543c390
12 changed files with 72 additions and 50 deletions

Kernel/Heap/kmalloc.cpp

@@ -353,20 +353,22 @@ struct KmallocGlobalData {
     void enable_expansion()
     {
         // FIXME: This range can be much bigger on 64-bit, but we need to figure something out for 32-bit.
-        auto virtual_range = MM.kernel_page_directory().range_allocator().try_allocate_anywhere(64 * MiB, 1 * MiB);
+        auto reserved_region = MUST(MM.region_tree().allocate_unbacked_anywhere(64 * MiB, 1 * MiB));
         expansion_data = KmallocGlobalData::ExpansionData {
-            .virtual_range = virtual_range.value(),
-            .next_virtual_address = virtual_range.value().base(),
+            .virtual_range = reserved_region->range(),
+            .next_virtual_address = reserved_region->range().base(),
         };
         // Make sure the entire kmalloc VM range is backed by page tables.
         // This avoids having to deal with lazy page table allocation during heap expansion.
         SpinlockLocker mm_locker(Memory::s_mm_lock);
         SpinlockLocker pd_locker(MM.kernel_page_directory().get_lock());
-        for (auto vaddr = virtual_range.value().base(); vaddr < virtual_range.value().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
+        for (auto vaddr = reserved_region->range().base(); vaddr < reserved_region->range().end(); vaddr = vaddr.offset(PAGE_SIZE)) {
             MM.ensure_pte(MM.kernel_page_directory(), vaddr);
         }
+        (void)reserved_region.leak_ptr();
     }
     struct ExpansionData {
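Taken together, the pattern for a permanent kernelspace VM reservation after this change looks roughly like the sketch below. The wrapper function is hypothetical; the allocate and leak calls mirror the hunk above.

    // Hypothetical caller; the calls mirror the hunk above.
    static void make_permanent_reservation()
    {
        // Reserve 64 MiB of kernel virtual address space at 1 MiB alignment.
        // No VMObject backs this Region; it only claims the range in the tree.
        auto region = MUST(MM.region_tree().allocate_unbacked_anywhere(64 * MiB, 1 * MiB));

        // The reservation must live for the lifetime of the kernel, so the
        // OwnPtr is deliberately leaked; the Region itself remains registered
        // in the kernel's RegionTree.
        (void)region.leak_ptr();
    }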