
Kernel: Use intrusive RegionTree solution for kernel regions as well

This patch ports MemoryManager to RegionTree as well. The biggest
difference between this and the userspace code is that kernel regions
are owned by extant OwnPtr<Region> objects spread around the kernel,
while userspace regions are owned by the AddressSpace itself.

For kernelspace, there are a couple of situations where we need to make
large VM reservations that never get backed by regular VMObjects
(for example the kernel image reservation, or the big kmalloc range.)
Since we can't make a VM reservation without a Region object anymore,
this patch adds a way to create unbacked Region objects that can be
used for this exact purpose. They have no internal VMObject.
Andreas Kling 2022-04-03 13:28:16 +02:00
parent ffe2e77eba
commit e8f543c390
12 changed files with 72 additions and 50 deletions
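
A rough sketch of how a caller can use the new facility (illustrative only, not part of this commit: the function name, address, and size below are made up; only Region::create_unbacked() and VirtualRange come from the patch):

#include <AK/OwnPtr.h>
#include <Kernel/Memory/Region.h>

namespace Kernel {

// Hypothetical call site: hold a large kernel VM reservation for as long as
// this OwnPtr lives. No VMObject and no physical pages are involved; the
// Region constructor registers kernel regions with the MemoryManager
// (see the diff below).
static OwnPtr<Memory::Region> s_big_reservation;

ErrorOr<void> reserve_big_kernel_range()
{
    auto range = Memory::VirtualRange { VirtualAddress { 0xffff'ffff'9000'0000 }, 64 * MiB };
    s_big_reservation = TRY(Memory::Region::create_unbacked(range));
    return {};
}

}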

Kernel/Memory/Region.cpp

@@ -22,6 +22,13 @@
 namespace Kernel::Memory {
 
+Region::Region(VirtualRange const& range)
+    : m_range(range)
+{
+    if (is_kernel())
+        MM.register_kernel_region(*this);
+}
+
 Region::Region(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
     : m_range(range)
     , m_offset_in_vmobject(offset_in_vmobject)
@@ -56,10 +63,7 @@ Region::~Region()
     if (m_page_directory) {
         SpinlockLocker pd_locker(m_page_directory->get_lock());
         if (!is_readable() && !is_writable() && !is_executable()) {
-            // If the region is "PROT_NONE", we didn't map it in the first place,
-            // so all we need to do here is deallocate the VM.
-            if (is_kernel())
-                m_page_directory->range_allocator().deallocate(range());
+            // If the region is "PROT_NONE", we didn't map it in the first place.
         } else {
             SpinlockLocker mm_locker(s_mm_lock);
             unmap_with_locks_held(ShouldDeallocateVirtualRange::Yes, ShouldFlushTLB::Yes, pd_locker, mm_locker);
@@ -68,6 +72,11 @@ Region::~Region()
     }
 }
 
+ErrorOr<NonnullOwnPtr<Region>> Region::create_unbacked(VirtualRange const& range)
+{
+    return adopt_nonnull_own_or_enomem(new (nothrow) Region(range));
+}
+
 ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()
 {
     VERIFY(Process::has_current());
@@ -84,7 +93,7 @@ ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()
         region_name = TRY(m_name->try_clone());
 
     auto region = TRY(Region::try_create_user_accessible(
-        m_range, m_vmobject, m_offset_in_vmobject, move(region_name), access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared));
+        m_range, vmobject(), m_offset_in_vmobject, move(region_name), access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared));
     region->set_mmap(m_mmap);
     region->set_shared(m_shared);
     region->set_syscall_region(is_syscall_region());
@@ -259,7 +268,7 @@ void Region::unmap(ShouldDeallocateVirtualRange should_deallocate_range, ShouldF
     unmap_with_locks_held(should_deallocate_range, should_flush_tlb, pd_locker, mm_locker);
 }
 
-void Region::unmap_with_locks_held(ShouldDeallocateVirtualRange deallocate_range, ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
+void Region::unmap_with_locks_held(ShouldDeallocateVirtualRange, ShouldFlushTLB should_flush_tlb, SpinlockLocker<RecursiveSpinlock>&, SpinlockLocker<RecursiveSpinlock>&)
 {
     if (!m_page_directory)
         return;
@@ -270,10 +279,6 @@ void Region::unmap_with_locks_held(ShouldDeallocateVirtualRange deallocate_range
     }
     if (should_flush_tlb == ShouldFlushTLB::Yes)
         MemoryManager::flush_tlb(m_page_directory, vaddr(), page_count());
-    if (deallocate_range == ShouldDeallocateVirtualRange::Yes) {
-        if (is_kernel())
-            m_page_directory->range_allocator().deallocate(range());
-    }
     m_page_directory = nullptr;
 }
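
A note on the removed deallocation calls above (a hedged reading, with a hypothetical example): before this patch, a kernel Region handed its virtual range back to the page directory's range allocator explicitly; with the RegionTree approach, the reservation presumably ends when the owning Region object is destroyed, so callers simply let their OwnPtr<Region> go out of scope. A minimal sketch under that assumption:

ErrorOr<void> use_temporary_reservation()
{
    // Claim a (made-up) 16 MiB window of kernel address space, unbacked.
    auto reservation = TRY(Memory::Region::create_unbacked(
        Memory::VirtualRange { VirtualAddress { 0xffff'ffff'a000'0000 }, 16 * MiB }));

    // ... map pages into the window as needed ...

    // When `reservation` goes out of scope, the Region is destroyed and the
    // range becomes available again; the explicit
    // range_allocator().deallocate(range()) calls removed above are gone.
    return {};
}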