Kernel: Wrap RegionTree objects in SpinlockProtected
This makes locking them much more straightforward, and we can remove a bunch of confusing uses of AddressSpace::m_lock. That lock will also be converted to SpinlockProtected in a subsequent patch.
parent 352d6545a9
commit dc9d2c1b10
9 changed files with 251 additions and 234 deletions
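For readers skimming the diff, the pattern is the same in every hunk: the raw RegionTree member becomes a SpinlockProtected wrapper, and callers reach the tree only through with(), which holds the lock for the duration of the callback. Below is a minimal userspace sketch of that idea, not the kernel's implementation: LockProtected is a hypothetical stand-in name, std::mutex substitutes for the kernel Spinlock, and the LockRank parameter visible in the constructor hunk is ignored.

#include <mutex>
#include <utility>

// Hypothetical stand-in for the kernel's SpinlockProtected<T>: the wrapped
// value is private and only reachable through with(), so a caller cannot
// touch it without holding the lock.
template<typename T>
class LockProtected {
public:
    template<typename... Args>
    explicit LockProtected(Args&&... args)
        : m_value(std::forward<Args>(args)...)
    {
    }

    // Acquire the lock, run the callback against the protected value,
    // release the lock, and forward the callback's return value.
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::lock_guard guard(m_lock);
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value;
};

// Usage mirrors the hunks below, e.g. (with a RegionTree-like type):
//     LockProtected<RegionTree> region_tree(kernel_virtual_range());
//     auto* region = region_tree.with([&](auto& tree) {
//         return tree.find_region_containing(address);
//     });

As the dump_kernel_regions() hunk shows, once the loop runs inside with(), the explicit SpinlockLocker on the tree's own lock can simply disappear from the call site.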
@@ -83,7 +83,7 @@ static UNMAP_AFTER_INIT VirtualRange kernel_virtual_range()
 }
 
 UNMAP_AFTER_INIT MemoryManager::MemoryManager()
-    : m_region_tree(kernel_virtual_range())
+    : m_region_tree(LockRank::None, kernel_virtual_range())
 {
     s_the = this;
 
@@ -434,13 +434,13 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
         // Carve out the whole page directory covering the kernel image to make MemoryManager::initialize_physical_pages() happy
         FlatPtr start_of_range = ((FlatPtr)start_of_kernel_image & ~(FlatPtr)0x1fffff);
         FlatPtr end_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
-        MUST(m_region_tree.place_specifically(*MUST(Region::create_unbacked()).leak_ptr(), VirtualRange { VirtualAddress(start_of_range), end_of_range - start_of_range }));
+        MUST(m_region_tree.with([&](auto& region_tree) { return region_tree.place_specifically(*MUST(Region::create_unbacked()).leak_ptr(), VirtualRange { VirtualAddress(start_of_range), end_of_range - start_of_range }); }));
     }
 
     // Allocate a virtual address range for our array
     // This looks awkward, but it basically creates a dummy region to occupy the address range permanently.
     auto& region = *MUST(Region::create_unbacked()).leak_ptr();
-    MUST(m_region_tree.place_anywhere(region, RandomizeVirtualAddress::No, physical_page_array_pages * PAGE_SIZE));
+    MUST(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(region, RandomizeVirtualAddress::No, physical_page_array_pages * PAGE_SIZE); }));
     auto range = region.range();
 
     // Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
@@ -643,7 +643,7 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress address)
     if (is_user_address(address))
         return nullptr;
 
-    return MM.m_region_tree.find_region_containing(address);
+    return MM.m_region_tree.with([&](auto& region_tree) { return region_tree.find_region_containing(address); });
 }
 
 Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space, VirtualAddress vaddr)
@@ -747,7 +747,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
         name_kstring = TRY(KString::try_create(name));
     auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
     auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
-    TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
+    TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
    TRY(region->map(kernel_page_directory()));
     return region;
 }
@@ -790,7 +790,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size
         name_kstring = TRY(KString::try_create(name));
     auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
     auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
-    TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
+    TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
     TRY(region->map(kernel_page_directory()));
     return region;
 }
@@ -803,7 +803,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAdd
     if (!name.is_null())
         name_kstring = TRY(KString::try_create(name));
     auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
-    TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE));
+    TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE); }));
     TRY(region->map(kernel_page_directory()));
     return region;
 }
@@ -817,7 +817,7 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobje
         name_kstring = TRY(KString::try_create(name));
 
     auto region = TRY(Region::create_unplaced(vmobject, 0, move(name_kstring), access, cacheable));
-    TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size));
+    TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size); }));
     TRY(region->map(kernel_page_directory()));
     return region;
 }
@@ -1120,7 +1120,7 @@ bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vadd
 void MemoryManager::unregister_kernel_region(Region& region)
 {
     VERIFY(region.is_kernel());
-    m_region_tree.remove(region);
+    m_region_tree.with([&](auto& region_tree) { region_tree.remove(region); });
 }
 
 void MemoryManager::dump_kernel_regions()
@@ -1134,20 +1134,21 @@ void MemoryManager::dump_kernel_regions()
     dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
     SpinlockLocker lock(s_mm_lock);
-    SpinlockLocker tree_locker(m_region_tree.get_lock());
-    for (auto const& region : m_region_tree.regions()) {
-        dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
-            region.vaddr().get(),
-            region.vaddr().offset(region.size() - 1).get(),
-            region.size(),
-            region.is_readable() ? 'R' : ' ',
-            region.is_writable() ? 'W' : ' ',
-            region.is_executable() ? 'X' : ' ',
-            region.is_shared() ? 'S' : ' ',
-            region.is_stack() ? 'T' : ' ',
-            region.is_syscall_region() ? 'C' : ' ',
-            region.name());
-    }
+    m_region_tree.with([&](auto& region_tree) {
+        for (auto& region : region_tree.regions()) {
+            dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
+                region.vaddr().get(),
+                region.vaddr().offset(region.size() - 1).get(),
+                region.size(),
+                region.is_readable() ? 'R' : ' ',
+                region.is_writable() ? 'W' : ' ',
+                region.is_executable() ? 'X' : ' ',
+                region.is_shared() ? 'S' : ' ',
+                region.is_stack() ? 'T' : ' ',
+                region.is_syscall_region() ? 'C' : ' ',
+                region.name());
+        }
+    });
 }
 
 void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
@@ -1201,7 +1202,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::create_identity_mapped_reg
 ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_unbacked_region_anywhere(size_t size, size_t alignment)
 {
     auto region = TRY(Region::create_unbacked());
-    TRY(m_region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, alignment));
+    TRY(m_region_tree.with([&](auto& region_tree) { return region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, alignment); }));
     return region;
 }
 