diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp
index 50adbb9f29..d4cd3bafad 100644
--- a/Kernel/Memory/AddressSpace.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -230,6 +230,7 @@ void AddressSpace::deallocate_region(Region& region)
 NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     auto did_remove = m_region_tree.regions().remove(region.vaddr().get());
     VERIFY(did_remove);
     return NonnullOwnPtr { NonnullOwnPtr<Region>::Adopt, region };
@@ -238,6 +239,7 @@ NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     auto* found_region = m_region_tree.regions().find(range.base().get());
     if (!found_region)
         return nullptr;
@@ -251,6 +253,7 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 Region* AddressSpace::find_region_containing(VirtualRange const& range)
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     auto* candidate = m_region_tree.regions().find_largest_not_above(range.base().get());
     if (!candidate)
         return nullptr;
@@ -263,6 +266,7 @@ ErrorOr<Vector<Region*>> AddressSpace::find_regions_intersecting(VirtualRange co
     size_t total_size_collected = 0;
 
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
 
     auto* found_region = m_region_tree.regions().find_largest_not_above(range.base().get());
     if (!found_region)
@@ -313,6 +317,7 @@ void AddressSpace::dump_regions()
         addr_padding, addr_padding, addr_padding);
 
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
 
     for (auto const& region : m_region_tree.regions()) {
         dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
@@ -334,6 +339,7 @@ void AddressSpace::remove_all_regions(Badge<Process>)
     {
         SpinlockLocker pd_locker(m_page_directory->get_lock());
         SpinlockLocker mm_locker(s_mm_lock);
+        SpinlockLocker tree_locker(m_region_tree.get_lock());
         for (auto& region : m_region_tree.regions())
             region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
     }
@@ -344,6 +350,7 @@ void AddressSpace::remove_all_regions(Badge<Process>)
 size_t AddressSpace::amount_dirty_private() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
     //        The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
     //        That's probably a situation that needs to be looked at in general.
@@ -358,6 +365,7 @@ size_t AddressSpace::amount_dirty_private() const
 ErrorOr<size_t> AddressSpace::amount_clean_inode() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     HashTable<InodeVMObject const*> vmobjects;
     for (auto const& region : m_region_tree.regions()) {
         if (region.vmobject().is_inode())
@@ -372,6 +380,7 @@ ErrorOr<size_t> AddressSpace::amount_clean_inode() const
 size_t AddressSpace::amount_virtual() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     size_t amount = 0;
     for (auto const& region : m_region_tree.regions()) {
         amount += region.size();
@@ -382,6 +391,7 @@ size_t AddressSpace::amount_virtual() const
 size_t AddressSpace::amount_resident() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     // FIXME: This will double count if multiple regions use the same physical page.
     size_t amount = 0;
     for (auto const& region : m_region_tree.regions()) {
@@ -393,6 +403,7 @@ size_t AddressSpace::amount_resident() const
 size_t AddressSpace::amount_shared() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     // FIXME: This will double count if multiple regions use the same physical page.
     // FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
     //        and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
@@ -407,6 +418,7 @@ size_t AddressSpace::amount_shared() const
 size_t AddressSpace::amount_purgeable_volatile() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     size_t amount = 0;
     for (auto const& region : m_region_tree.regions()) {
         if (!region.vmobject().is_anonymous())
@@ -421,6 +433,7 @@ size_t AddressSpace::amount_purgeable_volatile() const
 size_t AddressSpace::amount_purgeable_nonvolatile() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     size_t amount = 0;
     for (auto const& region : m_region_tree.regions()) {
         if (!region.vmobject().is_anonymous())
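Every AddressSpace.cpp hunk above applies the same pattern: the per-address-space m_lock is taken first, exactly as before, and the RegionTree's own lock is then taken as well before m_region_tree.regions() is read or modified. A condensed sketch of that nesting, distilled from the hunks above rather than copied verbatim from any one of them:

    size_t AddressSpace::amount_virtual() const
    {
        SpinlockLocker lock(m_lock);                           // per-address-space lock, unchanged
        SpinlockLocker tree_locker(m_region_tree.get_lock());  // new: the RegionTree's own lock
        size_t amount = 0;
        for (auto const& region : m_region_tree.regions())
            amount += region.size();
        return amount;
    }

Keeping the acquisition order consistent (m_lock before the tree lock) is what keeps the two spinlocks from deadlocking against each other when different paths take them concurrently.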
diff --git a/Kernel/Memory/AddressSpace.h b/Kernel/Memory/AddressSpace.h
index 664dbf36f0..bcb580eee5 100644
--- a/Kernel/Memory/AddressSpace.h
+++ b/Kernel/Memory/AddressSpace.h
@@ -26,8 +26,6 @@ public:
     PageDirectory& page_directory() { return *m_page_directory; }
     PageDirectory const& page_directory() const { return *m_page_directory; }
 
-    size_t region_count() const { return m_region_tree.regions().size(); }
-
     auto& regions() { return m_region_tree.regions(); }
     auto const& regions() const { return m_region_tree.regions(); }
 
diff --git a/Kernel/Memory/MemoryManager.cpp b/Kernel/Memory/MemoryManager.cpp
index e6cc5b7d74..7bce3b3504 100644
--- a/Kernel/Memory/MemoryManager.cpp
+++ b/Kernel/Memory/MemoryManager.cpp
@@ -664,6 +664,8 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
         return nullptr;
 
     SpinlockLocker lock(s_mm_lock);
+    SpinlockLocker tree_locker(MM.m_region_tree.get_lock());
+
     auto* region = MM.m_region_tree.regions().find_largest_not_above(vaddr.get());
     if (!region || !region->contains(vaddr))
         return nullptr;
@@ -1167,6 +1169,7 @@ void MemoryManager::unregister_kernel_region(Region& region)
 {
     VERIFY(region.is_kernel());
     SpinlockLocker lock(s_mm_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     m_region_tree.regions().remove(region.vaddr().get());
 }
 
@@ -1181,6 +1184,7 @@ void MemoryManager::dump_kernel_regions()
     dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
     SpinlockLocker lock(s_mm_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     for (auto const& region : m_region_tree.regions()) {
         dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
             region.vaddr().get(),
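The kernel_region_from_vaddr() hunk also shows the lookup idiom the red-black tree is used for: find_largest_not_above() returns the region with the highest base address at or below the queried address, and a contains() check then confirms the address actually falls inside that region rather than in a gap after it. Spelled out on its own (an illustrative fragment written as if it were an AddressSpace helper; region_containing is a hypothetical name, not a function from this patch):

    Region* region_containing(VirtualAddress vaddr)
    {
        // Candidate: the region whose base is closest to vaddr without exceeding it.
        auto* candidate = m_region_tree.regions().find_largest_not_above(vaddr.get());
        // The candidate may still end before vaddr, so verify vaddr is inside it.
        if (!candidate || !candidate->contains(vaddr))
            return nullptr;
        return candidate;
    }

Any helper like this walks the tree, so it has to run under the tree lock; that is exactly what the added tree_locker lines guarantee for the existing lookups.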
diff --git a/Kernel/Memory/RegionTree.h b/Kernel/Memory/RegionTree.h
index 8fe7f8ec8b..f94892d583 100644
--- a/Kernel/Memory/RegionTree.h
+++ b/Kernel/Memory/RegionTree.h
@@ -49,12 +49,15 @@ public:
 
     void delete_all_regions_assuming_they_are_unmapped();
 
+    // FIXME: Access the region tree through a SpinlockProtected or similar.
+    RecursiveSpinlock& get_lock() const { return m_lock; }
+
 private:
     ErrorOr<VirtualRange> allocate_range_anywhere(size_t size, size_t alignment = PAGE_SIZE);
     ErrorOr<VirtualRange> allocate_range_specific(VirtualAddress base, size_t size);
     ErrorOr<VirtualRange> allocate_range_randomized(size_t size, size_t alignment = PAGE_SIZE);
 
-    Spinlock m_lock;
+    RecursiveSpinlock mutable m_lock;
 
     IntrusiveRedBlackTree<&Region::m_tree_node> m_regions;
     VirtualRange const m_total_range;
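The FIXME added to RegionTree.h points at a tighter design than handing out the raw lock: wrapping the tree in SpinlockProtected so callers can only reach it through a closure that holds the lock for exactly that scope. A rough sketch of that direction (the exact SpinlockProtected API and member layout here are assumptions, not part of this patch):

    class RegionTree {
    public:
        // Callers only see the tree while the protecting spinlock is held.
        template<typename Callback>
        decltype(auto) with_regions(Callback callback)
        {
            return m_regions.with([&](auto& regions) { return callback(regions); });
        }

    private:
        SpinlockProtected<IntrusiveRedBlackTree<&Region::m_tree_node>> m_regions;
    };

Until something like that lands, the get_lock() accessor lets AddressSpace and MemoryManager take the tree lock explicitly, and switching m_lock from Spinlock to RecursiveSpinlock lets the same CPU re-acquire a lock it already holds, presumably so a caller holding the tree lock can still call into RegionTree methods that take it again without self-deadlocking.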