Mirror of https://github.com/RGBCube/serenity — synced 2025-05-18 20:25:07 +00:00

Kernel: Remove unnecessary counting of VMObject-attached Regions

VMObject already has an IntrusiveList of all the Regions that map it.
We were keeping a counter in addition to this, and only using it in
a single place to avoid iterating over the list in case it only had
1 entry.

Simplify VMObject by removing this counter and always iterating the
list even if there's only 1 entry. :^)
Author: Andreas Kling
Date:   2021-07-25 17:11:50 +02:00
Parent: ae3778c303
Commit: 0d963fd641

3 changed files with 5 additions and 16 deletions

View file

@@ -225,18 +225,12 @@ bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
 bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
 {
-    bool success = true;
     auto& vmobject = this->vmobject();
-    ScopedSpinLock lock(vmobject.m_lock);
-    if (vmobject.is_shared_by_multiple_regions()) {
-        vmobject.for_each_region([&](auto& region) {
-            if (!region.do_remap_vmobject_page(page_index, with_flush))
-                success = false;
-        });
-    } else {
-        if (!do_remap_vmobject_page(page_index, with_flush))
-            success = false;
-    }
+    bool success = true;
+    vmobject.for_each_region([&](auto& region) {
+        if (!region.do_remap_vmobject_page(page_index, with_flush))
+            success = false;
+    });
     return success;
 }

View file

@@ -31,7 +31,7 @@ VMObject::~VMObject()
     }
     MM.unregister_vmobject(*this);
-    VERIFY(m_regions_count.load(AK::MemoryOrder::memory_order_relaxed) == 0);
+    VERIFY(m_regions.is_empty());
 }

View file

@@ -52,19 +52,15 @@ public:
     ALWAYS_INLINE void add_region(Region& region)
     {
         ScopedSpinLock locker(m_lock);
-        m_regions_count++;
         m_regions.append(region);
     }

     ALWAYS_INLINE void remove_region(Region& region)
     {
         ScopedSpinLock locker(m_lock);
-        m_regions_count--;
         m_regions.remove(region);
     }

-    ALWAYS_INLINE bool is_shared_by_multiple_regions() const { return m_regions_count > 1; }
-
     void register_on_deleted_handler(VMObjectDeletedHandler& handler)
     {
         ScopedSpinLock locker(m_on_deleted_lock);
@@ -93,7 +89,6 @@ private:
     VMObject& operator=(VMObject&&) = delete;
     VMObject(VMObject&&) = delete;

-    Atomic<u32, AK::MemoryOrder::memory_order_relaxed> m_regions_count { 0 };
     HashTable<VMObjectDeletedHandler*> m_on_deleted;
     SpinLock<u8> m_on_deleted_lock;