Kernel: Take the RegionTree spinlock when inspecting tree from outside

This patch adds RegionTree::get_lock() which exposes the internal lock
inside RegionTree. We can then lock it from the outside when doing
lookups or traversal.

This solution is not very beautiful; we should find a way to protect
this data with SpinlockProtected or something similar. This is a stopgap
patch to try to fix the currently flaky CI.
Author: Andreas Kling
Date: 2022-04-05 01:07:23 +02:00
Parent: e3e1d79a7d
Commit: f0f97e1db0
4 changed files with 21 additions and 3 deletions
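The hunks below are all call sites in AddressSpace; the RegionTree side of the change, the new get_lock() accessor itself, is not shown in this excerpt. As a minimal sketch of what the commit message describes, the accessor hands out a reference to the spinlock guarding the tree so callers can take it around lookups and traversal. The member names, the plain Spinlock type, and the regions() accessor shown here are assumptions for illustration, not the verbatim upstream code:

// Sketch only: a RegionTree-style class exposing its internal lock so that
// callers (such as AddressSpace) can hold it across lookups and traversal.
// Spinlock and SpinlockLocker are the kernel's existing primitives from
// Kernel/Locking/Spinlock.h; the tree member is shown schematically.
class RegionTree {
public:
    // Taken by callers (via SpinlockLocker) before touching regions().
    Spinlock& get_lock() const { return m_lock; }

    auto& regions() { return m_regions; }
    auto const& regions() const { return m_regions; }

private:
    mutable Spinlock m_lock;
    // Intrusive red-black tree of Region, keyed by base address; the exact
    // template arguments here are an assumption, not the upstream declaration.
    IntrusiveRedBlackTree<&Region::m_tree_node> m_regions;
};

Each call site in the diff then takes this lock with a second SpinlockLocker, alongside whatever locks it already held, before touching regions().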

@@ -230,6 +230,7 @@ void AddressSpace::deallocate_region(Region& region)
NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
{
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
auto did_remove = m_region_tree.regions().remove(region.vaddr().get());
VERIFY(did_remove);
return NonnullOwnPtr { NonnullOwnPtr<Region>::Adopt, region };
@@ -238,6 +239,7 @@ NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
Region* AddressSpace::find_region_from_range(VirtualRange const& range)
{
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
auto* found_region = m_region_tree.regions().find(range.base().get());
if (!found_region)
return nullptr;
@@ -251,6 +253,7 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
Region* AddressSpace::find_region_containing(VirtualRange const& range)
{
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
auto* candidate = m_region_tree.regions().find_largest_not_above(range.base().get());
if (!candidate)
return nullptr;
@@ -263,6 +266,7 @@ ErrorOr<Vector<Region*>> AddressSpace::find_regions_intersecting(VirtualRange co
size_t total_size_collected = 0;
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
auto* found_region = m_region_tree.regions().find_largest_not_above(range.base().get());
if (!found_region)
@@ -313,6 +317,7 @@ void AddressSpace::dump_regions()
addr_padding, addr_padding, addr_padding);
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
for (auto const& region : m_region_tree.regions()) {
dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
@@ -334,6 +339,7 @@ void AddressSpace::remove_all_regions(Badge<Process>)
{
SpinlockLocker pd_locker(m_page_directory->get_lock());
SpinlockLocker mm_locker(s_mm_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
for (auto& region : m_region_tree.regions())
region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
}
@@ -344,6 +350,7 @@ void AddressSpace::remove_all_regions(Badge<Process>)
size_t AddressSpace::amount_dirty_private() const
{
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
// FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
// The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
// That's probably a situation that needs to be looked at in general.
@@ -358,6 +365,7 @@ size_t AddressSpace::amount_dirty_private() const
ErrorOr<size_t> AddressSpace::amount_clean_inode() const
{
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
HashTable<InodeVMObject const*> vmobjects;
for (auto const& region : m_region_tree.regions()) {
if (region.vmobject().is_inode())
@@ -372,6 +380,7 @@ ErrorOr<size_t> AddressSpace::amount_clean_inode() const
size_t AddressSpace::amount_virtual() const
{
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
size_t amount = 0;
for (auto const& region : m_region_tree.regions()) {
amount += region.size();
@@ -382,6 +391,7 @@ size_t AddressSpace::amount_virtual() const
size_t AddressSpace::amount_resident() const
{
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
// FIXME: This will double count if multiple regions use the same physical page.
size_t amount = 0;
for (auto const& region : m_region_tree.regions()) {
@@ -393,6 +403,7 @@ size_t AddressSpace::amount_resident() const
size_t AddressSpace::amount_shared() const
{
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
// FIXME: This will double count if multiple regions use the same physical page.
// FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
// and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
@@ -407,6 +418,7 @@ size_t AddressSpace::amount_shared() const
size_t AddressSpace::amount_purgeable_volatile() const
{
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
size_t amount = 0;
for (auto const& region : m_region_tree.regions()) {
if (!region.vmobject().is_anonymous())
@@ -421,6 +433,7 @@ size_t AddressSpace::amount_purgeable_volatile() const
size_t AddressSpace::amount_purgeable_nonvolatile() const
{
SpinlockLocker lock(m_lock);
+SpinlockLocker tree_locker(m_region_tree.get_lock());
size_t amount = 0;
for (auto const& region : m_region_tree.regions()) {
if (!region.vmobject().is_anonymous())
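The commit message already points at the nicer long-term shape: protecting the tree with SpinlockProtected (or something similar) so the lock cannot be bypassed from the outside at all. A rough, hypothetical sketch of that direction, not part of this commit, assuming regions() would then return the SpinlockProtected wrapper and that its with() helper forwards the callback's return value:

// Hypothetical follow-up, not from this commit: keep the region tree behind
// SpinlockProtected so every access implicitly holds the lock for exactly
// the scope of the callback, e.g. (field type assumed):
//
//     SpinlockProtected<IntrusiveRedBlackTree<&Region::m_tree_node>> m_regions;

size_t AddressSpace::amount_virtual() const
{
    SpinlockLocker lock(m_lock);
    // regions() is assumed to return the SpinlockProtected wrapper here,
    // rather than the raw tree as it does in this commit.
    return m_region_tree.regions().with([](auto const& regions) {
        size_t amount = 0;
        for (auto const& region : regions)
            amount += region.size();
        return amount;
    });
}

With that shape the explicit tree_locker lines above would disappear, since the tree could no longer be reached without going through its lock.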