Mirror of https://github.com/RGBCube/serenity (synced 2025-07-25 02:27:35 +00:00)
Kernel: Take the RegionTree spinlock when inspecting tree from outside
This patch adds RegionTree::get_lock(), which exposes the internal lock inside RegionTree. We can then lock it from the outside when doing lookups or traversal. This solution is not very beautiful; we should find a way to protect this data with SpinlockProtected or something similar. This is a stopgap patch to try to fix the currently flaky CI.
commit f0f97e1db0
parent e3e1d79a7d
4 changed files with 21 additions and 3 deletions
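The pattern applied at each affected call site looks roughly like the sketch below, a simplified rendering of one of the touched functions (AddressSpace::amount_virtual(); the return statement is assumed, since it falls outside the hunk shown in the diff):

// Sketch of the external-locking pattern this patch introduces: code that walks
// the RegionTree from outside now also holds the tree's own spinlock.
size_t AddressSpace::amount_virtual() const
{
    SpinlockLocker lock(m_lock);                          // AddressSpace's own lock, taken here before this patch too
    SpinlockLocker tree_locker(m_region_tree.get_lock()); // new: RegionTree's internal lock, exposed via get_lock()
    size_t amount = 0;
    for (auto const& region : m_region_tree.regions()) {  // traversal now runs under tree_locker
        amount += region.size();
    }
    return amount; // assumed; not visible in the hunk below
}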
Kernel/Memory/AddressSpace.cpp

@@ -230,6 +230,7 @@ void AddressSpace::deallocate_region(Region& region)
 NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     auto did_remove = m_region_tree.regions().remove(region.vaddr().get());
     VERIFY(did_remove);
     return NonnullOwnPtr { NonnullOwnPtr<Region>::Adopt, region };
@@ -238,6 +239,7 @@ NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     auto* found_region = m_region_tree.regions().find(range.base().get());
     if (!found_region)
         return nullptr;
@@ -251,6 +253,7 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 Region* AddressSpace::find_region_containing(VirtualRange const& range)
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     auto* candidate = m_region_tree.regions().find_largest_not_above(range.base().get());
     if (!candidate)
         return nullptr;
@@ -263,6 +266,7 @@ ErrorOr<Vector<Region*>> AddressSpace::find_regions_intersecting(VirtualRange co
     size_t total_size_collected = 0;

     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());

     auto* found_region = m_region_tree.regions().find_largest_not_above(range.base().get());
     if (!found_region)
@@ -313,6 +317,7 @@ void AddressSpace::dump_regions()
         addr_padding, addr_padding, addr_padding);

     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());

     for (auto const& region : m_region_tree.regions()) {
         dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
@@ -334,6 +339,7 @@ void AddressSpace::remove_all_regions(Badge<Process>)
 {
     SpinlockLocker pd_locker(m_page_directory->get_lock());
     SpinlockLocker mm_locker(s_mm_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     for (auto& region : m_region_tree.regions())
         region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
 }
@@ -344,6 +350,7 @@ void AddressSpace::remove_all_regions(Badge<Process>)
 size_t AddressSpace::amount_dirty_private() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
     // The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
     // That's probably a situation that needs to be looked at in general.
@@ -358,6 +365,7 @@ size_t AddressSpace::amount_dirty_private() const
 ErrorOr<size_t> AddressSpace::amount_clean_inode() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     HashTable<InodeVMObject const*> vmobjects;
     for (auto const& region : m_region_tree.regions()) {
         if (region.vmobject().is_inode())
@@ -372,6 +380,7 @@ ErrorOr<size_t> AddressSpace::amount_clean_inode() const
 size_t AddressSpace::amount_virtual() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     size_t amount = 0;
     for (auto const& region : m_region_tree.regions()) {
         amount += region.size();
@@ -382,6 +391,7 @@ size_t AddressSpace::amount_virtual() const
 size_t AddressSpace::amount_resident() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     // FIXME: This will double count if multiple regions use the same physical page.
     size_t amount = 0;
     for (auto const& region : m_region_tree.regions()) {
@@ -393,6 +403,7 @@ size_t AddressSpace::amount_resident() const
 size_t AddressSpace::amount_shared() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     // FIXME: This will double count if multiple regions use the same physical page.
     // FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
     // and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
@@ -407,6 +418,7 @@ size_t AddressSpace::amount_shared() const
 size_t AddressSpace::amount_purgeable_volatile() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     size_t amount = 0;
     for (auto const& region : m_region_tree.regions()) {
         if (!region.vmobject().is_anonymous())
@@ -421,6 +433,7 @@ size_t AddressSpace::amount_purgeable_volatile() const
 size_t AddressSpace::amount_purgeable_nonvolatile() const
 {
     SpinlockLocker lock(m_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     size_t amount = 0;
     for (auto const& region : m_region_tree.regions()) {
         if (!region.vmobject().is_anonymous())
Kernel/Memory/AddressSpace.h

@@ -26,8 +26,6 @@ public:
     PageDirectory& page_directory() { return *m_page_directory; }
     PageDirectory const& page_directory() const { return *m_page_directory; }

-    size_t region_count() const { return m_region_tree.regions().size(); }
-
     auto& regions() { return m_region_tree.regions(); }
     auto const& regions() const { return m_region_tree.regions(); }

Kernel/Memory/MemoryManager.cpp

@@ -664,6 +664,8 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
         return nullptr;

     SpinlockLocker lock(s_mm_lock);
+    SpinlockLocker tree_locker(MM.m_region_tree.get_lock());
+
     auto* region = MM.m_region_tree.regions().find_largest_not_above(vaddr.get());
     if (!region || !region->contains(vaddr))
         return nullptr;
@@ -1167,6 +1169,7 @@ void MemoryManager::unregister_kernel_region(Region& region)
 {
     VERIFY(region.is_kernel());
     SpinlockLocker lock(s_mm_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     m_region_tree.regions().remove(region.vaddr().get());
 }

@@ -1181,6 +1184,7 @@ void MemoryManager::dump_kernel_regions()
     dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
     SpinlockLocker lock(s_mm_lock);
+    SpinlockLocker tree_locker(m_region_tree.get_lock());
     for (auto const& region : m_region_tree.regions()) {
         dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
             region.vaddr().get(),
Kernel/Memory/RegionTree.h

@@ -49,12 +49,15 @@ public:

     void delete_all_regions_assuming_they_are_unmapped();

+    // FIXME: Access the region tree through a SpinlockProtected or similar.
+    RecursiveSpinlock& get_lock() const { return m_lock; }
+
 private:
     ErrorOr<VirtualRange> allocate_range_anywhere(size_t size, size_t alignment = PAGE_SIZE);
     ErrorOr<VirtualRange> allocate_range_specific(VirtualAddress base, size_t size);
     ErrorOr<VirtualRange> allocate_range_randomized(size_t size, size_t alignment = PAGE_SIZE);

-    Spinlock m_lock;
+    RecursiveSpinlock mutable m_lock;

     IntrusiveRedBlackTree<&Region::m_tree_node> m_regions;
     VirtualRange const m_total_range;
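The FIXME added to RegionTree.h points at the longer-term shape the commit message asks for. Below is a minimal sketch of that direction, assuming SerenityOS's SpinlockProtected<T> wrapper and its with() accessor; the with_regions() helper is hypothetical and not part of this commit:

// Hypothetical follow-up: keep the tree inside a SpinlockProtected so outside
// code can only reach it while the lock is held, instead of handing out get_lock().
class RegionTree {
public:
    template<typename Callback>
    decltype(auto) with_regions(Callback callback)
    {
        // Assumes SpinlockProtected::with(): lock, pass a reference to the callback, unlock.
        return m_regions.with([&](auto& regions) { return callback(regions); });
    }

private:
    SpinlockProtected<IntrusiveRedBlackTree<&Region::m_tree_node>> m_regions;
};

A caller such as AddressSpace::amount_virtual() would then do its traversal inside m_region_tree.with_regions([&](auto& regions) { ... }) and could no longer reach the tree without holding the lock.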