
Kernel: Wrap RegionTree objects in SpinlockProtected

This makes locking them much more straightforward, and we can remove
a bunch of confusing use of AddressSpace::m_lock. That lock will also
be converted to use of SpinlockProtected in a subsequent patch.
Andreas Kling 2022-08-23 12:28:04 +02:00
parent 352d6545a9
commit dc9d2c1b10
9 changed files with 251 additions and 234 deletions
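
For readers unfamiliar with the pattern this commit adopts, the following is a minimal userspace sketch of what a SpinlockProtected-style wrapper buys you. It is an illustration only, not the Kernel implementation: the LockProtected class and its std::mutex are stand-ins for the real SpinlockProtected and its Spinlock (which also takes a LockRank, as seen in the constructor change below), and the RegionTree usage in the trailing comment simply mirrors calls visible in the diff.

#include <mutex>
#include <utility>

// Illustration only: a userspace stand-in for the Kernel's SpinlockProtected<T>.
// The wrapped value is private, so the only way to reach it is through with(),
// which holds the lock for the duration of the callback and forwards its result.
template<typename T>
class LockProtected {
public:
    template<typename... Args>
    explicit LockProtected(Args&&... args)
        : m_value(std::forward<Args>(args)...)
    {
    }

    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::lock_guard locker(m_lock);
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value; // never handed out by reference outside of with()
};

// Usage sketch, mirroring the shape of the calls in the diff below:
//
//     LockProtected<RegionTree> m_region_tree { total_range };
//     auto range = m_region_tree.with([&](auto& region_tree) {
//         return region_tree.total_range();
//     });
//
// Forgetting to take the lock becomes impossible, because there is no other way
// to name the wrapped RegionTree; that is what lets this commit drop the manual
// SpinlockLocker(m_lock) / get_lock() pairs seen on the removed lines.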

Kernel/Memory/AddressSpace.cpp

@@ -25,7 +25,7 @@ ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const
VirtualRange total_range = [&]() -> VirtualRange {
if (parent)
return parent->m_region_tree.total_range();
return parent->m_total_range;
constexpr FlatPtr userspace_range_base = USER_RANGE_BASE;
FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;
size_t random_offset = (get_fast_random<u8>() % 2 * MiB) & PAGE_MASK;
@@ -40,7 +40,8 @@ ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const
AddressSpace::AddressSpace(NonnullLockRefPtr<PageDirectory> page_directory, VirtualRange total_range)
: m_page_directory(move(page_directory))
, m_region_tree(total_range)
, m_total_range(total_range)
, m_region_tree(LockRank::None, total_range)
{
}
@@ -148,8 +149,10 @@ ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_re
if (source_region.should_cow(page_offset_in_source_region + i))
TRY(new_region->set_should_cow(i, true));
}
SpinlockLocker locker(m_lock);
TRY(m_region_tree.place_specifically(*new_region, range));
TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
TRY(region_tree.place_specifically(*new_region, range));
return {};
}));
return new_region.leak_ptr();
}
@@ -164,11 +167,14 @@ ErrorOr<Region*> AddressSpace::allocate_region(RandomizeVirtualAddress randomize
region_name = TRY(KString::try_create(name));
auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot)));
if (requested_address.is_null()) {
TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
} else {
TRY(m_region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
}
TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
if (requested_address.is_null()) {
TRY(region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
} else {
TRY(region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
}
return {};
}));
TRY(region->map(page_directory(), ShouldFlushTLB::No));
return region.leak_ptr();
}
@@ -204,29 +210,29 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(RandomizeVirtualAdd
auto region = TRY(Region::create_unplaced(move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
SpinlockLocker locker(m_lock);
return m_region_tree.with([&](auto& region_tree) -> ErrorOr<Region*> {
if (requested_address.is_null())
TRY(region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
else
TRY(region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
if (requested_address.is_null())
TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
else
TRY(m_region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
ArmedScopeGuard remove_region_from_tree_on_failure = [&] {
// At this point the region is already part of the Process region tree, so we have to make sure
// we remove it from the tree before returning an error, or else the Region tree will contain
// a dangling pointer to the free'd Region instance
region_tree.remove(*region);
};
ArmedScopeGuard remove_region_from_tree_on_failure = [this, &region]() {
// At this point the region is already part of the Process region tree, so we have to make sure
// we remove it from the tree before returning an error, or else the Region tree will contain
// a dangling pointer to the free'd Region instance
m_region_tree.remove(*region);
};
if (prot == PROT_NONE) {
// For PROT_NONE mappings, we don't have to set up any page table mappings.
// We do still need to attach the region to the page_directory though.
region->set_page_directory(page_directory());
} else {
TRY(region->map(page_directory(), ShouldFlushTLB::No));
}
remove_region_from_tree_on_failure.disarm();
return region.leak_ptr();
if (prot == PROT_NONE) {
// For PROT_NONE mappings, we don't have to set up any page table mappings.
// We do still need to attach the region to the page_directory though.
region->set_page_directory(page_directory());
} else {
TRY(region->map(page_directory(), ShouldFlushTLB::No));
}
remove_region_from_tree_on_failure.disarm();
return region.leak_ptr();
});
}
void AddressSpace::deallocate_region(Region& region)
@@ -236,16 +242,14 @@ void AddressSpace::deallocate_region(Region& region)
NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
{
auto did_remove = m_region_tree.remove(region);
auto did_remove = m_region_tree.with([&](auto& region_tree) { return region_tree.remove(region); });
VERIFY(did_remove);
return NonnullOwnPtr { NonnullOwnPtr<Region>::Adopt, region };
}
Region* AddressSpace::find_region_from_range(VirtualRange const& range)
{
SpinlockLocker lock(m_lock);
SpinlockLocker tree_locker(m_region_tree.get_lock());
auto* found_region = m_region_tree.regions().find(range.base().get());
auto* found_region = m_region_tree.with([&](auto& region_tree) { return region_tree.regions().find(range.base().get()); });
if (!found_region)
return nullptr;
auto& region = *found_region;
@@ -257,7 +261,9 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
Region* AddressSpace::find_region_containing(VirtualRange const& range)
{
return m_region_tree.find_region_containing(range);
return m_region_tree.with([&](auto& region_tree) {
return region_tree.find_region_containing(range);
});
}
ErrorOr<Vector<Region*, 4>> AddressSpace::find_regions_intersecting(VirtualRange const& range)
@@ -265,24 +271,23 @@ ErrorOr<Vector<Region*, 4>> AddressSpace::find_regions_intersecting(VirtualRange
Vector<Region*, 4> regions = {};
size_t total_size_collected = 0;
SpinlockLocker lock(m_lock);
SpinlockLocker tree_locker(m_region_tree.get_lock());
return m_region_tree.with([&](auto& region_tree) -> ErrorOr<Vector<Region*, 4>> {
auto* found_region = region_tree.regions().find_largest_not_above(range.base().get());
if (!found_region)
return regions;
for (auto iter = region_tree.regions().begin_from(*found_region); !iter.is_end(); ++iter) {
auto const& iter_range = (*iter).range();
if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
TRY(regions.try_append(&*iter));
auto* found_region = m_region_tree.regions().find_largest_not_above(range.base().get());
if (!found_region)
return regions;
for (auto iter = m_region_tree.regions().begin_from(*found_region); !iter.is_end(); ++iter) {
auto const& iter_range = (*iter).range();
if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
TRY(regions.try_append(&*iter));
total_size_collected += (*iter).size() - iter_range.intersect(range).size();
if (total_size_collected == range.size())
break;
total_size_collected += (*iter).size() - iter_range.intersect(range).size();
if (total_size_collected == range.size())
break;
}
}
}
return regions;
return regions;
});
}
// Carve out a virtual address range from a region and return the two regions on either side
@@ -316,61 +321,63 @@ void AddressSpace::dump_regions()
dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
addr_padding, addr_padding, addr_padding);
SpinlockLocker lock(m_lock);
SpinlockLocker tree_locker(m_region_tree.get_lock());
for (auto const& region : m_region_tree.regions()) {
dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
region.is_readable() ? 'R' : ' ',
region.is_writable() ? 'W' : ' ',
region.is_executable() ? 'X' : ' ',
region.is_shared() ? 'S' : ' ',
region.is_stack() ? 'T' : ' ',
region.is_syscall_region() ? 'C' : ' ',
region.name());
}
m_region_tree.with([&](auto& region_tree) {
for (auto const& region : region_tree.regions()) {
dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
region.is_readable() ? 'R' : ' ',
region.is_writable() ? 'W' : ' ',
region.is_executable() ? 'X' : ' ',
region.is_shared() ? 'S' : ' ',
region.is_stack() ? 'T' : ' ',
region.is_syscall_region() ? 'C' : ' ',
region.name());
}
});
MM.dump_kernel_regions();
}
void AddressSpace::remove_all_regions(Badge<Process>)
{
VERIFY(Thread::current() == g_finalizer);
SpinlockLocker locker(m_lock);
{
SpinlockLocker pd_locker(m_page_directory->get_lock());
SpinlockLocker mm_locker(s_mm_lock);
SpinlockLocker tree_locker(m_region_tree.get_lock());
for (auto& region : m_region_tree.regions())
region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
m_region_tree.with([&](auto& region_tree) {
for (auto& region : region_tree.regions())
region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker, mm_locker);
});
}
m_region_tree.delete_all_regions_assuming_they_are_unmapped();
m_region_tree.with([&](auto& region_tree) {
region_tree.delete_all_regions_assuming_they_are_unmapped();
});
}
size_t AddressSpace::amount_dirty_private() const
{
SpinlockLocker lock(m_lock);
SpinlockLocker tree_locker(m_region_tree.get_lock());
// FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
// The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
// That's probably a situation that needs to be looked at in general.
size_t amount = 0;
for (auto const& region : m_region_tree.regions()) {
if (!region.is_shared())
amount += region.amount_dirty();
}
m_region_tree.with([&](auto& region_tree) {
for (auto const& region : region_tree.regions()) {
if (!region.is_shared())
amount += region.amount_dirty();
}
});
return amount;
}
ErrorOr<size_t> AddressSpace::amount_clean_inode() const
{
SpinlockLocker lock(m_lock);
SpinlockLocker tree_locker(m_region_tree.get_lock());
HashTable<InodeVMObject const*> vmobjects;
for (auto const& region : m_region_tree.regions()) {
if (region.vmobject().is_inode())
TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
}
HashTable<LockRefPtr<InodeVMObject>> vmobjects;
TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
for (auto const& region : region_tree.regions()) {
if (region.vmobject().is_inode())
TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
}
return {};
}));
size_t amount = 0;
for (auto& vmobject : vmobjects)
amount += vmobject->amount_clean();
@@ -379,69 +386,68 @@ ErrorOr<size_t> AddressSpace::amount_clean_inode() const
size_t AddressSpace::amount_virtual() const
{
SpinlockLocker lock(m_lock);
SpinlockLocker tree_locker(m_region_tree.get_lock());
size_t amount = 0;
for (auto const& region : m_region_tree.regions()) {
amount += region.size();
}
m_region_tree.with([&](auto& region_tree) {
for (auto const& region : region_tree.regions()) {
amount += region.size();
}
});
return amount;
}
size_t AddressSpace::amount_resident() const
{
SpinlockLocker lock(m_lock);
SpinlockLocker tree_locker(m_region_tree.get_lock());
// FIXME: This will double count if multiple regions use the same physical page.
size_t amount = 0;
for (auto const& region : m_region_tree.regions()) {
amount += region.amount_resident();
}
m_region_tree.with([&](auto& region_tree) {
for (auto const& region : region_tree.regions()) {
amount += region.amount_resident();
}
});
return amount;
}
size_t AddressSpace::amount_shared() const
{
SpinlockLocker lock(m_lock);
SpinlockLocker tree_locker(m_region_tree.get_lock());
// FIXME: This will double count if multiple regions use the same physical page.
// FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
// and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
// so that every Region contributes +1 ref to each of its PhysicalPages.
size_t amount = 0;
for (auto const& region : m_region_tree.regions()) {
amount += region.amount_shared();
}
m_region_tree.with([&](auto& region_tree) {
for (auto const& region : region_tree.regions()) {
amount += region.amount_shared();
}
});
return amount;
}
size_t AddressSpace::amount_purgeable_volatile() const
{
SpinlockLocker lock(m_lock);
SpinlockLocker tree_locker(m_region_tree.get_lock());
size_t amount = 0;
for (auto const& region : m_region_tree.regions()) {
if (!region.vmobject().is_anonymous())
continue;
auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
if (vmobject.is_purgeable() && vmobject.is_volatile())
amount += region.amount_resident();
}
m_region_tree.with([&](auto& region_tree) {
for (auto const& region : region_tree.regions()) {
if (!region.vmobject().is_anonymous())
continue;
auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
if (vmobject.is_purgeable() && vmobject.is_volatile())
amount += region.amount_resident();
}
});
return amount;
}
size_t AddressSpace::amount_purgeable_nonvolatile() const
{
SpinlockLocker lock(m_lock);
SpinlockLocker tree_locker(m_region_tree.get_lock());
size_t amount = 0;
for (auto const& region : m_region_tree.regions()) {
m_region_tree.with([&](auto& region_tree) {
for (auto const& region : region_tree.regions()) {
if (!region.vmobject().is_anonymous())
continue;
auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
if (vmobject.is_purgeable() && !vmobject.is_volatile())
amount += region.amount_resident();
}
}
});
return amount;
}