Kernel: Don't wrap AddressSpace's RegionTree in SpinlockProtected
Now that AddressSpace itself is always SpinlockProtected, we don't need to also wrap the RegionTree. Whoever has the AddressSpace locked is free to poke around its tree.
parent d3e8eb5918
commit da24a937f5

6 changed files with 212 additions and 265 deletions
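For readers outside the kernel tree, the shape of the change is easiest to see in isolation. Below is a minimal, self-contained model of the pattern being removed; this SpinlockProtected stand-in is built on std::mutex purely so the sketch compiles on its own, while the real SerenityOS type is a kernel spinlock with lock ranks and differs in detail. The point is the nesting: before this commit, every RegionTree access paid for an inner with() lock even though callers already hold the outer AddressSpace lock.

// Minimal model of the SpinlockProtected pattern (illustrative only; the
// real SerenityOS type is a kernel spinlock with lock ranks, not std::mutex).
#include <mutex>
#include <utility>

template<typename T>
class SpinlockProtected {
public:
    template<typename... Args>
    explicit SpinlockProtected(Args&&... args)
        : m_value(std::forward<Args>(args)...)
    {
    }

    // All access goes through with(): the lock is held while the callback runs.
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        std::lock_guard guard(m_lock);
        return callback(m_value);
    }

private:
    std::mutex m_lock;
    T m_value;
};

struct RegionTree { /* ... */ };

// Before: the tree carried its own lock even though the enclosing
// AddressSpace is only ever reached through an outer SpinlockProtected.
struct AddressSpaceBefore {
    SpinlockProtected<RegionTree> m_region_tree;
};

// After: a plain member; the outer lock alone guards it, so code that
// already holds the AddressSpace may touch m_region_tree directly.
struct AddressSpaceAfter {
    RegionTree m_region_tree;
};

int main()
{
    SpinlockProtected<AddressSpaceAfter> address_space;
    address_space.with([](auto& space) {
        (void)space.m_region_tree; // safe: the outer lock is held here
    });
}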
@@ -25,7 +25,7 @@ ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const
 
     VirtualRange total_range = [&]() -> VirtualRange {
         if (parent)
-            return parent->m_total_range;
+            return parent->m_region_tree.total_range();
         constexpr FlatPtr userspace_range_base = USER_RANGE_BASE;
         FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;
         size_t random_offset = (get_fast_random<u8>() % 2 * MiB) & PAGE_MASK;
@@ -40,8 +40,7 @@ ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const
 
 AddressSpace::AddressSpace(NonnullLockRefPtr<PageDirectory> page_directory, VirtualRange total_range)
     : m_page_directory(move(page_directory))
-    , m_total_range(total_range)
-    , m_region_tree(LockRank::None, total_range)
+    , m_region_tree(total_range)
 {
 }
 
@@ -149,10 +148,7 @@ ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_re
         if (source_region.should_cow(page_offset_in_source_region + i))
             TRY(new_region->set_should_cow(i, true));
     }
-    TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
-        TRY(region_tree.place_specifically(*new_region, range));
-        return {};
-    }));
+    TRY(m_region_tree.place_specifically(*new_region, range));
     return new_region.leak_ptr();
 }
 
@@ -167,14 +163,11 @@ ErrorOr<Region*> AddressSpace::allocate_region(RandomizeVirtualAddress randomize
         region_name = TRY(KString::try_create(name));
     auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
     auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot)));
-    TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
-        if (requested_address.is_null()) {
-            TRY(region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
-        } else {
-            TRY(region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
-        }
-        return {};
-    }));
+    if (requested_address.is_null()) {
+        TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
+    } else {
+        TRY(m_region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
+    }
     TRY(region->map(page_directory(), ShouldFlushTLB::No));
     return region.leak_ptr();
 }
@@ -210,29 +203,27 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(RandomizeVirtualAdd
 
     auto region = TRY(Region::create_unplaced(move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
 
-    return m_region_tree.with([&](auto& region_tree) -> ErrorOr<Region*> {
-        if (requested_address.is_null())
-            TRY(region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
-        else
-            TRY(region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
+    if (requested_address.is_null())
+        TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
+    else
+        TRY(m_region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
 
-        ArmedScopeGuard remove_region_from_tree_on_failure = [&] {
-            // At this point the region is already part of the Process region tree, so we have to make sure
-            // we remove it from the tree before returning an error, or else the Region tree will contain
-            // a dangling pointer to the free'd Region instance
-            region_tree.remove(*region);
-        };
+    ArmedScopeGuard remove_region_from_tree_on_failure = [&] {
+        // At this point the region is already part of the Process region tree, so we have to make sure
+        // we remove it from the tree before returning an error, or else the Region tree will contain
+        // a dangling pointer to the free'd Region instance
+        m_region_tree.remove(*region);
+    };
 
-        if (prot == PROT_NONE) {
-            // For PROT_NONE mappings, we don't have to set up any page table mappings.
-            // We do still need to attach the region to the page_directory though.
-            region->set_page_directory(page_directory());
-        } else {
-            TRY(region->map(page_directory(), ShouldFlushTLB::No));
-        }
-        remove_region_from_tree_on_failure.disarm();
-        return region.leak_ptr();
-    });
+    if (prot == PROT_NONE) {
+        // For PROT_NONE mappings, we don't have to set up any page table mappings.
+        // We do still need to attach the region to the page_directory though.
+        region->set_page_directory(page_directory());
+    } else {
+        TRY(region->map(page_directory(), ShouldFlushTLB::No));
+    }
+    remove_region_from_tree_on_failure.disarm();
+    return region.leak_ptr();
 }
 
 void AddressSpace::deallocate_region(Region& region)
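The hunk above keeps AK::ArmedScopeGuard, which undoes the tree insertion if a later step fails. For reference, here is a minimal stand-alone model of that guard; it is illustrative only (the real implementation lives in AK/ScopeGuard.h), but it shows the mechanism: the callback fires at scope exit on any early return, unless disarm() was reached on the success path.

// Minimal model of AK::ArmedScopeGuard (the kernel's version is in
// AK/ScopeGuard.h); std:: types are used here so the sketch compiles alone.
#include <cstdio>
#include <utility>

template<typename Callback>
class ArmedScopeGuard {
public:
    explicit ArmedScopeGuard(Callback callback)
        : m_callback(std::move(callback))
    {
    }

    ~ArmedScopeGuard()
    {
        if (m_armed)
            m_callback(); // runs on early returns and error paths
    }

    void disarm() { m_armed = false; } // call once the operation has succeeded

private:
    Callback m_callback;
    bool m_armed { true };
};

int main()
{
    ArmedScopeGuard undo { [] { std::puts("rolled back"); } };
    bool success = true; // pretend the fallible steps all worked
    if (success)
        undo.disarm(); // nothing is rolled back on the success path
}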
@@ -242,14 +233,14 @@ void AddressSpace::deallocate_region(Region& region)
 
 NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 {
-    auto did_remove = m_region_tree.with([&](auto& region_tree) { return region_tree.remove(region); });
+    auto did_remove = m_region_tree.remove(region);
     VERIFY(did_remove);
     return NonnullOwnPtr { NonnullOwnPtr<Region>::Adopt, region };
 }
 
 Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 {
-    auto* found_region = m_region_tree.with([&](auto& region_tree) { return region_tree.regions().find(range.base().get()); });
+    auto* found_region = m_region_tree.regions().find(range.base().get());
     if (!found_region)
         return nullptr;
     auto& region = *found_region;
@@ -261,9 +252,7 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 
 Region* AddressSpace::find_region_containing(VirtualRange const& range)
 {
-    return m_region_tree.with([&](auto& region_tree) {
-        return region_tree.find_region_containing(range);
-    });
+    return m_region_tree.find_region_containing(range);
 }
 
 ErrorOr<Vector<Region*, 4>> AddressSpace::find_regions_intersecting(VirtualRange const& range)
@@ -271,23 +260,21 @@ ErrorOr<Vector<Region*, 4>> AddressSpace::find_regions_intersecting(VirtualRange
     Vector<Region*, 4> regions = {};
     size_t total_size_collected = 0;
 
-    return m_region_tree.with([&](auto& region_tree) -> ErrorOr<Vector<Region*, 4>> {
-        auto* found_region = region_tree.regions().find_largest_not_above(range.base().get());
-        if (!found_region)
-            return regions;
-        for (auto iter = region_tree.regions().begin_from(*found_region); !iter.is_end(); ++iter) {
-            auto const& iter_range = (*iter).range();
-            if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
-                TRY(regions.try_append(&*iter));
-
-                total_size_collected += (*iter).size() - iter_range.intersect(range).size();
-                if (total_size_collected == range.size())
-                    break;
-            }
-        }
-
-        return regions;
-    });
+    auto* found_region = m_region_tree.regions().find_largest_not_above(range.base().get());
+    if (!found_region)
+        return regions;
+    for (auto iter = m_region_tree.regions().begin_from(*found_region); !iter.is_end(); ++iter) {
+        auto const& iter_range = (*iter).range();
+        if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
+            TRY(regions.try_append(&*iter));
+
+            total_size_collected += (*iter).size() - iter_range.intersect(range).size();
+            if (total_size_collected == range.size())
+                break;
+        }
+    }
+
+    return regions;
 }
 
 // Carve out a virtual address range from a region and return the two regions on either side
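The traversal in this hunk starts at the last region whose base is not above the query's base, since that region may still overlap the query, then walks forward until the collected regions cover the requested size. Its overlap condition is the standard half-open interval check; a tiny stand-alone version, assuming VirtualRange semantics where end() is base() + size():

// Half-open interval overlap, as in the hunk above:
// [a.base, a.end) and [b.base, b.end) intersect iff each starts before
// the other ends.
#include <cassert>
#include <cstdint>

struct Range {
    std::uintptr_t base;
    std::uintptr_t end; // one past the last address
};

constexpr bool overlaps(Range a, Range b)
{
    return a.base < b.end && b.base < a.end;
}

int main()
{
    assert(overlaps({ 0x1000, 0x2000 }, { 0x1fff, 0x3000 }));  // share one byte
    assert(!overlaps({ 0x1000, 0x2000 }, { 0x2000, 0x3000 })); // adjacent only
}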
@@ -321,18 +308,16 @@ void AddressSpace::dump_regions()
     dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
 
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
-                region.is_readable() ? 'R' : ' ',
-                region.is_writable() ? 'W' : ' ',
-                region.is_executable() ? 'X' : ' ',
-                region.is_shared() ? 'S' : ' ',
-                region.is_stack() ? 'T' : ' ',
-                region.is_syscall_region() ? 'C' : ' ',
-                region.name());
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
+            region.is_readable() ? 'R' : ' ',
+            region.is_writable() ? 'W' : ' ',
+            region.is_executable() ? 'X' : ' ',
+            region.is_shared() ? 'S' : ' ',
+            region.is_stack() ? 'T' : ' ',
+            region.is_syscall_region() ? 'C' : ' ',
+            region.name());
+    }
     MM.dump_kernel_regions();
 }
 
@@ -341,15 +326,11 @@ void AddressSpace::remove_all_regions(Badge<Process>)
     VERIFY(Thread::current() == g_finalizer);
     {
         SpinlockLocker pd_locker(m_page_directory->get_lock());
-        m_region_tree.with([&](auto& region_tree) {
-            for (auto& region : region_tree.regions())
-                region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker);
-        });
+        for (auto& region : m_region_tree.regions())
+            region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker);
    }
 
-    m_region_tree.with([&](auto& region_tree) {
-        region_tree.delete_all_regions_assuming_they_are_unmapped();
-    });
+    m_region_tree.delete_all_regions_assuming_they_are_unmapped();
 }
 
 size_t AddressSpace::amount_dirty_private() const
@@ -358,25 +339,20 @@ size_t AddressSpace::amount_dirty_private() const
     // The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
     // That's probably a situation that needs to be looked at in general.
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            if (!region.is_shared())
-                amount += region.amount_dirty();
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        if (!region.is_shared())
+            amount += region.amount_dirty();
+    }
     return amount;
 }
 
 ErrorOr<size_t> AddressSpace::amount_clean_inode() const
 {
     HashTable<LockRefPtr<InodeVMObject>> vmobjects;
-    TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
-        for (auto const& region : region_tree.regions()) {
-            if (region.vmobject().is_inode())
-                TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
-        }
-        return {};
-    }));
+    for (auto const& region : m_region_tree.regions()) {
+        if (region.vmobject().is_inode())
+            TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
+    }
     size_t amount = 0;
     for (auto& vmobject : vmobjects)
         amount += vmobject->amount_clean();
@@ -386,11 +362,9 @@ ErrorOr<size_t> AddressSpace::amount_clean_inode() const
 size_t AddressSpace::amount_virtual() const
 {
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            amount += region.size();
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        amount += region.size();
+    }
     return amount;
 }
 
@@ -398,11 +372,9 @@ size_t AddressSpace::amount_resident() const
 {
     // FIXME: This will double count if multiple regions use the same physical page.
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            amount += region.amount_resident();
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        amount += region.amount_resident();
+    }
     return amount;
 }
 
@@ -413,40 +385,35 @@ size_t AddressSpace::amount_shared() const
     // and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
     // so that every Region contributes +1 ref to each of its PhysicalPages.
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            amount += region.amount_shared();
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        amount += region.amount_shared();
+    }
     return amount;
 }
 
 size_t AddressSpace::amount_purgeable_volatile() const
 {
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            if (!region.vmobject().is_anonymous())
-                continue;
-            auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
-            if (vmobject.is_purgeable() && vmobject.is_volatile())
-                amount += region.amount_resident();
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        if (!region.vmobject().is_anonymous())
+            continue;
+        auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
+        if (vmobject.is_purgeable() && vmobject.is_volatile())
+            amount += region.amount_resident();
+    }
     return amount;
 }
 
 size_t AddressSpace::amount_purgeable_nonvolatile() const
 {
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
+    for (auto const& region : m_region_tree.regions()) {
         if (!region.vmobject().is_anonymous())
             continue;
         auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
         if (vmobject.is_purgeable() && !vmobject.is_volatile())
             amount += region.amount_resident();
-        } });
+    }
     return amount;
 }
 