Kernel: Make VM allocation atomic for userspace regions
This patch moves AddressSpace (the per-process memory manager) to using the new atomic "place" APIs in RegionTree as well, just like we did for MemoryManager in the previous commit.

This required updating quite a few places where VM allocation and actually committing a Region object to the AddressSpace were separated by other code. All you have to do now is call into AddressSpace once, and it takes care of everything for you.
parent e852a69a06
commit 07f3d09c55

13 changed files with 84 additions and 91 deletions
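
As a rough before/after sketch of the calling convention described above (illustrative only, reconstructed from the removed and added signatures in the diff below; `space` stands for a process's AddressSpace and the names are placeholders):

    // Before: allocating a VM range and committing the Region were separate
    // steps, with other code free to run in between.
    auto range = TRY(space.try_allocate_range({}, size));
    auto* region = TRY(space.allocate_region(range, "example"sv, PROT_READ | PROT_WRITE));

    // After: one call. AddressSpace allocates the range and places the Region
    // in its RegionTree under a single lock; a null requested address means
    // "place it anywhere".
    auto* placed = TRY(space.allocate_region({}, size, PAGE_SIZE, "example"sv, PROT_READ | PROT_WRITE));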

Kernel/Memory/AddressSpace.cpp

@@ -136,23 +136,14 @@ ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
     return {};
 }
 
-ErrorOr<VirtualRange> AddressSpace::try_allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
-{
-    vaddr.mask(PAGE_MASK);
-    size = TRY(page_round_up(size));
-    if (vaddr.is_null())
-        return m_region_tree.try_allocate_anywhere(size, alignment);
-    return m_region_tree.try_allocate_specific(vaddr, size);
-}
-
 ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
 {
     OwnPtr<KString> region_name;
     if (!source_region.name().is_null())
         region_name = TRY(KString::try_create(source_region.name()));
 
-    auto new_region = TRY(Region::try_create_user_accessible(
-        range, source_region.vmobject(), offset_in_vmobject, move(region_name), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared()));
+    auto new_region = TRY(Region::create_unplaced(
+        source_region.vmobject(), offset_in_vmobject, move(region_name), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared()));
     new_region->set_syscall_region(source_region.is_syscall_region());
     new_region->set_mmap(source_region.is_mmap());
     new_region->set_stack(source_region.is_stack());
@@ -161,29 +152,46 @@ ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_re
         if (source_region.should_cow(page_offset_in_source_region + i))
             TRY(new_region->set_should_cow(i, true));
     }
-    return add_region(move(new_region));
+    SpinlockLocker locker(m_lock);
+    TRY(m_region_tree.place_specifically(*new_region, range));
+    return new_region.leak_ptr();
 }
 
-ErrorOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
+ErrorOr<Region*> AddressSpace::allocate_region(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, StringView name, int prot, AllocationStrategy strategy)
 {
-    VERIFY(range.is_valid());
+    if (!requested_address.is_page_aligned())
+        return EINVAL;
+    auto size = TRY(Memory::page_round_up(requested_size));
+    auto alignment = TRY(Memory::page_round_up(requested_alignment));
     OwnPtr<KString> region_name;
     if (!name.is_null())
         region_name = TRY(KString::try_create(name));
-    auto vmobject = TRY(AnonymousVMObject::try_create_with_size(range.size(), strategy));
-    auto region = TRY(Region::try_create_user_accessible(range, move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, false));
+    auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
+    auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot)));
+    if (requested_address.is_null())
+        TRY(m_region_tree.place_anywhere(*region, size, alignment));
+    else
+        TRY(m_region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
     TRY(region->map(page_directory(), ShouldFlushTLB::No));
-    return add_region(move(region));
+    return region.leak_ptr();
 }
 
-ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
+ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange requested_range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
 {
-    VERIFY(range.is_valid());
-    size_t end_in_vmobject = offset_in_vmobject + range.size();
-    if (end_in_vmobject <= offset_in_vmobject) {
-        dbgln("allocate_region_with_vmobject: Overflow (offset + size)");
+    return allocate_region_with_vmobject(requested_range.base(), requested_range.size(), PAGE_SIZE, move(vmobject), offset_in_vmobject, name, prot, shared);
+}
+
+ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
+{
+    if (!requested_address.is_page_aligned())
         return EINVAL;
-    }
+    auto size = TRY(page_round_up(requested_size));
+    auto alignment = TRY(page_round_up(requested_alignment));
+
+    if (Checked<size_t>::addition_would_overflow(offset_in_vmobject, requested_size))
+        return EOVERFLOW;
+
+    size_t end_in_vmobject = offset_in_vmobject + requested_size;
     if (offset_in_vmobject >= vmobject->size()) {
         dbgln("allocate_region_with_vmobject: Attempt to allocate a region with an offset past the end of its VMObject.");
         return EINVAL;
@@ -196,7 +204,16 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const&
     OwnPtr<KString> region_name;
     if (!name.is_null())
         region_name = TRY(KString::try_create(name));
-    auto region = TRY(Region::try_create_user_accessible(range, move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
+
+    auto region = TRY(Region::create_unplaced(move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
+
+    SpinlockLocker locker(m_lock);
+
+    if (requested_address.is_null())
+        TRY(m_region_tree.place_anywhere(*region, size, alignment));
+    else
+        TRY(m_region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
+
     if (prot == PROT_NONE) {
         // For PROT_NONE mappings, we don't have to set up any page table mappings.
         // We do still need to attach the region to the page_directory though.
@@ -205,7 +222,7 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const&
     } else {
         TRY(region->map(page_directory(), ShouldFlushTLB::No));
     }
-    return add_region(move(region));
+    return region.leak_ptr();
 }
 
 void AddressSpace::deallocate_region(Region& region)
@@ -267,15 +284,6 @@ ErrorOr<Vector<Region*>> AddressSpace::find_regions_intersecting(VirtualRange co
     return regions;
 }
 
-ErrorOr<Region*> AddressSpace::add_region(NonnullOwnPtr<Region> region)
-{
-    SpinlockLocker lock(m_lock);
-    // NOTE: We leak the region into the IRBT here. It must be deleted or readopted when removed from the tree.
-    auto* ptr = region.leak_ptr();
-    m_region_tree.regions().insert(ptr->vaddr().get(), *ptr);
-    return ptr;
-}
-
 // Carve out a virtual address range from a region and return the two regions on either side
 ErrorOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(Region const& source_region, VirtualRange const& desired_range)
 {
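
One thing the deleted add_region() comment made explicit still holds under the new scheme, just relocated: the Region is leaked into the tree and must be deleted or re-adopted when removed, presumably via take_region()/deallocate_region(). Schematically, the shape all three allocators now share (kernel-internal types assumed; a sketch, not a compilable unit):

    auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), access));
    SpinlockLocker locker(m_lock);                                // one lock around placement
    TRY(m_region_tree.place_anywhere(*region, size, alignment));  // the tree links the Region
    return region.leak_ptr();                                     // OwnPtr gives up ownership; the
                                                                  // tree keeps the pointer alive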

Kernel/Memory/AddressSpace.h

@@ -26,8 +26,6 @@ public:
     PageDirectory& page_directory() { return *m_page_directory; }
     PageDirectory const& page_directory() const { return *m_page_directory; }
 
-    ErrorOr<Region*> add_region(NonnullOwnPtr<Region>);
-
     size_t region_count() const { return m_region_tree.regions().size(); }
 
     auto& regions() { return m_region_tree.regions(); }
@@ -37,10 +35,9 @@ public:
 
     ErrorOr<void> unmap_mmap_range(VirtualAddress, size_t);
 
-    ErrorOr<VirtualRange> try_allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
-
-    ErrorOr<Region*> allocate_region_with_vmobject(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
-    ErrorOr<Region*> allocate_region(VirtualRange const&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
+    ErrorOr<Region*> allocate_region_with_vmobject(VirtualRange requested_range, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
+    ErrorOr<Region*> allocate_region_with_vmobject(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
+    ErrorOr<Region*> allocate_region(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
     void deallocate_region(Region& region);
     NonnullOwnPtr<Region> take_region(Region& region);
 

Kernel/Memory/MemoryManager.cpp

@@ -77,7 +77,7 @@ bool MemoryManager::is_initialized()
 
 static UNMAP_AFTER_INIT VirtualRange kernel_virtual_range()
 {
-    auto kernel_range_start = kernel_mapping_base + 2 * MiB; // The first 2 MiB are used for mapping the pre-kernel
+    size_t kernel_range_start = kernel_mapping_base + 2 * MiB; // The first 2 MiB are used for mapping the pre-kernel
     return VirtualRange { VirtualAddress(kernel_range_start), KERNEL_PD_END - kernel_range_start };
 }
 
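
A hedged aside on the auto-to-size_t change above (the declared types of kernel_mapping_base and KERNEL_PD_END are not visible in this diff, so this is generic C++ reasoning rather than the committer's stated motivation): with auto, the variable's type is whatever the addition deduces to from its operands, while spelling out size_t pins the width used by the range arithmetic on the next line. For instance:

    // Illustrative only -- not SerenityOS code.
    unsigned long long const MiB = 1024 * 1024;
    unsigned int base = 0xc0000000u;  // stand-in for a mapping-base constant
    auto a = base + 2 * MiB;          // type deduced from the operands (unsigned long long here)
    size_t b = base + 2 * MiB;        // always the platform's size type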

Kernel/Memory/Region.cpp

@@ -84,9 +84,9 @@ ErrorOr<NonnullOwnPtr<Region>> Region::create_unbacked()
     return adopt_nonnull_own_or_enomem(new (nothrow) Region);
 }
 
-ErrorOr<NonnullOwnPtr<Region>> Region::create_unplaced(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
+ErrorOr<NonnullOwnPtr<Region>> Region::create_unplaced(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
 {
-    return adopt_nonnull_own_or_enomem(new (nothrow) Region(move(vmobject), offset_in_vmobject, move(name), access, cacheable, false));
+    return adopt_nonnull_own_or_enomem(new (nothrow) Region(move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
 }
 
 ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()

Kernel/Memory/Region.h

@@ -57,7 +57,7 @@ public:
     static ErrorOr<NonnullOwnPtr<Region>> try_create_user_accessible(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
     static ErrorOr<NonnullOwnPtr<Region>> try_create_kernel_only(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
     static ErrorOr<NonnullOwnPtr<Region>> create_unbacked();
-    static ErrorOr<NonnullOwnPtr<Region>> create_unplaced(NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
+    static ErrorOr<NonnullOwnPtr<Region>> create_unplaced(NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes, bool shared = false);
 
     ~Region();
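
Since the new trailing parameter defaults to false, existing create_unplaced() call sites compile unchanged, while paths such as try_allocate_split_region() can forward the source region's sharedness instead of always producing a private region. Schematically (placeholder names):

    // Unchanged caller: shared defaults to false.
    auto plain = TRY(Region::create_unplaced(move(vmobject_a), 0, {}, access));
    // Split/clone paths can now propagate the flag:
    auto shared = TRY(Region::create_unplaced(move(vmobject_b), 0, {}, access, Region::Cacheable::Yes, true));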

Kernel/Memory/RegionTree.cpp

@@ -5,6 +5,7 @@
  */
 
 #include <AK/Format.h>
+#include <Kernel/Memory/AnonymousVMObject.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/RegionTree.h>
 #include <Kernel/Random.h>
@@ -167,4 +168,14 @@ ErrorOr<void> RegionTree::place_specifically(Region& region, VirtualRange const&
     return {};
 }
 
+ErrorOr<NonnullOwnPtr<Memory::Region>> RegionTree::create_identity_mapped_region(PhysicalAddress paddr, size_t size)
+{
+    auto vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size));
+    auto region = TRY(Memory::Region::create_unplaced(move(vmobject), 0, {}, Memory::Region::Access::ReadWriteExecute));
+    Memory::VirtualRange range { VirtualAddress { (FlatPtr)paddr.get() }, size };
+    region->m_range = range;
+    TRY(region->map(MM.kernel_page_directory()));
+    return region;
+}
+
 }
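
A sketch of how the new helper might be called (hypothetical call site, not part of this diff): it bundles the physical-range-to-VMObject-to-mapped-Region sequence for kernel code that needs mappings where the virtual address equals the physical address.

    // `tree` is assumed to be the kernel's RegionTree.
    auto region = TRY(tree.create_identity_mapped_region(PhysicalAddress { 0x8000 }, PAGE_SIZE));
    // Virtual address 0x8000 now maps to physical address 0x8000
    // through the kernel page directory.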

Kernel/Memory/RegionTree.h

@@ -37,6 +37,8 @@ public:
     ErrorOr<void> place_anywhere(Region&, size_t size, size_t alignment = PAGE_SIZE);
     ErrorOr<void> place_specifically(Region&, VirtualRange const&);
 
+    ErrorOr<NonnullOwnPtr<Memory::Region>> create_identity_mapped_region(PhysicalAddress, size_t);
+
     ErrorOr<VirtualRange> try_allocate_anywhere(size_t size, size_t alignment = PAGE_SIZE);
     ErrorOr<VirtualRange> try_allocate_specific(VirtualAddress base, size_t size);
     ErrorOr<VirtualRange> try_allocate_randomized(size_t size, size_t alignment = PAGE_SIZE);