
Kernel: Make VirtualRangeAllocator return KResultOr<VirtualRange>

This achieves two things:
- The allocator can report more specific errors
- Callers can (and now do) use TRY() :^)
Andreas Kling 2021-09-05 23:12:16 +02:00
parent 21f7932ae2
commit f4a9a0d561
9 changed files with 82 additions and 95 deletions
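
For readers unfamiliar with the idiom: KResultOr<T> carries either a value of type T or an errno-style error code, and TRY() either unwraps the value or returns the error from the calling function. A minimal sketch of the pattern, simplified from SerenityOS's real definitions (which differ in detail):

    // Simplified sketch, not the kernel's exact code. TRY(expr) relies on a
    // GNU statement expression: on error it returns from the *enclosing*
    // function; on success it yields the unwrapped value.
    #define TRY(expression)                        \
        ({                                         \
            auto result = (expression);            \
            if (result.is_error())                 \
                return result.release_error();     \
            result.release_value();                \
        })

This is why the call sites below collapse from several lines of Optional checking into a single TRY(...) expression.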


@@ -131,13 +131,13 @@ KResult AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
     return KSuccess;
 }
 
-Optional<VirtualRange> AddressSpace::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
+KResultOr<VirtualRange> AddressSpace::try_allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
     size = page_round_up(size);
     if (vaddr.is_null())
-        return page_directory().range_allocator().allocate_anywhere(size, alignment);
-    return page_directory().range_allocator().allocate_specific(vaddr, size);
+        return page_directory().range_allocator().try_allocate_anywhere(size, alignment);
+    return page_directory().range_allocator().try_allocate_specific(vaddr, size);
 }
 
 KResultOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)


@@ -35,7 +35,7 @@ public:
     KResult unmap_mmap_range(VirtualAddress, size_t);
-    Optional<VirtualRange> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
+    KResultOr<VirtualRange> try_allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
 
     KResultOr<Region*> allocate_region_with_vmobject(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
     KResultOr<Region*> allocate_region(VirtualRange const&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);


@@ -403,11 +403,12 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     m_kernel_page_directory = PageDirectory::must_create_kernel_page_directory();
 
     // Allocate a virtual address range for our array
-    auto range = m_kernel_page_directory->range_allocator().allocate_anywhere(physical_page_array_pages * PAGE_SIZE);
-    if (!range.has_value()) {
+    auto range_or_error = m_kernel_page_directory->range_allocator().try_allocate_anywhere(physical_page_array_pages * PAGE_SIZE);
+    if (range_or_error.is_error()) {
         dmesgln("MM: Could not allocate {} bytes to map physical page array!", physical_page_array_pages * PAGE_SIZE);
         VERIFY_NOT_REACHED();
     }
+    auto range = range_or_error.release_value();
 
     // Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
     // try to map the entire region into kernel space so we always have it
@@ -419,7 +420,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     auto page_tables_base = m_physical_pages_region->lower();
     auto physical_page_array_base = page_tables_base.offset(needed_page_table_count * PAGE_SIZE);
     auto physical_page_array_current_page = physical_page_array_base.get();
-    auto virtual_page_array_base = range.value().base().get();
+    auto virtual_page_array_base = range.base().get();
     auto virtual_page_array_current_page = virtual_page_array_base;
 
     for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
         auto virtual_page_base_for_this_pt = virtual_page_array_current_page;
@@ -461,7 +462,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     }
 
     // We now have the entire PhysicalPageEntry array mapped!
-    m_physical_page_entries = (PhysicalPageEntry*)range.value().base().get();
+    m_physical_page_entries = (PhysicalPageEntry*)range.base().get();
     for (size_t i = 0; i < m_physical_page_entries_count; i++)
         new (&m_physical_page_entries[i]) PageTableEntry();
@@ -474,7 +475,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     auto& kernel_page_tables = kernel_page_directory().m_page_tables;
     virtual_page_array_current_page = virtual_page_array_base;
     for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
-        VERIFY(virtual_page_array_current_page <= range.value().end().get());
+        VERIFY(virtual_page_array_current_page <= range.end().get());
         auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
         auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
         auto& physical_page_entry = m_physical_page_entries[physical_page_index];
@@ -485,7 +486,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
         virtual_page_array_current_page += (PAGE_SIZE / sizeof(PageTableEntry)) * PAGE_SIZE;
     }
 
-    dmesgln("MM: Physical page entries: {}", range.value());
+    dmesgln("MM: Physical page entries: {}", range);
 }
 
 PhysicalPageEntry& MemoryManager::get_physical_page_entry(PhysicalAddress physical_address)
@@ -703,16 +704,17 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, Str
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.has_value())
+    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
+    if (range_or_error.is_error())
         return {};
+    auto range = range_or_error.release_value();
     auto maybe_vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(size);
     if (maybe_vmobject.is_error()) {
-        kernel_page_directory().range_allocator().deallocate(range.value());
+        kernel_page_directory().range_allocator().deallocate(range);
         // FIXME: Would be nice to be able to return a KResultOr from here.
         return {};
     }
-    return allocate_kernel_region_with_vmobject(range.value(), maybe_vmobject.release_value(), name, access, cacheable);
+    return allocate_kernel_region_with_vmobject(range, maybe_vmobject.release_value(), name, access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
@@ -722,10 +724,11 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView nam
     if (maybe_vm_object.is_error())
         return {};
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.has_value())
+    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
+    if (range_or_error.is_error())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), maybe_vm_object.release_value(), name, access, cacheable);
+    auto range = range_or_error.release_value();
+    return allocate_kernel_region_with_vmobject(range, maybe_vm_object.release_value(), name, access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
@@ -735,10 +738,11 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
         return {};
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.has_value())
+    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
+    if (range_or_error.is_error())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), maybe_vm_object.release_value(), name, access, cacheable);
+    auto range = range_or_error.release_value();
+    return allocate_kernel_region_with_vmobject(range, maybe_vm_object.release_value(), name, access, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
@@ -757,10 +761,11 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.has_value())
+    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
+    if (range_or_error.is_error())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, cacheable);
+    auto range = range_or_error.release_value();
+    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, cacheable);
 }
 
 KResultOr<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
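
The FIXME in allocate_contiguous_kernel_region above notes that these OwnPtr<Region> helpers still swallow the allocator's specific error and hand back a null pointer. A hypothetical sketch of the direction that FIXME points in (not part of this commit; the function name and the try_* helper it calls are illustrative):

    // Hypothetical, not in this commit: a KResultOr-returning helper would
    // let the range allocator's error propagate to the caller via TRY().
    KResultOr<NonnullOwnPtr<Region>> MemoryManager::try_allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
    {
        VERIFY(!(size % PAGE_SIZE));
        SpinlockLocker lock(kernel_page_directory().get_lock());
        auto range = TRY(kernel_page_directory().range_allocator().try_allocate_anywhere(size));
        // NOTE: a later failure must still deallocate `range`; TRY() alone would leak it.
        auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
        return try_allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
    }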


@@ -56,10 +56,10 @@ void VirtualRangeAllocator::carve_at_iterator(auto& it, VirtualRange const& rang
     }
 }
 
-Optional<VirtualRange> VirtualRangeAllocator::allocate_randomized(size_t size, size_t alignment)
+KResultOr<VirtualRange> VirtualRangeAllocator::try_allocate_randomized(size_t size, size_t alignment)
 {
     if (!size)
-        return {};
+        return EINVAL;
 
     VERIFY((size % PAGE_SIZE) == 0);
     VERIFY((alignment % PAGE_SIZE) == 0);
@@ -72,18 +72,18 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_randomized(size_t size, s
         if (!m_total_range.contains(random_address, size))
            continue;
 
-        auto range = allocate_specific(random_address, size);
-        if (range.has_value())
-            return range;
+        auto range_or_error = try_allocate_specific(random_address, size);
+        if (!range_or_error.is_error())
+            return range_or_error.release_value();
     }
 
-    return allocate_anywhere(size, alignment);
+    return try_allocate_anywhere(size, alignment);
 }
 
-Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, size_t alignment)
+KResultOr<VirtualRange> VirtualRangeAllocator::try_allocate_anywhere(size_t size, size_t alignment)
 {
     if (!size)
-        return {};
+        return EINVAL;
 
     VERIFY((size % PAGE_SIZE) == 0);
     VERIFY((alignment % PAGE_SIZE) == 0);
@@ -91,7 +91,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, siz
 #ifdef VM_GUARD_PAGES
     // NOTE: We pad VM allocations with a guard page on each side.
     if (Checked<size_t>::addition_would_overflow(size, PAGE_SIZE * 2))
-        return {};
+        return EOVERFLOW;
 
     size_t effective_size = size + PAGE_SIZE * 2;
     size_t offset_from_effective_base = PAGE_SIZE;
@@ -101,7 +101,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, siz
 #endif
 
     if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
-        return {};
+        return EOVERFLOW;
 
     SpinlockLocker lock(m_lock);
 
@@ -126,21 +126,20 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, siz
         return allocated_range;
     }
 
     dmesgln("VirtualRangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
-    return {};
+    return ENOMEM;
 }
 
-Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress base, size_t size)
+KResultOr<VirtualRange> VirtualRangeAllocator::try_allocate_specific(VirtualAddress base, size_t size)
 {
     if (!size)
-        return {};
+        return EINVAL;
 
     VERIFY(base.is_page_aligned());
     VERIFY((size % PAGE_SIZE) == 0);
 
     VirtualRange const allocated_range(base, size);
-    if (!m_total_range.contains(allocated_range)) {
-        return {};
-    }
+    if (!m_total_range.contains(allocated_range))
+        return ENOMEM;
 
     SpinlockLocker lock(m_lock);
     for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
@@ -154,7 +153,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress b
         carve_at_iterator(it, allocated_range);
         return allocated_range;
     }
-    return {};
+    return ENOMEM;
 }
 
 void VirtualRangeAllocator::deallocate(VirtualRange const& range)


@@ -21,9 +21,9 @@ public:
     void initialize_with_range(VirtualAddress, size_t);
     void initialize_from_parent(VirtualRangeAllocator const&);
 
-    Optional<VirtualRange> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
-    Optional<VirtualRange> allocate_specific(VirtualAddress, size_t);
-    Optional<VirtualRange> allocate_randomized(size_t, size_t alignment);
+    KResultOr<VirtualRange> try_allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
+    KResultOr<VirtualRange> try_allocate_specific(VirtualAddress, size_t);
+    KResultOr<VirtualRange> try_allocate_randomized(size_t, size_t alignment);
 
     void deallocate(VirtualRange const&);
     void dump() const;
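
With this header change, callers can distinguish the error codes the allocator now reports: EINVAL for a zero-size request, EOVERFLOW when guard-page or alignment arithmetic would overflow, and ENOMEM when no fitting range exists. An illustrative caller (not from the commit):

    // Illustrative only: reacting to the allocator's specific errors.
    auto range_or_error = range_allocator.try_allocate_anywhere(size);
    if (range_or_error.is_error()) {
        // EINVAL: zero-size request. EOVERFLOW: size/alignment arithmetic
        // overflowed. ENOMEM: no available range was large enough.
        return range_or_error.error();
    }
    auto range = range_or_error.release_value();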


@@ -17,11 +17,8 @@ KResultOr<FlatPtr> Process::sys$map_time_page()
     auto& vmobject = TimeManagement::the().time_page_vmobject();
 
-    auto range = address_space().page_directory().range_allocator().allocate_randomized(PAGE_SIZE, PAGE_SIZE);
-    if (!range.has_value())
-        return ENOMEM;
-
-    auto* region = TRY(address_space().allocate_region_with_vmobject(range.value(), vmobject, 0, "Kernel time page"sv, PROT_READ, true));
+    auto range = TRY(address_space().page_directory().range_allocator().try_allocate_randomized(PAGE_SIZE, PAGE_SIZE));
+    auto* region = TRY(address_space().allocate_region_with_vmobject(range, vmobject, 0, "Kernel time page"sv, PROT_READ, true));
 
     return region->vaddr().get();
 }


@@ -316,13 +316,14 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace>
                 return IterationDecision::Break;
             }
 
-            auto range = new_space->allocate_range({}, program_header.size_in_memory());
-            if (!range.has_value()) {
+            auto range_or_error = new_space->try_allocate_range({}, program_header.size_in_memory());
+            if (range_or_error.is_error()) {
                 ph_load_result = ENOMEM;
                 return IterationDecision::Break;
             }
+            auto range = range_or_error.release_value();
 
-            auto region_or_error = new_space->allocate_region(range.value(), String::formatted("{} (master-tls)", elf_name), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
+            auto region_or_error = new_space->allocate_region(range, String::formatted("{} (master-tls)", elf_name), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
             if (region_or_error.is_error()) {
                 ph_load_result = region_or_error.error();
                 return IterationDecision::Break;
@@ -365,12 +366,14 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace>
             auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
             auto range_end = VirtualAddress { Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()) };
-            auto range = new_space->allocate_range(range_base, range_end.get() - range_base.get());
-            if (!range.has_value()) {
+            auto range_or_error = new_space->try_allocate_range(range_base, range_end.get() - range_base.get());
+            if (range_or_error.is_error()) {
                 ph_load_result = ENOMEM;
                 return IterationDecision::Break;
             }
-            auto region_or_error = new_space->allocate_region(range.value(), region_name, prot, AllocationStrategy::Reserve);
+            auto range = range_or_error.release_value();
+            auto region_or_error = new_space->allocate_region(range, region_name, prot, AllocationStrategy::Reserve);
             if (region_or_error.is_error()) {
                 ph_load_result = region_or_error.error();
                 return IterationDecision::Break;
@@ -405,12 +408,13 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace>
             auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
             auto range_end = VirtualAddress { Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()) };
-            auto range = new_space->allocate_range(range_base, range_end.get() - range_base.get());
-            if (!range.has_value()) {
+            auto range_or_error = new_space->try_allocate_range(range_base, range_end.get() - range_base.get());
+            if (range_or_error.is_error()) {
                 ph_load_result = ENOMEM;
                 return IterationDecision::Break;
             }
-            auto region_or_error = new_space->allocate_region_with_vmobject(range.value(), *vmobject, program_header.offset(), elf_name, prot, true);
+            auto range = range_or_error.release_value();
+            auto region_or_error = new_space->allocate_region_with_vmobject(range, *vmobject, program_header.offset(), elf_name, prot, true);
             if (region_or_error.is_error()) {
                 ph_load_result = region_or_error.error();
                 return IterationDecision::Break;
@@ -432,13 +436,8 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace>
         return ENOEXEC;
     }
 
-    auto stack_range = new_space->allocate_range({}, Thread::default_userspace_stack_size);
-    if (!stack_range.has_value()) {
-        dbgln("do_exec: Failed to allocate VM range for stack");
-        return ENOMEM;
-    }
-
-    auto* stack_region = TRY(new_space->allocate_region(stack_range.value(), "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
+    auto stack_range = TRY(new_space->try_allocate_range({}, Thread::default_userspace_stack_size));
+    auto* stack_region = TRY(new_space->allocate_region(stack_range, "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
     stack_region->set_stack(true);
 
     return LoadResult {
@@ -512,12 +511,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     auto main_program_metadata = main_program_description->metadata();
 
     auto load_result = TRY(load(main_program_description, interpreter_description, main_program_header));
 
-    auto signal_trampoline_range = load_result.space->allocate_range({}, PAGE_SIZE);
-    if (!signal_trampoline_range.has_value()) {
-        dbgln("do_exec: Failed to allocate VM for signal trampoline");
-        return ENOMEM;
-    }
+    auto signal_trampoline_range = TRY(load_result.space->try_allocate_range({}, PAGE_SIZE));
 
     // We commit to the new executable at this point. There is no turning back!
@@ -558,7 +552,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     }
 
     Memory::MemoryManager::enter_space(*m_space);
 
-    auto signal_trampoline_region = m_space->allocate_region_with_vmobject(signal_trampoline_range.value(), g_signal_trampoline_region->vmobject(), 0, "Signal trampoline", PROT_READ | PROT_EXEC, true);
+    auto signal_trampoline_region = m_space->allocate_region_with_vmobject(signal_trampoline_range, g_signal_trampoline_region->vmobject(), 0, "Signal trampoline", PROT_READ | PROT_EXEC, true);
     if (signal_trampoline_region.is_error()) {
         VERIFY_NOT_REACHED();
     }


@@ -191,22 +191,20 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
         return EINVAL;
 
     Memory::Region* region = nullptr;
-    Optional<Memory::VirtualRange> range;
-
-    if (map_randomized) {
-        range = address_space().page_directory().range_allocator().allocate_randomized(Memory::page_round_up(size), alignment);
-    } else {
-        range = address_space().allocate_range(VirtualAddress(addr), size, alignment);
-        if (!range.has_value()) {
+    auto range = TRY([&]() -> KResultOr<Memory::VirtualRange> {
+        if (map_randomized) {
+            return address_space().page_directory().range_allocator().try_allocate_randomized(Memory::page_round_up(size), alignment);
+        }
+        auto range = address_space().try_allocate_range(VirtualAddress(addr), size, alignment);
+        if (range.is_error()) {
             if (addr && !map_fixed) {
                 // If there's an address but MAP_FIXED wasn't specified, the address is just a hint.
-                range = address_space().allocate_range({}, size, alignment);
+                range = address_space().try_allocate_range({}, size, alignment);
             }
         }
-    }
-
-    if (!range.has_value())
-        return ENOMEM;
+        return range;
+    }());
 
     if (map_anonymous) {
         auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
@@ -217,7 +215,7 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
             vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(size), strategy));
         }
 
-        region = TRY(address_space().allocate_region_with_vmobject(range.value(), vmobject.release_nonnull(), 0, {}, prot, map_shared));
+        region = TRY(address_space().allocate_region_with_vmobject(range, vmobject.release_nonnull(), 0, {}, prot, map_shared));
     } else {
         if (offset < 0)
             return EINVAL;
@@ -238,7 +236,7 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
             return EACCES;
         }
 
-        region = TRY(description->mmap(*this, range.value(), static_cast<u64>(offset), prot, map_shared));
+        region = TRY(description->mmap(*this, range, static_cast<u64>(offset), prot, map_shared));
     }
 
     if (!region)
@@ -549,11 +547,8 @@ KResultOr<FlatPtr> Process::sys$allocate_tls(Userspace<const char*> initial_data
     if (multiple_threads)
         return EINVAL;
 
-    auto range = address_space().allocate_range({}, size);
-    if (!range.has_value())
-        return ENOMEM;
-
-    auto region = TRY(address_space().allocate_region(range.value(), String("Master TLS"), PROT_READ | PROT_WRITE));
+    auto range = TRY(address_space().try_allocate_range({}, size));
+    auto region = TRY(address_space().allocate_region(range, String("Master TLS"), PROT_READ | PROT_WRITE));
 
     m_master_tls_region = region->make_weak_ptr();
     m_master_tls_size = size;
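
A note on the sys$mmap hunk above: TRY() returns from the enclosing function on error, so the branching range-allocation logic is wrapped in an immediately invoked lambda. Each branch returns a KResultOr<Memory::VirtualRange>, and a single TRY() at the call site unwraps the result. Reduced to a sketch (the condition and helper names are placeholders):

    // Sketch of the immediately-invoked-lambda idiom used in sys$mmap.
    auto range = TRY([&]() -> KResultOr<Memory::VirtualRange> {
        if (some_condition)
            return allocate_one_way();  // an error propagates out of the lambda...
        return allocate_another_way();
    }());                               // ...and TRY() returns it from the syscall.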


@@ -1190,13 +1190,10 @@ KResult Thread::make_thread_specific_region(Badge<Process>)
     if (!process().m_master_tls_region)
         return KSuccess;
 
-    auto range = process().address_space().allocate_range({}, thread_specific_region_size());
-    if (!range.has_value())
-        return ENOMEM;
-
-    auto* region = TRY(process().address_space().allocate_region(range.value(), "Thread-specific", PROT_READ | PROT_WRITE));
+    auto range = TRY(process().address_space().try_allocate_range({}, thread_specific_region_size()));
+    auto* region = TRY(process().address_space().allocate_region(range, "Thread-specific", PROT_READ | PROT_WRITE));
 
-    m_thread_specific_range = range.value();
+    m_thread_specific_range = range;
 
     SmapDisabler disabler;
     auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment())).as_ptr();