Kernel: Make VirtualRangeAllocator return KResultOr<VirtualRange>
This achieves two things:

- The allocator can report more specific errors
- Callers can (and now do) use TRY() :^)
parent 21f7932ae2
commit f4a9a0d561

9 changed files with 82 additions and 95 deletions
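For context (not part of the diff below), here is a caller-side sketch of what the commit message means by "use TRY()". The function and its name are hypothetical, written only to illustrate the new KResultOr-returning signature introduced by this commit:

// Hypothetical caller, for illustration only -- not taken from this commit.
// It assumes the new KResultOr<VirtualRange> API and the kernel's TRY() macro.
KResultOr<FlatPtr> example_reserve_range(AddressSpace& space, size_t size)
{
    // Before this change, Optional<VirtualRange> lost the reason for failure,
    // so callers had to guess an error code:
    //     auto range = space.allocate_range({}, size);
    //     if (!range.has_value())
    //         return ENOMEM;

    // After this change, the allocator's specific error propagates automatically.
    auto range = TRY(space.try_allocate_range({}, size));
    return range.base().get();
}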
@@ -131,13 +131,13 @@ KResult AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
     return KSuccess;
 }

-Optional<VirtualRange> AddressSpace::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
+KResultOr<VirtualRange> AddressSpace::try_allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
     size = page_round_up(size);
     if (vaddr.is_null())
-        return page_directory().range_allocator().allocate_anywhere(size, alignment);
-    return page_directory().range_allocator().allocate_specific(vaddr, size);
+        return page_directory().range_allocator().try_allocate_anywhere(size, alignment);
+    return page_directory().range_allocator().try_allocate_specific(vaddr, size);
 }

 KResultOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)

@@ -35,7 +35,7 @@ public:

     KResult unmap_mmap_range(VirtualAddress, size_t);

-    Optional<VirtualRange> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
+    KResultOr<VirtualRange> try_allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);

     KResultOr<Region*> allocate_region_with_vmobject(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
     KResultOr<Region*> allocate_region(VirtualRange const&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);

@@ -403,11 +403,12 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     m_kernel_page_directory = PageDirectory::must_create_kernel_page_directory();

     // Allocate a virtual address range for our array
-    auto range = m_kernel_page_directory->range_allocator().allocate_anywhere(physical_page_array_pages * PAGE_SIZE);
-    if (!range.has_value()) {
+    auto range_or_error = m_kernel_page_directory->range_allocator().try_allocate_anywhere(physical_page_array_pages * PAGE_SIZE);
+    if (range_or_error.is_error()) {
         dmesgln("MM: Could not allocate {} bytes to map physical page array!", physical_page_array_pages * PAGE_SIZE);
         VERIFY_NOT_REACHED();
     }
+    auto range = range_or_error.release_value();

     // Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
     // try to map the entire region into kernel space so we always have it

@@ -419,7 +420,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     auto page_tables_base = m_physical_pages_region->lower();
     auto physical_page_array_base = page_tables_base.offset(needed_page_table_count * PAGE_SIZE);
     auto physical_page_array_current_page = physical_page_array_base.get();
-    auto virtual_page_array_base = range.value().base().get();
+    auto virtual_page_array_base = range.base().get();
     auto virtual_page_array_current_page = virtual_page_array_base;
     for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
         auto virtual_page_base_for_this_pt = virtual_page_array_current_page;

@@ -461,7 +462,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     }

     // We now have the entire PhysicalPageEntry array mapped!
-    m_physical_page_entries = (PhysicalPageEntry*)range.value().base().get();
+    m_physical_page_entries = (PhysicalPageEntry*)range.base().get();
     for (size_t i = 0; i < m_physical_page_entries_count; i++)
         new (&m_physical_page_entries[i]) PageTableEntry();

@@ -474,7 +475,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     auto& kernel_page_tables = kernel_page_directory().m_page_tables;
     virtual_page_array_current_page = virtual_page_array_base;
     for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
-        VERIFY(virtual_page_array_current_page <= range.value().end().get());
+        VERIFY(virtual_page_array_current_page <= range.end().get());
         auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
         auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
         auto& physical_page_entry = m_physical_page_entries[physical_page_index];

@@ -485,7 +486,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
         virtual_page_array_current_page += (PAGE_SIZE / sizeof(PageTableEntry)) * PAGE_SIZE;
     }

-    dmesgln("MM: Physical page entries: {}", range.value());
+    dmesgln("MM: Physical page entries: {}", range);
 }

 PhysicalPageEntry& MemoryManager::get_physical_page_entry(PhysicalAddress physical_address)

@@ -703,16 +704,17 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, Str
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.has_value())
+    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
+    if (range_or_error.is_error())
         return {};
+    auto range = range_or_error.release_value();
     auto maybe_vmobject = AnonymousVMObject::try_create_physically_contiguous_with_size(size);
     if (maybe_vmobject.is_error()) {
-        kernel_page_directory().range_allocator().deallocate(range.value());
+        kernel_page_directory().range_allocator().deallocate(range);
         // FIXME: Would be nice to be able to return a KResultOr from here.
         return {};
     }
-    return allocate_kernel_region_with_vmobject(range.value(), maybe_vmobject.release_value(), name, access, cacheable);
+    return allocate_kernel_region_with_vmobject(range, maybe_vmobject.release_value(), name, access, cacheable);
 }

 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)

@@ -722,10 +724,11 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView nam
     if (maybe_vm_object.is_error())
         return {};
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.has_value())
+    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
+    if (range_or_error.is_error())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), maybe_vm_object.release_value(), name, access, cacheable);
+    auto range = range_or_error.release_value();
+    return allocate_kernel_region_with_vmobject(range, maybe_vm_object.release_value(), name, access, cacheable);
 }

 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)

@@ -735,10 +738,11 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
         return {};
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.has_value())
+    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
+    if (range_or_error.is_error())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), maybe_vm_object.release_value(), name, access, cacheable);
+    auto range = range_or_error.release_value();
+    return allocate_kernel_region_with_vmobject(range, maybe_vm_object.release_value(), name, access, cacheable);
 }

 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)

@@ -757,10 +761,11 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
 {
     VERIFY(!(size % PAGE_SIZE));
     SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.has_value())
+    auto range_or_error = kernel_page_directory().range_allocator().try_allocate_anywhere(size);
+    if (range_or_error.is_error())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, cacheable);
+    auto range = range_or_error.release_value();
+    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, cacheable);
 }

 KResultOr<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)

@@ -56,10 +56,10 @@ void VirtualRangeAllocator::carve_at_iterator(auto& it, VirtualRange const& rang
     }
 }

-Optional<VirtualRange> VirtualRangeAllocator::allocate_randomized(size_t size, size_t alignment)
+KResultOr<VirtualRange> VirtualRangeAllocator::try_allocate_randomized(size_t size, size_t alignment)
 {
     if (!size)
-        return {};
+        return EINVAL;

     VERIFY((size % PAGE_SIZE) == 0);
     VERIFY((alignment % PAGE_SIZE) == 0);

@@ -72,18 +72,18 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_randomized(size_t size, s
         if (!m_total_range.contains(random_address, size))
             continue;

-        auto range = allocate_specific(random_address, size);
-        if (range.has_value())
-            return range;
+        auto range_or_error = try_allocate_specific(random_address, size);
+        if (!range_or_error.is_error())
+            return range_or_error.release_value();
     }

-    return allocate_anywhere(size, alignment);
+    return try_allocate_anywhere(size, alignment);
 }

-Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, size_t alignment)
+KResultOr<VirtualRange> VirtualRangeAllocator::try_allocate_anywhere(size_t size, size_t alignment)
 {
     if (!size)
-        return {};
+        return EINVAL;

     VERIFY((size % PAGE_SIZE) == 0);
     VERIFY((alignment % PAGE_SIZE) == 0);

@@ -91,7 +91,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, siz
 #ifdef VM_GUARD_PAGES
     // NOTE: We pad VM allocations with a guard page on each side.
     if (Checked<size_t>::addition_would_overflow(size, PAGE_SIZE * 2))
-        return {};
+        return EOVERFLOW;

     size_t effective_size = size + PAGE_SIZE * 2;
     size_t offset_from_effective_base = PAGE_SIZE;

@@ -101,7 +101,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, siz
 #endif

     if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
-        return {};
+        return EOVERFLOW;

     SpinlockLocker lock(m_lock);

@@ -126,21 +126,20 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, siz
         return allocated_range;
     }
     dmesgln("VirtualRangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
-    return {};
+    return ENOMEM;
 }

-Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress base, size_t size)
+KResultOr<VirtualRange> VirtualRangeAllocator::try_allocate_specific(VirtualAddress base, size_t size)
 {
     if (!size)
-        return {};
+        return EINVAL;

     VERIFY(base.is_page_aligned());
     VERIFY((size % PAGE_SIZE) == 0);

     VirtualRange const allocated_range(base, size);
-    if (!m_total_range.contains(allocated_range)) {
-        return {};
-    }
+    if (!m_total_range.contains(allocated_range))
+        return ENOMEM;

     SpinlockLocker lock(m_lock);
     for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {

@@ -154,7 +153,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress b
         carve_at_iterator(it, allocated_range);
         return allocated_range;
     }
-    return {};
+    return ENOMEM;
 }

 void VirtualRangeAllocator::deallocate(VirtualRange const& range)

@@ -21,9 +21,9 @@ public:
     void initialize_with_range(VirtualAddress, size_t);
     void initialize_from_parent(VirtualRangeAllocator const&);

-    Optional<VirtualRange> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
-    Optional<VirtualRange> allocate_specific(VirtualAddress, size_t);
-    Optional<VirtualRange> allocate_randomized(size_t, size_t alignment);
+    KResultOr<VirtualRange> try_allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
+    KResultOr<VirtualRange> try_allocate_specific(VirtualAddress, size_t);
+    KResultOr<VirtualRange> try_allocate_randomized(size_t, size_t alignment);
     void deallocate(VirtualRange const&);

     void dump() const;
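
As a reminder of how the TRY() mentioned in the commit message composes with these KResultOr returns: it is a statement-expression that early-returns the error and otherwise yields the value. A rough sketch of that pattern follows; it is not the verbatim macro definition from the SerenityOS source and may differ in detail:

// Rough sketch of the TRY() pattern, for illustration only.
#define TRY(expression)                        \
    ({                                         \
        auto _result = (expression);           \
        if (_result.is_error())                \
            return _result.error();            \
        _result.release_value();               \
    })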