
Kernel: Remove Range "valid" state and use Optional<Range> instead

It's easier to understand VM ranges if they are always valid. We can
simply use an empty Optional<Range> to encode absence when needed.
Andreas Kling 2021-01-27 21:01:45 +01:00
parent 67bc5e0bbd
commit e67402c702
8 changed files with 39 additions and 37 deletions
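
The change is the classic move from an in-band sentinel (a default-constructed, "invalid" Range) to an out-of-band Optional return. A minimal standalone sketch of the before/after shape, using std::optional as a stand-in for AK::Optional and a simplified Range rather than the kernel's class:

#include <cstddef>
#include <cstdint>
#include <optional>

// Simplified stand-in for Kernel::Range, not the real class.
struct Range {
    uintptr_t base;
    size_t size;
};

// Before: failure was encoded in-band as a zero-sized "invalid" Range,
// and every caller had to remember to call is_valid().
//     Range allocate_anywhere(size_t size);

// After: failure is encoded out-of-band. Any Range that exists is valid,
// and the type system forces callers to check before unwrapping.
std::optional<Range> allocate_anywhere(size_t size)
{
    if (size == 0)
        return {}; // empty Optional: "no range", replacing the old invalid state
    return Range { 0xc0000000, size }; // placeholder base, for illustration only
}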

Kernel/VM/MemoryManager.cpp

@@ -399,10 +399,10 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, con
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = ContiguousVMObject::create_with_size(size);
-    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, AllocationStrategy strategy, bool cacheable)
@@ -410,12 +410,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = AnonymousVMObject::create_with_size(size, strategy);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range, vmobject.release_nonnull(), name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject.release_nonnull(), name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
@@ -423,12 +423,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
@@ -436,12 +436,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
@@ -467,9 +467,9 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
-    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
 }
 
 bool MemoryManager::commit_user_physical_pages(size_t page_count)
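
Every call site above now follows the same two-step shape: check has_value(), then unwrap with value(). A condensed sketch of that caller pattern (try_allocate() and do_map() are hypothetical helpers, std::optional standing in for AK::Optional):

#include <cstddef>
#include <cstdint>
#include <optional>

struct Range { uintptr_t base; size_t size; }; // simplified stand-in

std::optional<Range> try_allocate(size_t size); // hypothetical allocator
void do_map(uintptr_t base, size_t size);       // hypothetical mapping step

bool map_something(size_t size)
{
    auto range = try_allocate(size);
    if (!range.has_value())
        return false; // propagate failure, mirroring "return {};" above
    // Past this point range.value() is guaranteed meaningful; there is
    // no "constructed but invalid" Range state left to misuse.
    do_map(range.value().base, range.value().size);
    return true;
}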

Kernel/VM/RangeAllocator.cpp

@@ -36,6 +36,7 @@
 namespace Kernel {
 
 RangeAllocator::RangeAllocator()
+    : m_total_range({}, 0)
 {
 }
@@ -105,7 +106,7 @@ void RangeAllocator::carve_at_index(int index, const Range& range)
     m_available_ranges.insert(index + 1, move(remaining_parts[1]));
 }
 
-Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
+Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
 {
     if (!size)
         return {};
@@ -148,7 +149,7 @@ Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
     return {};
 }
 
-Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
+Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
 {
     if (!size)
         return {};
@@ -178,7 +179,7 @@ Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
     return {};
 }
 
-void RangeAllocator::deallocate(Range range)
+void RangeAllocator::deallocate(const Range& range)
 {
     ScopedSpinLock lock(m_lock);
     ASSERT(m_total_range.contains(range));
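
Worth noting: the early "return {};" statements inside allocate_anywhere() and allocate_specific() did not change textually, but their meaning did. With the new return type the same expression value-initializes an empty Optional<Range> rather than a default-constructed "invalid" Range, and that is what allows the Range default constructor to be deleted in the header below. A small illustration, again with std::optional standing in for AK::Optional:

#include <cstddef>
#include <cstdint>
#include <optional>

struct Range {
    Range() = delete; // mirrors the header change below
    Range(uintptr_t base, size_t size) : m_base(base), m_size(size) { }
    uintptr_t m_base;
    size_t m_size;
};

std::optional<Range> allocate_specific(uintptr_t base, size_t size)
{
    if (size == 0)
        return {}; // still compiles: constructs an empty optional, not a Range
    return Range(base, size);
}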

Kernel/VM/RangeAllocator.h

@@ -38,7 +38,7 @@ class Range {
     friend class RangeAllocator;
 
 public:
-    Range() { }
+    Range() = delete;
     Range(VirtualAddress base, size_t size)
         : m_base(base)
         , m_size(size)
@@ -85,9 +85,9 @@ public:
     void initialize_with_range(VirtualAddress, size_t);
     void initialize_from_parent(const RangeAllocator&);
 
-    Range allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
-    Range allocate_specific(VirtualAddress, size_t);
-    void deallocate(Range);
+    Optional<Range> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
+    Optional<Range> allocate_specific(VirtualAddress, size_t);
+    void deallocate(const Range&);
 
     void dump() const;
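
A consequence of "Range() = delete" is that a Range can no longer be left default-initialized anywhere, which is exactly why the RangeAllocator constructor earlier in this diff gained the explicit ": m_total_range({}, 0)" member initializer. A compilable sketch of the constraint (simplified types, not the kernel's):

#include <cstddef>
#include <cstdint>

struct Range {
    Range() = delete;
    Range(uintptr_t base, size_t size) : m_base(base), m_size(size) { }
    uintptr_t m_base;
    size_t m_size;
};

struct RangeAllocator {
    RangeAllocator()
        : m_total_range(0, 0) // must be initialized explicitly now
    {
    }
    Range m_total_range;
};

int main()
{
    // Range r;              // error: use of deleted function Range::Range()
    Range r(0x1000, 0x1000); // fine: every Range carries real values
    RangeAllocator allocator;
    (void)r;
    (void)allocator;
}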