Kernel: Remove Range "valid" state and use Optional<Range> instead
It's easier to understand VM ranges if they are always valid. We can simply use an empty Optional<Range> to encode absence when needed.
This commit is contained in:
parent 67bc5e0bbd
commit e67402c702

8 changed files with 39 additions and 37 deletions
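In a nutshell, the pattern (a minimal standalone sketch, not code from this commit; FlatPtr stands in for the kernel's VirtualAddress type):

    #include <AK/Optional.h>
    #include <AK/Types.h>

    // Sketch of a Range with no "invalid" state: deleting the default
    // constructor means every Range that exists is valid by construction.
    class Range {
    public:
        Range() = delete;
        Range(FlatPtr base, size_t size)
            : m_base(base)
            , m_size(size)
        {
        }
        FlatPtr base() const { return m_base; }
        size_t size() const { return m_size; }

    private:
        FlatPtr m_base { 0 };
        size_t m_size { 0 };
    };

    // Failure is an empty Optional instead of a Range flagged "invalid".
    Optional<Range> try_allocate_range(size_t size)
    {
        if (!size)
            return {}; // empty Optional: nothing was allocated
        return Range { 0x10000000, size }; // made-up base address for illustration
    }

Call sites then test has_value() and unwrap with value(), which is exactly the mechanical change repeated across the diff below.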
Kernel/Process.cpp
@@ -116,7 +116,7 @@ bool Process::in_group(gid_t gid) const
     return m_gid == gid || m_extra_gids.contains_slow(gid);
 }
 
-Range Process::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
+Optional<Range> Process::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
     size = PAGE_ROUND_UP(size);
@@ -195,7 +195,7 @@ bool Process::deallocate_region(Region& region)
 Region* Process::find_region_from_range(const Range& range)
 {
     ScopedSpinLock lock(m_lock);
-    if (m_region_lookup_cache.range == range && m_region_lookup_cache.region)
+    if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
         return m_region_lookup_cache.region.unsafe_ptr();
 
     size_t size = PAGE_ROUND_UP(range.size());

Kernel/Process.h
@@ -512,7 +512,7 @@ private:
     Process(RefPtr<Thread>& first_thread, const String& name, uid_t, gid_t, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> cwd = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
     static ProcessID allocate_pid();
 
-    Range allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
+    Optional<Range> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
 
     Region& add_region(NonnullOwnPtr<Region>);
 
@@ -614,7 +614,7 @@ private:
 
     NonnullOwnPtrVector<Region> m_regions;
     struct RegionLookupCache {
-        Range range;
+        Optional<Range> range;
         WeakPtr<Region> region;
     };
     RegionLookupCache m_region_lookup_cache;

Kernel/Syscalls/execve.cpp
@@ -185,12 +185,12 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
         }
 
         auto range = allocate_range({}, program_header.size_in_memory());
-        if (!range.is_valid()) {
+        if (!range.has_value()) {
             ph_load_result = ENOMEM;
             return IterationDecision::Break;
         }
 
-        auto region_or_error = allocate_region(range, String::formatted("{} (master-tls)", elf_name), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
+        auto region_or_error = allocate_region(range.value(), String::formatted("{} (master-tls)", elf_name), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
         if (region_or_error.is_error()) {
             ph_load_result = region_or_error.error();
             return IterationDecision::Break;
@@ -226,11 +226,11 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
             prot |= PROT_WRITE;
         auto region_name = String::formatted("{} (data-{}{})", elf_name, program_header.is_readable() ? "r" : "", program_header.is_writable() ? "w" : "");
         auto range = allocate_range(program_header.vaddr().offset(load_offset), program_header.size_in_memory());
-        if (!range.is_valid()) {
+        if (!range.has_value()) {
            ph_load_result = ENOMEM;
            return IterationDecision::Break;
        }
-        auto region_or_error = allocate_region(range, region_name, prot, AllocationStrategy::Reserve);
+        auto region_or_error = allocate_region(range.value(), region_name, prot, AllocationStrategy::Reserve);
         if (region_or_error.is_error()) {
             ph_load_result = region_or_error.error();
             return IterationDecision::Break;
@@ -263,11 +263,11 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
         if (program_header.is_executable())
             prot |= PROT_EXEC;
         auto range = allocate_range(program_header.vaddr().offset(load_offset), program_header.size_in_memory());
-        if (!range.is_valid()) {
+        if (!range.has_value()) {
             ph_load_result = ENOMEM;
             return IterationDecision::Break;
         }
-        auto region_or_error = allocate_region_with_vmobject(range, *vmobject, program_header.offset(), elf_name, prot, true);
+        auto region_or_error = allocate_region_with_vmobject(range.value(), *vmobject, program_header.offset(), elf_name, prot, true);
         if (region_or_error.is_error()) {
             ph_load_result = region_or_error.error();
             return IterationDecision::Break;
@@ -288,12 +288,12 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
     }
 
     auto stack_range = allocate_range({}, Thread::default_userspace_stack_size);
-    if (!stack_range.is_valid()) {
+    if (!stack_range.has_value()) {
         dbgln("do_exec: Failed to allocate VM range for stack");
         return ENOMEM;
     }
 
-    auto stack_region_or_error = allocate_region(stack_range, "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
+    auto stack_region_or_error = allocate_region(stack_range.value(), "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
     if (stack_region_or_error.is_error())
         return stack_region_or_error.error();
     auto& stack_region = *stack_region_or_error.value();

Kernel/Syscalls/mmap.cpp
@@ -134,17 +134,18 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
 
     Region* region = nullptr;
     auto range = allocate_range(VirtualAddress(addr), size, alignment);
-    if (!range.is_valid()) {
+    if (!range.has_value()) {
         if (addr && !map_fixed) {
             // If there's an address but MAP_FIXED wasn't specified, the address is just a hint.
             range = allocate_range({}, size, alignment);
         }
-        return (void*)-ENOMEM;
+        if (!range.has_value())
+            return (void*)-ENOMEM;
     }
 
     if (map_anonymous) {
         auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
-        auto region_or_error = allocate_region(range, !name.is_null() ? name : "mmap", prot, strategy);
+        auto region_or_error = allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, strategy);
         if (region_or_error.is_error())
             return (void*)region_or_error.error().error();
         region = region_or_error.value();
@@ -170,7 +171,7 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
             return (void*)-EACCES;
         }
 
-        auto region_or_error = description->mmap(*this, range, static_cast<size_t>(offset), prot, map_shared);
+        auto region_or_error = description->mmap(*this, range.value(), static_cast<size_t>(offset), prot, map_shared);
         if (region_or_error.is_error())
             return (void*)region_or_error.error().error();
         region = region_or_error.value();
@@ -447,10 +448,10 @@ void* Process::sys$allocate_tls(size_t size)
     ASSERT(main_thread);
 
     auto range = allocate_range({}, size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return (void*)-ENOMEM;
 
-    auto region_or_error = allocate_region(range, String(), PROT_READ | PROT_WRITE);
+    auto region_or_error = allocate_region(range.value(), String(), PROT_READ | PROT_WRITE);
     if (region_or_error.is_error())
         return (void*)region_or_error.error().error();
 

Kernel/Thread.cpp
@@ -1059,10 +1059,10 @@ KResult Thread::make_thread_specific_region(Badge<Process>)
         return KSuccess;
 
     auto range = process().allocate_range({}, thread_specific_region_size());
-    if (!range.is_valid())
+    if (!range.has_value())
         return ENOMEM;
 
-    auto region_or_error = process().allocate_region(range, "Thread-specific", PROT_READ | PROT_WRITE);
+    auto region_or_error = process().allocate_region(range.value(), "Thread-specific", PROT_READ | PROT_WRITE);
     if (region_or_error.is_error())
         return region_or_error.error();
 

Kernel/VM/MemoryManager.cpp
@@ -399,10 +399,10 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, con
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = ContiguousVMObject::create_with_size(size);
-    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, AllocationStrategy strategy, bool cacheable)
@@ -410,12 +410,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = AnonymousVMObject::create_with_size(size, strategy);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range, vmobject.release_nonnull(), name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject.release_nonnull(), name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
@@ -423,12 +423,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
@@ -436,12 +436,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
@@ -467,9 +467,9 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
-    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
 }
 
 bool MemoryManager::commit_user_physical_pages(size_t page_count)

Kernel/VM/RangeAllocator.cpp
@@ -36,6 +36,7 @@
 namespace Kernel {
 
 RangeAllocator::RangeAllocator()
+    : m_total_range({}, 0)
 {
 }
 
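The new constructor initializer above is a direct consequence of the header change below (Range() = delete): a member of type Range can no longer be default-constructed, so m_total_range must be initialized explicitly, and ({}, 0) builds a zero-sized range at a null address. A minimal sketch of the constraint, reusing the hypothetical Range from the sketch above:

    class RangeAllocator {
    public:
        RangeAllocator()
            : m_total_range({}, 0) // required: Range has no default constructor
        {
        }

    private:
        Range m_total_range; // would not compile without the explicit initializer
    };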
@@ -105,7 +106,7 @@ void RangeAllocator::carve_at_index(int index, const Range& range)
     m_available_ranges.insert(index + 1, move(remaining_parts[1]));
 }
 
-Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
+Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
 {
     if (!size)
         return {};
@@ -148,7 +149,7 @@ Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
     return {};
 }
 
-Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
+Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
 {
     if (!size)
         return {};
@@ -178,7 +179,7 @@ Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
     return {};
 }
 
-void RangeAllocator::deallocate(Range range)
+void RangeAllocator::deallocate(const Range& range)
 {
     ScopedSpinLock lock(m_lock);
     ASSERT(m_total_range.contains(range));

Kernel/VM/RangeAllocator.h
@@ -38,7 +38,7 @@ class Range {
     friend class RangeAllocator;
 
 public:
-    Range() { }
+    Range() = delete;
     Range(VirtualAddress base, size_t size)
         : m_base(base)
         , m_size(size)
@@ -85,9 +85,9 @@ public:
     void initialize_with_range(VirtualAddress, size_t);
     void initialize_from_parent(const RangeAllocator&);
 
-    Range allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
-    Range allocate_specific(VirtualAddress, size_t);
-    void deallocate(Range);
+    Optional<Range> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
+    Optional<Range> allocate_specific(VirtualAddress, size_t);
+    void deallocate(const Range&);
 
     void dump() const;
 