Kernel: Rename Range => VirtualRange
...and also RangeAllocator => VirtualRangeAllocator. This clarifies that the ranges we're dealing with are *virtual* memory ranges and not anything else.
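For readers skimming the diff below: this is a mechanical rename, so the type's shape is unchanged, only its name. As a rough standalone sketch of what such a range models (simplified member names and types here are assumptions for illustration, not the kernel's actual VirtualRange header):

```cpp
// Standalone sketch, for illustration only -- NOT the kernel's real header.
// Members other than base/size/end/contains are simplified assumptions.
#include <cstddef>
#include <cstdint>
#include <cstdio>

using FlatPtr = std::uintptr_t;

struct VirtualAddress {
    FlatPtr value { 0 };
    FlatPtr get() const { return value; }
};

// A contiguous span of *virtual* addresses: a base plus a size in bytes.
// Calling this VirtualRange (rather than plain Range) makes it obvious at
// call sites that it is not a physical range or a generic interval type.
struct VirtualRange {
    VirtualAddress base;
    std::size_t size { 0 };

    VirtualAddress end() const { return { base.get() + size }; }
    bool contains(VirtualAddress addr) const
    {
        return addr.get() >= base.get() && addr.get() < end().get();
    }
};

int main()
{
    // E.g. the kind of one-byte probe the syscalls below use to find a region:
    VirtualRange probe { VirtualAddress { 0x08048000 }, 1 };
    std::printf("contains base? %d\n", probe.contains(probe.base));
}
```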
parent 93d98d4976
commit cd5faf4e42

39 changed files with 207 additions and 207 deletions
@@ -154,12 +154,12 @@ static KResultOr<FlatPtr> make_userspace_context_for_main_thread([[maybe_unused]
     return new_sp;
 }
 
-struct RequiredLoadRange {
+struct RequiredLoadVirtualRange {
     FlatPtr start { 0 };
     FlatPtr end { 0 };
 };
 
-static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& program_description)
+static KResultOr<RequiredLoadVirtualRange> get_required_load_range(FileDescription& program_description)
 {
     auto& inode = *(program_description.inode());
     auto vmobject = Memory::SharedInodeVMObject::try_create_with_inode(inode);
@@ -181,7 +181,7 @@ static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& pro
         return EINVAL;
     }
 
-    RequiredLoadRange range {};
+    RequiredLoadVirtualRange range {};
     elf_image.for_each_program_header([&range](const auto& pheader) {
         if (pheader.type() != PT_LOAD)
             return;
@@ -221,7 +221,7 @@ static KResultOr<FlatPtr> get_load_offset(const ElfW(Ehdr) & main_program_header
 
     auto main_program_load_range = main_program_load_range_result.value();
 
-    RequiredLoadRange selected_range {};
+    RequiredLoadVirtualRange selected_range {};
 
     if (interpreter_description) {
         auto interpreter_load_range_result = get_required_load_range(*interpreter_description);
@@ -235,8 +235,8 @@ static KResultOr<FlatPtr> get_load_offset(const ElfW(Ehdr) & main_program_header
     if (main_program_load_range.end < load_range_start || main_program_load_range.start > interpreter_load_range_end)
         return random_load_offset_in_range(load_range_start, load_range_size);
 
-    RequiredLoadRange first_available_part = { load_range_start, main_program_load_range.start };
-    RequiredLoadRange second_available_part = { main_program_load_range.end, interpreter_load_range_end };
+    RequiredLoadVirtualRange first_available_part = { load_range_start, main_program_load_range.start };
+    RequiredLoadVirtualRange second_available_part = { main_program_load_range.end, interpreter_load_range_end };
 
     // Select larger part
     if (first_available_part.end - first_available_part.start > second_available_part.end - second_available_part.start)

@@ -129,7 +129,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
     // acquiring the queue lock
     RefPtr<Memory::VMObject> vmobject, vmobject2;
     if (!is_private) {
-        auto region = space().find_region_containing(Memory::Range { VirtualAddress { user_address_or_offset }, sizeof(u32) });
+        auto region = space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset }, sizeof(u32) });
         if (!region)
             return EFAULT;
         vmobject = region->vmobject();
@@ -139,7 +139,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
     case FUTEX_REQUEUE:
     case FUTEX_CMP_REQUEUE:
     case FUTEX_WAKE_OP: {
-        auto region2 = space().find_region_containing(Memory::Range { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
+        auto region2 = space().find_region_containing(Memory::VirtualRange { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
         if (!region2)
             return EFAULT;
         vmobject2 = region2->vmobject();

@@ -14,7 +14,7 @@ KResultOr<FlatPtr> Process::sys$get_stack_bounds(Userspace<FlatPtr*> user_stack_
     VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
     auto& regs = Thread::current()->get_register_dump_from_stack();
     FlatPtr stack_pointer = regs.userspace_sp();
-    auto* stack_region = space().find_region_containing(Memory::Range { VirtualAddress(stack_pointer), 1 });
+    auto* stack_region = space().find_region_containing(Memory::VirtualRange { VirtualAddress(stack_pointer), 1 });
 
     // The syscall handler should have killed us if we had an invalid stack pointer.
     VERIFY(stack_region);

@@ -199,7 +199,7 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
         return EINVAL;
 
     Memory::Region* region = nullptr;
-    Optional<Memory::Range> range;
+    Optional<Memory::VirtualRange> range;
 
     if (map_randomized) {
         range = space().page_directory().range_allocator().allocate_randomized(Memory::page_round_up(size), alignment);
@@ -272,7 +272,7 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
     return region->vaddr().get();
 }
 
-static KResultOr<Memory::Range> expand_range_to_page_boundaries(FlatPtr address, size_t size)
+static KResultOr<Memory::VirtualRange> expand_range_to_page_boundaries(FlatPtr address, size_t size)
 {
     if (Memory::page_round_up_would_wrap(size))
         return EINVAL;
@@ -286,7 +286,7 @@ static KResultOr<Memory::Range> expand_range_to_page_boundaries(FlatPtr address,
     auto base = VirtualAddress { address }.page_base();
     auto end = Memory::page_round_up(address + size);
 
-    return Memory::Range { base, end - base.get() };
+    return Memory::VirtualRange { base, end - base.get() };
 }
 
 KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int prot)
@@ -346,7 +346,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
     auto region = space().take_region(*old_region);
 
     // Unmap the old region here, specifying that we *don't* want the VM deallocated.
-    region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
+    region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 
     // This vector is the region(s) adjacent to our range.
     // We need to allocate a new region for the range we wanted to change permission bits on.
@@ -409,7 +409,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
     auto region = space().take_region(*old_region);
 
     // Unmap the old region here, specifying that we *don't* want the VM deallocated.
-    region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
+    region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
 
     // This vector is the region(s) adjacent to our range.
     // We need to allocate a new region for the range we wanted to change permission bits on.
@@ -566,7 +566,7 @@ KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params
     auto old_name = old_region->take_name();
 
     // Unmap without deallocating the VM range since we're going to reuse it.
-    old_region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
+    old_region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
     space().deallocate_region(*old_region);
 
     auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject.release_nonnull(), old_offset, old_name->view(), old_prot, false);
@@ -657,7 +657,7 @@ KResultOr<FlatPtr> Process::sys$msyscall(Userspace<void*> address)
     if (!Memory::is_user_address(VirtualAddress { address }))
         return EFAULT;
 
-    auto* region = space().find_region_containing(Memory::Range { VirtualAddress { address }, 1 });
+    auto* region = space().find_region_containing(Memory::VirtualRange { VirtualAddress { address }, 1 });
     if (!region)
         return EINVAL;
 

@@ -194,7 +194,7 @@ KResultOr<u32> Process::peek_user_data(Userspace<const u32*> address)
 
 KResult Process::poke_user_data(Userspace<u32*> address, u32 data)
 {
-    Memory::Range range = { VirtualAddress(address), sizeof(u32) };
+    Memory::VirtualRange range = { VirtualAddress(address), sizeof(u32) };
     auto* region = space().find_region_containing(range);
     if (!region)
         return EFAULT;