
Kernel: Move Kernel/Memory/ code into Kernel::Memory namespace

Andreas Kling 2021-08-06 13:49:36 +02:00
parent a1d7ebf85a
commit 93d98d4976
153 changed files with 473 additions and 467 deletions
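
The hunks below all apply the same mechanical change: declarations that previously lived directly in the Kernel namespace (Region, Space, VMObject, MemoryManager, Range, page_round_up, ...) now live in Kernel::Memory, so every call site outside that namespace gains a Memory:: qualifier. A minimal, self-contained C++ sketch of the pattern follows; the names here are hypothetical stand-ins, not taken from the SerenityOS sources.

#include <cstddef>
#include <cstdio>

// Before this commit, a class like this sat directly in namespace Kernel.
// Afterwards it is wrapped in the nested Kernel::Memory namespace.
namespace Kernel::Memory {

class Region {
public:
    explicit Region(size_t size)
        : m_size(size)
    {
    }
    size_t size() const { return m_size; }

private:
    size_t m_size { 0 };
};

}

namespace Kernel {

// Call sites elsewhere in Kernel now spell out Memory::Region instead of
// plain Region, which is exactly the repetitive edit visible in every hunk below.
static Memory::Region make_demo_region()
{
    return Memory::Region { 4096 };
}

}

int main()
{
    auto region = Kernel::make_demo_region();
    std::printf("demo region size: %zu bytes\n", region.size());
    return 0;
}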

@@ -29,7 +29,7 @@ KResultOr<FlatPtr> Process::sys$anon_create(size_t size, int options)
if (new_fd_or_error.is_error())
return new_fd_or_error.error();
auto new_fd = new_fd_or_error.release_value();
-auto vmobject = AnonymousVMObject::try_create_purgeable_with_size(size, AllocationStrategy::Reserve);
+auto vmobject = Memory::AnonymousVMObject::try_create_purgeable_with_size(size, AllocationStrategy::Reserve);
if (!vmobject)
return ENOMEM;

@@ -27,17 +27,17 @@
namespace Kernel {
-extern Region* g_signal_trampoline_region;
+extern Memory::Region* g_signal_trampoline_region;
struct LoadResult {
-OwnPtr<Space> space;
+OwnPtr<Memory::Space> space;
FlatPtr load_base { 0 };
FlatPtr entry_eip { 0 };
size_t size { 0 };
-WeakPtr<Region> tls_region;
+WeakPtr<Memory::Region> tls_region;
size_t tls_size { 0 };
size_t tls_alignment { 0 };
-WeakPtr<Region> stack_region;
+WeakPtr<Memory::Region> stack_region;
};
static Vector<ELF::AuxiliaryValue> generate_auxiliary_vector(FlatPtr load_base, FlatPtr entry_eip, uid_t uid, uid_t euid, gid_t gid, gid_t egid, String executable_path, int main_program_fd);
@@ -68,7 +68,7 @@ static bool validate_stack_size(const Vector<String>& arguments, const Vector<St
return true;
}
-static KResultOr<FlatPtr> make_userspace_context_for_main_thread([[maybe_unused]] ThreadRegisters& regs, Region& region, Vector<String> arguments,
+static KResultOr<FlatPtr> make_userspace_context_for_main_thread([[maybe_unused]] ThreadRegisters& regs, Memory::Region& region, Vector<String> arguments,
Vector<String> environment, Vector<ELF::AuxiliaryValue> auxiliary_values)
{
FlatPtr new_sp = region.range().end().get();
@@ -162,7 +162,7 @@ struct RequiredLoadRange {
static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& program_description)
{
auto& inode = *(program_description.inode());
-auto vmobject = SharedInodeVMObject::try_create_with_inode(inode);
+auto vmobject = Memory::SharedInodeVMObject::try_create_with_inode(inode);
if (!vmobject) {
dbgln("get_required_load_range: Unable to allocate SharedInodeVMObject");
return ENOMEM;
@@ -170,7 +170,7 @@ static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& pro
size_t executable_size = inode.size();
-auto region = MM.allocate_kernel_region_with_vmobject(*vmobject, page_round_up(executable_size), "ELF memory range calculation", Region::Access::Read);
+auto region = MM.allocate_kernel_region_with_vmobject(*vmobject, Memory::page_round_up(executable_size), "ELF memory range calculation", Memory::Region::Access::Read);
if (!region) {
dbgln("Could not allocate memory for ELF");
return ENOMEM;
@@ -205,7 +205,7 @@ static KResultOr<FlatPtr> get_load_offset(const ElfW(Ehdr) & main_program_header
constexpr FlatPtr minimum_load_offset_randomization_size = 10 * MiB;
auto random_load_offset_in_range([](auto start, auto size) {
-return page_round_down(start + get_good_random<FlatPtr>() % size);
+return Memory::page_round_down(start + get_good_random<FlatPtr>() % size);
});
if (main_program_header.e_type == ET_DYN) {
@@ -263,11 +263,11 @@ enum class ShouldAllowSyscalls {
Yes,
};
-static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, FileDescription& object_description,
+static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::Space> new_space, FileDescription& object_description,
FlatPtr load_offset, ShouldAllocateTls should_allocate_tls, ShouldAllowSyscalls should_allow_syscalls)
{
auto& inode = *(object_description.inode());
-auto vmobject = SharedInodeVMObject::try_create_with_inode(inode);
+auto vmobject = Memory::SharedInodeVMObject::try_create_with_inode(inode);
if (!vmobject) {
dbgln("load_elf_object: Unable to allocate SharedInodeVMObject");
return ENOMEM;
@@ -280,7 +280,7 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
size_t executable_size = inode.size();
-auto executable_region = MM.allocate_kernel_region_with_vmobject(*vmobject, page_round_up(executable_size), "ELF loading", Region::Access::Read);
+auto executable_region = MM.allocate_kernel_region_with_vmobject(*vmobject, Memory::page_round_up(executable_size), "ELF loading", Memory::Region::Access::Read);
if (!executable_region) {
dbgln("Could not allocate memory for ELF loading");
return ENOMEM;
@@ -291,7 +291,7 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
if (!elf_image.is_valid())
return ENOEXEC;
-Region* master_tls_region { nullptr };
+Memory::Region* master_tls_region { nullptr };
size_t master_tls_size = 0;
size_t master_tls_alignment = 0;
FlatPtr load_base_address = 0;
@@ -299,7 +299,7 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
String elf_name = object_description.absolute_path();
VERIFY(!Processor::current().in_critical());
-MemoryManager::enter_space(*new_space);
+Memory::MemoryManager::enter_space(*new_space);
KResult ph_load_result = KSuccess;
elf_image.for_each_program_header([&](const ELF::Image::ProgramHeader& program_header) {
@@ -356,8 +356,8 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
prot |= PROT_WRITE;
auto region_name = String::formatted("{} (data-{}{})", elf_name, program_header.is_readable() ? "r" : "", program_header.is_writable() ? "w" : "");
-auto range_base = VirtualAddress { page_round_down(program_header.vaddr().offset(load_offset).get()) };
-auto range_end = VirtualAddress { page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()) };
+auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
+auto range_end = VirtualAddress { Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()) };
auto range = new_space->allocate_range(range_base, range_end.get() - range_base.get());
if (!range.has_value()) {
@@ -397,8 +397,8 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
if (program_header.is_executable())
prot |= PROT_EXEC;
-auto range_base = VirtualAddress { page_round_down(program_header.vaddr().offset(load_offset).get()) };
-auto range_end = VirtualAddress { page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()) };
+auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
+auto range_end = VirtualAddress { Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()) };
auto range = new_space->allocate_range(range_base, range_end.get() - range_base.get());
if (!range.has_value()) {
ph_load_result = ENOMEM;
@@ -453,12 +453,12 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
KResultOr<LoadResult> Process::load(NonnullRefPtr<FileDescription> main_program_description,
RefPtr<FileDescription> interpreter_description, const ElfW(Ehdr) & main_program_header)
{
-auto new_space = Space::try_create(*this, nullptr);
+auto new_space = Memory::Space::try_create(*this, nullptr);
if (!new_space)
return ENOMEM;
ScopeGuard space_guard([&]() {
-MemoryManager::enter_process_paging_scope(*this);
+Memory::MemoryManager::enter_process_paging_scope(*this);
});
auto load_offset = get_load_offset(main_program_header, main_program_description, interpreter_description);
@@ -560,7 +560,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
TemporaryChange global_profiling_disabler(g_profiling_all_threads, false);
m_space = load_result.space.release_nonnull();
}
-MemoryManager::enter_space(*m_space);
+Memory::MemoryManager::enter_space(*m_space);
auto signal_trampoline_region = m_space->allocate_region_with_vmobject(signal_trampoline_range.value(), g_signal_trampoline_region->vmobject(), 0, "Signal trampoline", PROT_READ | PROT_EXEC, true);
if (signal_trampoline_region.is_error()) {

@@ -108,7 +108,7 @@ KResultOr<FlatPtr> Process::sys$fork(RegisterState& regs)
// TODO: tear down new process?
return ENOMEM;
}
-child_region->map(child->space().page_directory(), ShouldFlushTLB::No);
+child_region->map(child->space().page_directory(), Memory::ShouldFlushTLB::No);
if (region == m_master_tls_region.unsafe_ptr())
child->m_master_tls_region = child_region;

@@ -12,9 +12,9 @@
namespace Kernel {
static SpinLock<u8> g_global_futex_lock;
-static AK::Singleton<HashMap<VMObject*, FutexQueues>> g_global_futex_queues;
+static AK::Singleton<HashMap<Memory::VMObject*, FutexQueues>> g_global_futex_queues;
-FutexQueue::FutexQueue(FlatPtr user_address_or_offset, VMObject* vmobject)
+FutexQueue::FutexQueue(FlatPtr user_address_or_offset, Memory::VMObject* vmobject)
: m_user_address_or_offset(user_address_or_offset)
, m_is_global(vmobject != nullptr)
{
@@ -40,7 +40,7 @@ FutexQueue::~FutexQueue()
m_is_global ? " (global)" : " (local)");
}
-void FutexQueue::vmobject_deleted(VMObject& vmobject)
+void FutexQueue::vmobject_deleted(Memory::VMObject& vmobject)
{
VERIFY(m_is_global); // If we got called we must be a global futex
// Because we're taking ourselves out of the global queue, we need
@@ -127,9 +127,9 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
// If this is a global lock, look up the underlying VMObject *before*
// acquiring the queue lock
-RefPtr<VMObject> vmobject, vmobject2;
+RefPtr<Memory::VMObject> vmobject, vmobject2;
if (!is_private) {
-auto region = space().find_region_containing(Range { VirtualAddress { user_address_or_offset }, sizeof(u32) });
+auto region = space().find_region_containing(Memory::Range { VirtualAddress { user_address_or_offset }, sizeof(u32) });
if (!region)
return EFAULT;
vmobject = region->vmobject();
@@ -139,7 +139,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
case FUTEX_REQUEUE:
case FUTEX_CMP_REQUEUE:
case FUTEX_WAKE_OP: {
-auto region2 = space().find_region_containing(Range { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
+auto region2 = space().find_region_containing(Memory::Range { VirtualAddress { user_address_or_offset2 }, sizeof(u32) });
if (!region2)
return EFAULT;
vmobject2 = region2->vmobject();
@@ -149,7 +149,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
}
}
-auto find_global_futex_queues = [&](VMObject& vmobject, bool create_if_not_found) -> FutexQueues* {
+auto find_global_futex_queues = [&](Memory::VMObject& vmobject, bool create_if_not_found) -> FutexQueues* {
auto& global_queues = *g_global_futex_queues;
auto it = global_queues.find(&vmobject);
if (it != global_queues.end())
@@ -165,7 +165,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
return nullptr;
};
-auto find_futex_queue = [&](VMObject* vmobject, FlatPtr user_address_or_offset, bool create_if_not_found, bool* did_create = nullptr) -> RefPtr<FutexQueue> {
+auto find_futex_queue = [&](Memory::VMObject* vmobject, FlatPtr user_address_or_offset, bool create_if_not_found, bool* did_create = nullptr) -> RefPtr<FutexQueue> {
VERIFY(is_private || vmobject);
VERIFY(!create_if_not_found || did_create != nullptr);
auto* queues = is_private ? &m_futex_queues : find_global_futex_queues(*vmobject, create_if_not_found);
@@ -184,7 +184,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
return {};
};
-auto remove_futex_queue = [&](VMObject* vmobject, FlatPtr user_address_or_offset) {
+auto remove_futex_queue = [&](Memory::VMObject* vmobject, FlatPtr user_address_or_offset) {
auto* queues = is_private ? &m_futex_queues : find_global_futex_queues(*vmobject, false);
if (queues) {
if (auto it = queues->find(user_address_or_offset); it != queues->end()) {
@@ -198,7 +198,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
}
};
-auto do_wake = [&](VMObject* vmobject, FlatPtr user_address_or_offset, u32 count, Optional<u32> bitmask) -> int {
+auto do_wake = [&](Memory::VMObject* vmobject, FlatPtr user_address_or_offset, u32 count, Optional<u32> bitmask) -> int {
if (count == 0)
return 0;
ScopedSpinLock lock(queue_lock);

@@ -14,7 +14,7 @@ KResultOr<FlatPtr> Process::sys$get_stack_bounds(Userspace<FlatPtr*> user_stack_
VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this);
auto& regs = Thread::current()->get_register_dump_from_stack();
FlatPtr stack_pointer = regs.userspace_sp();
-auto* stack_region = space().find_region_containing(Range { VirtualAddress(stack_pointer), 1 });
+auto* stack_region = space().find_region_containing(Memory::Range { VirtualAddress(stack_pointer), 1 });
// The syscall handler should have killed us if we had an invalid stack pointer.
VERIFY(stack_region);

@@ -23,7 +23,7 @@
namespace Kernel {
-static bool should_make_executable_exception_for_dynamic_loader(bool make_readable, bool make_writable, bool make_executable, const Region& region)
+static bool should_make_executable_exception_for_dynamic_loader(bool make_readable, bool make_writable, bool make_executable, Memory::Region const& region)
{
// Normally we don't allow W -> X transitions, but we have to make an exception
// for the dynamic loader, which needs to do this after performing text relocations.
@@ -48,7 +48,7 @@ static bool should_make_executable_exception_for_dynamic_loader(bool make_readab
if (!region.vmobject().is_private_inode())
return false;
-auto& inode_vm = static_cast<const InodeVMObject&>(region.vmobject());
+auto& inode_vm = static_cast<Memory::InodeVMObject const&>(region.vmobject());
auto& inode = inode_vm.inode();
ElfW(Ehdr) header;
@@ -69,7 +69,7 @@ static bool should_make_executable_exception_for_dynamic_loader(bool make_readab
return true;
}
-static bool validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, const Region* region = nullptr)
+static bool validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, Memory::Region const* region = nullptr)
{
bool make_readable = prot & PROT_READ;
bool make_writable = prot & PROT_WRITE;
@@ -154,10 +154,10 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
if (alignment & ~PAGE_MASK)
return EINVAL;
-if (page_round_up_would_wrap(size))
+if (Memory::page_round_up_would_wrap(size))
return EINVAL;
-if (!is_user_range(VirtualAddress(addr), page_round_up(size)))
+if (!Memory::is_user_range(VirtualAddress(addr), Memory::page_round_up(size)))
return EFAULT;
OwnPtr<KString> name;
@@ -198,11 +198,11 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
if (map_stack && (!map_private || !map_anonymous))
return EINVAL;
-Region* region = nullptr;
-Optional<Range> range;
+Memory::Region* region = nullptr;
+Optional<Memory::Range> range;
if (map_randomized) {
-range = space().page_directory().range_allocator().allocate_randomized(page_round_up(size), alignment);
+range = space().page_directory().range_allocator().allocate_randomized(Memory::page_round_up(size), alignment);
} else {
range = space().allocate_range(VirtualAddress(addr), size, alignment);
if (!range.has_value()) {
@@ -218,11 +218,11 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
if (map_anonymous) {
auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
-RefPtr<AnonymousVMObject> vmobject;
+RefPtr<Memory::AnonymousVMObject> vmobject;
if (flags & MAP_PURGEABLE)
-vmobject = AnonymousVMObject::try_create_purgeable_with_size(page_round_up(size), strategy);
+vmobject = Memory::AnonymousVMObject::try_create_purgeable_with_size(Memory::page_round_up(size), strategy);
else
-vmobject = AnonymousVMObject::try_create_with_size(page_round_up(size), strategy);
+vmobject = Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(size), strategy);
if (!vmobject)
return ENOMEM;
auto region_or_error = space().allocate_region_with_vmobject(range.value(), vmobject.release_nonnull(), 0, {}, prot, map_shared);
@@ -272,21 +272,21 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
return region->vaddr().get();
}
-static KResultOr<Range> expand_range_to_page_boundaries(FlatPtr address, size_t size)
+static KResultOr<Memory::Range> expand_range_to_page_boundaries(FlatPtr address, size_t size)
{
-if (page_round_up_would_wrap(size))
+if (Memory::page_round_up_would_wrap(size))
return EINVAL;
if ((address + size) < address)
return EINVAL;
-if (page_round_up_would_wrap(address + size))
+if (Memory::page_round_up_would_wrap(address + size))
return EINVAL;
auto base = VirtualAddress { address }.page_base();
-auto end = page_round_up(address + size);
+auto end = Memory::page_round_up(address + size);
-return Range { base, end - base.get() };
+return Memory::Range { base, end - base.get() };
}
KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int prot)
@@ -314,10 +314,10 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
return EPERM;
if (!validate_mmap_prot(prot, whole_region->is_stack(), whole_region->vmobject().is_anonymous(), whole_region))
return EINVAL;
-if (whole_region->access() == prot_to_region_access_flags(prot))
+if (whole_region->access() == Memory::prot_to_region_access_flags(prot))
return 0;
if (whole_region->vmobject().is_inode()
-&& !validate_inode_mmap_prot(*this, prot, static_cast<const InodeVMObject&>(whole_region->vmobject()).inode(), whole_region->is_shared())) {
+&& !validate_inode_mmap_prot(*this, prot, static_cast<Memory::InodeVMObject const&>(whole_region->vmobject()).inode(), whole_region->is_shared())) {
return EACCES;
}
whole_region->set_readable(prot & PROT_READ);
@@ -334,10 +334,10 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
return EPERM;
if (!validate_mmap_prot(prot, old_region->is_stack(), old_region->vmobject().is_anonymous(), old_region))
return EINVAL;
-if (old_region->access() == prot_to_region_access_flags(prot))
+if (old_region->access() == Memory::prot_to_region_access_flags(prot))
return 0;
if (old_region->vmobject().is_inode()
-&& !validate_inode_mmap_prot(*this, prot, static_cast<const InodeVMObject&>(old_region->vmobject()).inode(), old_region->is_shared())) {
+&& !validate_inode_mmap_prot(*this, prot, static_cast<Memory::InodeVMObject const&>(old_region->vmobject()).inode(), old_region->is_shared())) {
return EACCES;
}
@@ -346,7 +346,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
auto region = space().take_region(*old_region);
// Unmap the old region here, specifying that we *don't* want the VM deallocated.
-region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
// This vector is the region(s) adjacent to our range.
// We need to allocate a new region for the range we wanted to change permission bits on.
@@ -380,10 +380,10 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
return EPERM;
if (!validate_mmap_prot(prot, region->is_stack(), region->vmobject().is_anonymous(), region))
return EINVAL;
-if (region->access() == prot_to_region_access_flags(prot))
+if (region->access() == Memory::prot_to_region_access_flags(prot))
return 0;
if (region->vmobject().is_inode()
-&& !validate_inode_mmap_prot(*this, prot, static_cast<const InodeVMObject&>(region->vmobject()).inode(), region->is_shared())) {
+&& !validate_inode_mmap_prot(*this, prot, static_cast<Memory::InodeVMObject const&>(region->vmobject()).inode(), region->is_shared())) {
return EACCES;
}
full_size_found += region->range().intersect(range_to_mprotect).size();
@@ -409,7 +409,7 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
auto region = space().take_region(*old_region);
// Unmap the old region here, specifying that we *don't* want the VM deallocated.
-region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
// This vector is the region(s) adjacent to our range.
// We need to allocate a new region for the range we wanted to change permission bits on.
@@ -473,7 +473,7 @@ KResultOr<FlatPtr> Process::sys$madvise(Userspace<void*> address, size_t size, i
if (set_volatile || set_nonvolatile) {
if (!region->vmobject().is_anonymous())
return EINVAL;
-auto& vmobject = static_cast<AnonymousVMObject&>(region->vmobject());
+auto& vmobject = static_cast<Memory::AnonymousVMObject&>(region->vmobject());
if (!vmobject.is_purgeable())
return EINVAL;
bool was_purged = false;
@@ -557,16 +557,16 @@ KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params
auto range = old_region->range();
auto old_prot = region_access_flags_to_prot(old_region->access());
auto old_offset = old_region->offset_in_vmobject();
-NonnullRefPtr inode = static_cast<SharedInodeVMObject&>(old_region->vmobject()).inode();
+NonnullRefPtr inode = static_cast<Memory::SharedInodeVMObject&>(old_region->vmobject()).inode();
-auto new_vmobject = PrivateInodeVMObject::try_create_with_inode(inode);
+auto new_vmobject = Memory::PrivateInodeVMObject::try_create_with_inode(inode);
if (!new_vmobject)
return ENOMEM;
auto old_name = old_region->take_name();
// Unmap without deallocating the VM range since we're going to reuse it.
-old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
+old_region->unmap(Memory::Region::ShouldDeallocateVirtualMemoryRange::No);
space().deallocate_region(*old_region);
auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject.release_nonnull(), old_offset, old_name->view(), old_prot, false);
@@ -654,10 +654,10 @@ KResultOr<FlatPtr> Process::sys$msyscall(Userspace<void*> address)
return 0;
}
-if (!is_user_address(VirtualAddress { address }))
+if (!Memory::is_user_address(VirtualAddress { address }))
return EFAULT;
-auto* region = space().find_region_containing(Range { VirtualAddress { address }, 1 });
+auto* region = space().find_region_containing(Memory::Range { VirtualAddress { address }, 1 });
if (!region)
return EINVAL;

@@ -55,7 +55,7 @@ KResultOr<FlatPtr> Process::sys$module_load(Userspace<const char*> user_path, si
elf_image->for_each_section_of_type(SHT_PROGBITS, [&](const ELF::Image::Section& section) {
if (!section.size())
return;
-auto section_storage = KBuffer::copy(section.raw_data(), section.size(), Region::Access::Read | Region::Access::Write | Region::Access::Execute);
+auto section_storage = KBuffer::copy(section.raw_data(), section.size(), Memory::Region::Access::Read | Memory::Region::Access::Write | Memory::Region::Access::Execute);
section_storage_by_name.set(section.name(), section_storage.data());
module->sections.append(move(section_storage));
});

@@ -121,7 +121,7 @@ static KResultOr<u32> handle_ptrace(const Kernel::Syscall::SC_ptrace_params& par
Kernel::Syscall::SC_ptrace_peek_params peek_params {};
if (!copy_from_user(&peek_params, reinterpret_cast<Kernel::Syscall::SC_ptrace_peek_params*>(params.addr)))
return EFAULT;
-if (!is_user_address(VirtualAddress { peek_params.address }))
+if (!Memory::is_user_address(VirtualAddress { peek_params.address }))
return EFAULT;
auto result = peer->process().peek_user_data(Userspace<const u32*> { (FlatPtr)peek_params.address });
if (result.is_error())
@@ -132,7 +132,7 @@ static KResultOr<u32> handle_ptrace(const Kernel::Syscall::SC_ptrace_params& par
}
case PT_POKE:
-if (!is_user_address(VirtualAddress { params.addr }))
+if (!Memory::is_user_address(VirtualAddress { params.addr }))
return EFAULT;
return peer->process().poke_user_data(Userspace<u32*> { (FlatPtr)params.addr }, params.data);
@@ -194,7 +194,7 @@ KResultOr<u32> Process::peek_user_data(Userspace<const u32*> address)
KResult Process::poke_user_data(Userspace<u32*> address, u32 data)
{
-Range range = { VirtualAddress(address), sizeof(u32) };
+Memory::Range range = { VirtualAddress(address), sizeof(u32) };
auto* region = space().find_region_containing(range);
if (!region)
return EFAULT;
@@ -203,7 +203,7 @@ KResult Process::poke_user_data(Userspace<u32*> address, u32 data)
// If the region is shared, we change its vmobject to a PrivateInodeVMObject
// to prevent the write operation from changing any shared inode data
VERIFY(region->vmobject().is_shared_inode());
-auto vmobject = PrivateInodeVMObject::try_create_with_inode(static_cast<SharedInodeVMObject&>(region->vmobject()).inode());
+auto vmobject = Memory::PrivateInodeVMObject::try_create_with_inode(static_cast<Memory::SharedInodeVMObject&>(region->vmobject()).inode());
if (!vmobject)
return ENOMEM;
region->set_vmobject(vmobject.release_nonnull());

@@ -20,7 +20,7 @@ KResultOr<FlatPtr> Process::sys$purge(int mode)
return EPERM;
size_t purged_page_count = 0;
if (mode & PURGE_ALL_VOLATILE) {
-NonnullRefPtrVector<AnonymousVMObject> vmobjects;
+NonnullRefPtrVector<Memory::AnonymousVMObject> vmobjects;
{
KResult result(KSuccess);
MM.for_each_vmobject([&](auto& vmobject) {
@@ -43,14 +43,14 @@ KResultOr<FlatPtr> Process::sys$purge(int mode)
}
}
if (mode & PURGE_ALL_CLEAN_INODE) {
-NonnullRefPtrVector<InodeVMObject> vmobjects;
+NonnullRefPtrVector<Memory::InodeVMObject> vmobjects;
{
KResult result(KSuccess);
MM.for_each_vmobject([&](auto& vmobject) {
if (vmobject.is_inode()) {
// In the event that the append fails, only attempt to continue
// the purge if we have already appended something successfully.
-if (!vmobjects.try_append(static_cast<InodeVMObject&>(vmobject)) && vmobjects.is_empty()) {
+if (!vmobjects.try_append(static_cast<Memory::InodeVMObject&>(vmobject)) && vmobjects.is_empty()) {
result = ENOMEM;
return IterationDecision::Break;
}