
Kernel: Remove user/kernel flags from Region

Now that we no longer need to support the signal trampolines being
user-accessible inside the kernel memory range, we can get rid of the
"kernel" and "user-accessible" flags on Region and simply use the
address of the region to determine whether it's kernel or user.

This also tightens the page table mapping code, since it can now set
user-accessibility based solely on the virtual address of a page.
Andreas Kling 2021-02-14 01:25:22 +01:00
parent 1593219a41
commit 8415866c03
13 changed files with 54 additions and 70 deletions
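In practical terms, the commit replaces the stored m_user_accessible/m_kernel bits with a pure address check. The following sketch is not part of the diff; it only restates the classification introduced in the Region.h hunk further down (the 0x00800000 and 0xc0000000 constants are taken from that hunk), so the user/kernel nature of a region now follows directly from its virtual address:

#include <cstdint>

// Addresses at or above the kernel base, and addresses in the low 8 MiB,
// are treated as kernel-only; everything in between is user space.
constexpr uintptr_t low_kernel_limit = 0x00800000;
constexpr uintptr_t kernel_base = 0xc0000000;

constexpr bool region_is_kernel(uintptr_t vaddr)
{
    return vaddr < low_kernel_limit || vaddr >= kernel_base;
}

constexpr bool region_is_user(uintptr_t vaddr)
{
    return !region_is_kernel(vaddr);
}

static_assert(region_is_kernel(0xc1000000), "kernel-range mapping is kernel");
static_assert(region_is_user(0x08048000), "typical user mapping is user");
static_assert(region_is_kernel(0x00001000), "low memory is never user-accessible");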


@ -319,7 +319,7 @@ static bool procfs$pid_vm(InodeIdentifier identifier, KBufferBuilder& builder)
{
ScopedSpinLock lock(process->space().get_lock());
for (auto& region : process->space().regions()) {
- if (!region.is_user_accessible() && !Process::current()->is_superuser())
+ if (!region.is_user() && !Process::current()->is_superuser())
continue;
auto region_object = array.add_object();
region_object.add("readable", region.is_readable());
@ -328,13 +328,11 @@ static bool procfs$pid_vm(InodeIdentifier identifier, KBufferBuilder& builder)
region_object.add("stack", region.is_stack());
region_object.add("shared", region.is_shared());
region_object.add("syscall", region.is_syscall_region());
region_object.add("user_accessible", region.is_user_accessible());
region_object.add("purgeable", region.vmobject().is_anonymous());
if (region.vmobject().is_anonymous()) {
region_object.add("volatile", static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile());
}
region_object.add("cacheable", region.is_cacheable());
region_object.add("kernel", region.is_kernel());
region_object.add("address", region.vaddr().get());
region_object.add("size", region.size());
region_object.add("amount_resident", region.amount_resident());


@ -112,7 +112,7 @@ struct KmallocGlobalHeap {
// allocations not including the original allocation_request
// that triggered heap expansion. If we don't allocate
memory_size += 1 * MiB;
- region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow);
+ region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
if (region) {
klog() << "kmalloc(): Adding even more memory to heap at " << region->vaddr() << ", bytes: " << region->size();
@ -176,7 +176,7 @@ struct KmallocGlobalHeap {
{
if (m_backup_memory)
return;
- m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow);
+ m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
}
size_t backup_memory_bytes() const


@ -314,7 +314,7 @@ void APIC::do_boot_aps()
// Allocate enough stacks for all APs
Vector<OwnPtr<Region>> apic_ap_stacks;
for (u32 i = 0; i < aps_to_enable; i++) {
- auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow, true);
+ auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
if (!stack_region) {
klog() << "APIC: Failed to allocate stack for AP #" << i;
return;


@ -50,7 +50,7 @@ class KBufferImpl : public RefCounted<KBufferImpl> {
public:
static RefPtr<KBufferImpl> try_create_with_size(size_t size, u8 access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
{
- auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), name, access, false, strategy);
+ auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), name, access, strategy);
if (!region)
return nullptr;
return adopt(*new KBufferImpl(region.release_nonnull(), size, strategy));
@ -58,7 +58,7 @@ public:
static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, u8 access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
{
- auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(bytes.size()), name, access, false, strategy);
+ auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(bytes.size()), name, access, strategy);
if (!region)
return nullptr;
memcpy(region->vaddr().as_ptr(), bytes.data(), bytes.size());
@ -81,7 +81,7 @@ public:
bool expand(size_t new_capacity)
{
- auto new_region = MM.allocate_kernel_region(PAGE_ROUND_UP(new_capacity), m_region->name(), m_region->access(), false, m_allocation_strategy);
+ auto new_region = MM.allocate_kernel_region(PAGE_ROUND_UP(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
if (!new_region)
return false;
if (m_region && m_size > 0)


@ -206,7 +206,7 @@ E1000NetworkAdapter::E1000NetworkAdapter(PCI::Address address, u8 irq)
enable_bus_mastering(pci_address());
size_t mmio_base_size = PCI::get_BAR_space_size(pci_address(), 0);
- m_mmio_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), PAGE_ROUND_UP(mmio_base_size), "E1000 MMIO", Region::Access::Read | Region::Access::Write, false, false);
+ m_mmio_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), PAGE_ROUND_UP(mmio_base_size), "E1000 MMIO", Region::Access::Read | Region::Access::Write, Region::Cacheable::No);
m_mmio_base = m_mmio_region->vaddr();
m_use_mmio = true;
m_interrupt_line = PCI::get_interrupt_line(pci_address());


@ -288,7 +288,7 @@ extern "C" void asm_signal_trampoline_end(void);
void create_signal_trampoline()
{
// NOTE: We leak this region.
- g_signal_trampoline_region = MM.allocate_kernel_region(PAGE_SIZE, "Signal trampolines", Region::Access::Read | Region::Access::Write, false).leak_ptr();
+ g_signal_trampoline_region = MM.allocate_kernel_region(PAGE_SIZE, "Signal trampolines", Region::Access::Read | Region::Access::Write).leak_ptr();
g_signal_trampoline_region->set_syscall_region(true);
u8* trampoline = (u8*)asm_signal_trampoline;


@ -55,7 +55,7 @@ void Thread::initialize()
KResultOr<NonnullRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> process)
{
- auto kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow);
+ auto kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
if (!kernel_stack_region)
return ENOMEM;
kernel_stack_region->set_stack(true);


@ -454,7 +454,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
return region->handle_fault(fault, lock);
}
- OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, const StringView& name, u8 access, size_t physical_alignment, bool user_accessible, bool cacheable)
+ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, String name, u8 access, size_t physical_alignment, Region::Cacheable cacheable)
{
ASSERT(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_mm_lock);
@ -462,10 +462,10 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, con
if (!range.has_value())
return {};
auto vmobject = ContiguousVMObject::create_with_size(size, physical_alignment);
- return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
+ return allocate_kernel_region_with_vmobject(range.value(), vmobject, move(name), access, cacheable);
}
- OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, AllocationStrategy strategy, bool cacheable)
+ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String name, u8 access, AllocationStrategy strategy, Region::Cacheable cacheable)
{
ASSERT(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_mm_lock);
@ -475,10 +475,10 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
auto vmobject = AnonymousVMObject::create_with_size(size, strategy);
if (!vmobject)
return {};
- return allocate_kernel_region_with_vmobject(range.value(), vmobject.release_nonnull(), name, access, user_accessible, cacheable);
+ return allocate_kernel_region_with_vmobject(range.value(), vmobject.release_nonnull(), move(name), access, cacheable);
}
- OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, String name, u8 access, Region::Cacheable cacheable)
{
ASSERT(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_mm_lock);
@ -488,10 +488,10 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
if (!vmobject)
return {};
- return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
+ return allocate_kernel_region_with_vmobject(range.value(), *vmobject, move(name), access, cacheable);
}
- OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, String name, u8 access, Region::Cacheable cacheable)
{
ASSERT(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_mm_lock);
@ -501,35 +501,26 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
if (!vmobject)
return {};
- return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
+ return allocate_kernel_region_with_vmobject(range.value(), *vmobject, move(name), access, cacheable);
}
- OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
- {
- return allocate_kernel_region(size, name, access, true, AllocationStrategy::Reserve, cacheable);
- }
- OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, String name, u8 access, Region::Cacheable cacheable)
{
ScopedSpinLock lock(s_mm_lock);
- OwnPtr<Region> region;
- if (user_accessible)
- region = Region::create_user_accessible(nullptr, range, vmobject, 0, name, access, cacheable, false);
- else
- region = Region::create_kernel_only(range, vmobject, 0, name, access, cacheable);
+ auto region = Region::create_kernel_only(range, vmobject, 0, move(name), access, cacheable);
if (region)
region->map(kernel_page_directory());
return region;
}
- OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, String name, u8 access, Region::Cacheable cacheable)
{
ASSERT(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_mm_lock);
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.has_value())
return {};
- return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
+ return allocate_kernel_region_with_vmobject(range.value(), vmobject, move(name), access, cacheable);
}
bool MemoryManager::commit_user_physical_pages(size_t page_count)
@ -843,7 +834,7 @@ bool MemoryManager::validate_user_stack(const Process& process, VirtualAddress v
return false;
ScopedSpinLock lock(s_mm_lock);
auto* region = user_region_from_vaddr(const_cast<Process&>(process).space(), vaddr);
- return region && region->is_user_accessible() && region->is_stack();
+ return region && region->is_user() && region->is_stack();
}
void MemoryManager::register_vmobject(VMObject& vmobject)


@ -145,13 +145,12 @@ public:
void deallocate_user_physical_page(const PhysicalPage&);
void deallocate_supervisor_physical_page(const PhysicalPage&);
- OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, size_t physical_alignment = PAGE_SIZE, bool user_accessible = false, bool cacheable = true);
- OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, AllocationStrategy strategy = AllocationStrategy::Reserve, bool cacheable = true);
- OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
- OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
- OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
- OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
- OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = true);
+ OwnPtr<Region> allocate_contiguous_kernel_region(size_t, String name, u8 access, size_t physical_alignment = PAGE_SIZE, Region::Cacheable = Region::Cacheable::Yes);
+ OwnPtr<Region> allocate_kernel_region(size_t, String name, u8 access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
+ OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);
+ OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);
+ OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);
+ OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);
unsigned user_physical_pages() const { return m_user_physical_pages; }
unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
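For callers, the upshot of the header change above is that the boolean user_accessible and cacheable arguments disappear. A hypothetical call site wanting an uncacheable, eagerly committed kernel region would now look roughly like this (the region name and size are illustrative, not taken from the diff):

auto region = MM.allocate_kernel_region(
    4 * PAGE_SIZE,                                  // size must be page-aligned
    "Example region",                               // name is now passed as a String
    Region::Access::Read | Region::Access::Write,   // access flags are unchanged
    AllocationStrategy::AllocateNow,                // optional; defaults to Reserve
    Region::Cacheable::No);                         // replaces the old trailing bool
if (!region)
    return ENOMEM;                                  // allocation can still fail, as in Thread::try_create above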


@ -38,16 +38,15 @@
namespace Kernel {
- Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable, bool kernel, bool shared)
+ Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, u8 access, Cacheable cacheable, bool shared)
: PurgeablePageRanges(vmobject)
, m_range(range)
, m_offset_in_vmobject(offset_in_vmobject)
, m_vmobject(move(vmobject))
- , m_name(name)
+ , m_name(move(name))
, m_access(access | ((access & 0x7) << 4))
, m_shared(shared)
- , m_cacheable(cacheable)
- , m_kernel(kernel)
+ , m_cacheable(cacheable == Cacheable::Yes)
{
ASSERT(m_range.base().is_page_aligned());
ASSERT(m_range.size());
@ -104,7 +103,7 @@ OwnPtr<Region> Region::clone(Process& new_owner)
// Create a new region backed by the same VMObject.
auto region = Region::create_user_accessible(
- &new_owner, m_range, m_vmobject, m_offset_in_vmobject, m_name, m_access, m_cacheable, m_shared);
+ &new_owner, m_range, m_vmobject, m_offset_in_vmobject, m_name, m_access, m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
if (m_vmobject->is_anonymous())
region->copy_purgeable_page_ranges(*this);
region->set_mmap(m_mmap);
@ -123,7 +122,7 @@ OwnPtr<Region> Region::clone(Process& new_owner)
// Set up a COW region. The parent (this) region becomes COW as well!
remap();
auto clone_region = Region::create_user_accessible(
- &new_owner, m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name, m_access, m_cacheable, m_shared);
+ &new_owner, m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name, m_access, m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
if (m_vmobject->is_anonymous())
clone_region->copy_purgeable_page_ranges(*this);
if (m_stack) {
@ -228,20 +227,17 @@ size_t Region::amount_shared() const
return bytes;
}
- NonnullOwnPtr<Region> Region::create_user_accessible(Process* owner, const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable, bool shared)
+ NonnullOwnPtr<Region> Region::create_user_accessible(Process* owner, const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, u8 access, Cacheable cacheable, bool shared)
{
- auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, false, shared);
+ auto region = make<Region>(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared);
if (owner)
region->m_owner = owner->make_weak_ptr();
- region->m_user_accessible = true;
return region;
}
- NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
+ NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, u8 access, Cacheable cacheable)
{
- auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, true, false);
- region->m_user_accessible = false;
- return region;
+ return make<Region>(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, false);
}
bool Region::should_cow(size_t page_index) const
@ -278,7 +274,7 @@ bool Region::map_individual_page_impl(size_t page_index)
pte->set_writable(is_writable());
if (Processor::current().has_feature(CPUFeature::NX))
pte->set_execute_disabled(!is_executable());
- pte->set_user_allowed(is_user_accessible());
+ pte->set_user_allowed(page_vaddr.get() >= 0x00800000 && is_user_address(page_vaddr));
}
return true;
}
@ -388,7 +384,7 @@ bool Region::map(PageDirectory& page_directory)
ScopedSpinLock page_lock(page_directory.get_lock());
// FIXME: Find a better place for this sanity check(?)
- if (is_user_accessible() && !is_shared()) {
+ if (is_user() && !is_shared()) {
ASSERT(!vmobject().is_shared_inode());
}


@ -59,8 +59,13 @@ public:
HasBeenExecutable = 64,
};
- static NonnullOwnPtr<Region> create_user_accessible(Process*, const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable, bool shared);
- static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable = true);
+ enum class Cacheable {
+ No = 0,
+ Yes,
+ };
+ static NonnullOwnPtr<Region> create_user_accessible(Process*, const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String name, u8 access, Cacheable, bool shared);
+ static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String name, u8 access, Cacheable = Cacheable::Yes);
~Region();
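Call sites that previously passed a bare bool for cacheability now pass the enum; where an existing bool has to be forwarded, the diff converts it with a ternary, as in Region::clone() and the Space hunks. A minimal sketch of the migration (names as in the surrounding code):

// Old: Region::create_kernel_only(range, vmobject, 0, name, access, /* cacheable */ true);
// New:
auto region = Region::create_kernel_only(range, vmobject, 0, move(name), access, Region::Cacheable::Yes);

// Forwarding a stored bool:
auto cacheable = m_cacheable ? Region::Cacheable::Yes : Region::Cacheable::No;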
@ -79,8 +84,7 @@ public:
const String& name() const { return m_name; }
unsigned access() const { return m_access; }
- void set_name(const String& name) { m_name = name; }
- void set_name(String&& name) { m_name = move(name); }
+ void set_name(String name) { m_name = move(name); }
const VMObject& vmobject() const { return *m_vmobject; }
VMObject& vmobject() { return *m_vmobject; }
@ -95,8 +99,8 @@ public:
bool is_mmap() const { return m_mmap; }
void set_mmap(bool mmap) { m_mmap = mmap; }
- bool is_user_accessible() const { return m_user_accessible; }
- bool is_kernel() const { return m_kernel || vaddr().get() >= 0xc0000000; }
+ bool is_user() const { return !is_kernel(); }
+ bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= 0xc0000000; }
PageFaultResponse handle_fault(const PageFault&, ScopedSpinLock<RecursiveSpinLock>&);
@ -225,7 +229,7 @@ public:
Region* m_prev { nullptr };
// NOTE: These are public so we can make<> them.
- Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access, bool cacheable, bool kernel, bool shared);
+ Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String, u8 access, Cacheable, bool shared);
bool remap_vmobject_page_range(size_t page_index, size_t page_count);
@ -272,11 +276,9 @@ private:
String m_name;
u8 m_access { 0 };
bool m_shared : 1 { false };
- bool m_user_accessible : 1 { false };
bool m_cacheable : 1 { false };
bool m_stack : 1 { false };
bool m_mmap : 1 { false };
- bool m_kernel : 1 { false };
bool m_syscall_region : 1 { false };
WeakPtr<Process> m_owner;
};


@ -66,7 +66,7 @@ Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t
Region& Space::allocate_split_region(const Region& source_region, const Range& range, size_t offset_in_vmobject)
{
auto& region = add_region(Region::create_user_accessible(
- m_process, range, source_region.vmobject(), offset_in_vmobject, source_region.name(), source_region.access(), source_region.is_cacheable(), source_region.is_shared()));
+ m_process, range, source_region.vmobject(), offset_in_vmobject, source_region.name(), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared()));
region.set_syscall_region(source_region.is_syscall_region());
region.set_mmap(source_region.is_mmap());
region.set_stack(source_region.is_stack());
@ -84,7 +84,7 @@ KResultOr<Region*> Space::allocate_region(const Range& range, const String& name
auto vmobject = AnonymousVMObject::create_with_size(range.size(), strategy);
if (!vmobject)
return ENOMEM;
- auto region = Region::create_user_accessible(m_process, range, vmobject.release_nonnull(), 0, name, prot_to_region_access_flags(prot), true, false);
+ auto region = Region::create_user_accessible(m_process, range, vmobject.release_nonnull(), 0, name, prot_to_region_access_flags(prot), Region::Cacheable::Yes, false);
if (!region->map(page_directory()))
return ENOMEM;
return &add_region(move(region));
@ -107,7 +107,7 @@ KResultOr<Region*> Space::allocate_region_with_vmobject(const Range& range, Nonn
return EINVAL;
}
offset_in_vmobject &= PAGE_MASK;
- auto& region = add_region(Region::create_user_accessible(m_process, range, move(vmobject), offset_in_vmobject, name, prot_to_region_access_flags(prot), true, shared));
+ auto& region = add_region(Region::create_user_accessible(m_process, range, move(vmobject), offset_in_vmobject, name, prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
if (!region.map(page_directory())) {
// FIXME: What is an appropriate error code here, really?
return ENOMEM;


@ -79,8 +79,6 @@ ProcessMemoryMapWidget::ProcessMemoryMapWidget()
pid_vm_fields.empend("amount_dirty", "Dirty", Gfx::TextAlignment::CenterRight);
pid_vm_fields.empend("Access", Gfx::TextAlignment::CenterLeft, [](auto& object) {
StringBuilder builder;
if (!object.get("user_accessible").to_bool())
builder.append('K');
if (object.get("readable").to_bool())
builder.append('R');
if (object.get("writable").to_bool())