Mirror of https://github.com/RGBCube/serenity, synced 2025-07-26 03:07:44 +00:00
Kernel: Remove user/kernel flags from Region
Now that we no longer need to support the signal trampolines being user-accessible inside the kernel memory range, we can get rid of the "kernel" and "user-accessible" flags on Region and simply use the address of the region to determine whether it's kernel or user. This also tightens the page table mapping code, since it can now set user-accessibility based solely on the virtual address of a page.
Commit 8415866c03 (parent 1593219a41)
13 changed files with 54 additions and 70 deletions
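
Before the diff, a minimal standalone sketch of the mechanism the commit message describes (standard C++, not SerenityOS code; the 8 MiB and 3 GiB boundaries are mirrored from the Region.h hunk below):

    #include <cassert>
    #include <cstdint>

    // Stand-in for the new Region::is_kernel()/is_user() predicates, which
    // classify a region purely by its virtual address instead of a stored
    // flag: below 8 MiB and at or above 3 GiB is kernel; in between is user.
    constexpr uintptr_t low_kernel_end = 0x00800000; // 8 MiB
    constexpr uintptr_t kernel_base = 0xc0000000;    // 3 GiB

    constexpr bool is_kernel_address_range(uintptr_t vaddr)
    {
        return vaddr < low_kernel_end || vaddr >= kernel_base;
    }

    int main()
    {
        assert(is_kernel_address_range(0x00100000));  // low kernel mappings
        assert(!is_kernel_address_range(0x10000000)); // ordinary user mapping
        assert(is_kernel_address_range(0xc1000000));  // kernel heap range
    }
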
@@ -319,7 +319,7 @@ static bool procfs$pid_vm(InodeIdentifier identifier, KBufferBuilder& builder)
     {
         ScopedSpinLock lock(process->space().get_lock());
         for (auto& region : process->space().regions()) {
-            if (!region.is_user_accessible() && !Process::current()->is_superuser())
+            if (!region.is_user() && !Process::current()->is_superuser())
                 continue;
             auto region_object = array.add_object();
             region_object.add("readable", region.is_readable());

@@ -328,13 +328,11 @@ static bool procfs$pid_vm(InodeIdentifier identifier, KBufferBuilder& builder)
             region_object.add("stack", region.is_stack());
             region_object.add("shared", region.is_shared());
             region_object.add("syscall", region.is_syscall_region());
-            region_object.add("user_accessible", region.is_user_accessible());
             region_object.add("purgeable", region.vmobject().is_anonymous());
             if (region.vmobject().is_anonymous()) {
                 region_object.add("volatile", static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile());
             }
             region_object.add("cacheable", region.is_cacheable());
-            region_object.add("kernel", region.is_kernel());
             region_object.add("address", region.vaddr().get());
             region_object.add("size", region.size());
             region_object.add("amount_resident", region.amount_resident());

@@ -112,7 +112,7 @@ struct KmallocGlobalHeap {
             // allocations not including the original allocation_request
             // that triggered heap expansion. If we don't allocate
             memory_size += 1 * MiB;
-            region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow);
+            region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
             if (region) {
                 klog() << "kmalloc(): Adding even more memory to heap at " << region->vaddr() << ", bytes: " << region->size();

@@ -176,7 +176,7 @@ struct KmallocGlobalHeap {
    {
        if (m_backup_memory)
            return;
-       m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow);
+       m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
    }

    size_t backup_memory_bytes() const

@@ -314,7 +314,7 @@ void APIC::do_boot_aps()
     // Allocate enough stacks for all APs
     Vector<OwnPtr<Region>> apic_ap_stacks;
     for (u32 i = 0; i < aps_to_enable; i++) {
-        auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow, true);
+        auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
         if (!stack_region) {
             klog() << "APIC: Failed to allocate stack for AP #" << i;
             return;

@@ -50,7 +50,7 @@ class KBufferImpl : public RefCounted<KBufferImpl> {
 public:
     static RefPtr<KBufferImpl> try_create_with_size(size_t size, u8 access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), name, access, false, strategy);
+        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), name, access, strategy);
         if (!region)
             return nullptr;
         return adopt(*new KBufferImpl(region.release_nonnull(), size, strategy));

@@ -58,7 +58,7 @@ public:

     static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, u8 access, const char* name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(bytes.size()), name, access, false, strategy);
+        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(bytes.size()), name, access, strategy);
         if (!region)
             return nullptr;
         memcpy(region->vaddr().as_ptr(), bytes.data(), bytes.size());

@@ -81,7 +81,7 @@ public:

     bool expand(size_t new_capacity)
     {
-        auto new_region = MM.allocate_kernel_region(PAGE_ROUND_UP(new_capacity), m_region->name(), m_region->access(), false, m_allocation_strategy);
+        auto new_region = MM.allocate_kernel_region(PAGE_ROUND_UP(new_capacity), m_region->name(), m_region->access(), m_allocation_strategy);
         if (!new_region)
             return false;
         if (m_region && m_size > 0)

@@ -206,7 +206,7 @@ E1000NetworkAdapter::E1000NetworkAdapter(PCI::Address address, u8 irq)
     enable_bus_mastering(pci_address());

     size_t mmio_base_size = PCI::get_BAR_space_size(pci_address(), 0);
-    m_mmio_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), PAGE_ROUND_UP(mmio_base_size), "E1000 MMIO", Region::Access::Read | Region::Access::Write, false, false);
+    m_mmio_region = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), PAGE_ROUND_UP(mmio_base_size), "E1000 MMIO", Region::Access::Read | Region::Access::Write, Region::Cacheable::No);
     m_mmio_base = m_mmio_region->vaddr();
     m_use_mmio = true;
     m_interrupt_line = PCI::get_interrupt_line(pci_address());

@@ -288,7 +288,7 @@ extern "C" void asm_signal_trampoline_end(void);
 void create_signal_trampoline()
 {
     // NOTE: We leak this region.
-    g_signal_trampoline_region = MM.allocate_kernel_region(PAGE_SIZE, "Signal trampolines", Region::Access::Read | Region::Access::Write, false).leak_ptr();
+    g_signal_trampoline_region = MM.allocate_kernel_region(PAGE_SIZE, "Signal trampolines", Region::Access::Read | Region::Access::Write).leak_ptr();
     g_signal_trampoline_region->set_syscall_region(true);

     u8* trampoline = (u8*)asm_signal_trampoline;

@@ -55,7 +55,7 @@ void Thread::initialize()

 KResultOr<NonnullRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> process)
 {
-    auto kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow);
+    auto kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, AllocationStrategy::AllocateNow);
     if (!kernel_stack_region)
         return ENOMEM;
     kernel_stack_region->set_stack(true);

@@ -454,7 +454,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     return region->handle_fault(fault, lock);
 }

-OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, const StringView& name, u8 access, size_t physical_alignment, bool user_accessible, bool cacheable)
+OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, String name, u8 access, size_t physical_alignment, Region::Cacheable cacheable)
 {
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);

@@ -462,10 +462,10 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, con
     if (!range.has_value())
         return {};
     auto vmobject = ContiguousVMObject::create_with_size(size, physical_alignment);
-    return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject, move(name), access, cacheable);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, AllocationStrategy strategy, bool cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String name, u8 access, AllocationStrategy strategy, Region::Cacheable cacheable)
 {
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);

@@ -475,10 +475,10 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
     auto vmobject = AnonymousVMObject::create_with_size(size, strategy);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), vmobject.release_nonnull(), name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject.release_nonnull(), move(name), access, cacheable);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, String name, u8 access, Region::Cacheable cacheable)
 {
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);

@@ -488,10 +488,10 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
     auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, move(name), access, cacheable);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, String name, u8 access, Region::Cacheable cacheable)
 {
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);

@@ -501,35 +501,26 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
     auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, move(name), access, cacheable);
 }

-OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
-{
-    return allocate_kernel_region(size, name, access, true, AllocationStrategy::Reserve, cacheable);
-}
-
-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, String name, u8 access, Region::Cacheable cacheable)
 {
     ScopedSpinLock lock(s_mm_lock);
-    OwnPtr<Region> region;
-    if (user_accessible)
-        region = Region::create_user_accessible(nullptr, range, vmobject, 0, name, access, cacheable, false);
-    else
-        region = Region::create_kernel_only(range, vmobject, 0, name, access, cacheable);
+    auto region = Region::create_kernel_only(range, vmobject, 0, move(name), access, cacheable);
     if (region)
         region->map(kernel_page_directory());
     return region;
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, String name, u8 access, Region::Cacheable cacheable)
 {
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
-    return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject, move(name), access, cacheable);
 }

 bool MemoryManager::commit_user_physical_pages(size_t page_count)

@@ -843,7 +834,7 @@ bool MemoryManager::validate_user_stack(const Process& process, VirtualAddress v
         return false;
     ScopedSpinLock lock(s_mm_lock);
     auto* region = user_region_from_vaddr(const_cast<Process&>(process).space(), vaddr);
-    return region && region->is_user_accessible() && region->is_stack();
+    return region && region->is_user() && region->is_stack();
 }

 void MemoryManager::register_vmobject(VMObject& vmobject)

@@ -145,13 +145,12 @@ public:
     void deallocate_user_physical_page(const PhysicalPage&);
     void deallocate_supervisor_physical_page(const PhysicalPage&);

-    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, size_t physical_alignment = PAGE_SIZE, bool user_accessible = false, bool cacheable = true);
-    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, AllocationStrategy strategy = AllocationStrategy::Reserve, bool cacheable = true);
-    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
-    OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
-    OwnPtr<Region> allocate_user_accessible_kernel_region(size_t, const StringView& name, u8 access, bool cacheable = true);
+    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, String name, u8 access, size_t physical_alignment = PAGE_SIZE, Region::Cacheable = Region::Cacheable::Yes);
+    OwnPtr<Region> allocate_kernel_region(size_t, String name, u8 access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
+    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);
+    OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, String name, u8 access, Region::Cacheable = Region::Cacheable::Yes);

     unsigned user_physical_pages() const { return m_user_physical_pages; }
     unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }

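To see what the slimmer declarations above buy at call sites, here is a toy compilable model (standard C++; every name is a stand-in, not the kernel API): the user_accessible bool is gone and the cacheability enum is a defaulted trailing parameter, so most callers never mention it.

    #include <cstddef>
    #include <memory>
    #include <string>

    enum class Cacheable { No = 0, Yes };
    enum class AllocationStrategy { Reserve, AllocateNow };

    struct Region { std::string name; };

    // Shape of allocate_kernel_region() after this commit (mocked).
    std::unique_ptr<Region> allocate_kernel_region(std::size_t size, std::string name, unsigned access,
        AllocationStrategy strategy = AllocationStrategy::Reserve, Cacheable = Cacheable::Yes)
    {
        (void)size; (void)access; (void)strategy;
        return std::make_unique<Region>(Region { std::move(name) });
    }

    int main()
    {
        // Callers no longer thread a bare `false` (user_accessible) through
        // the middle of the argument list:
        auto region = allocate_kernel_region(0x1000, "kmalloc subheap", /* access */ 0x3,
            AllocationStrategy::AllocateNow);
    }
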
@@ -38,16 +38,15 @@

 namespace Kernel {

-Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, u8 access, bool cacheable, bool kernel, bool shared)
+Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, u8 access, Cacheable cacheable, bool shared)
     : PurgeablePageRanges(vmobject)
     , m_range(range)
     , m_offset_in_vmobject(offset_in_vmobject)
     , m_vmobject(move(vmobject))
-    , m_name(name)
+    , m_name(move(name))
     , m_access(access | ((access & 0x7) << 4))
     , m_shared(shared)
-    , m_cacheable(cacheable)
-    , m_kernel(kernel)
+    , m_cacheable(cacheable == Cacheable::Yes)
 {
     ASSERT(m_range.base().is_page_aligned());
     ASSERT(m_range.size());

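The const String&/StringView& to String-by-value change threaded through this constructor is the classic sink-argument idiom; a small sketch of the reasoning in standard C++ (std::string standing in for AK::String):

    #include <string>
    #include <utility>

    // Take the name by value and move it into the member: an rvalue argument
    // is moved the whole way through, and an lvalue costs exactly one copy at
    // the outermost call, no matter how many layers it passes through.
    struct Region {
        std::string m_name;
        explicit Region(std::string name)
            : m_name(std::move(name)) // mirrors `m_name(move(name))` above
        {
        }
    };

    int main()
    {
        Region region("Signal trampolines"); // temporary is moved, never copied
    }
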
@@ -104,7 +103,7 @@ OwnPtr<Region> Region::clone(Process& new_owner)

         // Create a new region backed by the same VMObject.
         auto region = Region::create_user_accessible(
-            &new_owner, m_range, m_vmobject, m_offset_in_vmobject, m_name, m_access, m_cacheable, m_shared);
+            &new_owner, m_range, m_vmobject, m_offset_in_vmobject, m_name, m_access, m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
         if (m_vmobject->is_anonymous())
             region->copy_purgeable_page_ranges(*this);
         region->set_mmap(m_mmap);

@@ -123,7 +122,7 @@ OwnPtr<Region> Region::clone(Process& new_owner)
     // Set up a COW region. The parent (this) region becomes COW as well!
     remap();
     auto clone_region = Region::create_user_accessible(
-        &new_owner, m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name, m_access, m_cacheable, m_shared);
+        &new_owner, m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name, m_access, m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared);
     if (m_vmobject->is_anonymous())
         clone_region->copy_purgeable_page_ranges(*this);
     if (m_stack) {

@@ -228,20 +227,17 @@ size_t Region::amount_shared() const
     return bytes;
 }

-NonnullOwnPtr<Region> Region::create_user_accessible(Process* owner, const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable, bool shared)
+NonnullOwnPtr<Region> Region::create_user_accessible(Process* owner, const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, u8 access, Cacheable cacheable, bool shared)
 {
-    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, false, shared);
+    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared);
     if (owner)
         region->m_owner = owner->make_weak_ptr();
-    region->m_user_accessible = true;
     return region;
 }

-NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable)
+NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, String name, u8 access, Cacheable cacheable)
 {
-    auto region = make<Region>(range, move(vmobject), offset_in_vmobject, name, access, cacheable, true, false);
-    region->m_user_accessible = false;
-    return region;
+    return make<Region>(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, false);
 }

 bool Region::should_cow(size_t page_index) const

@@ -278,7 +274,7 @@ bool Region::map_individual_page_impl(size_t page_index)
         pte->set_writable(is_writable());
         if (Processor::current().has_feature(CPUFeature::NX))
             pte->set_execute_disabled(!is_executable());
-        pte->set_user_allowed(is_user_accessible());
+        pte->set_user_allowed(page_vaddr.get() >= 0x00800000 && is_user_address(page_vaddr));
     }
     return true;
 }

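A compilable model of the new per-page rule above (standard C++; the constants mirror the hunk, the function names are stand-ins): the PTE user bit becomes a pure function of the page's virtual address, so the page-table code no longer consults any per-region flag.

    #include <cassert>
    #include <cstdint>

    constexpr bool is_user_address(uintptr_t vaddr) { return vaddr < 0xc0000000; } // assumed 3 GiB split

    constexpr bool pte_user_allowed(uintptr_t page_vaddr)
    {
        // Mirrors pte->set_user_allowed(...) above: low kernel mappings below
        // 8 MiB stay supervisor-only, as does everything at or above 3 GiB.
        return page_vaddr >= 0x00800000 && is_user_address(page_vaddr);
    }

    int main()
    {
        assert(!pte_user_allowed(0x00400000)); // low kernel mapping
        assert(pte_user_allowed(0x20000000));  // ordinary user page
        assert(!pte_user_allowed(0xc0100000)); // kernel page
    }
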
@@ -388,7 +384,7 @@ bool Region::map(PageDirectory& page_directory)
     ScopedSpinLock page_lock(page_directory.get_lock());

     // FIXME: Find a better place for this sanity check(?)
-    if (is_user_accessible() && !is_shared()) {
+    if (is_user() && !is_shared()) {
         ASSERT(!vmobject().is_shared_inode());
     }

@@ -59,8 +59,13 @@ public:
         HasBeenExecutable = 64,
     };

-    static NonnullOwnPtr<Region> create_user_accessible(Process*, const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable, bool shared);
-    static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const StringView& name, u8 access, bool cacheable = true);
+    enum class Cacheable {
+        No = 0,
+        Yes,
+    };
+
+    static NonnullOwnPtr<Region> create_user_accessible(Process*, const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String name, u8 access, Cacheable, bool shared);
+    static NonnullOwnPtr<Region> create_kernel_only(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String name, u8 access, Cacheable = Cacheable::Yes);

     ~Region();

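A quick standalone illustration of the design choice in this hunk (standard C++, names are stand-ins): replacing a bare `bool cacheable` parameter with `enum class Cacheable` makes call sites self-describing and keeps booleans from being silently swapped with neighbouring flags.

    enum class Cacheable { No = 0, Yes };

    static void map_mmio(const char* name, Cacheable cacheable)
    {
        (void)name;
        (void)(cacheable == Cacheable::Yes);
    }

    int main()
    {
        map_mmio("E1000 MMIO", Cacheable::No); // intent is explicit at the call site
        // map_mmio("E1000 MMIO", false);      // would not compile: no implicit bool conversion
    }
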
@@ -79,8 +84,7 @@ public:
     const String& name() const { return m_name; }
     unsigned access() const { return m_access; }

-    void set_name(const String& name) { m_name = name; }
-    void set_name(String&& name) { m_name = move(name); }
+    void set_name(String name) { m_name = move(name); }

     const VMObject& vmobject() const { return *m_vmobject; }
     VMObject& vmobject() { return *m_vmobject; }

@@ -95,8 +99,8 @@ public:
     bool is_mmap() const { return m_mmap; }
     void set_mmap(bool mmap) { m_mmap = mmap; }

-    bool is_user_accessible() const { return m_user_accessible; }
-    bool is_kernel() const { return m_kernel || vaddr().get() >= 0xc0000000; }
+    bool is_user() const { return !is_kernel(); }
+    bool is_kernel() const { return vaddr().get() < 0x00800000 || vaddr().get() >= 0xc0000000; }

     PageFaultResponse handle_fault(const PageFault&, ScopedSpinLock<RecursiveSpinLock>&);

@@ -225,7 +229,7 @@ public:
     Region* m_prev { nullptr };

     // NOTE: These are public so we can make<> them.
-    Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String&, u8 access, bool cacheable, bool kernel, bool shared);
+    Region(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, String, u8 access, Cacheable, bool shared);

     bool remap_vmobject_page_range(size_t page_index, size_t page_count);

@@ -272,11 +276,9 @@ private:
     String m_name;
     u8 m_access { 0 };
     bool m_shared : 1 { false };
-    bool m_user_accessible : 1 { false };
     bool m_cacheable : 1 { false };
     bool m_stack : 1 { false };
     bool m_mmap : 1 { false };
-    bool m_kernel : 1 { false };
     bool m_syscall_region : 1 { false };
     WeakPtr<Process> m_owner;
 };

@@ -66,7 +66,7 @@ Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t
 Region& Space::allocate_split_region(const Region& source_region, const Range& range, size_t offset_in_vmobject)
 {
     auto& region = add_region(Region::create_user_accessible(
-        m_process, range, source_region.vmobject(), offset_in_vmobject, source_region.name(), source_region.access(), source_region.is_cacheable(), source_region.is_shared()));
+        m_process, range, source_region.vmobject(), offset_in_vmobject, source_region.name(), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared()));
     region.set_syscall_region(source_region.is_syscall_region());
     region.set_mmap(source_region.is_mmap());
     region.set_stack(source_region.is_stack());

@@ -84,7 +84,7 @@ KResultOr<Region*> Space::allocate_region(const Range& range, const String& name
     auto vmobject = AnonymousVMObject::create_with_size(range.size(), strategy);
     if (!vmobject)
         return ENOMEM;
-    auto region = Region::create_user_accessible(m_process, range, vmobject.release_nonnull(), 0, name, prot_to_region_access_flags(prot), true, false);
+    auto region = Region::create_user_accessible(m_process, range, vmobject.release_nonnull(), 0, name, prot_to_region_access_flags(prot), Region::Cacheable::Yes, false);
     if (!region->map(page_directory()))
         return ENOMEM;
     return &add_region(move(region));

@@ -107,7 +107,7 @@ KResultOr<Region*> Space::allocate_region_with_vmobject(const Range& range, Nonn
         return EINVAL;
     }
     offset_in_vmobject &= PAGE_MASK;
-    auto& region = add_region(Region::create_user_accessible(m_process, range, move(vmobject), offset_in_vmobject, name, prot_to_region_access_flags(prot), true, shared));
+    auto& region = add_region(Region::create_user_accessible(m_process, range, move(vmobject), offset_in_vmobject, name, prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
     if (!region.map(page_directory())) {
         // FIXME: What is an appropriate error code here, really?
         return ENOMEM;

@@ -79,8 +79,6 @@ ProcessMemoryMapWidget::ProcessMemoryMapWidget()
     pid_vm_fields.empend("amount_dirty", "Dirty", Gfx::TextAlignment::CenterRight);
     pid_vm_fields.empend("Access", Gfx::TextAlignment::CenterLeft, [](auto& object) {
         StringBuilder builder;
-        if (!object.get("user_accessible").to_bool())
-            builder.append('K');
         if (object.get("readable").to_bool())
             builder.append('R');
         if (object.get("writable").to_bool())