Mirror of https://github.com/RGBCube/serenity, synced 2025-05-31 13:38:11 +00:00
Kernel: Merge PurgeableVMObject into AnonymousVMObject
This implements memory commitments and lazy-allocation of committed memory.
This commit is contained in: parent b2a52f6208, commit 476f17b3f1.
35 changed files with 937 additions and 564 deletions.
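The core idea is that anonymous memory is now backed by a commitment: creating a region with AllocationStrategy::Reserve only reserves the physical pages it may need, and each page becomes real zero-filled memory on first touch; AllocationStrategy::AllocateNow grabs everything up front, and AllocationStrategy::None (used for MAP_NORESERVE mappings) promises nothing. Below is a rough, self-contained sketch of that bookkeeping. The CommitPool name and its members are invented for illustration and are not the kernel's API; the real entry points are MM.commit_user_physical_pages() and MM.uncommit_user_physical_pages().

// Hypothetical illustration of commitment accounting, not kernel code.
#include <cassert>
#include <cstddef>

struct CommitPool {
    size_t free_pages { 0 };      // pages nobody has promised away yet
    size_t committed_pages { 0 }; // promised, but not yet materialized

    // AllocationStrategy::Reserve and AllocateNow both commit up front,
    // so an out-of-memory condition is reported at creation time.
    bool commit(size_t count)
    {
        if (free_pages < count)
            return false;
        free_pages -= count;
        committed_pages += count;
        return true;
    }

    // Returning an unused commitment makes the pages available again.
    void uncommit(size_t count)
    {
        assert(committed_pages >= count);
        committed_pages -= count;
        free_pages += count;
    }

    // Lazy path: the first write to a page backed by the "lazy committed
    // page" marker consumes one committed page; it cannot fail by design.
    void materialize_one()
    {
        assert(committed_pages > 0);
        --committed_pages;
    }
};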
@@ -95,8 +95,6 @@ ProcessMemoryMapWidget::ProcessMemoryMapWidget()
     });
     pid_vm_fields.empend("vmobject", "VMObject type", Gfx::TextAlignment::CenterLeft);
     pid_vm_fields.empend("Purgeable", Gfx::TextAlignment::CenterLeft, [](auto& object) {
-        if (!object.get("purgeable").to_bool())
-            return "";
         if (object.get("volatile").to_bool())
             return "Volatile";
         return "Non-volatile";
@@ -200,7 +200,7 @@ set(KERNEL_SOURCES
     VM/PhysicalRegion.cpp
     VM/PrivateInodeVMObject.cpp
     VM/ProcessPagingScope.cpp
-    VM/PurgeableVMObject.cpp
+    VM/PurgeablePageRanges.cpp
    VM/RangeAllocator.cpp
    VM/Region.cpp
    VM/SharedInodeVMObject.cpp
@@ -51,6 +51,8 @@ DoubleBuffer::DoubleBuffer(size_t capacity)
 
 void DoubleBuffer::flip()
 {
+    if (m_storage.is_null())
+        return;
     ASSERT(m_read_buffer_index == m_read_buffer->size);
     swap(m_read_buffer, m_write_buffer);
     m_write_buffer->size = 0;
@@ -60,7 +62,7 @@ void DoubleBuffer::flip()
 
 ssize_t DoubleBuffer::write(const UserOrKernelBuffer& data, size_t size)
 {
-    if (!size)
+    if (!size || m_storage.is_null())
         return 0;
     ASSERT(size > 0);
     LOCKER(m_lock);
@@ -77,7 +79,7 @@ ssize_t DoubleBuffer::write(const UserOrKernelBuffer& data, size_t size)
 
 ssize_t DoubleBuffer::read(UserOrKernelBuffer& data, size_t size)
 {
-    if (!size)
+    if (!size || m_storage.is_null())
         return 0;
     ASSERT(size > 0);
     LOCKER(m_lock);
@@ -56,8 +56,8 @@
 #include <Kernel/Scheduler.h>
 #include <Kernel/StdLib.h>
 #include <Kernel/TTY/TTY.h>
+#include <Kernel/VM/AnonymousVMObject.h>
 #include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/PurgeableVMObject.h>
 #include <LibC/errno_numbers.h>
 
 //#define PROCFS_DEBUG
@@ -329,9 +329,9 @@ static OwnPtr<KBuffer> procfs$pid_vm(InodeIdentifier identifier)
         region_object.add("stack", region.is_stack());
         region_object.add("shared", region.is_shared());
         region_object.add("user_accessible", region.is_user_accessible());
-        region_object.add("purgeable", region.vmobject().is_purgeable());
-        if (region.vmobject().is_purgeable()) {
-            region_object.add("volatile", static_cast<const PurgeableVMObject&>(region.vmobject()).is_any_volatile());
+        region_object.add("purgeable", region.vmobject().is_anonymous());
+        if (region.vmobject().is_anonymous()) {
+            region_object.add("volatile", static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile());
         }
         region_object.add("cacheable", region.is_cacheable());
         region_object.add("kernel", region.is_kernel());
@@ -1254,6 +1254,10 @@ ssize_t ProcFSInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer&
 
     if (!data)
         return 0;
+    if (data->is_null()) {
+        dbg() << "ProcFS: Not enough memory!";
+        return 0;
+    }
 
     if ((size_t)offset >= data->size())
         return 0;
@@ -112,7 +112,7 @@ struct KmallocGlobalHeap {
             // allocations not including the original allocation_request
             // that triggered heap expansion. If we don't allocate
             memory_size += 1 * MiB;
-            region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Region::Access::Read | Region::Access::Write);
+            region = MM.allocate_kernel_region(memory_size, "kmalloc subheap", Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow);
             if (region) {
                 klog() << "kmalloc(): Adding even more memory to heap at " << region->vaddr() << ", bytes: " << region->size();
 
@@ -176,7 +176,7 @@ struct KmallocGlobalHeap {
     {
         if (m_backup_memory)
             return;
-        m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Region::Access::Read | Region::Access::Write);
+        m_backup_memory = MM.allocate_kernel_region(1 * MiB, "kmalloc subheap", Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow);
     }
 
     size_t backup_memory_bytes() const
@@ -316,7 +316,7 @@ void APIC::do_boot_aps()
     // Allocate enough stacks for all APs
     Vector<OwnPtr<Region>> apic_ap_stacks;
     for (u32 i = 0; i < aps_to_enable; i++) {
-        auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, false, true, true);
+        auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow, true);
         if (!stack_region) {
             klog() << "APIC: Failed to allocate stack for AP #" << i;
             return;
@@ -48,36 +48,33 @@ namespace Kernel {
 
 class KBufferImpl : public RefCounted<KBufferImpl> {
 public:
-    static RefPtr<KBufferImpl> try_create_with_size(size_t size, u8 access, const char* name)
+    static RefPtr<KBufferImpl> try_create_with_size(size_t size, u8 access, const char* name, AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), name, access, false, false);
+        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(size), name, access, false, strategy);
         if (!region)
             return nullptr;
         return adopt(*new KBufferImpl(region.release_nonnull(), size));
     }
 
-    static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, u8 access, const char* name)
+    static RefPtr<KBufferImpl> try_create_with_bytes(ReadonlyBytes bytes, u8 access, const char* name, AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(bytes.size()), name, access, false, false);
+        auto region = MM.allocate_kernel_region(PAGE_ROUND_UP(bytes.size()), name, access, false, strategy);
         if (!region)
             return nullptr;
-        if (!region->commit())
-            return nullptr;
         memcpy(region->vaddr().as_ptr(), bytes.data(), bytes.size());
         return adopt(*new KBufferImpl(region.release_nonnull(), bytes.size()));
     }
 
-    static NonnullRefPtr<KBufferImpl> create_with_size(size_t size, u8 access, const char* name)
+    static RefPtr<KBufferImpl> create_with_size(size_t size, u8 access, const char* name, AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto impl = try_create_with_size(size, access, name);
-        ASSERT(impl);
-        return impl.release_nonnull();
+        return try_create_with_size(size, access, name, strategy);
     }
 
-    static NonnullRefPtr<KBufferImpl> copy(const void* data, size_t size, u8 access, const char* name)
+    static RefPtr<KBufferImpl> copy(const void* data, size_t size, u8 access, const char* name)
     {
-        auto buffer = create_with_size(size, access, name);
-        buffer->region().commit();
+        auto buffer = create_with_size(size, access, name, AllocationStrategy::AllocateNow);
+        if (!buffer)
+            return {};
         memcpy(buffer->data(), data, size);
         return buffer;
     }
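Since KBufferImpl creation no longer asserts on allocation failure, a KBuffer can now be in a null state and call sites are expected to check for it. A hypothetical call-site pattern, not taken from this commit, might look like this:

// Illustrative only; assumes the kernel's KBuffer/KResult types.
static KResult fill_scratch_buffer()
{
    auto buffer = KBuffer::create_with_size(64 * KiB);
    if (buffer.is_null())
        return KResult(-ENOMEM); // allocation can fail instead of asserting
    memset(buffer.data(), 0, buffer.size());
    return KSuccess;
}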
@@ -135,17 +132,19 @@ public:
         return KBuffer(KBufferImpl::copy(data, size, access, name));
     }
 
-    u8* data() { return m_impl->data(); }
-    const u8* data() const { return m_impl->data(); }
-    size_t size() const { return m_impl->size(); }
-    size_t capacity() const { return m_impl->capacity(); }
+    bool is_null() const { return !m_impl; }
+
+    u8* data() { return m_impl ? m_impl->data() : nullptr; }
+    const u8* data() const { return m_impl ? m_impl->data() : nullptr; }
+    size_t size() const { return m_impl ? m_impl->size() : 0; }
+    size_t capacity() const { return m_impl ? m_impl->capacity() : 0; }
 
     void* end_pointer() { return data() + size(); }
     const void* end_pointer() const { return data() + size(); }
 
     void set_size(size_t size) { m_impl->set_size(size); }
 
-    const KBufferImpl& impl() const { return m_impl; }
+    const KBufferImpl& impl() const { return *m_impl; }
 
     KBuffer(const ByteBuffer& buffer, u8 access = Region::Access::Read | Region::Access::Write, const char* name = "KBuffer")
         : m_impl(KBufferImpl::copy(buffer.data(), buffer.size(), access, name))
@@ -153,12 +152,12 @@ public:
     }
 
 private:
-    explicit KBuffer(NonnullRefPtr<KBufferImpl>&& impl)
+    explicit KBuffer(RefPtr<KBufferImpl>&& impl)
         : m_impl(move(impl))
     {
     }
 
-    NonnullRefPtr<KBufferImpl> m_impl;
+    RefPtr<KBufferImpl> m_impl;
 };
 
 inline const LogStream& operator<<(const LogStream& stream, const KBuffer& value)
@@ -35,14 +35,15 @@ inline bool KBufferBuilder::can_append(size_t size) const
 {
     if (!m_buffer)
         return false;
-    bool has_space = ((m_size + size) < m_buffer->size());
-    ASSERT(has_space);
-    return has_space;
+    return ((m_size + size) < m_buffer->size());
 }
 
 OwnPtr<KBuffer> KBufferBuilder::build()
 {
-    m_buffer->set_size(m_size);
+    if (!m_buffer)
+        return {};
+    if (!m_buffer->is_null())
+        m_buffer->set_size(m_size);
     return m_buffer.release_nonnull();
 }
 
@@ -106,7 +106,7 @@ void NetworkTask_main(void*)
     };
 
     size_t buffer_size = 64 * KiB;
-    auto buffer_region = MM.allocate_kernel_region(buffer_size, "Kernel Packet Buffer", Region::Access::Read | Region::Access::Write, false, true);
+    auto buffer_region = MM.allocate_kernel_region(buffer_size, "Kernel Packet Buffer", Region::Access::Read | Region::Access::Write);
     auto buffer = (u8*)buffer_region->vaddr().get();
     timeval packet_timestamp;
 
@@ -49,6 +49,8 @@
 #include <Kernel/TTY/TTY.h>
 #include <Kernel/Thread.h>
 #include <Kernel/VM/PageDirectory.h>
+#include <Kernel/VM/PrivateInodeVMObject.h>
+#include <Kernel/VM/ProcessPagingScope.h>
 #include <Kernel/VM/SharedInodeVMObject.h>
 #include <LibC/errno_numbers.h>
 #include <LibC/limits.h>
@@ -141,29 +143,27 @@ Region& Process::allocate_split_region(const Region& source_region, const Range&
     return region;
 }
 
-Region* Process::allocate_region(const Range& range, const String& name, int prot, bool should_commit)
+Region* Process::allocate_region(const Range& range, const String& name, int prot, AllocationStrategy strategy)
 {
     ASSERT(range.is_valid());
-    auto vmobject = PurgeableVMObject::create_with_size(range.size());
+    auto vmobject = AnonymousVMObject::create_with_size(range.size(), strategy);
     if (!vmobject)
         return nullptr;
     auto region = Region::create_user_accessible(this, range, vmobject.release_nonnull(), 0, name, prot_to_region_access_flags(prot));
     if (!region->map(page_directory()))
         return nullptr;
-    if (should_commit && region->can_commit() && !region->commit())
-        return nullptr;
     return &add_region(move(region));
 }
 
-Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String& name, int prot, bool should_commit)
+Region* Process::allocate_region(VirtualAddress vaddr, size_t size, const String& name, int prot, AllocationStrategy strategy)
 {
     auto range = allocate_range(vaddr, size);
     if (!range.is_valid())
         return nullptr;
-    return allocate_region(range, name, prot, should_commit);
+    return allocate_region(range, name, prot, strategy);
 }
 
-Region* Process::allocate_region_with_vmobject(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool should_commit)
+Region* Process::allocate_region_with_vmobject(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot)
 {
     ASSERT(range.is_valid());
     size_t end_in_vmobject = offset_in_vmobject + range.size();
@@ -183,17 +183,15 @@ Region* Process::allocate_region_with_vmobject(const Range& range, NonnullRefPtr
     auto& region = add_region(Region::create_user_accessible(this, range, move(vmobject), offset_in_vmobject, name, prot_to_region_access_flags(prot)));
     if (!region.map(page_directory()))
         return nullptr;
-    if (should_commit && region.can_commit() && !region.commit())
-        return nullptr;
     return &region;
 }
 
-Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool should_commit)
+Region* Process::allocate_region_with_vmobject(VirtualAddress vaddr, size_t size, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot)
 {
     auto range = allocate_range(vaddr, size);
     if (!range.is_valid())
         return nullptr;
-    return allocate_region_with_vmobject(range, move(vmobject), offset_in_vmobject, name, prot, should_commit);
+    return allocate_region_with_vmobject(range, move(vmobject), offset_in_vmobject, name, prot);
 }
 
 bool Process::deallocate_region(Region& region)
@@ -295,6 +293,8 @@ RefPtr<Process> Process::create_user_process(RefPtr<Thread>& first_thread, const
         root = VFS::the().root_custody();
 
     auto process = adopt(*new Process(first_thread, parts.take_last(), uid, gid, parent_pid, false, move(cwd), nullptr, tty));
+    if (!first_thread)
+        return {};
     process->m_fds.resize(m_max_open_file_descriptors);
     auto& device_to_use_as_tty = tty ? (CharacterDevice&)*tty : NullDevice::the();
     auto description = device_to_use_as_tty.open(O_RDWR).value();
@@ -318,9 +318,11 @@ RefPtr<Process> Process::create_user_process(RefPtr<Thread>& first_thread, const
     return process;
 }
 
-NonnullRefPtr<Process> Process::create_kernel_process(RefPtr<Thread>& first_thread, String&& name, void (*entry)(void*), void* entry_data, u32 affinity)
+RefPtr<Process> Process::create_kernel_process(RefPtr<Thread>& first_thread, String&& name, void (*entry)(void*), void* entry_data, u32 affinity)
 {
     auto process = adopt(*new Process(first_thread, move(name), (uid_t)0, (gid_t)0, ProcessID(0), true));
+    if (!first_thread)
+        return {};
     first_thread->tss().eip = (FlatPtr)entry;
     first_thread->tss().esp = FlatPtr(entry_data); // entry function argument is expected to be in tss.esp
 
@@ -369,6 +371,11 @@ Process::Process(RefPtr<Thread>& first_thread, const String& name, uid_t uid, gi
         first_thread = adopt(*new Thread(*this));
         first_thread->detach();
     }
+
+    if (first_thread && !first_thread->was_created()) {
+        // We couldn't entirely create or clone this thread, abort
+        first_thread = nullptr;
+    }
 }
 
 Process::~Process()
@@ -400,7 +407,7 @@ void Process::dump_regions()
 
     for (auto& sorted_region : sorted_regions) {
         auto& region = *sorted_region;
-        klog() << String::format("%08x", region.vaddr().get()) << " -- " << String::format("%08x", region.vaddr().offset(region.size() - 1).get()) << " " << String::format("%08x", region.size()) << " " << (region.is_readable() ? 'R' : ' ') << (region.is_writable() ? 'W' : ' ') << (region.is_executable() ? 'X' : ' ') << (region.is_shared() ? 'S' : ' ') << (region.is_stack() ? 'T' : ' ') << (region.vmobject().is_purgeable() ? 'P' : ' ') << " " << region.name().characters();
+        klog() << String::format("%08x", region.vaddr().get()) << " -- " << String::format("%08x", region.vaddr().offset(region.size() - 1).get()) << " " << String::format("%08x", region.size()) << " " << (region.is_readable() ? 'R' : ' ') << (region.is_writable() ? 'W' : ' ') << (region.is_executable() ? 'X' : ' ') << (region.is_shared() ? 'S' : ' ') << (region.is_stack() ? 'T' : ' ') << (region.vmobject().is_anonymous() ? 'A' : ' ') << " " << region.name().characters();
     }
     MM.dump_kernel_regions();
 }
@@ -768,7 +775,7 @@ size_t Process::amount_purgeable_volatile() const
     size_t amount = 0;
     ScopedSpinLock lock(m_lock);
     for (auto& region : m_regions) {
-        if (region.vmobject().is_purgeable() && static_cast<const PurgeableVMObject&>(region.vmobject()).is_any_volatile())
+        if (region.vmobject().is_anonymous() && static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile())
             amount += region.amount_resident();
     }
     return amount;
@@ -779,7 +786,7 @@ size_t Process::amount_purgeable_nonvolatile() const
     size_t amount = 0;
     ScopedSpinLock lock(m_lock);
     for (auto& region : m_regions) {
-        if (region.vmobject().is_purgeable() && !static_cast<const PurgeableVMObject&>(region.vmobject()).is_any_volatile())
+        if (region.vmobject().is_anonymous() && !static_cast<const AnonymousVMObject&>(region.vmobject()).is_any_volatile())
             amount += region.amount_resident();
     }
     return amount;
@@ -823,6 +830,10 @@ RefPtr<Thread> Process::create_kernel_thread(void (*entry)(void*), void* entry_d
     // FIXME: Do something with guard pages?
 
     auto thread = adopt(*new Thread(*this));
+    if (!thread->was_created()) {
+        // Could not fully create this thread
+        return {};
+    }
 
     thread->set_name(name);
     thread->set_affinity(affinity);
@@ -45,6 +45,7 @@
 #include <Kernel/ThreadTracer.h>
 #include <Kernel/UnixTypes.h>
 #include <Kernel/UnveilNode.h>
+#include <Kernel/VM/AllocationStrategy.h>
 #include <Kernel/VM/RangeAllocator.h>
 #include <LibC/signal_numbers.h>
 
@@ -112,7 +113,7 @@ public:
     }
 
     template<typename EntryFunction>
-    static NonnullRefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, String&& name, EntryFunction entry, u32 affinity = THREAD_AFFINITY_DEFAULT)
+    static RefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, String&& name, EntryFunction entry, u32 affinity = THREAD_AFFINITY_DEFAULT)
     {
         auto* entry_func = new EntryFunction(move(entry));
         return create_kernel_process(
@@ -124,7 +125,7 @@ public:
             entry_func, affinity);
     }
 
-    static NonnullRefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, String&& name, void (*entry)(void*), void* entry_data = nullptr, u32 affinity = THREAD_AFFINITY_DEFAULT);
+    static RefPtr<Process> create_kernel_process(RefPtr<Thread>& first_thread, String&& name, void (*entry)(void*), void* entry_data = nullptr, u32 affinity = THREAD_AFFINITY_DEFAULT);
     static RefPtr<Process> create_user_process(RefPtr<Thread>& first_thread, const String& path, uid_t, gid_t, ProcessID ppid, int& error, Vector<String>&& arguments = Vector<String>(), Vector<String>&& environment = Vector<String>(), TTY* = nullptr);
     ~Process();
 
@@ -436,10 +437,10 @@ public:
         return m_euid == 0;
     }
 
-    Region* allocate_region_with_vmobject(VirtualAddress, size_t, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool should_commit = true);
-    Region* allocate_region(VirtualAddress, size_t, const String& name, int prot = PROT_READ | PROT_WRITE, bool should_commit = true);
-    Region* allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot, bool should_commit = true);
-    Region* allocate_region(const Range&, const String& name, int prot = PROT_READ | PROT_WRITE, bool should_commit = true);
+    Region* allocate_region_with_vmobject(VirtualAddress, size_t, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot);
+    Region* allocate_region(VirtualAddress, size_t, const String& name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
+    Region* allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, const String& name, int prot);
+    Region* allocate_region(const Range&, const String& name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
     bool deallocate_region(Region& region);
 
     Region& allocate_split_region(const Region& source_region, const Range&, size_t offset_in_vmobject);
@@ -64,7 +64,6 @@ void start(Process& process)
 
     if (!s_profiling_buffer) {
         s_profiling_buffer = RefPtr<KBufferImpl>(KBuffer::create_with_size(8 * MiB).impl()).leak_ref();
-        s_profiling_buffer->region().commit();
         s_slot_count = s_profiling_buffer->size() / sizeof(Sample);
     }
 
@@ -437,7 +437,7 @@ void Scheduler::initialize()
     g_finalizer_wait_queue = new WaitQueue;
 
     g_finalizer_has_work.store(false, AK::MemoryOrder::memory_order_release);
-    s_colonel_process = &Process::create_kernel_process(idle_thread, "colonel", idle_loop, nullptr, 1).leak_ref();
+    s_colonel_process = Process::create_kernel_process(idle_thread, "colonel", idle_loop, nullptr, 1).leak_ref();
     ASSERT(s_colonel_process);
     ASSERT(idle_thread);
     idle_thread->set_priority(THREAD_PRIORITY_MIN);
@@ -229,14 +229,12 @@ auto SharedBuffer::set_volatile_all(bool is_volatile, bool& was_purged) -> SetVo
         if (ref.pid == pid) {
             if (Region* region = ref.region.unsafe_ptr()) {
                 switch (region->set_volatile(region->vaddr(), region->size(), is_volatile, was_purged)) {
                 case Region::SetVolatileError::Success:
-                    if (!was_purged && was_purged)
-                        klog() << "Region @ " << region->vaddr() << " - " << region->vaddr().offset(region->size()) << " was purged!";
                     return SetVolatileError::Success;
                 case Region::SetVolatileError::NotPurgeable:
                     return SetVolatileError::NotPurgeable;
                 case Region::SetVolatileError::OutOfMemory:
                     return SetVolatileError::OutOfMemory;
                 }
             }
         }
@@ -28,8 +28,8 @@
 
 #include <AK/OwnPtr.h>
 #include <AK/WeakPtr.h>
+#include <Kernel/VM/AnonymousVMObject.h>
 #include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/PurgeableVMObject.h>
 
 namespace Kernel {
 
@@ -48,7 +48,7 @@ private:
     };
 
 public:
-    SharedBuffer(int id, NonnullRefPtr<PurgeableVMObject>&& vmobject)
+    SharedBuffer(int id, NonnullRefPtr<AnonymousVMObject>&& vmobject)
         : m_shbuf_id(id)
         , m_vmobject(move(vmobject))
     {
@@ -82,15 +82,15 @@ public:
         NotMapped
     };
     SetVolatileError set_volatile_all(bool is_volatile, bool& was_purged);
-    PurgeableVMObject& vmobject() { return m_vmobject; }
-    const PurgeableVMObject& vmobject() const { return m_vmobject; }
+    AnonymousVMObject& vmobject() { return m_vmobject; }
+    const AnonymousVMObject& vmobject() const { return m_vmobject; }
     int id() const { return m_shbuf_id; }
 
 private:
     int m_shbuf_id { -1 };
     bool m_writable { true };
     bool m_global { false };
-    NonnullRefPtr<PurgeableVMObject> m_vmobject;
+    NonnullRefPtr<AnonymousVMObject> m_vmobject;
     Vector<Reference, 2> m_refs;
     unsigned m_total_refs { 0 };
 };
@@ -33,6 +33,7 @@
 #include <Kernel/Profiling.h>
 #include <Kernel/Random.h>
 #include <Kernel/Time/TimeManagement.h>
+#include <Kernel/VM/AllocationStrategy.h>
 #include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/PageDirectory.h>
 #include <Kernel/VM/Region.h>
@@ -172,7 +173,7 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
             return IterationDecision::Break;
         }
 
-        master_tls_region = allocate_region({}, program_header.size_in_memory(), String::formatted("{} (master-tls)", elf_name), PROT_READ | PROT_WRITE);
+        master_tls_region = allocate_region({}, program_header.size_in_memory(), String::formatted("{} (master-tls)", elf_name), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
         if (!master_tls_region) {
             ph_load_result = KResult(-ENOMEM);
             return IterationDecision::Break;
@@ -206,7 +207,7 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
         if (program_header.is_writable())
             prot |= PROT_WRITE;
         auto region_name = String::formatted("{} (data-{}{})", elf_name, program_header.is_readable() ? "r" : "", program_header.is_writable() ? "w" : "");
-        auto* region = allocate_region(program_header.vaddr().offset(load_offset), program_header.size_in_memory(), move(region_name), prot);
+        auto* region = allocate_region(program_header.vaddr().offset(load_offset), program_header.size_in_memory(), move(region_name), prot, AllocationStrategy::Reserve);
         if (!region) {
             ph_load_result = KResult(-ENOMEM);
             return IterationDecision::Break;
@@ -259,7 +260,7 @@ KResultOr<Process::LoadResult> Process::load_elf_object(FileDescription& object_
         return KResult(-ENOEXEC);
     }
 
-    auto* stack_region = allocate_region(VirtualAddress(), Thread::default_userspace_stack_size, "Stack (Main thread)", PROT_READ | PROT_WRITE, false);
+    auto* stack_region = allocate_region(VirtualAddress(), Thread::default_userspace_stack_size, "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve);
     if (!stack_region)
         return KResult(-ENOMEM);
     stack_region->set_stack(true);
@@ -104,13 +104,17 @@ pid_t Process::sys$fork(RegisterState& regs)
 
         ScopedSpinLock processes_lock(g_processes_lock);
         g_processes->prepend(child);
-        child->ref(); // This reference will be dropped by Process::reap
     }
 
     ScopedSpinLock lock(g_scheduler_lock);
     child_first_thread->set_affinity(Thread::current()->affinity());
     child_first_thread->set_state(Thread::State::Runnable);
-    return child->pid().value();
+
+    auto child_pid = child->pid().value();
+    // We need to leak one reference so we don't destroy the Process,
+    // which will be dropped by Process::reap
+    (void)child.leak_ref();
+    return child_pid;
 }
 
 }
@@ -29,7 +29,6 @@
 #include <Kernel/Process.h>
 #include <Kernel/VM/PageDirectory.h>
 #include <Kernel/VM/PrivateInodeVMObject.h>
-#include <Kernel/VM/PurgeableVMObject.h>
 #include <Kernel/VM/Region.h>
 #include <Kernel/VM/SharedInodeVMObject.h>
 #include <LibC/limits.h>
@@ -142,9 +141,10 @@ void* Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> user_params)
     }
 
     if (map_anonymous) {
-        region = allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, !map_noreserve);
+        auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
+        region = allocate_region(range.value(), !name.is_null() ? name : "mmap", prot, strategy);
         if (!region && (!map_fixed && addr != 0))
-            region = allocate_region(allocate_range({}, size), !name.is_null() ? name : "mmap", prot, !map_noreserve);
+            region = allocate_region(allocate_range({}, size), !name.is_null() ? name : "mmap", prot, strategy);
     } else {
         if (offset < 0)
             return (void*)-EINVAL;
@@ -280,7 +280,7 @@ int Process::sys$madvise(void* address, size_t size, int advice)
     if (set_volatile && set_nonvolatile)
         return -EINVAL;
     if (set_volatile || set_nonvolatile) {
-        if (!region->vmobject().is_purgeable())
+        if (!region->vmobject().is_anonymous())
             return -EPERM;
         bool was_purged = false;
         switch (region->set_volatile(VirtualAddress(address), size, set_volatile, was_purged)) {
@@ -296,7 +296,7 @@ int Process::sys$madvise(void* address, size_t size, int advice)
         return 0;
     }
     if (advice & MADV_GET_VOLATILE) {
-        if (!region->vmobject().is_purgeable())
+        if (!region->vmobject().is_anonymous())
            return -EPERM;
         return region->is_volatile(VirtualAddress(address), size) ? 0 : 1;
     }
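From user space nothing changes here except that any anonymous mmap region is now eligible for the purgeable path: madvise with MADV_SET_VOLATILE marks a range as discardable, and MADV_GET_VOLATILE returns 0 while it is volatile and 1 otherwise (per the code above). A hypothetical user-space demo, with the caveat that the Serenity-specific flags and their exact semantics are assumed:

// Illustrative only, not part of this commit.
#include <stdio.h>
#include <sys/mman.h>

int main()
{
    const size_t size = 1024 * 1024;
    void* cache = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (cache == MAP_FAILED)
        return 1;

    // Tell the kernel this cache may be thrown away under memory pressure.
    if (madvise(cache, size, MADV_SET_VOLATILE) < 0)
        return 1;

    // Per sys$madvise above: 0 means the range is currently volatile.
    int volatile_state = madvise(cache, size, MADV_GET_VOLATILE);
    printf("volatile? %s\n", volatile_state == 0 ? "yes" : "no");

    munmap(cache, size);
    return 0;
}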
@@ -26,9 +26,9 @@
 
 #include <AK/NonnullRefPtrVector.h>
 #include <Kernel/Process.h>
+#include <Kernel/VM/AnonymousVMObject.h>
 #include <Kernel/VM/InodeVMObject.h>
 #include <Kernel/VM/MemoryManager.h>
-#include <Kernel/VM/PurgeableVMObject.h>
 
 namespace Kernel {
 
@@ -39,12 +39,11 @@ int Process::sys$purge(int mode)
         return -EPERM;
     int purged_page_count = 0;
     if (mode & PURGE_ALL_VOLATILE) {
-        NonnullRefPtrVector<PurgeableVMObject> vmobjects;
+        NonnullRefPtrVector<AnonymousVMObject> vmobjects;
         {
             InterruptDisabler disabler;
             MM.for_each_vmobject([&](auto& vmobject) {
-                if (vmobject.is_purgeable())
-                    vmobjects.append(static_cast<PurgeableVMObject&>(vmobject));
+                vmobjects.append(vmobject);
                 return IterationDecision::Continue;
             });
         }
@@ -52,7 +52,7 @@ int Process::sys$shbuf_create(int size, void** buffer)
         return -EINVAL;
     size = PAGE_ROUND_UP(size);
 
-    auto vmobject = PurgeableVMObject::create_with_size(size);
+    auto vmobject = AnonymousVMObject::create_with_size(size, AllocationStrategy::Reserve);
     if (!vmobject)
         return -ENOMEM;
 
@@ -61,6 +61,10 @@ int Process::sys$create_thread(void* (*entry)(void*), Userspace<const Syscall::S
     // FIXME: Do something with guard pages?
 
     auto thread = adopt(*new Thread(*this));
+    if (!thread->was_created()) {
+        // Could not fully create a thread
+        return -ENOMEM;
+    }
 
     // We know this thread is not the main_thread,
     // So give it a unique name until the user calls $set_thread_name on it
@@ -86,7 +86,12 @@ Thread::Thread(NonnullRefPtr<Process> process)
 
     m_tss.cr3 = m_process->page_directory().cr3();
 
-    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid.value()), Region::Access::Read | Region::Access::Write, false, true);
+    m_kernel_stack_region = MM.allocate_kernel_region(default_kernel_stack_size, String::format("Kernel Stack (Thread %d)", m_tid.value()), Region::Access::Read | Region::Access::Write, false, AllocationStrategy::AllocateNow);
+    if (!m_kernel_stack_region) {
+        // Abort creating this thread, was_created() will return false
+        return;
+    }
+
     m_kernel_stack_region->set_stack(true);
     m_kernel_stack_base = m_kernel_stack_region->vaddr().get();
     m_kernel_stack_top = m_kernel_stack_region->vaddr().offset(default_kernel_stack_size).get() & 0xfffffff8u;
@@ -858,6 +863,10 @@ RegisterState& Thread::get_register_dump_from_stack()
 RefPtr<Thread> Thread::clone(Process& process)
 {
     auto clone = adopt(*new Thread(process));
+    if (!clone->was_created()) {
+        // We failed to clone this thread
+        return {};
+    }
     memcpy(clone->m_signal_action_data, m_signal_action_data, sizeof(m_signal_action_data));
     clone->m_signal_mask = m_signal_mask;
     memcpy(clone->m_fpu_state, m_fpu_state, sizeof(FPUState));
@@ -1052,7 +1061,7 @@ KResult Thread::make_thread_specific_region(Badge<Process>)
     if (!process().m_master_tls_region)
         return KSuccess;
 
-    auto* region = process().allocate_region({}, thread_specific_region_size(), "Thread-specific", PROT_READ | PROT_WRITE, true);
+    auto* region = process().allocate_region({}, thread_specific_region_size(), "Thread-specific", PROT_READ | PROT_WRITE);
     if (!region)
         return KResult(-ENOMEM);
 
@@ -1074,6 +1074,11 @@ public:
     }
 #endif
 
+    bool was_created() const
+    {
+        return m_kernel_stack_region;
+    }
+
 private:
     IntrusiveListNode m_runnable_list_node;
 
Kernel/VM/AllocationStrategy.h (new file, 37 lines)
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020, The SerenityOS developers.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ *    list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace Kernel {
+
+enum class AllocationStrategy {
+    Reserve = 0,
+    AllocateNow,
+    None
+};
+
+}
@@ -24,15 +24,67 @@
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
+#include <Kernel/Process.h>
 #include <Kernel/VM/AnonymousVMObject.h>
 #include <Kernel/VM/MemoryManager.h>
 #include <Kernel/VM/PhysicalPage.h>
 
+//#define COMMIT_DEBUG
+//#define PAGE_FAULT_DEBUG
+
 namespace Kernel {
 
-NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_size(size_t size)
+RefPtr<VMObject> AnonymousVMObject::clone()
 {
-    return adopt(*new AnonymousVMObject(size));
+    // We need to acquire our lock so we copy a sane state
+    ScopedSpinLock lock(m_lock);
+
+    // We're the parent. Since we're about to become COW we need to
+    // commit the number of pages that we need to potentially allocate
+    // so that the parent is still guaranteed to be able to have all
+    // non-volatile memory available.
+    size_t need_cow_pages = 0;
+    {
+        // We definitely need to commit non-volatile areas
+        for_each_nonvolatile_range([&](const VolatilePageRange& nonvolatile_range) {
+            need_cow_pages += nonvolatile_range.count;
+            return IterationDecision::Continue;
+        });
+    }
+
+#ifdef COMMIT_DEBUG
+    klog() << "Cloning " << this << ", need " << need_cow_pages << " committed cow pages";
+#endif
+    if (!MM.commit_user_physical_pages(need_cow_pages))
+        return {};
+    // Create or replace the committed cow pages. When cloning a previously
+    // cloned vmobject, we want to essentially "fork", leaving us and the
+    // new clone with one set of shared committed cow pages, and the original
+    // one would keep the one it still has. This ensures that the original
+    // one and this one, as well as the clone have sufficient resources
+    // to cow all pages as needed
+    m_shared_committed_cow_pages = adopt(*new CommittedCowPages(need_cow_pages));
+
+    // Both original and clone become COW. So create a COW map for ourselves
+    // or reset all pages to be copied again if we were previously cloned
+    ensure_or_reset_cow_map();
+
+    return adopt(*new AnonymousVMObject(*this));
+}
+
+RefPtr<AnonymousVMObject> AnonymousVMObject::create_with_size(size_t size, AllocationStrategy commit)
+{
+    if (commit == AllocationStrategy::Reserve || commit == AllocationStrategy::AllocateNow) {
+        // We need to attempt to commit before actually creating the object
+        if (!MM.commit_user_physical_pages(ceil_div(size, PAGE_SIZE)))
+            return {};
+    }
+    return adopt(*new AnonymousVMObject(size, commit));
+}
+
+NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_physical_page(PhysicalPage& page)
+{
+    return adopt(*new AnonymousVMObject(page));
 }
 
 RefPtr<AnonymousVMObject> AnonymousVMObject::create_for_physical_range(PhysicalAddress paddr, size_t size)
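The interesting part of clone() above is that it pre-commits one page for every non-volatile page that might later have to be copied on write, and then shares that commitment between parent and child. A stripped-down model of such a shared pool is sketched below; CommittedCowPool and its members are illustrative stand-ins, not the kernel's CommittedCowPages class.

// Hypothetical illustration, not kernel code.
#include <cassert>
#include <cstddef>
#include <memory>

class CommittedCowPool {
public:
    explicit CommittedCowPool(size_t committed_pages)
        : m_remaining(committed_pages)
    {
    }

    // Each copy-on-write fault, in the parent or in the clone, consumes one
    // guaranteed page from the shared pool.
    void take_one()
    {
        assert(m_remaining > 0);
        --m_remaining;
    }

    // Whatever is left when the last sharer goes away would be uncommitted
    // again (MM.uncommit_user_physical_pages in the real code).
    size_t remaining() const { return m_remaining; }

private:
    size_t m_remaining { 0 };
};

// Parent and clone each keep a shared handle, e.g.:
//   auto pool = std::make_shared<CommittedCowPool>(need_cow_pages);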
@@ -44,49 +96,403 @@ RefPtr<AnonymousVMObject> AnonymousVMObject::create_for_physical_range(PhysicalA
     return adopt(*new AnonymousVMObject(paddr, size));
 }
 
-NonnullRefPtr<AnonymousVMObject> AnonymousVMObject::create_with_physical_page(PhysicalPage& page)
-{
-    auto vmobject = create_with_size(PAGE_SIZE);
-    vmobject->m_physical_pages[0] = page;
-    return vmobject;
-}
-
-AnonymousVMObject::AnonymousVMObject(size_t size, bool initialize_pages)
+AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy)
     : VMObject(size)
+    , m_volatile_ranges_cache({ 0, page_count() })
+    , m_unused_committed_pages(strategy == AllocationStrategy::Reserve ? page_count() : 0)
 {
-    if (initialize_pages) {
-#ifndef MAP_SHARED_ZERO_PAGE_LAZILY
+    if (strategy == AllocationStrategy::AllocateNow) {
+        // Allocate all pages right now. We know we can get all because we committed the amount needed
         for (size_t i = 0; i < page_count(); ++i)
-            physical_pages()[i] = MM.shared_zero_page();
-#endif
+            physical_pages()[i] = MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+    } else {
+        auto& initial_page = (strategy == AllocationStrategy::Reserve) ? MM.lazy_committed_page() : MM.shared_zero_page();
+        for (size_t i = 0; i < page_count(); ++i)
+            physical_pages()[i] = initial_page;
     }
 }
 
 AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
     : VMObject(size)
+    , m_volatile_ranges_cache({ 0, page_count() })
 {
     ASSERT(paddr.page_base() == paddr);
     for (size_t i = 0; i < page_count(); ++i)
         physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), false, false);
 }
 
+AnonymousVMObject::AnonymousVMObject(PhysicalPage& page)
+    : VMObject(PAGE_SIZE)
+    , m_volatile_ranges_cache({ 0, page_count() })
+{
+    physical_pages()[0] = page;
+}
+
 AnonymousVMObject::AnonymousVMObject(const AnonymousVMObject& other)
     : VMObject(other)
+    , m_volatile_ranges_cache({ 0, page_count() }) // do *not* clone this
+    , m_volatile_ranges_cache_dirty(true) // do *not* clone this
+    , m_purgeable_ranges() // do *not* clone this
+    , m_unused_committed_pages(other.m_unused_committed_pages)
+    , m_cow_map() // do *not* clone this
+    , m_shared_committed_cow_pages(other.m_shared_committed_cow_pages) // share the pool
 {
+    // We can't really "copy" a spinlock. But we're holding it. Clear in the clone
+    ASSERT(other.m_lock.is_locked());
+    m_lock.initialize();
+
+    // The clone also becomes COW
+    ensure_or_reset_cow_map();
+
+    if (m_unused_committed_pages > 0) {
+        // The original vmobject didn't use up all commited pages. When
+        // cloning (fork) we will overcommit. For this purpose we drop all
+        // lazy-commit references and replace them with shared zero pages.
+        for (size_t i = 0; i < page_count(); i++) {
+            auto& phys_page = m_physical_pages[i];
+            if (phys_page && phys_page->is_lazy_committed_page()) {
+                phys_page = MM.shared_zero_page();
+                if (--m_unused_committed_pages == 0)
+                    break;
+            }
+        }
+        ASSERT(m_unused_committed_pages == 0);
+    }
 }
 
 AnonymousVMObject::~AnonymousVMObject()
 {
+    // Return any unused committed pages
+    if (m_unused_committed_pages > 0)
+        MM.uncommit_user_physical_pages(m_unused_committed_pages);
 }
 
-RefPtr<VMObject> AnonymousVMObject::clone()
+int AnonymousVMObject::purge()
 {
-    return adopt(*new AnonymousVMObject(*this));
+    LOCKER(m_paging_lock);
+    return purge_impl();
 }
 
-RefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(size_t)
+int AnonymousVMObject::purge_with_interrupts_disabled(Badge<MemoryManager>)
 {
-    return {};
+    ASSERT_INTERRUPTS_DISABLED();
+    if (m_paging_lock.is_locked())
+        return 0;
+    return purge_impl();
+}
+
+void AnonymousVMObject::set_was_purged(const VolatilePageRange& range)
+{
+    ASSERT(m_lock.is_locked());
+    for (auto* purgeable_ranges : m_purgeable_ranges)
+        purgeable_ranges->set_was_purged(range);
+}
+
+int AnonymousVMObject::purge_impl()
+{
+    int purged_page_count = 0;
+    ScopedSpinLock lock(m_lock);
+    for_each_volatile_range([&](const auto& range) {
+        int purged_in_range = 0;
+        auto range_end = range.base + range.count;
+        for (size_t i = range.base; i < range_end; i++) {
+            auto& phys_page = m_physical_pages[i];
+            if (phys_page && !phys_page->is_shared_zero_page()) {
+                ASSERT(!phys_page->is_lazy_committed_page());
+                ++purged_in_range;
+            }
+            phys_page = MM.shared_zero_page();
+        }
+
+        if (purged_in_range > 0) {
+            purged_page_count += purged_in_range;
+            set_was_purged(range);
+            for_each_region([&](auto& region) {
+                if (&region.vmobject() == this) {
+                    if (auto owner = region.get_owner()) {
+                        // we need to hold a reference the process here (if there is one) as we may not own this region
+                        klog() << "Purged " << purged_in_range << " pages from region " << region.name() << " owned by " << *owner << " at " << region.vaddr_from_page_index(range.base) << " - " << region.vaddr_from_page_index(range.base + range.count);
+                    } else {
+                        klog() << "Purged " << purged_in_range << " pages from region " << region.name() << " (no ownership) at " << region.vaddr_from_page_index(range.base) << " - " << region.vaddr_from_page_index(range.base + range.count);
+                    }
+                    region.remap_page_range(range.base, range.count);
+                }
+            });
+        }
+        return IterationDecision::Continue;
+    });
+    return purged_page_count;
+}
+
+void AnonymousVMObject::register_purgeable_page_ranges(PurgeablePageRanges& purgeable_page_ranges)
+{
+    ScopedSpinLock lock(m_lock);
+    purgeable_page_ranges.set_vmobject(this);
+    ASSERT(!m_purgeable_ranges.contains_slow(&purgeable_page_ranges));
+    m_purgeable_ranges.append(&purgeable_page_ranges);
+}
+
+void AnonymousVMObject::unregister_purgeable_page_ranges(PurgeablePageRanges& purgeable_page_ranges)
+{
+    ScopedSpinLock lock(m_lock);
+    for (size_t i = 0; i < m_purgeable_ranges.size(); i++) {
+        if (m_purgeable_ranges[i] != &purgeable_page_ranges)
+            continue;
+        purgeable_page_ranges.set_vmobject(nullptr);
+        m_purgeable_ranges.remove(i);
+        return;
+    }
+    ASSERT_NOT_REACHED();
+}
+
+bool AnonymousVMObject::is_any_volatile() const
+{
+    ScopedSpinLock lock(m_lock);
+    for (auto& volatile_ranges : m_purgeable_ranges) {
+        ScopedSpinLock lock(volatile_ranges->m_volatile_ranges_lock);
+        if (!volatile_ranges->is_empty())
+            return true;
+    }
+    return false;
+}
+
+size_t AnonymousVMObject::remove_lazy_commit_pages(const VolatilePageRange& range)
+{
+    ASSERT(m_lock.is_locked());
+
+    size_t removed_count = 0;
+    auto range_end = range.base + range.count;
+    for (size_t i = range.base; i < range_end; i++) {
+        auto& phys_page = m_physical_pages[i];
+        if (phys_page && phys_page->is_lazy_committed_page()) {
+            phys_page = MM.shared_zero_page();
+            removed_count++;
+            ASSERT(m_unused_committed_pages > 0);
+            if (--m_unused_committed_pages == 0)
+                break;
+        }
+    }
+    return removed_count;
+}
+
+void AnonymousVMObject::update_volatile_cache()
+{
+    ASSERT(m_lock.is_locked());
+    ASSERT(m_volatile_ranges_cache_dirty);
+
+    m_volatile_ranges_cache.clear();
+    for_each_nonvolatile_range([&](const VolatilePageRange& range) {
+        m_volatile_ranges_cache.add_unchecked(range);
+        return IterationDecision::Continue;
+    });
+
+    m_volatile_ranges_cache_dirty = false;
+}
+
+void AnonymousVMObject::range_made_volatile(const VolatilePageRange& range)
+{
+    ASSERT(m_lock.is_locked());
+
+    if (m_unused_committed_pages == 0)
+        return;
+
+    // We need to check this range for any pages that are marked for
+    // lazy committed allocation and turn them into shared zero pages
||||||
|
// and also adjust the m_unused_committed_pages for each such page.
|
||||||
|
// Take into account all the other views as well.
|
||||||
|
size_t uncommit_page_count = 0;
|
||||||
|
for_each_volatile_range([&](const auto& r) {
|
||||||
|
auto intersected = range.intersected(r);
|
||||||
|
if (!intersected.is_empty()) {
|
||||||
|
uncommit_page_count += remove_lazy_commit_pages(intersected);
|
||||||
|
if (m_unused_committed_pages == 0)
|
||||||
|
return IterationDecision::Break;
|
||||||
|
}
|
||||||
|
return IterationDecision::Continue;
|
||||||
|
});
|
||||||
|
|
||||||
|
// Return those committed pages back to the system
|
||||||
|
if (uncommit_page_count > 0) {
|
||||||
|
#ifdef COMMIT_DEBUG
|
||||||
|
klog() << "Uncommit " << uncommit_page_count << " lazy-commit pages from " << this;
|
||||||
|
#endif
|
||||||
|
MM.uncommit_user_physical_pages(uncommit_page_count);
|
||||||
|
}
|
||||||
|
|
||||||
|
m_volatile_ranges_cache_dirty = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
void AnonymousVMObject::range_made_nonvolatile(const VolatilePageRange&)
|
||||||
|
{
|
||||||
|
ASSERT(m_lock.is_locked());
|
||||||
|
m_volatile_ranges_cache_dirty = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t AnonymousVMObject::count_needed_commit_pages_for_nonvolatile_range(const VolatilePageRange& range)
|
||||||
|
{
|
||||||
|
ASSERT(m_lock.is_locked());
|
||||||
|
ASSERT(!range.is_empty());
|
||||||
|
|
||||||
|
size_t need_commit_pages = 0;
|
||||||
|
auto range_end = range.base + range.count;
|
||||||
|
for (size_t page_index = range.base; page_index < range_end; page_index++) {
|
||||||
|
// COW pages are accounted for in m_shared_committed_cow_pages
|
||||||
|
if (m_cow_map && m_cow_map->get(page_index))
|
||||||
|
continue;
|
||||||
|
auto& phys_page = m_physical_pages[page_index];
|
||||||
|
if (phys_page && phys_page->is_shared_zero_page())
|
||||||
|
need_commit_pages++;
|
||||||
|
}
|
||||||
|
return need_commit_pages;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t AnonymousVMObject::mark_committed_pages_for_nonvolatile_range(const VolatilePageRange& range, size_t mark_total)
|
||||||
|
{
|
||||||
|
ASSERT(m_lock.is_locked());
|
||||||
|
ASSERT(!range.is_empty());
|
||||||
|
ASSERT(mark_total > 0);
|
||||||
|
|
||||||
|
size_t pages_updated = 0;
|
||||||
|
auto range_end = range.base + range.count;
|
||||||
|
for (size_t page_index = range.base; page_index < range_end; page_index++) {
|
||||||
|
// COW pages are accounted for in m_shared_committed_cow_pages
|
||||||
|
if (m_cow_map && m_cow_map->get(page_index))
|
||||||
|
continue;
|
||||||
|
auto& phys_page = m_physical_pages[page_index];
|
||||||
|
if (phys_page && phys_page->is_shared_zero_page()) {
|
||||||
|
phys_page = MM.lazy_committed_page();
|
||||||
|
if (++pages_updated == mark_total)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef COMMIT_DEBUG
|
||||||
|
klog() << "Added " << pages_updated << " lazy-commit pages to " << this;
|
||||||
|
#endif
|
||||||
|
m_unused_committed_pages += pages_updated;
|
||||||
|
return pages_updated;
|
||||||
|
}
|
||||||
|
|
||||||
|
RefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(size_t page_index)
|
||||||
|
{
|
||||||
|
{
|
||||||
|
ScopedSpinLock lock(m_lock);
|
||||||
|
|
||||||
|
ASSERT(m_unused_committed_pages > 0);
|
||||||
|
|
||||||
|
// We should't have any committed page tags in volatile regions
|
||||||
|
ASSERT([&]() {
|
||||||
|
for (auto* purgeable_ranges : m_purgeable_ranges) {
|
||||||
|
if (purgeable_ranges->is_volatile(page_index))
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}());
|
||||||
|
|
||||||
|
m_unused_committed_pages--;
|
||||||
|
}
|
||||||
|
return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
|
||||||
|
}
|
||||||
|
|
||||||
|
Bitmap& AnonymousVMObject::ensure_cow_map()
|
||||||
|
{
|
||||||
|
if (!m_cow_map)
|
||||||
|
m_cow_map = make<Bitmap>(page_count(), true);
|
||||||
|
return *m_cow_map;
|
||||||
|
}
|
||||||
|
|
||||||
|
void AnonymousVMObject::ensure_or_reset_cow_map()
|
||||||
|
{
|
||||||
|
if (!m_cow_map)
|
||||||
|
m_cow_map = make<Bitmap>(page_count(), true);
|
||||||
|
else
|
||||||
|
m_cow_map->fill(true);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AnonymousVMObject::should_cow(size_t page_index, bool is_shared) const
|
||||||
|
{
|
||||||
|
auto& page = physical_pages()[page_index];
|
||||||
|
if (page && (page->is_shared_zero_page() || page->is_lazy_committed_page()))
|
||||||
|
return true;
|
||||||
|
if (is_shared)
|
||||||
|
return false;
|
||||||
|
return m_cow_map && m_cow_map->get(page_index);
|
||||||
|
}
|
||||||
|
|
||||||
|
void AnonymousVMObject::set_should_cow(size_t page_index, bool cow)
|
||||||
|
{
|
||||||
|
ensure_cow_map().set(page_index, cow);
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t AnonymousVMObject::cow_pages() const
|
||||||
|
{
|
||||||
|
if (!m_cow_map)
|
||||||
|
return 0;
|
||||||
|
return m_cow_map->count_slow(true);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool AnonymousVMObject::is_nonvolatile(size_t page_index)
|
||||||
|
{
|
||||||
|
if (m_volatile_ranges_cache_dirty)
|
||||||
|
update_volatile_cache();
|
||||||
|
return !m_volatile_ranges_cache.contains(page_index);
|
||||||
|
}
|
||||||
|
|
||||||
|
PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr)
|
||||||
|
{
|
||||||
|
ASSERT_INTERRUPTS_DISABLED();
|
||||||
|
ScopedSpinLock lock(m_lock);
|
||||||
|
auto& page_slot = physical_pages()[page_index];
|
||||||
|
bool have_committed = m_shared_committed_cow_pages && is_nonvolatile(page_index);
|
||||||
|
if (page_slot->ref_count() == 1) {
|
||||||
|
#ifdef PAGE_FAULT_DEBUG
|
||||||
|
dbg() << " >> It's a COW page but nobody is sharing it anymore. Remap r/w";
|
||||||
|
#endif
|
||||||
|
set_should_cow(page_index, false);
|
||||||
|
if (have_committed) {
|
||||||
|
if (m_shared_committed_cow_pages->return_one())
|
||||||
|
m_shared_committed_cow_pages = nullptr;
|
||||||
|
}
|
||||||
|
return PageFaultResponse::Continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
RefPtr<PhysicalPage> page;
|
||||||
|
if (have_committed) {
|
||||||
|
#ifdef PAGE_FAULT_DEBUG
|
||||||
|
dbg() << " >> It's a committed COW page and it's time to COW!";
|
||||||
|
#endif
|
||||||
|
page = m_shared_committed_cow_pages->allocate_one();
|
||||||
|
} else {
|
||||||
|
#ifdef PAGE_FAULT_DEBUG
|
||||||
|
dbg() << " >> It's a COW page and it's time to COW!";
|
||||||
|
#endif
|
||||||
|
page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
|
||||||
|
if (page.is_null()) {
|
||||||
|
klog() << "MM: handle_cow_fault was unable to allocate a physical page";
|
||||||
|
return PageFaultResponse::OutOfMemory;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
u8* dest_ptr = MM.quickmap_page(*page);
|
||||||
|
#ifdef PAGE_FAULT_DEBUG
|
||||||
|
dbg() << " >> COW " << page->paddr() << " <- " << page_slot->paddr();
|
||||||
|
#endif
|
||||||
|
{
|
||||||
|
SmapDisabler disabler;
|
||||||
|
void* fault_at;
|
||||||
|
if (!safe_memcpy(dest_ptr, vaddr.as_ptr(), PAGE_SIZE, fault_at)) {
|
||||||
|
if ((u8*)fault_at >= dest_ptr && (u8*)fault_at <= dest_ptr + PAGE_SIZE)
|
||||||
|
dbg() << " >> COW: error copying page " << page_slot->paddr() << "/" << vaddr << " to " << page->paddr() << "/" << VirtualAddress(dest_ptr) << ": failed to write to page at " << VirtualAddress(fault_at);
|
||||||
|
else if ((u8*)fault_at >= vaddr.as_ptr() && (u8*)fault_at <= vaddr.as_ptr() + PAGE_SIZE)
|
||||||
|
dbg() << " >> COW: error copying page " << page_slot->paddr() << "/" << vaddr << " to " << page->paddr() << "/" << VirtualAddress(dest_ptr) << ": failed to read from page at " << VirtualAddress(fault_at);
|
||||||
|
else
|
||||||
|
ASSERT_NOT_REACHED();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
page_slot = move(page);
|
||||||
|
MM.unquickmap_page();
|
||||||
|
set_should_cow(page_index, false);
|
||||||
|
return PageFaultResponse::Continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
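A rough standalone sketch of the commit bookkeeping the clone constructor above performs (the type and function names here are illustrative stand-ins, not kernel API): the clone inherits the parent's unused commitment, then trades every lazy-committed slot for a shared zero page and consumes that commitment, so a forked clone relies on overcommit rather than holding its own reservation.

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-ins for the kernel's page tags.
enum class PageTag { Allocated, LazyCommitted, SharedZero };

struct VMObjectModel {
    std::vector<PageTag> pages;
    size_t unused_committed_pages;
};

// Model of the clone path: drop lazy-commit references, consume the commitment.
VMObjectModel clone_for_fork(const VMObjectModel& parent)
{
    VMObjectModel clone = parent;
    if (clone.unused_committed_pages > 0) {
        for (auto& page : clone.pages) {
            if (page == PageTag::LazyCommitted) {
                page = PageTag::SharedZero;
                if (--clone.unused_committed_pages == 0)
                    break;
            }
        }
        assert(clone.unused_committed_pages == 0);
    }
    return clone;
}

int main()
{
    VMObjectModel parent { { PageTag::Allocated, PageTag::LazyCommitted, PageTag::LazyCommitted }, 2 };
    auto child = clone_for_fork(parent);
    assert(child.unused_committed_pages == 0);
    assert(child.pages[1] == PageTag::SharedZero);
}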
@@ -27,35 +27,132 @@
#pragma once

#include <Kernel/PhysicalAddress.h>
+#include <Kernel/VM/AllocationStrategy.h>
+#include <Kernel/VM/PageFaultResponse.h>
+#include <Kernel/VM/PurgeablePageRanges.h>
#include <Kernel/VM/VMObject.h>

namespace Kernel {

class AnonymousVMObject : public VMObject {
+    friend class PurgeablePageRanges;
+
public:
    virtual ~AnonymousVMObject() override;

-    static NonnullRefPtr<AnonymousVMObject> create_with_size(size_t);
-    static RefPtr<AnonymousVMObject> create_for_physical_range(PhysicalAddress, size_t);
-    static NonnullRefPtr<AnonymousVMObject> create_with_physical_page(PhysicalPage&);
+    static RefPtr<AnonymousVMObject> create_with_size(size_t, AllocationStrategy);
+    static RefPtr<AnonymousVMObject> create_for_physical_range(PhysicalAddress paddr, size_t size);
+    static NonnullRefPtr<AnonymousVMObject> create_with_physical_page(PhysicalPage& page);
    virtual RefPtr<VMObject> clone() override;

-    virtual RefPtr<PhysicalPage> allocate_committed_page(size_t);
+    RefPtr<PhysicalPage> allocate_committed_page(size_t);
+    PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
+    size_t cow_pages() const;
+    bool should_cow(size_t page_index, bool) const;
+    void set_should_cow(size_t page_index, bool);

-protected:
-    explicit AnonymousVMObject(size_t, bool initialize_pages = true);
+    void register_purgeable_page_ranges(PurgeablePageRanges&);
+    void unregister_purgeable_page_ranges(PurgeablePageRanges&);
+
+    int purge();
+    int purge_with_interrupts_disabled(Badge<MemoryManager>);
+
+    bool is_any_volatile() const;
+
+    template<typename F>
+    IterationDecision for_each_volatile_range(F f) const
+    {
+        ASSERT(m_lock.is_locked());
+        // This is a little ugly. Basically, we're trying to find the
+        // volatile ranges that all share, because those are the only
+        // pages we can actually purge
+        for (auto* purgeable_range : m_purgeable_ranges) {
+            ScopedSpinLock purgeable_lock(purgeable_range->m_volatile_ranges_lock);
+            for (auto& r1 : purgeable_range->volatile_ranges().ranges()) {
+                VolatilePageRange range(r1);
+                for (auto* purgeable_range2 : m_purgeable_ranges) {
+                    if (purgeable_range2 == purgeable_range)
+                        continue;
+                    ScopedSpinLock purgeable2_lock(purgeable_range2->m_volatile_ranges_lock);
+                    if (purgeable_range2->is_empty()) {
+                        // If just one doesn't allow any purging, we can
+                        // immediately bail
+                        return IterationDecision::Continue;
+                    }
+                    for (const auto& r2 : purgeable_range2->volatile_ranges().ranges()) {
+                        range = range.intersected(r2);
+                        if (range.is_empty())
+                            break;
+                    }
+                    if (range.is_empty())
+                        break;
+                }
+                if (range.is_empty())
+                    continue;
+                IterationDecision decision = f(range);
+                if (decision != IterationDecision::Continue)
+                    return decision;
+            }
+        }
+        return IterationDecision::Continue;
+    }
+
+    template<typename F>
+    IterationDecision for_each_nonvolatile_range(F f) const
+    {
+        size_t base = 0;
+        for_each_volatile_range([&](const VolatilePageRange& volatile_range) {
+            if (volatile_range.base == base)
+                return IterationDecision::Continue;
+            IterationDecision decision = f({ base, volatile_range.base - base });
+            if (decision != IterationDecision::Continue)
+                return decision;
+            base = volatile_range.base + volatile_range.count;
+            return IterationDecision::Continue;
+        });
+        if (base < page_count())
+            return f({ base, page_count() - base });
+        return IterationDecision::Continue;
+    }
+
+    size_t get_lazy_committed_page_count() const;
+
+private:
+    explicit AnonymousVMObject(size_t, AllocationStrategy);
+    explicit AnonymousVMObject(PhysicalAddress, size_t);
+    explicit AnonymousVMObject(PhysicalPage&);
    explicit AnonymousVMObject(const AnonymousVMObject&);

    virtual const char* class_name() const override { return "AnonymousVMObject"; }

-private:
-    AnonymousVMObject(PhysicalAddress, size_t);
+    int purge_impl();
+    void update_volatile_cache();
+    void set_was_purged(const VolatilePageRange&);
+    size_t remove_lazy_commit_pages(const VolatilePageRange&);
+    void range_made_volatile(const VolatilePageRange&);
+    void range_made_nonvolatile(const VolatilePageRange&);
+    size_t count_needed_commit_pages_for_nonvolatile_range(const VolatilePageRange&);
+    size_t mark_committed_pages_for_nonvolatile_range(const VolatilePageRange&, size_t);
+    bool is_nonvolatile(size_t page_index);

    AnonymousVMObject& operator=(const AnonymousVMObject&) = delete;
    AnonymousVMObject& operator=(AnonymousVMObject&&) = delete;
    AnonymousVMObject(AnonymousVMObject&&) = delete;

    virtual bool is_anonymous() const override { return true; }

+    Bitmap& ensure_cow_map();
+    void ensure_or_reset_cow_map();
+
+    VolatilePageRanges m_volatile_ranges_cache;
+    bool m_volatile_ranges_cache_dirty { true };
+    Vector<PurgeablePageRanges*> m_purgeable_ranges;
+    size_t m_unused_committed_pages { 0 };
+
+    mutable OwnPtr<Bitmap> m_cow_map;
+
+    // We share a pool of committed cow-pages with clones
+    RefPtr<CommittedCowPages> m_shared_committed_cow_pages;
};

}
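The for_each_volatile_range() helper above only yields pages that every registered view has marked volatile, since those are the only pages that can safely be purged. A simplified standalone illustration of that intersection idea, using plain integer ranges rather than the kernel's VolatilePageRange (and only two views, where the real code loops over all of them):

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

struct Range {
    size_t base;
    size_t count;
    bool is_empty() const { return count == 0; }
    Range intersected(const Range& other) const
    {
        size_t lo = std::max(base, other.base);
        size_t hi = std::min(base + count, other.base + other.count);
        return hi > lo ? Range { lo, hi - lo } : Range { 0, 0 };
    }
};

// Keep only the parts of view A's volatile ranges that view B also marks volatile.
std::vector<Range> shared_volatile_ranges(const std::vector<Range>& a, const std::vector<Range>& b)
{
    std::vector<Range> result;
    for (const auto& r1 : a) {
        for (const auto& r2 : b) {
            auto overlap = r1.intersected(r2);
            if (!overlap.is_empty())
                result.push_back(overlap);
        }
    }
    return result;
}

int main()
{
    auto shared = shared_volatile_ranges({ { 0, 8 } }, { { 2, 10 } });
    for (auto& r : shared)
        std::cout << r.base << "+" << r.count << "\n"; // prints "2+6"
}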
@@ -39,7 +39,6 @@
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/PhysicalRegion.h>
-#include <Kernel/VM/PurgeableVMObject.h>
#include <Kernel/VM/SharedInodeVMObject.h>

//#define MM_DEBUG

@@ -381,7 +380,6 @@ Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
{
    ASSERT_INTERRUPTS_DISABLED();
-    ASSERT(Thread::current() != nullptr);
    ScopedSpinLock lock(s_mm_lock);
    if (Processor::current().in_irq()) {
        dbg() << "CPU[" << Processor::current().id() << "] BUG! Page fault while handling IRQ! code=" << fault.code() << ", vaddr=" << fault.vaddr() << ", irq level: " << Processor::current().in_irq();

@@ -408,26 +406,20 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, con
    if (!range.is_valid())
        return nullptr;
    auto vmobject = ContiguousVMObject::create_with_size(size);
-    auto region = allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
-    if (!region)
-        return nullptr;
-    return region;
+    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
}

-OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit, bool cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, AllocationStrategy strategy, bool cacheable)
{
    ASSERT(!(size % PAGE_SIZE));
    ScopedSpinLock lock(s_mm_lock);
    auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
    if (!range.is_valid())
        return nullptr;
-    auto vmobject = AnonymousVMObject::create_with_size(size);
-    auto region = allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
-    if (!region)
-        return nullptr;
-    if (should_commit && !region->commit())
-        return nullptr;
-    return region;
+    auto vmobject = AnonymousVMObject::create_with_size(size, strategy);
+    if (!vmobject)
+        return nullptr;
+    return allocate_kernel_region_with_vmobject(range, vmobject.release_nonnull(), name, access, user_accessible, cacheable);
}

OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)

@@ -458,7 +450,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
{
-    return allocate_kernel_region(size, name, access, true, true, cacheable);
+    return allocate_kernel_region(size, name, access, true, AllocationStrategy::Reserve, cacheable);
}

OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, const StringView& name, u8 access, bool user_accessible, bool cacheable)

@@ -576,11 +568,11 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
    // We didn't have a single free physical page. Let's try to free something up!
    // First, we look for a purgeable VMObject in the volatile state.
    for_each_vmobject([&](auto& vmobject) {
-        if (!vmobject.is_purgeable())
+        if (!vmobject.is_anonymous())
            return IterationDecision::Continue;
-        int purged_page_count = static_cast<PurgeableVMObject&>(vmobject).purge_with_interrupts_disabled({});
+        int purged_page_count = static_cast<AnonymousVMObject&>(vmobject).purge_with_interrupts_disabled({});
        if (purged_page_count) {
-            klog() << "MM: Purge saved the day! Purged " << purged_page_count << " pages from PurgeableVMObject{" << &vmobject << "}";
+            klog() << "MM: Purge saved the day! Purged " << purged_page_count << " pages from AnonymousVMObject{" << &vmobject << "}";
            page = find_free_user_physical_page(false);
            purged_pages = true;
            ASSERT(page);

@@ -890,7 +882,7 @@ void MemoryManager::dump_kernel_regions()
    klog() << "BEGIN END SIZE ACCESS NAME";
    ScopedSpinLock lock(s_mm_lock);
    for (auto& region : MM.m_kernel_regions) {
-        klog() << String::format("%08x", region.vaddr().get()) << " -- " << String::format("%08x", region.vaddr().offset(region.size() - 1).get()) << " " << String::format("%08x", region.size()) << " " << (region.is_readable() ? 'R' : ' ') << (region.is_writable() ? 'W' : ' ') << (region.is_executable() ? 'X' : ' ') << (region.is_shared() ? 'S' : ' ') << (region.is_stack() ? 'T' : ' ') << (region.vmobject().is_purgeable() ? 'P' : ' ') << " " << region.name().characters();
+        klog() << String::format("%08x", region.vaddr().get()) << " -- " << String::format("%08x", region.vaddr().offset(region.size() - 1).get()) << " " << String::format("%08x", region.size()) << " " << (region.is_readable() ? 'R' : ' ') << (region.is_writable() ? 'W' : ' ') << (region.is_executable() ? 'X' : ' ') << (region.is_shared() ? 'S' : ' ') << (region.is_stack() ? 'T' : ' ') << (region.vmobject().is_anonymous() ? 'A' : ' ') << " " << region.name().characters();
    }
}
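A small sketch of the idea behind the allocate_kernel_region() change above, where the old should_commit flag becomes an AllocationStrategy argument: Reserve simply hands out address space and relies on overcommit, while an eager strategy (named AllocateNow here as an assumption; the real enum lives in Kernel/VM/AllocationStrategy.h, which is not shown in this diff) must be able to commit the pages up front or the allocation fails. The allocator type and counter below are illustrative stand-ins, not the kernel's MemoryManager.

#include <cstddef>
#include <iostream>
#include <optional>

enum class AllocationStrategy {
    Reserve,     // rely on overcommit; pages are found at fault time
    AllocateNow, // assumed eager variant: commit physical pages up front
};

struct PhysicalAllocatorModel {
    size_t free_pages;

    // Returns the number of pages set aside, or nothing if commitment fails.
    std::optional<size_t> allocate(size_t pages, AllocationStrategy strategy)
    {
        if (strategy == AllocationStrategy::AllocateNow) {
            if (pages > free_pages)
                return std::nullopt; // cannot commit: caller gets a null region
            free_pages -= pages;
        }
        return pages;
    }
};

int main()
{
    PhysicalAllocatorModel mm { 4 };
    std::cout << mm.allocate(8, AllocationStrategy::Reserve).has_value() << "\n";     // 1: reservation succeeds
    std::cout << mm.allocate(8, AllocationStrategy::AllocateNow).has_value() << "\n"; // 0: cannot commit 8 of 4 pages
}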
@@ -32,6 +32,7 @@
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Forward.h>
#include <Kernel/SpinLock.h>
+#include <Kernel/VM/AllocationStrategy.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/Region.h>
#include <Kernel/VM/VMObject.h>

@@ -83,6 +84,7 @@ class MemoryManager {
    friend class PageDirectory;
    friend class PhysicalPage;
    friend class PhysicalRegion;
+    friend class AnonymousVMObject;
    friend class Region;
    friend class VMObject;
    friend OwnPtr<KBuffer> procfs$mm(InodeIdentifier);

@@ -120,7 +122,7 @@ public:
    void deallocate_supervisor_physical_page(const PhysicalPage&);

    OwnPtr<Region> allocate_contiguous_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
-    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, bool should_commit = true, bool cacheable = true);
+    OwnPtr<Region> allocate_kernel_region(size_t, const StringView& name, u8 access, bool user_accessible = false, AllocationStrategy strategy = AllocationStrategy::Reserve, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
    OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, const StringView& name, u8 access, bool user_accessible = false, bool cacheable = true);
37 Kernel/VM/PageFaultResponse.h Normal file
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2020, The SerenityOS developers.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ *    list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace Kernel {
+
+enum class PageFaultResponse {
+    ShouldCrash,
+    OutOfMemory,
+    Continue,
+};
+
+}
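A short sketch of how a caller might branch on the PageFaultResponse values introduced in this new header; the handler function and its inputs below are hypothetical, not the kernel's actual fault path.

#include <iostream>

enum class PageFaultResponse {
    ShouldCrash,
    OutOfMemory,
    Continue,
};

// Hypothetical fault handler: a real handler consults the faulting region and VMObject.
PageFaultResponse handle_fault(bool mapped, bool can_allocate)
{
    if (!mapped)
        return PageFaultResponse::ShouldCrash;
    if (!can_allocate)
        return PageFaultResponse::OutOfMemory;
    return PageFaultResponse::Continue;
}

int main()
{
    switch (handle_fault(true, false)) {
    case PageFaultResponse::Continue:
        std::cout << "resume the faulting thread\n";
        break;
    case PageFaultResponse::OutOfMemory:
        std::cout << "no memory: fail the faulting process\n";
        break;
    case PageFaultResponse::ShouldCrash:
        std::cout << "invalid access: crash\n";
        break;
    }
}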
@@ -27,10 +27,12 @@
#include <AK/BinarySearch.h>
#include <AK/ScopeGuard.h>
#include <Kernel/Process.h>
+#include <Kernel/VM/AnonymousVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PhysicalPage.h>
-#include <Kernel/VM/PurgeableVMObject.h>
+#include <Kernel/VM/PurgeablePageRanges.h>

+//#define PAGE_FAULT_DEBUG
//#define VOLATILE_PAGE_RANGES_DEBUG

namespace Kernel {

@@ -51,6 +53,14 @@ static void dump_volatile_page_ranges(const Vector<VolatilePageRange>& ranges)
}
#endif

+void VolatilePageRanges::add_unchecked(const VolatilePageRange& range)
+{
+    auto add_range = m_total_range.intersected(range);
+    if (add_range.is_empty())
+        return;
+    m_ranges.append(range);
+}
+
bool VolatilePageRanges::add(const VolatilePageRange& range)
{
    auto add_range = m_total_range.intersected(range);

@@ -185,7 +195,7 @@ bool VolatilePageRanges::intersects(const VolatilePageRange& range) const
}

PurgeablePageRanges::PurgeablePageRanges(const VMObject& vmobject)
-    : m_volatile_ranges({ 0, vmobject.is_purgeable() ? static_cast<const PurgeableVMObject&>(vmobject).page_count() : 0 })
+    : m_volatile_ranges({ 0, vmobject.is_anonymous() ? vmobject.page_count() : 0 })
{
}

@@ -194,11 +204,11 @@ bool PurgeablePageRanges::add_volatile_range(const VolatilePageRange& range)
    if (range.is_empty())
        return false;

-    // Since we may need to call into PurgeableVMObject we need to acquire
+    // Since we may need to call into AnonymousVMObject we need to acquire
    // its lock as well, and acquire it first. This is important so that
    // we don't deadlock when a page fault (e.g. on another processor)
    // happens that is meant to lazy-allocate a committed page. It would
-    // call into PurgeableVMObject::range_made_volatile, which then would
+    // call into AnonymousVMObject::range_made_volatile, which then would
    // also call into this object and need to acquire m_lock. By acquiring
    // the vmobject lock first in both cases, we avoid deadlocking.
    // We can access m_vmobject without any locks for that purpose because

@@ -212,13 +222,47 @@ bool PurgeablePageRanges::add_volatile_range(const VolatilePageRange& range)
    return added;
}

-bool PurgeablePageRanges::remove_volatile_range(const VolatilePageRange& range, bool& was_purged)
+auto PurgeablePageRanges::remove_volatile_range(const VolatilePageRange& range, bool& was_purged) -> RemoveVolatileError
{
-    if (range.is_empty())
-        return false;
+    if (range.is_empty()) {
+        was_purged = false;
+        return RemoveVolatileError::Success;
+    }
+    ScopedSpinLock vmobject_lock(m_vmobject->m_lock); // see comment in add_volatile_range
    ScopedSpinLock lock(m_volatile_ranges_lock);
    ASSERT(m_vmobject);
-    return m_volatile_ranges.remove(range, was_purged);
+
+    // Before we actually remove this range, we need to check if we need
+    // to commit any pages, which may fail. If it fails, we don't actually
+    // want to make any modifications. COW pages are already accounted for
+    // in m_shared_committed_cow_pages
+    size_t need_commit_pages = 0;
+    m_volatile_ranges.for_each_intersecting_range(range, [&](const VolatilePageRange& intersected_range) {
+        need_commit_pages += m_vmobject->count_needed_commit_pages_for_nonvolatile_range(intersected_range);
+        return IterationDecision::Continue;
+    });
+    if (need_commit_pages > 0) {
+        // See if we can grab enough pages for what we're marking non-volatile
+        if (!MM.commit_user_physical_pages(need_commit_pages))
+            return RemoveVolatileError::OutOfMemory;
+
+        // Now that we are committed to these pages, mark them for lazy-commit allocation
+        auto pages_to_mark = need_commit_pages;
+        m_volatile_ranges.for_each_intersecting_range(range, [&](const VolatilePageRange& intersected_range) {
+            auto pages_marked = m_vmobject->mark_committed_pages_for_nonvolatile_range(intersected_range, pages_to_mark);
+            pages_to_mark -= pages_marked;
+            return IterationDecision::Continue;
+        });
+    }
+
+    // Now actually remove the range
+    if (m_volatile_ranges.remove(range, was_purged)) {
+        m_vmobject->range_made_nonvolatile(range);
+        return RemoveVolatileError::Success;
+    }
+
+    ASSERT(need_commit_pages == 0); // We should have not touched anything
+    return RemoveVolatileError::SuccessNoChange;
}

bool PurgeablePageRanges::is_volatile_range(const VolatilePageRange& range) const

@@ -241,7 +285,7 @@ void PurgeablePageRanges::set_was_purged(const VolatilePageRange& range)
    m_volatile_ranges.add({ range.base, range.count, true });
}

-void PurgeablePageRanges::set_vmobject(PurgeableVMObject* vmobject)
+void PurgeablePageRanges::set_vmobject(AnonymousVMObject* vmobject)
{
    // No lock needed here
    if (vmobject) {

@@ -253,207 +297,33 @@ void PurgeablePageRanges::set_vmobject(PurgeableVMObject* vmobject)
    }
}

-RefPtr<PurgeableVMObject> PurgeableVMObject::create_with_size(size_t size)
-{
-    // We need to attempt to commit before actually creating the object
-    if (!MM.commit_user_physical_pages(ceil_div(size, PAGE_SIZE)))
-        return {};
-    return adopt(*new PurgeableVMObject(size));
-}
-
-PurgeableVMObject::PurgeableVMObject(size_t size)
-    : AnonymousVMObject(size, false)
-    , m_unused_committed_pages(page_count())
-{
-    for (size_t i = 0; i < page_count(); ++i)
-        physical_pages()[i] = MM.lazy_committed_page();
-}
-
-PurgeableVMObject::PurgeableVMObject(const PurgeableVMObject& other)
-    : AnonymousVMObject(other)
-    , m_purgeable_ranges() // do *not* clone this
-    , m_unused_committed_pages(other.m_unused_committed_pages)
-{
-    // We can't really "copy" a spinlock. But we're holding it. Clear in the clone
-    ASSERT(other.m_lock.is_locked());
-    m_lock.initialize();
-}
-
-PurgeableVMObject::~PurgeableVMObject()
-{
-    if (m_unused_committed_pages > 0)
-        MM.uncommit_user_physical_pages(m_unused_committed_pages);
-}
-
-RefPtr<VMObject> PurgeableVMObject::clone()
-{
-    // We need to acquire our lock so we copy a sane state
-    ScopedSpinLock lock(m_lock);
-    if (m_unused_committed_pages > 0) {
-        // We haven't used up all committed pages. In order to be able
-        // to clone ourselves, we need to be able to commit the same number
-        // of pages first
-        if (!MM.commit_user_physical_pages(m_unused_committed_pages))
-            return {};
-    }
-    return adopt(*new PurgeableVMObject(*this));
-}
-
-int PurgeableVMObject::purge()
-{
-    LOCKER(m_paging_lock);
-    return purge_impl();
-}
-
-int PurgeableVMObject::purge_with_interrupts_disabled(Badge<MemoryManager>)
-{
-    ASSERT_INTERRUPTS_DISABLED();
-    if (m_paging_lock.is_locked())
-        return 0;
-    return purge_impl();
-}
-
-void PurgeableVMObject::set_was_purged(const VolatilePageRange& range)
-{
-    ASSERT(m_lock.is_locked());
-    for (auto* purgeable_ranges : m_purgeable_ranges)
-        purgeable_ranges->set_was_purged(range);
-}
-
-int PurgeableVMObject::purge_impl()
-{
-    int purged_page_count = 0;
-    ScopedSpinLock lock(m_lock);
-    for_each_volatile_range([&](const auto& range) {
-        int purged_in_range = 0;
-        auto range_end = range.base + range.count;
-        for (size_t i = range.base; i < range_end; i++) {
-            auto& phys_page = m_physical_pages[i];
-            if (phys_page && !phys_page->is_shared_zero_page()) {
-                ASSERT(!phys_page->is_lazy_committed_page());
-                ++purged_in_range;
-            }
-            phys_page = MM.shared_zero_page();
-        }
-
-        if (purged_in_range > 0) {
-            purged_page_count += purged_in_range;
-            set_was_purged(range);
-            for_each_region([&](auto& region) {
-                if (&region.vmobject() == this) {
-                    if (auto owner = region.get_owner()) {
-                        // we need to hold a reference the process here (if there is one) as we may not own this region
-                        klog() << "Purged " << purged_in_range << " pages from region " << region.name() << " owned by " << *owner << " at " << region.vaddr_from_page_index(range.base) << " - " << region.vaddr_from_page_index(range.base + range.count);
-                    } else {
-                        klog() << "Purged " << purged_in_range << " pages from region " << region.name() << " (no ownership) at " << region.vaddr_from_page_index(range.base) << " - " << region.vaddr_from_page_index(range.base + range.count);
-                    }
-                    region.remap_page_range(range.base, range.count);
-                }
-            });
-        }
-        return IterationDecision::Continue;
-    });
-    return purged_page_count;
-}
-
-void PurgeableVMObject::register_purgeable_page_ranges(PurgeablePageRanges& purgeable_page_ranges)
-{
-    ScopedSpinLock lock(m_lock);
-    purgeable_page_ranges.set_vmobject(this);
-    ASSERT(!m_purgeable_ranges.contains_slow(&purgeable_page_ranges));
-    m_purgeable_ranges.append(&purgeable_page_ranges);
-}
-
-void PurgeableVMObject::unregister_purgeable_page_ranges(PurgeablePageRanges& purgeable_page_ranges)
-{
-    ScopedSpinLock lock(m_lock);
-    for (size_t i = 0; i < m_purgeable_ranges.size(); i++) {
-        if (m_purgeable_ranges[i] != &purgeable_page_ranges)
-            continue;
-        purgeable_page_ranges.set_vmobject(nullptr);
-        m_purgeable_ranges.remove(i);
-        return;
-    }
-    ASSERT_NOT_REACHED();
-}
-
-bool PurgeableVMObject::is_any_volatile() const
-{
-    ScopedSpinLock lock(m_lock);
-    for (auto& volatile_ranges : m_purgeable_ranges) {
-        ScopedSpinLock lock(volatile_ranges->m_volatile_ranges_lock);
-        if (!volatile_ranges->is_empty())
-            return true;
-    }
-    return false;
-}
-
-size_t PurgeableVMObject::remove_lazy_commit_pages(const VolatilePageRange& range)
-{
-    ASSERT(m_lock.is_locked());
-
-    size_t removed_count = 0;
-    auto range_end = range.base + range.count;
-    for (size_t i = range.base; i < range_end; i++) {
-        auto& phys_page = m_physical_pages[i];
-        if (phys_page && phys_page->is_lazy_committed_page()) {
-            phys_page = MM.shared_zero_page();
-            removed_count++;
-            ASSERT(m_unused_committed_pages > 0);
-            m_unused_committed_pages--;
-            // if (--m_unused_committed_pages == 0)
-            //     break;
-        }
-    }
-    return removed_count;
-}
-
-void PurgeableVMObject::range_made_volatile(const VolatilePageRange& range)
-{
-    ASSERT(m_lock.is_locked());
-
-    if (m_unused_committed_pages == 0)
-        return;
-
-    // We need to check this range for any pages that are marked for
-    // lazy committed allocation and turn them into shared zero pages
-    // and also adjust the m_unused_committed_pages for each such page.
-    // Take into account all the other views as well.
-    size_t uncommit_page_count = 0;
-    for_each_volatile_range([&](const auto& r) {
-        auto intersected = range.intersected(r);
-        if (!intersected.is_empty()) {
-            uncommit_page_count += remove_lazy_commit_pages(intersected);
-            // if (m_unused_committed_pages == 0)
-            //     return IterationDecision::Break;
-        }
-        return IterationDecision::Continue;
-    });
-
-    // Return those committed pages back to the system
-    if (uncommit_page_count > 0)
-        MM.uncommit_user_physical_pages(uncommit_page_count);
-}
-
-RefPtr<PhysicalPage> PurgeableVMObject::allocate_committed_page(size_t page_index)
-{
-    {
-        ScopedSpinLock lock(m_lock);
-
-        ASSERT(m_unused_committed_pages > 0);
-
-        // We should't have any committed page tags in volatile regions
-        ASSERT([&]() {
-            for (auto* purgeable_ranges : m_purgeable_ranges) {
-                if (purgeable_ranges->is_volatile(page_index))
-                    return false;
-            }
-            return true;
-        }());
-
-        m_unused_committed_pages--;
-    }
-    return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
-}
+CommittedCowPages::CommittedCowPages(size_t committed_pages)
+    : m_committed_pages(committed_pages)
+{
+}
+
+CommittedCowPages::~CommittedCowPages()
+{
+    // Return unused committed pages
+    if (m_committed_pages > 0)
+        MM.uncommit_user_physical_pages(m_committed_pages);
+}
+
+NonnullRefPtr<PhysicalPage> CommittedCowPages::allocate_one()
+{
+    ASSERT(m_committed_pages > 0);
+    m_committed_pages--;
+
+    return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
+}
+
+bool CommittedCowPages::return_one()
+{
+    ASSERT(m_committed_pages > 0);
+    m_committed_pages--;
+
+    MM.uncommit_user_physical_pages(1);
+    return m_committed_pages == 0;
+}

}
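CommittedCowPages above acts as a refcounted pool of pre-committed pages shared between an anonymous VMObject and its clones: allocate_one() hands out a page backed by that commitment, and return_one() gives a commitment back, reporting when the pool is drained so the owner can drop its reference. A standalone model of that accounting, using a plain counter instead of real physical pages:

#include <cassert>
#include <cstddef>

class CommittedPoolModel {
public:
    explicit CommittedPoolModel(size_t committed_pages)
        : m_committed_pages(committed_pages)
    {
    }

    // Hand out one of the pre-committed pages.
    void allocate_one()
    {
        assert(m_committed_pages > 0);
        --m_committed_pages;
    }

    // Give one commitment back; true means the pool is now empty.
    bool return_one()
    {
        assert(m_committed_pages > 0);
        --m_committed_pages;
        return m_committed_pages == 0;
    }

    size_t remaining() const { return m_committed_pages; }

private:
    size_t m_committed_pages;
};

int main()
{
    CommittedPoolModel pool(2); // two pages committed for future COW breaks
    pool.allocate_one();        // first COW fault takes a guaranteed page
    assert(pool.remaining() == 1);
    bool now_empty = pool.return_one(); // last sharer went away
    assert(now_empty);
    (void)now_empty;
}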
@@ -26,8 +26,9 @@
#pragma once

+#include <AK/Bitmap.h>
+#include <AK/RefCounted.h>
#include <Kernel/SpinLock.h>
-#include <Kernel/VM/AnonymousVMObject.h>

namespace Kernel {

@@ -118,7 +119,7 @@ public:
    }

    bool is_empty() const { return m_ranges.is_empty(); }
-    void clear() { m_ranges.clear(); }
+    void clear() { m_ranges.clear_with_capacity(); }

    bool is_all() const
    {

@@ -142,8 +143,60 @@ public:
    }

    bool add(const VolatilePageRange&);
+    void add_unchecked(const VolatilePageRange&);
    bool remove(const VolatilePageRange&, bool&);

+    template<typename F>
+    IterationDecision for_each_intersecting_range(const VolatilePageRange& range, F f)
+    {
+        auto r = m_total_range.intersected(range);
+        if (r.is_empty())
+            return IterationDecision::Continue;
+
+        size_t nearby_index = 0;
+        auto* existing_range = binary_search(
+            m_ranges.span(), r, &nearby_index, [](auto& a, auto& b) {
+                if (a.intersects(b))
+                    return 0;
+                return (signed)(a.base - (b.base + b.count - 1));
+            });
+        if (!existing_range)
+            return IterationDecision::Continue;
+
+        if (existing_range->range_equals(r))
+            return f(r);
+        ASSERT(existing_range == &m_ranges[nearby_index]); // sanity check
+        while (nearby_index < m_ranges.size()) {
+            existing_range = &m_ranges[nearby_index];
+            if (!existing_range->intersects(range))
+                break;
+
+            IterationDecision decision = f(existing_range->intersected(r));
+            if (decision != IterationDecision::Continue)
+                return decision;
+
+            nearby_index++;
+        }
+        return IterationDecision::Continue;
+    }
+
+    template<typename F>
+    IterationDecision for_each_nonvolatile_range(F f) const
+    {
+        size_t base = m_total_range.base;
+        for (const auto& volatile_range : m_ranges) {
+            if (volatile_range.base == base)
+                continue;
+            IterationDecision decision = f({ base, volatile_range.base - base });
+            if (decision != IterationDecision::Continue)
+                return decision;
+            base = volatile_range.base + volatile_range.count;
+        }
+        if (base < m_total_range.base + m_total_range.count)
+            return f({ base, (m_total_range.base + m_total_range.count) - base });
+        return IterationDecision::Continue;
+    }
+
    Vector<VolatilePageRange>& ranges() { return m_ranges; }
    const Vector<VolatilePageRange>& ranges() const { return m_ranges; }

@@ -152,15 +205,15 @@ private:
    VolatilePageRange m_total_range;
};

-class PurgeableVMObject;
+class AnonymousVMObject;

class PurgeablePageRanges {
-    friend class PurgeableVMObject;
+    friend class AnonymousVMObject;

public:
    PurgeablePageRanges(const VMObject&);

-    void set_purgeable_page_ranges(const PurgeablePageRanges& other)
+    void copy_purgeable_page_ranges(const PurgeablePageRanges& other)
    {
        if (this == &other)
            return;

@@ -171,7 +224,12 @@ public:
    }

    bool add_volatile_range(const VolatilePageRange& range);
-    bool remove_volatile_range(const VolatilePageRange& range, bool& was_purged);
+    enum class RemoveVolatileError {
+        Success = 0,
+        SuccessNoChange,
+        OutOfMemory
+    };
+    RemoveVolatileError remove_volatile_range(const VolatilePageRange& range, bool& was_purged);
    bool is_volatile_range(const VolatilePageRange& range) const;
    bool is_volatile(size_t) const;

@@ -182,92 +240,27 @@ public:
    const VolatilePageRanges& volatile_ranges() const { return m_volatile_ranges; }

protected:
-    void set_vmobject(PurgeableVMObject*);
+    void set_vmobject(AnonymousVMObject*);

    VolatilePageRanges m_volatile_ranges;
    mutable RecursiveSpinLock m_volatile_ranges_lock;
-    PurgeableVMObject* m_vmobject { nullptr };
+    AnonymousVMObject* m_vmobject { nullptr };
};

-class PurgeableVMObject final : public AnonymousVMObject {
-    friend class PurgeablePageRanges;
+class CommittedCowPages : public RefCounted<CommittedCowPages> {
+    AK_MAKE_NONCOPYABLE(CommittedCowPages);

public:
-    virtual ~PurgeableVMObject() override;
-
-    static RefPtr<PurgeableVMObject> create_with_size(size_t);
-    virtual RefPtr<VMObject> clone() override;
-
-    virtual RefPtr<PhysicalPage> allocate_committed_page(size_t) override;
-
-    void register_purgeable_page_ranges(PurgeablePageRanges&);
-    void unregister_purgeable_page_ranges(PurgeablePageRanges&);
-
-    int purge();
-    int purge_with_interrupts_disabled(Badge<MemoryManager>);
-
-    bool is_any_volatile() const;
-
-    template<typename F>
-    IterationDecision for_each_volatile_range(F f)
-    {
-        ASSERT(m_lock.is_locked());
-        // This is a little ugly. Basically, we're trying to find the
-        // volatile ranges that all share, because those are the only
-        // pages we can actually purge
-        for (auto* purgeable_range : m_purgeable_ranges) {
-            ScopedSpinLock purgeable_lock(purgeable_range->m_volatile_ranges_lock);
-            for (auto& r1 : purgeable_range->volatile_ranges().ranges()) {
-                VolatilePageRange range(r1);
-                for (auto* purgeable_range2 : m_purgeable_ranges) {
-                    if (purgeable_range2 == purgeable_range)
-                        continue;
-                    ScopedSpinLock purgeable2_lock(purgeable_range2->m_volatile_ranges_lock);
-                    if (purgeable_range2->is_empty()) {
-                        // If just one doesn't allow any purging, we can
-                        // immediately bail
-                        return IterationDecision::Continue;
-                    }
-                    for (const auto& r2 : purgeable_range2->volatile_ranges().ranges()) {
-                        range = range.intersected(r2);
-                        if (range.is_empty())
-                            break;
-                    }
-                    if (range.is_empty())
-                        break;
-                }
-                if (range.is_empty())
-                    continue;
-                IterationDecision decision = f(range);
-                if (decision != IterationDecision::Continue)
-                    return decision;
-            }
-        }
-        return IterationDecision::Continue;
-    }
-
-    size_t get_lazy_committed_page_count() const;
+    CommittedCowPages() = delete;
+
+    CommittedCowPages(size_t);
+    ~CommittedCowPages();
+
+    NonnullRefPtr<PhysicalPage> allocate_one();
+    bool return_one();

private:
-    explicit PurgeableVMObject(size_t);
-    explicit PurgeableVMObject(const PurgeableVMObject&);
-
-    virtual const char* class_name() const override { return "PurgeableVMObject"; }
-
-    int purge_impl();
-    void set_was_purged(const VolatilePageRange&);
-    size_t remove_lazy_commit_pages(const VolatilePageRange&);
-    void range_made_volatile(const VolatilePageRange&);
-
-    PurgeableVMObject& operator=(const PurgeableVMObject&) = delete;
-    PurgeableVMObject& operator=(PurgeableVMObject&&) = delete;
-    PurgeableVMObject(PurgeableVMObject&&) = delete;
-
-    virtual bool is_purgeable() const override { return true; }
-
-    Vector<PurgeablePageRanges*> m_purgeable_ranges;
-    mutable SpinLock<u8> m_lock;
-    size_t m_unused_committed_pages { 0 };
+    size_t m_committed_pages;
};

}
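The for_each_nonvolatile_range() template above walks the gaps between the sorted volatile ranges, i.e. the complement within the object's total range. The same idea in a small standalone form (plain structs, not the kernel's types):

#include <cstddef>
#include <iostream>
#include <vector>

struct PageRange {
    size_t base;
    size_t count;
};

// Given volatile ranges sorted by base within [total.base, total.base + total.count),
// produce the non-volatile gaps between them.
std::vector<PageRange> nonvolatile_ranges(const PageRange& total, const std::vector<PageRange>& volatile_ranges)
{
    std::vector<PageRange> result;
    size_t base = total.base;
    for (const auto& range : volatile_ranges) {
        if (range.base > base)
            result.push_back({ base, range.base - base });
        base = range.base + range.count;
    }
    size_t end = total.base + total.count;
    if (base < end)
        result.push_back({ base, end - base });
    return result;
}

int main()
{
    auto gaps = nonvolatile_ranges({ 0, 16 }, { { 2, 3 }, { 8, 4 } });
    for (auto& gap : gaps)
        std::cout << gap.base << "+" << gap.count << "\n"; // 0+2, 5+3, 12+4
}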
@ -32,7 +32,6 @@
|
||||||
#include <Kernel/VM/AnonymousVMObject.h>
|
#include <Kernel/VM/AnonymousVMObject.h>
|
||||||
#include <Kernel/VM/MemoryManager.h>
|
#include <Kernel/VM/MemoryManager.h>
|
||||||
#include <Kernel/VM/PageDirectory.h>
|
#include <Kernel/VM/PageDirectory.h>
|
||||||
#include <Kernel/VM/PurgeableVMObject.h>
|
|
||||||
#include <Kernel/VM/Region.h>
|
#include <Kernel/VM/Region.h>
|
||||||
#include <Kernel/VM/SharedInodeVMObject.h>
|
#include <Kernel/VM/SharedInodeVMObject.h>
|
||||||
|
|
||||||
|
@@ -73,16 +72,16 @@ Region::~Region()

void Region::register_purgeable_page_ranges()
{
-    if (m_vmobject->is_purgeable()) {
-        auto& vmobject = static_cast<PurgeableVMObject&>(*m_vmobject);
+    if (m_vmobject->is_anonymous()) {
+        auto& vmobject = static_cast<AnonymousVMObject&>(*m_vmobject);
        vmobject.register_purgeable_page_ranges(*this);
    }
}

void Region::unregister_purgeable_page_ranges()
{
-    if (m_vmobject->is_purgeable()) {
-        auto& vmobject = static_cast<PurgeableVMObject&>(*m_vmobject);
+    if (m_vmobject->is_anonymous()) {
+        auto& vmobject = static_cast<AnonymousVMObject&>(*m_vmobject);
        vmobject.unregister_purgeable_page_ranges(*this);
    }
}

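The casts change from PurgeableVMObject to AnonymousVMObject because the purgeable bookkeeping now lives on the merged object, but the pattern stays the same: every Region owns its PurgeablePageRanges and hands a pointer to the shared object, which later intersects all of them (see the loop sketched earlier). A minimal model of that registration lifecycle, with invented names and no locking:

```cpp
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

struct PurgeableRanges {
    // (first page, page count) pairs this mapping has marked volatile.
    std::vector<std::pair<size_t, size_t>> volatile_ranges;
};

class AnonymousObject {
public:
    void register_ranges(PurgeableRanges& ranges) { m_registered.push_back(&ranges); }
    void unregister_ranges(PurgeableRanges& ranges)
    {
        m_registered.erase(std::remove(m_registered.begin(), m_registered.end(), &ranges), m_registered.end());
    }
    size_t mapping_count() const { return m_registered.size(); }

private:
    std::vector<PurgeableRanges*> m_registered;
};

struct RegionModel {
    explicit RegionModel(AnonymousObject& object)
        : m_object(object)
    {
        m_object.register_ranges(m_ranges); // mirrors Region::register_purgeable_page_ranges()
    }
    ~RegionModel() { m_object.unregister_ranges(m_ranges); } // mirrors unregister_purgeable_page_ranges()

    AnonymousObject& m_object;
    PurgeableRanges m_ranges;
};

int main()
{
    AnonymousObject object;
    {
        RegionModel a(object);
        RegionModel b(object);
        std::printf("registered mappings: %zu\n", object.mapping_count()); // 2
    }
    std::printf("registered mappings after unmap: %zu\n", object.mapping_count()); // 0
    return 0;
}
```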
@@ -96,8 +95,11 @@ OwnPtr<Region> Region::clone()
        ASSERT(m_mmap);
        ASSERT(!m_shared);
        ASSERT(vmobject().is_anonymous());
-        auto zeroed_region = Region::create_user_accessible(get_owner().ptr(), m_range, AnonymousVMObject::create_with_size(size()), 0, m_name, m_access);
-        zeroed_region->set_purgeable_page_ranges(*this);
+        auto new_vmobject = AnonymousVMObject::create_with_size(size(), AllocationStrategy::Reserve); // TODO: inherit committed non-volatile areas?
+        if (!new_vmobject)
+            return {};
+        auto zeroed_region = Region::create_user_accessible(get_owner().ptr(), m_range, new_vmobject.release_nonnull(), 0, m_name, m_access);
+        zeroed_region->copy_purgeable_page_ranges(*this);
        zeroed_region->set_mmap(m_mmap);
        zeroed_region->set_inherit_mode(m_inherit_mode);
        return zeroed_region;

@@ -113,7 +115,8 @@ OwnPtr<Region> Region::clone()

        // Create a new region backed by the same VMObject.
        auto region = Region::create_user_accessible(get_owner().ptr(), m_range, m_vmobject, m_offset_in_vmobject, m_name, m_access);
-        region->set_purgeable_page_ranges(*this);
+        if (m_vmobject->is_anonymous())
+            region->copy_purgeable_page_ranges(*this);
        region->set_mmap(m_mmap);
        region->set_shared(m_shared);
        return region;

@@ -122,7 +125,7 @@ OwnPtr<Region> Region::clone()
    if (vmobject().is_inode())
        ASSERT(vmobject().is_private_inode());

-    auto vmobject_clone = m_vmobject->clone();
+    auto vmobject_clone = vmobject().clone();
    if (!vmobject_clone)
        return {};

@@ -130,11 +133,10 @@ OwnPtr<Region> Region::clone()
    dbg() << "Region::clone(): CoWing " << name() << " (" << vaddr() << ")";
#endif
    // Set up a COW region. The parent (this) region becomes COW as well!
-    ensure_cow_map().fill(true);
    remap();
    auto clone_region = Region::create_user_accessible(get_owner().ptr(), m_range, vmobject_clone.release_nonnull(), m_offset_in_vmobject, m_name, m_access);
-    clone_region->set_purgeable_page_ranges(*this);
-    clone_region->ensure_cow_map();
+    if (m_vmobject->is_anonymous())
+        clone_region->copy_purgeable_page_ranges(*this);
    if (m_stack) {
        ASSERT(is_readable());
        ASSERT(is_writable());

@@ -156,7 +158,7 @@ void Region::set_vmobject(NonnullRefPtr<VMObject>&& obj)

bool Region::is_volatile(VirtualAddress vaddr, size_t size) const
{
-    if (!m_vmobject->is_purgeable())
+    if (!m_vmobject->is_anonymous())
        return false;

    auto offset_in_vmobject = vaddr.get() - (this->vaddr().get() - m_offset_in_vmobject);

@@ -168,7 +170,7 @@ bool Region::is_volatile(VirtualAddress vaddr, size_t size) const
auto Region::set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, bool& was_purged) -> SetVolatileError
{
    was_purged = false;
-    if (!m_vmobject->is_purgeable())
+    if (!m_vmobject->is_anonymous())
        return SetVolatileError::NotPurgeable;

    auto offset_in_vmobject = vaddr.get() - (this->vaddr().get() - m_offset_in_vmobject);

@@ -187,70 +189,22 @@ auto Region::set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, bool& was_purged) -> SetVolatileError
        // end of the range doesn't inadvertedly get discarded.
        size_t first_page_index = PAGE_ROUND_DOWN(offset_in_vmobject) / PAGE_SIZE;
        size_t last_page_index = PAGE_ROUND_UP(offset_in_vmobject + size) / PAGE_SIZE;
-        if (remove_volatile_range({ first_page_index, last_page_index - first_page_index }, was_purged)) {
-            // Attempt to remap the page range. We want to make sure we have
-            // enough memory, if not we need to inform the caller of that
-            // fact
-            if (!remap_page_range(first_page_index, last_page_index - first_page_index))
+        switch (remove_volatile_range({ first_page_index, last_page_index - first_page_index }, was_purged)) {
+        case PurgeablePageRanges::RemoveVolatileError::Success:
+        case PurgeablePageRanges::RemoveVolatileError::SuccessNoChange:
+            break;
+        case PurgeablePageRanges::RemoveVolatileError::OutOfMemory:
            return SetVolatileError::OutOfMemory;
        }
    }
    return SetVolatileError::Success;
}

-bool Region::can_commit() const
+size_t Region::cow_pages() const
{
-    return vmobject().is_anonymous() || vmobject().is_purgeable();
-}
-
-bool Region::commit()
-{
-    ScopedSpinLock lock(s_mm_lock);
-#ifdef MM_DEBUG
-    dbg() << "MM: Commit " << page_count() << " pages in Region " << this << " (VMO=" << &vmobject() << ") at " << vaddr();
-#endif
-    for (size_t i = 0; i < page_count(); ++i) {
-        if (!commit(i)) {
-            // Flush what we did commit
-            if (i > 0)
-                MM.flush_tlb(vaddr(), i + 1);
-            return false;
-        }
-    }
-    MM.flush_tlb(vaddr(), page_count());
-    return true;
-}
-
-bool Region::commit(size_t page_index)
-{
-    ASSERT(vmobject().is_anonymous() || vmobject().is_purgeable());
-    ASSERT(s_mm_lock.own_lock());
-    auto& vmobject_physical_page_entry = physical_page_slot(page_index);
-    if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page())
-        return true;
-    RefPtr<PhysicalPage> physical_page;
-    if (vmobject_physical_page_entry->is_lazy_committed_page()) {
-        physical_page = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index);
-    } else {
-        physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
-        if (!physical_page) {
-            klog() << "MM: commit was unable to allocate a physical page";
-            return false;
-        }
-    }
-    vmobject_physical_page_entry = move(physical_page);
-    remap_page(page_index, false); // caller is in charge of flushing tlb
-    return true;
-}
-
-u32 Region::cow_pages() const
-{
-    if (!m_cow_map)
+    if (!vmobject().is_anonymous())
        return 0;
-    u32 count = 0;
-    for (size_t i = 0; i < m_cow_map->size(); ++i)
-        count += m_cow_map->get(i);
-    return count;
+    return static_cast<const AnonymousVMObject&>(vmobject()).cow_pages();
}

size_t Region::amount_dirty() const

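The removed Region::commit() / commit(page_index) pair eagerly allocated one physical page per slot. With this change, anonymous memory is instead committed up front (AllocationStrategy::Reserve) and individual pages are only pulled from that commitment on first touch, for example via allocate_committed_page() in the zero-fault path. A toy model of the idea follows; the names are invented, and the kernel's locking and page zeroing are left out. The point of the reservation is that once the commitment succeeds, later per-page allocations against it cannot fail, so out-of-memory is reported when the mapping is created rather than at fault time.

```cpp
#include <cassert>
#include <cstdio>
#include <optional>

class PhysicalPagePool {
public:
    explicit PhysicalPagePool(size_t free_pages)
        : m_free(free_pages)
    {
    }

    // Set aside 'count' pages now so later committed allocations cannot fail.
    bool commit(size_t count)
    {
        if (m_free - m_committed < count)
            return false;
        m_committed += count;
        return true;
    }

    // Allocate a page against a previous commitment; by construction this succeeds.
    size_t allocate_committed_page()
    {
        assert(m_committed > 0);
        --m_committed;
        --m_free;
        return m_next_page_id++;
    }

    // Allocate on demand; may fail once commitments have eaten the reserve.
    std::optional<size_t> allocate_page()
    {
        if (m_free - m_committed == 0)
            return std::nullopt;
        --m_free;
        return m_next_page_id++;
    }

    void release_commitment(size_t count) { m_committed -= count; }

private:
    size_t m_free { 0 };
    size_t m_committed { 0 };
    size_t m_next_page_id { 1 };
};

int main()
{
    PhysicalPagePool pool(3);
    // A 3-page anonymous mapping with Reserve-like behavior commits everything up front:
    if (!pool.commit(3))
        return 1;
    // Pages leave the pool only when first touched (e.g. on a zero fault):
    size_t first_touched = pool.allocate_committed_page();
    std::printf("first touched page -> physical page %zu\n", first_touched);
    // An uncommitted allocation can now fail, because the reserve is spoken for:
    auto extra = pool.allocate_page();
    std::printf("on-demand allocation %s\n", extra ? "succeeded" : "failed");
    return 0;
}
```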
@@ -300,25 +254,16 @@ NonnullOwnPtr<Region> Region::create_kernel_only(const Range& range, NonnullRefP

bool Region::should_cow(size_t page_index) const
{
-    auto* page = physical_page(page_index);
-    if (page && (page->is_shared_zero_page() || page->is_lazy_committed_page()))
-        return true;
-    if (m_shared)
+    if (!vmobject().is_anonymous())
        return false;
-    return m_cow_map && m_cow_map->get(page_index);
+    return static_cast<const AnonymousVMObject&>(vmobject()).should_cow(first_page_index() + page_index, m_shared);
}

void Region::set_should_cow(size_t page_index, bool cow)
{
    ASSERT(!m_shared);
-    ensure_cow_map().set(page_index, cow);
-}
-
-Bitmap& Region::ensure_cow_map() const
-{
-    if (!m_cow_map)
-        m_cow_map = make<Bitmap>(page_count(), true);
-    return *m_cow_map;
+    if (vmobject().is_anonymous())
+        static_cast<AnonymousVMObject&>(vmobject()).set_should_cow(first_page_index() + page_index, cow);
}

bool Region::map_individual_page_impl(size_t page_index)

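should_cow() and set_should_cow() now forward to the AnonymousVMObject and translate the region-relative index with first_page_index(). That translation is needed because the COW map has moved from the Region (the removed m_cow_map) into the shared object, where it is indexed in object page space and seen consistently by every mapping. A small sketch of the offset translation, with invented types:

```cpp
#include <cstdio>
#include <vector>

struct SharedCowState {
    std::vector<bool> cow; // one flag per page of the object
    explicit SharedCowState(size_t page_count)
        : cow(page_count, false)
    {
    }
};

struct RegionView {
    SharedCowState* object { nullptr };
    size_t first_page_index { 0 }; // where this mapping starts inside the object
    bool should_cow(size_t region_page) const { return object->cow[first_page_index + region_page]; }
    void set_should_cow(size_t region_page, bool value) { object->cow[first_page_index + region_page] = value; }
};

int main()
{
    SharedCowState object(8);
    RegionView whole { &object, 0 };
    RegionView tail { &object, 4 }; // maps only the last 4 pages

    whole.set_should_cow(5, true);
    // The same underlying page is page 1 of the 'tail' mapping:
    std::printf("tail sees page 1 as COW: %s\n", tail.should_cow(1) ? "yes" : "no");
    return 0;
}
```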
@@ -339,7 +284,7 @@ bool Region::map_individual_page_impl(size_t page_index)
        pte->set_cache_disabled(!m_cacheable);
        pte->set_physical_page_base(page->paddr().get());
        pte->set_present(true);
-        if (should_cow(page_index))
+        if (page->is_shared_zero_page() || page->is_lazy_committed_page() || should_cow(page_index))
            pte->set_writable(false);
        else
            pte->set_writable(is_writable());

|
||||||
void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
|
void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
|
||||||
{
|
{
|
||||||
ScopedSpinLock lock(s_mm_lock);
|
ScopedSpinLock lock(s_mm_lock);
|
||||||
ASSERT(m_page_directory);
|
if (!m_page_directory)
|
||||||
|
return;
|
||||||
ScopedSpinLock page_lock(m_page_directory->get_lock());
|
ScopedSpinLock page_lock(m_page_directory->get_lock());
|
||||||
size_t count = page_count();
|
size_t count = page_count();
|
||||||
for (size_t i = 0; i < count; ++i) {
|
for (size_t i = 0; i < count; ++i) {
|
||||||
|
@@ -444,6 +390,7 @@ void Region::remap()

PageFaultResponse Region::handle_fault(const PageFault& fault)
{
+    ScopedSpinLock lock(s_mm_lock);
    auto page_index_in_region = page_index_from_address(fault.vaddr());
    if (fault.type() == PageFault::Type::PageNotPresent) {
        if (fault.is_read() && !is_readable()) {

@@ -482,12 +429,12 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
    ASSERT(fault.type() == PageFault::Type::ProtectionViolation);
    if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
#ifdef PAGE_FAULT_DEBUG
-        dbg() << "PV(cow) fault in Region{" << this << "}[" << page_index_in_region << "]";
+        dbg() << "PV(cow) fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
#endif
        auto* phys_page = physical_page(page_index_in_region);
        if (phys_page->is_shared_zero_page() || phys_page->is_lazy_committed_page()) {
#ifdef PAGE_FAULT_DEBUG
-            dbg() << "NP(zero) fault in Region{" << this << "}[" << page_index_in_region << "]";
+            dbg() << "NP(zero) fault in Region{" << this << "}[" << page_index_in_region << "] at " << fault.vaddr();
#endif
            return handle_zero_fault(page_index_in_region);
        }

@@ -521,17 +468,20 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)

    if (page_slot->is_lazy_committed_page()) {
        page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_region);
+#ifdef PAGE_FAULT_DEBUG
+        dbg() << " >> ALLOCATED COMMITTED " << page_slot->paddr();
+#endif
    } else {
        page_slot = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
        if (page_slot.is_null()) {
            klog() << "MM: handle_zero_fault was unable to allocate a physical page";
            return PageFaultResponse::OutOfMemory;
        }
+#ifdef PAGE_FAULT_DEBUG
+        dbg() << " >> ALLOCATED " << page_slot->paddr();
+#endif
    }

-#ifdef PAGE_FAULT_DEBUG
-    dbg() << " >> ZERO " << page_slot->paddr();
-#endif
    if (!remap_page(page_index_in_region)) {
        klog() << "MM: handle_zero_fault was unable to allocate a page table to map " << page_slot;
        return PageFaultResponse::OutOfMemory;

@@ -542,53 +492,17 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
{
    ASSERT_INTERRUPTS_DISABLED();
-    auto& page_slot = physical_page_slot(page_index_in_region);
-    if (page_slot->ref_count() == 1) {
-#ifdef PAGE_FAULT_DEBUG
-        dbg() << " >> It's a COW page but nobody is sharing it anymore. Remap r/w";
-#endif
-        set_should_cow(page_index_in_region, false);
-        if (!remap_page(page_index_in_region))
-            return PageFaultResponse::OutOfMemory;
-        return PageFaultResponse::Continue;
-    }

    auto current_thread = Thread::current();
    if (current_thread)
        current_thread->did_cow_fault();

-#ifdef PAGE_FAULT_DEBUG
-    dbg() << " >> It's a COW page and it's time to COW!";
-#endif
-    auto page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);
-    if (page.is_null()) {
-        klog() << "MM: handle_cow_fault was unable to allocate a physical page";
-        return PageFaultResponse::OutOfMemory;
-    }
+    if (!vmobject().is_anonymous())
+        return PageFaultResponse::ShouldCrash;

-    u8* dest_ptr = MM.quickmap_page(*page);
-    const u8* src_ptr = vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
-#ifdef PAGE_FAULT_DEBUG
-    dbg() << " >> COW " << page->paddr() << " <- " << page_slot->paddr();
-#endif
-    {
-        SmapDisabler disabler;
-        void* fault_at;
-        if (!safe_memcpy(dest_ptr, src_ptr, PAGE_SIZE, fault_at)) {
-            if ((u8*)fault_at >= dest_ptr && (u8*)fault_at <= dest_ptr + PAGE_SIZE)
-                dbg() << " >> COW: error copying page " << page_slot->paddr() << "/" << VirtualAddress(src_ptr) << " to " << page->paddr() << "/" << VirtualAddress(dest_ptr) << ": failed to write to page at " << VirtualAddress(fault_at);
-            else if ((u8*)fault_at >= src_ptr && (u8*)fault_at <= src_ptr + PAGE_SIZE)
-                dbg() << " >> COW: error copying page " << page_slot->paddr() << "/" << VirtualAddress(src_ptr) << " to " << page->paddr() << "/" << VirtualAddress(dest_ptr) << ": failed to read from page at " << VirtualAddress(fault_at);
-            else
-                ASSERT_NOT_REACHED();
-        }
-    }
-    page_slot = move(page);
-    MM.unquickmap_page();
-    set_should_cow(page_index_in_region, false);
+    auto response = reinterpret_cast<AnonymousVMObject&>(vmobject()).handle_cow_fault(first_page_index() + page_index_in_region, vaddr().offset(page_index_in_region * PAGE_SIZE));
    if (!remap_page(page_index_in_region))
        return PageFaultResponse::OutOfMemory;
-    return PageFaultResponse::Continue;
+    return response;
}

PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)

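Region::handle_cow_fault() used to break copy-on-write itself: if the page's refcount was 1 it simply cleared the COW bit, otherwise it quickmapped a fresh page and safe_memcpy()'d the contents before swapping it in. That logic now lives in AnonymousVMObject::handle_cow_fault(), next to the shared COW bitmap and the committed pages, so the Region only remaps and forwards the response. Below is a simplified user-space model of the break-COW decision the removed code implemented; std::shared_ptr stands in for physical page refcounting, and none of the names are the kernel's.

```cpp
#include <array>
#include <cstdio>
#include <memory>

using Page = std::array<unsigned char, 4096>;

struct PageSlot {
    std::shared_ptr<Page> page;
    bool cow { false };
};

static void break_cow(PageSlot& slot)
{
    if (!slot.cow)
        return;
    if (slot.page.use_count() == 1) {
        // Nobody shares this page anymore; just drop the COW bit and remap writable.
        slot.cow = false;
        return;
    }
    // Otherwise copy the contents into a fresh page (the kernel additionally has
    // to handle allocation failure here and report OutOfMemory).
    auto copy = std::make_shared<Page>(*slot.page);
    slot.page = std::move(copy);
    slot.cow = false;
}

int main()
{
    auto original = std::make_shared<Page>();
    (*original)[0] = 42;

    PageSlot parent { original, true };
    PageSlot child { original, true };

    // The child writes first, so it must get its own copy.
    break_cow(child);
    (*child.page)[0] = 7;

    std::printf("parent byte: %u, child byte: %u, still shared: %s\n",
        static_cast<unsigned>((*parent.page)[0]),
        static_cast<unsigned>((*child.page)[0]),
        parent.page == child.page ? "yes" : "no");
    return 0;
}
```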
@@ -32,7 +32,8 @@
#include <AK/Weakable.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Heap/SlabAllocator.h>
-#include <Kernel/VM/PurgeableVMObject.h>
+#include <Kernel/VM/PageFaultResponse.h>
+#include <Kernel/VM/PurgeablePageRanges.h>
#include <Kernel/VM/RangeAllocator.h>
#include <Kernel/VM/VMObject.h>

@@ -41,12 +42,6 @@ namespace Kernel {
class Inode;
class VMObject;

-enum class PageFaultResponse {
-    ShouldCrash,
-    OutOfMemory,
-    Continue,
-};
-
class Region final
    : public InlineLinkedListNode<Region>
    , public Weakable<Region>

@@ -159,9 +154,6 @@ public:
        return m_offset_in_vmobject;
    }

-    bool can_commit() const;
-    bool commit();
-
    size_t amount_resident() const;
    size_t amount_shared() const;
    size_t amount_dirty() const;

@@ -169,7 +161,7 @@ public:
    bool should_cow(size_t page_index) const;
    void set_should_cow(size_t page_index, bool);

-    u32 cow_pages() const;
+    size_t cow_pages() const;

    void set_readable(bool b) { set_access_bit(Access::Read, b); }
    void set_writable(bool b) { set_access_bit(Access::Write, b); }

@@ -207,8 +199,6 @@ public:
    RefPtr<Process> get_owner();

private:
-    Bitmap& ensure_cow_map() const;
-
    void set_access_bit(Access access, bool b)
    {
        if (b)

@@ -217,7 +207,6 @@ private:
            m_access &= ~access;
    }

-    bool commit(size_t page_index);
    bool remap_page(size_t index, bool with_flush = true);

    PageFaultResponse handle_cow_fault(size_t page_index);

@@ -242,7 +231,6 @@ private:
    bool m_stack : 1 { false };
    bool m_mmap : 1 { false };
    bool m_kernel : 1 { false };
-    mutable OwnPtr<Bitmap> m_cow_map;
    WeakPtr<Process> m_owner;
};

@@ -50,7 +50,6 @@ public:
    virtual RefPtr<VMObject> clone() = 0;

    virtual bool is_anonymous() const { return false; }
-    virtual bool is_purgeable() const { return false; }
    virtual bool is_inode() const { return false; }
    virtual bool is_shared_inode() const { return false; }
    virtual bool is_private_inode() const { return false; }

@@ -78,6 +77,8 @@ protected:
    Vector<RefPtr<PhysicalPage>> m_physical_pages;
    Lock m_paging_lock { "VMObject" };

+    mutable SpinLock<u8> m_lock;
+
private:
    VMObject& operator=(const VMObject&) = delete;
    VMObject& operator=(VMObject&&) = delete;

@@ -10,6 +10,7 @@ add_compile_definitions("CACHE_DEBUG")
add_compile_definitions("CALLBACK_MACHINE_DEBUG")
add_compile_definitions("CHTTPJOB_DEBUG")
add_compile_definitions("CNETWORKJOB_DEBUG")
+add_compile_definitions("COMMIT_DEBUG")
add_compile_definitions("COMPOSE_DEBUG")
add_compile_definitions("CONTEXT_SWITCH_DEBUG")
add_compile_definitions("CONTIGUOUS_VMOBJECT_DEBUG")