Kernel: Start implementing purgeable memory support
It's now possible to get purgeable memory by using mmap(MAP_PURGEABLE).
Purgeable memory has a "volatile" flag that can be set using madvise():

- madvise(..., MADV_SET_VOLATILE)
- madvise(..., MADV_SET_NONVOLATILE)

When in the "volatile" state, the kernel may take away the underlying
physical memory pages at any time, without notifying the owner. This
gives you a guilt discount when caching very large things. :^)

Setting a purgeable region to non-volatile will return whether or not
the memory has been taken away by the kernel while being volatile.

Basically, if madvise(..., MADV_SET_NONVOLATILE) returns 1, that means
the memory was purged while volatile, and whatever was in that piece of
memory needs to be reconstructed before use.
This commit is contained in:
parent 7248c34e35
commit dbb644f20c

13 changed files with 196 additions and 9 deletions
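A minimal userspace sketch of the flow described in the commit message, assuming the mmap()/madvise() interface this commit introduces; rebuild_cache() and the buffer size are hypothetical placeholders, not part of the commit:

// Hedged sketch (not part of this commit): using MAP_PURGEABLE together
// with MADV_SET_VOLATILE / MADV_SET_NONVOLATILE from userspace.
#include <sys/mman.h>

static void rebuild_cache(void* buffer, size_t size); // hypothetical helper

void cache_example()
{
    const size_t cache_size = 4 * 1024 * 1024;
    void* cache = mmap(nullptr, cache_size, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE | MAP_PURGEABLE, 0, 0);

    rebuild_cache(cache, cache_size);

    // Done with the cache for now; the kernel may reclaim these pages at any time.
    madvise(cache, cache_size, MADV_SET_VOLATILE);

    // ...later, before touching the cache again:
    if (madvise(cache, cache_size, MADV_SET_NONVOLATILE) == 1) {
        // The pages were purged while volatile, so the contents are gone
        // (faulting them back in yields zero-filled pages) and must be rebuilt.
        rebuild_cache(cache, cache_size);
    }
}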
Kernel/VM/AnonymousVMObject.h

@@ -3,7 +3,7 @@
 #include <Kernel/VM/PhysicalAddress.h>
 #include <Kernel/VM/VMObject.h>
 
-class AnonymousVMObject final : public VMObject {
+class AnonymousVMObject : public VMObject {
 public:
     virtual ~AnonymousVMObject() override;
 
@@ -11,9 +11,11 @@ public:
     static NonnullRefPtr<AnonymousVMObject> create_for_physical_range(PhysicalAddress, size_t);
     virtual NonnullRefPtr<VMObject> clone() override;
 
-private:
+protected:
     explicit AnonymousVMObject(size_t);
     explicit AnonymousVMObject(const AnonymousVMObject&);
+
+private:
     AnonymousVMObject(PhysicalAddress, size_t);
 
     AnonymousVMObject& operator=(const AnonymousVMObject&) = delete;
Kernel/VM/PurgeableVMObject.cpp (new file, 41 lines)

@@ -0,0 +1,41 @@
+#include <Kernel/VM/PurgeableVMObject.h>
+#include <Kernel/VM/PhysicalPage.h>
+
+NonnullRefPtr<PurgeableVMObject> PurgeableVMObject::create_with_size(size_t size)
+{
+    return adopt(*new PurgeableVMObject(size));
+}
+
+PurgeableVMObject::PurgeableVMObject(size_t size)
+    : AnonymousVMObject(size)
+{
+}
+
+PurgeableVMObject::PurgeableVMObject(const PurgeableVMObject& other)
+    : AnonymousVMObject(other)
+{
+}
+
+PurgeableVMObject::~PurgeableVMObject()
+{
+}
+
+NonnullRefPtr<VMObject> PurgeableVMObject::clone()
+{
+    return adopt(*new PurgeableVMObject(*this));
+}
+
+int PurgeableVMObject::purge()
+{
+    LOCKER(m_paging_lock);
+    if (!m_volatile)
+        return 0;
+    int purged_page_count = 0;
+    for (size_t i = 0; i < m_physical_pages.size(); ++i) {
+        if (m_physical_pages[i])
+            ++purged_page_count;
+        m_physical_pages[i] = nullptr;
+    }
+    m_was_purged = true;
+    return purged_page_count;
+}
Kernel/VM/PurgeableVMObject.h (new file, 32 lines)

@@ -0,0 +1,32 @@
+#pragma once
+
+#include <Kernel/VM/AnonymousVMObject.h>
+
+class PurgeableVMObject final : public AnonymousVMObject {
+public:
+    virtual ~PurgeableVMObject() override;
+
+    static NonnullRefPtr<PurgeableVMObject> create_with_size(size_t);
+    virtual NonnullRefPtr<VMObject> clone() override;
+
+    int purge();
+
+    bool was_purged() const { return m_was_purged; }
+    void set_was_purged(bool b) { m_was_purged = b; }
+
+    bool is_volatile() const { return m_volatile; }
+    void set_volatile(bool b) { m_volatile = b; }
+
+private:
+    explicit PurgeableVMObject(size_t);
+    explicit PurgeableVMObject(const PurgeableVMObject&);
+
+    PurgeableVMObject& operator=(const PurgeableVMObject&) = delete;
+    PurgeableVMObject& operator=(PurgeableVMObject&&) = delete;
+    PurgeableVMObject(PurgeableVMObject&&) = delete;
+
+    virtual bool is_purgeable() const override { return true; }
+
+    bool m_was_purged { false };
+    bool m_volatile { false };
+};
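For context, a hedged sketch of how a caller such as the madvise() implementation (which lives in files of this commit not shown on this page) might drive the flags declared above. The function name and plumbing are assumptions; only the PurgeableVMObject member calls come from the header:

// Hedged sketch, based only on the PurgeableVMObject API above; the helper
// name and its integration with the madvise() syscall are assumptions.
int set_purgeable_volatile_state(PurgeableVMObject& vmobject, bool make_volatile)
{
    if (make_volatile) {
        // MADV_SET_VOLATILE: from now on the kernel is free to call purge()
        // on this object and reclaim its physical pages.
        vmobject.set_volatile(true);
        vmobject.set_was_purged(false);
        return 0;
    }
    // MADV_SET_NONVOLATILE: report whether the contents were purged while
    // volatile (1) or survived intact (0), then clear the flags.
    vmobject.set_volatile(false);
    int purged = vmobject.was_purged() ? 1 : 0;
    vmobject.set_was_purged(false);
    return purged;
}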
Kernel/VM/Region.cpp

@@ -299,10 +299,11 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     ASSERT_INTERRUPTS_DISABLED();
     ASSERT(vmobject().is_anonymous());
 
-    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];
+    sti();
+    LOCKER(vmobject().m_paging_lock);
+    cli();
 
-    // NOTE: We don't need to acquire the VMObject's lock.
-    //       This function is already exclusive due to interrupts being blocked.
+    auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + page_index_in_region];
 
     if (!vmobject_physical_page_entry.is_null()) {
 #ifdef PAGE_FAULT_DEBUG
Kernel/VM/VMObject.h

@@ -22,6 +22,7 @@ public:
     virtual NonnullRefPtr<VMObject> clone() = 0;
 
     virtual bool is_anonymous() const { return false; }
+    virtual bool is_purgeable() const { return false; }
     virtual bool is_inode() const { return false; }
 
     size_t page_count() const { return m_physical_pages.size(); }
@@ -42,11 +43,10 @@ protected:
     void for_each_region(Callback);
 
     FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
+    Lock m_paging_lock { "VMObject" };
 
 private:
     VMObject& operator=(const VMObject&) = delete;
    VMObject& operator=(VMObject&&) = delete;
     VMObject(VMObject&&) = delete;
-
-    Lock m_paging_lock { "VMObject" };
 };