Kernel: Use a shared physical page for zero-filled pages until written
This patch adds a globally shared zero-filled PhysicalPage that will be mapped into every slot of every zero-filled AnonymousVMObject until that page is written to, achieving CoW-like zero-filled pages.

Initial testing shows that this doesn't actually achieve any sharing yet, but it seems like a good design regardless, since it may reduce the number of page faults taken by programs.

If you look at the refcount of MM.shared_zero_page(), it will be quite high, but that's just because everything maps it everywhere. If you want to see the "real" refcount, you can build with the MAP_SHARED_ZERO_PAGE_LAZILY flag, and we'll defer mapping of the shared zero page until the first NP (non-present) read fault. I've left this behavior behind a flag for future testing of this code.
commit c624d3875e
parent a4d857e3c5
5 changed files with 41 additions and 8 deletions
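The mechanism is easy to model outside the kernel. The sketch below is a hypothetical, self-contained C++ illustration of the idea, not the kernel's actual code: FakePhysicalPage, FakeAnonymousVMObject, and page_for_write() are invented stand-ins for PhysicalPage, AnonymousVMObject, and the write-fault path. Every slot of a fresh object starts out aliasing one shared zero page, and the first write to a slot swaps in a private zero-filled page, CoW-style.

```cpp
#include <cstddef>
#include <cstdio>
#include <memory>
#include <vector>

static constexpr std::size_t PAGE_SIZE = 4096;

// Stand-in for a refcounted, zero-filled PhysicalPage.
struct FakePhysicalPage {
    std::vector<unsigned char> data = std::vector<unsigned char>(PAGE_SIZE, 0);
};

// One globally shared zero page, analogous to MM.shared_zero_page().
static std::shared_ptr<FakePhysicalPage> shared_zero_page()
{
    static auto page = std::make_shared<FakePhysicalPage>();
    return page;
}

// Stand-in for an AnonymousVMObject: every slot starts out pointing at
// the shared zero page. (The MAP_SHARED_ZERO_PAGE_LAZILY variant would
// instead leave slots empty until the first read fault.)
struct FakeAnonymousVMObject {
    std::vector<std::shared_ptr<FakePhysicalPage>> slots;

    explicit FakeAnonymousVMObject(std::size_t page_count)
        : slots(page_count, shared_zero_page())
    {
    }

    // Write fault: if the slot still references the shared zero page,
    // replace it with a freshly allocated zero-filled page (CoW break).
    FakePhysicalPage& page_for_write(std::size_t index)
    {
        if (slots[index] == shared_zero_page())
            slots[index] = std::make_shared<FakePhysicalPage>();
        return *slots[index];
    }
};

int main()
{
    FakeAnonymousVMObject object(8);
    std::printf("zero page refcount before write: %ld\n",
                shared_zero_page().use_count());

    object.page_for_write(3).data[0] = 42; // first write breaks the sharing

    std::printf("zero page refcount after write:  %ld\n",
                shared_zero_page().use_count());
}
```

The inflated use count printed before the write is the same effect the commit message describes for MM.shared_zero_page(): the count reflects how many slots map the page, not how much real memory is in use.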
```diff
@@ -132,6 +132,8 @@ public:
 
     void dump_kernel_regions();
 
+    PhysicalPage& shared_zero_page() { return *m_shared_zero_page; }
+
 private:
     MemoryManager();
     ~MemoryManager();
@@ -172,6 +174,8 @@ private:
     RefPtr<PageDirectory> m_kernel_page_directory;
     RefPtr<PhysicalPage> m_low_page_table;
 
+    RefPtr<PhysicalPage> m_shared_zero_page;
+
     unsigned m_user_physical_pages { 0 };
     unsigned m_user_physical_pages_used { 0 };
     unsigned m_super_physical_pages { 0 };
@@ -223,3 +227,8 @@ inline bool is_user_range(VirtualAddress vaddr, size_t size)
         return false;
     return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
 }
+
+inline bool PhysicalPage::is_shared_zero_page() const
+{
+    return this == &MM.shared_zero_page();
+}
```
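Note the design of the new predicate: since exactly one shared zero page exists system-wide, is_shared_zero_page() can be pure pointer identity against the singleton, with no per-page flag bit. A hypothetical, self-contained sketch of the same shape (Page and the free function shared_zero_page() are invented stand-ins, not kernel types):

```cpp
#include <cassert>

// Invented stand-in for PhysicalPage; not the kernel's real type.
struct Page {
    bool is_shared_zero_page() const;
};

// Stand-in for MM.shared_zero_page(): a process-wide singleton.
static Page& shared_zero_page()
{
    static Page page;
    return page;
}

// Same shape as the inline helper added at the end of the diff: a page
// is the shared zero page exactly when its address is the singleton's.
bool Page::is_shared_zero_page() const
{
    return this == &shared_zero_page();
}

int main()
{
    Page private_page;
    assert(shared_zero_page().is_shared_zero_page());
    assert(!private_page.is_shared_zero_page());
}
```

A write-fault handler can use this one-comparison check to decide whether it must first break the sharing by swapping in a private zero-filled page before letting the write proceed.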