1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-05-28 04:45:09 +00:00

Kernel: Refactor MemoryManager to use a Bitmap rather than a Vector

This significantly reduces the pressure on the kernel heap when
allocating a lot of pages.

Previously at about 250MB allocated, the free page list would outgrow
the kernel's heap. Given that there is no longer a page list, this does
not happen.

The next barrier will be the kernel memory used by the page records for
in-use memory. This kicks in at about 1GB.
This commit is contained in:
Conrad Pankoff 2019-06-11 21:13:02 +10:00 committed by Andreas Kling
parent 1a77dfed23
commit aee9317d86
8 changed files with 278 additions and 51 deletions

View file

@ -387,8 +387,8 @@ ByteBuffer procfs$mm(InodeIdentifier)
vmo->name().characters()); vmo->name().characters());
} }
builder.appendf("VMO count: %u\n", MM.m_vmos.size()); builder.appendf("VMO count: %u\n", MM.m_vmos.size());
builder.appendf("Free physical pages: %u\n", MM.m_free_physical_pages.size()); builder.appendf("Free physical pages: %u\n", MM.user_physical_pages() - MM.user_physical_pages_used());
builder.appendf("Free supervisor physical pages: %u\n", MM.m_free_supervisor_physical_pages.size()); builder.appendf("Free supervisor physical pages: %u\n", MM.super_physical_pages() - MM.super_physical_pages_used());
return builder.to_byte_buffer(); return builder.to_byte_buffer();
} }
@ -544,10 +544,10 @@ ByteBuffer procfs$memstat(InodeIdentifier)
kmalloc_sum_eternal, kmalloc_sum_eternal,
sum_alloc, sum_alloc,
sum_free, sum_free,
MM.user_physical_pages_in_existence() - MM.m_free_physical_pages.size(), MM.user_physical_pages_used(),
MM.m_free_physical_pages.size(), MM.user_physical_pages() - MM.user_physical_pages_used(),
MM.super_physical_pages_in_existence() - MM.m_free_supervisor_physical_pages.size(), MM.super_physical_pages_used(),
MM.m_free_supervisor_physical_pages.size(), MM.super_physical_pages() - MM.super_physical_pages_used(),
g_kmalloc_call_count, g_kmalloc_call_count,
g_kfree_call_count); g_kfree_call_count);
return builder.to_byte_buffer(); return builder.to_byte_buffer();

View file

@ -18,6 +18,7 @@ KERNEL_OBJS = \
VM/VMObject.o \ VM/VMObject.o \
VM/PageDirectory.o \ VM/PageDirectory.o \
VM/PhysicalPage.o \ VM/PhysicalPage.o \
VM/PhysicalRegion.o \
VM/RangeAllocator.o \ VM/RangeAllocator.o \
Console.o \ Console.o \
IRQHandler.o \ IRQHandler.o \

View file

@ -12,8 +12,6 @@
//#define PAGE_FAULT_DEBUG //#define PAGE_FAULT_DEBUG
static MemoryManager* s_the; static MemoryManager* s_the;
unsigned MemoryManager::s_user_physical_pages_in_existence;
unsigned MemoryManager::s_super_physical_pages_in_existence;
MemoryManager& MM MemoryManager& MM
{ {
@ -78,6 +76,14 @@ void MemoryManager::initialize_paging()
// 5 MB -> 0xc0000000 Userspace physical pages (available for allocation!) // 5 MB -> 0xc0000000 Userspace physical pages (available for allocation!)
// 0xc0000000-0xffffffff Kernel-only linear address space // 0xc0000000-0xffffffff Kernel-only linear address space
#ifdef MM_DEBUG
dbgprintf("MM: Quickmap will use %p\n", m_quickmap_addr.get());
#endif
m_quickmap_addr = VirtualAddress((1 * MB) - PAGE_SIZE);
RetainPtr<PhysicalRegion> region = nullptr;
bool region_is_super = false;
for (auto* mmap = (multiboot_memory_map_t*)multiboot_info_ptr->mmap_addr; (unsigned long)mmap < multiboot_info_ptr->mmap_addr + multiboot_info_ptr->mmap_length; mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) { for (auto* mmap = (multiboot_memory_map_t*)multiboot_info_ptr->mmap_addr; (unsigned long)mmap < multiboot_info_ptr->mmap_addr + multiboot_info_ptr->mmap_length; mmap = (multiboot_memory_map_t*)((unsigned long)mmap + mmap->size + sizeof(mmap->size))) {
kprintf("MM: Multiboot mmap: base_addr = 0x%x%08x, length = 0x%x%08x, type = 0x%x\n", kprintf("MM: Multiboot mmap: base_addr = 0x%x%08x, length = 0x%x%08x, type = 0x%x\n",
(dword)(mmap->addr >> 32), (dword)(mmap->addr >> 32),
@ -88,26 +94,48 @@ void MemoryManager::initialize_paging()
if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE) if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
continue; continue;
// FIXME: Maybe make use of stuff below the 1MB mark? // FIXME: Maybe make use of stuff below the 1MB mark?
if (mmap->addr < (1 * MB)) if (mmap->addr < (1 * MB))
continue; continue;
for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) { #ifdef MM_DEBUG
if (page_base < (4 * MB)) { kprintf("MM: considering memory at %p - %p\n",
// Skip over pages managed by kmalloc. (dword)mmap->addr, (dword)(mmap->addr + mmap->len));
continue; #endif
}
if (page_base < (5 * MB)) for (size_t page_base = mmap->addr; page_base < (mmap->addr + mmap->len); page_base += PAGE_SIZE) {
m_free_supervisor_physical_pages.append(PhysicalPage::create_eternal(PhysicalAddress(page_base), true)); auto addr = PhysicalAddress(page_base);
else
m_free_physical_pages.append(PhysicalPage::create_eternal(PhysicalAddress(page_base), false)); if (page_base < 4 * MB) {
// nothing
} else if (page_base >= 4 * MB && page_base < 5 * MB) {
if (region.is_null() || !region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
m_super_physical_regions.append(PhysicalRegion::create(addr, addr));
region = m_super_physical_regions.last();
region_is_super = true;
} else {
region->expand(region->lower(), addr);
}
} else {
if (region.is_null() || region_is_super || region->upper().offset(PAGE_SIZE) != addr) {
m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
region = m_user_physical_regions.last();
region_is_super = false;
} else {
region->expand(region->lower(), addr);
}
}
} }
} }
m_quickmap_addr = VirtualAddress((1 * MB) - PAGE_SIZE); for (auto& region : m_super_physical_regions)
m_super_physical_pages += region->finalize_capacity();
for (auto& region : m_user_physical_regions)
m_user_physical_pages += region->finalize_capacity();
#ifdef MM_DEBUG #ifdef MM_DEBUG
dbgprintf("MM: Quickmap will use P%x\n", m_quickmap_addr.get());
dbgprintf("MM: Installing page directory\n"); dbgprintf("MM: Installing page directory\n");
#endif #endif
@ -282,7 +310,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
remap_region_page(region, page_index_in_region, true); remap_region_page(region, page_index_in_region, true);
return true; return true;
} }
auto physical_page = allocate_physical_page(ShouldZeroFill::Yes); auto physical_page = allocate_user_physical_page(ShouldZeroFill::Yes);
#ifdef PAGE_FAULT_DEBUG #ifdef PAGE_FAULT_DEBUG
dbgprintf(" >> ZERO P%x\n", physical_page->paddr().get()); dbgprintf(" >> ZERO P%x\n", physical_page->paddr().get());
#endif #endif
@ -309,7 +337,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
dbgprintf(" >> It's a COW page and it's time to COW!\n"); dbgprintf(" >> It's a COW page and it's time to COW!\n");
#endif #endif
auto physical_page_to_copy = move(vmo.physical_pages()[page_index_in_region]); auto physical_page_to_copy = move(vmo.physical_pages()[page_index_in_region]);
auto physical_page = allocate_physical_page(ShouldZeroFill::No); auto physical_page = allocate_user_physical_page(ShouldZeroFill::No);
byte* dest_ptr = quickmap_page(*physical_page); byte* dest_ptr = quickmap_page(*physical_page);
const byte* src_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr(); const byte* src_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();
#ifdef PAGE_FAULT_DEBUG #ifdef PAGE_FAULT_DEBUG
@ -360,7 +388,7 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
memset(page_buffer + nread, 0, PAGE_SIZE - nread); memset(page_buffer + nread, 0, PAGE_SIZE - nread);
} }
cli(); cli();
vmo_page = allocate_physical_page(ShouldZeroFill::No); vmo_page = allocate_user_physical_page(ShouldZeroFill::No);
if (vmo_page.is_null()) { if (vmo_page.is_null()) {
kprintf("MM: page_in_from_inode was unable to allocate a physical page\n"); kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
return false; return false;
@ -430,40 +458,114 @@ RetainPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String&& na
return region; return region;
} }
RetainPtr<PhysicalPage> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill) void MemoryManager::deallocate_user_physical_page(PhysicalPage& page)
{
for (auto& region : m_user_physical_regions) {
if (!region->contains(page)) {
kprintf(
"MM: deallocate_user_physical_page: %p not in %p -> %p\n",
page.paddr(), region->lower().get(), region->upper().get());
continue;
}
region->return_page(page);
m_user_physical_pages_used--;
return;
}
kprintf("MM: deallocate_user_physical_page couldn't figure out region for user page @ %p\n", page.paddr());
ASSERT_NOT_REACHED();
}
RetainPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill)
{
    InterruptDisabler disabler;

    // Take the first free page offered by any user region. We must stop as
    // soon as a region vends a page: without the break, later iterations
    // overwrite `page` (losing the page we already took) and can leave it
    // null even though free pages exist.
    RetainPtr<PhysicalPage> page = nullptr;
    for (auto& region : m_user_physical_regions) {
        page = region->take_free_page(false);
        if (!page.is_null())
            break;
    }

    if (!page) {
        if (m_user_physical_regions.is_empty())
            kprintf("MM: no user physical regions available (?)\n");
        kprintf("MM: no user physical pages available\n");
        ASSERT_NOT_REACHED();
        return {};
    }

#ifdef MM_DEBUG
    dbgprintf("MM: allocate_user_physical_page vending P%p\n", page->paddr().get());
#endif

    if (should_zero_fill == ShouldZeroFill::Yes) {
        // User pages aren't identity-mapped, so zero through the quickmap slot.
        auto* ptr = (dword*)quickmap_page(*page);
        fast_dword_fill(ptr, 0, PAGE_SIZE / sizeof(dword));
        unquickmap_page();
    }

    m_user_physical_pages_used++;
    return page;
}
void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage& page)
{
    // Supervisor twin of deallocate_user_physical_page(): return the page
    // to its owning region and drop the used-page counter. Non-matching
    // regions are skipped silently to avoid per-deallocation log spam.
    for (auto& region : m_super_physical_regions) {
        if (!region->contains(page))
            continue;
        region->return_page(page);
        m_super_physical_pages_used--;
        return;
    }

    // NOTE: PhysicalAddress must not be passed to varargs directly (UB);
    // unwrap the raw address with get().
    kprintf("MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ %p\n", page.paddr().get());
    ASSERT_NOT_REACHED();
}
RetainPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
{
    InterruptDisabler disabler;

    // Take the first free page offered by any supervisor region. As in the
    // user-page allocator, stop once a page is vended: without the break,
    // later iterations overwrite `page` and can discard a valid allocation.
    RetainPtr<PhysicalPage> page = nullptr;
    for (auto& region : m_super_physical_regions) {
        page = region->take_free_page(true);
        if (!page.is_null())
            break;
    }

    if (!page) {
        if (m_super_physical_regions.is_empty())
            kprintf("MM: no super physical regions available (?)\n");
        kprintf("MM: no super physical pages available\n");
        ASSERT_NOT_REACHED();
        return {};
    }

#ifdef MM_DEBUG
    dbgprintf("MM: allocate_supervisor_physical_page vending P%p\n", page->paddr().get());
#endif

    // Supervisor pages (4-5 MB) are identity-mapped, so zero them directly
    // through their physical address.
    fast_dword_fill((dword*)page->paddr().as_ptr(), 0, PAGE_SIZE / sizeof(dword));

    m_super_physical_pages_used++;
    return page;
}
void MemoryManager::enter_process_paging_scope(Process& process) void MemoryManager::enter_process_paging_scope(Process& process)

View file

@ -13,6 +13,7 @@
#include <Kernel/Arch/i386/CPU.h> #include <Kernel/Arch/i386/CPU.h>
#include <Kernel/FileSystem/InodeIdentifier.h> #include <Kernel/FileSystem/InodeIdentifier.h>
#include <Kernel/VM/PhysicalPage.h> #include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/PhysicalRegion.h>
#include <Kernel/VM/Region.h> #include <Kernel/VM/Region.h>
#include <Kernel/VM/VMObject.h> #include <Kernel/VM/VMObject.h>
#include <Kernel/VirtualAddress.h> #include <Kernel/VirtualAddress.h>
@ -32,6 +33,7 @@ class MemoryManager {
AK_MAKE_ETERNAL AK_MAKE_ETERNAL
friend class PageDirectory; friend class PageDirectory;
friend class PhysicalPage; friend class PhysicalPage;
friend class PhysicalRegion;
friend class Region; friend class Region;
friend class VMObject; friend class VMObject;
friend ByteBuffer procfs$mm(InodeIdentifier); friend ByteBuffer procfs$mm(InodeIdentifier);
@ -59,19 +61,23 @@ public:
Yes Yes
}; };
RetainPtr<PhysicalPage> allocate_physical_page(ShouldZeroFill); RetainPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill);
RetainPtr<PhysicalPage> allocate_supervisor_physical_page(); RetainPtr<PhysicalPage> allocate_supervisor_physical_page();
void deallocate_user_physical_page(PhysicalPage&);
void deallocate_supervisor_physical_page(PhysicalPage&);
void remap_region(PageDirectory&, Region&); void remap_region(PageDirectory&, Region&);
int user_physical_pages_in_existence() const { return s_user_physical_pages_in_existence; }
int super_physical_pages_in_existence() const { return s_super_physical_pages_in_existence; }
void map_for_kernel(VirtualAddress, PhysicalAddress); void map_for_kernel(VirtualAddress, PhysicalAddress);
RetainPtr<Region> allocate_kernel_region(size_t, String&& name); RetainPtr<Region> allocate_kernel_region(size_t, String&& name);
void map_region_at_address(PageDirectory&, Region&, VirtualAddress, bool user_accessible); void map_region_at_address(PageDirectory&, Region&, VirtualAddress, bool user_accessible);
unsigned user_physical_pages() const { return m_user_physical_pages; }
unsigned user_physical_pages_used() const { return m_user_physical_pages_used; }
unsigned super_physical_pages() const { return m_super_physical_pages; }
unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }
private: private:
MemoryManager(); MemoryManager();
~MemoryManager(); ~MemoryManager();
@ -206,9 +212,6 @@ private:
dword* m_pte; dword* m_pte;
}; };
static unsigned s_user_physical_pages_in_existence;
static unsigned s_super_physical_pages_in_existence;
PageTableEntry ensure_pte(PageDirectory&, VirtualAddress); PageTableEntry ensure_pte(PageDirectory&, VirtualAddress);
RetainPtr<PageDirectory> m_kernel_page_directory; RetainPtr<PageDirectory> m_kernel_page_directory;
@ -217,8 +220,13 @@ private:
VirtualAddress m_quickmap_addr; VirtualAddress m_quickmap_addr;
Vector<Retained<PhysicalPage>> m_free_physical_pages; unsigned m_user_physical_pages { 0 };
Vector<Retained<PhysicalPage>> m_free_supervisor_physical_pages; unsigned m_user_physical_pages_used { 0 };
unsigned m_super_physical_pages { 0 };
unsigned m_super_physical_pages_used { 0 };
Vector<Retained<PhysicalRegion>> m_user_physical_regions {};
Vector<Retained<PhysicalRegion>> m_super_physical_regions {};
HashTable<VMObject*> m_vmos; HashTable<VMObject*> m_vmos;
HashTable<Region*> m_user_regions; HashTable<Region*> m_user_regions;

View file

@ -21,21 +21,21 @@ PhysicalPage::PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_retu
, m_supervisor(supervisor) , m_supervisor(supervisor)
, m_paddr(paddr) , m_paddr(paddr)
{ {
if (supervisor)
++MemoryManager::s_super_physical_pages_in_existence;
else
++MemoryManager::s_user_physical_pages_in_existence;
} }
void PhysicalPage::return_to_freelist() void PhysicalPage::return_to_freelist()
{ {
ASSERT((paddr().get() & ~PAGE_MASK) == 0); ASSERT((paddr().get() & ~PAGE_MASK) == 0);
InterruptDisabler disabler; InterruptDisabler disabler;
m_retain_count = 1; m_retain_count = 1;
if (m_supervisor) if (m_supervisor)
MM.m_free_supervisor_physical_pages.append(adopt(*this)); MM.deallocate_supervisor_physical_page(*this);
else else
MM.m_free_physical_pages.append(adopt(*this)); MM.deallocate_user_physical_page(*this);
#ifdef MM_DEBUG #ifdef MM_DEBUG
dbgprintf("MM: P%x released to freelist\n", m_paddr.get()); dbgprintf("MM: P%x released to freelist\n", m_paddr.get());
#endif #endif

View file

@ -0,0 +1,77 @@
#include <AK/Bitmap.h>
#include <AK/Retained.h>
#include <AK/RetainPtr.h>
#include <Kernel/Assertions.h>
#include <Kernel/PhysicalAddress.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/PhysicalRegion.h>
Retained<PhysicalRegion> PhysicalRegion::create(PhysicalAddress lower, PhysicalAddress upper)
{
    // Factory: regions live on the heap and are handed out pre-retained.
    auto* region = new PhysicalRegion(lower, upper);
    return adopt(*region);
}
// A PhysicalRegion tracks one contiguous range of physical pages with an
// allocation bitmap. The bitmap starts empty here; it is sized later by
// finalize_capacity(), once the bounds have stopped growing via expand().
PhysicalRegion::PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper)
    : m_lower(lower)
    , m_upper(upper)
    , m_bitmap(Bitmap::create())
{
}
void PhysicalRegion::expand(PhysicalAddress new_lower, PhysicalAddress new_upper)
{
    // Bounds may only change before finalize_capacity() has sized the
    // bitmap (m_pages stays 0 until then).
    ASSERT(!m_pages);

    m_lower = new_lower;
    m_upper = new_upper;
}
unsigned PhysicalRegion::finalize_capacity()
{
    // May only run once, after all expand() calls are done.
    ASSERT(!m_pages);

    // m_upper holds the base address of the *last* page in the region:
    // regions are built with create(addr, addr) and then expand()-ed one
    // page base at a time, and contains() treats m_upper as inclusive.
    // The page count is therefore inclusive of both ends; without the +1,
    // the final page is never allocatable and a single-page region
    // (lower == upper) would report a capacity of zero.
    m_pages = (m_upper.get() - m_lower.get()) / PAGE_SIZE + 1;
    m_bitmap.grow(m_pages, false);

    return size();
}
RetainPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
{
    ASSERT(m_pages);

    // Everything already handed out?
    if (m_used == m_pages)
        return nullptr;

    // Scan for a clear bit starting at the low-water mark. m_last is only
    // ever lowered when a page below it is returned, so no free page can
    // hide before it.
    unsigned index = m_last;
    while (index < m_pages) {
        if (m_bitmap.get(index)) {
            ++index;
            continue;
        }
        m_bitmap.set(index, true);
        m_used++;
        m_last = index + 1;
        return PhysicalPage::create(m_lower.offset(index * PAGE_SIZE), supervisor);
    }

    // m_used promised a free page but the scan found none: bookkeeping bug.
    ASSERT_NOT_REACHED();
    return nullptr;
}
void PhysicalRegion::return_page_at(PhysicalAddress addr)
{
    ASSERT(m_pages);
    // Returning a page to a fully-free region means a double free somewhere.
    ASSERT(m_used != 0);

    // Compute the page index in unsigned arithmetic: the previous
    // `int local_offset` could overflow (UB) for byte offsets beyond
    // INT_MAX before the range checks even ran.
    ASSERT(addr.get() >= m_lower.get());
    auto local_offset = addr.get() - m_lower.get();
    auto page = local_offset / PAGE_SIZE;
    ASSERT(page < m_pages);

    // Lower the allocation low-water mark so take_free_page() will rescan
    // from this slot.
    if (page < m_last)
        m_last = page;

    m_bitmap.set(page, false);
    m_used--;
}

View file

@ -0,0 +1,39 @@
#pragma once
#include <AK/Bitmap.h>
#include <AK/Retainable.h>
#include <AK/Retained.h>
#include <Kernel/PhysicalAddress.h>
#include <Kernel/VM/PhysicalPage.h>
// One contiguous range of physical pages, tracked with a single allocation
// bitmap instead of a per-page free list (keeps kernel-heap pressure flat
// regardless of how much RAM the machine has).
//
// Lifecycle: create() -> zero or more expand() calls while scanning the
// multiboot memory map -> finalize_capacity() exactly once -> then
// take_free_page()/return_page() at runtime.
class PhysicalRegion : public Retainable<PhysicalRegion> {
    AK_MAKE_ETERNAL
public:
    static Retained<PhysicalRegion> create(PhysicalAddress lower, PhysicalAddress upper);
    ~PhysicalRegion() {}

    // Grow the bounds; only legal before finalize_capacity().
    void expand(PhysicalAddress lower, PhysicalAddress upper);
    // Size the bitmap from the final bounds; returns the page capacity.
    unsigned finalize_capacity();

    PhysicalAddress lower() const { return m_lower; }
    PhysicalAddress upper() const { return m_upper; }
    unsigned size() const { return m_pages; }
    unsigned used() const { return m_used; }
    unsigned free() const { return m_pages - m_used; }
    // NOTE: m_upper is inclusive — it is the base address of the last page.
    bool contains(PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }

    RetainPtr<PhysicalPage> take_free_page(bool supervisor);
    void return_page_at(PhysicalAddress addr);
    void return_page(PhysicalPage& page) { return_page_at(page.paddr()); }

private:
    PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper);

    PhysicalAddress m_lower;          // base of first page in the region
    PhysicalAddress m_upper;          // base of last page (inclusive)
    unsigned m_pages { 0 };           // capacity; 0 until finalize_capacity()
    unsigned m_used { 0 };            // pages currently handed out
    unsigned m_last { 0 };            // low-water mark for the free-bit scan
    Bitmap m_bitmap;                  // one bit per page; set == allocated
};

View file

@ -103,7 +103,7 @@ int Region::commit()
for (size_t i = first_page_index(); i <= last_page_index(); ++i) { for (size_t i = first_page_index(); i <= last_page_index(); ++i) {
if (!vmo().physical_pages()[i].is_null()) if (!vmo().physical_pages()[i].is_null())
continue; continue;
auto physical_page = MM.allocate_physical_page(MemoryManager::ShouldZeroFill::Yes); auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
if (!physical_page) { if (!physical_page) {
kprintf("MM: commit was unable to allocate a physical page\n"); kprintf("MM: commit was unable to allocate a physical page\n");
return -ENOMEM; return -ENOMEM;