1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-15 03:47:35 +00:00

Add slightly better kmalloc_aligned() and kfree_aligned().

Process page directories can now actually be freed. This could definitely
be implemented in a nicer, less wasteful way, but this works for now.

The spawn stress test can now run for a lot longer but eventually dies
due to kmalloc running out of memory.
This commit is contained in:
Andreas Kling 2018-12-26 21:31:46 +01:00
parent 55c722096d
commit f6179ad9f9
5 changed files with 26 additions and 20 deletions

View file

@ -58,6 +58,8 @@ void MemoryManager::release_page_directory(PageDirectory& page_directory)
#ifdef SCRUB_DEALLOCATED_PAGE_TABLES #ifdef SCRUB_DEALLOCATED_PAGE_TABLES
memset(&page_directory, 0xc9, sizeof(PageDirectory)); memset(&page_directory, 0xc9, sizeof(PageDirectory));
#endif #endif
kfree_aligned(&page_directory);
} }
void MemoryManager::initialize_paging() void MemoryManager::initialize_paging()

View file

@ -318,7 +318,7 @@ ByteBuffer procfs$kmalloc()
{ {
auto buffer = ByteBuffer::create_uninitialized(256); auto buffer = ByteBuffer::create_uninitialized(256);
char* ptr = (char*)buffer.pointer(); char* ptr = (char*)buffer.pointer();
ptr += ksprintf(ptr, "eternal: %u\npage-aligned: %u\nallocated: %u\nfree: %u\n", kmalloc_sum_eternal, sum_alloc, sum_free); ptr += ksprintf(ptr, "eternal: %u\nallocated: %u\nfree: %u\n", kmalloc_sum_eternal, sum_alloc, sum_free);
buffer.trim(ptr - (char*)buffer.pointer()); buffer.trim(ptr - (char*)buffer.pointer());
return buffer; return buffer;
} }

View file

@ -40,8 +40,8 @@ static void spawn_stress()
for (unsigned i = 0; i < 10000; ++i) { for (unsigned i = 0; i < 10000; ++i) {
int error; int error;
Process::create_user_process("/bin/id", (uid_t)100, (gid_t)100, (pid_t)0, error, Vector<String>(), Vector<String>(), tty0); Process::create_user_process("/bin/true", (uid_t)100, (gid_t)100, (pid_t)0, error, Vector<String>(), Vector<String>(), tty0);
kprintf("malloc stats: alloc:%u free:%u page_aligned:%u eternal:%u\n", sum_alloc, sum_free, kmalloc_page_aligned, kmalloc_sum_eternal); kprintf("malloc stats: alloc:%u free:%u eternal:%u ", sum_alloc, sum_free, kmalloc_sum_eternal);
kprintf("delta:%u\n", sum_alloc - lastAlloc); kprintf("delta:%u\n", sum_alloc - lastAlloc);
lastAlloc = sum_alloc; lastAlloc = sum_alloc;
sleep(60); sleep(60);

View file

@ -32,20 +32,14 @@ static byte alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
volatile size_t sum_alloc = 0; volatile size_t sum_alloc = 0;
volatile size_t sum_free = POOL_SIZE; volatile size_t sum_free = POOL_SIZE;
volatile size_t kmalloc_sum_eternal = 0; volatile size_t kmalloc_sum_eternal = 0;
volatile size_t kmalloc_sum_page_aligned = 0;
static byte* s_next_eternal_ptr; static byte* s_next_eternal_ptr;
static byte* s_next_page_aligned_ptr;
static byte* s_end_of_eternal_range; static byte* s_end_of_eternal_range;
static byte* s_end_of_page_aligned_range;
bool is_kmalloc_address(void* ptr) bool is_kmalloc_address(void* ptr)
{ {
if (ptr >= (byte*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr) if (ptr >= (byte*)ETERNAL_BASE_PHYSICAL && ptr < s_next_eternal_ptr)
return true; return true;
if (ptr >= (byte*)PAGE_ALIGNED_BASE_PHYSICAL && ptr < s_next_page_aligned_ptr)
return true;
return (dword)ptr >= BASE_PHYS && (dword)ptr <= (BASE_PHYS + POOL_SIZE); return (dword)ptr >= BASE_PHYS && (dword)ptr <= (BASE_PHYS + POOL_SIZE);
} }
@ -55,15 +49,11 @@ void kmalloc_init()
memset( (void *)BASE_PHYS, 0, POOL_SIZE ); memset( (void *)BASE_PHYS, 0, POOL_SIZE );
kmalloc_sum_eternal = 0; kmalloc_sum_eternal = 0;
kmalloc_sum_page_aligned = 0;
sum_alloc = 0; sum_alloc = 0;
sum_free = POOL_SIZE; sum_free = POOL_SIZE;
s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL; s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL;
s_next_page_aligned_ptr = (byte*)PAGE_ALIGNED_BASE_PHYSICAL;
s_end_of_eternal_range = s_next_eternal_ptr + RANGE_SIZE; s_end_of_eternal_range = s_next_eternal_ptr + RANGE_SIZE;
s_end_of_page_aligned_range = s_next_page_aligned_ptr + RANGE_SIZE;
} }
void* kmalloc_eternal(size_t size) void* kmalloc_eternal(size_t size)
@ -75,16 +65,28 @@ void* kmalloc_eternal(size_t size)
return ptr; return ptr;
} }
void* kmalloc_page_aligned(size_t size) void* kmalloc_aligned(size_t size, size_t alignment)
{ {
ASSERT((size % PAGE_SIZE) == 0); void* ptr = kmalloc(size + alignment + sizeof(void*));
void* ptr = s_next_page_aligned_ptr; dword max_addr = (dword)ptr + alignment;
s_next_page_aligned_ptr += size; void* aligned_ptr = (void*)(max_addr - (max_addr % alignment));
ASSERT(s_next_page_aligned_ptr < s_end_of_page_aligned_range);
kmalloc_sum_page_aligned += size; ((void**)aligned_ptr)[-1] = ptr;
return ptr; return aligned_ptr;
} }
// Frees a block previously returned by kmalloc_aligned().
// kmalloc_aligned() stores the original (unaligned) kmalloc() pointer in the
// void* slot immediately before the aligned address it hands out, so recover
// that pointer here and pass it back to kfree(). Passing any pointer that did
// not come from kmalloc_aligned() is undefined behavior.
void kfree_aligned(void* ptr)
{
kfree(((void**)ptr)[-1]);
}
// Allocates `size` bytes whose address is aligned to a page boundary.
// Implemented as a thin wrapper over kmalloc_aligned(size, PAGE_SIZE);
// free the result with kfree_aligned(), not kfree().
// The ASSERT sanity-checks the alignment — it assumes PAGE_MASK keeps the
// page-frame bits and clears the in-page offset bits, so a page-aligned
// address masks to itself (NOTE(review): confirm PAGE_MASK semantics).
void* kmalloc_page_aligned(size_t size)
{
void* ptr = kmalloc_aligned(size, PAGE_SIZE);
dword d = (dword)ptr;
ASSERT((d & PAGE_MASK) == d);
return ptr;
}
void* kmalloc(dword size) void* kmalloc(dword size)
{ {

View file

@ -4,7 +4,9 @@ void kmalloc_init();
void *kmalloc(dword size) __attribute__ ((malloc)); void *kmalloc(dword size) __attribute__ ((malloc));
void* kmalloc_eternal(size_t) __attribute__ ((malloc)); void* kmalloc_eternal(size_t) __attribute__ ((malloc));
void* kmalloc_page_aligned(size_t) __attribute__ ((malloc)); void* kmalloc_page_aligned(size_t) __attribute__ ((malloc));
void* kmalloc_aligned(size_t, size_t alignment) __attribute__ ((malloc));
void kfree(void*); void kfree(void*);
void kfree_aligned(void*);
bool is_kmalloc_address(void*); bool is_kmalloc_address(void*);