
LibC: Lazily initialize malloc chunks

By default malloc manages memory internally in larger blocks. When
one of those blocks is added, we initialize a free list by touching
each of the new block's pages, thereby committing all that memory
up front.
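
As a rough illustration of that eager scheme (a minimal sketch, not the
actual LibC code: the block layout, the mmap-based setup and the helper
name are assumptions), linking every chunk of a freshly mapped block
writes one pointer per chunk, so every page of the block is touched and
committed immediately:

#include <cstddef>
#include <sys/mman.h>

struct FreelistEntry {
    FreelistEntry* next;
};

struct EagerBlock {
    size_t chunk_size { 0 };
    FreelistEntry* freelist { nullptr };
    unsigned char* slot { nullptr }; // start of the chunk storage area
};

// Hypothetical setup routine: map a fresh block and push every chunk onto
// the free list right away. Writing the `next` pointer of each chunk is the
// first write to its page, so this loop faults in and commits every page of
// the block before a single allocation has been served from it.
static bool setup_block_eagerly(EagerBlock& block, size_t block_bytes, size_t chunk_size)
{
    void* memory = mmap(nullptr, block_bytes, PROT_READ | PROT_WRITE,
        MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (memory == MAP_FAILED)
        return false;
    block.slot = static_cast<unsigned char*>(memory);
    block.chunk_size = chunk_size;
    for (size_t offset = 0; offset + chunk_size <= block_bytes; offset += chunk_size) {
        auto* entry = reinterpret_cast<FreelistEntry*>(block.slot + offset);
        entry->next = block.freelist;
        block.freelist = entry;
    }
    return true;
}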

This changes malloc to build the free list on demand, which as a
bonus also distributes the latency hit for new blocks more evenly,
because the page faults for the zero pages no longer happen all at
once.
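
Here is a minimal sketch of the lazy scheme (assuming a simplified block
layout; the member names mirror those in the diff below, while
m_chunk_count, allocate_chunk and free_chunk are invented for this
sketch and are not the actual LibC implementation). The free list starts
out empty and only ever holds chunks that have been freed; never-used
chunks are handed out by index, so each page is first touched when a
chunk on it is actually used:

#include <cstddef>

struct FreelistEntry {
    FreelistEntry* next;
};

struct LazyBlock {
    size_t m_size { 0 };                     // chunk size of this block's size class
    size_t m_chunk_count { 0 };              // how many chunks fit in the block (assumed member)
    size_t m_next_lazy_freelist_index { 0 }; // first chunk that has never been handed out
    FreelistEntry* m_freelist { nullptr };   // holds only chunks that have been freed
    unsigned char* m_slot { nullptr };       // start of the chunk storage area
};

// Hypothetical allocation path: recycle a freed chunk if one is available,
// otherwise carve the next never-used chunk out of the slot area. The zero
// page backing that chunk is only faulted in when it is first touched, so
// the cost is spread across allocations instead of paid at block setup.
static void* allocate_chunk(LazyBlock& block)
{
    if (auto* entry = block.m_freelist) {
        block.m_freelist = entry->next;
        return entry;
    }
    if (block.m_next_lazy_freelist_index < block.m_chunk_count)
        return block.m_slot + block.m_next_lazy_freelist_index++ * block.m_size;
    return nullptr; // block is full
}

// Freed chunks go onto the free list, which is the only way it ever grows.
static void free_chunk(LazyBlock& block, void* ptr)
{
    auto* entry = static_cast<FreelistEntry*>(ptr);
    entry->next = block.m_freelist;
    block.m_freelist = entry;
}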
Gunnar Beutner 2021-05-05 05:02:36 +02:00 committed by Andreas Kling
parent 3aaffa2c47
commit 3438829431
2 changed files with 7 additions and 9 deletions


@@ -244,8 +244,13 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_ini
     --block->m_free_chunks;
     void* ptr = block->m_freelist;
+    if (ptr) {
+        block->m_freelist = block->m_freelist->next;
+    } else {
+        ptr = block->m_slot + block->m_next_lazy_freelist_index * block->m_size;
+        block->m_next_lazy_freelist_index++;
+    }
     VERIFY(ptr);
-    block->m_freelist = block->m_freelist->next;
     if (block->is_full()) {
         g_malloc_stats.number_of_blocks_full++;
         dbgln_if(MALLOC_DEBUG, "Block {:p} is now full in size class {}", block, good_size);