
LibC: Lazily initialize malloc chunks

By default malloc manages memory internally in larger blocks. When
one of those blocks is added, we initialize a free list by touching
each of the new block's pages, thereby committing all that memory
upfront.
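
For illustration, here is a minimal standalone sketch of that eager
scheme, using made-up names rather than the actual LibC types:
threading every chunk onto the free list writes to each chunk, and
therefore to every page backing the block.

#include <cstddef>

struct FreelistEntry {
    FreelistEntry* next;
};

// Hypothetical block layout; `slots` points at the storage for the chunks.
struct EagerBlock {
    std::size_t chunk_size { 0 };
    std::size_t chunk_count { 0 };
    FreelistEntry* freelist { nullptr };
    unsigned char* slots { nullptr };

    // Eagerly thread every chunk onto the free list. Writing entry->next
    // touches each chunk in turn, and therefore every page of the block.
    void initialize_freelist()
    {
        freelist = reinterpret_cast<FreelistEntry*>(slots);
        for (std::size_t i = 0; i < chunk_count; ++i) {
            auto* entry = reinterpret_cast<FreelistEntry*>(slots + i * chunk_size);
            entry->next = (i + 1 < chunk_count)
                ? reinterpret_cast<FreelistEntry*>(slots + (i + 1) * chunk_size)
                : nullptr;
        }
    }
};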

This changes malloc to build the free list on demand, which as a
bonus also distributes the latency hit for new blocks more evenly,
because the page faults for the zero pages no longer happen all at
once.
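
For contrast, a minimal sketch of the on-demand approach, again with
hypothetical names rather than the actual SerenityOS implementation:
the block remembers the index of the first chunk that has never been
handed out and only reaches into that memory when a chunk is actually
allocated, so the faults are spread across allocations.

#include <cstddef>

struct FreelistEntry {
    FreelistEntry* next;
};

struct LazyBlock {
    std::size_t chunk_size { 0 };
    std::size_t chunk_count { 0 };
    std::size_t next_lazy_freelist_index { 0 }; // first chunk never handed out
    FreelistEntry* freelist { nullptr };        // chunks that were freed after use
    unsigned char* slots { nullptr };

    void* allocate_chunk()
    {
        if (freelist) {
            // Prefer a chunk that was used and freed earlier.
            void* ptr = freelist;
            freelist = freelist->next;
            return ptr;
        }
        if (next_lazy_freelist_index < chunk_count) {
            // Hand out the next never-used chunk. Nothing is written here,
            // so its page is only faulted in once the caller touches it.
            return slots + next_lazy_freelist_index++ * chunk_size;
        }
        return nullptr; // block is full
    }

    void deallocate_chunk(void* ptr)
    {
        // Freed chunks go onto the regular free list.
        auto* entry = static_cast<FreelistEntry*>(ptr);
        entry->next = freelist;
        freelist = entry;
    }
};
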
Authored by Gunnar Beutner, 2021-05-05 05:02:36 +02:00; committed by Andreas Kling
parent 3aaffa2c47
commit 3438829431
2 changed files with 7 additions and 9 deletions

@@ -244,8 +244,13 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
     --block->m_free_chunks;
     void* ptr = block->m_freelist;
-    VERIFY(ptr);
-    block->m_freelist = block->m_freelist->next;
+    if (ptr) {
+        block->m_freelist = block->m_freelist->next;
+    } else {
+        ptr = block->m_slot + block->m_next_lazy_freelist_index * block->m_size;
+        block->m_next_lazy_freelist_index++;
+    }
+    VERIFY(ptr);
     if (block->is_full()) {
         g_malloc_stats.number_of_blocks_full++;
         dbgln_if(MALLOC_DEBUG, "Block {:p} is now full in size class {}", block, good_size);

@@ -57,18 +57,11 @@ struct ChunkedBlock
         m_magic = MAGIC_PAGE_HEADER;
         m_size = bytes_per_chunk;
         m_free_chunks = chunk_capacity();
-        m_freelist = (FreelistEntry*)chunk(0);
-        for (size_t i = 0; i < chunk_capacity(); ++i) {
-            auto* entry = (FreelistEntry*)chunk(i);
-            if (i != chunk_capacity() - 1)
-                entry->next = (FreelistEntry*)chunk(i + 1);
-            else
-                entry->next = nullptr;
-        }
     }

     ChunkedBlock* m_prev { nullptr };
     ChunkedBlock* m_next { nullptr };
+    size_t m_next_lazy_freelist_index { 0 };
     FreelistEntry* m_freelist { nullptr };
     size_t m_free_chunks { 0 };
     [[gnu::aligned(8)]] unsigned char m_slot[0];