1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 04:17:35 +00:00

Kernel: Scrub kmalloc slabs when allocated and deallocated

This matches the behavior of the generic subheaps (and of the old slab
allocator implementation).
This commit is contained in:
Andreas Kling 2021-12-26 18:53:04 +01:00
parent 3399b6c57f
commit 63e1b904a4

View file

@ -119,11 +119,15 @@ public:
auto* ptr = block->allocate();
if (block->is_full())
m_full_blocks.append(*block);
memset(ptr, KMALLOC_SCRUB_BYTE, m_slab_size);
return ptr;
}
void deallocate(void* ptr)
{
memset(ptr, KFREE_SCRUB_BYTE, m_slab_size);
auto* block = (KmallocSlabBlock*)((FlatPtr)ptr & KmallocSlabBlock::block_mask);
bool block_was_full = block->is_full();
block->deallocate(ptr);