Kernel: Scrub kmalloc slabs when allocated and deallocated
This matches the behavior of the generic subheaps (and the old slab allocator implementation).
This commit is contained in:
parent 3399b6c57f
commit 63e1b904a4

1 changed file with 4 additions and 0 deletions
@@ -119,11 +119,15 @@ public:
         auto* ptr = block->allocate();
         if (block->is_full())
             m_full_blocks.append(*block);
+
+        memset(ptr, KMALLOC_SCRUB_BYTE, m_slab_size);
         return ptr;
     }
 
     void deallocate(void* ptr)
     {
+        memset(ptr, KFREE_SCRUB_BYTE, m_slab_size);
+
         auto* block = (KmallocSlabBlock*)((FlatPtr)ptr & KmallocSlabBlock::block_mask);
         bool block_was_full = block->is_full();
         block->deallocate(ptr);