
Kernel: Don't memset() allocated memory twice in kcalloc()

This patch adds a way to ask the allocator to skip its internal
scrubbing memset operation. Before this change, kcalloc() would scrub
twice: once internally in kmalloc() and then again in kcalloc().

The same mechanism already existed in LibC malloc, and this patch
brings it over to the kernel heap allocator as well.

This solves one FIXME in kcalloc(). :^)
Andreas Kling 2022-12-04 19:04:39 +01:00
parent 3f38f61043
commit d8a3e2fc4e
2 changed files with 27 additions and 14 deletions
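In short, the commit threads a CallerWillInitializeMemory flag from kcalloc() down to the heap allocators so the scrub pass is skipped when the caller is about to overwrite the memory anyway. Below is a condensed, free-standing sketch of that pattern, not the kernel code itself: malloc(), SCRUB_BYTE, my_kmalloc() and my_kcalloc() are stand-ins used only for illustration.

// Condensed sketch only; stand-in names, not the kernel implementation.
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

enum class CallerWillInitializeMemory {
    No,
    Yes,
};

static constexpr unsigned char SCRUB_BYTE = 0xBB; // stand-in for KMALLOC_SCRUB_BYTE

// The allocator only scrubs when the caller will not initialize the memory itself.
static void* allocate(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
{
    void* ptr = malloc(size); // stand-in for the kernel's slab/subheap allocation
    if (ptr && caller_will_initialize_memory == CallerWillInitializeMemory::No)
        memset(ptr, SCRUB_BYTE, size);
    return ptr;
}

// my_kmalloc() keeps the old scrub-on-allocate behavior.
void* my_kmalloc(size_t size)
{
    return allocate(size, CallerWillInitializeMemory::No);
}

// my_kcalloc() opts out of scrubbing, so its zeroing memset is the only pass over the memory.
void* my_kcalloc(size_t count, size_t size)
{
    if (size != 0 && count > SIZE_MAX / size) // overflow check, like Checked<size_t> in the kernel
        return nullptr;
    size_t new_size = count * size;
    void* ptr = allocate(new_size, CallerWillInitializeMemory::Yes);
    if (ptr)
        memset(ptr, 0, new_size);
    return ptr;
}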

Changed file 1 of 2 (the Heap allocator template):

@@ -14,6 +14,11 @@
 namespace Kernel {
 
+enum class CallerWillInitializeMemory {
+    No,
+    Yes,
+};
+
 template<size_t CHUNK_SIZE, unsigned HEAP_SCRUB_BYTE_ALLOC = 0, unsigned HEAP_SCRUB_BYTE_FREE = 0>
 class Heap {
     AK_MAKE_NONCOPYABLE(Heap);
@@ -63,7 +68,7 @@ public:
         return needed_chunks * CHUNK_SIZE + (needed_chunks + 7) / 8;
     }
 
-    void* allocate(size_t size)
+    void* allocate(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
     {
         // We need space for the AllocationHeader at the head of the block.
         size_t real_size = size + sizeof(AllocationHeader);
@@ -92,8 +97,10 @@ public:
         m_bitmap.set_range_and_verify_that_all_bits_flip(first_chunk.value(), chunks_needed, true);
 
         m_allocated_chunks += chunks_needed;
-        if constexpr (HEAP_SCRUB_BYTE_ALLOC != 0) {
-            __builtin_memset(ptr, HEAP_SCRUB_BYTE_ALLOC, (chunks_needed * CHUNK_SIZE) - sizeof(AllocationHeader));
+        if (caller_will_initialize_memory == CallerWillInitializeMemory::No) {
+            if constexpr (HEAP_SCRUB_BYTE_ALLOC != 0) {
+                __builtin_memset(ptr, HEAP_SCRUB_BYTE_ALLOC, (chunks_needed * CHUNK_SIZE) - sizeof(AllocationHeader));
+            }
         }
         return ptr;
     }

Changed file 2 of 2 (the kmalloc implementation):

@@ -123,7 +123,7 @@ public:
     size_t slab_size() const { return m_slab_size; }
 
-    void* allocate()
+    void* allocate(CallerWillInitializeMemory caller_will_initialize_memory)
     {
         if (m_usable_blocks.is_empty()) {
             // FIXME: This allocation wastes `block_size` bytes due to the implementation of kmalloc_aligned().
@@ -141,7 +141,9 @@ public:
         if (block->is_full())
             m_full_blocks.append(*block);
 
-        memset(ptr, KMALLOC_SCRUB_BYTE, m_slab_size);
+        if (caller_will_initialize_memory == CallerWillInitializeMemory::No) {
+            memset(ptr, KMALLOC_SCRUB_BYTE, m_slab_size);
+        }
 
         return ptr;
     }
@@ -220,17 +222,17 @@ struct KmallocGlobalData {
         subheaps.append(*subheap);
     }
 
-    void* allocate(size_t size)
+    void* allocate(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
     {
         VERIFY(!expansion_in_progress);
 
         for (auto& slabheap : slabheaps) {
             if (size <= slabheap.slab_size())
-                return slabheap.allocate();
+                return slabheap.allocate(caller_will_initialize_memory);
         }
 
         for (auto& subheap : subheaps) {
-            if (auto* ptr = subheap.allocator.allocate(size))
+            if (auto* ptr = subheap.allocator.allocate(size, caller_will_initialize_memory))
                 return ptr;
         }
 
@@ -247,14 +249,14 @@ struct KmallocGlobalData {
                 }
             }
             if (did_purge)
-                return allocate(size);
+                return allocate(size, caller_will_initialize_memory);
         }
 
         if (!try_expand(size)) {
            PANIC("OOM when trying to expand kmalloc heap.");
         }
 
-        return allocate(size);
+        return allocate(size, caller_will_initialize_memory);
     }
 
     void deallocate(void* ptr, size_t size)
@@ -419,7 +421,7 @@ UNMAP_AFTER_INIT void kmalloc_init()
     s_lock.initialize();
 }
 
-void* kmalloc(size_t size)
+static void* kmalloc_impl(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
 {
     // Catch bad callers allocating under spinlock.
     if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
@@ -434,7 +436,7 @@ void* kmalloc(size_t size)
         Kernel::dump_backtrace();
     }
 
-    void* ptr = g_kmalloc_global->allocate(size);
+    void* ptr = g_kmalloc_global->allocate(size, caller_will_initialize_memory);
 
     Thread* current_thread = Thread::current();
     if (!current_thread)
@@ -449,13 +451,17 @@ void* kmalloc(size_t size)
     return ptr;
 }
 
+void* kmalloc(size_t size)
+{
+    return kmalloc_impl(size, CallerWillInitializeMemory::No);
+}
+
 void* kcalloc(size_t count, size_t size)
 {
     if (Checked<size_t>::multiplication_would_overflow(count, size))
         return nullptr;
     size_t new_size = count * size;
-    auto* ptr = kmalloc(new_size);
-    // FIXME: Avoid redundantly scrubbing the memory in kmalloc()
+    auto* ptr = kmalloc_impl(new_size, CallerWillInitializeMemory::Yes);
     if (ptr)
         memset(ptr, 0, new_size);
     return ptr;