From c4acfdc0fbc0a34e0d9a9eff78e5792b2ad45caf Mon Sep 17 00:00:00 2001
From: Gunnar Beutner
Date: Sat, 26 Jun 2021 00:57:19 +0200
Subject: [PATCH] Kernel: Add slab allocator for 256 bytes

Our types are getting a tiny bit larger for x86_64 so we need another
slab allocator to deal with that.
---
 Kernel/Heap/SlabAllocator.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/Kernel/Heap/SlabAllocator.cpp b/Kernel/Heap/SlabAllocator.cpp
index c83f3fd72c..7e768eef9e 100644
--- a/Kernel/Heap/SlabAllocator.cpp
+++ b/Kernel/Heap/SlabAllocator.cpp
@@ -110,6 +110,7 @@ static SlabAllocator<16> s_slab_allocator_16;
 static SlabAllocator<32> s_slab_allocator_32;
 static SlabAllocator<64> s_slab_allocator_64;
 static SlabAllocator<128> s_slab_allocator_128;
+static SlabAllocator<256> s_slab_allocator_256;
 
 #if ARCH(I386)
 static_assert(sizeof(Region) <= s_slab_allocator_128.slab_size());
@@ -122,6 +123,7 @@ void for_each_allocator(Callback callback)
     callback(s_slab_allocator_32);
     callback(s_slab_allocator_64);
     callback(s_slab_allocator_128);
+    callback(s_slab_allocator_256);
 }
 
 UNMAP_AFTER_INIT void slab_alloc_init()
@@ -130,6 +132,7 @@ UNMAP_AFTER_INIT void slab_alloc_init()
     s_slab_allocator_32.init(128 * KiB);
     s_slab_allocator_64.init(512 * KiB);
     s_slab_allocator_128.init(512 * KiB);
+    s_slab_allocator_256.init(128 * KiB);
 }
 
 void* slab_alloc(size_t slab_size)
@@ -142,6 +145,8 @@ void* slab_alloc(size_t slab_size)
         return s_slab_allocator_64.alloc();
     if (slab_size <= 128)
         return s_slab_allocator_128.alloc();
+    if (slab_size <= 256)
+        return s_slab_allocator_256.alloc();
     VERIFY_NOT_REACHED();
 }
 
@@ -155,6 +160,8 @@ void slab_dealloc(void* ptr, size_t slab_size)
         return s_slab_allocator_64.dealloc(ptr);
     if (slab_size <= 128)
         return s_slab_allocator_128.dealloc(ptr);
+    if (slab_size <= 256)
+        return s_slab_allocator_256.dealloc(ptr);
     VERIFY_NOT_REACHED();
 }
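
Below is a minimal, standalone sketch of the size-class dispatch this patch extends. It is an illustration only: FakeSlabAllocator, its malloc-backed pools, and main() are stand-ins for the kernel's real SlabAllocator<N> and callers, chosen so the example compiles and runs in userspace. The dispatch thresholds mirror the ones visible in the diff.

// sketch.cpp -- hypothetical illustration, not SerenityOS kernel code
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Simplified stand-in for the kernel's SlabAllocator<N>: each "slab" is
// plain heap memory here so the sketch is runnable outside the kernel.
template<size_t templated_slab_size>
struct FakeSlabAllocator {
    static constexpr size_t slab_size() { return templated_slab_size; }
    void* alloc() { return std::malloc(templated_slab_size); }
    void dealloc(void* ptr) { std::free(ptr); }
};

static FakeSlabAllocator<16> s_slab_allocator_16;
static FakeSlabAllocator<32> s_slab_allocator_32;
static FakeSlabAllocator<64> s_slab_allocator_64;
static FakeSlabAllocator<128> s_slab_allocator_128;
static FakeSlabAllocator<256> s_slab_allocator_256; // new size class added by the patch

// Mirrors slab_alloc(): route the request to the smallest size class that fits.
void* slab_alloc(size_t slab_size)
{
    if (slab_size <= 16)
        return s_slab_allocator_16.alloc();
    if (slab_size <= 32)
        return s_slab_allocator_32.alloc();
    if (slab_size <= 64)
        return s_slab_allocator_64.alloc();
    if (slab_size <= 128)
        return s_slab_allocator_128.alloc();
    if (slab_size <= 256)
        return s_slab_allocator_256.alloc(); // before the patch, 129..256 bytes fell through to the assertion
    assert(false && "no slab allocator for this size");
    return nullptr;
}

int main()
{
    // A type that grows past 128 bytes on x86_64 (for example because of
    // 8-byte pointers) now lands in the 256-byte size class instead of
    // hitting VERIFY_NOT_REACHED().
    void* p = slab_alloc(200);
    std::printf("allocated %p from the 256-byte size class\n", p);
    std::free(p);
    return 0;
}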