	Kernel: Add a simple slab allocator for small allocations
This is a freelist allocator with static size classes that works as a complement to the generic kmalloc(). It's a lot faster than kmalloc(), since an allocation is just a pop from the freelist. It's also significantly more compact when there are many objects smaller than the minimum kmalloc chunk size (32 bytes).

This patch enables it for the Region and PhysicalPage classes. In the PhysicalPage case it's a huge improvement: an 8-byte PhysicalPage no longer occupies a 32-byte kmalloc chunk, so we no longer waste 75% of the storage allocated.

There are a number of ways this can still be improved, so let's keep working on it going forward.
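As a minimal illustration of the freelist idea described above (a sketch only, not code from this commit; the names are made up), both operations are just a pointer swap on a singly linked list of fixed-size slots:

struct FreeSlab {
    FreeSlab* next;
};

static FreeSlab* s_freelist; // assumed to be pre-linked over a static chunk at init time

void* freelist_alloc()
{
    FreeSlab* slab = s_freelist; // pop the head of the list
    s_freelist = slab->next;
    return slab;
}

void freelist_dealloc(void* ptr)
{
    auto* slab = static_cast<FreeSlab*>(ptr);
    slab->next = s_freelist; // push the slot back onto the list
    s_freelist = slab;
}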
commit 5d491fa1cd by Andreas Kling
parent 1c692e87a6
7 changed files with 150 additions and 2 deletions
Kernel/FileSystem/ProcFS.cpp
@@ -644,6 +644,11 @@ Optional<KBuffer> procfs$memstat(InodeIdentifier)
     json.add("super_physical_available", MM.super_physical_pages());
     json.add("kmalloc_call_count", g_kmalloc_call_count);
     json.add("kfree_call_count", g_kfree_call_count);
+    slab_alloc_stats([&json](size_t slab_size, size_t num_allocated, size_t num_free) {
+        auto prefix = String::format("slab_%zu", slab_size);
+        json.add(String::format("%s_num_allocated", prefix.characters()), num_allocated);
+        json.add(String::format("%s_num_free", prefix.characters()), num_free);
+    });
     json.finish();
     return builder.build();
 }
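For reference, the callback above adds two keys per size class to the /proc/memstat JSON, named after the slab size; for example the 8-byte class contributes entries of this shape (the values here are purely illustrative):

    "slab_8_num_allocated": 123,
    "slab_8_num_free": 16261,

with matching slab_16_*, slab_32_* and slab_52_* entries.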
							
								
								
									
Kernel/Heap/SlabAllocator.cpp (new file, 118 lines)
@@ -0,0 +1,118 @@
#include <AK/Assertions.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/VM/Region.h>

template<size_t templated_slab_size>
class SlabAllocator {
public:
    SlabAllocator() {}

    void init(size_t size)
    {
        void* base = kmalloc_eternal(size);
        FreeSlab* slabs = (FreeSlab*)base;
        size_t slab_count = size / templated_slab_size;
        for (size_t i = 1; i < slab_count; ++i) {
            slabs[i].next = &slabs[i - 1];
        }
        slabs[0].next = nullptr;
        m_freelist = &slabs[slab_count - 1];
        m_num_allocated = 0;
        m_num_free = slab_count;
    }

    constexpr size_t slab_size() const { return templated_slab_size; }

    void* alloc()
    {
        InterruptDisabler disabler;
        ASSERT(m_freelist);
        void* ptr = m_freelist;
        m_freelist = m_freelist->next;
        ++m_num_allocated;
        --m_num_free;
        return ptr;
    }

    void dealloc(void* ptr)
    {
        InterruptDisabler disabler;
        ASSERT(ptr);
        ((FreeSlab*)ptr)->next = m_freelist;
        m_freelist = (FreeSlab*)ptr;
        --m_num_allocated;
        ++m_num_free;
    }

    size_t num_allocated() const { return m_num_allocated; }
    size_t num_free() const { return m_num_free; }

private:
    struct FreeSlab {
        FreeSlab* next { nullptr };
        char padding[templated_slab_size - sizeof(FreeSlab*)];
    };

    FreeSlab* m_freelist { nullptr };
    size_t m_num_allocated { 0 };
    size_t m_num_free { 0 };

    static_assert(sizeof(FreeSlab) == templated_slab_size);
};

static SlabAllocator<8> s_slab_allocator_8;
static SlabAllocator<16> s_slab_allocator_16;
static SlabAllocator<32> s_slab_allocator_32;
static SlabAllocator<52> s_slab_allocator_52;

static_assert(sizeof(Region) <= s_slab_allocator_52.slab_size());

template<typename Callback>
void for_each_allocator(Callback callback)
{
    callback(s_slab_allocator_8);
    callback(s_slab_allocator_16);
    callback(s_slab_allocator_32);
    callback(s_slab_allocator_52);
}

void slab_alloc_init()
{
    for_each_allocator([&](auto& allocator) {
        allocator.init(128 * KB);
    });
}

void* slab_alloc(size_t slab_size)
{
    if (slab_size <= 8)
        return s_slab_allocator_8.alloc();
    if (slab_size <= 16)
        return s_slab_allocator_16.alloc();
    if (slab_size <= 32)
        return s_slab_allocator_32.alloc();
    if (slab_size <= 52)
        return s_slab_allocator_52.alloc();
    ASSERT_NOT_REACHED();
}

void slab_dealloc(void* ptr, size_t slab_size)
{
    if (slab_size <= 8)
        return s_slab_allocator_8.dealloc(ptr);
    if (slab_size <= 16)
        return s_slab_allocator_16.dealloc(ptr);
    if (slab_size <= 32)
        return s_slab_allocator_32.dealloc(ptr);
    if (slab_size <= 52)
        return s_slab_allocator_52.dealloc(ptr);
    ASSERT_NOT_REACHED();
}

void slab_alloc_stats(Function<void(size_t slab_size, size_t allocated, size_t free)> callback)
{
    for_each_allocator([&](auto& allocator) {
        callback(allocator.slab_size(), allocator.num_allocated(), allocator.num_free());
    });
}
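A hypothetical direct call site for the dispatch functions above (kernel classes normally go through the MAKE_SLAB_ALLOCATED macro instead): a request is served by the smallest size class that fits, and the same size must be passed back on free so the slot returns to the right freelist.

// Sketch only; slab_alloc_example() is not part of this commit.
static void slab_alloc_example()
{
    void* ptr = slab_alloc(12); // 12 bytes falls into the 16-byte size class
    // ... use the storage ...
    slab_dealloc(ptr, 12); // same size on free, so the slot returns to s_slab_allocator_16
}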
							
								
								
									
Kernel/Heap/SlabAllocator.h (new file, 18 lines)
@@ -0,0 +1,18 @@
#pragma once

#include <AK/Function.h>
#include <AK/Types.h>

class JsonObjectSerializer;

void* slab_alloc(size_t slab_size);
void slab_dealloc(void*, size_t slab_size);
void slab_alloc_init();
void slab_alloc_stats(Function<void(size_t slab_size, size_t allocated, size_t free)>);

#define MAKE_SLAB_ALLOCATED(type)                                        \
public:                                                                  \
    void* operator new(size_t) { return slab_alloc(sizeof(type)); }      \
    void operator delete(void* ptr) { slab_dealloc(ptr, sizeof(type)); } \
                                                                         \
private:
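A usage sketch for the macro above, with a made-up class (not from this commit, and assuming Kernel/Heap/SlabAllocator.h is included): the overridden operator new/delete route heap allocations of the class through the matching slab size class instead of kmalloc().

// Hypothetical example class.
class SmallThing {
    MAKE_SLAB_ALLOCATED(SmallThing)
public:
    int value { 0 };
};

// `new SmallThing` now calls slab_alloc(sizeof(SmallThing)) and `delete`
// calls slab_dealloc(ptr, sizeof(SmallThing)), just like Region and
// PhysicalPage below.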
Kernel/Makefile
@@ -3,6 +3,7 @@ include ../Makefile.common
 KERNEL_OBJS = \
        init.o \
        Heap/kmalloc.o \
+       Heap/SlabAllocator.o \
        StdLib.o \
        Lock.o \
        Arch/i386/CPU.o \
Kernel/VM/PhysicalPage.h
@@ -2,6 +2,7 @@
 
 #include <AK/NonnullRefPtr.h>
 #include <Kernel/Assertions.h>
+#include <Kernel/Heap/SlabAllocator.h>
 #include <Kernel/VM/PhysicalAddress.h>
 
 class PhysicalPage {
@@ -9,6 +10,7 @@ class PhysicalPage {
     friend class PageDirectory;
     friend class VMObject;
 
+    MAKE_SLAB_ALLOCATED(PhysicalPage)
 public:
     PhysicalAddress paddr() const { return m_paddr; }
 
Kernel/VM/Region.h
@@ -1,18 +1,20 @@
 #pragma once
 
-#include <AK/String.h>
 #include <AK/Bitmap.h>
 #include <AK/InlineLinkedList.h>
+#include <AK/String.h>
+#include <Kernel/Heap/SlabAllocator.h>
 #include <Kernel/VM/PageDirectory.h>
 #include <Kernel/VM/RangeAllocator.h>
 
 class Inode;
 class VMObject;
 
-class Region : public RefCounted<Region>
+class Region final : public RefCounted<Region>
     , public InlineLinkedListNode<Region> {
     friend class MemoryManager;
 
+    MAKE_SLAB_ALLOCATED(Region)
 public:
     enum Access {
         Read = 1,
Kernel/init.cpp
@@ -29,6 +29,7 @@
 #include <Kernel/FileSystem/ProcFS.h>
 #include <Kernel/FileSystem/TmpFS.h>
 #include <Kernel/FileSystem/VirtualFileSystem.h>
+#include <Kernel/Heap/SlabAllocator.h>
 #include <Kernel/Heap/kmalloc.h>
 #include <Kernel/KParams.h>
 #include <Kernel/Multiboot.h>
@@ -187,6 +188,7 @@ extern "C" [[noreturn]] void init()
     sse_init();
 
     kmalloc_init();
+    slab_alloc_init();
     init_ksyms();
 
     // must come after kmalloc_init because we use AK_MAKE_ETERNAL in KParams