
Kernel: Remove old SlabAllocator :^)

This is no longer useful since kmalloc() does automatic slab allocation
without any of the limitations of the old SlabAllocator. :^)
Andreas Kling 2021-12-26 18:50:03 +01:00
parent 66d35f2936
commit 3399b6c57f
9 changed files with 0 additions and 245 deletions
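
As background for the commit message: a kmalloc() with built-in slab support typically keeps a handful of fixed size classes and only falls back to the general-purpose heap for larger requests. The sketch below illustrates that size-class selection only; the names and exact behaviour are assumptions for illustration, not SerenityOS's actual kmalloc() internals (only the class sizes 16..256 mirror the allocators removed below).

// Illustrative size-class lookup of the kind a slab-aware kmalloc() might use.
// Nothing here is taken from the tree except the list of slab sizes.
#include <cstddef>

static constexpr size_t slab_size_classes[] = { 16, 32, 64, 128, 256 };

// Return the smallest slab class that fits the request, or 0 to signal
// "too big for a slab, use the regular heap path".
constexpr size_t size_class_for(size_t size)
{
    for (size_t slab_size : slab_size_classes) {
        if (size <= slab_size)
            return slab_size;
    }
    return 0;
}

static_assert(size_class_for(24) == 32);
static_assert(size_class_for(4096) == 0);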

View file

@@ -368,8 +368,6 @@ void page_fault_handler(TrapFrame* trap)
constexpr FlatPtr free_scrub_pattern = explode_byte(FREE_SCRUB_BYTE);
constexpr FlatPtr kmalloc_scrub_pattern = explode_byte(KMALLOC_SCRUB_BYTE);
constexpr FlatPtr kfree_scrub_pattern = explode_byte(KFREE_SCRUB_BYTE);
constexpr FlatPtr slab_alloc_scrub_pattern = explode_byte(SLAB_ALLOC_SCRUB_BYTE);
constexpr FlatPtr slab_dealloc_scrub_pattern = explode_byte(SLAB_DEALLOC_SCRUB_BYTE);
if ((fault_address & 0xffff0000) == (malloc_scrub_pattern & 0xffff0000)) {
dbgln("Note: Address {} looks like it may be uninitialized malloc() memory", VirtualAddress(fault_address));
} else if ((fault_address & 0xffff0000) == (free_scrub_pattern & 0xffff0000)) {
@@ -378,10 +376,6 @@ void page_fault_handler(TrapFrame* trap)
dbgln("Note: Address {} looks like it may be uninitialized kmalloc() memory", VirtualAddress(fault_address));
} else if ((fault_address & 0xffff0000) == (kfree_scrub_pattern & 0xffff0000)) {
dbgln("Note: Address {} looks like it may be recently kfree()'d memory", VirtualAddress(fault_address));
} else if ((fault_address & 0xffff0000) == (slab_alloc_scrub_pattern & 0xffff0000)) {
dbgln("Note: Address {} looks like it may be uninitialized slab_alloc() memory", VirtualAddress(fault_address));
} else if ((fault_address & 0xffff0000) == (slab_dealloc_scrub_pattern & 0xffff0000)) {
dbgln("Note: Address {} looks like it may be recently slab_dealloc()'d memory", VirtualAddress(fault_address));
} else if (fault_address < 4096) {
dbgln("Note: Address {} looks like a possible nullptr dereference", VirtualAddress(fault_address));
} else if constexpr (SANITIZE_PTRS) {
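
The checks above compare the upper half-word of the faulting address against the byte patterns that allocation and free paths scrub memory with. A minimal sketch of the idea, assuming explode_byte() simply repeats one byte across a pointer-sized word (that matches its use here, but the snippet is not the AK implementation):

#include <cstdint>

using FlatPtr = uintptr_t;

// Repeat one byte across every byte of a pointer-sized word, e.g.
// 0xab -> 0xabababab (or 0xabababab'abababab on 64-bit targets).
constexpr FlatPtr explode_byte(uint8_t b)
{
    FlatPtr value = 0;
    for (unsigned i = 0; i < sizeof(FlatPtr); ++i)
        value = (value << 8) | b;
    return value;
}

// A pointer loaded from memory scrubbed with 0xab faults at an address whose
// upper half-word still carries the pattern, which is what the handler keys on.
static_assert((explode_byte(0xab) & 0xffff0000) == 0xabab0000);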

View file

@@ -14,7 +14,6 @@ elseif("${SERENITY_ARCH}" STREQUAL "x86_64")
endif()
set(KERNEL_HEAP_SOURCES
Heap/SlabAllocator.cpp
Heap/kmalloc.cpp
)
@@ -446,7 +445,6 @@ if (ENABLE_KERNEL_COVERAGE_COLLECTION)
../Kernel/Devices/KCOVInstance.cpp
../Kernel/FileSystem/File.cpp
../Kernel/FileSystem/OpenFileDescription.cpp
../Kernel/Heap/SlabAllocator.cpp
../Kernel/init.cpp
../Kernel/SanCov.cpp
# GCC assumes that the caller saves registers for functions according

View file

@@ -12,7 +12,6 @@
#include <AK/RefPtr.h>
#include <AK/String.h>
#include <Kernel/Forward.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/KString.h>
namespace Kernel {

View file

@@ -412,14 +412,6 @@ private:
json.add("super_physical_available", system_memory.super_physical_pages - system_memory.super_physical_pages_used);
json.add("kmalloc_call_count", stats.kmalloc_call_count);
json.add("kfree_call_count", stats.kfree_call_count);
TRY(slab_alloc_stats([&json](size_t slab_size, size_t num_allocated, size_t num_free) -> ErrorOr<void> {
auto prefix = TRY(KString::formatted("slab_{}", slab_size));
auto formatted_num_allocated = TRY(KString::formatted("{}_num_allocated", prefix));
auto formatted_num_free = TRY(KString::formatted("{}_num_free", prefix));
json.add(formatted_num_allocated->view(), num_allocated);
json.add(formatted_num_free->view(), num_free);
return {};
}));
json.finish();
return {};
}

View file

@@ -1,180 +0,0 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Assertions.h>
#include <AK/Memory.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/Sections.h>
#define SANITIZE_SLABS
namespace Kernel {
template<size_t templated_slab_size>
class SlabAllocator {
public:
SlabAllocator() = default;
void init(size_t size)
{
m_base = kmalloc_eternal(size);
m_end = (u8*)m_base + size;
FreeSlab* slabs = (FreeSlab*)m_base;
m_slab_count = size / templated_slab_size;
for (size_t i = 1; i < m_slab_count; ++i) {
slabs[i].next = &slabs[i - 1];
}
slabs[0].next = nullptr;
m_freelist = &slabs[m_slab_count - 1];
m_num_allocated = 0;
}
constexpr size_t slab_size() const { return templated_slab_size; }
size_t slab_count() const { return m_slab_count; }
void* alloc()
{
FreeSlab* free_slab;
{
// We want to avoid being swapped out in the middle of this
ScopedCritical critical;
FreeSlab* next_free;
free_slab = m_freelist.load(AK::memory_order_consume);
do {
if (!free_slab)
return kmalloc(slab_size());
// It's possible another processor is doing the same thing at
// the same time, so next_free *can* be a bogus pointer. However,
// in that case compare_exchange_strong would fail and we would
// try again.
next_free = free_slab->next;
} while (!m_freelist.compare_exchange_strong(free_slab, next_free, AK::memory_order_acq_rel));
m_num_allocated++;
}
#ifdef SANITIZE_SLABS
memset(free_slab, SLAB_ALLOC_SCRUB_BYTE, slab_size());
#endif
return free_slab;
}
void dealloc(void* ptr)
{
VERIFY(ptr);
if (ptr < m_base || ptr >= m_end) {
kfree_sized(ptr, slab_size());
return;
}
FreeSlab* free_slab = (FreeSlab*)ptr;
#ifdef SANITIZE_SLABS
if (slab_size() > sizeof(FreeSlab*))
memset(free_slab->padding, SLAB_DEALLOC_SCRUB_BYTE, sizeof(FreeSlab::padding));
#endif
// We want to avoid being swapped out in the middle of this
ScopedCritical critical;
FreeSlab* next_free = m_freelist.load(AK::memory_order_consume);
do {
free_slab->next = next_free;
} while (!m_freelist.compare_exchange_strong(next_free, free_slab, AK::memory_order_acq_rel));
m_num_allocated--;
}
size_t num_allocated() const { return m_num_allocated; }
size_t num_free() const { return m_slab_count - m_num_allocated; }
private:
struct FreeSlab {
FreeSlab* next;
char padding[templated_slab_size - sizeof(FreeSlab*)];
};
Atomic<FreeSlab*> m_freelist { nullptr };
Atomic<size_t, AK::MemoryOrder::memory_order_relaxed> m_num_allocated { 0 };
size_t m_slab_count { 0 };
void* m_base { nullptr };
void* m_end { nullptr };
static_assert(AssertSize<FreeSlab, templated_slab_size>());
};
static SlabAllocator<16> s_slab_allocator_16;
static SlabAllocator<32> s_slab_allocator_32;
static SlabAllocator<64> s_slab_allocator_64;
static SlabAllocator<128> s_slab_allocator_128;
static SlabAllocator<256> s_slab_allocator_256;
#if ARCH(I386)
static_assert(sizeof(Memory::Region) <= s_slab_allocator_128.slab_size());
#endif
template<typename Callback>
ErrorOr<void> for_each_allocator(Callback callback)
{
TRY(callback(s_slab_allocator_16));
TRY(callback(s_slab_allocator_32));
TRY(callback(s_slab_allocator_64));
TRY(callback(s_slab_allocator_128));
TRY(callback(s_slab_allocator_256));
return {};
}
UNMAP_AFTER_INIT void slab_alloc_init()
{
s_slab_allocator_16.init(128 * KiB);
s_slab_allocator_32.init(128 * KiB);
s_slab_allocator_64.init(512 * KiB);
s_slab_allocator_128.init(512 * KiB);
s_slab_allocator_256.init(128 * KiB);
}
void* slab_alloc(size_t slab_size)
{
if (slab_size <= 16)
return s_slab_allocator_16.alloc();
if (slab_size <= 32)
return s_slab_allocator_32.alloc();
if (slab_size <= 64)
return s_slab_allocator_64.alloc();
if (slab_size <= 128)
return s_slab_allocator_128.alloc();
if (slab_size <= 256)
return s_slab_allocator_256.alloc();
VERIFY_NOT_REACHED();
}
void slab_dealloc(void* ptr, size_t slab_size)
{
if (slab_size <= 16)
return s_slab_allocator_16.dealloc(ptr);
if (slab_size <= 32)
return s_slab_allocator_32.dealloc(ptr);
if (slab_size <= 64)
return s_slab_allocator_64.dealloc(ptr);
if (slab_size <= 128)
return s_slab_allocator_128.dealloc(ptr);
if (slab_size <= 256)
return s_slab_allocator_256.dealloc(ptr);
VERIFY_NOT_REACHED();
}
ErrorOr<void> slab_alloc_stats(Function<ErrorOr<void>(size_t slab_size, size_t allocated, size_t free)> callback)
{
TRY(for_each_allocator([&](auto& allocator) -> ErrorOr<void> {
auto num_allocated = allocator.num_allocated();
auto num_free = allocator.slab_count() - num_allocated;
TRY(callback(allocator.slab_size(), num_allocated, num_free));
return {};
}));
return {};
}
}
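
The heart of the removed allocator is the lock-free singly linked free list in alloc() and dealloc() above. For readers unfamiliar with the pattern, here is a minimal self-contained sketch of the same push/pop scheme using std::atomic instead of AK's Atomic; the kernel version additionally disables preemption with ScopedCritical and falls back to kmalloc() when the list is empty.

#include <atomic>

struct FreeSlab {
    FreeSlab* next;
};

std::atomic<FreeSlab*> freelist { nullptr };

// Pop one slab: retry the compare-exchange until the head we read is still
// the head when we swap in its successor.
FreeSlab* pop()
{
    FreeSlab* head = freelist.load(std::memory_order_acquire);
    FreeSlab* next;
    do {
        if (!head)
            return nullptr; // empty; the kernel fell back to kmalloc() here
        next = head->next;
    } while (!freelist.compare_exchange_strong(head, next, std::memory_order_acq_rel));
    return head;
}

// Push a slab back: link it in front of the current head, retrying on races.
void push(FreeSlab* slab)
{
    FreeSlab* head = freelist.load(std::memory_order_acquire);
    do {
        slab->next = head;
    } while (!freelist.compare_exchange_strong(head, slab, std::memory_order_acq_rel));
}

As the comment in the original alloc() notes, another CPU may pop and reuse the head between the load and the read of head->next, so next can be stale; in that case compare_exchange_strong fails and the loop retries with the fresh head.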

View file

@@ -1,44 +0,0 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Error.h>
#include <AK/Function.h>
#include <AK/Types.h>
namespace Kernel {
#define SLAB_ALLOC_SCRUB_BYTE 0xab
#define SLAB_DEALLOC_SCRUB_BYTE 0xbc
void* slab_alloc(size_t slab_size);
void slab_dealloc(void*, size_t slab_size);
void slab_alloc_init();
ErrorOr<void> slab_alloc_stats(Function<ErrorOr<void>(size_t slab_size, size_t allocated, size_t free)>);
#define MAKE_SLAB_ALLOCATED(type) \
public: \
[[nodiscard]] void* operator new(size_t) \
{ \
void* ptr = slab_alloc(sizeof(type)); \
VERIFY(ptr); \
return ptr; \
} \
[[nodiscard]] void* operator new(size_t, const std::nothrow_t&) noexcept \
{ \
return slab_alloc(sizeof(type)); \
} \
void operator delete(void* ptr) noexcept \
{ \
if (!ptr) \
return; \
slab_dealloc(ptr, sizeof(type)); \
} \
\
private:
}
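
The MAKE_SLAB_ALLOCATED macro above routed a class's operator new/delete through the slab allocator. A hypothetical example of how a class opted in (Widget is made up for illustration; the static_assert on sizeof(Memory::Region) in the deleted .cpp suggests Region was a real user of this pattern):

// Hypothetical user of the removed macro, not taken from the tree.
class Widget {
    MAKE_SLAB_ALLOCATED(Widget);

public:
    Widget() = default;

private:
    int m_value { 0 };
};

// With the macro in place, `new Widget` called slab_alloc(sizeof(Widget)) and
// `delete` called slab_dealloc(ptr, sizeof(Widget)); after this commit such
// classes simply go through the global kmalloc()-backed operators.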

View file

@@ -10,7 +10,6 @@
#include <AK/IntrusiveList.h>
#include <AK/Weakable.h>
#include <Kernel/Forward.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/KString.h>
#include <Kernel/Memory/PageFaultResponse.h>
#include <Kernel/Memory/VirtualRangeAllocator.h>

View file

@@ -9,7 +9,6 @@
#include <AK/IntrusiveList.h>
#include <Kernel/Forward.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/Locking/SpinlockProtected.h>
namespace Kernel {

View file

@@ -33,7 +33,6 @@
#include <Kernel/Firmware/ACPI/Parser.h>
#include <Kernel/Firmware/SysFSFirmware.h>
#include <Kernel/Graphics/GraphicsManagement.h>
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/Heap/kmalloc.h>
#include <Kernel/Interrupts/APIC.h>
#include <Kernel/Interrupts/InterruptManagement.h>
@@ -179,7 +178,6 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init(BootInfo const& boot_info)
for (ctor_func_t* ctor = start_heap_ctors; ctor < end_heap_ctors; ctor++)
(*ctor)();
kmalloc_init();
slab_alloc_init();
load_kernel_symbol_table();