Kernel: Make SlabAllocator fall back to kmalloc() when slabs run out

This is obviously not ideal, and it would be better to teach it how to
allocate more pages, etc. But since the physical page allocator itself
currently uses SlabAllocator, it's a little bit tricky :^)
Author: Andreas Kling
Date:   2019-10-10 11:56:57 +02:00
Parent: ebacef36ee
Commit: a6e4c504e2

@@ -10,8 +10,9 @@ public:
     void init(size_t size)
     {
-        void* base = kmalloc_eternal(size);
-        FreeSlab* slabs = (FreeSlab*)base;
+        m_base = kmalloc_eternal(size);
+        m_end = (u8*)m_base + size;
+        FreeSlab* slabs = (FreeSlab*)m_base;
         size_t slab_count = size / templated_slab_size;
         for (size_t i = 1; i < slab_count; ++i) {
             slabs[i].next = &slabs[i - 1];
@@ -27,6 +28,8 @@ public:
     void* alloc()
     {
         InterruptDisabler disabler;
+        if (!m_freelist)
+            return kmalloc(slab_size());
         ASSERT(m_freelist);
         void* ptr = m_freelist;
         m_freelist = m_freelist->next;
@@ -39,6 +42,10 @@ public:
     {
         InterruptDisabler disabler;
         ASSERT(ptr);
+        if (ptr < m_base || ptr >= m_end) {
+            kfree(ptr);
+            return;
+        }
         ((FreeSlab*)ptr)->next = m_freelist;
         m_freelist = (FreeSlab*)ptr;
         ++m_num_allocated;
@@ -57,6 +64,8 @@ private:
     FreeSlab* m_freelist { nullptr };
     size_t m_num_allocated { 0 };
     size_t m_num_free { 0 };
+    void* m_base { nullptr };
+    void* m_end { nullptr };
     static_assert(sizeof(FreeSlab) == templated_slab_size);
 };
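
For context, here is a minimal userspace sketch of the same fallback pattern. It is an illustration of the idea, not the kernel code: malloc()/free() stand in for kmalloc()/kfree(), the arena comes from malloc() rather than kmalloc_eternal(), and the kernel-only pieces (InterruptDisabler, the m_num_allocated/m_num_free counters) are left out.

// Standalone sketch (assumed names, not SerenityOS code). Compiles with any
// C++17 compiler; malloc()/free() play the role of kmalloc()/kfree().
#include <cassert>
#include <cstddef>
#include <cstdlib>

template<size_t slab_size>
class SlabAllocator {
public:
    void init(size_t size)
    {
        // Carve one fixed arena into a singly linked freelist of slabs,
        // remembering its bounds so dealloc() can classify pointers later.
        m_base = std::malloc(size);
        m_end = (char*)m_base + size;
        FreeSlab* slabs = (FreeSlab*)m_base;
        size_t slab_count = size / slab_size;
        for (size_t i = 1; i < slab_count; ++i)
            slabs[i].next = &slabs[i - 1];
        slabs[0].next = nullptr;
        m_freelist = &slabs[slab_count - 1];
    }

    void* alloc()
    {
        // The fallback from the commit: with no free slab left, defer to
        // the general-purpose allocator instead of asserting.
        if (!m_freelist)
            return std::malloc(slab_size);
        void* ptr = m_freelist;
        m_freelist = m_freelist->next;
        return ptr;
    }

    void dealloc(void* ptr)
    {
        assert(ptr);
        // A pointer outside [m_base, m_end) cannot be one of our slabs,
        // so it must have come from the fallback path.
        if (ptr < m_base || ptr >= m_end) {
            std::free(ptr);
            return;
        }
        ((FreeSlab*)ptr)->next = m_freelist;
        m_freelist = (FreeSlab*)ptr;
    }

private:
    struct FreeSlab {
        FreeSlab* next;
    };
    static_assert(sizeof(FreeSlab) <= slab_size, "slab must fit the freelist link");

    FreeSlab* m_freelist { nullptr };
    void* m_base { nullptr };
    void* m_end { nullptr };
};

// Hypothetical usage: an arena of exactly two slabs, so the third alloc()
// exercises the fallback and its dealloc() exercises the range check.
int main()
{
    SlabAllocator<32> allocator;
    allocator.init(64);
    void* a = allocator.alloc(); // served from the arena
    void* b = allocator.alloc(); // served from the arena
    void* c = allocator.alloc(); // freelist empty -> malloc() fallback
    allocator.dealloc(c);        // out of range -> free()
    allocator.dealloc(b);        // back onto the freelist
    allocator.dealloc(a);
}

The range check in dealloc() is the crux of the design: any pointer the allocator handed out either lies inside the fixed arena [m_base, m_end) or came from the fallback path, so an out-of-range pointer can safely be routed to the general-purpose free.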