diff --git a/Kernel/Heap/SlabAllocator.cpp b/Kernel/Heap/SlabAllocator.cpp
index c27b86857e..b3ba8c1cce 100644
--- a/Kernel/Heap/SlabAllocator.cpp
+++ b/Kernel/Heap/SlabAllocator.cpp
@@ -114,6 +114,7 @@ private:
 static SlabAllocator<16> s_slab_allocator_16;
 static SlabAllocator<32> s_slab_allocator_32;
 static SlabAllocator<64> s_slab_allocator_64;
+static SlabAllocator<128> s_slab_allocator_128;
 
 static_assert(sizeof(Region) <= s_slab_allocator_64.slab_size());
 
@@ -130,6 +131,7 @@ void slab_alloc_init()
     s_slab_allocator_16.init(128 * KB);
     s_slab_allocator_32.init(128 * KB);
     s_slab_allocator_64.init(512 * KB);
+    s_slab_allocator_128.init(512 * KB);
 }
 
 void* slab_alloc(size_t slab_size)
@@ -140,6 +142,8 @@ void* slab_alloc(size_t slab_size)
         return s_slab_allocator_32.alloc();
     if (slab_size <= 64)
         return s_slab_allocator_64.alloc();
+    if (slab_size <= 128)
+        return s_slab_allocator_128.alloc();
     ASSERT_NOT_REACHED();
 }
 
@@ -151,6 +155,8 @@ void slab_dealloc(void* ptr, size_t slab_size)
         return s_slab_allocator_32.dealloc(ptr);
     if (slab_size <= 64)
         return s_slab_allocator_64.dealloc(ptr);
+    if (slab_size <= 128)
+        return s_slab_allocator_128.dealloc(ptr);
     ASSERT_NOT_REACHED();
 }
 
diff --git a/Kernel/WaitQueue.cpp b/Kernel/WaitQueue.cpp
index c5122f64f3..77ec3caec2 100644
--- a/Kernel/WaitQueue.cpp
+++ b/Kernel/WaitQueue.cpp
@@ -39,13 +39,13 @@ WaitQueue::~WaitQueue()
 
 void WaitQueue::enqueue(Thread& thread)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     m_threads.append(thread);
 }
 
 void WaitQueue::wake_one(Atomic<bool>* lock)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (lock)
         *lock = false;
     if (m_threads.is_empty())
@@ -57,7 +57,7 @@ void WaitQueue::wake_one(Atomic<bool>* lock)
 
 void WaitQueue::wake_n(i32 wake_count)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (m_threads.is_empty())
         return;
 
@@ -72,7 +72,7 @@ void WaitQueue::wake_n(i32 wake_count)
 
 void WaitQueue::wake_all()
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (m_threads.is_empty())
         return;
     while (!m_threads.is_empty())
@@ -82,7 +82,7 @@ void WaitQueue::wake_all()
 
 void WaitQueue::clear()
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     m_threads.clear();
 }
 
diff --git a/Kernel/WaitQueue.h b/Kernel/WaitQueue.h
index c4beae4617..c7705880bc 100644
--- a/Kernel/WaitQueue.h
+++ b/Kernel/WaitQueue.h
@@ -28,6 +28,7 @@
 
 #include 
 #include 
+#include 
 #include 
 
 namespace Kernel {
@@ -46,6 +47,7 @@ public:
 private:
     typedef IntrusiveList ThreadList;
     ThreadList m_threads;
+    SpinLock m_lock;
 };
 
 }
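
Below is a minimal, self-contained C++ sketch (not SerenityOS code) of the locking pattern the WaitQueue part of this patch adopts: the queue owns a spinlock, and every operation takes an RAII scoped lock on it instead of merely entering a critical section. The names SimpleSpinLock, ScopedLock and SketchWaitQueue, and the use of std::atomic_flag and std::deque in place of the kernel's SpinLock, ScopedSpinLock and intrusive thread list, are illustrative assumptions, not the kernel's API.

```cpp
// Standalone sketch of a spinlock-guarded wait queue. Assumed names throughout.
#include <atomic>
#include <cstdio>
#include <deque>

class SimpleSpinLock {
public:
    void lock()
    {
        // Spin until we win the flag; a real kernel lock would also
        // manage interrupts and track the owning processor.
        while (m_flag.test_and_set(std::memory_order_acquire)) { }
    }
    void unlock() { m_flag.clear(std::memory_order_release); }

private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

// RAII guard, analogous in role to ScopedSpinLock in the diff.
class ScopedLock {
public:
    explicit ScopedLock(SimpleSpinLock& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~ScopedLock() { m_lock.unlock(); }

private:
    SimpleSpinLock& m_lock;
};

class SketchWaitQueue {
public:
    void enqueue(int thread_id)
    {
        ScopedLock queue_lock(m_lock); // serialize against concurrent wakers
        m_waiters.push_back(thread_id);
    }

    // Returns the id of the woken waiter, or -1 if the queue was empty.
    int wake_one()
    {
        ScopedLock queue_lock(m_lock);
        if (m_waiters.empty())
            return -1;
        int id = m_waiters.front();
        m_waiters.pop_front();
        return id;
    }

private:
    SimpleSpinLock m_lock;      // plays the role of WaitQueue::m_lock in the diff
    std::deque<int> m_waiters;  // stands in for the intrusive thread list
};

int main()
{
    SketchWaitQueue queue;
    queue.enqueue(1);
    queue.enqueue(2);
    std::printf("woke %d\n", queue.wake_one()); // woke 1
    std::printf("woke %d\n", queue.wake_one()); // woke 2
    return 0;
}
```

Presumably the motivation for the switch: ScopedCritical only blocks preemption and interrupts on the current processor, so on a multiprocessor system another core could still touch the queue concurrently, whereas a per-queue SpinLock serializes all accessors regardless of which core they run on.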