Kernel: Add a SpinLock to the WaitQueue
We need to be able to prevent a WaitQueue from being modified by another CPU. So, add a SpinLock to it. Because this pushes some other class over the 64-byte limit, we also need to add another 128-byte bucket to the slab allocator.
parent 788b2d64c6
commit 49f5069b76

3 changed files with 13 additions and 5 deletions
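For readers skimming the diffs below: ScopedCritical only disables interrupts on the current processor, which is enough on a single CPU but does nothing to stop another CPU from touching the queue at the same time. A spinlock closes that gap by making other CPUs busy-wait until the holder releases it. What follows is a minimal, portable C++ sketch of the lock-plus-RAII-guard pattern this commit adopts; the names SpinLock and ScopedLock are illustrative stand-ins, not the kernel's actual Kernel/SpinLock.h classes.

// Minimal sketch of the lock/guard pattern used by this commit;
// not SerenityOS's implementation (that lives in Kernel/SpinLock.h).
#include <atomic>

class SpinLock {
public:
    void lock()
    {
        // Atomically flip the flag from clear to set; if another CPU
        // already holds it, busy-wait ("spin") until it is released.
        while (m_flag.test_and_set(std::memory_order_acquire)) {
        }
    }

    void unlock() { m_flag.clear(std::memory_order_release); }

private:
    std::atomic_flag m_flag = ATOMIC_FLAG_INIT;
};

// RAII guard analogous to ScopedSpinLock: acquires on construction and
// releases on destruction, so early returns cannot leak the lock.
class ScopedLock {
public:
    explicit ScopedLock(SpinLock& lock)
        : m_lock(lock)
    {
        m_lock.lock();
    }
    ~ScopedLock() { m_lock.unlock(); }

private:
    SpinLock& m_lock;
};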
Kernel/Heap/SlabAllocator.cpp
@@ -114,6 +114,7 @@ private:
 static SlabAllocator<16> s_slab_allocator_16;
 static SlabAllocator<32> s_slab_allocator_32;
 static SlabAllocator<64> s_slab_allocator_64;
+static SlabAllocator<128> s_slab_allocator_128;
 
 static_assert(sizeof(Region) <= s_slab_allocator_64.slab_size());
 
@@ -130,6 +131,7 @@ void slab_alloc_init()
     s_slab_allocator_16.init(128 * KB);
     s_slab_allocator_32.init(128 * KB);
     s_slab_allocator_64.init(512 * KB);
+    s_slab_allocator_128.init(512 * KB);
 }
 
 void* slab_alloc(size_t slab_size)
@@ -140,6 +142,8 @@ void* slab_alloc(size_t slab_size)
         return s_slab_allocator_32.alloc();
     if (slab_size <= 64)
         return s_slab_allocator_64.alloc();
+    if (slab_size <= 128)
+        return s_slab_allocator_128.alloc();
     ASSERT_NOT_REACHED();
 }
 
@@ -151,6 +155,8 @@ void slab_dealloc(void* ptr, size_t slab_size)
         return s_slab_allocator_32.dealloc(ptr);
     if (slab_size <= 64)
         return s_slab_allocator_64.dealloc(ptr);
+    if (slab_size <= 128)
+        return s_slab_allocator_128.dealloc(ptr);
     ASSERT_NOT_REACHED();
 }
 
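The new 128-byte bucket mirrors the existing ones, so the dispatch branches above are the whole change. For context, a fixed-size slab bucket is typically a pre-carved memory region threaded onto a freelist, which is what makes alloc() and dealloc() O(1). A toy sketch under that assumption (ToySlabAllocator is hypothetical and malloc-backed; the kernel's real SlabAllocator differs in detail):

// Toy illustration only, not kernel code. Assumes the common freelist
// design: each free slab stores a pointer to the next free slab, so the
// slab size must be at least sizeof(void*).
#include <cstddef>
#include <cstdint>
#include <cstdlib>

template<size_t templated_slab_size>
class ToySlabAllocator {
public:
    void init(size_t region_size)
    {
        m_base = malloc(region_size); // toy backing store; never freed here
        // Carve the region into fixed-size slabs and push each one
        // onto the freelist.
        auto* slabs = static_cast<uint8_t*>(m_base);
        for (size_t i = 0; i < region_size / templated_slab_size; ++i) {
            void* slab = slabs + i * templated_slab_size;
            *static_cast<void**>(slab) = m_freelist;
            m_freelist = slab;
        }
    }

    void* alloc()
    {
        // Pop the freelist head: O(1), no per-object size metadata.
        void* slab = m_freelist;
        if (slab)
            m_freelist = *static_cast<void**>(slab);
        return slab;
    }

    void dealloc(void* ptr)
    {
        // Push the slab back onto the freelist.
        *static_cast<void**>(ptr) = m_freelist;
        m_freelist = ptr;
    }

    constexpr size_t slab_size() const { return templated_slab_size; }

private:
    void* m_base { nullptr };
    void* m_freelist { nullptr };
};

This also explains the static_assert in the hunk above: a slab bucket stores no per-object size, so every class allocated from a bucket must fit that bucket's fixed slab size, and a class that outgrows 64 bytes has to move up to a larger bucket, which is exactly the situation this commit handles.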
Kernel/WaitQueue.cpp
@@ -39,13 +39,13 @@ WaitQueue::~WaitQueue()
 
 void WaitQueue::enqueue(Thread& thread)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     m_threads.append(thread);
 }
 
 void WaitQueue::wake_one(Atomic<bool>* lock)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (lock)
         *lock = false;
     if (m_threads.is_empty())
@@ -57,7 +57,7 @@ void WaitQueue::wake_one(Atomic<bool>* lock)
 
 void WaitQueue::wake_n(i32 wake_count)
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (m_threads.is_empty())
         return;
 
@@ -72,7 +72,7 @@ void WaitQueue::wake_n(i32 wake_count)
 
 void WaitQueue::wake_all()
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     if (m_threads.is_empty())
         return;
     while (!m_threads.is_empty())
@@ -82,7 +82,7 @@ void WaitQueue::wake_all()
 
 void WaitQueue::clear()
 {
-    ScopedCritical critical;
+    ScopedSpinLock queue_lock(m_lock);
     m_threads.clear();
 }
 
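Each function swaps ScopedCritical for a ScopedSpinLock on the queue's own m_lock, so callers on different CPUs now serialize instead of racing on m_threads. A self-contained userspace demonstration of the idea, with two threads standing in for two CPUs (all names here are hypothetical stand-ins, not kernel APIs):

// Build with: c++ -std=c++11 -pthread demo.cpp
#include <atomic>
#include <list>
#include <thread>

std::atomic_flag g_queue_lock = ATOMIC_FLAG_INIT; // stands in for m_lock
std::list<int> g_waiters;                         // stands in for m_threads

void enqueue(int id)
{
    while (g_queue_lock.test_and_set(std::memory_order_acquire)) { } // lock
    g_waiters.push_back(id); // the mutation WaitQueue::enqueue() protects
    g_queue_lock.clear(std::memory_order_release);                 // unlock
}

int main()
{
    // Two "CPUs" enqueue concurrently; merely disabling interrupts on one
    // of them would not stop the other from racing on the list.
    std::thread cpu0([] { for (int i = 0; i < 100000; ++i) enqueue(0); });
    std::thread cpu1([] { for (int i = 0; i < 100000; ++i) enqueue(1); });
    cpu0.join();
    cpu1.join();
    return g_waiters.size() == 200000 ? 0 : 1; // the lock keeps this true
}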
Kernel/WaitQueue.h
@@ -28,6 +28,7 @@
 
 #include <AK/Atomic.h>
 #include <AK/SinglyLinkedList.h>
+#include <Kernel/SpinLock.h>
 #include <Kernel/Thread.h>
 
 namespace Kernel {
@@ -46,6 +47,7 @@ public:
 private:
     typedef IntrusiveList<Thread, &Thread::m_wait_queue_node> ThreadList;
     ThreadList m_threads;
+    SpinLock<u32> m_lock;
 };
 
 }