
LibThread: Remove LOCKER() macro, as it adds no value

The LOCKER() macro appears to have been added to LibThread as a
userspace analog of the LOCKER() macro that previously existed in
the kernel. The kernel version injected __FILE__ and __LINE__ into
the lock acquisition for debugging, but AK::SourceLocation removed
the need for that, and the kernel macro is now gone. The LOCKER()
in LibThread doesn't actually need to be a macro: using the type
directly works fine, and is arguably more readable, since it
removes an unnecessary level of indirection.
Brian Gianforcaro 2021-05-10 01:21:35 -07:00 committed by Andreas Kling
parent 0b7395848a
commit 691b6f69c5
5 changed files with 17 additions and 15 deletions
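
For reference, the macro is pure indirection: it expands to a declaration of the RAII guard at the call site. A minimal before/after sketch, using the identifiers that appear in the diffs below:

    // Before: the macro hides a variable declaration.
    // #define LOCKER(lock) LibThread::Locker locker(lock)
    LOCKER(malloc_lock());

    // After: the same RAII guard, declared directly.
    LibThread::Locker locker(malloc_lock());

Either way, a LibThread::Locker is constructed that holds the lock for the enclosing scope; only the spelling at the call site changes.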


@@ -157,7 +157,7 @@ enum class CallerWillInitializeMemory {
 static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
 {
-    LOCKER(malloc_lock());
+    LibThread::Locker locker(malloc_lock());
     if (s_log_malloc)
         dbgln("LibC: malloc({})", size);
@@ -278,7 +278,7 @@ static void free_impl(void* ptr)
     g_malloc_stats.number_of_free_calls++;
-    LOCKER(malloc_lock());
+    LibThread::Locker locker(malloc_lock());
     void* block_base = (void*)((FlatPtr)ptr & ChunkedBlock::block_mask);
     size_t magic = *(size_t*)block_base;
@@ -380,7 +380,7 @@ size_t malloc_size(void* ptr)
 {
     if (!ptr)
         return 0;
-    LOCKER(malloc_lock());
+    LibThread::Locker locker(malloc_lock());
     void* page_base = (void*)((FlatPtr)ptr & ChunkedBlock::block_mask);
     auto* header = (const CommonHeader*)page_base;
     auto size = header->m_size;
@@ -398,7 +398,7 @@ void* realloc(void* ptr, size_t size)
     if (!size)
         return nullptr;
-    LOCKER(malloc_lock());
+    LibThread::Locker locker(malloc_lock());
     auto existing_allocation_size = malloc_size(ptr);
     if (size <= existing_allocation_size) {


@@ -372,7 +372,7 @@ void EventLoop::pump(WaitMode mode)
     decltype(m_queued_events) events;
     {
-        LOCKER(m_private->lock);
+        LibThread::Locker locker(m_private->lock);
         events = move(m_queued_events);
     }
@@ -401,7 +401,7 @@ void EventLoop::pump(WaitMode mode)
     }
     if (m_exit_requested) {
-        LOCKER(m_private->lock);
+        LibThread::Locker locker(m_private->lock);
         dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop: Exit requested. Rejigging {} events.", events.size() - i);
         decltype(m_queued_events) new_event_queue;
         new_event_queue.ensure_capacity(m_queued_events.size() + events.size());
@@ -416,7 +416,7 @@ void EventLoop::pump(WaitMode mode)
 void EventLoop::post_event(Object& receiver, NonnullOwnPtr<Event>&& event)
 {
-    LOCKER(m_private->lock);
+    LibThread::Locker lock(m_private->lock);
     dbgln_if(EVENTLOOP_DEBUG, "Core::EventLoop::post_event: ({}) << receivier={}, event={}", m_queued_events.size(), receiver, event);
     m_queued_events.empend(receiver, move(event));
 }
@@ -601,7 +601,7 @@ retry:
     bool queued_events_is_empty;
     {
-        LOCKER(m_private->lock);
+        LibThread::Locker locker(m_private->lock);
         queued_events_is_empty = m_queued_events.is_empty();
     }


@@ -17,7 +17,7 @@ static intptr_t background_thread_func()
     while (true) {
         Function<void()> work_item;
         {
-            LOCKER(s_all_actions->lock());
+            LibThread::Locker locker(s_all_actions->lock());
             if (!s_all_actions->resource().is_empty())
                 work_item = s_all_actions->resource().dequeue();


@@ -53,7 +53,7 @@ private:
         , m_action(move(action))
         , m_on_complete(move(on_complete))
     {
-        LOCKER(all_actions().lock());
+        Locker locker(all_actions().lock());
         all_actions().resource().enqueue([this] {
             m_result = m_action();


@@ -70,8 +70,6 @@ inline void Lock::unlock()
     --m_level;
 }
 
-#    define LOCKER(lock) LibThread::Locker locker(lock)
-
 template<typename T>
 class Lockable {
 public:
@@ -92,7 +90,7 @@ public:
     T lock_and_copy()
     {
-        LOCKER(m_lock);
+        Locker locker(m_lock);
         return m_resource;
     }
@@ -113,8 +111,12 @@ public:
     ~Lock() { }
 };
 
+class Locker {
+public:
+    explicit Locker(Lock&) { }
+    ~Locker() { }
+};
+
 }
 
-#    define LOCKER(x)
-
 #endif
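
One wrinkle worth noting in that last hunk: the LibThread lock header also has a non-threaded build path (the branch closed by the trailing #endif), where LOCKER(x) used to expand to nothing. Since call sites now name the Locker type directly, that branch instead gains a no-op Locker class with the same shape as the real one. A sketch of how the two configurations line up; the acquire/release bodies and the __serenity__ guard are assumptions, not taken from the diff:

    #ifdef __serenity__
    // Real guard (assumed): take the lock on construction,
    // release it on destruction.
    class Locker {
    public:
        explicit Locker(Lock& lock)
            : m_lock(lock)
        {
            m_lock.lock();
        }
        ~Locker() { m_lock.unlock(); }

    private:
        Lock& m_lock;
    };
    #else
    // Fallback added by this commit: same interface, empty body,
    // so `Locker locker(some_lock);` compiles unchanged.
    class Locker {
    public:
        explicit Locker(Lock&) { }
        ~Locker() { }
    };
    #endif

This keeps single-threaded builds of the same sources compiling without sprinkling #ifdefs at every call site.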