
Kernel: Some futex improvements

This adds support for FUTEX_WAKE_OP, FUTEX_WAIT_BITSET, FUTEX_WAKE_BITSET,
FUTEX_REQUEUE, and FUTEX_CMP_REQUEUE, as well as global and private
futexes, and absolute/relative timeouts against the appropriate clock. This
also changes the implementation so that kernel resources are only used when
a thread is blocked on a futex.
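
To make the new operations concrete, here is a minimal userspace sketch of the
bitset variants with an absolute timeout. It is written against the Linux-style
futex(2) syscall interface (SYS_futex, FUTEX_WAIT_BITSET, FUTEX_WAKE_BITSET,
FUTEX_PRIVATE_FLAG) purely for illustration; Serenity's own LibC wrapper and
header names may differ.

    // Minimal sketch: wait on a futex word with FUTEX_WAIT_BITSET (absolute
    // timeout) and wake matching waiters with FUTEX_WAKE_BITSET.
    // Linux-style interface used for illustration only.
    #include <atomic>
    #include <cerrno>
    #include <cstdint>
    #include <cstdio>
    #include <ctime>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static std::atomic<uint32_t> futex_word { 0 };

    static long futex(uint32_t* addr, int op, uint32_t value,
                      const struct timespec* timeout, uint32_t* addr2, uint32_t value3)
    {
        return syscall(SYS_futex, addr, op, value, timeout, addr2, value3);
    }

    int main()
    {
        // Block only while the word still holds 0, wake up only for wake-ups
        // whose bitset intersects 0x1, and give up at an absolute deadline
        // one second from now (FUTEX_WAIT_BITSET timeouts are absolute).
        struct timespec deadline;
        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec += 1;

        long rc = futex(reinterpret_cast<uint32_t*>(&futex_word),
                        FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG, 0,
                        &deadline, nullptr, 0x1);
        if (rc < 0 && errno == ETIMEDOUT)
            printf("no wake-up before the deadline\n");

        // Elsewhere: wake at most one waiter whose wait bitset matches 0x1.
        futex(reinterpret_cast<uint32_t*>(&futex_word),
              FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG, 1,
              nullptr, nullptr, 0x1);
        return 0;
    }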

Global futexes are implemented as offsets into VMObjects, so that different
processes can share a futex against the same VMObject even when it is
mapped at different virtual addresses in each process.
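
As a minimal illustration of that idea (stand-in types only, not the kernel's
actual VMObject or futex bookkeeping): keying the wait queue by
(backing object, offset) means every process that maps the same object computes
the same key for the same futex word, no matter where the mapping sits in its
own address space.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <utility>

    // (backing object, byte offset of the futex word inside it)
    using GlobalFutexKey = std::pair<const void*, size_t>;

    struct Mapping {
        const void* vmobject;   // stand-in for the shared VMObject
        size_t vmobject_offset; // where the mapping starts inside the VMObject
        uintptr_t base;         // per-process virtual base address of the mapping
    };

    static GlobalFutexKey key_for(const Mapping& m, uintptr_t futex_address)
    {
        return { m.vmobject, m.vmobject_offset + (futex_address - m.base) };
    }

    int main()
    {
        int shared_object; // stand-in for one shared VMObject
        Mapping process_a { &shared_object, 0, 0x20000000 };
        Mapping process_b { &shared_object, 0, 0x7f000000 };

        // The same futex word, 0x40 bytes into the shared object, seen at a
        // different virtual address in each process:
        auto key_a = key_for(process_a, process_a.base + 0x40);
        auto key_b = key_for(process_b, process_b.base + 0x40);

        printf("same wait queue: %s\n", key_a == key_b ? "yes" : "no"); // "yes"
        return 0;
    }
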
Authored by Tom on 2020-12-21 23:21:58 -07:00; committed by Andreas Kling
parent 7581b64705
commit 1d621ab172
23 changed files with 928 additions and 63 deletions


@@ -255,6 +255,7 @@ public:
        enum class Type {
            Unknown = 0,
            File,
            Futex,
            Plan9FS,
            Join,
            Queue,
@@ -346,6 +347,10 @@ public:
        }

        bool set_block_condition(BlockCondition&, void* = nullptr);
        void set_block_condition_raw_locked(BlockCondition* block_condition)
        {
            m_block_condition = block_condition;
        }

        mutable RecursiveSpinLock m_lock;
@@ -390,6 +395,12 @@ public:
            });
        }

        bool is_empty() const
        {
            ScopedSpinLock lock(m_lock);
            return is_empty_locked();
        }

    protected:
        template<typename UnblockOne>
        bool unblock(UnblockOne unblock_one)
@@ -417,15 +428,52 @@ public:
            return did_unblock;
        }

        bool is_empty_locked() const
        {
            ASSERT(m_lock.is_locked());
            return m_blockers.is_empty();
        }

        virtual bool should_add_blocker(Blocker&, void*) { return true; }

        SpinLock<u8> m_lock;

    private:
        struct BlockerInfo {
            Blocker* blocker;
            void* data;
        };

        Vector<BlockerInfo, 4> do_take_blockers(size_t count)
        {
            if (m_blockers.size() <= count)
                return move(m_blockers);

            size_t move_count = (count <= m_blockers.size()) ? count : m_blockers.size();
            ASSERT(move_count > 0);

            Vector<BlockerInfo, 4> taken_blockers;
            taken_blockers.ensure_capacity(move_count);
            for (size_t i = 0; i < move_count; i++)
                taken_blockers.append(m_blockers.take(i));
            m_blockers.remove(0, move_count);
            return taken_blockers;
        }

        void do_append_blockers(Vector<BlockerInfo, 4>&& blockers_to_append)
        {
            if (blockers_to_append.is_empty())
                return;

            if (m_blockers.is_empty()) {
                m_blockers = move(blockers_to_append);
                return;
            }

            m_blockers.ensure_capacity(m_blockers.size() + blockers_to_append.size());
            for (size_t i = 0; i < blockers_to_append.size(); i++)
                m_blockers.append(blockers_to_append.take(i));
            blockers_to_append.clear();
        }

        mutable SpinLock<u8> m_lock;

    private:
        Vector<BlockerInfo, 4> m_blockers;
    };
@@ -471,6 +519,39 @@ public:
        bool m_did_unblock { false };
    };

    class FutexBlocker : public Blocker {
    public:
        explicit FutexBlocker(FutexQueue&, u32);
        virtual ~FutexBlocker();

        virtual Type blocker_type() const override { return Type::Futex; }
        virtual const char* state_string() const override { return "Futex"; }
        virtual void not_blocking(bool) override { }

        virtual bool should_block() override
        {
            return m_should_block;
        }

        u32 bitset() const { return m_bitset; }

        void begin_requeue()
        {
            // We need to hold the lock until we moved it over
            m_relock_flags = m_lock.lock();
        }
        void finish_requeue(FutexQueue&);

        bool unblock_bitset(u32 bitset);
        bool unblock(bool force = false);

    protected:
        u32 m_bitset;
        u32 m_relock_flags { 0 };
        bool m_should_block { true };
        bool m_did_unblock { false };
    };

    class FileBlocker : public Blocker {
    public:
        enum class BlockFlags : u32 {