
AK+Kernel: Make IntrusiveList capable of holding non-raw pointers

This should allow creating intrusive lists that hold smart pointers,
while remaining zero-cost (compared to the implementation before this
commit) when holding raw pointers :^)
As a side note, this also adds a `RawPtr<T>` type, which is simply
equivalent to `T*`.
Note that this commit does not actually use the new functionality; it is
only expected to pave the way for #6369, which aims to replace
NonnullRefPtrVector<T> with intrusive lists.
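
To make the `RawPtr<T>` note concrete, here is a minimal sketch of what such an alias presumably looks like; the exact header and spelling are not shown on this page, so treat this as an assumption rather than the actual AK definition:

#include <type_traits>

// Sketch: RawPtr<T> as a plain alias template, interchangeable with T*.
template<typename T>
using RawPtr = T*;

static_assert(std::is_same_v<RawPtr<int>, int*>);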

As is usual with zero-cost things, this makes the interface a bit less
nice: an `IntrusiveListNode` now requires the name of the type it is
embedded in (and optionally its container type, if that is not `RawPtr`),
and the `IntrusiveList` declaration must also name the container type
(normally `RawPtr`).
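
For illustration, a minimal sketch of both declaration styles under the new interface. The raw-pointer form mirrors the call sites updated below; the smart-pointer form (the `SharedEntry`/`RefPtr` variant) is an assumed usage based on the commit message and is not part of this diff:

#include <AK/IntrusiveList.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>

// Raw-pointer list: the node names its enclosing type, the list names RawPtr as its container.
struct Entry {
    IntrusiveListNode<Entry> list_node;
    int value { 0 };
};
IntrusiveList<Entry, RawPtr<Entry>, &Entry::list_node> entries;

// Hypothetical smart-pointer list enabled by this change (assumption; no such call site exists in this commit).
struct SharedEntry : public RefCounted<SharedEntry> {
    IntrusiveListNode<SharedEntry, RefPtr<SharedEntry>> list_node;
    int value { 0 };
};
IntrusiveList<SharedEntry, RefPtr<SharedEntry>, &SharedEntry::list_node> shared_entries;

With `RawPtr` as the container the node still stores a bare `T*`, which is what keeps the raw-pointer case free, as the message above notes.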
Authored by AnotherTest on 2021-04-16 16:33:24 +04:30; committed by Andreas Kling
parent fb814ee720
commit e4412f1f59
11 changed files with 143 additions and 80 deletions

@@ -32,7 +32,7 @@
 namespace Kernel {
 struct CacheEntry {
-    IntrusiveListNode list_node;
+    IntrusiveListNode<CacheEntry> list_node;
     BlockBasedFS::BlockIndex block_index { 0 };
     u8* data { nullptr };
     bool has_data { false };
@@ -117,8 +117,8 @@ private:
     BlockBasedFS& m_fs;
     size_t m_entry_count { 10000 };
     mutable HashMap<BlockBasedFS::BlockIndex, CacheEntry*> m_hash;
-    mutable IntrusiveList<CacheEntry, &CacheEntry::list_node> m_clean_list;
-    mutable IntrusiveList<CacheEntry, &CacheEntry::list_node> m_dirty_list;
+    mutable IntrusiveList<CacheEntry, RawPtr<CacheEntry>, &CacheEntry::list_node> m_clean_list;
+    mutable IntrusiveList<CacheEntry, RawPtr<CacheEntry>, &CacheEntry::list_node> m_dirty_list;
     KBuffer m_cached_block_data;
     KBuffer m_entries;
     bool m_dirty { false };

@@ -120,7 +120,7 @@ protected:
     mode_t m_umask { 022 };
     VirtualAddress m_signal_trampoline;
     Atomic<u32> m_thread_count { 0 };
-    IntrusiveList<Thread, &Thread::m_process_thread_list_node> m_thread_list;
+    IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_process_thread_list_node> m_thread_list;
     u8 m_termination_status { 0 };
     u8 m_termination_signal { 0 };
 };

@@ -73,7 +73,7 @@ Atomic<bool> g_finalizer_has_work { false };
 READONLY_AFTER_INIT static Process* s_colonel_process;
 struct ThreadReadyQueue {
-    IntrusiveList<Thread, &Thread::m_ready_queue_node> thread_list;
+    IntrusiveList<Thread, RawPtr<Thread>, &Thread::m_ready_queue_node> thread_list;
 };
 static SpinLock<u8> g_ready_queues_lock;
 static u32 g_ready_queues_mask;

@@ -88,7 +88,7 @@ class Thread
     friend class Process;
     friend class ProtectedProcessBase;
     friend class Scheduler;
-    friend class ThreadReadyQueue;
+    friend struct ThreadReadyQueue;
     static SpinLock<u8> g_tid_map_lock;
     static HashMap<ThreadID, Thread*>* g_tid_map;
@@ -1129,7 +1129,7 @@ public:
 private:
     Thread(NonnullRefPtr<Process>, NonnullOwnPtr<Region> kernel_stack_region);
-    IntrusiveListNode m_process_thread_list_node;
+    IntrusiveListNode<Thread> m_process_thread_list_node;
     int m_runnable_priority { -1 };
     friend class WaitQueue;
@@ -1202,7 +1202,7 @@ private:
     TSS m_tss {};
     TrapFrame* m_current_trap { nullptr };
     u32 m_saved_critical { 1 };
-    IntrusiveListNode m_ready_queue_node;
+    IntrusiveListNode<Thread> m_ready_queue_node;
     Atomic<u32> m_cpu { 0 };
     u32 m_cpu_affinity { THREAD_AFFINITY_DEFAULT };
     u32 m_ticks_left { 0 };

@@ -75,7 +75,7 @@ public:
 private:
     struct WorkItem {
-        IntrusiveListNode m_node;
+        IntrusiveListNode<WorkItem> m_node;
         void (*function)(void*);
         void* data;
         void (*free_data)(void*);
@@ -86,7 +86,7 @@ private:
     RefPtr<Thread> m_thread;
     WaitQueue m_wait_queue;
-    IntrusiveList<WorkItem, &WorkItem::m_node> m_items;
+    IntrusiveList<WorkItem, RawPtr<WorkItem>, &WorkItem::m_node> m_items;
     SpinLock<u8> m_lock;
 };