Kernel: Add mechanism to queue deferred function calls
Function calls that are deferred will be executed just before a thread returns to a preemptable state (meaning it is not in a critical section and not in an IRQ handler). If the caller is already in such a preemptable state, the function is executed immediately. This is meant to be used from e.g. IRQ handlers, where we may want to wake a thread that blocked until an interrupt happens.
Parent: 4713e6185f
Commit: b9a97ff81f
2 changed files with 185 additions and 2 deletions
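For a sense of the intended use, here is a minimal sketch (not part of the commit): a hypothetical IRQ handler queues a wake-up instead of performing it inline, so the work runs once the processor is about to leave the IRQ/critical context. Processor::deferred_call_queue is the API added below; the handler class and its m_wait_queue member are assumptions for illustration.

// Hypothetical IRQ handler, for illustration only. We are inside an IRQ
// handler (a non-preemptable state), so the lambda is queued rather than
// invoked; it executes just before this processor becomes preemptable again.
void MyDeviceIRQHandler::handle_irq(const RegisterState&)
{
    Processor::deferred_call_queue([this]() {
        m_wait_queue.wake_all(); // unblock a thread waiting for this interrupt
    });
}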
@@ -1193,6 +1193,8 @@ void Processor::early_initialize(u32 cpu)
         atomic_fetch_add(&g_total_processors, 1u, AK::MemoryOrder::memory_order_acq_rel);
     }
 
+    deferred_call_pool_init();
+
     cpu_setup();
     gdt_init();
     ASSERT(&current() == this); // sanity check
@@ -1932,6 +1934,125 @@ void Processor::Processor::halt()
     halt_this();
 }
 
+void Processor::deferred_call_pool_init()
+{
+    size_t pool_count = sizeof(m_deferred_call_pool) / sizeof(m_deferred_call_pool[0]);
+    for (size_t i = 0; i < pool_count; i++) {
+        auto& entry = m_deferred_call_pool[i];
+        entry.next = i < pool_count - 1 ? &m_deferred_call_pool[i + 1] : nullptr;
+        entry.was_allocated = false;
+    }
+    m_pending_deferred_calls = nullptr;
+    m_free_deferred_call_pool_entry = &m_deferred_call_pool[0];
+}
+
+void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
+{
+    ASSERT(m_in_critical);
+    ASSERT(!entry->was_allocated);
+
+    entry->next = m_free_deferred_call_pool_entry;
+    m_free_deferred_call_pool_entry = entry;
+}
+
+DeferredCallEntry* Processor::deferred_call_get_free()
+{
+    ASSERT(m_in_critical);
+
+    if (m_free_deferred_call_pool_entry) {
+        // Fast path, we have an entry in our pool
+        auto* entry = m_free_deferred_call_pool_entry;
+        m_free_deferred_call_pool_entry = entry->next;
+        ASSERT(!entry->was_allocated);
+        return entry;
+    }
+
+    auto* entry = new DeferredCallEntry;
+    entry->was_allocated = true;
+    return entry;
+}
+
+void Processor::deferred_call_execute_pending()
+{
+    ASSERT(m_in_critical);
+
+    if (!m_pending_deferred_calls)
+        return;
+    auto* pending_list = m_pending_deferred_calls;
+    m_pending_deferred_calls = nullptr;
+
+    // We pulled the stack of pending deferred calls in LIFO order, so we need to reverse the list first
+    auto reverse_list =
+        [](DeferredCallEntry* list) -> DeferredCallEntry*
+        {
+            DeferredCallEntry* rev_list = nullptr;
+            while (list) {
+                auto next = list->next;
+                list->next = rev_list;
+                rev_list = list;
+                list = next;
+            }
+            return rev_list;
+        };
+    pending_list = reverse_list(pending_list);
+
+    do {
+        // Call the appropriate callback handler
+        if (pending_list->have_data) {
+            pending_list->callback_with_data.handler(pending_list->callback_with_data.data);
+            if (pending_list->callback_with_data.free)
+                pending_list->callback_with_data.free(pending_list->callback_with_data.data);
+        } else {
+            pending_list->callback.handler();
+        }
+
+        // Return the entry back to the pool, or free it
+        auto* next = pending_list->next;
+        if (pending_list->was_allocated)
+            delete pending_list;
+        else
+            deferred_call_return_to_pool(pending_list);
+        pending_list = next;
+    } while (pending_list);
+}
+
+void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
+{
+    ASSERT(m_in_critical);
+    entry->next = m_pending_deferred_calls;
+    m_pending_deferred_calls = entry;
+}
+
+void Processor::deferred_call_queue(void (*callback)())
+{
+    // NOTE: If we are called outside of a critical section and outside
+    // of an irq handler, the function will be executed before we return!
+    ScopedCritical critical;
+    auto& cur_proc = Processor::current();
+
+    auto* entry = cur_proc.deferred_call_get_free();
+    entry->have_data = false;
+    entry->callback.handler = callback;
+
+    cur_proc.deferred_call_queue_entry(entry);
+}
+
+void Processor::deferred_call_queue(void (*callback)(void*), void* data, void (*free_data)(void*))
+{
+    // NOTE: If we are called outside of a critical section and outside
+    // of an irq handler, the function will be executed before we return!
+    ScopedCritical critical;
+    auto& cur_proc = Processor::current();
+
+    auto* entry = cur_proc.deferred_call_get_free();
+    entry->have_data = true;
+    entry->callback_with_data.handler = callback;
+    entry->callback_with_data.data = data;
+    entry->callback_with_data.free = free_data;
+
+    cur_proc.deferred_call_queue_entry(entry);
+}
+
 void Processor::gdt_init()
 {
     m_gdt_length = 0;
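A note on the allocation strategy above: each processor owns a small fixed pool of entries (m_deferred_call_pool, declared below) threaded into an intrusive free list, and falls back to the heap only when the pool is exhausted, so the common case of queueing from an IRQ handler does not allocate. A self-contained sketch of the same free-list idea, with simplified names (this is a model, not the kernel code):

#include <cassert>
#include <cstddef>

// Simplified model of the per-processor entry pool: an intrusive singly
// linked free list over a fixed array, with a heap fallback.
struct Entry {
    Entry* next { nullptr };
    bool was_allocated { false };
};

struct Pool {
    Entry slots[5];
    Entry* free_list { nullptr };

    Pool()
    {
        // Thread the fixed slots into a free list (cf. deferred_call_pool_init()).
        for (size_t i = 0; i < 5; i++)
            slots[i].next = i < 4 ? &slots[i + 1] : nullptr;
        free_list = &slots[0];
    }

    Entry* get()
    {
        if (auto* entry = free_list) { // fast path: pop from the pool
            free_list = entry->next;
            return entry;
        }
        auto* entry = new Entry; // pool exhausted: heap fallback
        entry->was_allocated = true;
        return entry;
    }

    void put(Entry* entry)
    {
        if (entry->was_allocated) {
            delete entry; // heap entries are freed
            return;
        }
        entry->next = free_list; // pool entries go back on the free list
        free_list = entry;
    }
};

int main()
{
    Pool pool;
    auto* entry = pool.get();
    assert(!entry->was_allocated); // came from the fixed pool
    pool.put(entry);
    return 0;
}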
@@ -677,6 +677,22 @@ struct ProcessorMessageEntry {
     ProcessorMessage* msg;
 };
 
+struct DeferredCallEntry {
+    DeferredCallEntry* next;
+    union {
+        struct {
+            void (*handler)();
+        } callback;
+        struct {
+            void* data;
+            void (*handler)(void*);
+            void (*free)(void*);
+        } callback_with_data;
+    };
+    bool have_data;
+    bool was_allocated;
+};
+
 class Processor {
     friend class ProcessorInfo;
 
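DeferredCallEntry keeps the two callback shapes in a union, with have_data recording which member is active and was_allocated distinguishing pool entries from heap entries so each is released correctly. A sketch of the tagged-union dispatch in isolation, mirroring what deferred_call_execute_pending() does (it assumes a fully initialized entry and the struct as declared here):

// Dispatch on the have_data discriminator, as deferred_call_execute_pending()
// does for each entry on the pending list.
void invoke(DeferredCallEntry& entry)
{
    if (entry.have_data) {
        entry.callback_with_data.handler(entry.callback_with_data.data);
        if (entry.callback_with_data.free)
            entry.callback_with_data.free(entry.callback_with_data.data);
    } else {
        entry.callback.handler();
    }
}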
@@ -710,6 +726,10 @@ class Processor {
     bool m_scheduler_initialized;
     bool m_halt_requested;
 
+    DeferredCallEntry* m_pending_deferred_calls; // in reverse order
+    DeferredCallEntry* m_free_deferred_call_pool_entry;
+    DeferredCallEntry m_deferred_call_pool[5];
+
     void gdt_init();
     void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
     void write_gdt_entry(u16 selector, Descriptor& descriptor);
@@ -722,6 +742,12 @@ class Processor {
     static void smp_broadcast_message(ProcessorMessage& msg, bool async);
     static void smp_broadcast_halt();
 
+    void deferred_call_pool_init();
+    void deferred_call_execute_pending();
+    DeferredCallEntry* deferred_call_get_free();
+    void deferred_call_return_to_pool(DeferredCallEntry*);
+    void deferred_call_queue_entry(DeferredCallEntry*);
+
     void cpu_detect();
     void cpu_setup();
@@ -843,7 +869,19 @@ public:
     ALWAYS_INLINE void restore_irq(u32 prev_irq)
     {
         ASSERT(prev_irq <= m_in_irq);
-        m_in_irq = prev_irq;
+        if (!prev_irq) {
+            if (m_in_critical == 0) {
+                auto prev_critical = m_in_critical++;
+                m_in_irq = prev_irq;
+                deferred_call_execute_pending();
+                ASSERT(m_in_critical == prev_critical + 1);
+                m_in_critical = prev_critical;
+            }
+            if (!m_in_critical)
+                check_invoke_scheduler();
+        } else {
+            m_in_irq = prev_irq;
+        }
     }
 
     ALWAYS_INLINE u32& in_irq()
@@ -860,10 +898,18 @@ public:
 
     ALWAYS_INLINE void leave_critical(u32 prev_flags)
     {
+        cli(); // Need to prevent IRQs from interrupting us here!
         ASSERT(m_in_critical > 0);
-        if (--m_in_critical == 0) {
+        if (m_in_critical == 1) {
+            if (!m_in_irq) {
+                deferred_call_execute_pending();
+                ASSERT(m_in_critical == 1);
+            }
+            m_in_critical--;
             if (!m_in_irq)
                 check_invoke_scheduler();
+        } else {
+            m_in_critical--;
         }
         if (prev_flags & 0x200)
             sti();
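Taken together, the two hunks above define when deferred calls actually run: leave_critical() drains the queue at the point where the critical-section count would drop to zero (with IRQs disabled via cli() so a handler cannot race the drain), and restore_irq() drains it when the last nested IRQ unwinds outside a critical section. A sketch of the resulting caller-visible contract (do_work is hypothetical; enter_critical()/leave_critical() are the kernel's critical-section APIs, the latter shown above):

// Hypothetical example of the guarantee these changes establish: a call
// queued inside a critical section does not run until the section is left.
void do_work()
{
    u32 prev_flags;
    Processor::current().enter_critical(prev_flags);
    Processor::deferred_call_queue([] {
        // Not executed yet; we are still in a critical section.
    });
    Processor::current().leave_critical(prev_flags); // the deferred call runs here
}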
@@ -921,6 +967,22 @@ public:
     static void smp_broadcast(void (*callback)(void*), void* data, void (*free_data)(void*), bool async);
     static void smp_broadcast_flush_tlb(VirtualAddress vaddr, size_t page_count);
 
+    template<typename Callback>
+    static void deferred_call_queue(Callback callback)
+    {
+        auto* data = new Callback(move(callback));
+        deferred_call_queue(
+            [](void* data) {
+                (*reinterpret_cast<Callback*>(data))();
+            },
+            data,
+            [](void* data) {
+                delete reinterpret_cast<Callback*>(data);
+            });
+    }
+    static void deferred_call_queue(void (*callback)());
+    static void deferred_call_queue(void (*callback)(void*), void* data, void (*free_data)(void*));
+
     ALWAYS_INLINE bool has_feature(CPUFeature f) const
     {
         return (static_cast<u32>(m_features) & static_cast<u32>(f)) != 0;
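The template overload above makes the mechanism convenient to use with capturing lambdas: the callback is copied to the heap, invoked through the void* trampoline, and deleted by the free_data trampoline after it has run. A usage sketch (the surrounding function and the klog() output are for illustration only):

// Queue a capturing lambda; the Callback copy is heap-allocated by the
// template overload and freed automatically after it has run.
void example()
{
    int vector_number = 42; // illustrative value
    Processor::deferred_call_queue([vector_number] {
        klog() << "deferred call after IRQ " << vector_number;
    });
}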