From ab5c422a291a4d5577bfa9a0ca6e2e0861298b05 Mon Sep 17 00:00:00 2001
From: Andreas Kling
Date: Sun, 8 Aug 2021 16:59:39 +0200
Subject: [PATCH] Kernel/SMP: Make SMP message queueing work correctly

- Use the receiver's per-CPU entry in the message, instead of the
  sender's. (Using the sender's entry wasn't safe for broadcast
  messages since the same entry ended up on multiple message queues.)

- Retry the CAS until it *succeeds* instead of *fails*. This closes a
  race window, and also ensures a correct return value. The return
  value is used by the caller to decide whether to broadcast an IPI.
  This was the main reason smp=on was so slow. We had CPUs busy-waiting
  until someone else triggered an IPI and moved things along.

- Add a CPU pause hint to the spin loop. :^)
---
 Kernel/Arch/x86/common/Processor.cpp | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/Kernel/Arch/x86/common/Processor.cpp b/Kernel/Arch/x86/common/Processor.cpp
index 3ffc06064a..bfc41f851b 100644
--- a/Kernel/Arch/x86/common/Processor.cpp
+++ b/Kernel/Arch/x86/common/Processor.cpp
@@ -863,12 +863,18 @@ bool Processor::smp_enqueue_message(ProcessorMessage& msg)
     // Note that it's quite possible that the other processor may pop
     // the queue at any given time. We rely on the fact that the messages
     // are pooled and never get freed!
-    auto& msg_entry = msg.per_proc_entries[id()];
+    auto& msg_entry = msg.per_proc_entries[get_id()];
     VERIFY(msg_entry.msg == &msg);
     ProcessorMessageEntry* next = nullptr;
-    do {
+    for (;;) {
         msg_entry.next = next;
-    } while (m_message_queue.compare_exchange_strong(next, &msg_entry, AK::MemoryOrder::memory_order_acq_rel));
+        if (m_message_queue.compare_exchange_strong(next, &msg_entry, AK::MemoryOrder::memory_order_acq_rel))
+            break;
+        Processor::pause();
+    }
+
+    // If the enqueued message was the only message in the queue when posted,
+    // we return true. This is used by callers when deciding whether to generate an IPI.
     return next == nullptr;
 }
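
The core of the change is the classic lock-free push onto an intrusive
singly-linked list: link the new entry in front of the head you last observed,
then CAS the head, and retry (with a pause hint) until the CAS succeeds. Below
is a minimal standalone sketch of that pattern, not the kernel's actual code:
it uses std::atomic and a hypothetical Entry type in place of
ProcessorMessageEntry and the Processor API, and a portable stand-in for the
pause hint. The return value mirrors the patch: true when the queue was empty
before the push, which is what the caller uses to decide whether to notify the
consumer (in the kernel's case, whether to send an IPI).

    // Sketch only: models the enqueue pattern from the patch, not SerenityOS code.
    #include <atomic>
    #include <cstdio>

    #if defined(__x86_64__) || defined(__i386__)
    #    include <immintrin.h> // _mm_pause
    #endif

    struct Entry {
        Entry* next = nullptr;
    };

    static void cpu_pause()
    {
    #if defined(__x86_64__) || defined(__i386__)
        _mm_pause(); // hint to the CPU that we are spinning
    #endif
    }

    // Push `entry` onto the lock-free queue rooted at `head`.
    // Returns true if the queue was empty before the push, i.e. the
    // consumer should be notified (in the kernel: an IPI should be sent).
    static bool enqueue(std::atomic<Entry*>& head, Entry& entry)
    {
        Entry* next = nullptr;
        for (;;) {
            // Link the new entry in front of the head we last observed...
            entry.next = next;
            // ...and try to publish it. On failure, `next` is refreshed with
            // the current head and we retry until the CAS succeeds.
            if (head.compare_exchange_strong(next, &entry, std::memory_order_acq_rel))
                break;
            cpu_pause();
        }
        return next == nullptr;
    }

    int main()
    {
        std::atomic<Entry*> queue { nullptr };
        Entry a, b;
        std::printf("queue was empty: %d\n", enqueue(queue, a)); // prints 1
        std::printf("queue was empty: %d\n", enqueue(queue, b)); // prints 0
        return 0;
    }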