From 2079728a7455c7c2d852c2784f7b02bcf16f3583 Mon Sep 17 00:00:00 2001
From: Brian Gianforcaro
Date: Sat, 15 Oct 2022 00:56:42 -0700
Subject: [PATCH] Kernel: Add formal Processor::verify_no_spinlocks_held() API

In a few places we check `!Processor::in_critical()` to validate that the
current processor doesn't hold any kernel spinlocks. Instead, let's give
that check a first-class name for readability. I'll also be adding more of
these checks, so I would rather add usages of a nice API than rely on this
implicit/assumed logic.
---
 Kernel/Arch/aarch64/Processor.h |  5 +++++
 Kernel/Arch/x86/Processor.h     |  5 +++++
 Kernel/Heap/kmalloc.cpp         | 20 ++++++++++----------
 3 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/Kernel/Arch/aarch64/Processor.h b/Kernel/Arch/aarch64/Processor.h
index 163d5815ad..57a2b07c47 100644
--- a/Kernel/Arch/aarch64/Processor.h
+++ b/Kernel/Arch/aarch64/Processor.h
@@ -147,6 +147,11 @@ public:
         return current().m_in_critical;
     }

+    ALWAYS_INLINE static void verify_no_spinlocks_held()
+    {
+        VERIFY(!Processor::in_critical());
+    }
+
     // FIXME: Actually return the idle thread once aarch64 supports threading.
     ALWAYS_INLINE static Thread* idle_thread()
     {
diff --git a/Kernel/Arch/x86/Processor.h b/Kernel/Arch/x86/Processor.h
index 6cb8dffd69..64209ffbad 100644
--- a/Kernel/Arch/x86/Processor.h
+++ b/Kernel/Arch/x86/Processor.h
@@ -370,6 +370,11 @@ public:
         return read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
     }

+    ALWAYS_INLINE static void verify_no_spinlocks_held()
+    {
+        VERIFY(!Processor::in_critical());
+    }
+
     ALWAYS_INLINE static FPUState const& clean_fpu_state() { return s_clean_fpu_state; }

     static void smp_enable();
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index 5925566e9e..e158ca6fc5 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -410,14 +410,6 @@ void kmalloc_enable_expand()
     g_kmalloc_global->enable_expansion();
 }

-static inline void kmalloc_verify_nospinlock_held()
-{
-    // Catch bad callers allocating under spinlock.
-    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
-        VERIFY(!Processor::in_critical());
-    }
-}
-
 UNMAP_AFTER_INIT void kmalloc_init()
 {
     // Zero out heap since it's placed after end_of_kernel_bss.
@@ -429,7 +421,11 @@ UNMAP_AFTER_INIT void kmalloc_init()

 void* kmalloc(size_t size)
 {
-    kmalloc_verify_nospinlock_held();
+    // Catch bad callers allocating under spinlock.
+    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
+        Processor::verify_no_spinlocks_held();
+    }
+
     SpinlockLocker lock(s_lock);
     ++g_kmalloc_call_count;
@@ -472,7 +468,11 @@ void kfree_sized(void* ptr, size_t size)

     VERIFY(size > 0);

-    kmalloc_verify_nospinlock_held();
+    // Catch bad callers allocating under spinlock.
+    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
+        Processor::verify_no_spinlocks_held();
+    }
+
     SpinlockLocker lock(s_lock);
     ++g_kfree_call_count;
     ++g_nested_kfree_calls;
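
A minimal usage sketch for reviewers, not part of the patch: it shows what a
call site could look like once it adopts the new helper. The function name
do_blocking_work and the Kernel/Arch/Processor.h include path are assumptions
for illustration only.

    #include <Kernel/Arch/Processor.h>

    // Hypothetical call site (illustration only): a routine that may block,
    // and therefore must not be entered while any kernel spinlock is held.
    void do_blocking_work()
    {
        // Previously, call sites spelled the check out by hand:
        //     VERIFY(!Processor::in_critical());
        // With this patch, the same intent has a first-class name:
        Processor::verify_no_spinlocks_held();

        // ... work that may sleep or reschedule ...
    }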