diff --git a/Kernel/Arch/aarch64/Processor.h b/Kernel/Arch/aarch64/Processor.h
index 163d5815ad..57a2b07c47 100644
--- a/Kernel/Arch/aarch64/Processor.h
+++ b/Kernel/Arch/aarch64/Processor.h
@@ -147,6 +147,11 @@ public:
         return current().m_in_critical;
     }
 
+    ALWAYS_INLINE static void verify_no_spinlocks_held()
+    {
+        VERIFY(!Processor::in_critical());
+    }
+
     // FIXME: Actually return the idle thread once aarch64 supports threading.
     ALWAYS_INLINE static Thread* idle_thread()
     {
diff --git a/Kernel/Arch/x86/Processor.h b/Kernel/Arch/x86/Processor.h
index 6cb8dffd69..64209ffbad 100644
--- a/Kernel/Arch/x86/Processor.h
+++ b/Kernel/Arch/x86/Processor.h
@@ -370,6 +370,11 @@ public:
         return read_gs_ptr(__builtin_offsetof(Processor, m_in_critical));
     }
 
+    ALWAYS_INLINE static void verify_no_spinlocks_held()
+    {
+        VERIFY(!Processor::in_critical());
+    }
+
     ALWAYS_INLINE static FPUState const& clean_fpu_state() { return s_clean_fpu_state; }
 
     static void smp_enable();
diff --git a/Kernel/Heap/kmalloc.cpp b/Kernel/Heap/kmalloc.cpp
index 5925566e9e..e158ca6fc5 100644
--- a/Kernel/Heap/kmalloc.cpp
+++ b/Kernel/Heap/kmalloc.cpp
@@ -410,14 +410,6 @@ void kmalloc_enable_expand()
     g_kmalloc_global->enable_expansion();
 }
 
-static inline void kmalloc_verify_nospinlock_held()
-{
-    // Catch bad callers allocating under spinlock.
-    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
-        VERIFY(!Processor::in_critical());
-    }
-}
-
 UNMAP_AFTER_INIT void kmalloc_init()
 {
     // Zero out heap since it's placed after end_of_kernel_bss.
@@ -429,7 +421,11 @@ UNMAP_AFTER_INIT void kmalloc_init()
 
 void* kmalloc(size_t size)
 {
-    kmalloc_verify_nospinlock_held();
+    // Catch bad callers allocating under spinlock.
+    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
+        Processor::verify_no_spinlocks_held();
+    }
+
     SpinlockLocker lock(s_lock);
     ++g_kmalloc_call_count;
 
@@ -472,7 +468,11 @@ void kfree_sized(void* ptr, size_t size)
 
     VERIFY(size > 0);
 
-    kmalloc_verify_nospinlock_held();
+    // Catch bad callers freeing under spinlock.
+    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
+        Processor::verify_no_spinlocks_held();
+    }
+
     SpinlockLocker lock(s_lock);
     ++g_kfree_call_count;
     ++g_nested_kfree_calls;
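
For readers unfamiliar with the check being moved, here is a minimal userspace sketch of the pattern, assuming simplified stand-ins: Processor, SpinlockLocker, VERIFY, and my_kmalloc below are not the kernel's real definitions (in particular, the real m_in_critical is per-CPU state read via gs on x86, and VERIFY is the kernel's assertion macro), but the control flow mirrors kmalloc()'s new shape.

    #include <cassert>
    #include <cstddef>
    #include <new>

    // Stand-in for the kernel's VERIFY(); here it simply aborts on failure.
    #define VERIFY(expr) assert(expr)

    // Simplified stand-in for Kernel/Arch/*/Processor.h. A single static
    // counter is enough to illustrate the assertion.
    struct Processor {
        static inline unsigned s_in_critical = 0;

        static unsigned in_critical() { return s_in_critical; }
        static void enter_critical() { ++s_in_critical; }
        static void leave_critical() { --s_in_critical; }

        // The helper this diff introduces on both architectures.
        static void verify_no_spinlocks_held() { VERIFY(!Processor::in_critical()); }
    };

    // Stand-in guard: acquiring a spinlock enters a critical section.
    struct SpinlockLocker {
        SpinlockLocker() { Processor::enter_critical(); }
        ~SpinlockLocker() { Processor::leave_critical(); }
    };

    constexpr bool KMALLOC_VERIFY_NO_SPINLOCK_HELD = true;

    // Hypothetical allocator entry point mirroring kmalloc()'s new shape.
    void* my_kmalloc(std::size_t size)
    {
        // Catch bad callers allocating under spinlock.
        if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD)
            Processor::verify_no_spinlocks_held();
        return ::operator new(size);
    }

    int main()
    {
        ::operator delete(my_kmalloc(16)); // fine: no spinlock held

        SpinlockLocker locker;
        my_kmalloc(16); // trips the VERIFY: allocating inside a critical section
    }

Note the if constexpr guard compiles the check out entirely when KMALLOC_VERIFY_NO_SPINLOCK_HELD is false, so the diff preserves the zero-cost property of the old kmalloc_verify_nospinlock_held() helper; hoisting the VERIFY itself into Processor presumably also lets call sites outside kmalloc.cpp reuse the same assertion.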