Kernel: Add the ability to verify we don't kmalloc under spinlock.
Ideally we would never allocate under a spinlock, as doing so has many performance pitfalls and potential functionality pitfalls (deadlock). We violate that rule in many places today, but we need a tool to track them all down and fix them. This change introduces a new macro option named `KMALLOC_VERIFY_NO_SPINLOCK_HELD` which can catch these situations at runtime via an assert.
parent 6329e9fce6
commit 9c38475608
3 changed files with 18 additions and 0 deletions
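To make concrete what the new check catches: with the option enabled, a call site like the following hypothetical sketch (the lock and function names are illustrative, not from this commit) would fail the new VERIFY the moment it allocates, because the calling processor is still inside the spinlock's critical section:

static SpinLock<u8> s_example_lock; // hypothetical lock, for illustration only

void example_function()
{
    ScopedSpinLock lock(s_example_lock);

    // BAD: allocating while a spinlock is held. kmalloc() now calls
    // kmalloc_verify_nospinlock_held(), which sees that this processor
    // is in a critical section and fails its VERIFY().
    auto* buffer = kmalloc(128);

    // ...
    kfree(buffer);
}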
@@ -134,6 +134,10 @@
 #cmakedefine01 KMALLOC_DEBUG
 #endif
 
+#ifndef KMALLOC_VERIFY_NO_SPINLOCK_HELD
+#cmakedefine01 KMALLOC_VERIFY_NO_SPINLOCK_HELD
+#endif
+
 #ifndef LOCAL_SOCKET_DEBUG
 #cmakedefine01 LOCAL_SOCKET_DEBUG
 #endif
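This first hunk is against the template for the generated debug-macro header (presumably AK/Debug.h.in). CMake's configure_file() rewrites each #cmakedefine01 NAME line to #define NAME 1 or #define NAME 0 depending on whether the corresponding CMake variable is set, so the macro is always defined and usable in if constexpr; the surrounding #ifndef guard additionally lets a build predefine the macro on the compiler command line. After configuration with the option enabled, the generated header would contain something like:

// Sketch of the configured output in the generated Debug.h:
#define KMALLOC_VERIFY_NO_SPINLOCK_HELD 1

and with the option disabled, the same line becomes #define KMALLOC_VERIFY_NO_SPINLOCK_HELD 0.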
@@ -204,6 +204,14 @@ void kmalloc_enable_expand()
     g_kmalloc_global->allocate_backup_memory();
 }
 
+static inline void kmalloc_verify_nospinlock_held()
+{
+    // Catch bad callers allocating under spinlock.
+    if constexpr (KMALLOC_VERIFY_NO_SPINLOCK_HELD) {
+        VERIFY(!Processor::current().in_critical());
+    }
+}
+
 UNMAP_AFTER_INIT void kmalloc_init()
 {
     // Zero out heap since it's placed after end_of_kernel_bss.
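The helper works because, in SerenityOS, acquiring a spinlock enters a critical section on the current processor, so Processor::current().in_critical() is non-zero whenever a spinlock is held. (The check is conservative: it also fires inside critical sections entered without a spinlock.) A simplified model of that relationship, as a sketch only — the real implementation lives in Kernel/SpinLock.h and differs in detail:

// Illustrative model: why holding a spinlock implies in_critical().
class SketchSpinLock {
public:
    void lock()
    {
        // Acquiring the lock bumps the processor's critical-section
        // counter; in_critical() reports that counter.
        Processor::current().enter_critical(m_prev_flags);
        while (m_lock.exchange(1, AK::memory_order_acquire) != 0)
            ; // spin until the holder releases
    }

    void unlock()
    {
        m_lock.store(0, AK::memory_order_release);
        Processor::current().leave_critical(m_prev_flags);
    }

private:
    Atomic<u8> m_lock { 0 };
    u32 m_prev_flags { 0 };
};

Because #cmakedefine01 guarantees KMALLOC_VERIFY_NO_SPINLOCK_HELD is always defined to 0 or 1, the if constexpr body is type-checked in every build but compiles to nothing when the option is off.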
@@ -219,6 +227,8 @@ UNMAP_AFTER_INIT void kmalloc_init()
 
 void* kmalloc_eternal(size_t size)
 {
+    kmalloc_verify_nospinlock_held();
+
     size = round_up_to_power_of_two(size, sizeof(void*));
 
     ScopedSpinLock lock(s_lock);
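Note the placement: the check runs before the function takes its own ScopedSpinLock on s_lock. That ordering matters, since acquiring s_lock itself enters a critical section; if the check came after the lock, the VERIFY would fail on every call. The remaining hunks below apply the same pattern to kmalloc, kfree, and krealloc.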
@@ -231,6 +241,7 @@ void* kmalloc_eternal(size_t size)
 
 void* kmalloc(size_t size)
 {
+    kmalloc_verify_nospinlock_held();
     ScopedSpinLock lock(s_lock);
     ++g_kmalloc_call_count;
 
@@ -252,6 +263,7 @@ void kfree(void* ptr)
     if (!ptr)
         return;
 
+    kmalloc_verify_nospinlock_held();
     ScopedSpinLock lock(s_lock);
     ++g_kfree_call_count;
 
@@ -260,6 +272,7 @@ void kfree(void* ptr)
 
 void* krealloc(void* ptr, size_t new_size)
 {
+    kmalloc_verify_nospinlock_held();
    ScopedSpinLock lock(s_lock);
     return g_kmalloc_global->m_heap.reallocate(ptr, new_size);
 }
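As for the deadlock pitfall the commit message mentions: kmalloc is not a leaf operation. It takes s_lock, and when the heap is exhausted it can expand (see kmalloc_enable_expand and allocate_backup_memory above), calling back into the wider memory-management subsystem. A hypothetical chain, with purely illustrative names, shows how that turns a held spinlock into a lockup:

// Hypothetical failure mode (names illustrative):
//
//   driver_function():
//     ScopedSpinLock lock(s_driver_lock);  // caller holds a spinlock
//     kmalloc(...)                         // heap happens to be full
//       -> heap expansion path
//         -> memory-management locks / work that must not run
//            inside a critical section
//              -> deadlock or hang while s_driver_lock is held
//
// Even when no deadlock occurs, every other CPU that wants
// s_driver_lock spins for the entire duration of the allocation.

The runtime assert turns this latent hazard into an immediate, debuggable failure at the offending call site.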
@@ -65,6 +65,7 @@ set(PORTABLE_IMAGE_LOADER_DEBUG ON)
 set(SYNTAX_HIGHLIGHTING_DEBUG ON)
 set(KEYBOARD_SHORTCUTS_DEBUG ON)
 set(KMALLOC_DEBUG ON)
+set(KMALLOC_VERIFY_NO_SPINLOCK_HELD ON)
 set(MARKDOWN_DEBUG ON)
 set(REGEX_DEBUG ON)
 set(TLS_DEBUG ON)
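This last hunk registers the new option in the catch-all debug-macro list (presumably Meta/CMake/all_the_debug_macros.cmake), which exists so that a build can switch every debug macro on at once and keep all conditionally compiled code building; listing KMALLOC_VERIFY_NO_SPINLOCK_HELD here gives the new check that same coverage.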