Kernel: Allow preventing kmalloc and kfree

For "destructive" disallowance of allocations throughout the system, Thread gains a member that controls whether allocations are currently allowed or not. kmalloc checks this member on both allocations and deallocations (with the exception of early boot) and panics the kernel if allocations are disabled. This lets critical sections that must not allocate fail fast, making for easier debugging.

PS: My first proper Kernel commit :^)
Parent: b8d640c3f9
Commit: e2c9578390
2 changed files with 12 additions and 2 deletions
@@ -362,8 +362,12 @@ void* kmalloc(size_t size)
     Thread* current_thread = Thread::current();
     if (!current_thread)
         current_thread = Processor::idle_thread();
-    if (current_thread)
+    if (current_thread) {
+        // FIXME: By the time we check this, we have already allocated above.
+        // This means that in the case of an infinite recursion, we can't catch it this way.
+        VERIFY(current_thread->is_allocation_enabled());
         PerformanceManager::add_kmalloc_perf_event(*current_thread, size, (FlatPtr)ptr);
+    }
 
     return ptr;
 }
@@ -384,8 +388,10 @@ void kfree_sized(void* ptr, size_t size)
         Thread* current_thread = Thread::current();
         if (!current_thread)
             current_thread = Processor::idle_thread();
-        if (current_thread)
+        if (current_thread) {
+            VERIFY(current_thread->is_allocation_enabled());
             PerformanceManager::add_kfree_perf_event(*current_thread, 0, (FlatPtr)ptr);
+        }
     }
 
     g_kmalloc_global->deallocate(ptr, size);
@@ -1244,6 +1244,9 @@ public:
     bool is_promise_violation_pending() const { return m_is_promise_violation_pending; }
     void set_promise_violation_pending(bool value) { m_is_promise_violation_pending = value; }
 
+    bool is_allocation_enabled() const { return m_allocation_enabled; }
+    void set_allocation_enabled(bool value) { m_allocation_enabled = value; }
+
     String backtrace();
 
 private:
@@ -1348,6 +1351,7 @@ private:
     u32 m_lock_requested_count { 0 };
     IntrusiveListNode<Thread> m_blocked_threads_list_node;
     LockRank m_lock_rank_mask { LockRank::None };
+    bool m_allocation_enabled { true };
 
 #if LOCK_DEBUG
     struct HoldingLockInfo {