Mirror of https://github.com/RGBCube/serenity, synced 2025-07-26 05:17:34 +00:00
Kernel: Add initial basic support for KASAN
This commit adds minimal support for compiler-instrumentation-based memory access sanitization. Currently we only support detection of kmalloc redzone accesses and kmalloc use-after-free accesses. Support for inline checks (for improved performance) and for stack use-after-return detection is left for future PRs.
commit f7a1f28d7f
parent 7ad7ae7000
10 changed files with 538 additions and 63 deletions
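
The commit message above describes a shadow-memory scheme: the AddressSanitizer::mark_region() and fill_shadow() calls added throughout the diff below record, for each piece of kernel heap, whether it is live, a redzone past the end of an allocation, or freed, and the compiler-inserted checks consult that record on every instrumented load and store. The following userspace toy sketch only illustrates the idea under simplifying assumptions; the ShadowMap type, the shadow byte values, and the granule-only bookkeeping are hypothetical and are not the kernel's actual implementation.

// Toy model of KASAN-style shadow tracking (illustration only, not the
// kernel's implementation). Every 8-byte granule of "heap" memory maps to
// one shadow byte that records whether accesses to it are allowed. The real
// scheme also encodes partially addressable granules; omitted here.
#include <cstdint>
#include <cstdio>
#include <cstring>

enum class ShadowState : uint8_t {
    Unpoisoned = 0x00, // accesses allowed
    Malloc = 0xFA,     // redzone past the end of a live allocation (hypothetical value)
    Free = 0xFD,       // freed memory; any access is a use-after-free (hypothetical value)
};

constexpr size_t GRANULE = 8;

struct ShadowMap {
    uintptr_t base { 0 };
    uint8_t shadow[1024] {}; // covers 8 KiB of toy heap

    // Mirrors the two-size pattern in the diff: the first `used` bytes stay
    // addressable, and the tail of the rounded-up `total` region becomes a
    // redzone of the given type.
    void mark_region(uintptr_t addr, size_t used, size_t total, ShadowState redzone)
    {
        size_t first = (addr - base) / GRANULE;
        size_t used_granules = (used + GRANULE - 1) / GRANULE;
        size_t total_granules = (total + GRANULE - 1) / GRANULE;
        memset(&shadow[first], static_cast<int>(ShadowState::Unpoisoned), used_granules);
        memset(&shadow[first + used_granules], static_cast<int>(redzone), total_granules - used_granules);
    }

    // Poisons (or unpoisons) a whole range with one shadow state.
    void fill_shadow(uintptr_t addr, size_t size, ShadowState state)
    {
        memset(&shadow[(addr - base) / GRANULE], static_cast<int>(state), (size + GRANULE - 1) / GRANULE);
    }

    // What a compiler-inserted outline check boils down to.
    bool access_ok(uintptr_t addr) const
    {
        return shadow[(addr - base) / GRANULE] == static_cast<uint8_t>(ShadowState::Unpoisoned);
    }
};

int main()
{
    alignas(GRANULE) static uint8_t heap[8192];
    ShadowMap map;
    map.base = reinterpret_cast<uintptr_t>(heap);

    // "kmalloc(20)" served from a 32-byte chunk: the trailing granule becomes a redzone.
    map.mark_region(map.base, 20, 32, ShadowState::Malloc);
    printf("in-bounds load ok: %d\n", map.access_ok(map.base + 8));  // 1
    printf("redzone load ok:   %d\n", map.access_ok(map.base + 28)); // 0 -> would be reported

    // "kfree()": the whole chunk stays poisoned until it is handed out again.
    map.fill_shadow(map.base, 32, ShadowState::Free);
    printf("UAF load ok:       %d\n", map.access_ok(map.base + 8));  // 0 -> would be reported
}

With outline instrumentation (as in this commit), the compiler emits calls such as __asan_load8()/__asan_store8() around memory accesses; each callback consults the shadow byte for the target address roughly the way access_ok() does above and reports an error when that byte is poisoned.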
Kernel/Heap/Heap.h

@@ -11,6 +11,7 @@
 #include <AK/TemporaryChange.h>
 #include <AK/Vector.h>
 #include <AK/kmalloc.h>
+#include <Kernel/Security/AddressSanitizer.h>
 
 namespace Kernel {
 
@@ -68,7 +69,7 @@ public:
         return needed_chunks * CHUNK_SIZE + (needed_chunks + 7) / 8;
     }
 
-    void* allocate(size_t size, size_t alignment, CallerWillInitializeMemory caller_will_initialize_memory)
+    void* allocate(size_t size, size_t alignment, [[maybe_unused]] CallerWillInitializeMemory caller_will_initialize_memory)
     {
         // The minimum possible alignment is CHUNK_SIZE, since we only track chunks here, nothing smaller.
         if (alignment < CHUNK_SIZE)
@@ -104,17 +105,23 @@
         VERIFY(first_chunk.value() <= aligned_first_chunk);
         VERIFY(aligned_first_chunk + chunks_needed <= first_chunk.value() + chunks_needed + chunk_alignment);
 
+#ifdef HAS_ADDRESS_SANITIZER
+        AddressSanitizer::mark_region((FlatPtr)a, real_size, (chunks_needed * CHUNK_SIZE), AddressSanitizer::ShadowType::Malloc);
+#endif
+
         u8* ptr = a->data;
         a->allocation_size_in_chunks = chunks_needed;
 
         m_bitmap.set_range_and_verify_that_all_bits_flip(aligned_first_chunk, chunks_needed, true);
 
         m_allocated_chunks += chunks_needed;
+#ifndef HAS_ADDRESS_SANITIZER
         if (caller_will_initialize_memory == CallerWillInitializeMemory::No) {
             if constexpr (HEAP_SCRUB_BYTE_ALLOC != 0) {
                 __builtin_memset(ptr, HEAP_SCRUB_BYTE_ALLOC, (chunks_needed * CHUNK_SIZE) - sizeof(AllocationHeader));
             }
         }
+#endif
 
         VERIFY((FlatPtr)ptr % alignment == 0);
         return ptr;
@@ -137,9 +144,13 @@ public:
         VERIFY(m_allocated_chunks >= a->allocation_size_in_chunks);
         m_allocated_chunks -= a->allocation_size_in_chunks;
 
+#ifdef HAS_ADDRESS_SANITIZER
+        AddressSanitizer::fill_shadow((FlatPtr)a, a->allocation_size_in_chunks * CHUNK_SIZE, AddressSanitizer::ShadowType::Free);
+#else
         if constexpr (HEAP_SCRUB_BYTE_FREE != 0) {
             __builtin_memset(a, HEAP_SCRUB_BYTE_FREE, a->allocation_size_in_chunks * CHUNK_SIZE);
         }
+#endif
     }
 
     bool contains(void const* ptr) const
Kernel/Heap/kmalloc.cpp

@@ -16,6 +16,7 @@
 #include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Sections.h>
+#include <Kernel/Security/AddressSanitizer.h>
 #include <Kernel/Tasks/PerformanceManager.h>
 
 #if ARCH(X86_64) || ARCH(AARCH64) || ARCH(RISCV64)
@@ -65,11 +66,18 @@ public:
         }
     }
 
-    void* allocate()
+    void* allocate([[maybe_unused]] size_t requested_size)
    {
         VERIFY(m_freelist);
         ++m_allocated_slabs;
-        return exchange(m_freelist, m_freelist->next);
+#ifdef HAS_ADDRESS_SANITIZER
+        AddressSanitizer::fill_shadow((FlatPtr)m_freelist, sizeof(FreelistEntry::next), Kernel::AddressSanitizer::ShadowType::Unpoisoned8Bytes);
+#endif
+        auto* ptr = exchange(m_freelist, m_freelist->next);
+#ifdef HAS_ADDRESS_SANITIZER
+        AddressSanitizer::mark_region((FlatPtr)ptr, requested_size, m_slab_size, AddressSanitizer::ShadowType::Malloc);
+#endif
+        return ptr;
     }
 
     void deallocate(void* ptr)
@@ -77,7 +85,13 @@ public:
         VERIFY(ptr >= &m_data && ptr < ((u8*)this + block_size));
         --m_allocated_slabs;
         auto* freelist_entry = (FreelistEntry*)ptr;
+#ifdef HAS_ADDRESS_SANITIZER
+        AddressSanitizer::fill_shadow((FlatPtr)freelist_entry, sizeof(FreelistEntry::next), Kernel::AddressSanitizer::ShadowType::Unpoisoned8Bytes);
+#endif
         freelist_entry->next = m_freelist;
+#ifdef HAS_ADDRESS_SANITIZER
+        AddressSanitizer::fill_shadow((FlatPtr)freelist_entry, m_slab_size, AddressSanitizer::ShadowType::Free);
+#endif
         m_freelist = freelist_entry;
     }
 
@@ -122,7 +136,7 @@ public:
 
     size_t slab_size() const { return m_slab_size; }
 
-    void* allocate(CallerWillInitializeMemory caller_will_initialize_memory)
+    void* allocate(size_t requested_size, [[maybe_unused]] CallerWillInitializeMemory caller_will_initialize_memory)
     {
         if (m_usable_blocks.is_empty()) {
             // FIXME: This allocation wastes `block_size` bytes due to the implementation of kmalloc_aligned().
@@ -136,19 +150,23 @@ public:
             m_usable_blocks.append(*block);
         }
         auto* block = m_usable_blocks.first();
-        auto* ptr = block->allocate();
+        auto* ptr = block->allocate(requested_size);
         if (block->is_full())
             m_full_blocks.append(*block);
 
+#ifndef HAS_ADDRESS_SANITIZER
         if (caller_will_initialize_memory == CallerWillInitializeMemory::No) {
             memset(ptr, KMALLOC_SCRUB_BYTE, m_slab_size);
         }
+#endif
         return ptr;
     }
 
     void deallocate(void* ptr)
     {
+#ifndef HAS_ADDRESS_SANITIZER
         memset(ptr, KFREE_SCRUB_BYTE, m_slab_size);
+#endif
 
         auto* block = (KmallocSlabBlock*)((FlatPtr)ptr & KmallocSlabBlock::block_mask);
         bool block_was_full = block->is_full();
@@ -227,7 +245,7 @@ struct KmallocGlobalData {
 
         for (auto& slabheap : slabheaps) {
             if (size <= slabheap.slab_size() && alignment <= slabheap.slab_size())
-                return slabheap.allocate(caller_will_initialize_memory);
+                return slabheap.allocate(size, caller_will_initialize_memory);
        }
 
         for (auto& subheap : subheaps) {