LibJS: Never give back virtual memory once it belongs to a cell type
Instead of returning HeapBlock memory to the kernel (or a non-type-specific shared cache), we now keep a BlockAllocator per CellAllocator and implement "deallocation" by basically informing the kernel that we don't need the physical memory right now.

This is done with MADV_FREE or MADV_DONTNEED, if available, but for other platforms (including SerenityOS) we munmap and then re-mmap the memory to achieve the same effect. It's definitely clunky, so I've added a FIXME about implementing the madvise options on SerenityOS too.

The important outcome of this change is that GC types that use a type-specific allocator become immune to use-after-free type confusion attacks, since their virtual addresses will only ever be re-used for the same exact type again and again.

Fixes #22274
parent bcb1e548f1
commit b6d4eea7ac
10 changed files with 47 additions and 36 deletions
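Before the diff itself, here is a minimal standalone sketch of the decommit strategy the commit message describes. `decommit_block` is an illustrative name, not a function from this commit, and errors simply `abort()` instead of using the codebase's `VERIFY_NOT_REACHED()`:

```cpp
#include <cstdio>
#include <cstdlib>
#include <sys/mman.h>

// Hypothetical helper mirroring the commit's strategy: release the physical
// pages back to the kernel while keeping the virtual address range mapped.
static void decommit_block(void* block, size_t size)
{
#if defined(MADV_FREE)
    // The kernel may reclaim the pages lazily; the mapping itself stays intact.
    if (madvise(block, size, MADV_FREE) < 0) {
        perror("madvise(MADV_FREE)");
        abort();
    }
#elif defined(MADV_DONTNEED)
    // Pages are reclaimed eagerly; later reads see fresh zero-filled pages.
    if (madvise(block, size, MADV_DONTNEED) < 0) {
        perror("madvise(MADV_DONTNEED)");
        abort();
    }
#else
    // Fallback (e.g. SerenityOS today): unmap, then immediately remap the
    // same range. MAP_FIXED pins the new mapping to the old address, so the
    // virtual addresses survive while the physical pages are released.
    if (munmap(block, size) < 0) {
        perror("munmap");
        abort();
    }
    if (mmap(block, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0) != block) {
        perror("mmap");
        abort();
    }
#endif
}
```

The key property shared by all three branches is that the virtual address range stays reserved in the process, so it can never be handed out to a different cell type later.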
Userland/Libraries/LibJS/Heap/BlockAllocator.cpp

```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2021-2023, Andreas Kling <kling@serenityos.org>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
```
```diff
@@ -15,20 +15,24 @@
 #    include <sanitizer/asan_interface.h>
 #endif
 
+// FIXME: Implement MADV_FREE and/or MADV_DONTNEED on SerenityOS.
+#if defined(AK_OS_SERENITY) || (!defined(MADV_FREE) && !defined(MADV_DONTNEED))
+#    define USE_FALLBACK_BLOCK_DEALLOCATION
+#endif
+
 namespace JS {
 
+// NOTE: If this changes, we need to update the mmap() code to ensure correct alignment.
+static_assert(HeapBlock::block_size == 4096);
+
 BlockAllocator::~BlockAllocator()
 {
     for (auto* block : m_blocks) {
         ASAN_UNPOISON_MEMORY_REGION(block, HeapBlock::block_size);
-#ifdef AK_OS_SERENITY
         if (munmap(block, HeapBlock::block_size) < 0) {
             perror("munmap");
             VERIFY_NOT_REACHED();
         }
-#else
-        free(block);
-#endif
     }
 }
 
```
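A side note on the static_assert introduced above: mmap() hands back page-aligned memory, so on platforms with 4 KiB pages, keeping block_size equal to 4096 is what guarantees every HeapBlock is block_size-aligned. A HeapBlock::from_cell-style lookup relies on exactly that alignment; here is a sketch of the masking trick, with illustrative names:

```cpp
#include <cstdint>

// Illustrative only: with block_size-aligned blocks, the owning block of any
// interior cell pointer is recoverable by masking off the low bits. This is
// why block_size must stay in sync with the mmap() page size.
constexpr std::uintptr_t block_size = 4096;

inline void* block_base_of(void const* cell)
{
    return reinterpret_cast<void*>(
        reinterpret_cast<std::uintptr_t>(cell) & ~(block_size - 1));
}
```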
```diff
@@ -50,28 +54,38 @@ void* BlockAllocator::allocate_block([[maybe_unused]] char const* name)
 
 #ifdef AK_OS_SERENITY
     auto* block = (HeapBlock*)serenity_mmap(nullptr, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_RANDOMIZED | MAP_PRIVATE, 0, 0, HeapBlock::block_size, name);
-    VERIFY(block != MAP_FAILED);
 #else
-    auto* block = (HeapBlock*)aligned_alloc(HeapBlock::block_size, HeapBlock::block_size);
-    VERIFY(block);
+    auto* block = (HeapBlock*)mmap(nullptr, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0);
 #endif
+    VERIFY(block != MAP_FAILED);
     return block;
 }
 
 void BlockAllocator::deallocate_block(void* block)
 {
     VERIFY(block);
-    if (m_blocks.size() >= max_cached_blocks) {
-#ifdef AK_OS_SERENITY
-        if (munmap(block, HeapBlock::block_size) < 0) {
-            perror("munmap");
-            VERIFY_NOT_REACHED();
-        }
-#else
-        free(block);
-#endif
-        return;
-    }
+
+#if defined(USE_FALLBACK_BLOCK_DEALLOCATION)
+    // If we can't use any of the nicer techniques, unmap and remap the block to return the physical pages while keeping the VM.
+    if (munmap(block, HeapBlock::block_size) < 0) {
+        perror("munmap");
+        VERIFY_NOT_REACHED();
+    }
+    if (mmap(block, HeapBlock::block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0) != block) {
+        perror("mmap");
+        VERIFY_NOT_REACHED();
+    }
+#elif defined(MADV_FREE)
+    if (madvise(block, HeapBlock::block_size, MADV_FREE) < 0) {
+        perror("madvise(MADV_FREE)");
+        VERIFY_NOT_REACHED();
+    }
+#elif defined(MADV_DONTNEED)
+    if (madvise(block, HeapBlock::block_size, MADV_DONTNEED) < 0) {
+        perror("madvise(MADV_DONTNEED)");
+        VERIFY_NOT_REACHED();
+    }
+#endif
 
     ASAN_POISON_MEMORY_REGION(block, HeapBlock::block_size);
     m_blocks.append(block);
```
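To see the decommit property in isolation, here is a small self-contained experiment, assuming Linux/BSD-style MADV_DONTNEED semantics: after decommit the pointer is still valid, but the old contents are gone. (MADV_FREE is less directly observable, since the kernel frees the pages lazily and reads may return the old data until memory pressure hits.)

```cpp
#include <cassert>
#include <sys/mman.h>

int main()
{
    constexpr size_t block_size = 4096;
    auto* block = static_cast<unsigned char*>(
        mmap(nullptr, block_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
    assert(block != MAP_FAILED);

    block[0] = 0x42; // dirty the page so it has physical backing

#ifdef MADV_DONTNEED
    int rc = madvise(block, block_size, MADV_DONTNEED);
    assert(rc == 0);
    (void)rc;
    // Same virtual address, still mapped and usable, but the anonymous page
    // was replaced by a zero-filled one: the old contents are gone.
    assert(block[0] == 0);
#endif

    munmap(block, block_size);
    return 0;
}
```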