Mirror of https://github.com/RGBCube/serenity, synced 2025-07-26 02:27:43 +00:00
LibC: Protect empty-but-kept-around ChunkedBlocks with PROT_NONE
We now keep a separate queue of empty ChunkedBlocks in each allocator. The underlying memory for each block is mprotect'ed with PROT_NONE to provoke crashes on use-after-free.

This is not going to catch *all* use-after-frees, but if it catches some, that's still pretty nice. :^)

The malloc memory region names are now updated to reflect their reuse status: "malloc: ChunkedBlock(size) (free/reused)"
parent 61f298faf3
commit ddd5411472
1 changed file with 22 additions and 7 deletions
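
For context, the use-after-free trap this commit relies on can be demonstrated in isolation. The sketch below is illustrative only and not part of the diff; it assumes a POSIX system with mmap/mprotect and uses a plain anonymous page as a stand-in for a ChunkedBlock:

// Standalone illustration (assumption: POSIX mmap/mprotect available).
// A page is mapped read/write, "freed" by revoking all access with
// PROT_NONE so any stale pointer dereference faults, and later made
// accessible again before reuse, mirroring the malloc()/free() paths
// in the diff below.
#include <cstdio>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
    long page_size = sysconf(_SC_PAGESIZE);
    void* block = mmap(nullptr, page_size, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (block == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    static_cast<char*>(block)[0] = 'x'; // fine while the block is live

    // "free": keep the mapping, but make every access fault.
    if (mprotect(block, page_size, PROT_NONE) < 0) {
        perror("mprotect");
        return 1;
    }
    // static_cast<char*>(block)[0]; // a use-after-free would crash here

    // "reuse": restore access before handing the block out again.
    if (mprotect(block, page_size, PROT_READ | PROT_WRITE) < 0) {
        perror("mprotect");
        return 1;
    }
    printf("block at %p is reusable again\n", block);
    return 0;
}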
@@ -89,6 +89,7 @@ struct ChunkedBlock : public CommonHeader
 struct Allocator {
     size_t size { 0 };
     size_t block_count { 0 };
+    ChunkedBlock* empty_block_queue { nullptr };
     InlineLinkedList<ChunkedBlock> usable_blocks;
     InlineLinkedList<ChunkedBlock> full_blocks;
 };
@@ -176,6 +177,20 @@ void* malloc(size_t size)
             break;
     }
 
+    if (!block && allocator->empty_block_queue) {
+        block = allocator->empty_block_queue;
+        int rc = mprotect(block, PAGE_SIZE, PROT_READ | PROT_WRITE);
+        if (rc < 0) {
+            perror("mprotect");
+            ASSERT_NOT_REACHED();
+        }
+        allocator->empty_block_queue = block->m_next;
+        char buffer[64];
+        snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu) (reused)", good_size);
+        set_mmap_name(block, PAGE_SIZE, buffer);
+        allocator->usable_blocks.append(block);
+    }
+
     if (!block) {
         char buffer[64];
         snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu)", good_size);
@@ -262,13 +277,13 @@ void free(void* ptr)
 #ifdef MALLOC_DEBUG
         dbgprintf("Keeping block %p around for size class %u\n", block, good_size);
 #endif
-        if (allocator->usable_blocks.tail() != block) {
-#ifdef MALLOC_DEBUG
-            dbgprintf("Moving block %p to tail of list for size class %u\n", block, good_size);
-#endif
-            allocator->usable_blocks.remove(block);
-            allocator->usable_blocks.append(block);
-        }
+        allocator->usable_blocks.remove(block);
+        block->m_next = allocator->empty_block_queue;
+        allocator->empty_block_queue = block;
+        char buffer[64];
+        snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu) (free)", good_size);
+        set_mmap_name(block, PAGE_SIZE, buffer);
+        mprotect(block, PAGE_SIZE, PROT_NONE);
         return;
     }
 #ifdef MALLOC_DEBUG
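
Taken together, the two code hunks thread empty blocks through a simple intrusive LIFO: free() pushes a block onto empty_block_queue via its m_next pointer and then seals the page with PROT_NONE, while malloc() reopens the head block and pops it. The following is a rough paraphrase only, with hypothetical simplified Block/Allocator stand-ins and the mprotect/set_mmap_name calls reduced to comments:

// Sketch only: simplified stand-ins, not the real LibC types.
struct Block {
    Block* m_next { nullptr };
};

struct Allocator {
    Block* empty_block_queue { nullptr };
};

// free() path: park a now-empty block at the head of the queue.
void park_empty_block(Allocator& allocator, Block* block)
{
    block->m_next = allocator.empty_block_queue;
    allocator.empty_block_queue = block;
    // real code then does: set_mmap_name(block, PAGE_SIZE, "... (free)");
    //                      mprotect(block, PAGE_SIZE, PROT_NONE);
}

// malloc() path: pop a parked block for reuse, if any.
Block* take_empty_block(Allocator& allocator)
{
    Block* block = allocator.empty_block_queue;
    if (!block)
        return nullptr;
    // real code first does: mprotect(block, PAGE_SIZE, PROT_READ | PROT_WRITE);
    //                       set_mmap_name(block, PAGE_SIZE, "... (reused)");
    allocator.empty_block_queue = block->m_next;
    return block;
}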