LibC: Also mark empty-but-kept-around BigAllocationBlocks as PROT_NONE
This extends the opportunistic protection of empty-but-kept-around blocks to also cover BigAllocationBlocks. Since we only cache 4KB BABs at the moment, this sees limited use, but it does work.
parent 05c65fb4f1
commit d422c46ebb
1 changed file with 10 additions and 0 deletions
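To make the pattern concrete outside of LibC, here is a minimal standalone sketch of the same quarantine technique using plain POSIX mmap/mprotect. The page cache, its max_cached_pages limit, and the acquire_page/release_page helpers are hypothetical stand-ins for the allocator's BigAllocationBlock cache; set_mmap_name and ASSERT_NOT_REACHED are Serenity-specific, so perror/abort stand in for them here.

#include <cstdio>
#include <cstdlib>
#include <sys/mman.h>
#include <unistd.h>
#include <vector>

// Hypothetical keep-around limit, standing in for
// number_of_big_blocks_to_keep_around_per_size_class.
static constexpr size_t max_cached_pages = 8;
static std::vector<void*> s_cached_pages;

void* acquire_page(size_t page_size)
{
    if (!s_cached_pages.empty()) {
        void* page = s_cached_pages.back();
        s_cached_pages.pop_back();
        // The cached page is PROT_NONE; restore access before handing it out.
        if (mprotect(page, page_size, PROT_READ | PROT_WRITE) < 0) {
            perror("mprotect");
            abort();
        }
        return page;
    }
    void* page = mmap(nullptr, page_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (page == MAP_FAILED) {
        perror("mmap");
        abort();
    }
    return page;
}

void release_page(void* page, size_t page_size)
{
    if (s_cached_pages.size() < max_cached_pages) {
        s_cached_pages.push_back(page);
        // Keep the page around for reuse, but make any stale access fault
        // loudly instead of silently corrupting recycled memory.
        if (mprotect(page, page_size, PROT_NONE) < 0) {
            perror("mprotect");
            abort();
        }
        return;
    }
    munmap(page, page_size);
}

int main()
{
    auto page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    void* p = acquire_page(page_size);
    release_page(p, page_size);
    // Dereferencing p here would crash: the cached page is now PROT_NONE.
    void* q = acquire_page(page_size); // likely recycles p, writable again
    *static_cast<char*>(q) = 1;
    release_page(q, page_size);
}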
@@ -161,6 +161,11 @@ void* malloc(size_t size)
     if (auto* allocator = big_allocator_for_size(real_size)) {
         if (!allocator->blocks.is_empty()) {
             auto* block = allocator->blocks.take_last();
+            if (mprotect(block, real_size, PROT_READ | PROT_WRITE) < 0) {
+                perror("mprotect");
+                ASSERT_NOT_REACHED();
+            }
+            set_mmap_name(block, PAGE_SIZE, "malloc: BigAllocationBlock (reused)");
             return &block->m_slot[0];
         }
     }
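On this reuse path, access is restored with mprotect before the block's slot pointer is returned, since the block was made PROT_NONE when it entered the cache and touching it would otherwise fault. A failed mprotect is treated as unrecoverable, matching the allocator's existing perror-and-assert style.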
@@ -236,6 +241,11 @@ void free(void* ptr)
     if (auto* allocator = big_allocator_for_size(block->m_size)) {
         if (allocator->blocks.size() < number_of_big_blocks_to_keep_around_per_size_class) {
             allocator->blocks.append(block);
+            set_mmap_name(block, PAGE_SIZE, "malloc: BigAllocationBlock (free)");
+            if (mprotect(block, PAGE_SIZE, PROT_NONE) < 0) {
+                perror("mprotect");
+                ASSERT_NOT_REACHED();
+            }
             return;
         }
     }
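On the free path, the block is renamed to "malloc: BigAllocationBlock (free)" and then protected; once it is PROT_NONE, any stale dereference faults, and the region name in the process memory map makes the resulting crash self-describing as a use-after-free on a quarantined block.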