mirror of https://github.com/RGBCube/serenity
synced 2025-07-25 14:17:36 +00:00
LibC: Switch ChunkedBlock to IntrusiveList from InlineLinkedList
This commit is contained in:
parent 48da8a568d
commit e37f39d980
2 changed files with 21 additions and 22 deletions
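Both list types keep their link pointers inside the element itself, so linking and unlinking a ChunkedBlock never allocates. The difference is the interface: InlineLinkedList required ChunkedBlock to inherit from InlineLinkedListNode<ChunkedBlock> and carry raw m_prev/m_next pointers that callers walked via head()/next(), while IntrusiveList threads through an embedded IntrusiveListNode member, takes elements by reference, and supports range-for iteration. Below is a minimal standalone sketch of the intrusive-list idea itself; it is an illustration only, not AK's implementation, and Block is a hypothetical stand-in for ChunkedBlock.

// Minimal sketch of an intrusive doubly linked list (illustration only).
// The list owns no nodes; each element embeds its own link pointers,
// located via a pointer-to-member, so append/remove are O(1) and
// allocation-free.
#include <cstdio>

template<typename T>
struct ListNode {
    T* prev { nullptr };
    T* next { nullptr };
};

template<typename T, ListNode<T> T::*node>
class IntrusiveListSketch {
public:
    void append(T& element)
    {
        (element.*node).prev = m_tail;
        (element.*node).next = nullptr;
        if (m_tail)
            (m_tail->*node).next = &element;
        else
            m_head = &element;
        m_tail = &element;
    }

    void remove(T& element)
    {
        auto& links = element.*node;
        if (links.prev)
            (links.prev->*node).next = links.next;
        else
            m_head = links.next;
        if (links.next)
            (links.next->*node).prev = links.prev;
        else
            m_tail = links.prev;
        links.prev = links.next = nullptr;
    }

    T* head() const { return m_head; }
    T* next_of(T& element) const { return (element.*node).next; }

private:
    T* m_head { nullptr };
    T* m_tail { nullptr };
};

// Hypothetical stand-in for ChunkedBlock.
struct Block {
    int id { 0 };
    ListNode<Block> list_node;
};

int main()
{
    Block a { 1 }, b { 2 };
    IntrusiveListSketch<Block, &Block::list_node> list;
    list.append(a);
    list.append(b);
    list.remove(a); // unlink without freeing anything
    for (auto* block = list.head(); block; block = list.next_of(*block))
        printf("block %d\n", block->id);
    return 0;
}

AK's version layers iterators on top of the same layout, which is what enables the new range-for loop in malloc.cpp below.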
Userland/Libraries/LibC/malloc.cpp

@@ -5,7 +5,6 @@
  */
 
 #include <AK/Debug.h>
-#include <AK/InlineLinkedList.h>
 #include <AK/ScopedValueRollback.h>
 #include <AK/Vector.h>
 #include <LibELF/AuxiliaryVector.h>
@@ -100,8 +99,8 @@ static ChunkedBlock* s_cold_empty_blocks[number_of_cold_chunked_blocks_to_keep_around];
 struct Allocator {
     size_t size { 0 };
     size_t block_count { 0 };
-    InlineLinkedList<ChunkedBlock> usable_blocks;
-    InlineLinkedList<ChunkedBlock> full_blocks;
+    ChunkedBlock::List usable_blocks;
+    ChunkedBlock::List full_blocks;
 };
 
 struct BigAllocator {
@@ -221,11 +220,12 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
     }
 
     ChunkedBlock* block = nullptr;
-
-    for (block = allocator->usable_blocks.head(); block; block = block->next()) {
-        if (block->free_chunks())
+    for (auto& current : allocator->usable_blocks) {
+        if (current.free_chunks()) {
+            block = &current;
             break;
+        }
     }
 
     if (!block && s_hot_empty_block_count) {
         g_malloc_stats.number_of_hot_empty_block_hits++;
@@ -237,7 +237,7 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
             snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu)", good_size);
             set_mmap_name(block, ChunkedBlock::block_size, buffer);
         }
-        allocator->usable_blocks.append(block);
+        allocator->usable_blocks.append(*block);
     }
 
     if (!block && s_cold_empty_block_count) {
@@ -260,7 +260,7 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
             new (block) ChunkedBlock(good_size);
             ue_notify_chunk_size_changed(block, good_size);
         }
-        allocator->usable_blocks.append(block);
+        allocator->usable_blocks.append(*block);
     }
 
     if (!block) {
@@ -269,7 +269,7 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
        snprintf(buffer, sizeof(buffer), "malloc: ChunkedBlock(%zu)", good_size);
        block = (ChunkedBlock*)os_alloc(ChunkedBlock::block_size, buffer);
        new (block) ChunkedBlock(good_size);
-       allocator->usable_blocks.append(block);
+       allocator->usable_blocks.append(*block);
        ++allocator->block_count;
     }
 
@@ -285,8 +285,8 @@ static void* malloc_impl(size_t size, CallerWillInitializeMemory caller_will_initialize_memory)
     if (block->is_full()) {
         g_malloc_stats.number_of_blocks_full++;
         dbgln_if(MALLOC_DEBUG, "Block {:p} is now full in size class {}", block, good_size);
-        allocator->usable_blocks.remove(block);
-        allocator->full_blocks.append(block);
+        allocator->usable_blocks.remove(*block);
+        allocator->full_blocks.append(*block);
     }
     dbgln_if(MALLOC_DEBUG, "LibC: allocated {:p} (chunk in block {:p}, size {})", ptr, block, block->bytes_per_chunk());
 
@@ -353,8 +353,8 @@ static void free_impl(void* ptr)
         auto* allocator = allocator_for_size(block->m_size, good_size);
         dbgln_if(MALLOC_DEBUG, "Block {:p} no longer full in size class {}", block, good_size);
         g_malloc_stats.number_of_freed_full_blocks++;
-        allocator->full_blocks.remove(block);
-        allocator->usable_blocks.prepend(block);
+        allocator->full_blocks.remove(*block);
+        allocator->usable_blocks.prepend(*block);
     }
 
     ++block->m_free_chunks;
@@ -365,14 +365,14 @@ static void free_impl(void* ptr)
         if (s_hot_empty_block_count < number_of_hot_chunked_blocks_to_keep_around) {
             dbgln_if(MALLOC_DEBUG, "Keeping hot block {:p} around", block);
             g_malloc_stats.number_of_hot_keeps++;
-            allocator->usable_blocks.remove(block);
+            allocator->usable_blocks.remove(*block);
             s_hot_empty_blocks[s_hot_empty_block_count++] = block;
             return;
         }
         if (s_cold_empty_block_count < number_of_cold_chunked_blocks_to_keep_around) {
             dbgln_if(MALLOC_DEBUG, "Keeping cold block {:p} around", block);
             g_malloc_stats.number_of_cold_keeps++;
-            allocator->usable_blocks.remove(block);
+            allocator->usable_blocks.remove(*block);
             s_cold_empty_blocks[s_cold_empty_block_count++] = block;
             mprotect(block, ChunkedBlock::block_size, PROT_NONE);
             madvise(block, ChunkedBlock::block_size, MADV_SET_VOLATILE);
@@ -380,7 +380,7 @@ static void free_impl(void* ptr)
         }
         dbgln_if(MALLOC_DEBUG, "Releasing block {:p} for size class {}", block, good_size);
         g_malloc_stats.number_of_frees++;
-        allocator->usable_blocks.remove(block);
+        allocator->usable_blocks.remove(*block);
         --allocator->block_count;
         os_free(block, ChunkedBlock::block_size);
     }
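One consequence visible above: IntrusiveList is consumed through iterators rather than head()/next(), so the first-fit search now binds the matching block explicitly inside the loop. The same idiom in isolation, as a hedged sketch (Serenity's AK headers assumed; Block and find_usable_block are hypothetical stand-ins for the real code):

#include <AK/IntrusiveList.h>
#include <AK/Types.h>

// Hypothetical stand-in for ChunkedBlock, reduced to what the search needs.
struct Block {
    size_t m_free_chunks { 0 };
    IntrusiveListNode<Block> m_list_node;
    size_t free_chunks() const { return m_free_chunks; }
    using List = IntrusiveList<Block, RawPtr<Block>, &Block::m_list_node>;
};

// First-fit search: the range-for variable goes out of scope when the loop
// ends, so the match is handed out through a pointer declared outside it.
static Block* find_usable_block(Block::List& usable_blocks)
{
    for (auto& current : usable_blocks) {
        if (current.free_chunks())
            return &current;
    }
    return nullptr;
}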
Userland/Libraries/LibC/mallocdefs.h

@@ -6,7 +6,7 @@
 
 #pragma once
 
-#include <AK/InlineLinkedList.h>
+#include <AK/IntrusiveList.h>
 #include <AK/Types.h>
 
 #define MAGIC_PAGE_HEADER 0x42657274 // 'Bert'
@@ -45,9 +45,7 @@ struct FreelistEntry {
     FreelistEntry* next;
 };
 
-struct ChunkedBlock
-    : public CommonHeader
-    , public InlineLinkedListNode<ChunkedBlock> {
+struct ChunkedBlock : public CommonHeader {
 
     static constexpr size_t block_size = 64 * KiB;
     static constexpr size_t block_mask = ~(block_size - 1);
@@ -59,8 +57,7 @@ struct ChunkedBlock
         m_free_chunks = chunk_capacity();
     }
 
-    ChunkedBlock* m_prev { nullptr };
-    ChunkedBlock* m_next { nullptr };
+    IntrusiveListNode<ChunkedBlock> m_list_node;
     size_t m_next_lazy_freelist_index { 0 };
     FreelistEntry* m_freelist { nullptr };
    size_t m_free_chunks { 0 };
@@ -75,4 +72,6 @@ struct ChunkedBlock
     size_t free_chunks() const { return m_free_chunks; }
     size_t used_chunks() const { return chunk_capacity() - m_free_chunks; }
     size_t chunk_capacity() const { return (block_size - sizeof(ChunkedBlock)) / m_size; }
+
+    using List = IntrusiveList<ChunkedBlock, RawPtr<ChunkedBlock>, &ChunkedBlock::m_list_node>;
 };
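The new nested List alias is what malloc.cpp's Allocator uses for its usable_blocks and full_blocks members. Because a block embeds a single m_list_node, it can be linked into at most one of those lists at a time, and moving it between them is a pair of constant-time pointer updates. A hedged sketch of that pattern (AK headers assumed; Job, Queue, and mark_running are hypothetical names):

#include <AK/IntrusiveList.h>

struct Job {
    int priority { 0 };
    IntrusiveListNode<Job> m_queue_node;
    using Queue = IntrusiveList<Job, RawPtr<Job>, &Job::m_queue_node>;
};

// Move a job that is currently linked into `pending` over to `running`.
// Both calls just relink the embedded node: O(1) and allocation-free,
// mirroring how a full ChunkedBlock moves from usable_blocks to full_blocks.
static void mark_running(Job::Queue& pending, Job::Queue& running, Job& job)
{
    pending.remove(job);
    running.append(job);
}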