Mirror of https://github.com/RGBCube/serenity, synced 2025-05-31 08:08:12 +00:00
LibC: Simplify malloc size classes
Problem:
- `size_classes` is a C-style array, which makes it difficult to use in algorithms.
- The `all_of` algorithm is re-written for this specific implementation.

Solution:
- Change `size_classes` to be an `Array`.
- Directly use the generic `all_of` algorithm instead of reimplementing it.
parent c2ae6c189e
commit f91bcb8895
3 changed files with 12 additions and 18 deletions
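The heart of the change is replacing a C-style array (plus a separate element count and a hand-rolled consteval check) with a container that knows its own size and works with generic algorithms. As a rough standalone sketch of the same pattern, using the standard-library equivalents std::array and std::all_of (constexpr since C++20) in place of Serenity's actual AK::Array and AK::all_of:

#include <algorithm>
#include <array>

// Standard-library analogue of the change: std::array stands in for
// AK::Array, and std::all_of (constexpr since C++20) for AK::all_of.
static constexpr std::array<unsigned short, 13> size_classes { 8, 16, 32, 64, 128, 256, 504, 1016, 2032, 4088, 8184, 16376, 32752 };
static constexpr auto malloc_alignment = 8;

// The whole alignment check is one static_assert over the container;
// no hand-rolled consteval helper and no separate num_size_classes.
static_assert(std::all_of(size_classes.begin(), size_classes.end(),
    [](auto val) { return val % malloc_alignment == 0; }));

Because the container carries its own size, every use of num_size_classes in the hunks below can become size_classes.size().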
@@ -68,7 +68,7 @@ void MallocTracer::target_did_malloc(Badge<Emulator>, FlatPtr address, size_t si
         }));
     auto& malloc_data = *mmap_region.malloc_metadata();
 
-    bool is_chunked_block = malloc_data.chunk_size <= size_classes[num_size_classes - 1];
+    bool is_chunked_block = malloc_data.chunk_size <= size_classes[size_classes.size() - 1];
     if (is_chunked_block)
         malloc_data.mallocations.resize((ChunkedBlock::block_size - sizeof(ChunkedBlock)) / malloc_data.chunk_size);
     else
@@ -92,7 +92,7 @@ ALWAYS_INLINE Mallocation* MallocRegionMetadata::mallocation_for_address(FlatPtr
 
 ALWAYS_INLINE Optional<size_t> MallocRegionMetadata::chunk_index_for_address(FlatPtr address) const
 {
-    bool is_chunked_block = chunk_size <= size_classes[num_size_classes - 1];
+    bool is_chunked_block = chunk_size <= size_classes[size_classes.size() - 1];
     if (!is_chunked_block) {
         // This is a BigAllocationBlock
         return 0;
@@ -102,12 +102,12 @@ struct BigAllocator {
 // are run. Similarly, we can not allow global destructors to destruct
 // them. We could have used AK::NeverDestoyed to prevent the latter,
 // but it would have not helped with the former.
-static u8 g_allocators_storage[sizeof(Allocator) * num_size_classes];
+static u8 g_allocators_storage[sizeof(Allocator) * size_classes.size()];
 static u8 g_big_allocators_storage[sizeof(BigAllocator)];
 
-static inline Allocator (&allocators())[num_size_classes]
+static inline Allocator (&allocators())[size_classes.size()]
 {
-    return reinterpret_cast<Allocator(&)[num_size_classes]>(g_allocators_storage);
+    return reinterpret_cast<Allocator(&)[size_classes.size()]>(g_allocators_storage);
 }
 
 static inline BigAllocator (&big_allocators())[1]
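The g_allocators_storage scheme in this hunk avoids global constructors and destructors by reserving raw bytes and constructing the Allocator objects lazily with placement new (see the __malloc_init hunk below). A minimal standalone sketch of that pattern, with hypothetical names — Allocator's real definition lives in malloc.cpp, and init_allocators stands in for __malloc_init:

#include <cstddef>
#include <new>

// Hypothetical stand-in for LibC's real Allocator.
struct Allocator {
    std::size_t size { 0 };
};

static constexpr std::size_t num_allocators = 13; // size_classes.size() in the real code

// A plain byte array has no global constructor or destructor, so it is
// usable before any C++ initializers run and is never torn down.
alignas(Allocator) static unsigned char g_allocators_storage[sizeof(Allocator) * num_allocators];

// Typed view over the raw storage, mirroring allocators() in the diff.
static inline Allocator (&allocators())[num_allocators]
{
    return reinterpret_cast<Allocator (&)[num_allocators]>(g_allocators_storage);
}

// Construct the objects in place exactly once, as __malloc_init() does.
void init_allocators()
{
    for (std::size_t i = 0; i < num_allocators; ++i)
        new (&allocators()[i]) Allocator();
}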
@@ -442,7 +442,7 @@ void __malloc_init()
     if (secure_getenv("LIBC_PROFILE_MALLOC"))
         s_profiling = true;
 
-    for (size_t i = 0; i < num_size_classes; ++i) {
+    for (size_t i = 0; i < size_classes.size(); ++i) {
         new (&allocators()[i]) Allocator();
         allocators()[i].size = size_classes[i];
     }
@@ -6,6 +6,8 @@
 
 #pragma once
 
+#include <AK/AllOf.h>
+#include <AK/Array.h>
 #include <AK/InlineLinkedList.h>
 #include <AK/Types.h>
 
@@ -14,18 +16,10 @@
 #define MALLOC_SCRUB_BYTE 0xdc
 #define FREE_SCRUB_BYTE 0xed
 
-static constexpr unsigned short size_classes[] = { 8, 16, 32, 64, 128, 256, 504, 1016, 2032, 4088, 8184, 16376, 32752, 0 };
-static constexpr size_t num_size_classes = (sizeof(size_classes) / sizeof(unsigned short)) - 1;
-
-consteval bool check_size_classes_alignment()
-{
-    for (size_t i = 0; i < num_size_classes; i++) {
-        if ((size_classes[i] % 8) != 0)
-            return false;
-    }
-    return true;
-}
-static_assert(check_size_classes_alignment());
+static constexpr Array<unsigned short, 13> size_classes { 8, 16, 32, 64, 128, 256, 504, 1016, 2032, 4088, 8184, 16376, 32752 };
+static constexpr auto malloc_alignment = 8;
+static_assert(all_of(size_classes.begin(), size_classes.end(),
+    [](const auto val) { return val % malloc_alignment == 0; }));
 
 struct CommonHeader {
     size_t m_magic;
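For reference, the generic all_of the new static_assert relies on is just an iterator-walking predicate check. AK's real implementation in AK/AllOf.h may differ in details; a minimal constexpr-friendly version looks roughly like this:

// Minimal sketch of a generic, constexpr-friendly all_of.
template<typename Iterator, typename Predicate>
constexpr bool all_of(Iterator begin, Iterator end, Predicate predicate)
{
    for (auto it = begin; it != end; ++it) {
        if (!predicate(*it))
            return false;
    }
    return true;
}

// Usage mirroring the new mallocdefs.h check:
constexpr unsigned short classes[] = { 8, 16, 32 };
static_assert(all_of(classes, classes + 3, [](auto v) { return v % 8 == 0; }));

Because every step of the loop is a constant expression, the whole call collapses at compile time, which is what lets the header replace the bespoke consteval helper with a single static_assert.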