mirror of
https://github.com/RGBCube/serenity
synced 2025-07-27 03:37:45 +00:00
AK: Make BumpAllocator work in multi-threaded environments
Fixes #10578.
This commit is contained in:
parent
2c901ae2be
commit
03526a7f2b
1 changed file with 19 additions and 12 deletions
|
@@ -6,6 +6,7 @@
|
||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include <AK/Atomic.h>
|
||||||
#include <AK/StdLibExtras.h>
|
#include <AK/StdLibExtras.h>
|
||||||
#include <AK/Types.h>
|
#include <AK/Types.h>
|
||||||
#include <AK/kmalloc.h>
|
#include <AK/kmalloc.h>
|
||||||
|
@@ -55,14 +56,22 @@ public:
|
||||||
{
|
{
|
||||||
if (!m_head_chunk)
|
if (!m_head_chunk)
|
||||||
return;
|
return;
|
||||||
for_each_chunk([this](auto chunk) {
|
// Note that 'cache_filled' is just an educated guess, and we don't rely on it.
|
||||||
if (!s_unused_allocation_cache) {
|
// If we determine 'cache_filled=true' and the cache becomes empty in the meantime,
|
||||||
auto next_chunk = ((ChunkHeader const*)chunk)->next_chunk;
|
// then we haven't lost much; it was a close call anyway.
|
||||||
if (!next_chunk) {
|
// If we determine 'cache_filled=false' and the cache becomes full in the meantime,
|
||||||
s_unused_allocation_cache = chunk;
|
// then we'll end up with a different chunk to munmap(), no big difference.
|
||||||
|
bool cache_filled = s_unused_allocation_cache.load(MemoryOrder::memory_order_relaxed);
|
||||||
|
for_each_chunk([&](auto chunk) {
|
||||||
|
if (!cache_filled) {
|
||||||
|
cache_filled = true;
|
||||||
|
((ChunkHeader*)chunk)->next_chunk = 0;
|
||||||
|
chunk = s_unused_allocation_cache.exchange(chunk);
|
||||||
|
if (!chunk)
|
||||||
return;
|
return;
|
||||||
}
|
// The cache got filled in the meantime. Oh well, we have to call munmap() anyway.
|
||||||
}
|
}
|
||||||
|
|
||||||
if constexpr (use_mmap) {
|
if constexpr (use_mmap) {
|
||||||
munmap((void*)chunk, m_chunk_size);
|
munmap((void*)chunk, m_chunk_size);
|
||||||
} else {
|
} else {
|
||||||
|
@@ -91,10 +100,8 @@ protected:
|
||||||
{
|
{
|
||||||
// dbgln("Allocated {} entries in previous chunk and have {} unusable bytes", m_allocations_in_previous_chunk, m_chunk_size - m_byte_offset_into_current_chunk);
|
// dbgln("Allocated {} entries in previous chunk and have {} unusable bytes", m_allocations_in_previous_chunk, m_chunk_size - m_byte_offset_into_current_chunk);
|
||||||
// m_allocations_in_previous_chunk = 0;
|
// m_allocations_in_previous_chunk = 0;
|
||||||
void* new_chunk;
|
void* new_chunk = (void*)s_unused_allocation_cache.exchange(0);
|
||||||
if (s_unused_allocation_cache) {
|
if (!new_chunk) {
|
||||||
new_chunk = (void*)exchange(s_unused_allocation_cache, 0);
|
|
||||||
} else {
|
|
||||||
if constexpr (use_mmap) {
|
if constexpr (use_mmap) {
|
||||||
#ifdef __serenity__
|
#ifdef __serenity__
|
||||||
new_chunk = serenity_mmap(nullptr, m_chunk_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_RANDOMIZED | MAP_PRIVATE, 0, 0, m_chunk_size, "BumpAllocator Chunk");
|
new_chunk = serenity_mmap(nullptr, m_chunk_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_RANDOMIZED | MAP_PRIVATE, 0, 0, m_chunk_size, "BumpAllocator Chunk");
|
||||||
|
@@ -140,7 +147,7 @@ protected:
|
||||||
FlatPtr m_current_chunk { 0 };
|
FlatPtr m_current_chunk { 0 };
|
||||||
size_t m_byte_offset_into_current_chunk { 0 };
|
size_t m_byte_offset_into_current_chunk { 0 };
|
||||||
size_t m_chunk_size { 0 };
|
size_t m_chunk_size { 0 };
|
||||||
static FlatPtr s_unused_allocation_cache;
|
static Atomic<FlatPtr> s_unused_allocation_cache;
|
||||||
};
|
};
|
||||||
|
|
||||||
template<typename T, bool use_mmap = false, size_t chunk_size = use_mmap ? 4 * MiB : 4 * KiB>
|
template<typename T, bool use_mmap = false, size_t chunk_size = use_mmap ? 4 * MiB : 4 * KiB>
|
||||||
|
@@ -186,7 +193,7 @@ public:
|
||||||
};
|
};
|
||||||
|
|
||||||
template<bool use_mmap, size_t size>
|
template<bool use_mmap, size_t size>
|
||||||
inline FlatPtr BumpAllocator<use_mmap, size>::s_unused_allocation_cache { 0 };
|
inline Atomic<FlatPtr> BumpAllocator<use_mmap, size>::s_unused_allocation_cache { 0 };
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue