
Kernel: Make kmalloc heap expansion kmalloc-free

Previously, the heap expansion logic could end up calling kmalloc
recursively, which was quite messy and hard to reason about.

This patch redesigns heap expansion so that it's kmalloc-free:

- We make a single large virtual range allocation at startup
- When expanding, we bump allocate VM from that region
- When expanding, we populate page tables directly ourselves,
  instead of going via MemoryManager.

This makes heap expansion a great deal simpler. However, do note that it
introduces two new flaws that we'll need to deal with eventually:

- The single virtual range allocation is limited to 64 MiB, and once it is
  exhausted, kmalloc() will fail. (Actually, it will PANIC for now.)

- The kmalloc heap can no longer shrink once expanded. Subheaps stay
  in place once constructed.
Andreas Kling 2021-12-25 17:23:18 +01:00
parent 1a35e27490
commit f7a4c34929
3 changed files with 140 additions and 370 deletions
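To make the new scheme concrete, here is a minimal, self-contained sketch of the idea (illustration only, not the actual kernel code: the names expand_kmalloc_heap and subheap_chunk are invented, and a static buffer stands in for the reserved-but-unbacked virtual range). Growing the heap becomes a plain bump allocation from a range reserved once at startup, so no allocator is ever involved:

// Illustration of "reserve one big virtual range, bump-allocate subheaps from it".
// Not SerenityOS code; names and sizes are made up for the example.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

constexpr std::size_t virtual_range_size = 64 * 1024 * 1024; // the 64 MiB cap mentioned above
constexpr std::size_t subheap_chunk = 1 * 1024 * 1024;       // hypothetical per-expansion step

// Stand-in for the virtual range reserved at startup. In the kernel this would
// be address space only, with page tables populated directly when a new
// subheap is carved out.
alignas(4096) static std::uint8_t g_virtual_range[virtual_range_size];
static std::size_t g_bump_offset = 0;

// Bump-allocate the next subheap region. No heap allocation happens here, so
// expansion can never recurse back into the heap being expanded.
static void* expand_kmalloc_heap(std::size_t wanted)
{
    std::size_t size = ((wanted + subheap_chunk - 1) / subheap_chunk) * subheap_chunk;
    if (g_bump_offset + size > virtual_range_size) {
        // Mirrors the first flaw noted above: once the range is gone, give up hard.
        std::fprintf(stderr, "kmalloc virtual range exhausted\n");
        std::abort();
    }
    void* region = g_virtual_range + g_bump_offset;
    g_bump_offset += size;
    return region;
}

int main()
{
    void* first = expand_kmalloc_heap(300 * 1024);
    void* second = expand_kmalloc_heap(2 * 1024 * 1024);
    std::printf("subheaps at %p and %p, %zu bytes of the range used\n",
                first, second, g_bump_offset);
}

Note that the sketch also has no path for handing a slice back, which matches the second flaw: once a subheap has been carved out of the range, it stays there.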


@@ -144,214 +144,4 @@ private:
    Bitmap m_bitmap;
};

template<typename ExpandHeap>
struct ExpandableHeapTraits {
    static bool add_memory(ExpandHeap& expand, size_t allocation_request)
    {
        return expand.add_memory(allocation_request);
    }

    static bool remove_memory(ExpandHeap& expand, void* memory)
    {
        return expand.remove_memory(memory);
    }
};

struct DefaultExpandHeap {
    bool add_memory(size_t)
    {
        // Requires explicit implementation
        return false;
    }

    bool remove_memory(void*)
    {
        return false;
    }
};
template<size_t CHUNK_SIZE, unsigned HEAP_SCRUB_BYTE_ALLOC = 0, unsigned HEAP_SCRUB_BYTE_FREE = 0, typename ExpandHeap = DefaultExpandHeap>
class ExpandableHeap {
    AK_MAKE_NONCOPYABLE(ExpandableHeap);
    AK_MAKE_NONMOVABLE(ExpandableHeap);

public:
    using ExpandHeapType = ExpandHeap;
    using HeapType = Heap<CHUNK_SIZE, HEAP_SCRUB_BYTE_ALLOC, HEAP_SCRUB_BYTE_FREE>;

    struct SubHeap {
        HeapType heap;
        SubHeap* next { nullptr };
        size_t memory_size { 0 };

        template<typename... Args>
        SubHeap(size_t memory_size, Args&&... args)
            : heap(forward<Args>(args)...)
            , memory_size(memory_size)
        {
        }
    };

    ExpandableHeap(u8* memory, size_t memory_size, const ExpandHeapType& expand = ExpandHeapType())
        : m_heaps(memory_size, memory, memory_size)
        , m_expand(expand)
    {
    }

    ~ExpandableHeap()
    {
        // We don't own the main heap, only remove memory that we added previously
        SubHeap* next;
        for (auto* heap = m_heaps.next; heap; heap = next) {
            next = heap->next;
            heap->~SubHeap();
            ExpandableHeapTraits<ExpandHeap>::remove_memory(m_expand, (void*)heap);
        }
    }

    static size_t calculate_memory_for_bytes(size_t bytes)
    {
        return sizeof(SubHeap) + HeapType::calculate_memory_for_bytes(bytes);
    }

    bool expand_memory(size_t size)
    {
        if (m_expanding)
            return false;

        // Allocating more memory itself may trigger allocations and deallocations
        // on this heap. We need to prevent recursive expansion. We also disable
        // removing memory while trying to expand the heap.
        TemporaryChange change(m_expanding, true);
        return ExpandableHeapTraits<ExpandHeap>::add_memory(m_expand, size);
    }
    void* allocate(size_t size)
    {
        int attempt = 0;
        do {
            for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
                if (void* ptr = subheap->heap.allocate(size))
                    return ptr;
            }

            // We need to loop because we won't know how much memory was added.
            // Even though we make a best guess how much memory needs to be added,
            // it doesn't guarantee that enough will be available after adding it.
            // This is especially true for the kmalloc heap, where adding memory
            // requires several other objects to be allocated just to be able to
            // expand the heap.
            // To avoid an infinite expansion loop, limit to two attempts
            if (attempt++ >= 2)
                break;
        } while (expand_memory(size));
        return nullptr;
    }
    void deallocate(void* ptr)
    {
        if (!ptr)
            return;
        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
            if (subheap->heap.contains(ptr)) {
                subheap->heap.deallocate(ptr);
                if (subheap->heap.allocated_chunks() == 0 && subheap != &m_heaps && !m_expanding) {
                    // remove_memory expects the memory to be unused and
                    // may deallocate the memory. We therefore need to first
                    // unlink the subheap and destroy it. If remove_memory
                    // ends up not removing the memory, we'll initialize
                    // a new subheap and re-add it.
                    // We need to remove the subheap before calling remove_memory
                    // because it's possible that remove_memory itself could
                    // cause a memory allocation that we don't want to end up
                    // potentially being made in the subheap we're about to remove.
                    {
                        auto* subheap2 = m_heaps.next;
                        auto** subheap_link = &m_heaps.next;
                        while (subheap2 != subheap) {
                            subheap_link = &subheap2->next;
                            subheap2 = subheap2->next;
                        }
                        *subheap_link = subheap->next;
                    }

                    auto memory_size = subheap->memory_size;
                    subheap->~SubHeap();
                    if (!ExpandableHeapTraits<ExpandHeap>::remove_memory(m_expand, subheap)) {
                        // Removal of the subheap was rejected, so add it back in and
                        // re-initialize it as a clean subheap.
                        add_subheap(subheap, memory_size);
                    }
                }
                return;
            }
        }
        VERIFY_NOT_REACHED();
    }
    HeapType& add_subheap(void* memory, size_t memory_size)
    {
        VERIFY(memory_size > sizeof(SubHeap));

        // Place the SubHeap structure at the beginning of the new memory block
        memory_size -= sizeof(SubHeap);
        SubHeap* new_heap = (SubHeap*)memory;
        new (new_heap) SubHeap(memory_size, (u8*)(new_heap + 1), memory_size);

        // Add the subheap to the list (but leave the main heap where it is)
        SubHeap* next_heap = m_heaps.next;
        SubHeap** next_heap_link = &m_heaps.next;
        while (next_heap) {
            if (new_heap->heap.memory() < next_heap->heap.memory())
                break;
            next_heap_link = &next_heap->next;
            next_heap = next_heap->next;
        }
        new_heap->next = *next_heap_link;
        *next_heap_link = new_heap;
        return new_heap->heap;
    }
    bool contains(const void* ptr) const
    {
        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next) {
            if (subheap->heap.contains(ptr))
                return true;
        }
        return false;
    }

    size_t total_chunks() const
    {
        size_t total = 0;
        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next)
            total += subheap->heap.total_chunks();
        return total;
    }
    size_t total_bytes() const { return total_chunks() * CHUNK_SIZE; }

    size_t free_chunks() const
    {
        size_t total = 0;
        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next)
            total += subheap->heap.free_chunks();
        return total;
    }
    size_t free_bytes() const { return free_chunks() * CHUNK_SIZE; }

    size_t allocated_chunks() const
    {
        size_t total = 0;
        for (auto* subheap = &m_heaps; subheap; subheap = subheap->next)
            total += subheap->heap.allocated_chunks();
        return total;
    }
    size_t allocated_bytes() const { return allocated_chunks() * CHUNK_SIZE; }

private:
    SubHeap m_heaps;
    ExpandHeap m_expand;
    bool m_expanding { false };
};

}
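
For contrast, here is a reduced model (again not the actual kernel code; every name in it is invented for the illustration) of why the removed design was hard to reason about: the add_memory() hook handed to ExpandableHeap could itself allocate from the very heap it was asked to grow, and only the m_expanding guard (the TemporaryChange above) kept that from recursing forever.

// Toy model of the recursive-expansion hazard guarded against by m_expanding.
#include <cstddef>
#include <cstdio>
#include <vector>

struct ModelHeap {
    std::vector<char*> regions; // stand-in for the subheap list
    bool expanding { false };   // plays the role of m_expanding

    void* allocate(std::size_t size)
    {
        // Pretend every existing region is full, so allocation always needs growth.
        if (!expand(size))
            return nullptr;
        return regions.back();
    }

    bool expand(std::size_t size)
    {
        if (expanding)
            return false; // the guard: refuse to expand while already expanding
        expanding = true;
        bool ok = add_memory(size);
        expanding = false;
        return ok;
    }

    bool add_memory(std::size_t size)
    {
        // In the old kmalloc setup, growing the heap meant allocating kernel
        // bookkeeping objects, which came from this same heap and re-entered
        // allocate() mid-expansion. Only the guard breaks the cycle.
        void* bookkeeping = allocate(64);
        (void)bookkeeping; // nullptr here, precisely because of the guard
        regions.push_back(new char[size]);
        return true;
    }
};

int main()
{
    ModelHeap heap;
    void* p = heap.allocate(4096);
    std::printf("allocation %s\n", p ? "succeeded" : "failed");
    for (auto* r : heap.regions)
        delete[] r;
}

The commit removes the need for this dance entirely: with a pre-reserved virtual range and direct page-table population, expansion never allocates, so there is nothing to guard against.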