
Kernel: Implement zone-based buddy allocator for physical memory

The previous allocator was very naive and kept the state of all pages
in one big bitmap. When allocating, we had to scan through the bitmap
until we found an unset bit.
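For contrast, here is a minimal sketch of that kind of first-fit bitmap scan (illustrative only; find_free_page is a hypothetical helper, not the old kernel code):

#include <AK/Bitmap.h>
#include <AK/Optional.h>

// Hypothetical helper, not the old SerenityOS code: first-fit scan over a
// bitmap with one bit per physical page. Worst case we walk every bit, so
// allocation cost grows linearly with the size of physical memory.
Optional<size_t> find_free_page(Bitmap& pages)
{
    for (size_t i = 0; i < pages.size(); ++i) {
        if (!pages.get(i)) {
            pages.set(i, true); // claim the page
            return i;           // page index within the region
        }
    }
    return {}; // no free page in this region
}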

This patch introduces a new binary buddy allocator that manages the
physical memory pages.

Each PhysicalRegion is divided into zones (PhysicalZone) of 16MB each.
Any extra pages at the end of physical RAM that don't fit into a 16MB
zone are turned into 15 or fewer 1MB zones.
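That carving can be sketched as follows (a hypothetical illustration of the scheme described above, not the code from this commit; carve_into_zones and make_zone are made-up names, and 16 MiB == 4096 pages assumes 4 KiB pages):

#include <Kernel/PhysicalAddress.h>

static constexpr size_t pages_per_large_zone = 4096; // 16 MiB of 4 KiB pages
static constexpr size_t pages_per_small_zone = 256;  // 1 MiB of 4 KiB pages

template<typename MakeZone>
void carve_into_zones(PhysicalAddress base, size_t page_count, MakeZone make_zone)
{
    PhysicalAddress cursor = base;
    size_t remaining = page_count;

    // First, as many full 16 MiB zones as will fit.
    while (remaining >= pages_per_large_zone) {
        make_zone(cursor, pages_per_large_zone);
        cursor = cursor.offset(pages_per_large_zone * PAGE_SIZE);
        remaining -= pages_per_large_zone;
    }

    // The tail is now < 16 MiB, so this adds at most 15 one-MiB zones.
    while (remaining >= pages_per_small_zone) {
        make_zone(cursor, pages_per_small_zone);
        cursor = cursor.offset(pages_per_small_zone * PAGE_SIZE);
        remaining -= pages_per_small_zone;
    }
    // Anything smaller than 1 MiB at the very end is left out of the zones.
}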

Each zone starts out with one full-sized block, which is then
recursively subdivided into halves upon allocation, until a block of
the requested size can be returned.
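In buddy-allocator terms, that subdivision looks roughly like this (a hypothetical sketch of the general technique, not the body of PhysicalZone::allocate_block_impl; freelist_is_empty, pop_freelist, and push_freelist are made-up helpers):

#include <AK/Optional.h>
#include <AK/Types.h>

using ChunkIndex = i16;                  // as in PhysicalZone.h below
static constexpr size_t max_order = 12;  // as in PhysicalZone.h below

// Made-up helpers standing in for the zone's per-order freelist operations:
bool freelist_is_empty(size_t order);
ChunkIndex pop_freelist(size_t order);
void push_freelist(size_t order, ChunkIndex);

// Hypothetical sketch of buddy splitting. An order-k block spans 2^(k+1)
// chunks, so order 0 is the 2-chunk (one page) minimum allocation.
Optional<ChunkIndex> allocate_by_splitting(size_t order)
{
    // Find the smallest non-empty bucket that can satisfy the request.
    size_t bucket = order;
    while (bucket <= max_order && freelist_is_empty(bucket))
        ++bucket;
    if (bucket > max_order)
        return {}; // no block large enough left in this zone

    ChunkIndex block = pop_freelist(bucket);

    // Split in half repeatedly until we're down to the requested order,
    // keeping the lower half and freeing the upper half (its buddy).
    while (bucket > order) {
        --bucket;
        ChunkIndex upper_half = static_cast<ChunkIndex>(block + (2 << bucket));
        push_freelist(bucket, upper_half);
    }
    return block;
}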

There are more opportunities for improvement here: the way zone objects
are allocated and stored is non-optimal. Same goes for the allocation
of buddy block state bitmaps.
Author: Andreas Kling
Date:   2021-07-12 22:52:17 +02:00
Parent: be83b3aff4
Commit: ba87571366

9 changed files with 411 additions and 145 deletions

Kernel/VM/PhysicalZone.h (new file)

@@ -0,0 +1,86 @@
/*
 * Copyright (c) 2021, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/Bitmap.h>
#include <AK/Forward.h>
#include <AK/Types.h>
#include <Kernel/Forward.h>

namespace Kernel {

// A PhysicalZone is an allocator that manages a sub-area of a PhysicalRegion.
// Its total size is always a power of two.
// You allocate chunks at a time. One chunk is PAGE_SIZE/2, and the minimum allocation size is 2 chunks.
// The allocator uses a buddy block scheme internally.
class PhysicalZone {
public:
    static constexpr size_t ZONE_CHUNK_SIZE = PAGE_SIZE / 2;
    using ChunkIndex = i16;

    PhysicalZone(PhysicalAddress base, size_t page_count);

    Optional<PhysicalAddress> allocate_block(size_t order);
    void deallocate_block(PhysicalAddress, size_t order);

    void dump() const;

    size_t available() const { return m_page_count - (m_used_chunks / 2); }

    PhysicalAddress base() const { return m_base_address; }

    bool contains(PhysicalAddress paddr) const
    {
        return paddr >= m_base_address && paddr < m_base_address.offset(m_page_count * PAGE_SIZE);
    }
private:
    Optional<ChunkIndex> allocate_block_impl(size_t order);
    void deallocate_block_impl(ChunkIndex, size_t order);

    struct BuddyBucket {
        bool get_buddy_bit(ChunkIndex index) const
        {
            return bitmap.get(buddy_bit_index(index));
        }

        void set_buddy_bit(ChunkIndex index, bool value)
        {
            bitmap.set(buddy_bit_index(index), value);
        }

        size_t buddy_bit_index(ChunkIndex index) const
        {
            // NOTE: We cut the index in half since one chunk is half a page.
            // An order-k block spans 2^(k+1) chunks (the minimum order-0 block is
            // 2 chunks == 1 page), and the bitmap keeps one bit per *pair* of
            // buddy blocks, so the chunk index is shifted down by (order + 2) in total.
            return (index >> 1) >> (1 + order);
        }

        // This bucket's index in the m_buckets array. (Redundant data kept here for convenience.)
        size_t order { 0 };

        // This is the start of the freelist for this buddy size.
        // It's an index into the global PhysicalPageEntry array (offset by this PhysicalRegion's base.)
        // A value of -1 indicates an empty freelist.
        ChunkIndex freelist { -1 };

        // Bitmap with 1 bit per buddy pair.
        // 0 == Both blocks either free or used.
        // 1 == One block free, one block used.
        Bitmap bitmap;
    };
    static constexpr size_t max_order = 12;
    BuddyBucket m_buckets[max_order + 1];

    PhysicalPageEntry& get_freelist_entry(ChunkIndex) const;
    void remove_from_freelist(BuddyBucket&, ChunkIndex);

    PhysicalAddress m_base_address { 0 };
    size_t m_page_count { 0 };
    size_t m_used_chunks { 0 };
};

}
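A usage sketch (not from this commit). Since max_order is 12 and a full 16 MiB zone holds 2^12 pages, an order-k block appears to be 2^k pages (order 0 == one page, i.e. the 2-chunk minimum), so freeing must pass the same order that was allocated:

#include <Kernel/VM/PhysicalZone.h>

void example(Kernel::PhysicalZone& zone)
{
    // Ask for a physically contiguous 4-page (order-2) block.
    auto block = zone.allocate_block(2);
    if (!block.has_value())
        return; // the zone couldn't satisfy the request

    auto paddr = block.value();
    // ... map and use the 4 pages starting at paddr ...

    // Blocks are returned with the same order they were allocated with.
    zone.deallocate_block(paddr, 2);
}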