Kernel: Implement zone-based buddy allocator for physical memory
The previous allocator was very naive and kept the state of all pages in one big bitmap. When allocating, we had to scan through the bitmap until we found an unset bit.

This patch introduces a new binary buddy allocator that manages the physical memory pages.

Each PhysicalRegion is divided into zones (PhysicalZone) of 16MB each. Any extra pages at the end of physical RAM that don't fit into a 16MB zone are turned into 15 or fewer 1MB zones.

Each zone starts out with one full-sized block, which is then recursively subdivided into halves upon allocation, until a block of the requested size can be returned.

There are more opportunities for improvement here: the way zone objects are allocated and stored is non-optimal, and the same goes for the allocation of the buddy block state bitmaps.
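To make the zone carving concrete, here is a minimal standalone sketch. Everything in it (the Zone struct, the carve_zones helper, the constants) is illustrative rather than the kernel's actual PhysicalZone code; it just shows why the tail of RAM yields 15 or fewer 1MB zones (a remainder after 16MB carving is always under 16MB, i.e. at most fifteen 1MB zones).

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Illustrative constants: 16 MiB primary zones, 1 MiB remainder zones.
    constexpr uint64_t LARGE_ZONE_SIZE = 16 * 1024 * 1024;
    constexpr uint64_t SMALL_ZONE_SIZE = 1 * 1024 * 1024;

    struct Zone {
        uint64_t base;
        uint64_t size;
    };

    // Hypothetical helper: carve [base, base + size) into 16 MiB zones,
    // then turn whatever is left into 1 MiB zones.
    std::vector<Zone> carve_zones(uint64_t base, uint64_t size)
    {
        std::vector<Zone> zones;
        while (size >= LARGE_ZONE_SIZE) {
            zones.push_back({ base, LARGE_ZONE_SIZE });
            base += LARGE_ZONE_SIZE;
            size -= LARGE_ZONE_SIZE;
        }
        while (size >= SMALL_ZONE_SIZE) {
            zones.push_back({ base, SMALL_ZONE_SIZE });
            base += SMALL_ZONE_SIZE;
            size -= SMALL_ZONE_SIZE;
        }
        // A final tail smaller than 1 MiB is simply not zoned here.
        return zones;
    }

    int main()
    {
        // Example: 35 MiB of RAM -> two 16 MiB zones + three 1 MiB zones.
        for (auto& zone : carve_zones(0x100000, 35 * 1024 * 1024))
            std::printf("zone @ %#llx, %llu bytes\n",
                (unsigned long long)zone.base, (unsigned long long)zone.size);
    }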
parent be83b3aff4
commit ba87571366
9 changed files with 411 additions and 145 deletions
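Before the diff, a sketch of the recursive subdivision the commit message describes: the classic binary buddy scheme. This BuddyZone class is a simplified illustration, not the commit's PhysicalZone (which also tracks buddy state in bitmaps); it assumes 4KB pages and a 16MB zone, so the full-sized block has order 12 (2^12 pages).

    #include <cstddef>
    #include <cstdint>
    #include <list>
    #include <optional>

    constexpr size_t MAX_ORDER = 12;      // 2^12 pages x 4 KiB = 16 MiB
    constexpr uintptr_t PAGE_SIZE = 4096;

    class BuddyZone {
    public:
        explicit BuddyZone(uintptr_t base)
        {
            // The zone starts out with one full-sized free block.
            m_free_lists[MAX_ORDER].push_back(base);
        }

        // Allocate a block of 2^order pages, splitting larger blocks as needed.
        std::optional<uintptr_t> allocate(size_t order)
        {
            // Find the smallest free block that can satisfy the request.
            size_t o = order;
            while (o <= MAX_ORDER && m_free_lists[o].empty())
                ++o;
            if (o > MAX_ORDER)
                return std::nullopt; // Zone exhausted.

            uintptr_t block = m_free_lists[o].front();
            m_free_lists[o].pop_front();

            // Recursively subdivide into halves: keep the lower half and
            // free the upper half (the "buddy") one order down, until the
            // block matches the requested size.
            while (o > order) {
                --o;
                m_free_lists[o].push_back(block + (uintptr_t(1) << o) * PAGE_SIZE);
            }
            return block;
        }

    private:
        // One free list per block size; freeing would coalesce buddies back up.
        std::list<uintptr_t> m_free_lists[MAX_ORDER + 1];
    };

    int main()
    {
        BuddyZone zone(0x1000000);
        auto page = zone.allocate(0); // One 4 KiB page: the 16 MiB block is split 12 times.
        return page.has_value() ? 0 : 1;
    }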
Kernel/VM/PhysicalRegion.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
+ * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
  *
  * SPDX-License-Identifier: BSD-2-Clause
  */
@@ -7,49 +7,51 @@
 #pragma once

-#include <AK/Bitmap.h>
+#include <AK/NonnullOwnPtrVector.h>
 #include <AK/Optional.h>
+#include <AK/OwnPtr.h>
 #include <Kernel/VM/PhysicalPage.h>

 namespace Kernel {

+class PhysicalZone;
+
 class PhysicalRegion {
 public:
-    static PhysicalRegion create(PhysicalAddress lower, PhysicalAddress upper)
+    static OwnPtr<PhysicalRegion> try_create(PhysicalAddress lower, PhysicalAddress upper)
     {
-        return { lower, upper };
+        return adopt_own_if_nonnull(new PhysicalRegion { lower, upper });
     }

+    ~PhysicalRegion();
+
     void expand(PhysicalAddress lower, PhysicalAddress upper);
     unsigned finalize_capacity();
+    void initialize_zones();

     PhysicalAddress lower() const { return m_lower; }
     PhysicalAddress upper() const { return m_upper; }
     unsigned size() const { return m_pages; }
-    unsigned used() const { return m_used - m_recently_returned.size(); }
-    unsigned free() const { return m_pages - m_used + m_recently_returned.size(); }
+    unsigned used() const { return m_used; }
+    unsigned free() const { return m_pages - m_used; }
     bool contains(PhysicalAddress paddr) const { return paddr >= m_lower && paddr <= m_upper; }

-    PhysicalRegion take_pages_from_beginning(unsigned);
+    OwnPtr<PhysicalRegion> try_take_pages_from_beginning(unsigned);

     RefPtr<PhysicalPage> take_free_page();
     NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, size_t physical_alignment = PAGE_SIZE);
     void return_page(PhysicalAddress);

 private:
-    unsigned find_contiguous_free_pages(size_t count, size_t physical_alignment = PAGE_SIZE);
-    Optional<unsigned> find_and_allocate_contiguous_range(size_t count, unsigned alignment = 1);
-    Optional<unsigned> find_one_free_page();
-    void free_page_at(PhysicalAddress);
-
     PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper);

+    NonnullOwnPtrVector<PhysicalZone> m_zones;
+
+    size_t m_used { 0 };
+
     PhysicalAddress m_lower;
     PhysicalAddress m_upper;
     unsigned m_pages { 0 };
-    unsigned m_used { 0 };
-    Bitmap m_bitmap;
-    size_t m_free_hint { 0 };
-    Vector<PhysicalAddress, 256> m_recently_returned;
 };

 }
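One API change worth noting in the header above: the infallible create(), which returned a PhysicalRegion by value, becomes try_create(), which returns an OwnPtr and can report allocation failure (adopt_own_if_nonnull yields a null OwnPtr when new returns null). A userspace analogue of that pattern, sketched with std::unique_ptr and a hypothetical Widget class:

    #include <memory>
    #include <new>

    class Widget {
    public:
        // Fallible factory: returns null instead of asserting on OOM,
        // mirroring try_create() + adopt_own_if_nonnull() in the diff.
        static std::unique_ptr<Widget> try_create(int size)
        {
            return std::unique_ptr<Widget>(new (std::nothrow) Widget(size));
        }

        int size() const { return m_size; }

    private:
        explicit Widget(int size)
            : m_size(size)
        {
        }

        int m_size { 0 };
    };

    int main()
    {
        auto widget = Widget::try_create(42);
        if (!widget)
            return 1; // Out of memory: fail cleanly instead of crashing.
        return widget->size() == 42 ? 0 : 1;
    }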