
Kernel: Optimize single physical page allocation and randomize returns

Rather than trying to find a contiguous set of bits of size 1, just
find a single available bit, starting the search from a hint.
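
To illustrate, here is a minimal sketch of a hint-based search for one free
bit, using std::vector<bool> in place of the kernel's AK::Bitmap; the
PageBitmap class and its find_one_unset() method are hypothetical names, not
SerenityOS APIs.

#include <cstddef>
#include <optional>
#include <vector>

// Hypothetical stand-in for a page-allocation bitmap: one bit per page,
// true = allocated, false = free.
class PageBitmap {
public:
    explicit PageBitmap(size_t page_count)
        : m_bits(page_count, false)
    {
    }

    // Scan forward from the hint and wrap around, so that with a good hint
    // the common case finds a free bit almost immediately instead of
    // rescanning from bit 0 on every allocation.
    std::optional<size_t> find_one_unset(size_t hint) const
    {
        for (size_t i = 0; i < m_bits.size(); ++i) {
            size_t index = (hint + i) % m_bits.size();
            if (!m_bits[index])
                return index;
        }
        return std::nullopt; // Every page is allocated.
    }

    void set(size_t index, bool value) { m_bits[index] = value; }

private:
    std::vector<bool> m_bits;
};

The kernel code below also advances the hint past the page it just allocated
(m_free_hint = free_index.value() + 1), so consecutive allocations keep
walking forward through the bitmap.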

Also, randomize returned physical pages a bit by placing them into a
256-entry queue rather than making them available immediately. Once the
queue is full, pick a random entry, make that page available again, and
reuse its slot for the most recently returned page.
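
A rough sketch of that return-queue policy, assuming a fixed 256-entry
capacity, a caller-supplied free_page_at callback, and std::mt19937 standing
in for the kernel's get_fast_random<u8>(); the ReturnQueue class and its
names are illustrative only.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <random>
#include <vector>

// Hypothetical holding queue for freed physical pages: freed pages are not
// made allocatable immediately, and the eviction order is randomized.
class ReturnQueue {
public:
    static constexpr size_t capacity = 256;

    explicit ReturnQueue(std::function<void(uintptr_t)> free_page_at)
        : m_free_page_at(std::move(free_page_at))
    {
        m_entries.reserve(capacity);
    }

    void return_page(uintptr_t paddr)
    {
        if (m_entries.size() >= capacity) {
            // Queue is full: pick a random entry, actually free that page,
            // and keep the newly returned page in its slot.
            auto& slot = m_entries[m_rng() % m_entries.size()];
            m_free_page_at(slot);
            slot = paddr;
        } else {
            // Still filling the queue; just append.
            m_entries.push_back(paddr);
        }
    }

private:
    std::function<void(uintptr_t)> m_free_page_at;
    std::vector<uintptr_t> m_entries;
    std::mt19937 m_rng { std::random_device {}() };
};

The effect is that the page freed most recently is unlikely to be the very
next page handed back out, so the order in which physical pages are reused is
no longer predictable from the order in which they were freed.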
Authored by Tom on 2020-09-07 23:18:38 -06:00; committed by Andreas Kling
parent 92e400c7f9
commit efe2b75017
2 changed files with 55 additions and 10 deletions


@@ -29,6 +29,7 @@
 #include <AK/RefPtr.h>
 #include <AK/Vector.h>
 #include <Kernel/Assertions.h>
+#include <Kernel/Random.h>
 #include <Kernel/VM/PhysicalPage.h>
 #include <Kernel/VM/PhysicalRegion.h>
@@ -88,6 +89,31 @@ unsigned PhysicalRegion::find_contiguous_free_pages(size_t count)
     return range.value();
 }
+Optional<unsigned> PhysicalRegion::find_one_free_page()
+{
+    if (m_used == m_pages) {
+        // We know we don't have any free pages, no need to check the bitmap
+        // Check if we can draw one from the return queue
+        if (m_recently_returned.size() > 0) {
+            u8 index = get_fast_random<u8>() % m_recently_returned.size();
+            ptrdiff_t local_offset = m_recently_returned[index].get() - m_lower.get();
+            m_recently_returned.remove(index);
+            ASSERT(local_offset >= 0);
+            ASSERT((FlatPtr)local_offset < (FlatPtr)(m_pages * PAGE_SIZE));
+            return local_offset / PAGE_SIZE;
+        }
+        return {};
+    }
+    auto free_index = m_bitmap.find_one_anywhere_unset(m_free_hint);
+    ASSERT(free_index.has_value());
+    auto page_index = free_index.value();
+    m_bitmap.set(page_index, true);
+    m_used++;
+    m_free_hint = free_index.value() + 1; // Just a guess
+    return page_index;
+}
 Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t count)
 {
     ASSERT(count != 0);
@@ -98,10 +124,9 @@ Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t count)
     auto page = first_index.value();
     if (count == found_pages_count) {
-        for (unsigned page_index = page; page_index < (page + count); page_index++) {
-            m_bitmap.set(page_index, true);
-        }
+        m_bitmap.set_range<true>(page, count);
         m_used += count;
+        m_free_hint = first_index.value() + count + 1; // Just a guess
         return page;
     }
     return {};
@@ -111,13 +136,14 @@ RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
 {
     ASSERT(m_pages);
-    if (m_used == m_pages)
+    auto free_index = find_one_free_page();
+    if (!free_index.has_value())
         return nullptr;
-    return PhysicalPage::create(m_lower.offset(find_contiguous_free_pages(1) * PAGE_SIZE), supervisor);
+    return PhysicalPage::create(m_lower.offset(free_index.value() * PAGE_SIZE), supervisor);
 }
-void PhysicalRegion::return_page_at(PhysicalAddress addr)
+void PhysicalRegion::free_page_at(PhysicalAddress addr)
 {
     ASSERT(m_pages);
@@ -131,7 +157,23 @@ void PhysicalRegion::return_page_at(PhysicalAddress addr)
     auto page = (FlatPtr)local_offset / PAGE_SIZE;
     m_bitmap.set(page, false);
+    m_free_hint = page; // We know we can find one here for sure
     m_used--;
 }
+void PhysicalRegion::return_page(const PhysicalPage& page)
+{
+    auto returned_count = m_recently_returned.size();
+    if (returned_count >= m_recently_returned.capacity()) {
+        // Return queue is full, pick a random entry and free that page
+        // and replace the entry with this page
+        auto& entry = m_recently_returned[get_fast_random<u8>()];
+        free_page_at(entry);
+        entry = page.paddr();
+    } else {
+        // Still filling the return queue, just append it
+        m_recently_returned.append(page.paddr());
+    }
+}
 }