
Kernel: Optimize single physical page allocation and randomize returns

Rather than trying to find a contiguous set of bits of size 1, just
find a single available bit using a hint.

Also, try to randomize returned physical pages a bit by placing them
into a 256-entry queue rather than making them available immediately.
Then, once the queue is full, pick a random entry, make that page
available again, and use its slot for the latest returned page.
Tom authored on 2020-09-07 23:18:38 -06:00; committed by Andreas Kling
parent 92e400c7f9
commit efe2b75017
2 changed files with 55 additions and 10 deletions
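
Before the diff itself, a minimal self-contained sketch of the randomized
return-queue idea from the commit message may help. All names here
(ReturnQueue, push) are hypothetical stand-ins, not the kernel API; the real
implementation below uses a fixed-capacity Vector<PhysicalAddress, 256> and
get_fast_random<u8>().

#include <cstddef>
#include <cstdint>
#include <optional>
#include <random>
#include <vector>

// Hypothetical stand-in for the kernel's fixed-capacity return queue.
class ReturnQueue {
public:
    explicit ReturnQueue(size_t capacity)
        : m_capacity(capacity)
    {
        m_entries.reserve(capacity);
    }

    // Called when a page is freed. Returns the address that should now
    // actually become allocatable again, if any.
    std::optional<uintptr_t> push(uintptr_t returned_page)
    {
        if (m_entries.size() < m_capacity) {
            // Still filling the queue: hold on to the page for now.
            m_entries.push_back(returned_page);
            return std::nullopt;
        }
        // Queue is full: evict a random slot and keep the new page there,
        // so the page that becomes allocatable is not the one just freed.
        std::uniform_int_distribution<size_t> pick(0, m_entries.size() - 1);
        size_t index = pick(m_rng);
        uintptr_t evicted = m_entries[index];
        m_entries[index] = returned_page;
        return evicted;
    }

private:
    size_t m_capacity { 0 };
    std::vector<uintptr_t> m_entries;
    std::mt19937_64 m_rng { std::random_device {}() };
};

The effect is that the next physical page handed out is unlikely to be the
one most recently freed, presumably making reuse of just-freed pages less
predictable.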

Kernel/VM/PhysicalRegion.cpp

@@ -29,6 +29,7 @@
 #include <AK/RefPtr.h>
 #include <AK/Vector.h>
 #include <Kernel/Assertions.h>
+#include <Kernel/Random.h>
 #include <Kernel/VM/PhysicalPage.h>
 #include <Kernel/VM/PhysicalRegion.h>
@@ -88,6 +89,31 @@ unsigned PhysicalRegion::find_contiguous_free_pages(size_t count)
     return range.value();
 }
 
+Optional<unsigned> PhysicalRegion::find_one_free_page()
+{
+    if (m_used == m_pages) {
+        // We know we don't have any free pages, no need to check the bitmap
+        // Check if we can draw one from the return queue
+        if (m_recently_returned.size() > 0) {
+            u8 index = get_fast_random<u8>() % m_recently_returned.size();
+            ptrdiff_t local_offset = m_recently_returned[index].get() - m_lower.get();
+            m_recently_returned.remove(index);
+            ASSERT(local_offset >= 0);
+            ASSERT((FlatPtr)local_offset < (FlatPtr)(m_pages * PAGE_SIZE));
+            return local_offset / PAGE_SIZE;
+        }
+        return {};
+    }
+
+    auto free_index = m_bitmap.find_one_anywhere_unset(m_free_hint);
+    ASSERT(free_index.has_value());
+
+    auto page_index = free_index.value();
+    m_bitmap.set(page_index, true);
+    m_used++;
+    m_free_hint = free_index.value() + 1; // Just a guess
+    return page_index;
+}
+
 Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t count)
 {
     ASSERT(count != 0);
@@ -98,10 +124,9 @@ Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t count)
     auto page = first_index.value();
     if (count == found_pages_count) {
-        for (unsigned page_index = page; page_index < (page + count); page_index++) {
-            m_bitmap.set(page_index, true);
-        }
+        m_bitmap.set_range<true>(page, count);
         m_used += count;
+        m_free_hint = first_index.value() + count + 1; // Just a guess
         return page;
     }
     return {};
@@ -111,13 +136,14 @@ RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
 {
     ASSERT(m_pages);
 
-    if (m_used == m_pages)
+    auto free_index = find_one_free_page();
+    if (!free_index.has_value())
         return nullptr;
 
-    return PhysicalPage::create(m_lower.offset(find_contiguous_free_pages(1) * PAGE_SIZE), supervisor);
+    return PhysicalPage::create(m_lower.offset(free_index.value() * PAGE_SIZE), supervisor);
 }
 
-void PhysicalRegion::return_page_at(PhysicalAddress addr)
+void PhysicalRegion::free_page_at(PhysicalAddress addr)
 {
     ASSERT(m_pages);
@@ -131,7 +157,23 @@ void PhysicalRegion::return_page_at(PhysicalAddress addr)
     auto page = (FlatPtr)local_offset / PAGE_SIZE;
     m_bitmap.set(page, false);
+    m_free_hint = page; // We know we can find one here for sure
     m_used--;
 }
 
+void PhysicalRegion::return_page(const PhysicalPage& page)
+{
+    auto returned_count = m_recently_returned.size();
+    if (returned_count >= m_recently_returned.capacity()) {
+        // Return queue is full, pick a random entry and free that page
+        // and replace the entry with this page
+        auto& entry = m_recently_returned[get_fast_random<u8>()];
+        free_page_at(entry);
+        entry = page.paddr();
+    } else {
+        // Still filling the return queue, just append it
+        m_recently_returned.append(page.paddr());
+    }
+}
+
 }
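
The fast path in find_one_free_page() above leans on
Bitmap::find_one_anywhere_unset(m_free_hint). As a rough sketch of what such a
hinted lookup plausibly does (the real AK::Bitmap scans word-at-a-time; this
standalone version only illustrates hint-then-wrap semantics and is not the
kernel's implementation):

#include <cstddef>
#include <optional>
#include <vector>

// Simplified illustration of a hinted "find one unset bit" search:
// start scanning at the hint, wrap around once, give up if all set.
std::optional<size_t> find_one_unset(const std::vector<bool>& bits, size_t hint)
{
    size_t n = bits.size();
    for (size_t i = 0; i < n; ++i) {
        size_t index = (hint + i) % n; // hint first, then wrap
        if (!bits[index])
            return index;
    }
    return std::nullopt; // every bit is set: no free page
}

Seeding the hint with "last allocation + 1" (as the diff does) makes
consecutive single-page allocations tend to resume the scan where the previous
one left off instead of rescanning from bit 0.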

Kernel/VM/PhysicalRegion.h

@@ -47,18 +47,19 @@ public:
     PhysicalAddress lower() const { return m_lower; }
     PhysicalAddress upper() const { return m_upper; }
     unsigned size() const { return m_pages; }
-    unsigned used() const { return m_used; }
-    unsigned free() const { return m_pages - m_used; }
+    unsigned used() const { return m_used - m_recently_returned.size(); }
+    unsigned free() const { return m_pages - m_used + m_recently_returned.size(); }
     bool contains(const PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
 
     RefPtr<PhysicalPage> take_free_page(bool supervisor);
     NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor);
-    void return_page_at(PhysicalAddress addr);
-    void return_page(const PhysicalPage& page) { return_page_at(page.paddr()); }
+    void return_page(const PhysicalPage& page);
 
 private:
     unsigned find_contiguous_free_pages(size_t count);
     Optional<unsigned> find_and_allocate_contiguous_range(size_t count);
+    Optional<unsigned> find_one_free_page();
+    void free_page_at(PhysicalAddress addr);
 
     PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper);
@@ -67,6 +68,8 @@ private:
     unsigned m_pages { 0 };
     unsigned m_used { 0 };
     Bitmap m_bitmap;
+    size_t m_free_hint { 0 };
+    Vector<PhysicalAddress, 256> m_recently_returned;
 };
 
 }