From efe2b75017215a5e5d4e0c9ee92fc16082532fb8 Mon Sep 17 00:00:00 2001
From: Tom <tomut@yahoo.com>
Date: Mon, 7 Sep 2020 23:18:38 -0600
Subject: [PATCH] Kernel: Optimize single physical page allocation and
 randomize returns

Rather than trying to find a contiguous set of bits of size 1, just
find one single available bit using a hint.

Also, try to randomize returned physical pages a bit by placing them
into a 256 entry queue rather than making them available immediately.
Then, once the queue is filled, pick a random one, make it available
again and use that slot for the latest page to be returned.
---
 Kernel/VM/PhysicalRegion.cpp | 54 ++++++++++++++++++++++++++++++++----
 Kernel/VM/PhysicalRegion.h   | 11 +++++---
 2 files changed, 55 insertions(+), 10 deletions(-)

diff --git a/Kernel/VM/PhysicalRegion.cpp b/Kernel/VM/PhysicalRegion.cpp
index 26439c9de7..085d33a01a 100644
--- a/Kernel/VM/PhysicalRegion.cpp
+++ b/Kernel/VM/PhysicalRegion.cpp
@@ -29,6 +29,7 @@
 #include <AK/Bitmap.h>
 #include <AK/NonnullRefPtr.h>
 #include <AK/RefPtr.h>
+#include <Kernel/Random.h>
 #include <Kernel/VM/PhysicalPage.h>
 #include <Kernel/VM/PhysicalRegion.h>
 
@@ -88,6 +89,31 @@ unsigned PhysicalRegion::find_contiguous_free_pages(size_t count)
     return range.value();
 }
 
+Optional<unsigned> PhysicalRegion::find_one_free_page()
+{
+    if (m_used == m_pages) {
+        // We know we don't have any free pages, no need to check the bitmap
+        // Check if we can draw one from the return queue
+        if (m_recently_returned.size() > 0) {
+            u8 index = get_fast_random<u8>() % m_recently_returned.size();
+            ptrdiff_t local_offset = m_recently_returned[index].get() - m_lower.get();
+            m_recently_returned.remove(index);
+            ASSERT(local_offset >= 0);
+            ASSERT((FlatPtr)local_offset < (FlatPtr)(m_pages * PAGE_SIZE));
+            return local_offset / PAGE_SIZE;
+        }
+        return {};
+    }
+    auto free_index = m_bitmap.find_one_anywhere_unset(m_free_hint);
+    ASSERT(free_index.has_value());
+
+    auto page_index = free_index.value();
+    m_bitmap.set(page_index, true);
+    m_used++;
+    m_free_hint = free_index.value() + 1; // Just a guess
+    return page_index;
+}
+
 Optional<unsigned>
PhysicalRegion::find_and_allocate_contiguous_range(size_t count)
 {
     ASSERT(count != 0);
@@ -98,10 +124,9 @@ Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t cou
     auto page = first_index.value();
 
     if (count == found_pages_count) {
-        for (unsigned page_index = page; page_index < (page + count); page_index++) {
-            m_bitmap.set(page_index, true);
-        }
+        m_bitmap.set_range(page, count);
         m_used += count;
+        m_free_hint = first_index.value() + count + 1; // Just a guess
         return page;
     }
     return {};
@@ -111,13 +136,14 @@ RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
 {
     ASSERT(m_pages);
 
-    if (m_used == m_pages)
+    auto free_index = find_one_free_page();
+    if (!free_index.has_value())
         return nullptr;
 
-    return PhysicalPage::create(m_lower.offset(find_contiguous_free_pages(1) * PAGE_SIZE), supervisor);
+    return PhysicalPage::create(m_lower.offset(free_index.value() * PAGE_SIZE), supervisor);
 }
 
-void PhysicalRegion::return_page_at(PhysicalAddress addr)
+void PhysicalRegion::free_page_at(PhysicalAddress addr)
 {
     ASSERT(m_pages);
 
@@ -131,7 +157,23 @@ void PhysicalRegion::return_page_at(PhysicalAddress addr)
     auto page = (FlatPtr)local_offset / PAGE_SIZE;
 
     m_bitmap.set(page, false);
+    m_free_hint = page; // We know we can find one here for sure
     m_used--;
 }
 
+void PhysicalRegion::return_page(const PhysicalPage& page)
+{
+    auto returned_count = m_recently_returned.size();
+    if (returned_count >= m_recently_returned.capacity()) {
+        // Return queue is full, pick a random entry and free that page
+        // and replace the entry with this page
+        auto& entry = m_recently_returned[get_fast_random<u8>()];
+        free_page_at(entry);
+        entry = page.paddr();
+    } else {
+        // Still filling the return queue, just append it
+        m_recently_returned.append(page.paddr());
+    }
+}
+
 }
diff --git a/Kernel/VM/PhysicalRegion.h b/Kernel/VM/PhysicalRegion.h
index dff192586e..9122548c41 100644
--- a/Kernel/VM/PhysicalRegion.h
+++ b/Kernel/VM/PhysicalRegion.h
@@ -47,18 +47,19 @@ public:
     PhysicalAddress lower()
const { return m_lower; }
     PhysicalAddress upper() const { return m_upper; }
     unsigned size() const { return m_pages; }
-    unsigned used() const { return m_used; }
-    unsigned free() const { return m_pages - m_used; }
+    unsigned used() const { return m_used - m_recently_returned.size(); }
+    unsigned free() const { return m_pages - m_used + m_recently_returned.size(); }
     bool contains(const PhysicalPage& page) const { return page.paddr() >= m_lower && page.paddr() <= m_upper; }
 
     RefPtr<PhysicalPage> take_free_page(bool supervisor);
     NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor);
-    void return_page_at(PhysicalAddress addr);
-    void return_page(const PhysicalPage& page) { return_page_at(page.paddr()); }
+    void return_page(const PhysicalPage& page);
 
 private:
     unsigned find_contiguous_free_pages(size_t count);
     Optional<unsigned> find_and_allocate_contiguous_range(size_t count);
+    Optional<unsigned> find_one_free_page();
+    void free_page_at(PhysicalAddress addr);
 
     PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper);
 
@@ -67,6 +68,8 @@ private:
     unsigned m_pages { 0 };
     unsigned m_used { 0 };
     Bitmap m_bitmap;
+    size_t m_free_hint { 0 };
+    Vector<PhysicalAddress, 256> m_recently_returned;
 };
 
 }