From 259cca456f1e60e28cbcad8dbf3b7f2858d8164b Mon Sep 17 00:00:00 2001
From: Andreas Kling
Date: Sat, 11 Dec 2021 19:00:47 +0100
Subject: [PATCH] Kernel: Make PhysicalRegion::return_page() do arithmetic
 instead of loop

Most of the time, we will be freeing physical pages within the
full-sized zones. We can do some simple math to find the right zone
immediately instead of looping through the zones, checking each one.

We still do loop through the slack/remainder zones at the end.

There's probably an even nicer way to solve this, but this is already
a nice improvement. :^)
---
 Kernel/Memory/PhysicalRegion.cpp | 26 ++++++++++++++++++--------
 Kernel/Memory/PhysicalRegion.h   |  3 +++
 2 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/Kernel/Memory/PhysicalRegion.cpp b/Kernel/Memory/PhysicalRegion.cpp
index 134e908d20..9c41ab619d 100644
--- a/Kernel/Memory/PhysicalRegion.cpp
+++ b/Kernel/Memory/PhysicalRegion.cpp
@@ -42,7 +42,8 @@ void PhysicalRegion::initialize_zones()
     size_t remaining_pages = m_pages;
     auto base_address = m_lower;
 
-    auto make_zones = [&](size_t pages_per_zone) {
+    auto make_zones = [&](size_t zone_size) {
+        size_t pages_per_zone = zone_size / PAGE_SIZE;
         size_t zone_count = 0;
         auto first_address = base_address;
         while (remaining_pages >= pages_per_zone) {
@@ -57,10 +58,10 @@ void PhysicalRegion::initialize_zones()
     };
 
     // First make 16 MiB zones (with 4096 pages each)
-    make_zones(4096);
+    make_zones(large_zone_size);
 
     // Then divide any remaining space into 1 MiB zones (with 256 pages each)
-    make_zones(256);
+    make_zones(small_zone_size);
 }
 
 OwnPtr<PhysicalRegion> PhysicalRegion::try_take_pages_from_beginning(unsigned page_count)
@@ -122,11 +123,20 @@ RefPtr<PhysicalPage> PhysicalRegion::take_free_page()
 
 void PhysicalRegion::return_page(PhysicalAddress paddr)
 {
-    // FIXME: Find a way to avoid looping over the zones here.
-    //        (Do some math on the address to find the right zone index.)
-    //        The main thing that gets in the way of this is non-uniform zone sizes.
-    //        Perhaps it would be better if all zones had the same size.
-    for (auto& zone : m_zones) {
+    size_t full_size_zone_index = (paddr.get() - lower().get()) / large_zone_size;
+    size_t large_zone_count = m_pages / (large_zone_size / PAGE_SIZE);
+
+    if (full_size_zone_index < large_zone_count) {
+        auto& zone = m_zones[full_size_zone_index];
+        VERIFY(zone.contains(paddr));
+        zone.deallocate_block(paddr, 0);
+        if (m_full_zones.contains(zone))
+            m_usable_zones.append(zone);
+        return;
+    }
+
+    for (size_t i = large_zone_count; i < m_zones.size(); ++i) {
+        auto& zone = m_zones[i];
         if (zone.contains(paddr)) {
             zone.deallocate_block(paddr, 0);
             if (m_full_zones.contains(zone))
diff --git a/Kernel/Memory/PhysicalRegion.h b/Kernel/Memory/PhysicalRegion.h
index ec20e0029c..d31a092c4b 100644
--- a/Kernel/Memory/PhysicalRegion.h
+++ b/Kernel/Memory/PhysicalRegion.h
@@ -41,6 +41,9 @@ public:
 private:
     PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper);
 
+    static constexpr size_t large_zone_size = 16 * MiB;
+    static constexpr size_t small_zone_size = 1 * MiB;
+
     NonnullOwnPtrVector<PhysicalZone> m_zones;
 
     PhysicalZone::List m_usable_zones;
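
As an aside, below is a minimal standalone sketch of the zone-lookup
arithmetic used in return_page() above, with plain integer types standing
in for the kernel's PhysicalAddress and PhysicalZone classes. The names
(find_large_zone_index, region_lower, and the example region layout) are
illustrative only and do not appear in the patch; it assumes a 4 KiB
PAGE_SIZE.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Zone layout mirrored from the patch: full 16 MiB zones first, then
// 1 MiB remainder zones at the end of the region.
static constexpr size_t page_size = 4 * 1024;
static constexpr size_t large_zone_size = 16 * 1024 * 1024;
static constexpr size_t pages_per_large_zone = large_zone_size / page_size;

// Returns the index of the full-sized zone containing paddr, or SIZE_MAX
// if the address falls into the remainder zones at the end of the region.
static size_t find_large_zone_index(uintptr_t region_lower, size_t region_pages, uintptr_t paddr)
{
    size_t index = (paddr - region_lower) / large_zone_size;       // same math as full_size_zone_index
    size_t large_zone_count = region_pages / pages_per_large_zone; // same math as large_zone_count
    return index < large_zone_count ? index : SIZE_MAX;
}

int main()
{
    // Hypothetical 33 MiB region: two full 16 MiB zones plus 1 MiB of slack.
    uintptr_t lower = 0x100000;
    size_t pages = (33 * 1024 * 1024) / page_size;

    printf("%zu\n", find_large_zone_index(lower, pages, lower + 5 * 1024 * 1024));  // 0 (first 16 MiB zone)
    printf("%zu\n", find_large_zone_index(lower, pages, lower + 20 * 1024 * 1024)); // 1 (second 16 MiB zone)
    printf("%zu\n", find_large_zone_index(lower, pages, lower + 32 * 1024 * 1024)); // SIZE_MAX (slack zone)
}

The straight division only works because initialize_zones() lays out all
the full 16 MiB zones contiguously from the start of the region, with the
1 MiB remainder zones confined to the end; anything past the last full
zone still has to fall back to the linear scan, as in the patch.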