
Kernel: Remove pointless ref-counting from PhysicalRegion

These are not multiple-owner objects and have no use for ref-counting.
Make them simple value types instead (not eternal heap-allocated.)
Author: Andreas Kling
Date:   2021-07-11 14:29:02 +02:00
Parent: 29d53cbee2
Commit: b2cd9b2c88
4 changed files with 20 additions and 29 deletions
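The ownership change in a nutshell, as a standalone sketch before the diffs: a hypothetical Region stand-in (not the kernel's actual class), with std::vector in place of AK::Vector. Where the old code kept each region as a separately heap-allocated, ref-counted object behind RefPtr, the new code stores regions by value inside the vector and refers to the current one with a plain pointer.

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for Kernel::PhysicalRegion, reduced to the ownership
// pattern: a plain value type, no ref-count, no mandatory heap allocation.
struct Region {
    uintptr_t lower { 0 };
    uintptr_t upper { 0 };

    static Region create(uintptr_t lower, uintptr_t upper) { return { lower, upper }; }
    void expand(uintptr_t new_lower, uintptr_t new_upper)
    {
        lower = new_lower;
        upper = new_upper;
    }
};

int main()
{
    // Before: NonnullRefPtrVector<PhysicalRegion>, a vector of ref-counted heap
    // objects. After: the regions live directly in the vector's storage.
    std::vector<Region> regions;
    regions.push_back(Region::create(0x100000, 0x200000));

    // Before: RefPtr<PhysicalRegion>. After: a raw pointer into the vector,
    // valid only until the vector reallocates or the element is removed.
    Region* current = &regions.back();
    current->expand(current->lower, 0x300000);

    std::printf("region: %#lx-%#lx\n", (unsigned long)current->lower, (unsigned long)current->upper);
    return 0;
}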

Kernel/VM/MemoryManager.cpp

@@ -174,7 +174,7 @@ bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address
 UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
 {
-    RefPtr<PhysicalRegion> physical_region;
+    PhysicalRegion* physical_region { nullptr };

     // Register used memory regions that we know of.
     m_used_memory_ranges.ensure_capacity(4);
@@ -256,9 +256,9 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
                continue;

            // Assign page to user physical physical_region.
-            if (physical_region.is_null() || physical_region->upper().offset(PAGE_SIZE) != addr) {
+            if (!physical_region || physical_region->upper().offset(PAGE_SIZE) != addr) {
                m_user_physical_regions.append(PhysicalRegion::create(addr, addr));
-                physical_region = m_user_physical_regions.last();
+                physical_region = &m_user_physical_regions.last();
            } else {
                physical_region->expand(physical_region->lower(), addr);
            }
@@ -336,10 +336,10 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     auto physical_page_array_pages_and_page_tables_count = physical_page_array_pages + needed_page_table_count;

     // Now that we know how much memory we need for a contiguous array of PhysicalPage instances, find a memory region that can fit it
-    RefPtr<PhysicalRegion> found_region;
+    PhysicalRegion* found_region { nullptr };
     for (auto& region : m_user_physical_regions) {
         if (region.size() >= physical_page_array_pages_and_page_tables_count) {
-            found_region = region;
+            found_region = &region;
             break;
         }
     }
@@ -354,10 +354,10 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     if (found_region->size() == physical_page_array_pages_and_page_tables_count) {
         // We're stealing the entire region
+        m_physical_pages_region = move(*found_region);
         m_user_physical_regions.remove_first_matching([&](auto& region) {
-            return region == found_region.ptr();
+            return &region == found_region;
         });
-        m_physical_pages_region = found_region.release_nonnull();
     } else {
         m_physical_pages_region = found_region->take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
     }
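One subtlety in the last hunk: found_region is now a raw pointer into m_user_physical_regions, so it dangles the moment its element is removed. That is why the new code assigns m_physical_pages_region = move(*found_region) before remove_first_matching runs, instead of releasing a RefPtr afterwards. A minimal sketch of that ordering, using std::vector, std::optional, and a hand-rolled loop in place of AK's containers and remove_first_matching (hypothetical Region type, illustrative sizes):

#include <cassert>
#include <optional>
#include <vector>

struct Region {
    unsigned pages { 0 };
};

int main()
{
    std::vector<Region> regions { { 4 }, { 16 }, { 8 } };

    // Find a region that is big enough, keeping only a raw pointer into the
    // vector, as the patched initialize_physical_pages() now does.
    Region* found = nullptr;
    for (auto& region : regions) {
        if (region.pages >= 10) {
            found = &region;
            break;
        }
    }
    assert(found);

    // Move the value out *before* erasing the element; after erasure the
    // pointer dangles and must not be dereferenced again.
    std::optional<Region> taken = std::move(*found);
    for (auto it = regions.begin(); it != regions.end(); ++it) {
        if (&*it == found) { // address comparison only, no use of the contents
            regions.erase(it);
            break;
        }
    }

    assert(taken->pages == 16);
    assert(regions.size() == 2);
    return 0;
}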

Kernel/VM/MemoryManager.h

@@ -16,6 +16,7 @@
 #include <Kernel/SpinLock.h>
 #include <Kernel/VM/AllocationStrategy.h>
 #include <Kernel/VM/PhysicalPage.h>
+#include <Kernel/VM/PhysicalRegion.h>
 #include <Kernel/VM/Region.h>
 #include <Kernel/VM/VMObject.h>
@@ -245,9 +246,9 @@ private:
     SystemMemoryInfo m_system_memory_info;

-    NonnullRefPtrVector<PhysicalRegion> m_user_physical_regions;
-    NonnullRefPtrVector<PhysicalRegion> m_super_physical_regions;
-    RefPtr<PhysicalRegion> m_physical_pages_region;
+    Vector<PhysicalRegion> m_user_physical_regions;
+    Vector<PhysicalRegion> m_super_physical_regions;
+    Optional<PhysicalRegion> m_physical_pages_region;
     PhysicalPageEntry* m_physical_page_entries { nullptr };
     size_t m_physical_page_entries_free { 0 };
     size_t m_physical_page_entries_count { 0 };

Kernel/VM/PhysicalRegion.cpp

@@ -4,22 +4,14 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */

 #include <AK/Bitmap.h>
-#include <AK/NonnullRefPtr.h>
-#include <AK/RefPtr.h>
 #include <AK/Vector.h>
 #include <Kernel/Assertions.h>
 #include <Kernel/Random.h>
 #include <Kernel/VM/PhysicalPage.h>
 #include <Kernel/VM/PhysicalRegion.h>

 namespace Kernel {

-NonnullRefPtr<PhysicalRegion> PhysicalRegion::create(PhysicalAddress lower, PhysicalAddress upper)
-{
-    return adopt_ref(*new PhysicalRegion(lower, upper));
-}
-
 PhysicalRegion::PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper)
     : m_lower(lower)
     , m_upper(upper)
@@ -44,7 +36,7 @@ unsigned PhysicalRegion::finalize_capacity()
     return size();
 }

-NonnullRefPtr<PhysicalRegion> PhysicalRegion::take_pages_from_beginning(unsigned page_count)
+PhysicalRegion PhysicalRegion::take_pages_from_beginning(unsigned page_count)
 {
     VERIFY(m_used == 0);
     VERIFY(page_count > 0);
@@ -59,7 +51,7 @@ NonnullRefPtr<PhysicalRegion> PhysicalRegion::take_pages_from_beginning(unsigned
     finalize_capacity();

     auto taken_region = create(taken_lower, taken_upper);
-    taken_region->finalize_capacity();
+    taken_region.finalize_capacity();
     return taken_region;
 }

Kernel/VM/PhysicalRegion.h

@@ -7,19 +7,17 @@
 #pragma once

 #include <AK/Bitmap.h>
-#include <AK/NonnullRefPtrVector.h>
-#include <AK/RefCounted.h>
+#include <AK/Optional.h>
 #include <Kernel/VM/PhysicalPage.h>

 namespace Kernel {

-class PhysicalRegion : public RefCounted<PhysicalRegion> {
-    AK_MAKE_ETERNAL
+class PhysicalRegion {
 public:
-    static NonnullRefPtr<PhysicalRegion> create(PhysicalAddress lower, PhysicalAddress upper);
-    ~PhysicalRegion() = default;
+    static PhysicalRegion create(PhysicalAddress lower, PhysicalAddress upper)
+    {
+        return { lower, upper };
+    }

     void expand(PhysicalAddress lower, PhysicalAddress upper);
     unsigned finalize_capacity();
@@ -31,7 +29,7 @@ public:
     unsigned free() const { return m_pages - m_used + m_recently_returned.size(); }
     bool contains(PhysicalAddress paddr) const { return paddr >= m_lower && paddr <= m_upper; }

-    NonnullRefPtr<PhysicalRegion> take_pages_from_beginning(unsigned);
+    PhysicalRegion take_pages_from_beginning(unsigned);

     RefPtr<PhysicalPage> take_free_page(bool supervisor);
     NonnullRefPtrVector<PhysicalPage> take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment = PAGE_SIZE);
@@ -41,7 +39,7 @@ private:
     unsigned find_contiguous_free_pages(size_t count, size_t physical_alignment = PAGE_SIZE);
     Optional<unsigned> find_and_allocate_contiguous_range(size_t count, unsigned alignment = 1);
     Optional<unsigned> find_one_free_page();
-    void free_page_at(PhysicalAddress addr);
+    void free_page_at(PhysicalAddress);

     PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper);
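After this change, both creation and splitting are pure value operations. A rough usage sketch of the new shape of the API, with a simplified stand-in type (the real PhysicalRegion also manages an allocation bitmap and usage counters; the 4 KiB page size mirrors the kernel's PAGE_SIZE):

#include <cassert>
#include <cstdint>

// Simplified stand-in for the value-type PhysicalRegion API above.
class Region {
public:
    static Region create(uintptr_t lower, uintptr_t upper) { return { lower, upper }; }

    // Split page_count pages off the front and hand them back as a new value;
    // no heap allocation or ref-count churn is involved in the transfer.
    Region take_pages_from_beginning(unsigned page_count)
    {
        uintptr_t const page_size = 4096; // mirrors the kernel's PAGE_SIZE
        uintptr_t taken_lower = m_lower;
        uintptr_t taken_upper = m_lower + page_count * page_size;
        assert(taken_upper <= m_upper);
        m_lower = taken_upper; // shrink this region from the front
        return create(taken_lower, taken_upper);
    }

    uintptr_t lower() const { return m_lower; }
    uintptr_t upper() const { return m_upper; }

private:
    Region(uintptr_t lower, uintptr_t upper)
        : m_lower(lower)
        , m_upper(upper)
    {
    }

    uintptr_t m_lower { 0 };
    uintptr_t m_upper { 0 };
};

int main()
{
    auto region = Region::create(0x100000, 0x900000);
    auto taken = region.take_pages_from_beginning(4); // 4 pages = 16 KiB
    assert(taken.lower() == 0x100000 && taken.upper() == 0x104000);
    assert(region.lower() == 0x104000);
    return 0;
}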