
Kernel: Rename Range => VirtualRange

...and also RangeAllocator => VirtualRangeAllocator.

This clarifies that the ranges we're dealing with are *virtual* memory
ranges and not anything else.
Andreas Kling 2021-08-06 13:54:48 +02:00
parent 93d98d4976
commit cd5faf4e42
39 changed files with 207 additions and 207 deletions
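For readers skimming the diff, here is a minimal call-site sketch (hypothetical, not part of this commit) of the renamed VirtualRange type, using only the constructor and member functions visible in the hunks below; it would only build inside the kernel tree, since VirtualAddress, PAGE_SIZE and VERIFY come from there.

#include <Kernel/Memory/VirtualRange.h>

using namespace Kernel::Memory;

// Sketch only: a VirtualRange is just a (virtual base address, size) pair with
// no physical memory attached, which is what the new name is meant to convey.
// The addresses used here are made up for illustration.
void virtual_range_example()
{
    VirtualRange range { VirtualAddress { 0xc0000000 }, 2 * PAGE_SIZE };

    VERIFY(range.base().get() == 0xc0000000);
    VERIFY(range.size() == 2 * PAGE_SIZE);
    VERIFY(range.end().get() == 0xc0002000);
    VERIFY(range.contains(VirtualAddress { 0xc0001000 }, PAGE_SIZE));

    // Rounding an arbitrary (address, size) pair outward to page boundaries:
    auto rounded_or_error = VirtualRange::expand_to_page_boundaries(0xc0000123, 10);
    VERIFY(!rounded_or_error.is_error());
    VERIFY(rounded_or_error.value() == VirtualRange(VirtualAddress { 0xc0000000 }, PAGE_SIZE));
}

Nothing about the type's behavior changes in this commit; only the names do, which is why the hunks below are symmetric mechanical substitutions (207 additions, 207 deletions).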

View file

@ -154,12 +154,12 @@ void MemoryManager::unmap_ksyms_after_init()
UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
{
VERIFY(!m_physical_memory_ranges.is_empty());
ContiguousReservedMemoryRange range;
ContiguousReservedMemoryVirtualRange range;
for (auto& current_range : m_physical_memory_ranges) {
if (current_range.type != PhysicalMemoryRangeType::Reserved) {
if (current_range.type != PhysicalMemoryVirtualRangeType::Reserved) {
if (range.start.is_null())
continue;
m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, current_range.start.get() - range.start.get() });
m_reserved_memory_ranges.append(ContiguousReservedMemoryVirtualRange { range.start, current_range.start.get() - range.start.get() });
range.start.set((FlatPtr) nullptr);
continue;
}
@ -168,14 +168,14 @@ UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
}
range.start = current_range.start;
}
if (m_physical_memory_ranges.last().type != PhysicalMemoryRangeType::Reserved)
if (m_physical_memory_ranges.last().type != PhysicalMemoryVirtualRangeType::Reserved)
return;
if (range.start.is_null())
return;
m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, m_physical_memory_ranges.last().start.get() + m_physical_memory_ranges.last().length - range.start.get() });
m_reserved_memory_ranges.append(ContiguousReservedMemoryVirtualRange { range.start, m_physical_memory_ranges.last().start.get() + m_physical_memory_ranges.last().length - range.start.get() });
}
bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, Range const& range) const
bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, VirtualRange const& range) const
{
VERIFY(!m_reserved_memory_ranges.is_empty());
for (auto& current_range : m_reserved_memory_ranges) {
@ -194,28 +194,28 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
{
// Register used memory regions that we know of.
m_used_memory_ranges.ensure_capacity(4);
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Prekernel, start_of_prekernel_image, end_of_prekernel_image });
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::Prekernel, start_of_prekernel_image, end_of_prekernel_image });
m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
if (multiboot_flags & 0x4) {
auto* bootmods_start = multiboot_copy_boot_modules_array;
auto* bootmods_end = bootmods_start + multiboot_copy_boot_modules_count;
for (auto* bootmod = bootmods_start; bootmod < bootmods_end; bootmod++) {
m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
m_used_memory_ranges.append(UsedMemoryVirtualRange { UsedMemoryVirtualRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
}
}
auto* mmap_begin = multiboot_memory_map;
auto* mmap_end = multiboot_memory_map + multiboot_memory_map_count;
struct ContiguousPhysicalRange {
struct ContiguousPhysicalVirtualRange {
PhysicalAddress lower;
PhysicalAddress upper;
};
Vector<ContiguousPhysicalRange> contiguous_physical_ranges;
Vector<ContiguousPhysicalVirtualRange> contiguous_physical_ranges;
for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", mmap->addr, mmap->len, mmap->type);
@ -224,24 +224,24 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
auto length = mmap->len;
switch (mmap->type) {
case (MULTIBOOT_MEMORY_AVAILABLE):
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::Usable, start_address, length });
break;
case (MULTIBOOT_MEMORY_RESERVED):
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, start_address, length });
m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::Reserved, start_address, length });
break;
case (MULTIBOOT_MEMORY_ACPI_RECLAIMABLE):
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_Reclaimable, start_address, length });
m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::ACPI_Reclaimable, start_address, length });
break;
case (MULTIBOOT_MEMORY_NVS):
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_NVS, start_address, length });
m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::ACPI_NVS, start_address, length });
break;
case (MULTIBOOT_MEMORY_BADRAM):
dmesgln("MM: Warning, detected bad memory range!");
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::BadMemory, start_address, length });
m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::BadMemory, start_address, length });
break;
default:
dbgln("MM: Unknown range!");
m_physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Unknown, start_address, length });
m_physical_memory_ranges.append(PhysicalMemoryVirtualRange { PhysicalMemoryVirtualRangeType::Unknown, start_address, length });
break;
}
@ -280,7 +280,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
continue;
if (contiguous_physical_ranges.is_empty() || contiguous_physical_ranges.last().upper.offset(PAGE_SIZE) != addr) {
contiguous_physical_ranges.append(ContiguousPhysicalRange {
contiguous_physical_ranges.append(ContiguousPhysicalVirtualRange {
.lower = addr,
.upper = addr,
});
@ -322,7 +322,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
m_system_memory_info.user_physical_pages_uncommitted = m_system_memory_info.user_physical_pages;
for (auto& used_range : m_used_memory_ranges) {
dmesgln("MM: {} range @ {} - {} (size {:#x})", UserMemoryRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
dmesgln("MM: {} range @ {} - {} (size {:#x})", UserMemoryVirtualRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
}
dmesgln("MM: Super physical region: {} - {} (size {:#x})", m_super_physical_region->lower(), m_super_physical_region->upper().offset(-1), PAGE_SIZE * m_super_physical_region->size());
@ -389,7 +389,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
} else {
m_physical_pages_region = found_region->try_take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
}
m_used_memory_ranges.append({ UsedMemoryRangeType::PhysicalPages, m_physical_pages_region->lower(), m_physical_pages_region->upper() });
m_used_memory_ranges.append({ UsedMemoryVirtualRangeType::PhysicalPages, m_physical_pages_region->lower(), m_physical_pages_region->upper() });
// Create the bare page directory. This is not a fully constructed page directory and merely contains the allocators!
m_kernel_page_directory = PageDirectory::must_create_kernel_page_directory();
@ -746,7 +746,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
}
OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(Range const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
{
ScopedSpinLock lock(s_mm_lock);
auto region = Region::try_create_kernel_only(range, vmobject, 0, KString::try_create(name), access, cacheable);

View file

@ -46,7 +46,7 @@ inline FlatPtr virtual_to_low_physical(FlatPtr virtual_)
return virtual_ - physical_to_virtual_offset;
}
enum class UsedMemoryRangeType {
enum class UsedMemoryVirtualRangeType {
LowMemory = 0,
Prekernel,
Kernel,
@ -54,7 +54,7 @@ enum class UsedMemoryRangeType {
PhysicalPages,
};
static constexpr StringView UserMemoryRangeTypeNames[] {
static constexpr StringView UserMemoryVirtualRangeTypeNames[] {
"Low memory",
"Prekernel",
"Kernel",
@ -62,18 +62,18 @@ static constexpr StringView UserMemoryRangeTypeNames[] {
"Physical Pages"
};
struct UsedMemoryRange {
UsedMemoryRangeType type {};
struct UsedMemoryVirtualRange {
UsedMemoryVirtualRangeType type {};
PhysicalAddress start;
PhysicalAddress end;
};
struct ContiguousReservedMemoryRange {
struct ContiguousReservedMemoryVirtualRange {
PhysicalAddress start;
PhysicalSize length {};
};
enum class PhysicalMemoryRangeType {
enum class PhysicalMemoryVirtualRangeType {
Usable = 0,
Reserved,
ACPI_Reclaimable,
@ -82,8 +82,8 @@ enum class PhysicalMemoryRangeType {
Unknown,
};
struct PhysicalMemoryRange {
PhysicalMemoryRangeType type { PhysicalMemoryRangeType::Unknown };
struct PhysicalMemoryVirtualRange {
PhysicalMemoryVirtualRangeType type { PhysicalMemoryVirtualRangeType::Unknown };
PhysicalAddress start;
PhysicalSize length {};
};
@ -185,7 +185,7 @@ public:
OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
OwnPtr<Region> allocate_kernel_region_with_vmobject(Range const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
OwnPtr<Region> allocate_kernel_region_with_vmobject(VirtualRange const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
struct SystemMemoryInfo {
PhysicalSize user_physical_pages { 0 };
@ -230,8 +230,8 @@ public:
PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }
Vector<UsedMemoryRange> const& used_memory_ranges() { return m_used_memory_ranges; }
bool is_allowed_to_mmap_to_userspace(PhysicalAddress, Range const&) const;
Vector<UsedMemoryVirtualRange> const& used_memory_ranges() { return m_used_memory_ranges; }
bool is_allowed_to_mmap_to_userspace(PhysicalAddress, VirtualRange const&) const;
PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
PhysicalAddress get_physical_address(PhysicalPage const&);
@ -288,9 +288,9 @@ private:
Region::ListInMemoryManager m_user_regions;
Region::ListInMemoryManager m_kernel_regions;
Vector<UsedMemoryRange> m_used_memory_ranges;
Vector<PhysicalMemoryRange> m_physical_memory_ranges;
Vector<ContiguousReservedMemoryRange> m_reserved_memory_ranges;
Vector<UsedMemoryVirtualRange> m_used_memory_ranges;
Vector<PhysicalMemoryVirtualRange> m_physical_memory_ranges;
Vector<ContiguousReservedMemoryVirtualRange> m_reserved_memory_ranges;
VMObject::List m_vmobjects;
};
@ -307,7 +307,7 @@ inline bool is_user_range(VirtualAddress vaddr, size_t size)
return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
}
inline bool is_user_range(Range const& range)
inline bool is_user_range(VirtualRange const& range)
{
return is_user_range(range.base(), range.size());
}

View file

@ -43,7 +43,7 @@ UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_
return directory;
}
RefPtr<PageDirectory> PageDirectory::try_create_for_userspace(RangeAllocator const* parent_range_allocator)
RefPtr<PageDirectory> PageDirectory::try_create_for_userspace(VirtualRangeAllocator const* parent_range_allocator)
{
constexpr FlatPtr userspace_range_base = 0x00800000;
FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;

View file

@ -11,7 +11,7 @@
#include <AK/RefPtr.h>
#include <Kernel/Forward.h>
#include <Kernel/Memory/PhysicalPage.h>
#include <Kernel/Memory/RangeAllocator.h>
#include <Kernel/Memory/VirtualRangeAllocator.h>
namespace Kernel::Memory {
@ -19,7 +19,7 @@ class PageDirectory : public RefCounted<PageDirectory> {
friend class MemoryManager;
public:
static RefPtr<PageDirectory> try_create_for_userspace(RangeAllocator const* parent_range_allocator = nullptr);
static RefPtr<PageDirectory> try_create_for_userspace(VirtualRangeAllocator const* parent_range_allocator = nullptr);
static NonnullRefPtr<PageDirectory> must_create_kernel_page_directory();
static RefPtr<PageDirectory> find_by_cr3(FlatPtr);
@ -36,10 +36,10 @@ public:
#endif
}
RangeAllocator& range_allocator() { return m_range_allocator; }
const RangeAllocator& range_allocator() const { return m_range_allocator; }
VirtualRangeAllocator& range_allocator() { return m_range_allocator; }
VirtualRangeAllocator const& range_allocator() const { return m_range_allocator; }
RangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
VirtualRangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }
Space* space() { return m_space; }
const Space* space() const { return m_space; }
@ -52,8 +52,8 @@ private:
PageDirectory();
Space* m_space { nullptr };
RangeAllocator m_range_allocator;
RangeAllocator m_identity_range_allocator;
VirtualRangeAllocator m_range_allocator;
VirtualRangeAllocator m_identity_range_allocator;
#if ARCH(X86_64)
RefPtr<PhysicalPage> m_pml4t;
#endif

View file

@ -1,48 +0,0 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/RedBlackTree.h>
#include <AK/Traits.h>
#include <Kernel/Memory/Range.h>
#include <Kernel/SpinLock.h>
namespace Kernel::Memory {
class RangeAllocator {
public:
RangeAllocator();
~RangeAllocator() = default;
void initialize_with_range(VirtualAddress, size_t);
void initialize_from_parent(RangeAllocator const&);
Optional<Range> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
Optional<Range> allocate_specific(VirtualAddress, size_t);
Optional<Range> allocate_randomized(size_t, size_t alignment);
void deallocate(Range const&);
void dump() const;
bool contains(Range const& range) const { return m_total_range.contains(range); }
private:
void carve_at_iterator(auto&, Range const&);
RedBlackTree<FlatPtr, Range> m_available_ranges;
Range m_total_range;
mutable SpinLock<u8> m_lock;
};
}
namespace AK {
template<>
struct Traits<Kernel::Memory::Range> : public GenericTraits<Kernel::Memory::Range> {
static constexpr bool is_trivial() { return true; }
};
}

View file

@ -19,7 +19,7 @@
namespace Kernel::Memory {
Region::Region(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
Region::Region(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
: m_range(range)
, m_offset_in_vmobject(offset_in_vmobject)
, m_vmobject(move(vmobject))
@ -41,11 +41,11 @@ Region::~Region()
m_vmobject->remove_region(*this);
// Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
// Unmapping the region will give the VM back to the RangeAllocator, so an interrupt handler would
// Unmapping the region will give the VM back to the VirtualRangeAllocator, so an interrupt handler would
// find the address<->region mappings in an invalid state there.
ScopedSpinLock lock(s_mm_lock);
if (m_page_directory) {
unmap(ShouldDeallocateVirtualMemoryRange::Yes);
unmap(ShouldDeallocateVirtualMemoryVirtualRange::Yes);
VERIFY(!m_page_directory);
}
@ -147,7 +147,7 @@ size_t Region::amount_shared() const
return bytes;
}
OwnPtr<Region> Region::try_create_user_accessible(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
OwnPtr<Region> Region::try_create_user_accessible(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
{
auto region = adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
if (!region)
@ -155,7 +155,7 @@ OwnPtr<Region> Region::try_create_user_accessible(Range const& range, NonnullRef
return region;
}
OwnPtr<Region> Region::try_create_kernel_only(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
OwnPtr<Region> Region::try_create_kernel_only(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
{
return adopt_own_if_nonnull(new (nothrow) Region(range, move(vmobject), offset_in_vmobject, move(name), access, cacheable, false));
}
@ -234,7 +234,7 @@ bool Region::remap_vmobject_page(size_t page_index, bool with_flush)
return success;
}
void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
void Region::unmap(ShouldDeallocateVirtualMemoryVirtualRange deallocate_range)
{
ScopedSpinLock lock(s_mm_lock);
if (!m_page_directory)
@ -246,7 +246,7 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
MM.release_pte(*m_page_directory, vaddr, i == count - 1);
}
MM.flush_tlb(m_page_directory, vaddr(), page_count());
if (deallocate_range == ShouldDeallocateVirtualMemoryRange::Yes) {
if (deallocate_range == ShouldDeallocateVirtualMemoryVirtualRange::Yes) {
if (m_page_directory->range_allocator().contains(range()))
m_page_directory->range_allocator().deallocate(range());
else

View file

@ -14,7 +14,7 @@
#include <Kernel/Heap/SlabAllocator.h>
#include <Kernel/KString.h>
#include <Kernel/Memory/PageFaultResponse.h>
#include <Kernel/Memory/RangeAllocator.h>
#include <Kernel/Memory/VirtualRangeAllocator.h>
#include <Kernel/Sections.h>
#include <Kernel/UnixTypes.h>
@ -46,12 +46,12 @@ public:
Yes,
};
static OwnPtr<Region> try_create_user_accessible(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
static OwnPtr<Region> try_create_kernel_only(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
static OwnPtr<Region> try_create_user_accessible(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
static OwnPtr<Region> try_create_kernel_only(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
~Region();
Range const& range() const { return m_range; }
VirtualRange const& range() const { return m_range; }
VirtualAddress vaddr() const { return m_range.base(); }
size_t size() const { return m_range.size(); }
bool is_readable() const { return m_access & Access::Read; }
@ -94,7 +94,7 @@ public:
return m_range.contains(vaddr);
}
bool contains(Range const& range) const
bool contains(VirtualRange const& range) const
{
return m_range.contains(range);
}
@ -168,11 +168,11 @@ public:
void set_page_directory(PageDirectory&);
bool map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
enum class ShouldDeallocateVirtualMemoryRange {
enum class ShouldDeallocateVirtualMemoryVirtualRange {
No,
Yes,
};
void unmap(ShouldDeallocateVirtualMemoryRange = ShouldDeallocateVirtualMemoryRange::Yes);
void unmap(ShouldDeallocateVirtualMemoryVirtualRange = ShouldDeallocateVirtualMemoryVirtualRange::Yes);
void remap();
@ -180,7 +180,7 @@ public:
void set_syscall_region(bool b) { m_syscall_region = b; }
private:
Region(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
Region(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
bool remap_vmobject_page(size_t page_index, bool with_flush = true);
bool do_remap_vmobject_page(size_t page_index, bool with_flush = true);
@ -200,7 +200,7 @@ private:
bool map_individual_page_impl(size_t page_index);
RefPtr<PageDirectory> m_page_directory;
Range m_range;
VirtualRange m_range;
size_t m_offset_in_vmobject { 0 };
NonnullRefPtr<VMObject> m_vmobject;
OwnPtr<KString> m_name;

View file

@ -42,7 +42,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
if (!size)
return EINVAL;
auto range_or_error = Range::expand_to_page_boundaries(addr.get(), size);
auto range_or_error = VirtualRange::expand_to_page_boundaries(addr.get(), size);
if (range_or_error.is_error())
return range_or_error.error();
auto range_to_unmap = range_or_error.value();
@ -69,7 +69,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
auto region = take_region(*old_region);
// We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
region->unmap(Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
auto new_regions_or_error = try_split_region_around_range(*region, range_to_unmap);
if (new_regions_or_error.is_error())
@ -115,7 +115,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
auto region = take_region(*old_region);
// We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
region->unmap(Region::ShouldDeallocateVirtualMemoryVirtualRange::No);
// Otherwise, split the regions and collect them for future mapping.
auto split_regions_or_error = try_split_region_around_range(*region, range_to_unmap);
@ -139,7 +139,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
return KSuccess;
}
Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
Optional<VirtualRange> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
{
vaddr.mask(PAGE_MASK);
size = page_round_up(size);
@ -148,7 +148,7 @@ Optional<Range> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t
return page_directory().range_allocator().allocate_specific(vaddr, size);
}
KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region, Range const& range, size_t offset_in_vmobject)
KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
{
auto new_region = Region::try_create_user_accessible(
range, source_region.vmobject(), offset_in_vmobject, KString::try_create(source_region.name()), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared());
@ -168,7 +168,7 @@ KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region,
return region;
}
KResultOr<Region*> Space::allocate_region(Range const& range, StringView name, int prot, AllocationStrategy strategy)
KResultOr<Region*> Space::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
{
VERIFY(range.is_valid());
auto vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
@ -185,7 +185,7 @@ KResultOr<Region*> Space::allocate_region(Range const& range, StringView name, i
return added_region;
}
KResultOr<Region*> Space::allocate_region_with_vmobject(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
KResultOr<Region*> Space::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
{
VERIFY(range.is_valid());
size_t end_in_vmobject = offset_in_vmobject + range.size();
@ -232,7 +232,7 @@ NonnullOwnPtr<Region> Space::take_region(Region& region)
return found_region;
}
Region* Space::find_region_from_range(const Range& range)
Region* Space::find_region_from_range(VirtualRange const& range)
{
ScopedSpinLock lock(m_lock);
if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
@ -250,7 +250,7 @@ Region* Space::find_region_from_range(const Range& range)
return region;
}
Region* Space::find_region_containing(const Range& range)
Region* Space::find_region_containing(VirtualRange const& range)
{
ScopedSpinLock lock(m_lock);
auto candidate = m_regions.find_largest_not_above(range.base().get());
@ -259,7 +259,7 @@ Region* Space::find_region_containing(const Range& range)
return (*candidate)->range().contains(range) ? candidate->ptr() : nullptr;
}
Vector<Region*> Space::find_regions_intersecting(const Range& range)
Vector<Region*> Space::find_regions_intersecting(VirtualRange const& range)
{
Vector<Region*> regions = {};
size_t total_size_collected = 0;
@ -291,13 +291,13 @@ Region* Space::add_region(NonnullOwnPtr<Region> region)
}
// Carve out a virtual address range from a region and return the two regions on either side
KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region& source_region, const Range& desired_range)
KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
{
Range old_region_range = source_region.range();
VirtualRange old_region_range = source_region.range();
auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);
VERIFY(!remaining_ranges_after_unmap.is_empty());
auto try_make_replacement_region = [&](const Range& new_range) -> KResultOr<Region*> {
auto try_make_replacement_region = [&](VirtualRange const& new_range) -> KResultOr<Region*> {
VERIFY(old_region_range.contains(new_range));
size_t new_range_offset_in_vmobject = source_region.offset_in_vmobject() + (new_range.base().get() - old_region_range.base().get());
return try_allocate_split_region(source_region, new_range, new_range_offset_in_vmobject);

View file

@ -35,20 +35,20 @@ public:
KResult unmap_mmap_range(VirtualAddress, size_t);
Optional<Range> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
Optional<VirtualRange> allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
KResultOr<Region*> allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
KResultOr<Region*> allocate_region(const Range&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
KResultOr<Region*> allocate_region_with_vmobject(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
KResultOr<Region*> allocate_region(VirtualRange const&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
void deallocate_region(Region& region);
NonnullOwnPtr<Region> take_region(Region& region);
KResultOr<Region*> try_allocate_split_region(Region const& source_region, Range const&, size_t offset_in_vmobject);
KResultOr<Vector<Region*, 2>> try_split_region_around_range(Region const& source_region, Range const&);
KResultOr<Region*> try_allocate_split_region(Region const& source_region, VirtualRange const&, size_t offset_in_vmobject);
KResultOr<Vector<Region*, 2>> try_split_region_around_range(Region const& source_region, VirtualRange const&);
Region* find_region_from_range(const Range&);
Region* find_region_containing(const Range&);
Region* find_region_from_range(VirtualRange const&);
Region* find_region_containing(VirtualRange const&);
Vector<Region*> find_regions_intersecting(const Range&);
Vector<Region*> find_regions_intersecting(VirtualRange const&);
bool enforces_syscall_regions() const { return m_enforces_syscall_regions; }
void set_enforces_syscall_regions(bool b) { m_enforces_syscall_regions = b; }
@ -76,7 +76,7 @@ private:
RedBlackTree<FlatPtr, NonnullOwnPtr<Region>> m_regions;
struct RegionLookupCache {
Optional<Range> range;
Optional<VirtualRange> range;
WeakPtr<Region> region;
};
RegionLookupCache m_region_lookup_cache;

View file

@ -7,16 +7,16 @@
#include <AK/Vector.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/Range.h>
#include <Kernel/Memory/VirtualRange.h>
#include <LibC/limits.h>
namespace Kernel::Memory {
Vector<Range, 2> Range::carve(const Range& taken) const
Vector<VirtualRange, 2> VirtualRange::carve(VirtualRange const& taken) const
{
VERIFY((taken.size() % PAGE_SIZE) == 0);
Vector<Range, 2> parts;
Vector<VirtualRange, 2> parts;
if (taken == *this)
return {};
if (taken.base() > base())
@ -25,7 +25,7 @@ Vector<Range, 2> Range::carve(const Range& taken) const
parts.append({ taken.end(), end().get() - taken.end().get() });
return parts;
}
Range Range::intersect(const Range& other) const
VirtualRange VirtualRange::intersect(VirtualRange const& other) const
{
if (*this == other) {
return *this;
@ -33,10 +33,10 @@ Range Range::intersect(const Range& other) const
auto new_base = max(base(), other.base());
auto new_end = min(end(), other.end());
VERIFY(new_base < new_end);
return Range(new_base, (new_end - new_base).get());
return VirtualRange(new_base, (new_end - new_base).get());
}
KResultOr<Range> Range::expand_to_page_boundaries(FlatPtr address, size_t size)
KResultOr<VirtualRange> VirtualRange::expand_to_page_boundaries(FlatPtr address, size_t size)
{
if (page_round_up_would_wrap(size))
return EINVAL;
@ -50,7 +50,7 @@ KResultOr<Range> Range::expand_to_page_boundaries(FlatPtr address, size_t size)
auto base = VirtualAddress { address }.page_base();
auto end = page_round_up(address + size);
return Range { base, end - base.get() };
return VirtualRange { base, end - base.get() };
}
}

View file

@ -12,12 +12,12 @@
namespace Kernel::Memory {
class Range {
friend class RangeAllocator;
class VirtualRange {
friend class VirtualRangeAllocator;
public:
Range() = delete;
Range(VirtualAddress base, size_t size)
VirtualRange() = delete;
VirtualRange(VirtualAddress base, size_t size)
: m_base(base)
, m_size(size)
{
@ -31,7 +31,7 @@ public:
VirtualAddress end() const { return m_base.offset(m_size); }
bool operator==(const Range& other) const
bool operator==(VirtualRange const& other) const
{
return m_base == other.m_base && m_size == other.m_size;
}
@ -43,15 +43,15 @@ public:
return base >= m_base && base.offset(size) <= end();
}
bool contains(const Range& other) const
bool contains(VirtualRange const& other) const
{
return contains(other.base(), other.size());
}
Vector<Range, 2> carve(const Range&) const;
Range intersect(const Range&) const;
Vector<VirtualRange, 2> carve(VirtualRange const&) const;
VirtualRange intersect(VirtualRange const&) const;
static KResultOr<Range> expand_to_page_boundaries(FlatPtr address, size_t size);
static KResultOr<VirtualRange> expand_to_page_boundaries(FlatPtr address, size_t size);
private:
VirtualAddress m_base;
@ -61,8 +61,8 @@ private:
}
template<>
struct AK::Formatter<Kernel::Memory::Range> : Formatter<FormatString> {
void format(FormatBuilder& builder, Kernel::Memory::Range value)
struct AK::Formatter<Kernel::Memory::VirtualRange> : Formatter<FormatString> {
void format(FormatBuilder& builder, Kernel::Memory::VirtualRange value)
{
return Formatter<FormatString>::format(builder, "{} - {} (size {:p})", value.base().as_ptr(), value.base().offset(value.size() - 1).as_ptr(), value.size());
}

View file

@ -5,25 +5,25 @@
*/
#include <AK/Checked.h>
#include <Kernel/Memory/RangeAllocator.h>
#include <Kernel/Memory/VirtualRangeAllocator.h>
#include <Kernel/Random.h>
#define VM_GUARD_PAGES
namespace Kernel::Memory {
RangeAllocator::RangeAllocator()
VirtualRangeAllocator::VirtualRangeAllocator()
: m_total_range({}, 0)
{
}
void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
void VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
{
m_total_range = { base, size };
m_available_ranges.insert(base.get(), Range { base, size });
m_available_ranges.insert(base.get(), VirtualRange { base, size });
}
void RangeAllocator::initialize_from_parent(RangeAllocator const& parent_allocator)
void VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator)
{
ScopedSpinLock lock(parent_allocator.m_lock);
m_total_range = parent_allocator.m_total_range;
@ -33,16 +33,16 @@ void RangeAllocator::initialize_from_parent(RangeAllocator const& parent_allocat
}
}
void RangeAllocator::dump() const
void VirtualRangeAllocator::dump() const
{
VERIFY(m_lock.is_locked());
dbgln("RangeAllocator({})", this);
dbgln("VirtualRangeAllocator({})", this);
for (auto& range : m_available_ranges) {
dbgln(" {:x} -> {:x}", range.base().get(), range.end().get() - 1);
}
}
void RangeAllocator::carve_at_iterator(auto& it, Range const& range)
void VirtualRangeAllocator::carve_at_iterator(auto& it, VirtualRange const& range)
{
VERIFY(m_lock.is_locked());
auto remaining_parts = (*it).carve(range);
@ -56,7 +56,7 @@ void RangeAllocator::carve_at_iterator(auto& it, Range const& range)
}
}
Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignment)
Optional<VirtualRange> VirtualRangeAllocator::allocate_randomized(size_t size, size_t alignment)
{
if (!size)
return {};
@ -80,7 +80,7 @@ Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignmen
return allocate_anywhere(size, alignment);
}
Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, size_t alignment)
{
if (!size)
return {};
@ -114,7 +114,7 @@ Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
FlatPtr initial_base = available_range.base().offset(offset_from_effective_base).get();
FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);
Range const allocated_range(VirtualAddress(aligned_base), size);
VirtualRange const allocated_range(VirtualAddress(aligned_base), size);
VERIFY(m_total_range.contains(allocated_range));
@ -125,11 +125,11 @@ Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
carve_at_iterator(it, allocated_range);
return allocated_range;
}
dmesgln("RangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
dmesgln("VirtualRangeAllocator: Failed to allocate anywhere: size={}, alignment={}", size, alignment);
return {};
}
Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress base, size_t size)
{
if (!size)
return {};
@ -137,7 +137,7 @@ Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t si
VERIFY(base.is_page_aligned());
VERIFY((size % PAGE_SIZE) == 0);
Range const allocated_range(base, size);
VirtualRange const allocated_range(base, size);
if (!m_total_range.contains(allocated_range)) {
return {};
}
@ -157,7 +157,7 @@ Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t si
return {};
}
void RangeAllocator::deallocate(Range const& range)
void VirtualRangeAllocator::deallocate(VirtualRange const& range)
{
ScopedSpinLock lock(m_lock);
VERIFY(m_total_range.contains(range));
@ -166,7 +166,7 @@ void RangeAllocator::deallocate(Range const& range)
VERIFY(range.base() < range.end());
VERIFY(!m_available_ranges.is_empty());
Range merged_range = range;
VirtualRange merged_range = range;
{
// Try merging with preceding range.

View file

@ -0,0 +1,48 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/RedBlackTree.h>
#include <AK/Traits.h>
#include <Kernel/Memory/VirtualRange.h>
#include <Kernel/SpinLock.h>
namespace Kernel::Memory {
class VirtualRangeAllocator {
public:
VirtualRangeAllocator();
~VirtualRangeAllocator() = default;
void initialize_with_range(VirtualAddress, size_t);
void initialize_from_parent(VirtualRangeAllocator const&);
Optional<VirtualRange> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
Optional<VirtualRange> allocate_specific(VirtualAddress, size_t);
Optional<VirtualRange> allocate_randomized(size_t, size_t alignment);
void deallocate(VirtualRange const&);
void dump() const;
bool contains(VirtualRange const& range) const { return m_total_range.contains(range); }
private:
void carve_at_iterator(auto&, VirtualRange const&);
RedBlackTree<FlatPtr, VirtualRange> m_available_ranges;
VirtualRange m_total_range;
mutable SpinLock<u8> m_lock;
};
}
namespace AK {
template<>
struct Traits<Kernel::Memory::VirtualRange> : public GenericTraits<Kernel::Memory::VirtualRange> {
static constexpr bool is_trivial() { return true; }
};
}
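To close the loop on the new header above, here is a hypothetical usage sketch (not from this commit) of VirtualRangeAllocator, restricted to the API it declares: seed it with a slab of virtual address space, hand out page-aligned VirtualRanges, and return them when the corresponding mappings go away.

#include <Kernel/Memory/VirtualRangeAllocator.h>

using namespace Kernel::Memory;

// Sketch only: the base address and sizes are made up, and this would only
// build inside the kernel tree.
void allocator_example()
{
    VirtualRangeAllocator allocator;
    allocator.initialize_with_range(VirtualAddress { 0x20000000 }, 64 * MiB);

    // Any 16 KiB of virtual address space, PAGE_SIZE-aligned by default.
    auto anywhere = allocator.allocate_anywhere(4 * PAGE_SIZE);
    VERIFY(anywhere.has_value());
    VERIFY(allocator.contains(anywhere.value()));

    // A specific, page-aligned base address (must lie inside the total range
    // and still be available).
    auto specific = allocator.allocate_specific(VirtualAddress { 0x21000000 }, 2 * PAGE_SIZE);
    VERIFY(specific.has_value());

    // Give both ranges back, as Region::unmap() does via deallocate().
    allocator.deallocate(anywhere.value());
    allocator.deallocate(specific.value());
}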