
Kernel: Move Memory/PageDirectory.{cpp,h} to arch-specific directory

The handling of page tables is very architecture-specific, so it belongs
in the Arch directory. Some parts were already architecture-specific;
this commit moves the rest of the PageDirectory class into the Arch
directory as well.

While we're here, the aarch64/PageDirectory.{h,cpp} files are updated to
be aarch64-specific by renaming some members and removing x86_64-specific
code.
Timon Kruiper 2023-01-19 22:36:48 +01:00 committed by Andreas Kling
parent 55d756a813
commit 697c5ca5e5
23 changed files with 304 additions and 228 deletions
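The diffs below all follow the same pattern: callers now include <Kernel/Arch/PageDirectory.h> instead of <Kernel/Memory/PageDirectory.h>, and the old Memory/ files are deleted. As a rough, hedged sketch of how such an architecture-dispatch header is typically laid out (the exact guards and include paths here are assumptions, not taken from this diff):

#pragma once

#include <AK/Platform.h>

// Pick the per-architecture PageDirectory implementation at compile time.
#if ARCH(X86_64)
#    include <Kernel/Arch/x86_64/PageDirectory.h>
#elif ARCH(AARCH64)
#    include <Kernel/Arch/aarch64/PageDirectory.h>
#else
#    error "Unknown architecture"
#endif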


@@ -9,10 +9,10 @@
#include <AK/RedBlackTree.h>
#include <AK/Vector.h>
#include <Kernel/Arch/PageDirectory.h>
#include <Kernel/Library/LockWeakPtr.h>
#include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/Memory/AllocationStrategy.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/Memory/RegionTree.h>
#include <Kernel/UnixTypes.h>


@@ -18,7 +18,6 @@
#include <Kernel/KSyms.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Memory/PhysicalRegion.h>
#include <Kernel/Memory/SharedInodeVMObject.h>
#include <Kernel/Multiboot.h>


@@ -21,15 +21,13 @@
#include <Kernel/Memory/RegionTree.h>
#include <Kernel/Memory/VMObject.h>

namespace Kernel {

class PageDirectoryEntry;
class PageTableEntry;

}

struct KmallocGlobalData;

namespace Kernel::Memory {

class PageDirectoryEntry;
class PageTableEntry;

ErrorOr<FlatPtr> page_round_up(FlatPtr x);

constexpr FlatPtr page_round_down(FlatPtr x)


@@ -1,119 +0,0 @@
/*
* Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/Singleton.h>
#include <Kernel/Arch/CPU.h>
#include <Kernel/Arch/PageDirectory.h>
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Prekernel/Prekernel.h>
#include <Kernel/Process.h>
#include <Kernel/Random.h>
#include <Kernel/Sections.h>

extern u8 start_of_kernel_image[];
extern u8 end_of_kernel_image[];

namespace Kernel::Memory {

UNMAP_AFTER_INIT NonnullLockRefPtr<PageDirectory> PageDirectory::must_create_kernel_page_directory()
{
    return adopt_lock_ref_if_nonnull(new (nothrow) PageDirectory).release_nonnull();
}

ErrorOr<NonnullLockRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace()
{
    auto directory = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) PageDirectory));

#if ARCH(X86_64)
    directory->m_pml4t = TRY(MM.allocate_physical_page());
#endif

    directory->m_directory_table = TRY(MM.allocate_physical_page());
    auto kernel_pd_index = (kernel_mapping_base >> 30) & 0x1ffu;
    for (size_t i = 0; i < kernel_pd_index; i++) {
        directory->m_directory_pages[i] = TRY(MM.allocate_physical_page());
    }

    // Share the top 1 GiB of kernel-only mappings (>=kernel_mapping_base)
    directory->m_directory_pages[kernel_pd_index] = MM.kernel_page_directory().m_directory_pages[kernel_pd_index];

#if ARCH(X86_64)
    {
        InterruptDisabler disabler;
        auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*directory->m_pml4t);
        table.raw[0] = (FlatPtr)directory->m_directory_table->paddr().as_ptr() | 7;
        MM.unquickmap_page();
    }
#endif

    {
        InterruptDisabler disabler;
        auto& table = *(PageDirectoryPointerTable*)MM.quickmap_page(*directory->m_directory_table);
        for (size_t i = 0; i < sizeof(m_directory_pages) / sizeof(m_directory_pages[0]); i++) {
            if (directory->m_directory_pages[i]) {
                table.raw[i] = (FlatPtr)directory->m_directory_pages[i]->paddr().as_ptr() | 7;
            }
        }

#if ARCH(X86_64)
        // 2 ** MAXPHYADDR - 1
        // Where MAXPHYADDR = physical_address_bit_width
        u64 max_physical_address = (1ULL << Processor::current().physical_address_bit_width()) - 1;

        // bit 63 = no execute
        // bit 7 = page size
        // bit 5 = accessed
        // bit 4 = cache disable
        // bit 3 = write through
        // bit 2 = user/supervisor
        // bit 1 = read/write
        // bit 0 = present
        constexpr u64 pdpte_bit_flags = 0x80000000000000BF;

        // This is to notify us of bugs where we're:
        // 1. Going over what the processor is capable of.
        // 2. Writing into the reserved bits (51:MAXPHYADDR), where doing so throws a GPF
        //    when writing out the PDPT pointer to CR3.
        // The reason we're not checking the page directory's physical address directly is because
        // we're checking for sign extension when putting it into a PDPTE. See issue #4584.
        for (auto table_entry : table.raw)
            VERIFY((table_entry & ~pdpte_bit_flags) <= max_physical_address);
#endif

        MM.unquickmap_page();
    }

    register_page_directory(directory);
    return directory;
}

PageDirectory::PageDirectory() = default;

UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
{
    // Adopt the page tables already set up by boot.S
#if ARCH(X86_64)
    dmesgln("MM: boot_pml4t @ {}", boot_pml4t);
    m_pml4t = PhysicalPage::create(boot_pml4t, MayReturnToFreeList::No);
#endif
    dmesgln("MM: boot_pdpt @ {}", boot_pdpt);
    dmesgln("MM: boot_pd0 @ {}", boot_pd0);
    dmesgln("MM: boot_pd_kernel @ {}", boot_pd_kernel);
    m_directory_table = PhysicalPage::create(boot_pdpt, MayReturnToFreeList::No);
    m_directory_pages[0] = PhysicalPage::create(boot_pd0, MayReturnToFreeList::No);
    m_directory_pages[(kernel_mapping_base >> 30) & 0x1ff] = PhysicalPage::create(boot_pd_kernel, MayReturnToFreeList::No);
}

PageDirectory::~PageDirectory()
{
    if (is_cr3_initialized()) {
        deregister_page_directory(this);
    }
}

}
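The deleted try_create_for_userspace() above leans on two bits of x86_64 paging arithmetic: each PDPT entry maps 1 GiB (1 << 30 bytes) and a PDPT holds 512 entries, which is where the (address >> 30) & 0x1ff index comes from, and the | 7 marks an entry present, writable, and user-accessible per the bit legend in the code. A small standalone sketch of that math (illustration only, not part of the commit):

#include <cassert>
#include <cstdint>

// Index of the PDPT slot covering a virtual address: bits 30..38.
constexpr uint64_t pdpt_index(uint64_t virtual_address)
{
    return (virtual_address >> 30) & 0x1ff;
}

// Low PDPTE flag bits from the legend above: present | read/write | user.
constexpr uint64_t pdpte_low_flags = (1u << 0) | (1u << 1) | (1u << 2);
static_assert(pdpte_low_flags == 7, "matches the literal 7 used when writing table.raw[i]");

int main()
{
    assert(pdpt_index(3ull << 30) == 3); // the fourth GiB of address space lands in slot 3
    return 0;
}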


@@ -1,77 +0,0 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once

#include <AK/AtomicRefCounted.h>
#include <AK/Badge.h>
#include <AK/HashMap.h>
#include <AK/IntrusiveRedBlackTree.h>
#include <AK/RefPtr.h>
#include <Kernel/Forward.h>
#include <Kernel/Locking/Spinlock.h>
#include <Kernel/Memory/PhysicalPage.h>

namespace Kernel::Memory {

class PageDirectory final : public AtomicRefCounted<PageDirectory> {
    friend class MemoryManager;

public:
    static ErrorOr<NonnullLockRefPtr<PageDirectory>> try_create_for_userspace();
    static NonnullLockRefPtr<PageDirectory> must_create_kernel_page_directory();
    static LockRefPtr<PageDirectory> find_current();

    ~PageDirectory();

    void allocate_kernel_directory();

    FlatPtr cr3() const
    {
#if ARCH(X86_64)
        return m_pml4t->paddr().get();
#else
        return m_directory_table->paddr().get();
#endif
    }

    bool is_cr3_initialized() const
    {
#if ARCH(X86_64)
        return m_pml4t;
#else
        return m_directory_table;
#endif
    }

    AddressSpace* address_space() { return m_space; }
    AddressSpace const* address_space() const { return m_space; }

    void set_space(Badge<AddressSpace>, AddressSpace& space) { m_space = &space; }

    RecursiveSpinlock<LockRank::None>& get_lock() { return m_lock; }

    // This has to be public to let the global singleton access the member pointer
    IntrusiveRedBlackTreeNode<FlatPtr, PageDirectory, RawPtr<PageDirectory>> m_tree_node;

private:
    PageDirectory();

    static void register_page_directory(PageDirectory* directory);
    static void deregister_page_directory(PageDirectory* directory);

    AddressSpace* m_space { nullptr };
#if ARCH(X86_64)
    RefPtr<PhysicalPage> m_pml4t;
#endif
    RefPtr<PhysicalPage> m_directory_table;
    RefPtr<PhysicalPage> m_directory_pages[512];
    RecursiveSpinlock<LockRank::None> m_lock {};
};

void activate_kernel_page_directory(PageDirectory const& pgd);
void activate_page_directory(PageDirectory const& pgd, Thread* current_thread);

}
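The deleted header above is still x86_64-flavoured (cr3(), m_pml4t, CR3 naming throughout). The commit message says the aarch64 copy gets members renamed and x86_64-only code removed; as a purely hypothetical sketch of that direction, using placeholder names ttbr0() and m_root_table rather than whatever the commit actually chose:

// Hypothetical fragment only; see Kernel/Arch/aarch64/PageDirectory.h in the
// tree for the real interface.
class PageDirectory final : public AtomicRefCounted<PageDirectory> {
public:
    // AArch64 installs the root translation table via TTBR0_EL1/TTBR1_EL1,
    // not CR3, so the accessor no longer pretends to be a CR3 value.
    FlatPtr ttbr0() const { return m_root_table->paddr().get(); }
    bool is_root_table_initialized() const { return m_root_table; }

private:
    RefPtr<PhysicalPage> m_root_table; // no x86_64-only m_pml4t member
};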


@@ -12,7 +12,6 @@
#include <Kernel/InterruptDisabler.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/PageDirectory.h>
#include <Kernel/Memory/Region.h>
#include <Kernel/Memory/SharedInodeVMObject.h>
#include <Kernel/Panic.h>