diff --git a/Kernel/Arch/x86/common/CPU.cpp b/Kernel/Arch/x86/common/CPU.cpp index d3d393320e..53afe555d2 100644 --- a/Kernel/Arch/x86/common/CPU.cpp +++ b/Kernel/Arch/x86/common/CPU.cpp @@ -9,6 +9,8 @@ #include #include +using namespace Kernel; + void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func) { asm volatile("cli"); @@ -28,7 +30,7 @@ void __assertion_failed(const char* msg, const char* file, unsigned line, const // Switch back to the current process's page tables if there are any. // Otherwise stack walking will be a disaster. if (Process::has_current()) - MM.enter_process_address_space(Process::current()); + Memory::MemoryManager::enter_process_address_space(Process::current()); PANIC("Aborted"); } diff --git a/Kernel/Arch/x86/common/Interrupts.cpp b/Kernel/Arch/x86/common/Interrupts.cpp index b600bcc5d1..847dcbf236 100644 --- a/Kernel/Arch/x86/common/Interrupts.cpp +++ b/Kernel/Arch/x86/common/Interrupts.cpp @@ -221,7 +221,7 @@ void handle_crash(RegisterState const& regs, char const* description, int signal // If a process crashed while inspecting another process, // make sure we switch back to the right page tables. 
- MM.enter_process_address_space(process); + Memory::MemoryManager::enter_process_address_space(process); dmesgln("CRASH: CPU #{} {} in ring {}", Processor::current_id(), description, (regs.cs & 3)); dump(regs); diff --git a/Kernel/Memory/Region.cpp b/Kernel/Memory/Region.cpp index 559b540766..8a5e04c87c 100644 --- a/Kernel/Memory/Region.cpp +++ b/Kernel/Memory/Region.cpp @@ -213,7 +213,7 @@ bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush) VERIFY(physical_page(page_index)); bool success = map_individual_page_impl(page_index); if (with_flush) - MM.flush_tlb(m_page_directory, vaddr_from_page_index(page_index)); + MemoryManager::flush_tlb(m_page_directory, vaddr_from_page_index(page_index)); return success; } @@ -239,7 +239,7 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range) auto vaddr = vaddr_from_page_index(i); MM.release_pte(*m_page_directory, vaddr, i == count - 1); } - MM.flush_tlb(m_page_directory, vaddr(), page_count()); + MemoryManager::flush_tlb(m_page_directory, vaddr(), page_count()); if (deallocate_range == ShouldDeallocateVirtualRange::Yes) { m_page_directory->range_allocator().deallocate(range()); } @@ -272,7 +272,7 @@ KResult Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_t } if (page_index > 0) { if (should_flush_tlb == ShouldFlushTLB::Yes) - MM.flush_tlb(m_page_directory, vaddr(), page_index); + MemoryManager::flush_tlb(m_page_directory, vaddr(), page_index); if (page_index == page_count()) return KSuccess; } diff --git a/Kernel/Memory/ScopedAddressSpaceSwitcher.cpp b/Kernel/Memory/ScopedAddressSpaceSwitcher.cpp index 622eec26a0..f007141a68 100644 --- a/Kernel/Memory/ScopedAddressSpaceSwitcher.cpp +++ b/Kernel/Memory/ScopedAddressSpaceSwitcher.cpp @@ -14,7 +14,7 @@ ScopedAddressSpaceSwitcher::ScopedAddressSpaceSwitcher(Process& process) { VERIFY(Thread::current() != nullptr); m_previous_cr3 = read_cr3(); - MM.enter_process_address_space(process); + 
Memory::MemoryManager::enter_process_address_space(process); } ScopedAddressSpaceSwitcher::~ScopedAddressSpaceSwitcher() diff --git a/Kernel/Syscall.cpp b/Kernel/Syscall.cpp index 3d840cd789..d4ca07b2ce 100644 --- a/Kernel/Syscall.cpp +++ b/Kernel/Syscall.cpp @@ -204,7 +204,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap) PANIC("Syscall from process with IOPL != 0"); } - MM.validate_syscall_preconditions(process.address_space(), regs); + Memory::MemoryManager::validate_syscall_preconditions(process.address_space(), regs); FlatPtr function; FlatPtr arg1; diff --git a/Kernel/Syscalls/purge.cpp b/Kernel/Syscalls/purge.cpp index 72057dbafe..b71d3f29c4 100644 --- a/Kernel/Syscalls/purge.cpp +++ b/Kernel/Syscalls/purge.cpp @@ -23,7 +23,7 @@ KResultOr<FlatPtr> Process::sys$purge(int mode) NonnullRefPtrVector<Memory::AnonymousVMObject> vmobjects; { KResult result(KSuccess); - MM.for_each_vmobject([&](auto& vmobject) { + Memory::MemoryManager::for_each_vmobject([&](auto& vmobject) { if (vmobject.is_anonymous()) { // In the event that the append fails, only attempt to continue // the purge if we have already appended something successfully. @@ -46,7 +46,7 @@ KResultOr<FlatPtr> Process::sys$purge(int mode) NonnullRefPtrVector<Memory::InodeVMObject> vmobjects; { KResult result(KSuccess); - MM.for_each_vmobject([&](auto& vmobject) { + Memory::MemoryManager::for_each_vmobject([&](auto& vmobject) { if (vmobject.is_inode()) { // In the event that the append fails, only attempt to continue // the purge if we have already appended something successfully.