1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-26 08:57:34 +00:00

Kernel: Access MemoryManager static functions statically

SonarCloud flagged this "Code Smell", where we are accessing these
static methods as if they are instance methods. While it is technically
possible, it is very confusing to read when you realize they are static
functions.
This commit is contained in:
Brian Gianforcaro 2021-10-01 23:45:15 -07:00 committed by Andreas Kling
parent 024367d82e
commit 0223faf6f4
6 changed files with 11 additions and 9 deletions

View file

@@ -9,6 +9,8 @@
#include <Kernel/Panic.h>
#include <Kernel/Process.h>
using namespace Kernel;
void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func)
{
asm volatile("cli");
@@ -28,7 +30,7 @@ void __assertion_failed(const char* msg, const char* file, unsigned line, const
// Switch back to the current process's page tables if there are any.
// Otherwise stack walking will be a disaster.
if (Process::has_current())
-        MM.enter_process_address_space(Process::current());
+        Memory::MemoryManager::enter_process_address_space(Process::current());
PANIC("Aborted");
}

View file

@@ -221,7 +221,7 @@ void handle_crash(RegisterState const& regs, char const* description, int signal
// If a process crashed while inspecting another process,
// make sure we switch back to the right page tables.
-        MM.enter_process_address_space(process);
+        Memory::MemoryManager::enter_process_address_space(process);
dmesgln("CRASH: CPU #{} {} in ring {}", Processor::current_id(), description, (regs.cs & 3));
dump(regs);

View file

@@ -213,7 +213,7 @@ bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
VERIFY(physical_page(page_index));
bool success = map_individual_page_impl(page_index);
if (with_flush)
-        MM.flush_tlb(m_page_directory, vaddr_from_page_index(page_index));
+        MemoryManager::flush_tlb(m_page_directory, vaddr_from_page_index(page_index));
return success;
}
@@ -239,7 +239,7 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
auto vaddr = vaddr_from_page_index(i);
MM.release_pte(*m_page_directory, vaddr, i == count - 1);
}
-    MM.flush_tlb(m_page_directory, vaddr(), page_count());
+    MemoryManager::flush_tlb(m_page_directory, vaddr(), page_count());
if (deallocate_range == ShouldDeallocateVirtualRange::Yes) {
m_page_directory->range_allocator().deallocate(range());
}
@@ -272,7 +272,7 @@ KResult Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_t
}
if (page_index > 0) {
if (should_flush_tlb == ShouldFlushTLB::Yes)
-            MM.flush_tlb(m_page_directory, vaddr(), page_index);
+            MemoryManager::flush_tlb(m_page_directory, vaddr(), page_index);
if (page_index == page_count())
return KSuccess;
}

View file

@@ -14,7 +14,7 @@ ScopedAddressSpaceSwitcher::ScopedAddressSpaceSwitcher(Process& process)
{
VERIFY(Thread::current() != nullptr);
m_previous_cr3 = read_cr3();
-    MM.enter_process_address_space(process);
+    Memory::MemoryManager::enter_process_address_space(process);
}
ScopedAddressSpaceSwitcher::~ScopedAddressSpaceSwitcher()

View file

@@ -204,7 +204,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
PANIC("Syscall from process with IOPL != 0");
}
-    MM.validate_syscall_preconditions(process.address_space(), regs);
+    Memory::MemoryManager::validate_syscall_preconditions(process.address_space(), regs);
FlatPtr function;
FlatPtr arg1;

View file

@@ -23,7 +23,7 @@ KResultOr<FlatPtr> Process::sys$purge(int mode)
NonnullRefPtrVector<Memory::AnonymousVMObject> vmobjects;
{
KResult result(KSuccess);
-        MM.for_each_vmobject([&](auto& vmobject) {
+        Memory::MemoryManager::for_each_vmobject([&](auto& vmobject) {
if (vmobject.is_anonymous()) {
// In the event that the append fails, only attempt to continue
// the purge if we have already appended something successfully.
@@ -46,7 +46,7 @@ KResultOr<FlatPtr> Process::sys$purge(int mode)
NonnullRefPtrVector<Memory::InodeVMObject> vmobjects;
{
KResult result(KSuccess);
-        MM.for_each_vmobject([&](auto& vmobject) {
+        Memory::MemoryManager::for_each_vmobject([&](auto& vmobject) {
if (vmobject.is_inode()) {
// In the event that the append fails, only attempt to continue
// the purge if we have already appended something successfully.