
Kernel: Run clang-format on everything.

Andreas Kling 2019-06-07 11:43:58 +02:00
parent 98eeb8f22d
commit bc951ca565
63 changed files with 974 additions and 856 deletions
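
Most of the churn in the hunks below is clang-format re-wrapping GCC extended inline assembly, which is why the bare "::" and ":::" separators move between lines. Purely as a reading aid (this snippet is not part of the commit, and the function is hypothetical), the general shape of such a statement is asm volatile(template : outputs : inputs : clobbers), with empty sections written as bare colons:

#include <cstdio>

// Hypothetical x86 example, not taken from the commit: increments a value
// using the extended inline asm form asm volatile(template : outputs : inputs : clobbers).
static unsigned increment(unsigned value)
{
    asm volatile("incl %0"     // assembly template; %0 refers to the first operand
                 : "+r"(value) // outputs: "+r" means value is read and written, kept in a register
                 :             // inputs: none beyond the "+r" operand above
                 : "cc");      // clobbers: condition codes, since incl updates EFLAGS
    return value;
}

int main()
{
    std::printf("%u\n", increment(41)); // prints 42
    return 0;
}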

Kernel/VM/MemoryManager.cpp

@@ -1,11 +1,11 @@
-#include <Kernel/VM/MemoryManager.h>
-#include <Kernel/FileSystem/Inode.h>
+#include "CMOS.h"
+#include "Process.h"
+#include "StdLib.h"
+#include "i386.h"
 #include <AK/Assertions.h>
 #include <AK/kstdio.h>
-#include "i386.h"
-#include "StdLib.h"
-#include "Process.h"
-#include "CMOS.h"
+#include <Kernel/FileSystem/Inode.h>
+#include <Kernel/VM/MemoryManager.h>

 //#define MM_DEBUG
 //#define PAGE_FAULT_DEBUG
@@ -96,12 +96,12 @@ void MemoryManager::initialize_paging()
 dbgprintf("MM: Installing page directory\n");
 #endif

-asm volatile("movl %%eax, %%cr3"::"a"(kernel_page_directory().cr3()));
+asm volatile("movl %%eax, %%cr3" ::"a"(kernel_page_directory().cr3()));
 asm volatile(
 "movl %%cr0, %%eax\n"
 "orl $0x80000001, %%eax\n"
-"movl %%eax, %%cr0\n"
-:::"%eax", "memory");
+"movl %%eax, %%cr0\n" ::
+: "%eax", "memory");

 #ifdef MM_DEBUG
 dbgprintf("MM: Paging initialized.\n");
@@ -302,7 +302,6 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 return true;
 }

-
 bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
 {
 ASSERT(region.page_directory());
@@ -416,7 +415,7 @@ RetainPtr<PhysicalPage> MemoryManager::allocate_physical_page(ShouldZeroFill sho
 if (1 > m_free_physical_pages.size()) {
 kprintf("FUCK! No physical pages available.\n");
 ASSERT_NOT_REACHED();
-return { };
+return {};
 }
 #ifdef MM_DEBUG
 dbgprintf("MM: allocate_physical_page vending P%x (%u remaining)\n", m_free_physical_pages.last()->paddr().get(), m_free_physical_pages.size());
@@ -436,7 +435,7 @@ RetainPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
 if (1 > m_free_supervisor_physical_pages.size()) {
 kprintf("FUCK! No physical pages available.\n");
 ASSERT_NOT_REACHED();
-return { };
+return {};
 }
 #ifdef MM_DEBUG
 dbgprintf("MM: allocate_supervisor_physical_page vending P%x (%u remaining)\n", m_free_supervisor_physical_pages.last()->paddr().get(), m_free_supervisor_physical_pages.size());
@@ -451,21 +450,24 @@ void MemoryManager::enter_process_paging_scope(Process& process)
 ASSERT(current);
 InterruptDisabler disabler;
 current->tss().cr3 = process.page_directory().cr3();
-asm volatile("movl %%eax, %%cr3"::"a"(process.page_directory().cr3()):"memory");
+asm volatile("movl %%eax, %%cr3" ::"a"(process.page_directory().cr3())
+: "memory");
 }

 void MemoryManager::flush_entire_tlb()
 {
 asm volatile(
 "mov %%cr3, %%eax\n"
-"mov %%eax, %%cr3\n"
-::: "%eax", "memory"
-);
+"mov %%eax, %%cr3\n" ::
+: "%eax", "memory");
 }

 void MemoryManager::flush_tlb(LinearAddress laddr)
 {
-asm volatile("invlpg %0": :"m" (*(char*)laddr.get()) : "memory");
+asm volatile("invlpg %0"
+:
+: "m"(*(char*)laddr.get())
+: "memory");
 }

 void MemoryManager::map_for_kernel(LinearAddress laddr, PhysicalAddress paddr)

Kernel/VM/PageDirectory.cpp

@@ -1,7 +1,7 @@
-#include <Kernel/VM/PageDirectory.h>
-#include <Kernel/VM/MemoryManager.h>
 #include <Kernel/Process.h>
 #include <Kernel/Thread.h>
+#include <Kernel/VM/MemoryManager.h>
+#include <Kernel/VM/PageDirectory.h>

 static const dword userspace_range_base = 0x01000000;
 static const dword kernelspace_range_base = 0xc0000000;

Kernel/VM/PhysicalPage.cpp

@@ -1,5 +1,5 @@
-#include <Kernel/VM/PhysicalPage.h>
 #include <Kernel/VM/MemoryManager.h>
+#include <Kernel/VM/PhysicalPage.h>
 #include <Kernel/kmalloc.h>

 Retained<PhysicalPage> PhysicalPage::create_eternal(PhysicalAddress paddr, bool supervisor)

Kernel/VM/RangeAllocator.cpp

@@ -1,6 +1,6 @@
+#include <AK/QuickSort.h>
 #include <Kernel/VM/RangeAllocator.h>
 #include <Kernel/kstdio.h>
-#include <AK/QuickSort.h>

 //#define VRA_DEBUG
@@ -33,7 +33,7 @@ Vector<Range, 2> Range::carve(const Range& taken)
 {
 Vector<Range, 2> parts;
 if (taken == *this)
-return { };
+return {};
 if (taken.base() > base())
 parts.append({ base(), taken.base().get() - base().get() });
 if (taken.end() < end())
@@ -79,7 +79,7 @@ Range RangeAllocator::allocate_anywhere(size_t size)
 return allocated_range;
 }
 kprintf("VRA: Failed to allocate anywhere: %u\n", size);
-return { };
+return {};
 }

 Range RangeAllocator::allocate_specific(LinearAddress base, size_t size)
@@ -101,7 +101,7 @@ Range RangeAllocator::allocate_specific(LinearAddress base, size_t size)
 return allocated_range;
 }
 kprintf("VRA: Failed to allocate specific range: %x(%u)\n", base.get(), size);
-return { };
+return {};
 }

 void RangeAllocator::deallocate(Range range)
@@ -121,7 +121,7 @@ void RangeAllocator::deallocate(Range range)

 sort_and_merge:
 // FIXME: We don't have to sort if we insert at the right position immediately.
-quick_sort(m_available_ranges.begin(), m_available_ranges.end(), [] (auto& a, auto& b) {
+quick_sort(m_available_ranges.begin(), m_available_ranges.end(), [](auto& a, auto& b) {
 return a.base() < b.base();
 });

Kernel/VM/Region.cpp

@@ -1,8 +1,8 @@
-#include <Kernel/VM/Region.h>
-#include <Kernel/VM/VMObject.h>
-#include <Kernel/VM/MemoryManager.h>
 #include <Kernel/Process.h>
 #include <Kernel/Thread.h>
+#include <Kernel/VM/MemoryManager.h>
+#include <Kernel/VM/Region.h>
+#include <Kernel/VM/VMObject.h>

 Region::Region(const Range& range, String&& n, byte access, bool cow)
 : m_range(range)
@@ -72,10 +72,10 @@ Retained<Region> Region::clone()
 if (m_shared || (is_readable() && !is_writable())) {
 #ifdef MM_DEBUG
 dbgprintf("%s<%u> Region::clone(): sharing %s (L%x)\n",
-current->process().name().characters(),
-current->pid(),
-m_name.characters(),
-laddr().get());
+current->process().name().characters(),
+current->pid(),
+m_name.characters(),
+laddr().get());
 #endif
 // Create a new region backed by the same VMObject.
 return adopt(*new Region(m_range, m_vmo.copy_ref(), m_offset_in_vmo, String(m_name), m_access));
@@ -83,10 +83,10 @@ Retained<Region> Region::clone()

 #ifdef MM_DEBUG
 dbgprintf("%s<%u> Region::clone(): cowing %s (L%x)\n",
-current->process().name().characters(),
-current->pid(),
-m_name.characters(),
-laddr().get());
+current->process().name().characters(),
+current->pid(),
+m_name.characters(),
+laddr().get());
 #endif
 // Set up a COW region. The parent (this) region becomes COW as well!
 m_cow_map.fill(true);

Kernel/VM/Region.h

@@ -10,6 +10,7 @@ class VMObject;
 class Region : public Retainable<Region> {
 friend class MemoryManager;
 public:
+enum Access
+{

Kernel/VM/VMObject.cpp

@@ -1,7 +1,7 @@
-#include <Kernel/VM/VMObject.h>
-#include <Kernel/VM/MemoryManager.h>
 #include <Kernel/FileSystem/FileSystem.h>
 #include <Kernel/FileSystem/Inode.h>
+#include <Kernel/VM/MemoryManager.h>
+#include <Kernel/VM/VMObject.h>

 Retained<VMObject> VMObject::create_file_backed(RetainPtr<Inode>&& inode)
 {
@@ -59,7 +59,6 @@ VMObject::VMObject(PhysicalAddress paddr, size_t size)
 ASSERT(m_physical_pages.size() == page_count());
 }

-
 VMObject::VMObject(RetainPtr<Inode>&& inode)
 : m_inode(move(inode))
 {
@@ -113,7 +112,7 @@ void VMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new_size
 }

 // FIXME: Consolidate with inode_contents_changed() so we only do a single walk.
-for_each_region([] (Region& region) {
+for_each_region([](Region& region) {
 ASSERT(region.page_directory());
 MM.remap_region(*region.page_directory(), region);
 });
@@ -165,7 +164,7 @@ void VMObject::inode_contents_changed(Badge<Inode>, off_t offset, ssize_t size,
 #endif

 // FIXME: Consolidate with inode_size_changed() so we only do a single walk.
-for_each_region([] (Region& region) {
+for_each_region([](Region& region) {
 ASSERT(region.page_directory());
 MM.remap_region(*region.page_directory(), region);
 });