1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-25 08:47:35 +00:00

Kernel: Make Arch/i386/CPU.cpp safe to run through clang-format

This file was far too messy, and touching it was a major pain.
Also enable clang-format linting on it.
This commit is contained in:
AnotherTest 2021-02-07 16:58:53 +03:30 committed by Andreas Kling
parent 53ce923e10
commit 1f8a633cc7
2 changed files with 109 additions and 98 deletions

View file

@@ -25,7 +25,6 @@
*/ */
#include <AK/Assertions.h> #include <AK/Assertions.h>
#include <Kernel/Debug.h>
#include <AK/ScopeGuard.h> #include <AK/ScopeGuard.h>
#include <AK/String.h> #include <AK/String.h>
#include <AK/StringBuilder.h> #include <AK/StringBuilder.h>
@@ -34,6 +33,7 @@
#include <Kernel/Arch/i386/ISRStubs.h> #include <Kernel/Arch/i386/ISRStubs.h>
#include <Kernel/Arch/i386/ProcessorInfo.h> #include <Kernel/Arch/i386/ProcessorInfo.h>
#include <Kernel/Arch/i386/SafeMem.h> #include <Kernel/Arch/i386/SafeMem.h>
#include <Kernel/Debug.h>
#include <Kernel/IO.h> #include <Kernel/IO.h>
#include <Kernel/Interrupts/APIC.h> #include <Kernel/Interrupts/APIC.h>
#include <Kernel/Interrupts/GenericInterruptHandler.h> #include <Kernel/Interrupts/GenericInterruptHandler.h>
@@ -59,7 +59,7 @@ static Descriptor s_idt[256];
static GenericInterruptHandler* s_interrupt_handler[GENERIC_INTERRUPT_HANDLERS_COUNT]; static GenericInterruptHandler* s_interrupt_handler[GENERIC_INTERRUPT_HANDLERS_COUNT];
static EntropySource s_entropy_source_interrupts{EntropySource::Static::Interrupts}; static EntropySource s_entropy_source_interrupts { EntropySource::Static::Interrupts };
// The compiler can't see the calls to these functions inside assembly. // The compiler can't see the calls to these functions inside assembly.
// Declare them, to avoid dead code warnings. // Declare them, to avoid dead code warnings.
@@ -71,6 +71,8 @@ extern "C" void pre_init_finished(void);
extern "C" void post_init_finished(void); extern "C" void post_init_finished(void);
extern "C" void handle_interrupt(TrapFrame*); extern "C" void handle_interrupt(TrapFrame*);
// clang-format off
#define EH_ENTRY(ec, title) \ #define EH_ENTRY(ec, title) \
extern "C" void title##_asm_entry(); \ extern "C" void title##_asm_entry(); \
extern "C" void title##_handler(TrapFrame*); \ extern "C" void title##_handler(TrapFrame*); \
@@ -122,6 +124,8 @@ extern "C" void handle_interrupt(TrapFrame*);
" call " #title "_handler\n" \ " call " #title "_handler\n" \
" jmp common_trap_exit \n"); " jmp common_trap_exit \n");
// clang-format on
static void dump(const RegisterState& regs) static void dump(const RegisterState& regs)
{ {
u16 ss; u16 ss;
@@ -217,7 +221,6 @@ void fpu_exception_handler(TrapFrame*)
asm volatile("clts"); asm volatile("clts");
} }
// 14: Page Fault // 14: Page Fault
EH_ENTRY(14, page_fault); EH_ENTRY(14, page_fault);
void page_fault_handler(TrapFrame* trap) void page_fault_handler(TrapFrame* trap)
@@ -974,8 +977,7 @@ String Processor::features_string() const
{ {
StringBuilder builder; StringBuilder builder;
auto feature_to_str = auto feature_to_str =
[](CPUFeature f) -> const char* [](CPUFeature f) -> const char* {
{
switch (f) { switch (f) {
case CPUFeature::NX: case CPUFeature::NX:
return "nx"; return "nx";
@@ -1159,8 +1161,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
FlatPtr frame_ptr = 0, eip = 0; FlatPtr frame_ptr = 0, eip = 0;
Vector<FlatPtr, 32> stack_trace; Vector<FlatPtr, 32> stack_trace;
auto walk_stack = [&](FlatPtr stack_ptr) auto walk_stack = [&](FlatPtr stack_ptr) {
{
static constexpr size_t max_stack_frames = 4096; static constexpr size_t max_stack_frames = 4096;
stack_trace.append(eip); stack_trace.append(eip);
size_t count = 1; size_t count = 1;
@@ -1187,8 +1188,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
} }
} }
}; };
auto capture_current_thread = [&]() auto capture_current_thread = [&]() {
{
frame_ptr = (FlatPtr)__builtin_frame_address(0); frame_ptr = (FlatPtr)__builtin_frame_address(0);
eip = (FlatPtr)__builtin_return_address(0); eip = (FlatPtr)__builtin_return_address(0);
@@ -1215,7 +1215,8 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
// an IPI to that processor, have it walk the stack and wait // an IPI to that processor, have it walk the stack and wait
// until it returns the data back to us // until it returns the data back to us
auto& proc = Processor::current(); auto& proc = Processor::current();
smp_unicast(thread.cpu(), smp_unicast(
thread.cpu(),
[&]() { [&]() {
dbgln("CPU[{}] getting stack for cpu #{}", Processor::id(), proc.get_id()); dbgln("CPU[{}] getting stack for cpu #{}", Processor::id(), proc.get_id());
ProcessPagingScope paging_scope(thread.process()); ProcessPagingScope paging_scope(thread.process());
@@ -1229,7 +1230,8 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
// because the other processor is still holding the // because the other processor is still holding the
// scheduler lock... // scheduler lock...
capture_current_thread(); capture_current_thread();
}, false); },
false);
} else { } else {
switch (thread.state()) { switch (thread.state()) {
case Thread::Running: case Thread::Running:
@@ -1298,8 +1300,7 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
to_thread->set_cpu(processor.get_id()); to_thread->set_cpu(processor.get_id());
processor.restore_in_critical(to_thread->saved_critical()); processor.restore_in_critical(to_thread->saved_critical());
asm volatile("fxrstor %0" asm volatile("fxrstor %0" ::"m"(to_thread->fpu_state()));
::"m"(to_thread->fpu_state()));
// TODO: debug registers // TODO: debug registers
// TODO: ioperm? // TODO: ioperm?
@@ -1316,6 +1317,7 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
dbgln<CONTEXT_SWITCH_DEBUG>("switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread); dbgln<CONTEXT_SWITCH_DEBUG>("switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
from_thread->save_critical(m_in_critical); from_thread->save_critical(m_in_critical);
// clang-format off
// Switch to new thread context, passing from_thread and to_thread // Switch to new thread context, passing from_thread and to_thread
// through to the new context using registers edx and eax // through to the new context using registers edx and eax
asm volatile( asm volatile(
@@ -1356,6 +1358,7 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
[to_thread] "a" (to_thread) [to_thread] "a" (to_thread)
: "memory" : "memory"
); );
// clang-format on
dbgln<CONTEXT_SWITCH_DEBUG>("switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread); dbgln<CONTEXT_SWITCH_DEBUG>("switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
@@ -1383,6 +1386,8 @@ extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe
} }
extern "C" void thread_context_first_enter(void); extern "C" void thread_context_first_enter(void);
// clang-format off
asm( asm(
// enter_thread_context returns to here first time a thread is executing // enter_thread_context returns to here first time a thread is executing
".globl thread_context_first_enter \n" ".globl thread_context_first_enter \n"
@@ -1397,6 +1402,7 @@ asm(
" movl %ebx, 0(%esp) \n" // push pointer to TrapFrame " movl %ebx, 0(%esp) \n" // push pointer to TrapFrame
" jmp common_trap_exit \n" " jmp common_trap_exit \n"
); );
// clang-format on
void exit_kernel_thread(void) void exit_kernel_thread(void)
{ {
@@ -1523,6 +1529,7 @@ extern "C" u32 do_init_context(Thread* thread, u32 flags)
extern "C" void do_assume_context(Thread* thread, u32 flags); extern "C" void do_assume_context(Thread* thread, u32 flags);
// clang-format off
asm( asm(
".global do_assume_context \n" ".global do_assume_context \n"
"do_assume_context: \n" "do_assume_context: \n"
@@ -1542,6 +1549,7 @@ asm(
" pushl $thread_context_first_enter \n" // should be same as tss.eip " pushl $thread_context_first_enter \n" // should be same as tss.eip
" jmp enter_thread_context \n" " jmp enter_thread_context \n"
); );
// clang-format on
void Processor::assume_context(Thread& thread, u32 flags) void Processor::assume_context(Thread& thread, u32 flags)
{ {
@@ -1590,6 +1598,7 @@ void Processor::initialize_context_switching(Thread& initial_thread)
m_scheduler_initialized = true; m_scheduler_initialized = true;
// clang-format off
asm volatile( asm volatile(
"movl %[new_esp], %%esp \n" // switch to new stack "movl %[new_esp], %%esp \n" // switch to new stack
"pushl %[from_to_thread] \n" // to_thread "pushl %[from_to_thread] \n" // to_thread
@@ -1613,6 +1622,7 @@ void Processor::initialize_context_switching(Thread& initial_thread)
[from_to_thread] "b" (&initial_thread), [from_to_thread] "b" (&initial_thread),
[cpu] "c" (id()) [cpu] "c" (id())
); );
// clang-format on
ASSERT_NOT_REACHED(); ASSERT_NOT_REACHED();
} }
@@ -1680,10 +1690,12 @@ void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
{ {
auto ptr = vaddr.as_ptr(); auto ptr = vaddr.as_ptr();
while (page_count > 0) { while (page_count > 0) {
// clang-format off
asm volatile("invlpg %0" asm volatile("invlpg %0"
: :
: "m"(*ptr) : "m"(*ptr)
: "memory"); : "memory");
// clang-format on
ptr += PAGE_SIZE; ptr += PAGE_SIZE;
page_count--; page_count--;
} }
@@ -1735,7 +1747,7 @@ ProcessorMessage& Processor::smp_get_from_pool()
return *msg; return *msg;
} }
Atomic<u32> Processor::s_idle_cpu_mask{ 0 }; Atomic<u32> Processor::s_idle_cpu_mask { 0 };
u32 Processor::smp_wake_n_idle_processors(u32 wake_count) u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
{ {
@@ -1829,8 +1841,7 @@ bool Processor::smp_process_pending_messages()
if (auto pending_msgs = atomic_exchange(&m_message_queue, nullptr, AK::MemoryOrder::memory_order_acq_rel)) { if (auto pending_msgs = atomic_exchange(&m_message_queue, nullptr, AK::MemoryOrder::memory_order_acq_rel)) {
// We pulled the stack of pending messages in LIFO order, so we need to reverse the list first // We pulled the stack of pending messages in LIFO order, so we need to reverse the list first
auto reverse_list = auto reverse_list =
[](ProcessorMessageEntry* list) -> ProcessorMessageEntry* [](ProcessorMessageEntry* list) -> ProcessorMessageEntry* {
{
ProcessorMessageEntry* rev_list = nullptr; ProcessorMessageEntry* rev_list = nullptr;
while (list) { while (list) {
auto next = list->next; auto next = list->next;
@@ -2111,8 +2122,7 @@ void Processor::deferred_call_execute_pending()
// We pulled the stack of pending deferred calls in LIFO order, so we need to reverse the list first // We pulled the stack of pending deferred calls in LIFO order, so we need to reverse the list first
auto reverse_list = auto reverse_list =
[](DeferredCallEntry* list) -> DeferredCallEntry* [](DeferredCallEntry* list) -> DeferredCallEntry* {
{
DeferredCallEntry* rev_list = nullptr; DeferredCallEntry* rev_list = nullptr;
while (list) { while (list) {
auto next = list->next; auto next = list->next;
@@ -2240,9 +2250,11 @@ void Processor::gdt_init()
set_fs(GDT_SELECTOR_PROC); set_fs(GDT_SELECTOR_PROC);
// Make sure CS points to the kernel code descriptor. // Make sure CS points to the kernel code descriptor.
// clang-format off
asm volatile( asm volatile(
"ljmpl $" __STRINGIFY(GDT_SELECTOR_CODE0) ", $sanity\n" "ljmpl $" __STRINGIFY(GDT_SELECTOR_CODE0) ", $sanity\n"
"sanity:\n"); "sanity:\n");
// clang-format on
} }
void Processor::set_thread_specific(u8* data, size_t len) void Processor::set_thread_specific(u8* data, size_t len)

View file

@@ -11,7 +11,6 @@ if [ "$#" -eq "1" ]; then
'*.cpp' \ '*.cpp' \
'*.h' \ '*.h' \
':!:Base' \ ':!:Base' \
':!:Kernel/Arch/i386/CPU.cpp' \
':!:Kernel/FileSystem/ext2_fs.h' \ ':!:Kernel/FileSystem/ext2_fs.h' \
':!:Userland/Libraries/LibC/getopt.cpp' \ ':!:Userland/Libraries/LibC/getopt.cpp' \
':!:Userland/Libraries/LibC/syslog.h' \ ':!:Userland/Libraries/LibC/syslog.h' \