diff --git a/Kernel/ACPI/MultiProcessorParser.cpp b/Kernel/ACPI/MultiProcessorParser.cpp
index 80076181bc..c458d4e7c5 100644
--- a/Kernel/ACPI/MultiProcessorParser.cpp
+++ b/Kernel/ACPI/MultiProcessorParser.cpp
@@ -57,7 +57,7 @@ void MultiProcessorParser::parse_floating_pointer_data()
 {
     auto floating_pointer = map_typed<MultiProcessor::FloatingPointer>(m_floating_pointer);
     m_configuration_table = PhysicalAddress(floating_pointer->physical_address_ptr);
-    dbg() << "Features " << floating_pointer->feature_info[0] << ", IMCR? " << (floating_pointer->feature_info[0] & (1 << 7));
+    dbgln("Features {}, IMCR? {}", floating_pointer->feature_info[0], (floating_pointer->feature_info[0] & (1 << 7)));
 }
 
 void MultiProcessorParser::parse_configuration_table()
@@ -126,7 +126,7 @@ Vector<u8> MultiProcessorParser::get_pci_bus_ids() const
 
 Vector<PCIInterruptOverrideMetadata> MultiProcessorParser::get_pci_interrupt_redirections()
 {
-    dbg() << "MultiProcessor: Get PCI IOAPIC redirections";
+    dbgln("MultiProcessor: Get PCI IOAPIC redirections");
     Vector<PCIInterruptOverrideMetadata> overrides;
     auto pci_bus_ids = get_pci_bus_ids();
     for (auto& entry : m_io_interrupt_assignment_entries) {
diff --git a/Kernel/ACPI/Parser.cpp b/Kernel/ACPI/Parser.cpp
index 18d4e52266..adb792eaec 100644
--- a/Kernel/ACPI/Parser.cpp
+++ b/Kernel/ACPI/Parser.cpp
@@ -150,15 +150,15 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
     switch ((GenericAddressStructure::AddressSpace)structure.address_space) {
     case GenericAddressStructure::AddressSpace::SystemIO: {
         IOAddress address(structure.address);
-        dbg() << "ACPI: Sending value 0x" << String::format("%x", value) << " to " << address;
+        dbgln("ACPI: Sending value {:x} to {}", value, address);
         switch (structure.access_size) {
         case (u8)GenericAddressStructure::AccessSize::QWord: {
-            dbg() << "Trying to send QWord to IO port";
+            dbgln("Trying to send QWord to IO port");
             ASSERT_NOT_REACHED();
             break;
         }
         case (u8)GenericAddressStructure::AccessSize::Undefined: {
-            dbg() << "ACPI Warning: Unknown access size " << structure.access_size;
+            dbgln("ACPI Warning: Unknown access size {}", structure.access_size);
             ASSERT(structure.bit_width != (u8)GenericAddressStructure::BitWidth::QWord);
             ASSERT(structure.bit_width != (u8)GenericAddressStructure::BitWidth::Undefined);
             dbg() << "ACPI: Bit Width - " << structure.bit_width << " bits";
@@ -172,7 +172,7 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
         return;
     }
     case GenericAddressStructure::AddressSpace::SystemMemory: {
-        dbg() << "ACPI: Sending value 0x" << String::format("%x", value) << " to " << PhysicalAddress(structure.address);
+        dbgln("ACPI: Sending value {:x} to {}", value, PhysicalAddress(structure.address));
         switch ((GenericAddressStructure::AccessSize)structure.access_size) {
         case GenericAddressStructure::AccessSize::Byte:
             *map_typed<u8>(PhysicalAddress(structure.address)) = value;
@@ -195,10 +195,10 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
     case GenericAddressStructure::AddressSpace::PCIConfigurationSpace: {
         // According to the ACPI specification 6.2, page 168, PCI addresses must be confined to devices on Segment group 0, bus 0.
         auto pci_address = PCI::Address(0, 0, ((structure.address >> 24) & 0xFF), ((structure.address >> 16) & 0xFF));
-        dbg() << "ACPI: Sending value 0x" << String::format("%x", value) << " to " << pci_address;
+        dbgln("ACPI: Sending value {:x} to {}", value, pci_address);
         u32 offset_in_pci_address = structure.address & 0xFFFF;
         if (structure.access_size == (u8)GenericAddressStructure::AccessSize::QWord) {
-            dbg() << "Trying to send QWord to PCI configuration space";
+            dbgln("Trying to send QWord to PCI configuration space");
             ASSERT_NOT_REACHED();
         }
         ASSERT(structure.access_size != (u8)GenericAddressStructure::AccessSize::Undefined);
diff --git a/Kernel/IO.h b/Kernel/IO.h
index 4d22f43d5b..f6d971371b 100644
--- a/Kernel/IO.h
+++ b/Kernel/IO.h
@@ -176,3 +176,11 @@ inline const LogStream& operator<<(const LogStream& stream, IOAddress value)
 {
     return stream << "IO " << String::format("%x", value.get());
 }
+
+template<>
+struct AK::Formatter<IOAddress> : AK::Formatter<FormatString> {
+    void format(FormatBuilder& builder, IOAddress value)
+    {
+        return Formatter<FormatString>::format(builder, "IO {:x}", value.get());
+    }
+};
diff --git a/Kernel/PCI/Definitions.h b/Kernel/PCI/Definitions.h
index 147944ea7b..2e4b723ec2 100644
--- a/Kernel/PCI/Definitions.h
+++ b/Kernel/PCI/Definitions.h
@@ -247,3 +247,13 @@ class Device;
 
 }
 }
+
+template<>
+struct AK::Formatter<Kernel::PCI::Address> : Formatter<FormatString> {
+    void format(FormatBuilder& builder, Kernel::PCI::Address value)
+    {
+        return Formatter<FormatString>::format(
+            builder,
+            "PCI [{:04x}:{:02x}:{:02x}:{:02x}]", value.seg(), value.bus(), value.slot(), value.function());
+    }
+};
diff --git a/Kernel/PhysicalAddress.h b/Kernel/PhysicalAddress.h
index ada55c2a33..1a8d9244f1 100644
--- a/Kernel/PhysicalAddress.h
+++ b/Kernel/PhysicalAddress.h
@@ -65,3 +65,11 @@ inline const LogStream& operator<<(const LogStream& stream, PhysicalAddress valu
 {
     return stream << 'P' << value.as_ptr();
 }
+
+template<>
+struct AK::Formatter<PhysicalAddress> : AK::Formatter<FormatString> {
+    void format(FormatBuilder& builder, PhysicalAddress value)
+    {
+        return AK::Formatter<FormatString>::format(builder, "P{}", value.as_ptr());
+    }
+};
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
index af14611dd3..8bb6664ff8 100644
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -238,7 +238,7 @@ void Thread::die_if_needed()
     // actual context switch
     u32 prev_flags;
     Processor::current().clear_critical(prev_flags, false);
-    dbg() << "die_if_needed returned from clear_critical!!! in irq: " << Processor::current().in_irq();
+    dbgln("die_if_needed returned from clear_critical!!! in irq: {}", Processor::current().in_irq());
     // We should never get here, but the scoped scheduler lock
     // will be released by Scheduler::context_switch again
     ASSERT_NOT_REACHED();
@@ -377,7 +377,7 @@ void Thread::finalize()
     }
 
     if (m_dump_backtrace_on_finalization)
-        dbg() << backtrace_impl();
+        dbgln("{}", backtrace_impl());
 
     kfree_aligned(m_fpu_state);
     drop_thread_count(false);
@@ -897,7 +897,7 @@ void Thread::set_state(State new_state, u8 stop_signal)
     if (previous_state == Invalid) {
         // If we were *just* created, we may have already pending signals
         if (has_unmasked_pending_signals()) {
-            dbg() << "Dispatch pending signals to new thread " << *this;
+            dbgln("Dispatch pending signals to new thread {}", *this);
             dispatch_one_pending_signal();
         }
     }
@@ -1115,3 +1115,10 @@ bool Thread::should_be_stopped() const
 }
 
 }
+
+void AK::Formatter<Kernel::Thread>::format(FormatBuilder& builder, const Kernel::Thread& value)
+{
+    return AK::Formatter<FormatString>::format(
+        builder,
+        "{}({}:{})", value.process().name(), value.pid().value(), value.tid().value());
+}
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 6c0ef52571..7b21ef610c 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -822,13 +822,13 @@ public:
             scheduler_lock.lock();
             ScopedSpinLock block_lock2(m_block_lock);
             if (should_be_stopped() || state() == Stopped) {
-                dbg() << "Thread should be stopped, current state: " << state_string();
+                dbgln("Thread should be stopped, current state: {}", state_string());
                 set_state(Thread::Blocked);
                 continue;
             }
             if (m_blocker && !m_blocker->can_be_interrupted() && !m_should_die) {
                 block_lock2.unlock();
-                dbg() << "Thread should not be unblocking, current state: " << state_string();
+                dbgln("Thread should not be unblocking, current state: {}", state_string());
                 set_state(Thread::Blocked);
                 continue;
             }
@@ -1311,3 +1311,8 @@ inline IterationDecision Scheduler::for_each_nonrunnable(Callback callback)
 }
 
 }
+
+template<>
+struct AK::Formatter<Kernel::Thread> : AK::Formatter<FormatString> {
+    void format(FormatBuilder&, const Kernel::Thread&);
+};