Everywhere: Use CMake to generate AK/Debug.h.

This was done with the help of several scripts; I dump them here to
easily find them later:

    awk '/#ifdef/ { print "#cmakedefine01 "$2 }' AK/Debug.h.in

    for debug_macro in $(awk '/#ifdef/ { print $2 }' AK/Debug.h.in)
    do
        find . \( -name '*.cpp' -o -name '*.h' -o -name '*.in' \) -not -path './Toolchain/*' -not -path './Build/*' -exec sed -i -E 's/#ifdef '$debug_macro'/#if '$debug_macro'/' {} \;
    done

    # Remember to remove WRAPPER_GENERATOR_DEBUG from the list.
    awk '/#cmake/ { print "set("$2" ON)" }' AK/Debug.h.in
Authored by asynts on 2021-01-23 23:29:11 +01:00, committed by Andreas Kling
parent 76f2918416
commit 1a3a0836c0
59 changed files with 475 additions and 459 deletions
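For reference, here is a minimal sketch of the pipeline the scripts above feed, using ACPI_DEBUG from the first diff below as the example. The exact CMake wiring (a set() per macro and a configure_file() call on AK/Debug.h.in) is assumed here rather than shown in this commit:

    /* AK/Debug.h.in -- one template line per debug macro, in the form emitted
       by the first awk script above: */
    #cmakedefine01 ACPI_DEBUG

    /* AK/Debug.h -- what CMake's configure_file() would generate from that
       line after something like set(ACPI_DEBUG ON): */
    #define ACPI_DEBUG 1

    /* Call sites switch from '#ifdef' to '#if' (see the diffs below): the
       macro is now always defined to 0 or 1, and with -Wundef (or warnings
       treated as errors) a misspelled or unconfigured name is caught at
       compile time instead of silently evaluating to 0. */
    #if ACPI_DEBUG
    dbgln("ACPI: Checking SDT Length");
    #endif

Note that this only helps where <AK/Debug.h> is actually included, which is why one header hunk below adds that include while dropping the old local //#define LOCK_DEBUG.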

View file

@@ -235,7 +235,7 @@ void Parser::try_acpi_shutdown()
size_t Parser::get_table_size(PhysicalAddress table_header)
{
InterruptDisabler disabler;
-#ifdef ACPI_DEBUG
+#if ACPI_DEBUG
dbgln("ACPI: Checking SDT Length");
#endif
return map_typed<Structures::SDTHeader>(table_header)->length;
@@ -244,7 +244,7 @@ size_t Parser::get_table_size(PhysicalAddress table_header)
u8 Parser::get_table_revision(PhysicalAddress table_header)
{
InterruptDisabler disabler;
-#ifdef ACPI_DEBUG
+#if ACPI_DEBUG
dbgln("ACPI: Checking SDT Revision");
#endif
return map_typed<Structures::SDTHeader>(table_header)->revision;
@@ -252,7 +252,7 @@ u8 Parser::get_table_revision(PhysicalAddress table_header)
void Parser::initialize_main_system_description_table()
{
-#ifdef ACPI_DEBUG
+#if ACPI_DEBUG
dbgln("ACPI: Checking Main SDT Length to choose the correct mapping size");
#endif
ASSERT(!m_main_system_description_table.is_null());

View file

@@ -318,7 +318,7 @@ void page_fault_handler(TrapFrame* trap)
handle_crash(regs, "Page Fault", SIGSEGV, response == PageFaultResponse::OutOfMemory);
} else if (response == PageFaultResponse::Continue) {
-#ifdef PAGE_FAULT_DEBUG
+#if PAGE_FAULT_DEBUG
dbgln("Continuing after resolved page fault");
#endif
} else {

View file

@@ -184,7 +184,7 @@ MousePacket PS2MouseDevice::parse_data_packet(const RawPacket& raw_packet)
}
packet.is_relative = true;
-#ifdef PS2MOUSE_DEBUG
+#if PS2MOUSE_DEBUG
dbgln("PS2 Relative Mouse: Buttons {:x}", packet.buttons);
dbgln("Mouse: X {}, Y {}, Z {}", packet.x, packet.y, packet.z);
#endif

View file

@@ -215,7 +215,7 @@ Optional<MousePacket> VMWareBackdoor::receive_mouse_packet()
command.command = VMMOUSE_STATUS;
send(command);
if (command.ax == 0xFFFF0000) {
-#ifdef PS2MOUSE_DEBUG
+#if PS2MOUSE_DEBUG
klog() << "PS2MouseDevice: Resetting VMWare mouse";
#endif
disable_absolute_vmmouse();

View file

@@ -58,7 +58,7 @@ VFS& VFS::the()
VFS::VFS()
{
-#ifdef VFS_DEBUG
+#if VFS_DEBUG
klog() << "VFS: Constructing VFS";
#endif
}

View file

@@ -42,7 +42,7 @@ void SharedIRQHandler::initialize(u8 interrupt_number)
void SharedIRQHandler::register_handler(GenericInterruptHandler& handler)
{
-#ifdef INTERRUPT_DEBUG
+#if INTERRUPT_DEBUG
klog() << "Interrupt Handler registered @ Shared Interrupt Handler " << interrupt_number();
#endif
m_handlers.set(&handler);
@@ -50,7 +50,7 @@ void SharedIRQHandler::register_handler(GenericInterruptHandler& handler)
}
void SharedIRQHandler::unregister_handler(GenericInterruptHandler& handler)
{
-#ifdef INTERRUPT_DEBUG
+#if INTERRUPT_DEBUG
klog() << "Interrupt Handler unregistered @ Shared Interrupt Handler " << interrupt_number();
#endif
m_handlers.remove(&handler);
@@ -69,7 +69,7 @@ SharedIRQHandler::SharedIRQHandler(u8 irq)
: GenericInterruptHandler(irq)
, m_responsible_irq_controller(InterruptManagement::the().get_responsible_irq_controller(irq))
{
-#ifdef INTERRUPT_DEBUG
+#if INTERRUPT_DEBUG
klog() << "Shared Interrupt Handler registered @ " << interrupt_number();
#endif
disable_interrupt_vector();
@@ -77,7 +77,7 @@ SharedIRQHandler::SharedIRQHandler(u8 irq)
SharedIRQHandler::~SharedIRQHandler()
{
-#ifdef INTERRUPT_DEBUG
+#if INTERRUPT_DEBUG
klog() << "Shared Interrupt Handler unregistered @ " << interrupt_number();
#endif
disable_interrupt_vector();

View file

@@ -32,7 +32,7 @@
namespace Kernel {
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
void Lock::lock(Mode mode)
{
lock("unknown", 0, mode);
@@ -68,7 +68,7 @@ void Lock::lock(Mode mode)
}
ASSERT(m_times_locked == 0);
m_times_locked++;
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
current_thread->holding_lock(*this, 1, file, line);
#endif
m_lock.store(false, AK::memory_order_release);
@@ -90,7 +90,7 @@ void Lock::lock(Mode mode)
ASSERT(mode == Mode::Exclusive || mode == Mode::Shared);
ASSERT(m_times_locked > 0);
m_times_locked++;
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
current_thread->holding_lock(*this, 1, file, line);
#endif
m_lock.store(false, AK::memory_order_release);
@@ -111,7 +111,7 @@ void Lock::lock(Mode mode)
it->value++;
else
m_shared_holders.set(current_thread, 1);
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
current_thread->holding_lock(*this, 1, file, line);
#endif
m_lock.store(false, AK::memory_order_release);
@@ -179,7 +179,7 @@ void Lock::unlock()
m_mode = Mode::Unlocked;
}
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
current_thread->holding_lock(*this, -1);
#endif
@@ -219,7 +219,7 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
m_times_locked = 0;
m_mode = Mode::Unlocked;
m_lock.store(false, AK::memory_order_release);
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
m_holder->holding_lock(*this, -(int)lock_count_to_restore);
#endif
previous_mode = Mode::Exclusive;
@@ -240,7 +240,7 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
ASSERT(it->value > 0);
lock_count_to_restore = it->value;
ASSERT(lock_count_to_restore > 0);
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
m_holder->holding_lock(*this, -(int)lock_count_to_restore);
#endif
m_shared_holders.remove(it);
@@ -269,7 +269,7 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
}
}
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
void Lock::restore_lock(Mode mode, u32 lock_count)
{
return restore_lock("unknown", 0, mode, lock_count);
@@ -301,7 +301,7 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
ASSERT(m_shared_holders.is_empty());
m_holder = current_thread;
m_lock.store(false, AK::memory_order_release);
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
m_holder->holding_lock(*this, (int)lock_count, file, line);
#endif
return;
@@ -322,7 +322,7 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
// There may be other shared lock holders already, but we should not have an entry yet
ASSERT(set_result == AK::HashSetResult::InsertedNewEntry);
m_lock.store(false, AK::memory_order_release);
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
m_holder->holding_lock(*this, (int)lock_count, file, line);
#endif
return;

View file

@@ -51,7 +51,7 @@ public:
~Lock() { }
void lock(Mode = Mode::Exclusive);
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
void lock(const char* file, int line, Mode mode = Mode::Exclusive);
void restore_lock(const char* file, int line, Mode, u32);
#endif
@@ -98,7 +98,7 @@ private:
class Locker {
public:
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
ALWAYS_INLINE explicit Locker(const char* file, int line, Lock& l, Lock::Mode mode = Lock::Mode::Exclusive)
: m_lock(l)
{
@@ -133,7 +133,7 @@ private:
bool m_locked { true };
};
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
# define LOCKER(...) Locker locker(__FILE__, __LINE__, __VA_ARGS__)
# define RESTORE_LOCK(lock, ...) (lock).restore_lock(__FILE__, __LINE__, __VA_ARGS__)
#else

View file

@@ -423,7 +423,7 @@ void E1000NetworkAdapter::send_raw(ReadonlyBytes payload)
{
disable_irq();
size_t tx_current = in32(REG_TXDESCTAIL) % number_of_tx_descriptors;
-#ifdef E1000_DEBUG
+#if E1000_DEBUG
klog() << "E1000: Sending packet (" << payload.size() << " bytes)";
#endif
auto* tx_descriptors = (e1000_tx_desc*)m_tx_descriptors_region->vaddr().as_ptr();
@@ -434,7 +434,7 @@ void E1000NetworkAdapter::send_raw(ReadonlyBytes payload)
descriptor.length = payload.size();
descriptor.status = 0;
descriptor.cmd = CMD_EOP | CMD_IFCS | CMD_RS;
-#ifdef E1000_DEBUG
+#if E1000_DEBUG
klog() << "E1000: Using tx descriptor " << tx_current << " (head is at " << in32(REG_TXDESCHEAD) << ")";
#endif
tx_current = (tx_current + 1) % number_of_tx_descriptors;
@@ -448,7 +448,7 @@ void E1000NetworkAdapter::send_raw(ReadonlyBytes payload)
}
m_wait_queue.wait_on({}, "E1000NetworkAdapter");
}
-#ifdef E1000_DEBUG
+#if E1000_DEBUG
dbgln("E1000: Sent packet, status is now {:#02x}!", (u8)descriptor.status);
#endif
}
@@ -467,7 +467,7 @@ void E1000NetworkAdapter::receive()
auto* buffer = m_rx_buffers_regions[rx_current].vaddr().as_ptr();
u16 length = rx_descriptors[rx_current].length;
ASSERT(length <= 8192);
-#ifdef E1000_DEBUG
+#if E1000_DEBUG
klog() << "E1000: Received 1 packet @ " << buffer << " (" << length << ") bytes!";
#endif
did_receive({ buffer, length });

View file

@@ -222,7 +222,7 @@ KResultOr<size_t> IPv4Socket::sendto(FileDescription&, const UserOrKernelBuffer&
if (rc < 0)
return rc;
-#ifdef IPV4_SOCKET_DEBUG
+#if IPV4_SOCKET_DEBUG
klog() << "sendto: destination=" << m_peer_address.to_string().characters() << ":" << m_peer_port;
#endif
@@ -364,7 +364,7 @@ KResultOr<size_t> IPv4Socket::recvfrom(FileDescription& description, UserOrKerne
return EINVAL;
}
-#ifdef IPV4_SOCKET_DEBUG
+#if IPV4_SOCKET_DEBUG
klog() << "recvfrom: type=" << type() << ", local_port=" << local_port();
#endif

View file

@@ -169,7 +169,7 @@ KResultOr<size_t> TCPSocket::protocol_receive(ReadonlyBytes raw_ipv4_packet, Use
auto& ipv4_packet = *reinterpret_cast<const IPv4Packet*>(raw_ipv4_packet.data());
auto& tcp_packet = *static_cast<const TCPPacket*>(ipv4_packet.payload());
size_t payload_size = raw_ipv4_packet.size() - sizeof(IPv4Packet) - tcp_packet.header_size();
-#ifdef TCP_SOCKET_DEBUG
+#if TCP_SOCKET_DEBUG
klog() << "payload_size " << payload_size << ", will it fit in " << buffer_size << "?";
#endif
ASSERT(buffer_size >= payload_size);
@@ -252,7 +252,7 @@ void TCPSocket::send_outgoing_packets()
packet.tx_time = now;
packet.tx_counter++;
-#ifdef TCP_SOCKET_DEBUG
+#if TCP_SOCKET_DEBUG
auto& tcp_packet = *(TCPPacket*)(packet.buffer.data());
klog() << "sending tcp packet from " << local_address().to_string().characters() << ":" << local_port() << " to " << peer_address().to_string().characters() << ":" << peer_port() << " with (" << (tcp_packet.has_syn() ? "SYN " : "") << (tcp_packet.has_ack() ? "ACK " : "") << (tcp_packet.has_fin() ? "FIN " : "") << (tcp_packet.has_rst() ? "RST " : "") << ") seq_no=" << tcp_packet.sequence_number() << ", ack_no=" << tcp_packet.ack_number() << ", tx_counter=" << packet.tx_counter;
#endif
@@ -450,7 +450,7 @@ bool TCPSocket::protocol_is_disconnected() const
void TCPSocket::shut_down_for_writing()
{
if (state() == State::Established) {
-#ifdef TCP_SOCKET_DEBUG
+#if TCP_SOCKET_DEBUG
dbgln(" Sending FIN/ACK from Established and moving into FinWait1");
#endif
[[maybe_unused]] auto rc = send_tcp_packet(TCPFlags::FIN | TCPFlags::ACK);
@@ -465,7 +465,7 @@ KResult TCPSocket::close()
Locker socket_locker(lock());
auto result = IPv4Socket::close();
if (state() == State::CloseWait) {
-#ifdef TCP_SOCKET_DEBUG
+#if TCP_SOCKET_DEBUG
dbgln(" Sending FIN from CloseWait and moving into LastAck");
#endif
[[maybe_unused]] auto rc = send_tcp_packet(TCPFlags::FIN | TCPFlags::ACK);

View file

@@ -109,7 +109,7 @@ void Access::enumerate_functions(int type, u8 bus, u8 slot, u8 function, Functio
callback(address, { early_read16_field(address, PCI_VENDOR_ID), early_read16_field(address, PCI_DEVICE_ID) });
if (early_read_type(address) == PCI_TYPE_BRIDGE) {
u8 secondary_bus = early_read8_field(address, PCI_SECONDARY_BUS);
-#ifdef PCI_DEBUG
+#if PCI_DEBUG
klog() << "PCI: Found secondary bus: " << secondary_bus;
#endif
ASSERT(secondary_bus != bus);

View file

@@ -86,7 +86,7 @@ void IOAccess::write32_field(Address address, u32 field, u32 value)
void IOAccess::enumerate_hardware(Function<void(Address, ID)> callback)
{
-#ifdef PCI_DEBUG
+#if PCI_DEBUG
dbgln("PCI: IO enumerating hardware");
#endif
// Single PCI host controller.

View file

@@ -85,7 +85,7 @@ void MMIOAccess::initialize(PhysicalAddress mcfg)
{
if (!Access::is_initialized()) {
new MMIOAccess(mcfg);
-#ifdef PCI_DEBUG
+#if PCI_DEBUG
dbgln("PCI: MMIO access initialised.");
#endif
}
@@ -97,7 +97,7 @@ MMIOAccess::MMIOAccess(PhysicalAddress p_mcfg)
klog() << "PCI: Using MMIO for PCI configuration space access";
auto checkup_region = MM.allocate_kernel_region(p_mcfg.page_base(), (PAGE_SIZE * 2), "PCI MCFG Checkup", Region::Access::Read | Region::Access::Write);
-#ifdef PCI_DEBUG
+#if PCI_DEBUG
dbgln("PCI: Checking MCFG Table length to choose the correct mapping size");
#endif

View file

@@ -146,7 +146,7 @@ IDEChannel::~IDEChannel()
void IDEChannel::start_request(AsyncBlockDeviceRequest& request, bool use_dma, bool is_slave)
{
ScopedSpinLock lock(m_request_lock);
-#ifdef PATA_DEBUG
+#if PATA_DEBUG
dbgln("IDEChannel::start_request");
#endif
m_current_request = &request;
@@ -231,19 +231,19 @@ void IDEChannel::handle_irq(const RegisterState&)
u8 bstatus = m_io_group.bus_master_base().offset(2).in<u8>();
if (!(bstatus & 0x4)) {
// interrupt not from this device, ignore
-#ifdef PATA_DEBUG
+#if PATA_DEBUG
klog() << "IDEChannel: ignore interrupt";
#endif
return;
}
ScopedSpinLock lock(m_request_lock);
-#ifdef PATA_DEBUG
+#if PATA_DEBUG
klog() << "IDEChannel: interrupt: DRQ=" << ((status & ATA_SR_DRQ) != 0) << " BSY=" << ((status & ATA_SR_BSY) != 0) << " DRDY=" << ((status & ATA_SR_DRDY) != 0);
#endif
if (!m_current_request) {
-#ifdef PATA_DEBUG
+#if PATA_DEBUG
dbgln("IDEChannel: IRQ but no pending request!");
#endif
return;
@@ -324,7 +324,7 @@ void IDEChannel::detect_disks()
;
if (m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>() == 0x00) {
-#ifdef PATA_DEBUG
+#if PATA_DEBUG
klog() << "IDEChannel: No " << (i == 0 ? "master" : "slave") << " disk detected!";
#endif
continue;
@@ -439,7 +439,7 @@ void IDEChannel::ata_read_sectors(bool slave_request)
{
auto& request = *m_current_request;
ASSERT(request.block_count() <= 256);
-#ifdef PATA_DEBUG
+#if PATA_DEBUG
dbgln("IDEChannel::ata_read_sectors");
#endif
@@ -447,7 +447,7 @@ void IDEChannel::ata_read_sectors(bool slave_request)
;
auto lba = request.block_index();
-#ifdef PATA_DEBUG
+#if PATA_DEBUG
klog() << "IDEChannel: Reading " << request.block_count() << " sector(s) @ LBA " << lba;
#endif
@@ -570,14 +570,14 @@ void IDEChannel::ata_write_sectors(bool slave_request)
ASSERT(request.block_count() <= 256);
u32 start_sector = request.block_index();
u32 count = request.block_count();
-#ifdef PATA_DEBUG
+#if PATA_DEBUG
klog() << "IDEChannel::ata_write_sectors request (" << count << " sector(s) @ " << start_sector << ")";
#endif
while (m_io_group.io_base().offset(ATA_REG_STATUS).in<u8>() & ATA_SR_BSY)
;
-#ifdef PATA_DEBUG
+#if PATA_DEBUG
klog() << "IDEChannel: Writing " << count << " sector(s) @ LBA " << start_sector;
#endif

View file

@@ -72,7 +72,7 @@ pid_t Process::sys$fork(RegisterState& regs)
child_tss.gs = regs.gs;
child_tss.ss = regs.userspace_ss;
-#ifdef FORK_DEBUG
+#if FORK_DEBUG
dbgln("fork: child will begin executing at {:04x}:{:08x} with stack {:04x}:{:08x}, kstack {:04x}:{:08x}", child_tss.cs, child_tss.eip, child_tss.ss, child_tss.esp, child_tss.ss0, child_tss.esp0);
#endif

View file

@@ -357,7 +357,7 @@ void Thread::finalize()
ASSERT(Thread::current() == g_finalizer);
ASSERT(Thread::current() != this);
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
ASSERT(!m_lock.own_lock());
if (lock_count() > 0) {
dbgln("Thread {} leaking {} Locks!", *this, lock_count());
@@ -687,7 +687,7 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
ASSERT(process().is_user_process());
ASSERT(this == Thread::current());
-#ifdef SIGNAL_DEBUG
+#if SIGNAL_DEBUG
klog() << "signal: dispatch signal " << signal << " to " << *this << " state: " << state_string();
#endif
@@ -756,7 +756,7 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
}
if (handler_vaddr.as_ptr() == SIG_IGN) {
-#ifdef SIGNAL_DEBUG
+#if SIGNAL_DEBUG
klog() << "signal: " << *this << " ignored signal " << signal;
#endif
return DispatchSignalResult::Continue;
@@ -780,7 +780,7 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
u32 ret_eip = state.eip;
u32 ret_eflags = state.eflags;
-#ifdef SIGNAL_DEBUG
+#if SIGNAL_DEBUG
klog() << "signal: setting up user stack to return to eip: " << String::format("%p", (void*)ret_eip) << " esp: " << String::format("%p", (void*)old_esp);
#endif
@@ -820,7 +820,7 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
setup_stack(regs);
regs.eip = g_return_to_ring3_from_signal_trampoline.get();
-#ifdef SIGNAL_DEBUG
+#if SIGNAL_DEBUG
dbgln("signal: Thread in state '{}' has been primed with signal handler {:04x}:{:08x} to deliver {}", state_string(), m_tss.cs, m_tss.eip, signal);
#endif
return DispatchSignalResult::Continue;

View file

@@ -26,6 +26,7 @@
#pragma once
+#include <AK/Debug.h>
#include <AK/Function.h>
#include <AK/IntrusiveList.h>
#include <AK/Optional.h>
@@ -47,8 +48,6 @@
#include <LibC/fd_set.h>
#include <LibELF/AuxiliaryVector.h>
-//#define LOCK_DEBUG
namespace Kernel {
enum class DispatchSignalResult {
@@ -1109,7 +1108,7 @@ public:
RecursiveSpinLock& get_lock() const { return m_lock; }
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
void holding_lock(Lock& lock, int refs_delta, const char* file = nullptr, int line = 0)
{
ASSERT(refs_delta != 0);
@@ -1246,7 +1245,7 @@ private:
SignalActionData m_signal_action_data[32];
Blocker* m_blocker { nullptr };
-#ifdef LOCK_DEBUG
+#if LOCK_DEBUG
struct HoldingLockInfo {
Lock* lock;
const char* file;

View file

@@ -275,7 +275,7 @@ u64 HPET::update_time(u64& seconds_since_boot, u32& ticks_this_second, bool quer
void HPET::enable_periodic_interrupt(const HPETComparator& comparator)
{
-#ifdef HPET_DEBUG
+#if HPET_DEBUG
klog() << "HPET: Set comparator " << comparator.comparator_number() << " to be periodic.";
#endif
disable(comparator);
@@ -289,7 +289,7 @@ void HPET::enable_periodic_interrupt(const HPETComparator& comparator)
}
void HPET::disable_periodic_interrupt(const HPETComparator& comparator)
{
-#ifdef HPET_DEBUG
+#if HPET_DEBUG
klog() << "HPET: Disable periodic interrupt in comparator " << comparator.comparator_number() << ".";
#endif
disable(comparator);
@@ -304,7 +304,7 @@ void HPET::disable_periodic_interrupt(const HPETComparator& comparator)
void HPET::disable(const HPETComparator& comparator)
{
-#ifdef HPET_DEBUG
+#if HPET_DEBUG
klog() << "HPET: Disable comparator " << comparator.comparator_number() << ".";
#endif
ASSERT(comparator.comparator_number() <= m_comparators.size());
@@ -313,7 +313,7 @@ void HPET::disable(const HPETComparator& comparator)
}
void HPET::enable(const HPETComparator& comparator)
{
-#ifdef HPET_DEBUG
+#if HPET_DEBUG
klog() << "HPET: Enable comparator " << comparator.comparator_number() << ".";
#endif
ASSERT(comparator.comparator_number() <= m_comparators.size());

View file

@@ -443,7 +443,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
auto& page_slot = physical_pages()[page_index];
bool have_committed = m_shared_committed_cow_pages && is_nonvolatile(page_index);
if (page_slot->ref_count() == 1) {
-#ifdef PAGE_FAULT_DEBUG
+#if PAGE_FAULT_DEBUG
dbgln(" >> It's a COW page but nobody is sharing it anymore. Remap r/w");
#endif
set_should_cow(page_index, false);
@@ -456,12 +456,12 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
RefPtr<PhysicalPage> page;
if (have_committed) {
-#ifdef PAGE_FAULT_DEBUG
+#if PAGE_FAULT_DEBUG
dbgln(" >> It's a committed COW page and it's time to COW!");
#endif
page = m_shared_committed_cow_pages->allocate_one();
} else {
-#ifdef PAGE_FAULT_DEBUG
+#if PAGE_FAULT_DEBUG
dbgln(" >> It's a COW page and it's time to COW!");
#endif
page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::No);

View file

@@ -384,7 +384,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
dump_kernel_regions();
return PageFaultResponse::ShouldCrash;
}
-#ifdef PAGE_FAULT_DEBUG
+#if PAGE_FAULT_DEBUG
dbgln("MM: CPU[{}] handle_page_fault({:#04x}) at {}", Processor::current().id(), fault.code(), fault.vaddr());
#endif
auto* region = find_region_from_vaddr(fault.vaddr());

View file

@@ -43,7 +43,7 @@ void RangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
{
m_total_range = { base, size };
m_available_ranges.append({ base, size });
-#ifdef VRA_DEBUG
+#if VRA_DEBUG
ScopedSpinLock lock(m_lock);
dump();
#endif
@@ -215,7 +215,7 @@ void RangeAllocator::deallocate(Range range)
return;
}
}
-#ifdef VRA_DEBUG
+#if VRA_DEBUG
dbgln("VRA: After deallocate");
dump();
#endif

View file

@@ -458,7 +458,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
-#ifdef PAGE_FAULT_DEBUG
+#if PAGE_FAULT_DEBUG
dbgln("MM: zero_page() but page already present. Fine with me!");
#endif
if (!remap_vmobject_page(page_index_in_vmobject))