
Everywhere: Debug macros instead of constexpr.

This was done with the following script:

    find . \( -name '*.cpp' -o -name '*.h' -o -name '*.in' \) -not -path './Toolchain/*' -not -path './Build/*' -exec sed -i -E 's/dbgln<debug_([a-z_]+)>/dbgln<\U\1_DEBUG>/' {} \;

    find . \( -name '*.cpp' -o -name '*.h' -o -name '*.in' \) -not -path './Toolchain/*' -not -path './Build/*' -exec sed -i -E 's/if constexpr \(debug_([a-z0-9_]+)/if constexpr \(\U\1_DEBUG/' {} \;
Authored by asynts on 2021-01-23 23:59:27 +01:00; committed by Andreas Kling
Parent: bb483f7ef4
Commit: 8465683dcf
98 changed files with 414 additions and 972 deletions
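
To see what the rewrite produces, here is a minimal, self-contained sketch of the pattern, assuming a simplified dbgln; the real implementation in AK is more elaborate (compile-time format checking and so on), so only the flag-as-template-parameter idea is taken from the diff below:

    #include <cstddef>
    #include <cstdio>
    #include <utility>

    // The build system can now toggle the flag externally (e.g. with
    // -DPAGE_FAULT_DEBUG=1); the old `constexpr bool debug_page_fault`
    // had to be edited in a header instead.
    #ifndef PAGE_FAULT_DEBUG
    #    define PAGE_FAULT_DEBUG 0
    #endif

    // Simplified stand-in for AK's dbgln (an assumption, not the real
    // signature): the flag is a template parameter, so a disabled call
    // compiles down to nothing.
    template<bool enabled = true, typename... Args>
    void dbgln(const char* fmt, Args&&... args)
    {
        if constexpr (enabled)
            std::printf(fmt, std::forward<Args>(args)...);
    }

    int main()
    {
        // Old form: dbgln<debug_page_fault>(...); new form, as produced
        // by the first sed command above:
        dbgln<PAGE_FAULT_DEBUG>("COW fault in page %zu\n", std::size_t(42));
    }

The uppercase names also make it obvious at a glance that the flag is now a preprocessor symbol rather than a C++ constant.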

@@ -472,7 +472,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
     }
     u8* dest_ptr = MM.quickmap_page(*page);
-    dbgln<debug_page_fault>(" >> COW {} <- {}", page->paddr(), page_slot->paddr());
+    dbgln<PAGE_FAULT_DEBUG>(" >> COW {} <- {}", page->paddr(), page_slot->paddr());
     {
         SmapDisabler disabler;
         void* fault_at;

@@ -42,7 +42,7 @@ ContiguousVMObject::ContiguousVMObject(size_t size)
     auto contiguous_physical_pages = MM.allocate_contiguous_supervisor_physical_pages(size);
     for (size_t i = 0; i < page_count(); i++) {
         physical_pages()[i] = contiguous_physical_pages[i];
-        dbgln<debug_contiguous_vmobject>("Contiguous page[{}]: {}", i, physical_pages()[i]->paddr());
+        dbgln<CONTIGUOUS_VMOBJECT_DEBUG>("Contiguous page[{}]: {}", i, physical_pages()[i]->paddr());
     }
 }
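
The renamed flags are ALL_CAPS because they are now preprocessor macros rather than constexpr bools. This page does not show where they are defined, so the following is only a conventional sketch of how such a flag might be declared with an overridable default:

    // Default the flag to off unless the build system overrides it,
    // e.g. with -DCONTIGUOUS_VMOBJECT_DEBUG=1.
    #ifndef CONTIGUOUS_VMOBJECT_DEBUG
    #    define CONTIGUOUS_VMOBJECT_DEBUG 0
    #endif

Because the macro expands to 0 or 1, it works everywhere the old constexpr bool did: as a dbgln template argument, and inside if constexpr conditions.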

@@ -79,7 +79,7 @@ Vector<Range, 2> Range::carve(const Range& taken)
     if (taken.end() < end())
         parts.append({ taken.end(), end().get() - taken.end().get() });
-    if constexpr (debug_vra) {
+    if constexpr (VRA_DEBUG) {
         dbgln("VRA: carve: take {:x}-{:x} from {:x}-{:x}",
             taken.base().get(),
             taken.end().get() - 1,
@@ -129,13 +129,13 @@ Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
         Range allocated_range(VirtualAddress(aligned_base), size);
         if (available_range == allocated_range) {
-            dbgln<debug_vra>("VRA: Allocated perfect-fit anywhere({}, {}): {}", size, alignment, allocated_range.base().get());
+            dbgln<VRA_DEBUG>("VRA: Allocated perfect-fit anywhere({}, {}): {}", size, alignment, allocated_range.base().get());
             m_available_ranges.remove(i);
             return allocated_range;
         }
         carve_at_index(i, allocated_range);
-        if constexpr (debug_vra) {
-            dbgln<debug_vra>("VRA: Allocated anywhere({}, {}): {}", size, alignment, allocated_range.base().get());
+        if constexpr (VRA_DEBUG) {
+            dbgln<VRA_DEBUG>("VRA: Allocated anywhere({}, {}): {}", size, alignment, allocated_range.base().get());
             dump();
         }
         return allocated_range;
@@ -161,7 +161,7 @@ Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
         }
         carve_at_index(i, allocated_range);
-        if constexpr (debug_vra) {
+        if constexpr (VRA_DEBUG) {
             dbgln("VRA: Allocated specific({}): {}", size, available_range.base().get());
             dump();
         }
@@ -179,7 +179,7 @@ void RangeAllocator::deallocate(Range range)
     ASSERT(range.size());
     ASSERT(range.base() < range.end());
-    if constexpr (debug_vra) {
+    if constexpr (VRA_DEBUG) {
         dbgln("VRA: Deallocate: {}({})", range.base().get(), range.size());
         dump();
     }
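
The RangeAllocator hunks exercise the second sed rule: whole debug blocks guarded by if constexpr. Here is a minimal sketch of that block-level gating, with illustrative stand-ins for dbgln and RangeAllocator::dump():

    #include <cstddef>
    #include <cstdio>

    #ifndef VRA_DEBUG
    #    define VRA_DEBUG 0
    #endif

    // Illustrative stand-in for RangeAllocator::dump().
    void dump()
    {
        std::puts("VRA: ...available ranges...");
    }

    void deallocate_example(std::size_t base, std::size_t size)
    {
        // When VRA_DEBUG is 0, the whole block (including the dump() call)
        // is discarded at compile time.
        if constexpr (VRA_DEBUG) {
            std::printf("VRA: Deallocate: %zx(%zu)\n", base, size);
            dump();
        }
    }

    int main()
    {
        deallocate_example(0x20000000, 0x1000);
    }

Unlike the template form, this gates several statements at once, which is why the second sed rule matches the text "if constexpr (debug_" specifically.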

@@ -410,7 +410,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
             return PageFaultResponse::ShouldCrash;
         }
         if (vmobject().is_inode()) {
-            dbgln<debug_page_fault>("NP(inode) fault in Region({})[{}]", this, page_index_in_region);
+            dbgln<PAGE_FAULT_DEBUG>("NP(inode) fault in Region({})[{}]", this, page_index_in_region);
             return handle_inode_fault(page_index_in_region);
         }
@@ -435,10 +435,10 @@ PageFaultResponse Region::handle_fault(const PageFault& fault)
     }
     ASSERT(fault.type() == PageFault::Type::ProtectionViolation);
     if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
-        dbgln<debug_page_fault>("PV(cow) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
+        dbgln<PAGE_FAULT_DEBUG>("PV(cow) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
         auto* phys_page = physical_page(page_index_in_region);
         if (phys_page->is_shared_zero_page() || phys_page->is_lazy_committed_page()) {
-            dbgln<debug_page_fault>("NP(zero) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
+            dbgln<PAGE_FAULT_DEBUG>("NP(zero) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
             return handle_zero_fault(page_index_in_region);
         }
         return handle_cow_fault(page_index_in_region);
@@ -472,14 +472,14 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     if (page_slot->is_lazy_committed_page()) {
         page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page(page_index_in_vmobject);
-        dbgln<debug_page_fault>(" >> ALLOCATED COMMITTED {}", page_slot->paddr());
+        dbgln<PAGE_FAULT_DEBUG>(" >> ALLOCATED COMMITTED {}", page_slot->paddr());
     } else {
         page_slot = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
         if (page_slot.is_null()) {
             klog() << "MM: handle_zero_fault was unable to allocate a physical page";
             return PageFaultResponse::OutOfMemory;
         }
-        dbgln<debug_page_fault>(" >> ALLOCATED {}", page_slot->paddr());
+        dbgln<PAGE_FAULT_DEBUG>(" >> ALLOCATED {}", page_slot->paddr());
     }
     if (!remap_vmobject_page(page_index_in_vmobject)) {
@@ -518,10 +518,10 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
     auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject];
-    dbgln<debug_page_fault>("Inode fault in {} page index: {}", name(), page_index_in_region);
+    dbgln<PAGE_FAULT_DEBUG>("Inode fault in {} page index: {}", name(), page_index_in_region);
     if (!vmobject_physical_page_entry.is_null()) {
-        dbgln<debug_page_fault>("MM: page_in_from_inode() but page already present. Fine with me!");
+        dbgln<PAGE_FAULT_DEBUG>("MM: page_in_from_inode() but page already present. Fine with me!");
         if (!remap_vmobject_page(page_index_in_vmobject))
             return PageFaultResponse::OutOfMemory;
         return PageFaultResponse::Continue;