
Everywhere: Rename ASSERT => VERIFY

(...and ASSERT_NOT_REACHED => VERIFY_NOT_REACHED)

Since all of these checks are done in release builds as well,
let's rename them to VERIFY to prevent confusion, as everyone is
used to assertions being compiled out in release.

We can introduce a new ASSERT macro that is specifically for debug
checks, but I'm doing this wholesale conversion first since we've
accumulated thousands of these already, and it's not immediately
obvious which ones are suitable for ASSERT.
Andreas Kling committed 2021-02-23 20:42:32 +01:00
parent b33a6a443e
commit 5d180d1f99
725 changed files with 3448 additions and 3448 deletions
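To make the motivation concrete, here is a minimal user-space sketch of the distinction the message draws. MY_ASSERT and MY_VERIFY are hypothetical stand-ins for illustration only, not the kernel's macros (the real definitions appear in the assertions-header hunk further down):

// Minimal sketch, not SerenityOS code: a classic assert-style macro is
// compiled out when NDEBUG is set (the behavior "everyone is used to"),
// while a VERIFY-style macro is checked in every build type.
#include <cstdio>
#include <cstdlib>

// Always active, regardless of build type.
#define MY_VERIFY(expr)                                         \
    do {                                                        \
        if (!static_cast<bool>(expr)) {                         \
            std::fprintf(stderr, "VERIFY failed: %s (%s:%d)\n", \
                #expr, __FILE__, __LINE__);                     \
            std::abort();                                       \
        }                                                       \
    } while (0)

#ifdef NDEBUG
#    define MY_ASSERT(expr) ((void)0) // release build: the check disappears
#else
#    define MY_ASSERT(expr) MY_VERIFY(expr)
#endif

int main()
{
    int fd = -1;
    MY_ASSERT(fd >= 0); // becomes a no-op under -DNDEBUG
    MY_VERIFY(fd >= 0); // aborts in debug *and* release builds
}

Compiled with -DNDEBUG, the first check vanishes while the second still aborts; that mismatch in expectations is exactly the confusion the rename is meant to prevent.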


@ -40,39 +40,39 @@ UNMAP_AFTER_INIT DynamicParser::DynamicParser(PhysicalAddress rsdp)
void DynamicParser::handle_irq(const RegisterState&)
{
// FIXME: Implement IRQ handling of ACPI signals!
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::enable_aml_interpretation()
{
// FIXME: Implement AML Interpretation
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::enable_aml_interpretation(File&)
{
// FIXME: Implement AML Interpretation
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::enable_aml_interpretation(u8*, u32)
{
// FIXME: Implement AML Interpretation
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::disable_aml_interpretation()
{
// FIXME: Implement AML Interpretation
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::try_acpi_shutdown()
{
// FIXME: Implement AML Interpretation to perform ACPI shutdown
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DynamicParser::build_namespace()
{
// FIXME: Implement AML Interpretation to build the ACPI namespace
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}


@ -96,7 +96,7 @@ UNMAP_AFTER_INIT void MultiProcessorParser::parse_configuration_table()
entry = (MultiProcessor::EntryHeader*)(FlatPtr)entry + sizeof(MultiProcessor::CompatibilityBusAddressSpaceModifierEntry);
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
--entry_count;
}


@ -47,7 +47,7 @@ Parser* Parser::the()
void Parser::set_the(Parser& parser)
{
ASSERT(!s_acpi_parser);
VERIFY(!s_acpi_parser);
s_acpi_parser = &parser;
}
@ -89,7 +89,7 @@ UNMAP_AFTER_INIT void Parser::init_fadt()
klog() << "ACPI: Searching for the Fixed ACPI Data Table";
m_fadt = find_table("FACP");
ASSERT(!m_fadt.is_null());
VERIFY(!m_fadt.is_null());
auto sdt = map_typed<Structures::FADT>(m_fadt);
@ -148,13 +148,13 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
switch (structure.access_size) {
case (u8)GenericAddressStructure::AccessSize::QWord: {
dbgln("Trying to send QWord to IO port");
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
break;
}
case (u8)GenericAddressStructure::AccessSize::Undefined: {
dbgln("ACPI Warning: Unknown access size {}", structure.access_size);
ASSERT(structure.bit_width != (u8)GenericAddressStructure::BitWidth::QWord);
ASSERT(structure.bit_width != (u8)GenericAddressStructure::BitWidth::Undefined);
VERIFY(structure.bit_width != (u8)GenericAddressStructure::BitWidth::QWord);
VERIFY(structure.bit_width != (u8)GenericAddressStructure::BitWidth::Undefined);
dbgln("ACPI: Bit Width - {} bits", structure.bit_width);
address.out(value, structure.bit_width);
break;
@ -182,7 +182,7 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
break;
}
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
return;
}
@ -193,16 +193,16 @@ void Parser::access_generic_address(const Structures::GenericAddressStructure& s
u32 offset_in_pci_address = structure.address & 0xFFFF;
if (structure.access_size == (u8)GenericAddressStructure::AccessSize::QWord) {
dbgln("Trying to send QWord to PCI configuration space");
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
ASSERT(structure.access_size != (u8)GenericAddressStructure::AccessSize::Undefined);
VERIFY(structure.access_size != (u8)GenericAddressStructure::AccessSize::Undefined);
PCI::raw_access(pci_address, offset_in_pci_address, (1 << (structure.access_size - 1)), value);
return;
}
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
bool Parser::validate_reset_register()
@ -222,7 +222,7 @@ void Parser::try_acpi_reboot()
dbgln_if(ACPI_DEBUG, "ACPI: Rebooting, Probing FADT ({})", m_fadt);
auto fadt = map_typed<Structures::FADT>(m_fadt);
ASSERT(validate_reset_register());
VERIFY(validate_reset_register());
access_generic_address(fadt->reset_reg, fadt->reset_value);
Processor::halt();
}
@ -255,7 +255,7 @@ UNMAP_AFTER_INIT void Parser::initialize_main_system_description_table()
#if ACPI_DEBUG
dbgln("ACPI: Checking Main SDT Length to choose the correct mapping size");
#endif
ASSERT(!m_main_system_description_table.is_null());
VERIFY(!m_main_system_description_table.is_null());
auto length = get_table_size(m_main_system_description_table);
auto revision = get_table_revision(m_main_system_description_table);
@ -333,7 +333,7 @@ UNMAP_AFTER_INIT Optional<PhysicalAddress> StaticParsing::find_rsdp()
UNMAP_AFTER_INIT PhysicalAddress StaticParsing::find_table(PhysicalAddress rsdp_address, const StringView& signature)
{
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
ASSERT(signature.length() == 4);
VERIFY(signature.length() == 4);
auto rsdp = map_typed<Structures::RSDPDescriptor20>(rsdp_address);
@ -345,13 +345,13 @@ UNMAP_AFTER_INIT PhysicalAddress StaticParsing::find_table(PhysicalAddress rsdp_
return search_table_in_xsdt(PhysicalAddress(rsdp->xsdt_ptr), signature);
return search_table_in_rsdt(PhysicalAddress(rsdp->base.rsdt_ptr), signature);
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
UNMAP_AFTER_INIT static PhysicalAddress search_table_in_xsdt(PhysicalAddress xsdt_address, const StringView& signature)
{
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
ASSERT(signature.length() == 4);
VERIFY(signature.length() == 4);
auto xsdt = map_typed<Structures::XSDT>(xsdt_address);
@ -365,7 +365,7 @@ UNMAP_AFTER_INIT static PhysicalAddress search_table_in_xsdt(PhysicalAddress xsd
static bool match_table_signature(PhysicalAddress table_header, const StringView& signature)
{
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
ASSERT(signature.length() == 4);
VERIFY(signature.length() == 4);
auto table = map_typed<Structures::RSDT>(table_header);
return !strncmp(table->h.sig, signature.characters_without_null_termination(), 4);
@ -374,7 +374,7 @@ static bool match_table_signature(PhysicalAddress table_header, const StringView
UNMAP_AFTER_INIT static PhysicalAddress search_table_in_rsdt(PhysicalAddress rsdt_address, const StringView& signature)
{
// FIXME: There's no validation of ACPI tables here. Use the checksum to validate the tables.
ASSERT(signature.length() == 4);
VERIFY(signature.length() == 4);
auto rsdt = map_typed<Structures::RSDT>(rsdt_address);
@ -387,22 +387,22 @@ UNMAP_AFTER_INIT static PhysicalAddress search_table_in_rsdt(PhysicalAddress rsd
void Parser::enable_aml_interpretation()
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void Parser::enable_aml_interpretation(File&)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void Parser::enable_aml_interpretation(u8*, u32)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void Parser::disable_aml_interpretation()
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}


@ -319,7 +319,7 @@ void page_fault_handler(TrapFrame* trap)
dbgln("Continuing after resolved page fault");
#endif
} else {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}
@ -390,7 +390,7 @@ static void unimp_trap()
GenericInterruptHandler& get_interrupt_handler(u8 interrupt_number)
{
ASSERT(s_interrupt_handler[interrupt_number] != nullptr);
VERIFY(s_interrupt_handler[interrupt_number] != nullptr);
return *s_interrupt_handler[interrupt_number];
}
@ -401,14 +401,14 @@ static void revert_to_unused_handler(u8 interrupt_number)
void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
{
ASSERT(interrupt_number < GENERIC_INTERRUPT_HANDLERS_COUNT);
VERIFY(interrupt_number < GENERIC_INTERRUPT_HANDLERS_COUNT);
if (s_interrupt_handler[interrupt_number] != nullptr) {
if (s_interrupt_handler[interrupt_number]->type() == HandlerType::UnhandledInterruptHandler) {
s_interrupt_handler[interrupt_number] = &handler;
return;
}
if (s_interrupt_handler[interrupt_number]->is_shared_handler() && !s_interrupt_handler[interrupt_number]->is_sharing_with_others()) {
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
return;
}
@ -417,7 +417,7 @@ void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHan
static_cast<SpuriousInterruptHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
return;
}
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
auto& previous_handler = *s_interrupt_handler[interrupt_number];
s_interrupt_handler[interrupt_number] = nullptr;
SharedIRQHandler::initialize(interrupt_number);
@ -425,7 +425,7 @@ void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHan
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->register_handler(handler);
return;
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
} else {
s_interrupt_handler[interrupt_number] = &handler;
}
@ -433,13 +433,13 @@ void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHan
void unregister_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
{
ASSERT(s_interrupt_handler[interrupt_number] != nullptr);
VERIFY(s_interrupt_handler[interrupt_number] != nullptr);
if (s_interrupt_handler[interrupt_number]->type() == HandlerType::UnhandledInterruptHandler) {
dbgln("Trying to unregister unused handler (?)");
return;
}
if (s_interrupt_handler[interrupt_number]->is_shared_handler() && !s_interrupt_handler[interrupt_number]->is_sharing_with_others()) {
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::SharedIRQHandler);
static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->unregister_handler(handler);
if (!static_cast<SharedIRQHandler*>(s_interrupt_handler[interrupt_number])->sharing_devices_count()) {
revert_to_unused_handler(interrupt_number);
@ -447,11 +447,11 @@ void unregister_generic_interrupt_handler(u8 interrupt_number, GenericInterruptH
return;
}
if (!s_interrupt_handler[interrupt_number]->is_shared_handler()) {
ASSERT(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
VERIFY(s_interrupt_handler[interrupt_number]->type() == HandlerType::IRQHandler);
revert_to_unused_handler(interrupt_number);
return;
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
UNMAP_AFTER_INIT void register_interrupt_handler(u8 index, void (*f)())
@ -692,11 +692,11 @@ void handle_interrupt(TrapFrame* trap)
{
clac();
auto& regs = *trap->regs;
ASSERT(regs.isr_number >= IRQ_VECTOR_BASE && regs.isr_number <= (IRQ_VECTOR_BASE + GENERIC_INTERRUPT_HANDLERS_COUNT));
VERIFY(regs.isr_number >= IRQ_VECTOR_BASE && regs.isr_number <= (IRQ_VECTOR_BASE + GENERIC_INTERRUPT_HANDLERS_COUNT));
u8 irq = (u8)(regs.isr_number - 0x50);
s_entropy_source_interrupts.add_random_event(irq);
auto* handler = s_interrupt_handler[irq];
ASSERT(handler);
VERIFY(handler);
handler->increment_invoking_counter();
handler->handle_interrupt(regs);
handler->eoi();
@ -792,7 +792,7 @@ static volatile bool s_smp_enabled;
Vector<Processor*>& Processor::processors()
{
ASSERT(s_processors);
VERIFY(s_processors);
return *s_processors;
}
@ -803,8 +803,8 @@ Processor& Processor::by_id(u32 cpu)
// for all APs to finish, after which this array never gets modified
// again, so it's safe to not protect access to it here
auto& procs = processors();
ASSERT(procs[cpu] != nullptr);
ASSERT(procs.size() > cpu);
VERIFY(procs[cpu] != nullptr);
VERIFY(procs.size() > cpu);
return *procs[cpu];
}
@ -861,7 +861,7 @@ UNMAP_AFTER_INIT void Processor::cpu_detect()
u32 max_extended_leaf = CPUID(0x80000000).eax();
ASSERT(max_extended_leaf >= 0x80000001);
VERIFY(max_extended_leaf >= 0x80000001);
CPUID extended_processor_info(0x80000001);
if (extended_processor_info.edx() & (1 << 20))
set_feature(CPUFeature::NX);
@ -1049,14 +1049,14 @@ UNMAP_AFTER_INIT void Processor::early_initialize(u32 cpu)
cpu_setup();
gdt_init();
ASSERT(is_initialized()); // sanity check
ASSERT(&current() == this); // sanity check
VERIFY(is_initialized()); // sanity check
VERIFY(&current() == this); // sanity check
}
UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
{
ASSERT(m_self == this);
ASSERT(&current() == this); // sanity check
VERIFY(m_self == this);
VERIFY(&current() == this); // sanity check
dmesgln("CPU[{}]: Supported features: {}", id(), features_string());
if (!has_feature(CPUFeature::RDRAND))
@ -1069,7 +1069,7 @@ UNMAP_AFTER_INIT void Processor::initialize(u32 cpu)
flush_idt();
if (cpu == 0) {
ASSERT((FlatPtr(&s_clean_fpu_state) & 0xF) == 0);
VERIFY((FlatPtr(&s_clean_fpu_state) & 0xF) == 0);
asm volatile("fninit");
asm volatile("fxsave %0"
: "=m"(s_clean_fpu_state));
@ -1095,7 +1095,7 @@ void Processor::write_raw_gdt_entry(u16 selector, u32 low, u32 high)
if (i > m_gdt_length) {
m_gdt_length = i + 1;
ASSERT(m_gdt_length <= sizeof(m_gdt) / sizeof(m_gdt[0]));
VERIFY(m_gdt_length <= sizeof(m_gdt) / sizeof(m_gdt[0]));
m_gdtr.limit = (m_gdt_length + 1) * 8 - 1;
}
m_gdt[i].low = low;
@ -1178,14 +1178,14 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
// reflect the status at the last context switch.
ScopedSpinLock lock(g_scheduler_lock);
if (&thread == Processor::current_thread()) {
ASSERT(thread.state() == Thread::Running);
VERIFY(thread.state() == Thread::Running);
// Leave the scheduler lock. If we trigger page faults we may
// need to be preempted. Since this is our own thread it won't
// cause any problems as the stack won't change below this frame.
lock.unlock();
capture_current_thread();
} else if (thread.is_active()) {
ASSERT(thread.cpu() != Processor::id());
VERIFY(thread.cpu() != Processor::id());
// If this is the case, the thread is currently running
// on another processor. We can't trust the kernel stack as
// it may be changing at any time. We need to probably send
@ -1197,8 +1197,8 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
[&]() {
dbgln("CPU[{}] getting stack for cpu #{}", Processor::id(), proc.get_id());
ProcessPagingScope paging_scope(thread.process());
ASSERT(&Processor::current() != &proc);
ASSERT(&thread == Processor::current_thread());
VERIFY(&Processor::current() != &proc);
VERIFY(&thread == Processor::current_thread());
// NOTE: Because the other processor is still holding the
// scheduler lock while waiting for this callback to finish,
// the current thread on the target processor cannot change
@ -1212,7 +1212,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
} else {
switch (thread.state()) {
case Thread::Running:
ASSERT_NOT_REACHED(); // should have been handled above
VERIFY_NOT_REACHED(); // should have been handled above
case Thread::Runnable:
case Thread::Stopped:
case Thread::Blocked:
@ -1251,8 +1251,8 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
{
ASSERT(from_thread == to_thread || from_thread->state() != Thread::Running);
ASSERT(to_thread->state() == Thread::Running);
VERIFY(from_thread == to_thread || from_thread->state() != Thread::Running);
VERIFY(to_thread->state() == Thread::Running);
Processor::set_current_thread(*to_thread);
@ -1287,9 +1287,9 @@ extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
{
ASSERT(!in_irq());
ASSERT(m_in_critical == 1);
ASSERT(is_kernel_mode());
VERIFY(!in_irq());
VERIFY(m_in_critical == 1);
VERIFY(is_kernel_mode());
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);
from_thread->save_critical(m_in_critical);
@ -1344,12 +1344,12 @@ void Processor::switch_context(Thread*& from_thread, Thread*& to_thread)
extern "C" void context_first_init([[maybe_unused]] Thread* from_thread, [[maybe_unused]] Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
{
ASSERT(!are_interrupts_enabled());
ASSERT(is_kernel_mode());
VERIFY(!are_interrupts_enabled());
VERIFY(is_kernel_mode());
dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {} (context_first_init)", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
ASSERT(to_thread == Thread::current());
VERIFY(to_thread == Thread::current());
Scheduler::enter_current(*from_thread, true);
@ -1388,13 +1388,13 @@ void exit_kernel_thread(void)
u32 Processor::init_context(Thread& thread, bool leave_crit)
{
ASSERT(is_kernel_mode());
ASSERT(g_scheduler_lock.is_locked());
VERIFY(is_kernel_mode());
VERIFY(g_scheduler_lock.is_locked());
if (leave_crit) {
// Leave the critical section we set up in Process::exec,
// but because we still have the scheduler lock we should end up with 1
m_in_critical--; // leave it without triggering anything or restoring flags
ASSERT(in_critical() == 1);
VERIFY(in_critical() == 1);
}
u32 kernel_stack_top = thread.kernel_stack_top();
@ -1405,7 +1405,7 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
u32 stack_top = kernel_stack_top;
// TODO: handle NT?
ASSERT((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
auto& tss = thread.tss();
bool return_to_user = (tss.cs & 3) != 0;
@ -1503,7 +1503,7 @@ u32 Processor::init_context(Thread& thread, bool leave_crit)
extern "C" u32 do_init_context(Thread* thread, u32 flags)
{
ASSERT_INTERRUPTS_DISABLED();
VERIFY_INTERRUPTS_DISABLED();
thread->tss().eflags = flags;
return Processor::current().init_context(*thread, true);
}
@ -1536,18 +1536,18 @@ void Processor::assume_context(Thread& thread, u32 flags)
{
dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);
ASSERT_INTERRUPTS_DISABLED();
VERIFY_INTERRUPTS_DISABLED();
Scheduler::prepare_after_exec();
// in_critical() should be 2 here. The critical section in Process::exec
// and then the scheduler lock
ASSERT(Processor::current().in_critical() == 2);
VERIFY(Processor::current().in_critical() == 2);
do_assume_context(&thread, flags);
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
extern "C" UNMAP_AFTER_INIT void pre_init_finished(void)
{
ASSERT(g_scheduler_lock.own_lock());
VERIFY(g_scheduler_lock.own_lock());
// Because init_finished() will wait on the other APs, we need
// to release the scheduler lock so that the other APs can also get
@ -1567,7 +1567,7 @@ extern "C" UNMAP_AFTER_INIT void post_init_finished(void)
UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_thread)
{
ASSERT(initial_thread.process().is_kernel_process());
VERIFY(initial_thread.process().is_kernel_process());
auto& tss = initial_thread.tss();
m_tss = tss;
@ -1605,13 +1605,13 @@ UNMAP_AFTER_INIT void Processor::initialize_context_switching(Thread& initial_th
);
// clang-format on
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(&Processor::current() == this);
VERIFY_INTERRUPTS_DISABLED();
VERIFY(&Processor::current() == this);
trap.prev_irq_level = m_in_irq;
if (raise_irq)
m_in_irq++;
@ -1629,9 +1629,9 @@ void Processor::enter_trap(TrapFrame& trap, bool raise_irq)
void Processor::exit_trap(TrapFrame& trap)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(&Processor::current() == this);
ASSERT(m_in_irq >= trap.prev_irq_level);
VERIFY_INTERRUPTS_DISABLED();
VERIFY(&Processor::current() == this);
VERIFY(m_in_irq >= trap.prev_irq_level);
m_in_irq = trap.prev_irq_level;
smp_process_pending_messages();
@ -1644,7 +1644,7 @@ void Processor::exit_trap(TrapFrame& trap)
auto& current_trap = current_thread->current_trap();
current_trap = trap.next_trap;
if (current_trap) {
ASSERT(current_trap->regs);
VERIFY(current_trap->regs);
// If we have another higher level trap then we probably returned
// from an interrupt or irq handler. The cs register of the
// new/higher level trap tells us what the mode prior to it was
@ -1659,8 +1659,8 @@ void Processor::exit_trap(TrapFrame& trap)
void Processor::check_invoke_scheduler()
{
ASSERT(!m_in_irq);
ASSERT(!m_in_critical);
VERIFY(!m_in_irq);
VERIFY(!m_in_critical);
if (m_invoke_scheduler_async && m_scheduler_initialized) {
m_invoke_scheduler_async = false;
Scheduler::invoke_async();
@ -1724,7 +1724,7 @@ ProcessorMessage& Processor::smp_get_from_pool()
}
}
ASSERT(msg != nullptr);
VERIFY(msg != nullptr);
return *msg;
}
@ -1732,15 +1732,15 @@ Atomic<u32> Processor::s_idle_cpu_mask { 0 };
u32 Processor::smp_wake_n_idle_processors(u32 wake_count)
{
ASSERT(Processor::current().in_critical());
ASSERT(wake_count > 0);
VERIFY(Processor::current().in_critical());
VERIFY(wake_count > 0);
if (!s_smp_enabled)
return 0;
// Wake at most N - 1 processors
if (wake_count >= Processor::count()) {
wake_count = Processor::count() - 1;
ASSERT(wake_count > 0);
VERIFY(wake_count > 0);
}
u32 current_id = Processor::current().id();
@ -1853,7 +1853,7 @@ bool Processor::smp_process_pending_messages()
case ProcessorMessage::FlushTlb:
if (is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
// We assume that we don't cross into kernel land!
ASSERT(is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
VERIFY(is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
// This processor isn't using this page directory right now, we can ignore this request
dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
@ -1866,7 +1866,7 @@ bool Processor::smp_process_pending_messages()
bool is_async = msg->async; // Need to cache this value *before* dropping the ref count!
auto prev_refs = atomic_fetch_sub(&msg->refs, 1u, AK::MemoryOrder::memory_order_acq_rel);
ASSERT(prev_refs != 0);
VERIFY(prev_refs != 0);
if (prev_refs == 1) {
// All processors handled this. If this is an async message,
// we need to clean it up and return it to the pool
@ -1894,7 +1894,7 @@ bool Processor::smp_queue_message(ProcessorMessage& msg)
// the queue at any given time. We rely on the fact that the messages
// are pooled and never get freed!
auto& msg_entry = msg.per_proc_entries[id()];
ASSERT(msg_entry.msg == &msg);
VERIFY(msg_entry.msg == &msg);
ProcessorMessageEntry* next = nullptr;
do {
msg_entry.next = next;
@ -1909,7 +1909,7 @@ void Processor::smp_broadcast_message(ProcessorMessage& msg)
dbgln_if(SMP_DEBUG, "SMP[{}]: Broadcast message {} to cpus: {} proc: {}", cur_proc.get_id(), VirtualAddress(&msg), count(), VirtualAddress(&cur_proc));
atomic_store(&msg.refs, count() - 1, AK::MemoryOrder::memory_order_release);
ASSERT(msg.refs > 0);
VERIFY(msg.refs > 0);
bool need_broadcast = false;
for_each(
[&](Processor& proc) -> IterationDecision {
@ -1928,7 +1928,7 @@ void Processor::smp_broadcast_message(ProcessorMessage& msg)
void Processor::smp_broadcast_wait_sync(ProcessorMessage& msg)
{
auto& cur_proc = Processor::current();
ASSERT(!msg.async);
VERIFY(!msg.async);
// If synchronous then we must clean up and return the message back
// to the pool. Otherwise, the last processor to complete it will return it
while (atomic_load(&msg.refs, AK::MemoryOrder::memory_order_consume) != 0) {
@ -1971,7 +1971,7 @@ void Processor::smp_broadcast(void (*callback)(), bool async)
void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
{
auto& cur_proc = Processor::current();
ASSERT(cpu != cur_proc.get_id());
VERIFY(cpu != cur_proc.get_id());
auto& target_proc = processors()[cpu];
msg.async = async;
@ -2068,8 +2068,8 @@ UNMAP_AFTER_INIT void Processor::deferred_call_pool_init()
void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
{
ASSERT(m_in_critical);
ASSERT(!entry->was_allocated);
VERIFY(m_in_critical);
VERIFY(!entry->was_allocated);
entry->next = m_free_deferred_call_pool_entry;
m_free_deferred_call_pool_entry = entry;
@ -2077,13 +2077,13 @@ void Processor::deferred_call_return_to_pool(DeferredCallEntry* entry)
DeferredCallEntry* Processor::deferred_call_get_free()
{
ASSERT(m_in_critical);
VERIFY(m_in_critical);
if (m_free_deferred_call_pool_entry) {
// Fast path, we have an entry in our pool
auto* entry = m_free_deferred_call_pool_entry;
m_free_deferred_call_pool_entry = entry->next;
ASSERT(!entry->was_allocated);
VERIFY(!entry->was_allocated);
return entry;
}
@ -2094,7 +2094,7 @@ DeferredCallEntry* Processor::deferred_call_get_free()
void Processor::deferred_call_execute_pending()
{
ASSERT(m_in_critical);
VERIFY(m_in_critical);
if (!m_pending_deferred_calls)
return;
@ -2137,7 +2137,7 @@ void Processor::deferred_call_execute_pending()
void Processor::deferred_call_queue_entry(DeferredCallEntry* entry)
{
ASSERT(m_in_critical);
VERIFY(m_in_critical);
entry->next = m_pending_deferred_calls;
m_pending_deferred_calls = entry;
}


@ -912,14 +912,14 @@ public:
ALWAYS_INLINE void restore_irq(u32 prev_irq)
{
ASSERT(prev_irq <= m_in_irq);
VERIFY(prev_irq <= m_in_irq);
if (!prev_irq) {
u32 prev_critical = 0;
if (m_in_critical.compare_exchange_strong(prev_critical, 1)) {
m_in_irq = prev_irq;
deferred_call_execute_pending();
auto prev_raised = m_in_critical.exchange(prev_critical);
ASSERT(prev_raised == prev_critical + 1);
VERIFY(prev_raised == prev_critical + 1);
check_invoke_scheduler();
} else if (prev_critical == 0) {
check_invoke_scheduler();
@ -949,11 +949,11 @@ public:
ALWAYS_INLINE void leave_critical(u32 prev_flags)
{
cli(); // Need to prevent IRQs from interrupting us here!
ASSERT(m_in_critical > 0);
VERIFY(m_in_critical > 0);
if (m_in_critical == 1) {
if (!m_in_irq) {
deferred_call_execute_pending();
ASSERT(m_in_critical == 1);
VERIFY(m_in_critical == 1);
}
m_in_critical--;
if (!m_in_irq)
@ -981,7 +981,7 @@ public:
ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
{
m_in_critical.store(prev_crit, AK::MemoryOrder::memory_order_release);
ASSERT(!prev_crit || !(prev_flags & 0x200));
VERIFY(!prev_crit || !(prev_flags & 0x200));
if (prev_flags & 0x200)
sti();
else
@ -1105,14 +1105,14 @@ public:
void leave()
{
ASSERT(m_valid);
VERIFY(m_valid);
m_valid = false;
Processor::current().leave_critical(m_prev_flags);
}
void enter()
{
ASSERT(!m_valid);
VERIFY(!m_valid);
m_valid = true;
Processor::current().enter_critical(m_prev_flags);
}


@ -52,7 +52,7 @@ ProcessorInfo::ProcessorInfo(Processor& processor)
m_cpuid = builder.build();
}
{
ASSERT(max_leaf >= 1);
VERIFY(max_leaf >= 1);
CPUID cpuid(1);
m_stepping = cpuid.eax() & 0xf;
u32 model = (cpuid.eax() >> 4) & 0xf;


@ -31,11 +31,11 @@
#ifdef DEBUG
[[noreturn]] void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func);
# define ASSERT(expr) (static_cast<bool>(expr) ? void(0) : __assertion_failed(# expr, __FILE__, __LINE__, __PRETTY_FUNCTION__))
# define ASSERT_NOT_REACHED() ASSERT(false)
# define VERIFY(expr) (static_cast<bool>(expr) ? void(0) : __assertion_failed(# expr, __FILE__, __LINE__, __PRETTY_FUNCTION__))
# define VERIFY_NOT_REACHED() VERIFY(false)
#else
# define ASSERT(expr)
# define ASSERT_NOT_REACHED() CRASH()
# define VERIFY(expr)
# define VERIFY_NOT_REACHED() CRASH()
#endif
#define CRASH() \
do { \
@ -47,6 +47,6 @@
CRASH(); \
} while (0)
#define ASSERT_INTERRUPTS_DISABLED() ASSERT(!(cpu_flags() & 0x200))
#define ASSERT_INTERRUPTS_ENABLED() ASSERT(cpu_flags() & 0x200)
#define TODO ASSERT_NOT_REACHED
#define VERIFY_INTERRUPTS_DISABLED() VERIFY(!(cpu_flags() & 0x200))
#define VERIFY_INTERRUPTS_ENABLED() VERIFY(cpu_flags() & 0x200)
#define TODO VERIFY_NOT_REACHED
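For reference, here is what a call site becomes under the DEBUG branch of the definitions above, spelled out from the #define by hand. This is an illustration only, assuming DEBUG is defined (which the commit message's "checked in release builds as well" implies for these kernel builds); example() is a made-up function:

// Illustration, not kernel code: manual expansion of a VERIFY() call
// site under the #ifdef DEBUG branch shown above.
[[noreturn]] void __assertion_failed(const char* msg, const char* file, unsigned line, const char* func);

void example(int* ptr)
{
    // VERIFY(ptr != nullptr); expands to:
    (static_cast<bool>(ptr != nullptr)
            ? void(0)
            : __assertion_failed("ptr != nullptr", __FILE__, __LINE__, __PRETTY_FUNCTION__));
}

Two details worth noting from the hunk: in the #else branch, VERIFY(expr) still expands to nothing while VERIFY_NOT_REACHED() falls back on CRASH(), and the 0x200 tested by VERIFY_INTERRUPTS_DISABLED() is the IF (interrupt-enable) bit of EFLAGS.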


@ -45,13 +45,13 @@ UNMAP_AFTER_INIT void CommandLine::early_initialize(const char* cmd_line)
const CommandLine& kernel_command_line()
{
ASSERT(s_the);
VERIFY(s_the);
return *s_the;
}
UNMAP_AFTER_INIT void CommandLine::initialize()
{
ASSERT(!s_the);
VERIFY(!s_the);
s_the = new CommandLine(s_cmd_line);
}


@ -81,7 +81,7 @@ size_t DMIExpose::structure_table_length() const
UNMAP_AFTER_INIT void DMIExpose::initialize_exposer()
{
ASSERT(!(m_entry_point.is_null()));
VERIFY(!(m_entry_point.is_null()));
if (m_using_64bit_entry_point) {
set_64_bit_entry_initialization_values();
} else {


@ -39,8 +39,8 @@ AsyncDeviceRequest::~AsyncDeviceRequest()
{
{
ScopedSpinLock lock(m_lock);
ASSERT(is_completed_result(m_result));
ASSERT(m_sub_requests_pending.is_empty());
VERIFY(is_completed_result(m_result));
VERIFY(m_sub_requests_pending.is_empty());
}
// We should not need any locking here anymore. The destructor should
@ -50,8 +50,8 @@ AsyncDeviceRequest::~AsyncDeviceRequest()
// Which means there should be no more pending sub-requests and the
// entire AsyncDeviceRequest hierarchy should be immutable.
for (auto& sub_request : m_sub_requests_complete) {
ASSERT(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
ASSERT(sub_request.m_parent_request == this);
VERIFY(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
VERIFY(sub_request.m_parent_request == this);
sub_request.m_parent_request = nullptr;
}
}
@ -70,7 +70,7 @@ void AsyncDeviceRequest::request_finished()
auto AsyncDeviceRequest::wait(timeval* timeout) -> RequestWaitResult
{
ASSERT(!m_parent_request);
VERIFY(!m_parent_request);
auto request_result = get_request_result();
if (is_completed_result(request_result))
return { request_result, Thread::BlockResult::NotBlocked };
@ -87,14 +87,14 @@ auto AsyncDeviceRequest::get_request_result() const -> RequestResult
void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request)
{
// Sub-requests cannot be for the same device
ASSERT(&m_device != &sub_request->m_device);
ASSERT(sub_request->m_parent_request == nullptr);
VERIFY(&m_device != &sub_request->m_device);
VERIFY(sub_request->m_parent_request == nullptr);
sub_request->m_parent_request = this;
bool should_start;
{
ScopedSpinLock lock(m_lock);
ASSERT(!is_completed_result(m_result));
VERIFY(!is_completed_result(m_result));
m_sub_requests_pending.append(sub_request);
should_start = (m_result == Started);
}
@ -107,7 +107,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
bool all_completed;
{
ScopedSpinLock lock(m_lock);
ASSERT(m_result == Started);
VERIFY(m_result == Started);
size_t index;
for (index = 0; index < m_sub_requests_pending.size(); index++) {
if (&m_sub_requests_pending[index] == &sub_request) {
@ -117,7 +117,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
break;
}
}
ASSERT(index < m_sub_requests_pending.size());
VERIFY(index < m_sub_requests_pending.size());
all_completed = m_sub_requests_pending.is_empty();
if (all_completed) {
// Aggregate any errors
@ -126,7 +126,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
for (index = 0; index < m_sub_requests_complete.size(); index++) {
auto& sub_request = m_sub_requests_complete[index];
auto sub_result = sub_request.get_request_result();
ASSERT(is_completed_result(sub_result));
VERIFY(is_completed_result(sub_result));
switch (sub_result) {
case Failure:
any_failures = true;
@ -154,11 +154,11 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
void AsyncDeviceRequest::complete(RequestResult result)
{
ASSERT(result == Success || result == Failure || result == MemoryFault);
VERIFY(result == Success || result == Failure || result == MemoryFault);
ScopedCritical critical;
{
ScopedSpinLock lock(m_lock);
ASSERT(m_result == Started);
VERIFY(m_result == Started);
m_result = result;
}
if (Processor::current().in_irq()) {


@ -87,7 +87,7 @@ public:
void set_private(void* priv)
{
ASSERT(!m_private || !priv);
VERIFY(!m_private || !priv);
m_private = priv;
}
void* get_private() const { return m_private; }


@ -101,7 +101,7 @@ u16 BXVGADevice::get_register(u16 index)
void BXVGADevice::revert_resolution()
{
set_resolution_registers(m_framebuffer_width, m_framebuffer_height);
ASSERT(validate_setup_resolution(m_framebuffer_width, m_framebuffer_height));
VERIFY(validate_setup_resolution(m_framebuffer_width, m_framebuffer_height));
}
void BXVGADevice::set_resolution_registers(size_t width, size_t height)
@ -152,7 +152,7 @@ bool BXVGADevice::validate_setup_resolution(size_t width, size_t height)
void BXVGADevice::set_y_offset(size_t y_offset)
{
ASSERT(y_offset == 0 || y_offset == m_framebuffer_height);
VERIFY(y_offset == 0 || y_offset == m_framebuffer_height);
m_y_offset = y_offset;
set_register(VBE_DISPI_INDEX_Y_OFFSET, (u16)y_offset);
}


@ -64,7 +64,7 @@ bool BlockDevice::read_block(unsigned index, UserOrKernelBuffer& buffer)
dbgln("BlockDevice::read_block({}) cancelled", index);
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
return false;
}
@ -85,7 +85,7 @@ bool BlockDevice::write_block(unsigned index, const UserOrKernelBuffer& buffer)
dbgln("BlockDevice::write_block({}) cancelled", index);
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
return false;
}


@ -57,7 +57,7 @@ public:
case Write:
return "BlockDeviceRequest (write)";
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}


@ -61,7 +61,7 @@ Device::Device(unsigned major, unsigned minor)
if (it != all_devices().end()) {
dbgln("Already registered {},{}: {}", major, minor, it->value->class_name());
}
ASSERT(!all_devices().contains(device_id));
VERIFY(!all_devices().contains(device_id));
all_devices().set(device_id, this);
}
@ -86,8 +86,8 @@ void Device::process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncD
{
ScopedSpinLock lock(m_requests_lock);
ASSERT(!m_requests.is_empty());
ASSERT(m_requests.first().ptr() == &completed_request);
VERIFY(!m_requests.is_empty());
VERIFY(m_requests.first().ptr() == &completed_request);
m_requests.remove(m_requests.begin());
if (!m_requests.is_empty())
next_request = m_requests.first().ptr();


@ -41,13 +41,13 @@ UNMAP_AFTER_INIT void I8042Controller::initialize()
I8042Controller& I8042Controller::the()
{
ASSERT(s_the);
VERIFY(s_the);
return *s_the;
}
UNMAP_AFTER_INIT I8042Controller::I8042Controller()
{
ASSERT(!s_the);
VERIFY(!s_the);
s_the = this;
u8 configuration;
@ -148,7 +148,7 @@ UNMAP_AFTER_INIT I8042Controller::I8042Controller()
void I8042Controller::irq_process_input_buffer(Device)
{
ASSERT(Processor::current().in_irq());
VERIFY(Processor::current().in_irq());
u8 status = IO::in8(I8042_STATUS);
if (!(status & I8042_BUFFER_FULL))
@ -171,10 +171,10 @@ void I8042Controller::do_drain()
bool I8042Controller::do_reset_device(Device device)
{
ASSERT(device != Device::None);
ASSERT(m_lock.is_locked());
VERIFY(device != Device::None);
VERIFY(m_lock.is_locked());
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
if (do_send_command(device, 0xff) != I8042_ACK)
return false;
// Wait until we get the self-test result
@ -183,20 +183,20 @@ bool I8042Controller::do_reset_device(Device device)
u8 I8042Controller::do_send_command(Device device, u8 command)
{
ASSERT(device != Device::None);
ASSERT(m_lock.is_locked());
VERIFY(device != Device::None);
VERIFY(m_lock.is_locked());
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
return do_write_to_device(device, command);
}
u8 I8042Controller::do_send_command(Device device, u8 command, u8 data)
{
ASSERT(device != Device::None);
ASSERT(m_lock.is_locked());
VERIFY(device != Device::None);
VERIFY(m_lock.is_locked());
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
u8 response = do_write_to_device(device, command);
if (response == I8042_ACK)
@ -206,10 +206,10 @@ u8 I8042Controller::do_send_command(Device device, u8 command, u8 data)
u8 I8042Controller::do_write_to_device(Device device, u8 data)
{
ASSERT(device != Device::None);
ASSERT(m_lock.is_locked());
VERIFY(device != Device::None);
VERIFY(m_lock.is_locked());
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
int attempts = 0;
u8 response;
@ -230,7 +230,7 @@ u8 I8042Controller::do_write_to_device(Device device, u8 data)
u8 I8042Controller::do_read_from_device(Device device)
{
ASSERT(device != Device::None);
VERIFY(device != Device::None);
prepare_for_input(device);
return IO::in8(I8042_BUFFER);
@ -238,7 +238,7 @@ u8 I8042Controller::do_read_from_device(Device device)
void I8042Controller::prepare_for_input(Device device)
{
ASSERT(m_lock.is_locked());
VERIFY(m_lock.is_locked());
const u8 buffer_type = device == Device::Keyboard ? I8042_KEYBOARD_BUFFER : I8042_MOUSE_BUFFER;
for (;;) {
u8 status = IO::in8(I8042_STATUS);
@ -249,7 +249,7 @@ void I8042Controller::prepare_for_input(Device device)
void I8042Controller::prepare_for_output()
{
ASSERT(m_lock.is_locked());
VERIFY(m_lock.is_locked());
for (;;) {
if (!(IO::in8(I8042_STATUS) & 2))
return;
@ -258,14 +258,14 @@ void I8042Controller::prepare_for_output()
void I8042Controller::do_wait_then_write(u8 port, u8 data)
{
ASSERT(m_lock.is_locked());
VERIFY(m_lock.is_locked());
prepare_for_output();
IO::out8(port, data);
}
u8 I8042Controller::do_wait_then_read(u8 port)
{
ASSERT(m_lock.is_locked());
VERIFY(m_lock.is_locked());
prepare_for_input(Device::None);
return IO::in8(port);
}


@ -113,7 +113,7 @@ private:
static int device_to_deviceinfo_index(Device device)
{
ASSERT(device != Device::None);
VERIFY(device != Device::None);
return (device == Device::Keyboard) ? 0 : 1;
}


@ -450,7 +450,7 @@ KResultOr<size_t> KeyboardDevice::read(FileDescription&, size_t, UserOrKernelBuf
});
if (n < 0)
return KResult((ErrnoCode)-n);
ASSERT((size_t)n == sizeof(Event));
VERIFY((size_t)n == sizeof(Event));
nread += sizeof(Event);
lock.lock();


@ -114,7 +114,7 @@ void PS2MouseDevice::irq_handle_byte_read(u8 byte)
evaluate_block_conditions();
};
ASSERT(m_data_state < sizeof(m_data.bytes) / sizeof(m_data.bytes[0]));
VERIFY(m_data_state < sizeof(m_data.bytes) / sizeof(m_data.bytes[0]));
m_data.bytes[m_data_state] = byte;
switch (m_data_state) {
@ -136,7 +136,7 @@ void PS2MouseDevice::irq_handle_byte_read(u8 byte)
commit_packet();
break;
case 3:
ASSERT(m_has_wheel);
VERIFY(m_has_wheel);
commit_packet();
break;
}
@ -275,7 +275,7 @@ bool PS2MouseDevice::can_read(const FileDescription&, size_t) const
KResultOr<size_t> PS2MouseDevice::read(FileDescription&, size_t, UserOrKernelBuffer& buffer, size_t size)
{
ASSERT(size > 0);
VERIFY(size > 0);
size_t nread = 0;
size_t remaining_space_in_buffer = static_cast<size_t>(size) - nread;
ScopedSpinLock lock(m_queue_lock);


@ -153,7 +153,7 @@ void SB16::set_irq_register(u8 irq_number)
bitmask = 0b1000;
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
IO::out8(0x224, 0x80);
IO::out8(0x225, bitmask);
@ -258,7 +258,7 @@ KResultOr<size_t> SB16::write(FileDescription&, size_t, const UserOrKernelBuffer
#if SB16_DEBUG
klog() << "SB16: Writing buffer of " << length << " bytes";
#endif
ASSERT(length <= PAGE_SIZE);
VERIFY(length <= PAGE_SIZE);
const int BLOCK_SIZE = 32 * 1024;
if (length > BLOCK_SIZE) {
return ENOSPC;


@ -296,7 +296,7 @@ QueueHead* UHCIController::allocate_queue_head() const
}
}
ASSERT_NOT_REACHED(); // Let's just assert for now, this should never happen
VERIFY_NOT_REACHED(); // Let's just assert for now, this should never happen
return nullptr; // Huh!? We're outta queue heads!
}
@ -312,7 +312,7 @@ TransferDescriptor* UHCIController::allocate_transfer_descriptor() const
}
}
ASSERT_NOT_REACHED(); // Let's just assert for now, this should never happen
VERIFY_NOT_REACHED(); // Let's just assert for now, this should never happen
return nullptr; // Huh?! We're outta TDs!!
}


@ -105,13 +105,13 @@ struct alignas(16) TransferDescriptor final {
void set_in_use(bool in_use) { m_in_use = in_use; }
void set_max_len(u16 max_len)
{
ASSERT(max_len < 0x500 || max_len == 0x7ff);
VERIFY(max_len < 0x500 || max_len == 0x7ff);
m_token |= (max_len << 21);
}
void set_device_address(u8 address)
{
ASSERT(address <= 0x7f);
VERIFY(address <= 0x7f);
m_token |= (address << 8);
}


@ -53,7 +53,7 @@ void DoubleBuffer::flip()
{
if (m_storage.is_null())
return;
ASSERT(m_read_buffer_index == m_read_buffer->size);
VERIFY(m_read_buffer_index == m_read_buffer->size);
swap(m_read_buffer, m_write_buffer);
m_write_buffer->size = 0;
m_read_buffer_index = 0;
@ -64,7 +64,7 @@ ssize_t DoubleBuffer::write(const UserOrKernelBuffer& data, size_t size)
{
if (!size || m_storage.is_null())
return 0;
ASSERT(size > 0);
VERIFY(size > 0);
LOCKER(m_lock);
size_t bytes_to_write = min(size, m_space_for_writing);
u8* write_ptr = m_write_buffer->data + m_write_buffer->size;
@ -81,7 +81,7 @@ ssize_t DoubleBuffer::read(UserOrKernelBuffer& data, size_t size)
{
if (!size || m_storage.is_null())
return 0;
ASSERT(size > 0);
VERIFY(size > 0);
LOCKER(m_lock);
if (m_read_buffer_index >= m_read_buffer->size && m_write_buffer->size != 0)
flip();


@ -56,7 +56,7 @@ public:
void set_unblock_callback(Function<void()> callback)
{
ASSERT(!m_unblock_callback);
VERIFY(!m_unblock_callback);
m_unblock_callback = move(callback);
}


@ -78,7 +78,7 @@ public:
{
if (auto it = m_hash.find(block_index); it != m_hash.end()) {
auto& entry = const_cast<CacheEntry&>(*it->value);
ASSERT(entry.block_index == block_index);
VERIFY(entry.block_index == block_index);
return entry;
}
@ -90,7 +90,7 @@ public:
return get(block_index);
}
ASSERT(m_clean_list.last());
VERIFY(m_clean_list.last());
auto& new_entry = *m_clean_list.last();
m_clean_list.prepend(new_entry);
@ -127,7 +127,7 @@ private:
BlockBasedFS::BlockBasedFS(FileDescription& file_description)
: FileBackedFS(file_description)
{
ASSERT(file_description.file().is_seekable());
VERIFY(file_description.file().is_seekable());
}
BlockBasedFS::~BlockBasedFS()
@ -136,8 +136,8 @@ BlockBasedFS::~BlockBasedFS()
KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& data, size_t count, size_t offset, bool allow_cache)
{
ASSERT(m_logical_block_size);
ASSERT(offset + count <= block_size());
VERIFY(m_logical_block_size);
VERIFY(offset + count <= block_size());
dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_block {}, size={}", index, count);
if (!allow_cache) {
@ -147,7 +147,7 @@ KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& da
auto nwritten = file_description().write(data, count);
if (nwritten.is_error())
return nwritten.error();
ASSERT(nwritten.value() == count);
VERIFY(nwritten.value() == count);
return KSuccess;
}
@ -171,8 +171,8 @@ bool BlockBasedFS::raw_read(BlockIndex index, UserOrKernelBuffer& buffer)
u32 base_offset = index.value() * m_logical_block_size;
file_description().seek(base_offset, SEEK_SET);
auto nread = file_description().read(buffer, m_logical_block_size);
ASSERT(!nread.is_error());
ASSERT(nread.value() == m_logical_block_size);
VERIFY(!nread.is_error());
VERIFY(nread.value() == m_logical_block_size);
return true;
}
bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
@ -180,8 +180,8 @@ bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
size_t base_offset = index.value() * m_logical_block_size;
file_description().seek(base_offset, SEEK_SET);
auto nwritten = file_description().write(buffer, m_logical_block_size);
ASSERT(!nwritten.is_error());
ASSERT(nwritten.value() == m_logical_block_size);
VERIFY(!nwritten.is_error());
VERIFY(nwritten.value() == m_logical_block_size);
return true;
}
@ -208,7 +208,7 @@ bool BlockBasedFS::raw_write_blocks(BlockIndex index, size_t count, const UserOr
KResult BlockBasedFS::write_blocks(BlockIndex index, unsigned count, const UserOrKernelBuffer& data, bool allow_cache)
{
ASSERT(m_logical_block_size);
VERIFY(m_logical_block_size);
dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::write_blocks {}, count={}", index, count);
for (unsigned i = 0; i < count; ++i) {
auto result = write_block(BlockIndex { index.value() + i }, data.offset(i * block_size()), block_size(), 0, allow_cache);
@ -220,8 +220,8 @@ KResult BlockBasedFS::write_blocks(BlockIndex index, unsigned count, const UserO
KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, size_t count, size_t offset, bool allow_cache) const
{
ASSERT(m_logical_block_size);
ASSERT(offset + count <= block_size());
VERIFY(m_logical_block_size);
VERIFY(offset + count <= block_size());
dbgln_if(BBFS_DEBUG, "BlockBasedFileSystem::read_block {}", index);
if (!allow_cache) {
@ -231,7 +231,7 @@ KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, s
auto nread = file_description().read(*buffer, count);
if (nread.is_error())
return nread.error();
ASSERT(nread.value() == count);
VERIFY(nread.value() == count);
return KSuccess;
}
@ -243,7 +243,7 @@ KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, s
auto nread = file_description().read(entry_data_buffer, block_size());
if (nread.is_error())
return nread.error();
ASSERT(nread.value() == block_size());
VERIFY(nread.value() == block_size());
entry.has_data = true;
}
if (buffer && !buffer->write(entry.data + offset, count))
@ -253,7 +253,7 @@ KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, s
KResult BlockBasedFS::read_blocks(BlockIndex index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache) const
{
ASSERT(m_logical_block_size);
VERIFY(m_logical_block_size);
if (!count)
return EINVAL;
if (count == 1)


@ -61,7 +61,7 @@ size_t DevFS::allocate_inode_index()
{
LOCKER(m_lock);
m_next_inode_index = m_next_inode_index.value() + 1;
ASSERT(m_next_inode_index > 0);
VERIFY(m_next_inode_index > 0);
return 1 + m_next_inode_index.value();
}
@ -102,17 +102,17 @@ DevFSInode::DevFSInode(DevFS& fs)
}
ssize_t DevFSInode::read_bytes(off_t, ssize_t, UserOrKernelBuffer&, FileDescription*) const
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
KResult DevFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)>) const
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
RefPtr<Inode> DevFSInode::lookup(StringView)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void DevFSInode::flush_metadata()
@ -121,7 +121,7 @@ void DevFSInode::flush_metadata()
ssize_t DevFSInode::write_bytes(off_t, ssize_t, const UserOrKernelBuffer&, FileDescription*)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
KResultOr<NonnullRefPtr<Inode>> DevFSInode::create_child(const String&, mode_t, dev_t, uid_t, gid_t)
@ -141,7 +141,7 @@ KResult DevFSInode::remove_child(const StringView&)
KResultOr<size_t> DevFSInode::directory_entry_count() const
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
KResult DevFSInode::chmod(mode_t)
@ -174,8 +174,8 @@ DevFSLinkInode::DevFSLinkInode(DevFS& fs, String name)
ssize_t DevFSLinkInode::read_bytes(off_t offset, ssize_t, UserOrKernelBuffer& buffer, FileDescription*) const
{
LOCKER(m_lock);
ASSERT(offset == 0);
ASSERT(!m_link.is_null());
VERIFY(offset == 0);
VERIFY(!m_link.is_null());
if (!buffer.write(((const u8*)m_link.substring_view(0).characters_without_null_termination()) + offset, m_link.length()))
return -EFAULT;
return m_link.length();
@ -195,8 +195,8 @@ InodeMetadata DevFSLinkInode::metadata() const
ssize_t DevFSLinkInode::write_bytes(off_t offset, ssize_t count, const UserOrKernelBuffer& buffer, FileDescription*)
{
LOCKER(m_lock);
ASSERT(offset == 0);
ASSERT(buffer.is_kernel_buffer());
VERIFY(offset == 0);
VERIFY(buffer.is_kernel_buffer());
m_link = buffer.copy_into_string(count);
return count;
}
@ -361,7 +361,7 @@ String DevFSDeviceInode::name() const
ssize_t DevFSDeviceInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
{
LOCKER(m_lock);
ASSERT(!!description);
VERIFY(!!description);
if (!m_attached_device->can_read(*description, offset))
return -EIO;
auto nread = const_cast<Device&>(*m_attached_device).read(*description, offset, buffer, count);
@ -387,7 +387,7 @@ InodeMetadata DevFSDeviceInode::metadata() const
ssize_t DevFSDeviceInode::write_bytes(off_t offset, ssize_t count, const UserOrKernelBuffer& buffer, FileDescription* description)
{
LOCKER(m_lock);
ASSERT(!!description);
VERIFY(!!description);
if (!m_attached_device->can_read(*description, offset))
return -EIO;
auto nread = const_cast<Device&>(*m_attached_device).write(*description, offset, buffer, count);


@ -63,7 +63,7 @@ bool DevPtsFS::initialize()
static unsigned inode_index_to_pty_index(InodeIndex inode_index)
{
ASSERT(inode_index > 1);
VERIFY(inode_index > 1);
return inode_index.value() - 2;
}
@ -84,7 +84,7 @@ RefPtr<Inode> DevPtsFS::get_inode(InodeIdentifier inode_id) const
unsigned pty_index = inode_index_to_pty_index(inode_id.index());
auto* device = Device::get_device(201, pty_index);
ASSERT(device);
VERIFY(device);
auto inode = adopt(*new DevPtsFSInode(const_cast<DevPtsFS&>(*this), inode_id.index(), static_cast<SlavePTY*>(device)));
inode->m_metadata.inode = inode_id;
@ -122,12 +122,12 @@ DevPtsFSInode::~DevPtsFSInode()
ssize_t DevPtsFSInode::read_bytes(off_t, ssize_t, UserOrKernelBuffer&, FileDescription*) const
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
ssize_t DevPtsFSInode::write_bytes(off_t, ssize_t, const UserOrKernelBuffer&, FileDescription*)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
InodeMetadata DevPtsFSInode::metadata() const
@ -159,14 +159,14 @@ KResult DevPtsFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEn
KResultOr<size_t> DevPtsFSInode::directory_entry_count() const
{
ASSERT(identifier().index() == 1);
VERIFY(identifier().index() == 1);
return 2 + s_ptys->size();
}
RefPtr<Inode> DevPtsFSInode::lookup(StringView name)
{
ASSERT(identifier().index() == 1);
VERIFY(identifier().index() == 1);
if (name == "." || name == "..")
return this;


@ -91,28 +91,28 @@ Ext2FS::~Ext2FS()
bool Ext2FS::flush_super_block()
{
LOCKER(m_lock);
ASSERT((sizeof(ext2_super_block) % logical_block_size()) == 0);
VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
bool success = raw_write_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
ASSERT(success);
VERIFY(success);
return true;
}
const ext2_group_desc& Ext2FS::group_descriptor(GroupIndex group_index) const
{
// FIXME: Should this fail gracefully somehow?
ASSERT(group_index <= m_block_group_count);
ASSERT(group_index > 0);
VERIFY(group_index <= m_block_group_count);
VERIFY(group_index > 0);
return block_group_descriptors()[group_index.value() - 1];
}
bool Ext2FS::initialize()
{
LOCKER(m_lock);
ASSERT((sizeof(ext2_super_block) % logical_block_size()) == 0);
VERIFY((sizeof(ext2_super_block) % logical_block_size()) == 0);
auto super_block_buffer = UserOrKernelBuffer::for_kernel_buffer((u8*)&m_super_block);
bool success = raw_read_blocks(2, (sizeof(ext2_super_block) / logical_block_size()), super_block_buffer);
ASSERT(success);
VERIFY(success);
auto& super_block = this->super_block();
if constexpr (EXT2_DEBUG) {
@ -134,7 +134,7 @@ bool Ext2FS::initialize()
set_block_size(EXT2_BLOCK_SIZE(&super_block));
ASSERT(block_size() <= (int)max_block_size);
VERIFY(block_size() <= (int)max_block_size);
m_block_group_count = ceil_div(super_block.s_blocks_count, super_block.s_blocks_per_group);
@ -227,7 +227,7 @@ Ext2FS::BlockListShape Ext2FS::compute_block_list_shape(unsigned blocks) const
shape.meta_blocks += divide_rounded_up(shape.triply_indirect_blocks, entries_per_block * entries_per_block);
shape.meta_blocks += divide_rounded_up(shape.triply_indirect_blocks, entries_per_block);
blocks_remaining -= shape.triply_indirect_blocks;
ASSERT(blocks_remaining == 0);
VERIFY(blocks_remaining == 0);
return shape;
}
@ -302,7 +302,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
auto block_contents = ByteBuffer::create_uninitialized(block_size());
OutputMemoryStream stream { block_contents };
ASSERT(new_shape.indirect_blocks <= entries_per_block);
VERIFY(new_shape.indirect_blocks <= entries_per_block);
for (unsigned i = 0; i < new_shape.indirect_blocks; ++i) {
stream << blocks[output_block_index++].value();
--remaining_blocks;
@ -355,7 +355,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
}
auto* dind_block_as_pointers = (unsigned*)dind_block_contents.data();
ASSERT(indirect_block_count <= entries_per_block);
VERIFY(indirect_block_count <= entries_per_block);
for (unsigned i = 0; i < indirect_block_count; ++i) {
bool ind_block_dirty = false;
@ -386,7 +386,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
if (entries_to_write > entries_per_block)
entries_to_write = entries_per_block;
ASSERT(entries_to_write <= entries_per_block);
VERIFY(entries_to_write <= entries_per_block);
for (unsigned j = 0; j < entries_to_write; ++j) {
BlockIndex output_block = blocks[output_block_index++];
if (ind_block_as_pointers[j] != output_block) {
@ -405,7 +405,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
if (ind_block_dirty) {
auto buffer = UserOrKernelBuffer::for_kernel_buffer(ind_block_contents.data());
int err = write_block(indirect_block_index, buffer, block_size());
ASSERT(err >= 0);
VERIFY(err >= 0);
}
}
for (unsigned i = indirect_block_count; i < entries_per_block; ++i) {
@ -418,7 +418,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
if (dind_block_dirty) {
auto buffer = UserOrKernelBuffer::for_kernel_buffer(dind_block_contents.data());
int err = write_block(e2inode.i_block[EXT2_DIND_BLOCK], buffer, block_size());
ASSERT(err >= 0);
VERIFY(err >= 0);
}
}
@ -427,7 +427,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e
// FIXME: Implement!
dbgln("we don't know how to write tind ext2fs blocks yet!");
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
Vector<Ext2FS::BlockIndex> Ext2FS::block_list_for_inode(const ext2_inode& e2inode, bool include_block_list_blocks) const
@ -536,13 +536,13 @@ Vector<Ext2FS::BlockIndex> Ext2FS::block_list_for_inode_impl(const ext2_inode& e
void Ext2FS::free_inode(Ext2FSInode& inode)
{
LOCKER(m_lock);
ASSERT(inode.m_raw_inode.i_links_count == 0);
VERIFY(inode.m_raw_inode.i_links_count == 0);
dbgln_if(EXT2_DEBUG, "Ext2FS: Inode {} has no more links, time to delete!", inode.index());
// Mark all blocks used by this inode as free.
auto block_list = block_list_for_inode(inode.m_raw_inode, true);
for (auto block_index : block_list) {
ASSERT(block_index <= super_block().s_blocks_count);
VERIFY(block_index <= super_block().s_blocks_count);
if (block_index.value())
set_block_allocation_state(block_index, false);
}
@ -674,7 +674,7 @@ void Ext2FSInode::flush_metadata()
RefPtr<Inode> Ext2FS::get_inode(InodeIdentifier inode) const
{
LOCKER(m_lock);
ASSERT(inode.fsid() == fsid());
VERIFY(inode.fsid() == fsid());
{
auto it = m_inode_cache.find(inode.index());
@ -706,14 +706,14 @@ RefPtr<Inode> Ext2FS::get_inode(InodeIdentifier inode) const
ssize_t Ext2FSInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
{
Locker inode_locker(m_lock);
ASSERT(offset >= 0);
VERIFY(offset >= 0);
if (m_raw_inode.i_size == 0)
return 0;
// Symbolic links shorter than 60 characters are stored inline inside the i_block array.
// This avoids wasting an entire block on short links. (Most links are short.)
if (is_symlink() && size() < max_inline_symlink_length) {
ASSERT(offset == 0);
VERIFY(offset == 0);
ssize_t nread = min((off_t)size() - offset, static_cast<off_t>(count));
if (!buffer.write(((const u8*)m_raw_inode.i_block) + offset, (size_t)nread))
return -EFAULT;
@ -748,7 +748,7 @@ ssize_t Ext2FSInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer&
for (size_t bi = first_block_logical_index; remaining_count && bi <= last_block_logical_index; ++bi) {
auto block_index = m_block_list[bi];
ASSERT(block_index.value());
VERIFY(block_index.value());
size_t offset_into_block = (bi == first_block_logical_index) ? offset_into_first_block : 0;
size_t num_bytes_to_copy = min(block_size - offset_into_block, remaining_count);
auto buffer_offset = buffer.offset(nread);
@ -827,7 +827,7 @@ KResult Ext2FSInode::resize(u64 new_size)
auto nwritten = write_bytes(clear_from, min(sizeof(zero_buffer), bytes_to_clear), UserOrKernelBuffer::for_kernel_buffer(zero_buffer), nullptr);
if (nwritten < 0)
return KResult((ErrnoCode)-nwritten);
ASSERT(nwritten != 0);
VERIFY(nwritten != 0);
bytes_to_clear -= nwritten;
clear_from += nwritten;
}
@ -838,8 +838,8 @@ KResult Ext2FSInode::resize(u64 new_size)
ssize_t Ext2FSInode::write_bytes(off_t offset, ssize_t count, const UserOrKernelBuffer& data, FileDescription* description)
{
ASSERT(offset >= 0);
ASSERT(count >= 0);
VERIFY(offset >= 0);
VERIFY(count >= 0);
Locker inode_locker(m_lock);
Locker fs_locker(fs().m_lock);
@ -849,7 +849,7 @@ ssize_t Ext2FSInode::write_bytes(off_t offset, ssize_t count, const UserOrKernel
return result;
if (is_symlink()) {
ASSERT(offset == 0);
VERIFY(offset == 0);
if (max((size_t)(offset + count), (size_t)m_raw_inode.i_size) < max_inline_symlink_length) {
dbgln_if(EXT2_DEBUG, "Ext2FS: write_bytes poking into i_block array for inline symlink '{}' ({} bytes)", data.copy_into_string(count), count);
if (!data.read(((u8*)m_raw_inode.i_block) + offset, (size_t)count))
@ -937,7 +937,7 @@ u8 Ext2FS::internal_file_type_to_directory_entry_type(const DirectoryEntryView&
KResult Ext2FSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)> callback) const
{
LOCKER(m_lock);
ASSERT(is_directory());
VERIFY(is_directory());
dbgln_if(EXT2_VERY_DEBUG, "Ext2FS: Traversing as directory: {}", index());
@ -1020,7 +1020,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FSInode::create_child(const String& name, mo
KResult Ext2FSInode::add_child(Inode& child, const StringView& name, mode_t mode)
{
LOCKER(m_lock);
ASSERT(is_directory());
VERIFY(is_directory());
if (name.length() > EXT2_NAME_LEN)
return ENAMETOOLONG;
@ -1064,7 +1064,7 @@ KResult Ext2FSInode::remove_child(const StringView& name)
{
LOCKER(m_lock);
dbgln_if(EXT2_DEBUG, "Ext2FSInode::remove_child('{}') in inode {}", name, index());
ASSERT(is_directory());
VERIFY(is_directory());
auto it = m_lookup_cache.find(name);
if (it == m_lookup_cache.end())
@ -1162,7 +1162,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
}
}
ASSERT(found_a_group);
VERIFY(found_a_group);
auto& bgd = group_descriptor(group_index);
auto& cached_bitmap = get_bitmap_block(bgd.bg_block_bitmap);
@ -1172,7 +1172,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
BlockIndex first_block_in_group = (group_index.value() - 1) * blocks_per_group() + first_block_index().value();
size_t free_region_size = 0;
auto first_unset_bit_index = block_bitmap.find_longest_range_of_unset_bits(count - blocks.size(), free_region_size);
ASSERT(first_unset_bit_index.has_value());
VERIFY(first_unset_bit_index.has_value());
dbgln_if(EXT2_DEBUG, "Ext2FS: allocating free region of size: {} [{}]", free_region_size, group_index);
for (size_t i = 0; i < free_region_size; ++i) {
BlockIndex block_index = (first_unset_bit_index.value() + i) + first_block_in_group.value();
@ -1182,7 +1182,7 @@ auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) ->
}
}
ASSERT(blocks.size() == count);
VERIFY(blocks.size() == count);
return blocks;
}
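
The allocator above leans on a find-longest-range-of-unset-bits primitive over the block bitmap. A simplified, hedged sketch of that scan (illustrative; AK::Bitmap's real implementation differs in detail):

#include <algorithm>
#include <cstddef>
#include <optional>
#include <vector>

// Simplified sketch: find the start of the longest run of unset bits,
// capped at max_length. (An assumption-level model of the AK::Bitmap call.)
std::optional<size_t> find_longest_range_of_unset_bits(
    const std::vector<bool>& bits, size_t max_length, size_t& found_length)
{
    std::optional<size_t> best_start;
    size_t best_length = 0;
    size_t run_start = 0;
    size_t run_length = 0;
    for (size_t i = 0; i <= bits.size(); ++i) {
        if (i < bits.size() && !bits[i]) {
            if (run_length == 0)
                run_start = i;
            ++run_length;
        } else {
            // A run just ended (or we reached the end); record it if best.
            if (run_length > best_length) {
                best_length = run_length;
                best_start = run_start;
            }
            run_length = 0;
        }
        if (best_length >= max_length)
            break; // a long-enough run was found; stop scanning early
    }
    found_length = std::min(best_length, max_length);
    return best_start;
}
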
@ -1239,7 +1239,7 @@ InodeIndex Ext2FS::find_a_free_inode(GroupIndex preferred_group)
InodeIndex inode = first_free_inode_in_group;
dbgln_if(EXT2_DEBUG, "Ext2FS: found suitable inode {}", inode);
ASSERT(get_inode_allocation_state(inode) == false);
VERIFY(get_inode_allocation_state(inode) == false);
return inode;
}
@ -1285,7 +1285,7 @@ bool Ext2FS::set_inode_allocation_state(InodeIndex inode_index, bool new_state)
dbgln_if(EXT2_DEBUG, "Ext2FS: set_inode_allocation_state({}) {} -> {}", inode_index, current_state, new_state);
if (current_state == new_state) {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
return true;
}
@ -1325,14 +1325,14 @@ Ext2FS::CachedBitmap& Ext2FS::get_bitmap_block(BlockIndex bitmap_block_index)
auto block = KBuffer::create_with_size(block_size(), Region::Access::Read | Region::Access::Write, "Ext2FS: Cached bitmap block");
auto buffer = UserOrKernelBuffer::for_kernel_buffer(block.data());
int err = read_block(bitmap_block_index, &buffer, block_size());
ASSERT(err >= 0);
VERIFY(err >= 0);
m_cached_bitmaps.append(make<CachedBitmap>(bitmap_block_index, move(block)));
return *m_cached_bitmaps.last();
}
bool Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
{
ASSERT(block_index != 0);
VERIFY(block_index != 0);
LOCKER(m_lock);
auto group_index = group_index_from_block_index(block_index);
@ -1346,7 +1346,7 @@ bool Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
dbgln_if(EXT2_DEBUG, "Ext2FS: block {} state: {} -> {} (in bitmap block {})", block_index, current_state, new_state, bgd.bg_block_bitmap);
if (current_state == new_state) {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
return true;
}
@ -1374,7 +1374,7 @@ bool Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
KResult Ext2FS::create_directory(Ext2FSInode& parent_inode, const String& name, mode_t mode, uid_t uid, gid_t gid)
{
LOCKER(m_lock);
ASSERT(is_directory(mode));
VERIFY(is_directory(mode));
auto inode_or_error = create_inode(parent_inode, name, mode, 0, uid, gid);
if (inode_or_error.is_error())
@ -1424,7 +1424,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FS::create_inode(Ext2FSInode& parent_inode,
// Looks like we're good, time to update the inode bitmap and group+global inode counters.
bool success = set_inode_allocation_state(inode_id, true);
ASSERT(success);
VERIFY(success);
struct timeval now;
kgettimeofday(now);
@ -1450,7 +1450,7 @@ KResultOr<NonnullRefPtr<Inode>> Ext2FS::create_inode(Ext2FSInode& parent_inode,
e2inode.i_flags = 0;
success = write_ext2_inode(inode_id, e2inode);
ASSERT(success);
VERIFY(success);
// We might have cached the fact that this inode didn't exist. Wipe the slate.
m_inode_cache.remove(inode_id);
@ -1487,7 +1487,7 @@ bool Ext2FSInode::populate_lookup_cache() const
RefPtr<Inode> Ext2FSInode::lookup(StringView name)
{
ASSERT(is_directory());
VERIFY(is_directory());
if (!populate_lookup_cache())
return {};
LOCKER(m_lock);
@ -1549,7 +1549,7 @@ KResult Ext2FSInode::decrement_link_count()
LOCKER(m_lock);
if (fs().is_readonly())
return EROFS;
ASSERT(m_raw_inode.i_links_count);
VERIFY(m_raw_inode.i_links_count);
--m_raw_inode.i_links_count;
if (ref_count() == 1 && m_raw_inode.i_links_count == 0)
fs().uncache_inode(index());
@ -1565,7 +1565,7 @@ void Ext2FS::uncache_inode(InodeIndex index)
KResultOr<size_t> Ext2FSInode::directory_entry_count() const
{
ASSERT(is_directory());
VERIFY(is_directory());
LOCKER(m_lock);
populate_lookup_cache();
return m_lookup_cache.size();

View file

@ -134,13 +134,13 @@ void FIFO::detach(Direction direction)
#if FIFO_DEBUG
klog() << "close reader (" << m_readers << " - 1)";
#endif
ASSERT(m_readers);
VERIFY(m_readers);
--m_readers;
} else if (direction == Direction::Writer) {
#if FIFO_DEBUG
klog() << "close writer (" << m_writers << " - 1)";
#endif
ASSERT(m_writers);
VERIFY(m_writers);
--m_writers;
}

View file

@ -50,7 +50,7 @@ public:
virtual bool should_add_blocker(Thread::Blocker& b, void* data) override
{
ASSERT(b.blocker_type() == Thread::Blocker::Type::File);
VERIFY(b.blocker_type() == Thread::Blocker::Type::File);
auto& blocker = static_cast<Thread::FileBlocker&>(b);
return !blocker.unblock(true, data);
}
@ -59,7 +59,7 @@ public:
{
ScopedSpinLock lock(m_lock);
do_unblock([&](auto& b, void* data, bool&) {
ASSERT(b.blocker_type() == Thread::Blocker::Type::File);
VERIFY(b.blocker_type() == Thread::Blocker::Type::File);
auto& blocker = static_cast<Thread::FileBlocker&>(b);
return blocker.unblock(false, data);
});
@ -159,7 +159,7 @@ protected:
private:
ALWAYS_INLINE void do_evaluate_block_conditions()
{
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
block_condition().unblock();
}

View file

@ -107,7 +107,7 @@ Thread::FileBlocker::BlockFlags FileDescription::should_unblock(Thread::FileBloc
if ((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::SocketFlags) {
auto* sock = socket();
ASSERT(sock);
VERIFY(sock);
if (((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::Accept) && sock->can_accept())
unblock_flags |= (u32)Thread::FileBlocker::BlockFlags::Accept;
if (((u32)block_flags & (u32)Thread::FileBlocker::BlockFlags::Connect) && sock->setup_state() == Socket::SetupState::Completed)
@ -205,8 +205,8 @@ bool FileDescription::can_read() const
KResultOr<NonnullOwnPtr<KBuffer>> FileDescription::read_entire_file()
{
// HACK ALERT: (This entire function)
ASSERT(m_file->is_inode());
ASSERT(m_inode);
VERIFY(m_file->is_inode());
VERIFY(m_inode);
return m_inode->read_entire(this);
}

View file

@ -95,7 +95,7 @@ void FS::lock_all()
void FS::set_block_size(size_t block_size)
{
ASSERT(block_size > 0);
VERIFY(block_size > 0);
if (block_size == m_block_size)
return;
m_block_size = block_size;

View file

@ -49,7 +49,7 @@ SpinLock<u32>& Inode::all_inodes_lock()
InlineLinkedList<Inode>& Inode::all_with_lock()
{
ASSERT(s_all_inodes_lock.is_locked());
VERIFY(s_all_inodes_lock.is_locked());
return *s_list;
}
@ -66,7 +66,7 @@ void Inode::sync()
}
for (auto& inode : inodes) {
ASSERT(inode.is_metadata_dirty());
VERIFY(inode.is_metadata_dirty());
inode.flush_metadata();
}
}
@ -83,7 +83,7 @@ KResultOr<NonnullOwnPtr<KBuffer>> Inode::read_entire(FileDescription* descriptio
nread = read_bytes(offset, sizeof(buffer), buf, description);
if (nread < 0)
return KResult((ErrnoCode)-nread);
ASSERT(nread <= (ssize_t)sizeof(buffer));
VERIFY(nread <= (ssize_t)sizeof(buffer));
if (nread <= 0)
break;
builder.append((const char*)buffer, nread);
@ -203,27 +203,27 @@ bool Inode::unbind_socket()
void Inode::register_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
{
LOCKER(m_lock);
ASSERT(!m_watchers.contains(&watcher));
VERIFY(!m_watchers.contains(&watcher));
m_watchers.set(&watcher);
}
void Inode::unregister_watcher(Badge<InodeWatcher>, InodeWatcher& watcher)
{
LOCKER(m_lock);
ASSERT(m_watchers.contains(&watcher));
VERIFY(m_watchers.contains(&watcher));
m_watchers.remove(&watcher);
}
NonnullRefPtr<FIFO> Inode::fifo()
{
LOCKER(m_lock);
ASSERT(metadata().is_fifo());
VERIFY(metadata().is_fifo());
// FIXME: Release m_fifo when it is closed by all readers and writers
if (!m_fifo)
m_fifo = FIFO::create(metadata().uid);
ASSERT(m_fifo);
VERIFY(m_fifo);
return *m_fifo;
}
@ -233,7 +233,7 @@ void Inode::set_metadata_dirty(bool metadata_dirty)
if (metadata_dirty) {
// Sanity check.
ASSERT(!fs().is_readonly());
VERIFY(!fs().is_readonly());
}
if (m_metadata_dirty == metadata_dirty)

View file

@ -122,8 +122,8 @@ KResultOr<Region*> InodeFile::mmap(Process& process, FileDescription& descriptio
String InodeFile::absolute_path(const FileDescription& description) const
{
ASSERT_NOT_REACHED();
ASSERT(description.custody());
VERIFY_NOT_REACHED();
VERIFY(description.custody());
return description.absolute_path();
}
@ -140,15 +140,15 @@ KResult InodeFile::truncate(u64 size)
KResult InodeFile::chown(FileDescription& description, uid_t uid, gid_t gid)
{
ASSERT(description.inode() == m_inode);
ASSERT(description.custody());
VERIFY(description.inode() == m_inode);
VERIFY(description.custody());
return VFS::the().chown(*description.custody(), uid, gid);
}
KResult InodeFile::chmod(FileDescription& description, mode_t mode)
{
ASSERT(description.inode() == m_inode);
ASSERT(description.custody());
VERIFY(description.inode() == m_inode);
VERIFY(description.custody());
return VFS::the().chmod(*description.custody(), mode);
}

View file

@ -60,7 +60,7 @@ bool InodeWatcher::can_write(const FileDescription&, size_t) const
KResultOr<size_t> InodeWatcher::read(FileDescription&, size_t, UserOrKernelBuffer& buffer, size_t buffer_size)
{
LOCKER(m_lock);
ASSERT(!m_queue.is_empty() || !m_inode);
VERIFY(!m_queue.is_empty() || !m_inode);
if (!m_inode)
return 0;

View file

@ -44,7 +44,7 @@ Plan9FS::~Plan9FS()
{
// Make sure to destroy the root inode before the FS gets destroyed.
if (m_root_inode) {
ASSERT(m_root_inode->ref_count() == 1);
VERIFY(m_root_inode->ref_count() == 1);
m_root_inode = nullptr;
}
}
@ -153,7 +153,7 @@ public:
template<typename N>
Decoder& read_number(N& number)
{
ASSERT(sizeof(number) <= m_data.length());
VERIFY(sizeof(number) <= m_data.length());
memcpy(&number, m_data.characters_without_null_termination(), sizeof(number));
m_data = m_data.substring_view(sizeof(number), m_data.length() - sizeof(number));
return *this;
@ -170,14 +170,14 @@ public:
template<typename T>
Message& operator>>(T& t)
{
ASSERT(m_have_been_built);
VERIFY(m_have_been_built);
m_built.decoder >> t;
return *this;
}
StringView read_data()
{
ASSERT(m_have_been_built);
VERIFY(m_have_been_built);
return m_built.decoder.read_data();
}
@ -197,7 +197,7 @@ private:
template<typename N>
Message& append_number(N number)
{
ASSERT(!m_have_been_built);
VERIFY(!m_have_been_built);
m_builder.append(reinterpret_cast<const char*>(&number), sizeof(number));
return *this;
}
@ -330,7 +330,7 @@ Plan9FS::Message::Decoder& Plan9FS::Message::Decoder::operator>>(StringView& str
{
u16 length;
*this >> length;
ASSERT(length <= m_data.length());
VERIFY(length <= m_data.length());
string = m_data.substring_view(0, length);
m_data = m_data.substring_view_starting_after_substring(string);
return *this;
@ -340,7 +340,7 @@ StringView Plan9FS::Message::Decoder::read_data()
{
u32 length;
*this >> length;
ASSERT(length <= m_data.length());
VERIFY(length <= m_data.length());
auto data = m_data.substring_view(0, length);
m_data = m_data.substring_view_starting_after_substring(data);
return data;
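
For readers unfamiliar with 9P: strings and data blobs are length-prefixed on the wire (a u16 prefix for strings, u32 for data), which is exactly what the VERIFYs above bound-check before slicing. A standalone sketch of the same decode step, assuming a little-endian host to match 9P's wire order:

#include <cstdint>
#include <cstring>
#include <string_view>

// Sketch of 9P-style length-prefixed decoding (illustrative; the kernel's
// Decoder VERIFYs that length <= remaining data before slicing).
std::string_view read_prefixed_string(std::string_view& data)
{
    uint16_t length = 0; // 9P strings carry a u16 length prefix
    if (data.size() < sizeof(length))
        return {};
    std::memcpy(&length, data.data(), sizeof(length)); // assumes little-endian host
    data.remove_prefix(sizeof(length));
    if (length > data.size())
        return {}; // kernel equivalent: VERIFY(length <= m_data.length())
    std::string_view result = data.substr(0, length);
    data.remove_prefix(length);
    return result;
}
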
@ -401,12 +401,12 @@ Plan9FS::Message& Plan9FS::Message::operator=(Message&& message)
const KBuffer& Plan9FS::Message::build()
{
ASSERT(!m_have_been_built);
VERIFY(!m_have_been_built);
auto tmp_buffer = m_builder.build();
// FIXME: We should not assume success here.
ASSERT(tmp_buffer);
VERIFY(tmp_buffer);
m_have_been_built = true;
m_builder.~KBufferBuilder();
@ -470,7 +470,7 @@ bool Plan9FS::Plan9FSBlockCondition::should_add_blocker(Thread::Blocker& b, void
void Plan9FS::Plan9FSBlockCondition::unblock_completed(u16 tag)
{
unblock([&](Thread::Blocker& b, void*, bool&) {
ASSERT(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
VERIFY(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
auto& blocker = static_cast<Blocker&>(b);
return blocker.unblock(tag);
});
@ -479,7 +479,7 @@ void Plan9FS::Plan9FSBlockCondition::unblock_completed(u16 tag)
void Plan9FS::Plan9FSBlockCondition::unblock_all()
{
unblock([&](Thread::Blocker& b, void*, bool&) {
ASSERT(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
VERIFY(b.blocker_type() == Thread::Blocker::Type::Plan9FS);
auto& blocker = static_cast<Blocker&>(b);
return blocker.unblock();
});
@ -498,13 +498,13 @@ bool Plan9FS::is_complete(const ReceiveCompletion& completion)
LOCKER(m_lock);
if (m_completions.contains(completion.tag)) {
// If it's still in the map then it can't be complete
ASSERT(!completion.completed);
VERIFY(!completion.completed);
return false;
}
// if it's not in the map anymore, it must be complete. But we MUST
// hold m_lock to be able to check completion.completed!
ASSERT(completion.completed);
VERIFY(completion.completed);
return true;
}

View file

@ -150,14 +150,14 @@ static inline ProcFileType to_proc_file_type(const InodeIdentifier& identifier)
static inline int to_fd(const InodeIdentifier& identifier)
{
ASSERT(to_proc_parent_directory(identifier) == PDI_PID_fd);
VERIFY(to_proc_parent_directory(identifier) == PDI_PID_fd);
return (identifier.index().value() & 0xff) - FI_MaxStaticFileIndex;
}
static inline size_t to_sys_index(const InodeIdentifier& identifier)
{
ASSERT(to_proc_parent_directory(identifier) == PDI_Root_sys);
ASSERT(to_proc_file_type(identifier) == FI_Root_sys_variable);
VERIFY(to_proc_parent_directory(identifier) == PDI_Root_sys);
VERIFY(to_proc_file_type(identifier) == FI_Root_sys_variable);
return identifier.index().value() >> 16u;
}
@ -178,7 +178,7 @@ static inline InodeIdentifier to_identifier_with_stack(unsigned fsid, ThreadID t
static inline InodeIdentifier sys_var_to_identifier(unsigned fsid, unsigned index)
{
ASSERT(index < 256);
VERIFY(index < 256);
return { fsid, (PDI_Root_sys << 12u) | (index << 16u) | FI_Root_sys_variable };
}
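
These helpers pack several facts into a single ProcFS inode index. The field positions below are inferred from the shifts in this file; treat the layout as an illustration rather than a specification:

#include <cstdint>

// Field positions inferred from the shifts above (an assumption, not an
// authoritative layout):
//   bits 16..31  sys variable index   (index << 16)
//   bits 12..15  parent directory tag (PDI_Root_sys << 12)
//   bits  0..11  static file type     (FI_Root_sys_variable)
constexpr uint32_t pack_sys_identifier(uint32_t pdi, uint32_t index, uint32_t file_type)
{
    return (pdi << 12u) | (index << 16u) | file_type;
}
constexpr uint32_t unpack_sys_index(uint32_t packed) { return packed >> 16u; }
static_assert(unpack_sys_index(pack_sys_identifier(1, 42, 3)) == 42);
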
@ -199,7 +199,7 @@ static inline InodeIdentifier to_parent_id(const InodeIdentifier& identifier)
case PDI_PID_stacks:
return to_identifier(identifier.fsid(), PDI_PID, to_pid(identifier), FI_PID_stacks);
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
#if 0
@ -436,7 +436,7 @@ static bool procfs$devices(InodeIdentifier, KBufferBuilder& builder)
else if (device.is_character_device())
obj.add("type", "character");
else
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
});
array.finish();
return true;
@ -633,7 +633,7 @@ static bool procfs$pid_exe(InodeIdentifier identifier, KBufferBuilder& builder)
if (!process)
return false;
auto* custody = process->executable();
ASSERT(custody);
VERIFY(custody);
builder.append(custody->absolute_path().bytes());
return true;
}
@ -884,14 +884,14 @@ SysVariable& SysVariable::for_inode(InodeIdentifier id)
if (index >= sys_variables().size())
return sys_variables()[0];
auto& variable = sys_variables()[index];
ASSERT(variable.address);
VERIFY(variable.address);
return variable;
}
static bool read_sys_bool(InodeIdentifier inode_id, KBufferBuilder& builder)
{
auto& variable = SysVariable::for_inode(inode_id);
ASSERT(variable.type == SysVariable::Type::Boolean);
VERIFY(variable.type == SysVariable::Type::Boolean);
u8 buffer[2];
auto* lockable_bool = reinterpret_cast<Lockable<bool>*>(variable.address);
@ -907,7 +907,7 @@ static bool read_sys_bool(InodeIdentifier inode_id, KBufferBuilder& builder)
static ssize_t write_sys_bool(InodeIdentifier inode_id, const UserOrKernelBuffer& buffer, size_t size)
{
auto& variable = SysVariable::for_inode(inode_id);
ASSERT(variable.type == SysVariable::Type::Boolean);
VERIFY(variable.type == SysVariable::Type::Boolean);
char value = 0;
bool did_read = false;
@ -920,7 +920,7 @@ static ssize_t write_sys_bool(InodeIdentifier inode_id, const UserOrKernelBuffer
});
if (nread < 0)
return nread;
ASSERT(nread == 0 || (nread == 1 && did_read));
VERIFY(nread == 0 || (nread == 1 && did_read));
if (nread == 0 || !(value == '0' || value == '1'))
return (ssize_t)size;
@ -936,7 +936,7 @@ static ssize_t write_sys_bool(InodeIdentifier inode_id, const UserOrKernelBuffer
static bool read_sys_string(InodeIdentifier inode_id, KBufferBuilder& builder)
{
auto& variable = SysVariable::for_inode(inode_id);
ASSERT(variable.type == SysVariable::Type::String);
VERIFY(variable.type == SysVariable::Type::String);
auto* lockable_string = reinterpret_cast<Lockable<String>*>(variable.address);
LOCKER(lockable_string->lock(), Lock::Mode::Shared);
@ -947,7 +947,7 @@ static bool read_sys_string(InodeIdentifier inode_id, KBufferBuilder& builder)
static ssize_t write_sys_string(InodeIdentifier inode_id, const UserOrKernelBuffer& buffer, size_t size)
{
auto& variable = SysVariable::for_inode(inode_id);
ASSERT(variable.type == SysVariable::Type::String);
VERIFY(variable.type == SysVariable::Type::String);
auto string_copy = buffer.copy_into_string(size);
if (string_copy.is_null())
@ -1032,7 +1032,7 @@ RefPtr<Inode> ProcFS::get_inode(InodeIdentifier inode_id) const
}
auto inode = adopt(*new ProcFSInode(const_cast<ProcFS&>(*this), inode_id.index()));
auto result = m_inodes.set(inode_id.index().value(), inode.ptr());
ASSERT(result == ((it == m_inodes.end()) ? AK::HashSetResult::InsertedNewEntry : AK::HashSetResult::ReplacedExistingEntry));
VERIFY(result == ((it == m_inodes.end()) ? AK::HashSetResult::InsertedNewEntry : AK::HashSetResult::ReplacedExistingEntry));
return inode;
}
@ -1081,7 +1081,7 @@ KResult ProcFSInode::refresh_data(FileDescription& description) const
bool (*read_callback)(InodeIdentifier, KBufferBuilder&) = nullptr;
if (directory_entry) {
read_callback = directory_entry->read_callback;
ASSERT(read_callback);
VERIFY(read_callback);
} else {
switch (to_proc_parent_directory(identifier())) {
case PDI_PID_fd:
@ -1093,7 +1093,7 @@ KResult ProcFSInode::refresh_data(FileDescription& description) const
case PDI_Root_sys:
switch (SysVariable::for_inode(identifier()).type) {
case SysVariable::Type::Invalid:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
case SysVariable::Type::Boolean:
read_callback = read_sys_bool;
break;
@ -1103,10 +1103,10 @@ KResult ProcFSInode::refresh_data(FileDescription& description) const
}
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
ASSERT(read_callback);
VERIFY(read_callback);
}
if (!cached_data)
@ -1231,8 +1231,8 @@ InodeMetadata ProcFSInode::metadata() const
ssize_t ProcFSInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer& buffer, FileDescription* description) const
{
dbgln_if(PROCFS_DEBUG, "ProcFS: read_bytes offset: {} count: {}", offset, count);
ASSERT(offset >= 0);
ASSERT(buffer.user_or_kernel_ptr());
VERIFY(offset >= 0);
VERIFY(buffer.user_or_kernel_ptr());
if (!description)
return -EIO;
@ -1350,7 +1350,7 @@ KResult ProcFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntr
RefPtr<Inode> ProcFSInode::lookup(StringView name)
{
ASSERT(is_directory());
VERIFY(is_directory());
if (name == ".")
return this;
if (name == "..")
@ -1490,7 +1490,7 @@ ssize_t ProcFSInode::write_bytes(off_t offset, ssize_t size, const UserOrKernelB
if (to_proc_parent_directory(identifier()) == PDI_Root_sys) {
switch (SysVariable::for_inode(identifier()).type) {
case SysVariable::Type::Invalid:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
case SysVariable::Type::Boolean:
write_callback = write_sys_bool;
break;
@ -1506,9 +1506,9 @@ ssize_t ProcFSInode::write_bytes(off_t offset, ssize_t size, const UserOrKernelB
write_callback = directory_entry->write_callback;
}
ASSERT(is_persistent_inode(identifier()));
VERIFY(is_persistent_inode(identifier()));
// FIXME: Being able to write into ProcFS at a non-zero offset seems like something we should maybe support...
ASSERT(offset == 0);
VERIFY(offset == 0);
ssize_t nwritten = write_callback(identifier(), buffer, (size_t)size);
if (nwritten < 0)
klog() << "ProcFS: Writing " << size << " bytes failed: " << nwritten;
@ -1565,7 +1565,7 @@ KResultOr<NonnullRefPtr<Custody>> ProcFSInode::resolve_as_link(Custody& base, Re
res = &process->root_directory();
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
if (!res)
@ -1666,7 +1666,7 @@ KResult ProcFSInode::remove_child([[maybe_unused]] const StringView& name)
KResultOr<size_t> ProcFSInode::directory_entry_count() const
{
ASSERT(is_directory());
VERIFY(is_directory());
size_t count = 0;
KResult result = traverse_as_directory([&count](auto&) {
++count;

View file

@ -131,19 +131,19 @@ private:
// ^Inode
virtual KResult attach(FileDescription&) override;
virtual void did_seek(FileDescription&, off_t) override;
virtual ssize_t read_bytes(off_t, ssize_t, UserOrKernelBuffer&, FileDescription*) const override { ASSERT_NOT_REACHED(); }
virtual ssize_t read_bytes(off_t, ssize_t, UserOrKernelBuffer&, FileDescription*) const override { VERIFY_NOT_REACHED(); }
virtual InodeMetadata metadata() const override;
virtual KResult traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)>) const override { ASSERT_NOT_REACHED(); }
virtual KResult traverse_as_directory(Function<bool(const FS::DirectoryEntryView&)>) const override { VERIFY_NOT_REACHED(); }
virtual RefPtr<Inode> lookup(StringView name) override;
virtual void flush_metadata() override {};
virtual ssize_t write_bytes(off_t, ssize_t, const UserOrKernelBuffer&, FileDescription*) override { ASSERT_NOT_REACHED(); }
virtual ssize_t write_bytes(off_t, ssize_t, const UserOrKernelBuffer&, FileDescription*) override { VERIFY_NOT_REACHED(); }
virtual KResultOr<NonnullRefPtr<Inode>> create_child(const String& name, mode_t, dev_t, uid_t, gid_t) override;
virtual KResult add_child(Inode&, const StringView& name, mode_t) override;
virtual KResult remove_child(const StringView& name) override;
virtual KResultOr<size_t> directory_entry_count() const override;
virtual KResult chmod(mode_t) override { return EINVAL; }
virtual KResult chown(uid_t, gid_t) override { return EINVAL; }
virtual KResultOr<NonnullRefPtr<Custody>> resolve_as_link(Custody&, RefPtr<Custody>*, int, int) const override { ASSERT_NOT_REACHED(); }
virtual KResultOr<NonnullRefPtr<Custody>> resolve_as_link(Custody&, RefPtr<Custody>*, int, int) const override { VERIFY_NOT_REACHED(); }
virtual FileDescription* preopen_fd() override { return m_fd; }
ProcFS& fs() { return static_cast<ProcFS&>(Inode::fs()); }

View file

@ -52,7 +52,7 @@ bool TmpFS::initialize()
NonnullRefPtr<Inode> TmpFS::root_inode() const
{
ASSERT(!m_root_inode.is_null());
VERIFY(!m_root_inode.is_null());
return *m_root_inode;
}
@ -60,7 +60,7 @@ NonnullRefPtr<Inode> TmpFS::root_inode() const
void TmpFS::register_inode(TmpFSInode& inode)
{
LOCKER(m_lock);
ASSERT(inode.identifier().fsid() == fsid());
VERIFY(inode.identifier().fsid() == fsid());
auto index = inode.identifier().index();
m_inodes.set(index, inode);
@ -69,7 +69,7 @@ void TmpFS::register_inode(TmpFSInode& inode)
void TmpFS::unregister_inode(InodeIdentifier identifier)
{
LOCKER(m_lock);
ASSERT(identifier.fsid() == fsid());
VERIFY(identifier.fsid() == fsid());
m_inodes.remove(identifier.index());
}
@ -84,7 +84,7 @@ unsigned TmpFS::next_inode_index()
RefPtr<Inode> TmpFS::get_inode(InodeIdentifier identifier) const
{
LOCKER(m_lock, Lock::Mode::Shared);
ASSERT(identifier.fsid() == fsid());
VERIFY(identifier.fsid() == fsid());
auto it = m_inodes.find(identifier.index());
if (it == m_inodes.end())
@ -149,9 +149,9 @@ KResult TmpFSInode::traverse_as_directory(Function<bool(const FS::DirectoryEntry
ssize_t TmpFSInode::read_bytes(off_t offset, ssize_t size, UserOrKernelBuffer& buffer, FileDescription*) const
{
LOCKER(m_lock, Lock::Mode::Shared);
ASSERT(!is_directory());
ASSERT(size >= 0);
ASSERT(offset >= 0);
VERIFY(!is_directory());
VERIFY(size >= 0);
VERIFY(offset >= 0);
if (!m_content)
return 0;
@ -170,8 +170,8 @@ ssize_t TmpFSInode::read_bytes(off_t offset, ssize_t size, UserOrKernelBuffer& b
ssize_t TmpFSInode::write_bytes(off_t offset, ssize_t size, const UserOrKernelBuffer& buffer, FileDescription*)
{
LOCKER(m_lock);
ASSERT(!is_directory());
ASSERT(offset >= 0);
VERIFY(!is_directory());
VERIFY(offset >= 0);
auto result = prepare_to_write_data();
if (result.is_error())
@ -217,7 +217,7 @@ ssize_t TmpFSInode::write_bytes(off_t offset, ssize_t size, const UserOrKernelBu
RefPtr<Inode> TmpFSInode::lookup(StringView name)
{
LOCKER(m_lock, Lock::Mode::Shared);
ASSERT(is_directory());
VERIFY(is_directory());
if (name == ".")
return this;
@ -233,7 +233,7 @@ RefPtr<Inode> TmpFSInode::lookup(StringView name)
KResultOr<size_t> TmpFSInode::directory_entry_count() const
{
LOCKER(m_lock, Lock::Mode::Shared);
ASSERT(is_directory());
VERIFY(is_directory());
return 2 + m_children.size();
}
@ -301,8 +301,8 @@ KResultOr<NonnullRefPtr<Inode>> TmpFSInode::create_child(const String& name, mod
KResult TmpFSInode::add_child(Inode& child, const StringView& name, mode_t)
{
LOCKER(m_lock);
ASSERT(is_directory());
ASSERT(child.fsid() == fsid());
VERIFY(is_directory());
VERIFY(child.fsid() == fsid());
if (name.length() > NAME_MAX)
return ENAMETOOLONG;
@ -315,7 +315,7 @@ KResult TmpFSInode::add_child(Inode& child, const StringView& name, mode_t)
KResult TmpFSInode::remove_child(const StringView& name)
{
LOCKER(m_lock);
ASSERT(is_directory());
VERIFY(is_directory());
if (name == "." || name == "..")
return KSuccess;
@ -332,7 +332,7 @@ KResult TmpFSInode::remove_child(const StringView& name)
KResult TmpFSInode::truncate(u64 size)
{
LOCKER(m_lock);
ASSERT(!is_directory());
VERIFY(!is_directory());
if (size == 0)
m_content.clear();

View file

@ -67,7 +67,7 @@ UNMAP_AFTER_INIT VFS::~VFS()
InodeIdentifier VFS::root_inode_id() const
{
ASSERT(m_root_inode);
VERIFY(m_root_inode);
return m_root_inode->identifier();
}
@ -211,8 +211,8 @@ KResult VFS::traverse_directory_inode(Inode& dir_inode, Function<bool(const FS::
bool is_root_inode = dir_inode.identifier() == dir_inode.fs().root_inode()->identifier();
if (is_root_inode && !is_vfs_root(dir_inode.identifier()) && entry.name == "..") {
auto mount = find_mount_for_guest(dir_inode);
ASSERT(mount);
ASSERT(mount->host());
VERIFY(mount);
VERIFY(mount->host());
resolved_inode = mount->host()->identifier();
}
callback({ entry.name, resolved_inode, entry.file_type });
@ -697,7 +697,7 @@ KResult VFS::unlink(StringView path, Custody& base)
// We have just checked that the inode is not a directory, and thus it's not
// the root. So it should have a parent. Note that this would be invalidated
// if we were to support bind-mounting regular files on top of the root.
ASSERT(parent_custody);
VERIFY(parent_custody);
auto& parent_inode = parent_custody->inode();
auto current_process = Process::current();

View file

@ -32,9 +32,9 @@ namespace Kernel {
bool FutexQueue::should_add_blocker(Thread::Blocker& b, void* data)
{
ASSERT(data != nullptr); // Thread that is requesting to be blocked
ASSERT(m_lock.is_locked());
ASSERT(b.blocker_type() == Thread::Blocker::Type::Futex);
VERIFY(data != nullptr); // Thread that is requesting to be blocked
VERIFY(m_lock.is_locked());
VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: should block thread {}", this, *static_cast<Thread*>(data));
@ -50,12 +50,12 @@ u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function<FutexQueue*()>& ge
u32 did_wake = 0, did_requeue = 0;
do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
ASSERT(data);
ASSERT(b.blocker_type() == Thread::Blocker::Type::Futex);
VERIFY(data);
VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
auto& blocker = static_cast<Thread::FutexBlocker&>(b);
dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue unblocking {}", this, *static_cast<Thread*>(data));
ASSERT(did_wake < wake_count);
VERIFY(did_wake < wake_count);
if (blocker.unblock()) {
if (++did_wake >= wake_count)
stop_iterating = true;
@ -72,7 +72,7 @@ u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function<FutexQueue*()>& ge
// While still holding m_lock, notify each blocker
for (auto& info : blockers_to_requeue) {
ASSERT(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
VERIFY(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
auto& blocker = *static_cast<Thread::FutexBlocker*>(info.blocker);
blocker.begin_requeue();
}
@ -84,7 +84,7 @@ u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function<FutexQueue*()>& ge
// Now that we have the lock of the target, append the blockers
// and notify them that they completed the move
for (auto& info : blockers_to_requeue) {
ASSERT(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
VERIFY(info.blocker->blocker_type() == Thread::Blocker::Type::Futex);
auto& blocker = *static_cast<Thread::FutexBlocker*>(info.blocker);
blocker.finish_requeue(*target_futex_queue);
}
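
Context for the requeue path above: like FUTEX_CMP_REQUEUE, it wakes at most wake_count waiters and migrates the remainder to another queue without waking them, avoiding a thundering herd. A hedged userspace sketch of that shape (wake() is a stub standing in for actual unblocking):

#include <cstddef>
#include <deque>

struct Waiter { };
static void wake(Waiter&) { /* stub: would unblock the waiting thread */ }

// Sketch: wake at most wake_count waiters, requeue the rest without waking them.
void wake_n_requeue_sketch(std::deque<Waiter>& from, std::deque<Waiter>& to, size_t wake_count)
{
    while (wake_count > 0 && !from.empty()) {
        wake(from.front());
        from.pop_front();
        --wake_count;
    }
    // Everyone left over migrates to the target queue still asleep,
    // so a later wake on that queue handles them one at a time.
    while (!from.empty()) {
        to.push_back(from.front());
        from.pop_front();
    }
}
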
@ -107,12 +107,12 @@ u32 FutexQueue::wake_n(u32 wake_count, const Optional<u32>& bitset, bool& is_emp
dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n({})", this, wake_count);
u32 did_wake = 0;
do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
ASSERT(data);
ASSERT(b.blocker_type() == Thread::Blocker::Type::Futex);
VERIFY(data);
VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
auto& blocker = static_cast<Thread::FutexBlocker&>(b);
dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n unblocking {}", this, *static_cast<Thread*>(data));
ASSERT(did_wake < wake_count);
VERIFY(did_wake < wake_count);
if (bitset.has_value() ? blocker.unblock_bitset(bitset.value()) : blocker.unblock()) {
if (++did_wake >= wake_count)
stop_iterating = true;
@ -130,8 +130,8 @@ u32 FutexQueue::wake_all(bool& is_empty)
dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_all", this);
u32 did_wake = 0;
do_unblock([&](Thread::Blocker& b, void* data, bool&) {
ASSERT(data);
ASSERT(b.blocker_type() == Thread::Blocker::Type::Futex);
VERIFY(data);
VERIFY(b.blocker_type() == Thread::Blocker::Type::Futex);
auto& blocker = static_cast<Thread::FutexBlocker&>(b);
dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_all unblocking {}", this, *static_cast<Thread*>(data));
if (blocker.unblock(true)) {

View file

@ -56,7 +56,7 @@ public:
{
// To keep the alignment of the memory passed in, place the bitmap
// at the end of the memory block.
ASSERT(m_total_chunks * CHUNK_SIZE + (m_total_chunks + 7) / 8 <= memory_size);
VERIFY(m_total_chunks * CHUNK_SIZE + (m_total_chunks + 7) / 8 <= memory_size);
}
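
The VERIFY in the constructor sizes the bitmap with (m_total_chunks + 7) / 8, the usual integer ceiling of n / 8 that converts a bit count into bytes. A tiny illustration:

#include <cstddef>

// Ceiling division: bytes needed to hold an n-bit bitmap.
constexpr size_t bitmap_bytes(size_t total_chunks)
{
    return (total_chunks + 7) / 8;
}
static_assert(bitmap_bytes(1) == 1, "1 bit still needs a whole byte");
static_assert(bitmap_bytes(8) == 1, "exactly one byte");
static_assert(bitmap_bytes(9) == 2, "one bit over rounds up");
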
~Heap()
{
@ -108,13 +108,13 @@ public:
if (!ptr)
return;
auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
ASSERT((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
ASSERT((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
VERIFY((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
VERIFY((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
FlatPtr start = ((FlatPtr)a - (FlatPtr)m_chunks) / CHUNK_SIZE;
m_bitmap.set_range(start, a->allocation_size_in_chunks, false);
ASSERT(m_allocated_chunks >= a->allocation_size_in_chunks);
VERIFY(m_allocated_chunks >= a->allocation_size_in_chunks);
m_allocated_chunks -= a->allocation_size_in_chunks;
if constexpr (HEAP_SCRUB_BYTE_FREE != 0) {
@ -129,8 +129,8 @@ public:
return h.allocate(new_size);
auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
ASSERT((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
ASSERT((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
VERIFY((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
VERIFY((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
size_t old_size = a->allocation_size_in_chunks * CHUNK_SIZE;
@ -319,7 +319,7 @@ public:
return;
}
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void* reallocate(void* ptr, size_t new_size)
@ -330,12 +330,12 @@ public:
if (subheap->heap.contains(ptr))
return subheap->heap.reallocate(ptr, new_size, *this);
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
HeapType& add_subheap(void* memory, size_t memory_size)
{
ASSERT(memory_size > sizeof(SubHeap));
VERIFY(memory_size > sizeof(SubHeap));
// Place the SubHeap structure at the beginning of the new memory block
memory_size -= sizeof(SubHeap);

View file

@ -86,7 +86,7 @@ public:
void dealloc(void* ptr)
{
ASSERT(ptr);
VERIFY(ptr);
if (ptr < m_base || ptr >= m_end) {
kfree(ptr);
return;
@ -159,7 +159,7 @@ void* slab_alloc(size_t slab_size)
return s_slab_allocator_64.alloc();
if (slab_size <= 128)
return s_slab_allocator_128.alloc();
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void slab_dealloc(void* ptr, size_t slab_size)
@ -172,7 +172,7 @@ void slab_dealloc(void* ptr, size_t slab_size)
return s_slab_allocator_64.dealloc(ptr);
if (slab_size <= 128)
return s_slab_allocator_128.dealloc(ptr);
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void slab_alloc_stats(Function<void(size_t slab_size, size_t allocated, size_t free)> callback)

View file

@ -71,7 +71,7 @@ struct KmallocGlobalHeap {
klog() << "kmalloc(): Cannot expand heap before MM is initialized!";
return false;
}
ASSERT(!m_adding);
VERIFY(!m_adding);
TemporaryChange change(m_adding, true);
// At this point we have very little memory left. Any attempt to
// kmalloc() could fail, so use our backup memory first, so we
@ -231,7 +231,7 @@ void* kmalloc_eternal(size_t size)
ScopedSpinLock lock(s_lock);
void* ptr = s_next_eternal_ptr;
s_next_eternal_ptr += size;
ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
VERIFY(s_next_eternal_ptr < s_end_of_eternal_range);
g_kmalloc_bytes_eternal += size;
return ptr;
}
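
kmalloc_eternal() is a classic bump allocator: it only moves a pointer forward, never frees, and the VERIFY catches the pointer running off the end of the reserved range. A minimal userspace sketch of the same idea (alignment handling omitted):

#include <cstddef>
#include <cstdint>

// Minimal bump-allocator sketch: hand out memory from a fixed arena and
// never free. (Illustrative; the kernel VERIFYs instead of returning null.)
struct BumpAllocator {
    uint8_t* next;
    uint8_t* end;

    void* allocate(size_t size)
    {
        if (next + size > end)
            return nullptr;
        void* ptr = next;
        next += size;
        return ptr;
    }
};
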

View file

@ -121,7 +121,7 @@ public:
return IO::in16(get());
if constexpr (sizeof(T) == 1)
return IO::in8(get());
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
template<typename T>
@ -139,7 +139,7 @@ public:
IO::out8(get(), value);
return;
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
inline void out(u32 value, u8 bit_width)
@ -156,7 +156,7 @@ public:
IO::out8(get(), value);
return;
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
bool is_null() const { return m_address == 0; }

View file

@ -142,13 +142,13 @@ bool APIC::initialized()
APIC& APIC::the()
{
ASSERT(APIC::initialized());
VERIFY(APIC::initialized());
return *s_apic;
}
UNMAP_AFTER_INIT void APIC::initialize()
{
ASSERT(!APIC::initialized());
VERIFY(!APIC::initialized());
s_apic.ensure_instance();
}
@ -302,7 +302,7 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
UNMAP_AFTER_INIT void APIC::do_boot_aps()
{
ASSERT(m_processor_enabled_cnt > 1);
VERIFY(m_processor_enabled_cnt > 1);
u32 aps_to_enable = m_processor_enabled_cnt - 1;
// Copy the APIC startup code and variables to P0x00008000
@ -326,7 +326,7 @@ UNMAP_AFTER_INIT void APIC::do_boot_aps()
// Store pointers to all stacks for the APs to use
auto ap_stack_array = APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_stacks);
ASSERT(aps_to_enable == apic_ap_stacks.size());
VERIFY(aps_to_enable == apic_ap_stacks.size());
for (size_t i = 0; i < aps_to_enable; i++) {
ap_stack_array[i] = apic_ap_stacks[i]->vaddr().get() + Thread::default_kernel_stack_size;
#if APIC_DEBUG
@ -429,7 +429,7 @@ UNMAP_AFTER_INIT void APIC::enable(u32 cpu)
}
// Use the CPU# as logical apic id
ASSERT(cpu <= 0xff);
VERIFY(cpu <= 0xff);
write_register(APIC_REG_LD, (read_register(APIC_REG_LD) & 0x00ffffff) | (cpu << 24)); // TODO: only if not in x2apic mode
// read it back to make sure it's actually set
@ -468,18 +468,18 @@ UNMAP_AFTER_INIT void APIC::enable(u32 cpu)
Thread* APIC::get_idle_thread(u32 cpu) const
{
ASSERT(cpu > 0);
VERIFY(cpu > 0);
return m_ap_idle_threads[cpu - 1];
}
UNMAP_AFTER_INIT void APIC::init_finished(u32 cpu)
{
// This method is called once the boot stack is no longer needed
ASSERT(cpu > 0);
ASSERT(cpu < m_processor_enabled_cnt);
VERIFY(cpu > 0);
VERIFY(cpu < m_processor_enabled_cnt);
// Since we're waiting on other APs here, we shouldn't have the
// scheduler lock
ASSERT(!g_scheduler_lock.own_lock());
VERIFY(!g_scheduler_lock.own_lock());
// Notify the BSP that we are done initializing. It will unmap the startup data at P8000
m_apic_ap_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
@ -519,8 +519,8 @@ void APIC::send_ipi(u32 cpu)
#if APIC_SMP_DEBUG
klog() << "SMP: Send IPI from cpu #" << Processor::id() << " to cpu #" << cpu;
#endif
ASSERT(cpu != Processor::id());
ASSERT(cpu < 8);
VERIFY(cpu != Processor::id());
VERIFY(cpu < 8);
wait_for_pending_icr();
write_icr(ICRReg(IRQ_APIC_IPI + IRQ_VECTOR_BASE, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand, cpu));
}
@ -531,8 +531,8 @@ UNMAP_AFTER_INIT APICTimer* APIC::initialize_timers(HardwareTimerBase& calibrati
return nullptr;
// We should only initialize and calibrate the APIC timer once on the BSP!
ASSERT(Processor::id() == 0);
ASSERT(!m_apic_timer);
VERIFY(Processor::id() == 0);
VERIFY(!m_apic_timer);
m_apic_timer = APICTimer::initialize(IRQ_APIC_TIMER, calibration_timer);
return m_apic_timer;
@ -583,7 +583,7 @@ void APIC::setup_local_timer(u32 ticks, TimerMode timer_mode, bool enable)
config |= (1 << 3) | 2;
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
write_register(APIC_REG_TIMER_CONFIGURATION, config);

View file

@ -56,8 +56,8 @@ GenericInterruptHandler::~GenericInterruptHandler()
void GenericInterruptHandler::change_interrupt_number(u8 number)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(!m_disable_remap);
VERIFY_INTERRUPTS_DISABLED();
VERIFY(!m_disable_remap);
unregister_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(interrupt_number()), *this);
m_interrupt_number = number;
register_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(interrupt_number()), *this);

View file

@ -80,7 +80,7 @@ void IOAPIC::map_interrupt_redirection(u8 interrupt_vector)
active_low = false;
break;
case 2:
ASSERT_NOT_REACHED(); // Reserved value
VERIFY_NOT_REACHED(); // Reserved value
case 3:
active_low = true;
break;
@ -96,7 +96,7 @@ void IOAPIC::map_interrupt_redirection(u8 interrupt_vector)
trigger_level_mode = false;
break;
case 2:
ASSERT_NOT_REACHED(); // Reserved value
VERIFY_NOT_REACHED(); // Reserved value
case 3:
trigger_level_mode = true;
break;
@ -127,8 +127,8 @@ bool IOAPIC::is_enabled() const
void IOAPIC::spurious_eoi(const GenericInterruptHandler& handler) const
{
InterruptDisabler disabler;
ASSERT(handler.type() == HandlerType::SpuriousInterruptHandler);
ASSERT(handler.interrupt_number() == APIC::spurious_interrupt_vector());
VERIFY(handler.type() == HandlerType::SpuriousInterruptHandler);
VERIFY(handler.interrupt_number() == APIC::spurious_interrupt_vector());
klog() << "IOAPIC::spurious_eoi - Spurious Interrupt occurred";
}
@ -148,7 +148,7 @@ void IOAPIC::map_isa_interrupts()
active_low = false;
break;
case 2:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
case 3:
active_low = true;
break;
@ -164,7 +164,7 @@ void IOAPIC::map_isa_interrupts()
trigger_level_mode = false;
break;
case 2:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
case 3:
trigger_level_mode = true;
break;
@ -196,7 +196,7 @@ void IOAPIC::reset_redirection_entry(int index) const
void IOAPIC::configure_redirection_entry(int index, u8 interrupt_vector, u8 delivery_mode, bool logical_destination, bool active_low, bool trigger_level_mode, bool masked, u8 destination) const
{
InterruptDisabler disabler;
ASSERT((u32)index < m_redirection_entries_count);
VERIFY((u32)index < m_redirection_entries_count);
u32 redirection_entry1 = interrupt_vector | (delivery_mode & 0b111) << 8 | logical_destination << 11 | active_low << 13 | trigger_level_mode << 15 | masked << 16;
u32 redirection_entry2 = destination << 24;
write_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET, redirection_entry1);
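
The two dwords assembled above follow the standard I/O APIC redirection-entry layout. The packing below restates the shifts from this function as one helper; field positions are read off the code, not quoted from a datasheet:

#include <cstdint>

// Layout implied by the shifts above:
//   low dword:  bits 0..7 vector, 8..10 delivery mode, 11 logical destination,
//               13 active-low polarity, 15 level-triggered, 16 masked
//   high dword: bits 24..31 destination APIC ID
constexpr uint64_t pack_redirection_entry(uint8_t vector, uint8_t delivery_mode,
    bool logical, bool active_low, bool level, bool masked, uint8_t destination)
{
    uint32_t low = vector
        | uint32_t(delivery_mode & 0b111) << 8
        | uint32_t(logical) << 11
        | uint32_t(active_low) << 13
        | uint32_t(level) << 15
        | uint32_t(masked) << 16;
    uint32_t high = uint32_t(destination) << 24;
    return (uint64_t(high) << 32) | low;
}
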
@ -219,7 +219,7 @@ void IOAPIC::mask_all_redirection_entries() const
void IOAPIC::mask_redirection_entry(u8 index) const
{
ASSERT((u32)index < m_redirection_entries_count);
VERIFY((u32)index < m_redirection_entries_count);
u32 redirection_entry = read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET);
if (redirection_entry & (1 << 16))
return;
@ -228,13 +228,13 @@ void IOAPIC::mask_redirection_entry(u8 index) const
bool IOAPIC::is_redirection_entry_masked(u8 index) const
{
ASSERT((u32)index < m_redirection_entries_count);
VERIFY((u32)index < m_redirection_entries_count);
return (read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET) & (1 << 16)) != 0;
}
void IOAPIC::unmask_redirection_entry(u8 index) const
{
ASSERT((u32)index < m_redirection_entries_count);
VERIFY((u32)index < m_redirection_entries_count);
u32 redirection_entry = read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET);
if (!(redirection_entry & (1 << 16)))
return;
@ -249,7 +249,7 @@ bool IOAPIC::is_vector_enabled(u8 interrupt_vector) const
u8 IOAPIC::read_redirection_entry_vector(u8 index) const
{
ASSERT((u32)index < m_redirection_entries_count);
VERIFY((u32)index < m_redirection_entries_count);
return (read_register((index << 1) + IOAPIC_REDIRECTION_ENTRY_OFFSET) & 0xFF);
}
@ -266,52 +266,52 @@ Optional<int> IOAPIC::find_redirection_entry_by_vector(u8 vector) const
void IOAPIC::disable(const GenericInterruptHandler& handler)
{
InterruptDisabler disabler;
ASSERT(!is_hard_disabled());
VERIFY(!is_hard_disabled());
u8 interrupt_vector = handler.interrupt_number();
ASSERT(interrupt_vector >= gsi_base() && interrupt_vector < interrupt_vectors_count());
VERIFY(interrupt_vector >= gsi_base() && interrupt_vector < interrupt_vectors_count());
auto found_index = find_redirection_entry_by_vector(interrupt_vector);
if (!found_index.has_value()) {
map_interrupt_redirection(interrupt_vector);
found_index = find_redirection_entry_by_vector(interrupt_vector);
}
ASSERT(found_index.has_value());
VERIFY(found_index.has_value());
mask_redirection_entry(found_index.value());
}
void IOAPIC::enable(const GenericInterruptHandler& handler)
{
InterruptDisabler disabler;
ASSERT(!is_hard_disabled());
VERIFY(!is_hard_disabled());
u8 interrupt_vector = handler.interrupt_number();
ASSERT(interrupt_vector >= gsi_base() && interrupt_vector < interrupt_vectors_count());
VERIFY(interrupt_vector >= gsi_base() && interrupt_vector < interrupt_vectors_count());
auto found_index = find_redirection_entry_by_vector(interrupt_vector);
if (!found_index.has_value()) {
map_interrupt_redirection(interrupt_vector);
found_index = find_redirection_entry_by_vector(interrupt_vector);
}
ASSERT(found_index.has_value());
VERIFY(found_index.has_value());
unmask_redirection_entry(found_index.value());
}
void IOAPIC::eoi(const GenericInterruptHandler& handler) const
{
InterruptDisabler disabler;
ASSERT(!is_hard_disabled());
ASSERT(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
ASSERT(handler.type() != HandlerType::SpuriousInterruptHandler);
VERIFY(!is_hard_disabled());
VERIFY(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
VERIFY(handler.type() != HandlerType::SpuriousInterruptHandler);
APIC::the().eoi();
}
u16 IOAPIC::get_isr() const
{
InterruptDisabler disabler;
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
u16 IOAPIC::get_irr() const
{
InterruptDisabler disabler;
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void IOAPIC::write_register(u32 index, u32 value) const

View file

@ -46,7 +46,7 @@ bool IRQHandler::eoi()
{
dbgln_if(IRQ_DEBUG, "EOI IRQ {}", interrupt_number());
if (!m_shared_with_others) {
ASSERT(!m_responsible_irq_controller.is_null());
VERIFY(!m_responsible_irq_controller.is_null());
m_responsible_irq_controller->eoi(*this);
return true;
}

View file

@ -52,13 +52,13 @@ bool InterruptManagement::initialized()
InterruptManagement& InterruptManagement::the()
{
ASSERT(InterruptManagement::initialized());
VERIFY(InterruptManagement::initialized());
return *s_interrupt_management;
}
UNMAP_AFTER_INIT void InterruptManagement::initialize()
{
ASSERT(!InterruptManagement::initialized());
VERIFY(!InterruptManagement::initialized());
s_interrupt_management = new InterruptManagement();
if (kernel_command_line().lookup("smp").value_or("off") == "on")
@ -78,8 +78,8 @@ void InterruptManagement::enumerate_interrupt_handlers(Function<void(GenericInte
IRQController& InterruptManagement::get_interrupt_controller(int index)
{
ASSERT(index >= 0);
ASSERT(!m_interrupt_controllers[index].is_null());
VERIFY(index >= 0);
VERIFY(!m_interrupt_controllers[index].is_null());
return *m_interrupt_controllers[index];
}
@ -94,7 +94,7 @@ u8 InterruptManagement::acquire_mapped_interrupt_number(u8 original_irq)
u8 InterruptManagement::acquire_irq_number(u8 mapped_interrupt_vector)
{
ASSERT(InterruptManagement::initialized());
VERIFY(InterruptManagement::initialized());
return InterruptManagement::the().get_irq_vector(mapped_interrupt_vector);
}
@ -102,7 +102,7 @@ u8 InterruptManagement::get_mapped_interrupt_vector(u8 original_irq)
{
// FIXME: For SMP configuration (with IOAPICs) use a better routing scheme to make redirections more efficient.
// FIXME: Find a better way to handle conflict with Syscall interrupt gate.
ASSERT((original_irq + IRQ_VECTOR_BASE) != syscall_vector);
VERIFY((original_irq + IRQ_VECTOR_BASE) != syscall_vector);
return original_irq;
}
@ -122,7 +122,7 @@ RefPtr<IRQController> InterruptManagement::get_responsible_irq_controller(u8 int
if (!irq_controller->is_hard_disabled())
return irq_controller;
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
UNMAP_AFTER_INIT PhysicalAddress InterruptManagement::search_for_madt()
@ -149,7 +149,7 @@ UNMAP_AFTER_INIT void InterruptManagement::switch_to_pic_mode()
SpuriousInterruptHandler::initialize(7);
SpuriousInterruptHandler::initialize(15);
for (auto& irq_controller : m_interrupt_controllers) {
ASSERT(irq_controller);
VERIFY(irq_controller);
if (irq_controller->type() == IRQControllerType::i82093AA) {
irq_controller->hard_disable();
dbgln("Interrupts: Detected {} - Disabled", irq_controller->model());
@ -180,7 +180,7 @@ UNMAP_AFTER_INIT void InterruptManagement::switch_to_ioapic_mode()
}
}
for (auto& irq_controller : m_interrupt_controllers) {
ASSERT(irq_controller);
VERIFY(irq_controller);
if (irq_controller->type() == IRQControllerType::i8259) {
irq_controller->hard_disable();
dbgln("Interrupts: Detected {} - Disabled", irq_controller->model());
@ -198,7 +198,7 @@ UNMAP_AFTER_INIT void InterruptManagement::switch_to_ioapic_mode()
UNMAP_AFTER_INIT void InterruptManagement::locate_apic_data()
{
ASSERT(!m_madt.is_null());
VERIFY(!m_madt.is_null());
auto madt = map_typed<ACPI::Structures::MADT>(m_madt);
int irq_controller_count = 0;

View file

@ -69,8 +69,8 @@ bool PIC::is_enabled() const
void PIC::disable(const GenericInterruptHandler& handler)
{
InterruptDisabler disabler;
ASSERT(!is_hard_disabled());
ASSERT(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
VERIFY(!is_hard_disabled());
VERIFY(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
u8 irq = handler.interrupt_number();
if (m_cached_irq_mask & (1 << irq))
return;
@ -94,7 +94,7 @@ UNMAP_AFTER_INIT PIC::PIC()
void PIC::spurious_eoi(const GenericInterruptHandler& handler) const
{
ASSERT(handler.type() == HandlerType::SpuriousInterruptHandler);
VERIFY(handler.type() == HandlerType::SpuriousInterruptHandler);
if (handler.interrupt_number() == 7)
return;
if (handler.interrupt_number() == 15) {
@ -111,15 +111,15 @@ bool PIC::is_vector_enabled(u8 irq) const
void PIC::enable(const GenericInterruptHandler& handler)
{
InterruptDisabler disabler;
ASSERT(!is_hard_disabled());
ASSERT(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
VERIFY(!is_hard_disabled());
VERIFY(handler.interrupt_number() >= gsi_base() && handler.interrupt_number() < interrupt_vectors_count());
enable_vector(handler.interrupt_number());
}
void PIC::enable_vector(u8 irq)
{
InterruptDisabler disabler;
ASSERT(!is_hard_disabled());
VERIFY(!is_hard_disabled());
if (!(m_cached_irq_mask & (1 << irq)))
return;
u8 imr;
@ -138,9 +138,9 @@ void PIC::enable_vector(u8 irq)
void PIC::eoi(const GenericInterruptHandler& handler) const
{
InterruptDisabler disabler;
ASSERT(!is_hard_disabled());
VERIFY(!is_hard_disabled());
u8 irq = handler.interrupt_number();
ASSERT(irq >= gsi_base() && irq < interrupt_vectors_count());
VERIFY(irq >= gsi_base() && irq < interrupt_vectors_count());
if ((1 << irq) & m_cached_irq_mask) {
spurious_eoi(handler);
return;

View file

@ -84,7 +84,7 @@ SharedIRQHandler::~SharedIRQHandler()
void SharedIRQHandler::handle_interrupt(const RegisterState& regs)
{
ASSERT_INTERRUPTS_DISABLED();
VERIFY_INTERRUPTS_DISABLED();
if constexpr (INTERRUPT_DEBUG) {
dbgln("Interrupt @ {}", interrupt_number());
@ -94,7 +94,7 @@ void SharedIRQHandler::handle_interrupt(const RegisterState& regs)
int i = 0;
for (auto* handler : m_handlers) {
dbgln_if(INTERRUPT_DEBUG, "Going for Interrupt Handling @ {}, Shared Interrupt {}", i, interrupt_number());
ASSERT(handler != nullptr);
VERIFY(handler != nullptr);
handler->increment_invoking_counter();
handler->handle_interrupt(regs);
dbgln_if(INTERRUPT_DEBUG, "Going for Interrupt Handling @ {}, Shared Interrupt {} - End", i, interrupt_number());

View file

@ -36,7 +36,7 @@ UNMAP_AFTER_INIT void SpuriousInterruptHandler::initialize(u8 interrupt_number)
void SpuriousInterruptHandler::register_handler(GenericInterruptHandler& handler)
{
ASSERT(!m_real_handler);
VERIFY(!m_real_handler);
m_real_handler = &handler;
}
void SpuriousInterruptHandler::unregister_handler(GenericInterruptHandler&)
@ -88,7 +88,7 @@ void SpuriousInterruptHandler::enable_interrupt_vector()
void SpuriousInterruptHandler::disable_interrupt_vector()
{
ASSERT(!m_real_irq); // this flag should not be set when we call this method
VERIFY(!m_real_irq); // this flag should not be set when we call this method
if (!m_enabled)
return;
m_enabled = false;

View file

@ -43,7 +43,7 @@ public:
virtual HandlerType type() const override { return HandlerType::UnhandledInterruptHandler; }
virtual const char* purpose() const override { return "Unhandled Interrupt Handler"; }
virtual const char* controller() const override { ASSERT_NOT_REACHED(); }
virtual const char* controller() const override { VERIFY_NOT_REACHED(); }
virtual size_t sharing_devices_count() const override { return 0; }
virtual bool is_shared_handler() const override { return false; }

View file

@ -97,7 +97,7 @@ public:
void set_size(size_t size)
{
ASSERT(size <= capacity());
VERIFY(size <= capacity());
m_size = size;
}

View file

@ -143,7 +143,7 @@ public:
[[nodiscard]] ALWAYS_INLINE KResult error() const
{
ASSERT(m_is_error);
VERIFY(m_is_error);
return m_error;
}
@ -151,20 +151,20 @@ public:
[[nodiscard]] ALWAYS_INLINE T& value()
{
ASSERT(!m_is_error);
VERIFY(!m_is_error);
return *reinterpret_cast<T*>(&m_storage);
}
[[nodiscard]] ALWAYS_INLINE const T& value() const
{
ASSERT(!m_is_error);
VERIFY(!m_is_error);
return *reinterpret_cast<T*>(&m_storage);
}
[[nodiscard]] ALWAYS_INLINE T release_value()
{
ASSERT(!m_is_error);
ASSERT(m_have_storage);
VERIFY(!m_is_error);
VERIFY(m_have_storage);
T released_value(move(*reinterpret_cast<T*>(&m_storage)));
value().~T();
m_have_storage = false;
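
KResultOr<T> holds either an error code or a value, and the VERIFYs above catch access to the arm that is not live. A tiny analogue using std::variant, for illustration only (and assuming T is distinct from the error type):

#include <cassert>
#include <utility>
#include <variant>

// Tiny analogue of KResultOr built on std::variant (illustrative only).
using Error = int; // stand-in for KResult
template<typename T>
struct ResultOr {
    std::variant<Error, T> storage;

    bool is_error() const { return storage.index() == 0; }
    Error error() const { assert(is_error()); return std::get<Error>(storage); }
    T release_value() { assert(!is_error()); return std::move(std::get<T>(storage)); }
};
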

View file

@ -44,7 +44,7 @@ static u8 parse_hex_digit(char nibble)
{
if (nibble >= '0' && nibble <= '9')
return nibble - '0';
ASSERT(nibble >= 'a' && nibble <= 'f');
VERIFY(nibble >= 'a' && nibble <= 'f');
return 10 + (nibble - 'a');
}
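
Note that parse_hex_digit() accepts lowercase only; the VERIFY rejects everything outside [0-9a-f]. For comparison, a hedged variant that tolerates uppercase and reports invalid input instead of asserting:

#include <cstdint>

// Hedged variant: accepts both cases and reports invalid input with a
// sentinel instead of asserting (the kernel VERIFYs lowercase-only input).
static uint8_t parse_hex_digit_sketch(char nibble)
{
    if (nibble >= '0' && nibble <= '9')
        return nibble - '0';
    if (nibble >= 'a' && nibble <= 'f')
        return 10 + (nibble - 'a');
    if (nibble >= 'A' && nibble <= 'F')
        return 10 + (nibble - 'A');
    return 0xFF; // sentinel: not a hex digit
}
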
@ -152,7 +152,7 @@ NEVER_INLINE static void dump_backtrace_impl(FlatPtr base_pointer, bool use_ksym
}
return;
}
ASSERT(recognized_symbol_count <= max_recognized_symbol_count);
VERIFY(recognized_symbol_count <= max_recognized_symbol_count);
for (size_t i = 0; i < recognized_symbol_count; ++i) {
auto& symbol = recognized_symbols[i];
if (!symbol.address)

View file

@ -45,8 +45,8 @@ void Lock::lock(Mode mode)
{
// NOTE: This may be called from an interrupt handler (not an IRQ handler)
// and also from within critical sections!
ASSERT(!Processor::current().in_irq());
ASSERT(mode != Mode::Unlocked);
VERIFY(!Processor::current().in_irq());
VERIFY(mode != Mode::Unlocked);
auto current_thread = Thread::current();
ScopedCritical critical; // in case we're not in a critical section already
for (;;) {
@ -62,15 +62,15 @@ void Lock::lock(Mode mode)
case Mode::Unlocked: {
dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ ({}) {}: acquire {}, currently unlocked", this, m_name, mode_to_string(mode));
m_mode = mode;
ASSERT(!m_holder);
ASSERT(m_shared_holders.is_empty());
VERIFY(!m_holder);
VERIFY(m_shared_holders.is_empty());
if (mode == Mode::Exclusive) {
m_holder = current_thread;
} else {
ASSERT(mode == Mode::Shared);
VERIFY(mode == Mode::Shared);
m_shared_holders.set(current_thread, 1);
}
ASSERT(m_times_locked == 0);
VERIFY(m_times_locked == 0);
m_times_locked++;
#if LOCK_DEBUG
current_thread->holding_lock(*this, 1, file, line);
@ -80,10 +80,10 @@ void Lock::lock(Mode mode)
return;
}
case Mode::Exclusive: {
ASSERT(m_holder);
VERIFY(m_holder);
if (m_holder != current_thread)
break;
ASSERT(m_shared_holders.is_empty());
VERIFY(m_shared_holders.is_empty());
if constexpr (LOCK_TRACE_DEBUG) {
if (mode == Mode::Exclusive)
@ -92,8 +92,8 @@ void Lock::lock(Mode mode)
dbgln("Lock::lock @ {} ({}): acquire exclusive (requested {}), currently exclusive, holding: {}", this, m_name, mode_to_string(mode), m_times_locked);
}
ASSERT(mode == Mode::Exclusive || mode == Mode::Shared);
ASSERT(m_times_locked > 0);
VERIFY(mode == Mode::Exclusive || mode == Mode::Shared);
VERIFY(m_times_locked > 0);
m_times_locked++;
#if LOCK_DEBUG
current_thread->holding_lock(*this, 1, file, line);
@ -102,15 +102,15 @@ void Lock::lock(Mode mode)
return;
}
case Mode::Shared: {
ASSERT(!m_holder);
VERIFY(!m_holder);
if (mode != Mode::Shared)
break;
dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}): acquire {}, currently shared, locks held {}", this, m_name, mode_to_string(mode), m_times_locked);
ASSERT(m_times_locked > 0);
VERIFY(m_times_locked > 0);
m_times_locked++;
ASSERT(!m_shared_holders.is_empty());
VERIFY(!m_shared_holders.is_empty());
auto it = m_shared_holders.find(current_thread);
if (it != m_shared_holders.end())
it->value++;
@ -123,7 +123,7 @@ void Lock::lock(Mode mode)
return;
}
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
m_lock.store(false, AK::memory_order_release);
dbgln_if(LOCK_TRACE_DEBUG, "Lock::lock @ {} ({}) waiting...", this, m_name);
@ -136,7 +136,7 @@ void Lock::unlock()
{
// NOTE: This may be called from an interrupt handler (not an IRQ handler)
// and also from within critical sections!
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
auto current_thread = Thread::current();
ScopedCritical critical; // in case we're not in a critical section already
for (;;) {
@ -149,37 +149,37 @@ void Lock::unlock()
dbgln("Lock::unlock @ {} ({}): release {}, holding: {}", this, m_name, mode_to_string(current_mode), m_times_locked);
}
ASSERT(current_mode != Mode::Unlocked);
VERIFY(current_mode != Mode::Unlocked);
ASSERT(m_times_locked > 0);
VERIFY(m_times_locked > 0);
m_times_locked--;
switch (current_mode) {
case Mode::Exclusive:
ASSERT(m_holder == current_thread);
ASSERT(m_shared_holders.is_empty());
VERIFY(m_holder == current_thread);
VERIFY(m_shared_holders.is_empty());
if (m_times_locked == 0)
m_holder = nullptr;
break;
case Mode::Shared: {
ASSERT(!m_holder);
VERIFY(!m_holder);
auto it = m_shared_holders.find(current_thread);
ASSERT(it != m_shared_holders.end());
VERIFY(it != m_shared_holders.end());
if (it->value > 1) {
it->value--;
} else {
ASSERT(it->value > 0);
VERIFY(it->value > 0);
m_shared_holders.remove(it);
}
break;
}
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
bool unlocked_last = (m_times_locked == 0);
if (unlocked_last) {
ASSERT(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());
VERIFY(current_mode == Mode::Exclusive ? !m_holder : m_shared_holders.is_empty());
m_mode = Mode::Unlocked;
m_queue.should_block(false);
}
@ -204,7 +204,7 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
{
// NOTE: This may be called from an interrupt handler (not an IRQ handler)
// and also from within critical sections!
ASSERT(!Processor::current().in_irq());
VERIFY(!Processor::current().in_irq());
auto current_thread = Thread::current();
ScopedCritical critical; // in case we're not in a critical section already
for (;;) {
@ -224,7 +224,7 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
m_holder->holding_lock(*this, -(int)lock_count_to_restore);
#endif
m_holder = nullptr;
ASSERT(m_times_locked > 0);
VERIFY(m_times_locked > 0);
lock_count_to_restore = m_times_locked;
m_times_locked = 0;
m_mode = Mode::Unlocked;
@ -234,7 +234,7 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
break;
}
case Mode::Shared: {
ASSERT(!m_holder);
VERIFY(!m_holder);
auto it = m_shared_holders.find(current_thread);
if (it == m_shared_holders.end()) {
m_lock.store(false, AK::MemoryOrder::memory_order_release);
@ -245,14 +245,14 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::force_unlock_if_locked @ {}: unlocking exclusive with lock count: {}, total locks: {}",
this, it->value, m_times_locked);
ASSERT(it->value > 0);
VERIFY(it->value > 0);
lock_count_to_restore = it->value;
ASSERT(lock_count_to_restore > 0);
VERIFY(lock_count_to_restore > 0);
#if LOCK_DEBUG
m_holder->holding_lock(*this, -(int)lock_count_to_restore);
#endif
m_shared_holders.remove(it);
ASSERT(m_times_locked >= lock_count_to_restore);
VERIFY(m_times_locked >= lock_count_to_restore);
m_times_locked -= lock_count_to_restore;
if (m_times_locked == 0) {
m_mode = Mode::Unlocked;
@ -269,7 +269,7 @@ auto Lock::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
break;
}
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
m_queue.wake_one();
return previous_mode;
@ -290,9 +290,9 @@ void Lock::restore_lock(const char* file, int line, Mode mode, u32 lock_count)
void Lock::restore_lock(Mode mode, u32 lock_count)
#endif
{
ASSERT(mode != Mode::Unlocked);
ASSERT(lock_count > 0);
ASSERT(!Processor::current().in_irq());
VERIFY(mode != Mode::Unlocked);
VERIFY(lock_count > 0);
VERIFY(!Processor::current().in_irq());
auto current_thread = Thread::current();
ScopedCritical critical; // in case we're not in a critical section already
for (;;) {
@ -305,10 +305,10 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was unlocked", this, mode_to_string(mode), lock_count);
ASSERT(m_times_locked == 0);
VERIFY(m_times_locked == 0);
m_times_locked = lock_count;
ASSERT(!m_holder);
ASSERT(m_shared_holders.is_empty());
VERIFY(!m_holder);
VERIFY(m_shared_holders.is_empty());
m_holder = current_thread;
m_queue.should_block(true);
m_lock.store(false, AK::memory_order_release);
@ -325,13 +325,13 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
dbgln_if(LOCK_RESTORE_DEBUG, "Lock::restore_lock @ {}: restoring {} with lock count {}, was {}",
this, mode_to_string(mode), lock_count, mode_to_string(expected_mode));
ASSERT(expected_mode == Mode::Shared || m_times_locked == 0);
VERIFY(expected_mode == Mode::Shared || m_times_locked == 0);
m_times_locked += lock_count;
ASSERT(!m_holder);
ASSERT((expected_mode == Mode::Unlocked) == m_shared_holders.is_empty());
VERIFY(!m_holder);
VERIFY((expected_mode == Mode::Unlocked) == m_shared_holders.is_empty());
auto set_result = m_shared_holders.set(current_thread, lock_count);
// There may be other shared lock holders already, but we should not have an entry yet
ASSERT(set_result == AK::HashSetResult::InsertedNewEntry);
VERIFY(set_result == AK::HashSetResult::InsertedNewEntry);
m_queue.should_block(true);
m_lock.store(false, AK::memory_order_release);
#if LOCK_DEBUG
@ -340,7 +340,7 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
return;
}
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
m_lock.store(false, AK::memory_order_relaxed);
@ -352,7 +352,7 @@ void Lock::restore_lock(Mode mode, u32 lock_count)
void Lock::clear_waiters()
{
ASSERT(m_mode != Mode::Shared);
VERIFY(m_mode != Mode::Shared);
m_queue.wake_all();
}
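The Lock being converted above supports both shared and exclusive holders, which is what the m_holder/m_shared_holders bookkeeping tracks. For readers unfamiliar with that split, a userspace analogue built on standard C++ primitives behaves the same way at the interface level (an illustration only; the kernel Lock is not implemented on std::shared_mutex):

    #include <mutex>
    #include <shared_mutex>

    std::shared_mutex lock;
    int shared_state = 0;

    void reader()
    {
        std::shared_lock guard(lock); // like Lock::lock(Mode::Shared): many concurrent readers
        (void)shared_state;
    }

    void writer()
    {
        std::unique_lock guard(lock); // like Lock::lock(Mode::Exclusive): one holder, no readers
        ++shared_state;
    }

    int main()
    {
        writer();
        reader();
        return 0;
    }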

View file

@ -117,13 +117,13 @@ public:
}
ALWAYS_INLINE void unlock()
{
ASSERT(m_locked);
VERIFY(m_locked);
m_locked = false;
m_lock.unlock();
}
ALWAYS_INLINE void lock(Lock::Mode mode = Lock::Mode::Exclusive)
{
ASSERT(!m_locked);
VERIFY(!m_locked);
m_locked = true;
m_lock.lock(mode);
}

View file

@ -308,7 +308,7 @@ UNMAP_AFTER_INIT void E1000NetworkAdapter::read_mac_address()
mac[5] = tmp >> 8;
set_mac_address(mac);
} else {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}
@ -323,7 +323,7 @@ UNMAP_AFTER_INIT void E1000NetworkAdapter::initialize_rx_descriptors()
for (size_t i = 0; i < number_of_rx_descriptors; ++i) {
auto& descriptor = rx_descriptors[i];
auto region = MM.allocate_contiguous_kernel_region(8192, "E1000 RX buffer", Region::Access::Read | Region::Access::Write);
ASSERT(region);
VERIFY(region);
m_rx_buffers_regions.append(region.release_nonnull());
descriptor.addr = m_rx_buffers_regions[i].physical_page(0)->paddr().get();
descriptor.status = 0;
@ -344,7 +344,7 @@ UNMAP_AFTER_INIT void E1000NetworkAdapter::initialize_tx_descriptors()
for (size_t i = 0; i < number_of_tx_descriptors; ++i) {
auto& descriptor = tx_descriptors[i];
auto region = MM.allocate_contiguous_kernel_region(8192, "E1000 TX buffer", Region::Access::Read | Region::Access::Write);
ASSERT(region);
VERIFY(region);
m_tx_buffers_regions.append(region.release_nonnull());
descriptor.addr = m_tx_buffers_regions[i].physical_page(0)->paddr().get();
descriptor.cmd = 0;
@ -426,7 +426,7 @@ void E1000NetworkAdapter::send_raw(ReadonlyBytes payload)
#endif
auto* tx_descriptors = (e1000_tx_desc*)m_tx_descriptors_region->vaddr().as_ptr();
auto& descriptor = tx_descriptors[tx_current];
ASSERT(payload.size() <= 8192);
VERIFY(payload.size() <= 8192);
auto* vptr = (void*)m_tx_buffers_regions[tx_current].vaddr().as_ptr();
memcpy(vptr, payload.data(), payload.size());
descriptor.length = payload.size();
@ -464,7 +464,7 @@ void E1000NetworkAdapter::receive()
break;
auto* buffer = m_rx_buffers_regions[rx_current].vaddr().as_ptr();
u16 length = rx_descriptors[rx_current].length;
ASSERT(length <= 8192);
VERIFY(length <= 8192);
#if E1000_DEBUG
klog() << "E1000: Received 1 packet @ " << buffer << " (" << length << ") bytes!";
#endif

View file

@ -105,7 +105,7 @@ public:
NetworkOrdered<u16> compute_checksum() const
{
ASSERT(!m_checksum);
VERIFY(!m_checksum);
return internet_checksum(this, sizeof(IPv4Packet));
}
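compute_checksum() above delegates to internet_checksum(), the standard one's-complement checksum from RFC 1071. A self-contained version of that algorithm (a plausible shape for the helper, written for illustration; not necessarily the kernel's exact implementation):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    uint16_t internet_checksum(const void* data, size_t length)
    {
        uint32_t sum = 0;
        auto* p = static_cast<const uint8_t*>(data);
        while (length >= 2) {
            sum += (uint32_t)((p[0] << 8) | p[1]); // big-endian 16-bit words
            p += 2;
            length -= 2;
        }
        if (length)
            sum += (uint32_t)(p[0] << 8); // odd trailing byte, zero-padded
        while (sum >> 16)
            sum = (sum >> 16) + (sum & 0xffff); // fold carries back into the low 16 bits
        return (uint16_t)(~sum & 0xffff); // one's complement of the sum
    }

    int main()
    {
        const uint8_t header[] = { 0x45, 0x00, 0x00, 0x1c };
        printf("%04x\n", internet_checksum(header, sizeof(header))); // prints bae3
    }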

View file

@ -97,7 +97,7 @@ void IPv4Socket::get_peer_address(sockaddr* address, socklen_t* address_size)
KResult IPv4Socket::bind(Userspace<const sockaddr*> user_address, socklen_t address_size)
{
ASSERT(setup_state() == SetupState::Unstarted);
VERIFY(setup_state() == SetupState::Unstarted);
if (address_size != sizeof(sockaddr_in))
return EINVAL;
@ -260,7 +260,7 @@ KResultOr<size_t> IPv4Socket::receive_byte_buffered(FileDescription& description
}
}
ASSERT(!m_receive_buffer.is_empty());
VERIFY(!m_receive_buffer.is_empty());
int nreceived = m_receive_buffer.read(buffer, buffer_length);
if (nreceived > 0)
Thread::current()->did_ipv4_socket_read((size_t)nreceived);
@ -311,8 +311,8 @@ KResultOr<size_t> IPv4Socket::receive_packet_buffered(FileDescription& descripti
// Unblocked due to timeout.
return EAGAIN;
}
ASSERT(m_can_read);
ASSERT(!m_receive_queue.is_empty());
VERIFY(m_can_read);
VERIFY(!m_receive_queue.is_empty());
packet = m_receive_queue.take_first();
set_can_read(!m_receive_queue.is_empty());
@ -321,7 +321,7 @@ KResultOr<size_t> IPv4Socket::receive_packet_buffered(FileDescription& descripti
packet.data.value().size(),
m_receive_queue.size());
}
ASSERT(packet.data.has_value());
VERIFY(packet.data.has_value());
packet_timestamp = packet.timestamp;
@ -337,7 +337,7 @@ KResultOr<size_t> IPv4Socket::receive_packet_buffered(FileDescription& descripti
return EFAULT;
socklen_t out_length = sizeof(sockaddr_in);
ASSERT(addr_length);
VERIFY(addr_length);
if (!copy_to_user(addr_length, &out_length))
return EFAULT;
}
@ -390,7 +390,7 @@ bool IPv4Socket::did_receive(const IPv4Address& source_address, u16 source_port,
size_t space_in_receive_buffer = m_receive_buffer.space_for_writing();
if (packet_size > space_in_receive_buffer) {
dbgln("IPv4Socket({}): did_receive refusing packet since buffer is full.", this);
ASSERT(m_can_read);
VERIFY(m_can_read);
return false;
}
auto scratch_buffer = UserOrKernelBuffer::for_kernel_buffer(m_scratch_buffer.value().data());
@ -451,7 +451,7 @@ String IPv4Socket::absolute_path(const FileDescription&) const
builder.append(" (connecting)");
break;
default:
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
return builder.to_string();

View file

@ -97,7 +97,7 @@ void LocalSocket::get_peer_address(sockaddr* address, socklen_t* address_size)
KResult LocalSocket::bind(Userspace<const sockaddr*> user_address, socklen_t address_size)
{
ASSERT(setup_state() == SetupState::Unstarted);
VERIFY(setup_state() == SetupState::Unstarted);
if (address_size != sizeof(sockaddr_un))
return EINVAL;
@ -123,7 +123,7 @@ KResult LocalSocket::bind(Userspace<const sockaddr*> user_address, socklen_t add
auto file = move(result.value());
ASSERT(file->inode());
VERIFY(file->inode());
if (!file->inode()->bind_socket(*this))
return EADDRINUSE;
@ -136,7 +136,7 @@ KResult LocalSocket::bind(Userspace<const sockaddr*> user_address, socklen_t add
KResult LocalSocket::connect(FileDescription& description, Userspace<const sockaddr*> address, socklen_t address_size, ShouldBlock)
{
ASSERT(!m_bound);
VERIFY(!m_bound);
if (address_size != sizeof(sockaddr_un))
return EINVAL;
u16 sa_family_copy;
@ -162,14 +162,14 @@ KResult LocalSocket::connect(FileDescription& description, Userspace<const socka
m_file = move(description_or_error.value());
ASSERT(m_file->inode());
VERIFY(m_file->inode());
if (!m_file->inode()->socket())
return ECONNREFUSED;
m_address.sun_family = sa_family_copy;
memcpy(m_address.sun_path, safe_address, sizeof(m_address.sun_path));
ASSERT(m_connect_side_fd == &description);
VERIFY(m_connect_side_fd == &description);
set_connect_side_role(Role::Connecting);
auto peer = m_file->inode()->socket();
@ -217,12 +217,12 @@ KResult LocalSocket::listen(size_t backlog)
KResult LocalSocket::attach(FileDescription& description)
{
ASSERT(!m_accept_side_fd_open);
VERIFY(!m_accept_side_fd_open);
if (m_connect_side_role == Role::None) {
ASSERT(m_connect_side_fd == nullptr);
VERIFY(m_connect_side_fd == nullptr);
m_connect_side_fd = &description;
} else {
ASSERT(m_connect_side_fd != &description);
VERIFY(m_connect_side_fd != &description);
m_accept_side_fd_open = true;
}
@ -235,7 +235,7 @@ void LocalSocket::detach(FileDescription& description)
if (m_connect_side_fd == &description) {
m_connect_side_fd = nullptr;
} else {
ASSERT(m_accept_side_fd_open);
VERIFY(m_accept_side_fd_open);
m_accept_side_fd_open = false;
}
@ -261,7 +261,7 @@ bool LocalSocket::has_attached_peer(const FileDescription& description) const
return m_connect_side_fd != nullptr;
if (role == Role::Connected)
return m_accept_side_fd_open;
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
bool LocalSocket::can_write(const FileDescription& description, size_t) const
@ -325,7 +325,7 @@ KResultOr<size_t> LocalSocket::recvfrom(FileDescription& description, UserOrKern
}
if (!has_attached_peer(description) && socket_buffer->is_empty())
return 0;
ASSERT(!socket_buffer->is_empty());
VERIFY(!socket_buffer->is_empty());
auto nread = socket_buffer->read(buffer, buffer_size);
if (nread > 0)
Thread::current()->did_unix_socket_read(nread);
@ -438,7 +438,7 @@ NonnullRefPtrVector<FileDescription>& LocalSocket::recvfd_queue_for(const FileDe
return m_fds_for_client;
if (role == Role::Accepted)
return m_fds_for_server;
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
NonnullRefPtrVector<FileDescription>& LocalSocket::sendfd_queue_for(const FileDescription& description)
@ -448,7 +448,7 @@ NonnullRefPtrVector<FileDescription>& LocalSocket::sendfd_queue_for(const FileDe
return m_fds_for_server;
if (role == Role::Accepted)
return m_fds_for_client;
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
KResult LocalSocket::sendfd(const FileDescription& socket_description, FileDescription& passing_description)

View file

@ -208,7 +208,7 @@ size_t NetworkAdapter::dequeue_packet(u8* buffer, size_t buffer_size, timeval& p
packet_timestamp = packet_with_timestamp.timestamp;
auto packet = move(packet_with_timestamp.packet);
size_t packet_size = packet.size();
ASSERT(packet_size <= buffer_size);
VERIFY(packet_size <= buffer_size);
memcpy(buffer, packet.data(), packet_size);
if (m_unused_packet_buffers_count < 100) {
m_unused_packet_buffers.append(packet);

View file

@ -314,8 +314,8 @@ void handle_udp(const IPv4Packet& ipv4_packet, const timeval& packet_timestamp)
return;
}
ASSERT(socket->type() == SOCK_DGRAM);
ASSERT(socket->local_port() == udp_packet.destination_port());
VERIFY(socket->type() == SOCK_DGRAM);
VERIFY(socket->local_port() == udp_packet.destination_port());
socket->did_receive(ipv4_packet.source(), udp_packet.source_port(), KBuffer::copy(&ipv4_packet, sizeof(IPv4Packet) + ipv4_packet.payload_size()), packet_timestamp);
}
@ -391,8 +391,8 @@ void handle_tcp(const IPv4Packet& ipv4_packet, const timeval& packet_timestamp)
LOCKER(socket->lock());
ASSERT(socket->type() == SOCK_STREAM);
ASSERT(socket->local_port() == tcp_packet.destination_port());
VERIFY(socket->type() == SOCK_STREAM);
VERIFY(socket->local_port() == tcp_packet.destination_port());
#if TCP_DEBUG
klog() << "handle_tcp: got socket; state=" << socket->tuple().to_string().characters() << " " << TCPSocket::to_string(socket->state());

View file

@ -77,7 +77,7 @@ public:
void unblock(const IPv4Address& ip_addr, const MACAddress& addr)
{
BlockCondition::unblock([&](auto& b, void*, bool&) {
ASSERT(b.blocker_type() == Thread::Blocker::Type::Routing);
VERIFY(b.blocker_type() == Thread::Blocker::Type::Routing);
auto& blocker = static_cast<ARPTableBlocker&>(b);
return blocker.unblock(false, ip_addr, addr);
});
@ -86,7 +86,7 @@ public:
protected:
virtual bool should_add_blocker(Thread::Blocker& b, void*) override
{
ASSERT(b.blocker_type() == Thread::Blocker::Type::Routing);
VERIFY(b.blocker_type() == Thread::Blocker::Type::Routing);
auto& blocker = static_cast<ARPTableBlocker&>(b);
auto val = s_arp_table->resource().get(blocker.ip_addr());
if (!val.has_value())
@ -107,7 +107,7 @@ ARPTableBlocker::ARPTableBlocker(IPv4Address ip_addr, Optional<MACAddress>& addr
void ARPTableBlocker::not_blocking(bool timeout_in_past)
{
ASSERT(timeout_in_past || !m_should_block);
VERIFY(timeout_in_past || !m_should_block);
auto addr = s_arp_table->resource().get(ip_addr());
ScopedSpinLock lock(m_lock);

View file

@ -76,7 +76,7 @@ RefPtr<Socket> Socket::accept()
return nullptr;
dbgln_if(SOCKET_DEBUG, "Socket({}) de-queueing connection", this);
auto client = m_pending.take_first();
ASSERT(!client->is_connected());
VERIFY(!client->is_connected());
auto& process = *Process::current();
client->m_acceptor = { process.pid().value(), process.uid(), process.gid() };
client->m_connected = true;
@ -101,7 +101,7 @@ KResult Socket::setsockopt(int level, int option, Userspace<const void*> user_va
{
if (level != SOL_SOCKET)
return ENOPROTOOPT;
ASSERT(level == SOL_SOCKET);
VERIFY(level == SOL_SOCKET);
switch (option) {
case SO_SNDTIMEO:
if (user_value_size != sizeof(timeval))

View file

@ -132,13 +132,13 @@ RefPtr<TCPSocket> TCPSocket::create_client(const IPv4Address& new_local_address,
void TCPSocket::release_to_originator()
{
ASSERT(!!m_originator);
VERIFY(!!m_originator);
m_originator.strong_ref()->release_for_accept(this);
}
void TCPSocket::release_for_accept(RefPtr<TCPSocket> socket)
{
ASSERT(m_pending_release_for_accept.contains(socket->tuple()));
VERIFY(m_pending_release_for_accept.contains(socket->tuple()));
m_pending_release_for_accept.remove(socket->tuple());
// FIXME: Should we observe this error somehow?
[[maybe_unused]] auto rc = queue_connection_from(*socket);
@ -170,7 +170,7 @@ KResultOr<size_t> TCPSocket::protocol_receive(ReadonlyBytes raw_ipv4_packet, Use
#if TCP_SOCKET_DEBUG
klog() << "payload_size " << payload_size << ", will it fit in " << buffer_size << "?";
#endif
ASSERT(buffer_size >= payload_size);
VERIFY(buffer_size >= payload_size);
if (!buffer.write(tcp_packet.payload(), payload_size))
return EFAULT;
return payload_size;
@ -189,7 +189,7 @@ KResult TCPSocket::send_tcp_packet(u16 flags, const UserOrKernelBuffer* payload,
const size_t buffer_size = sizeof(TCPPacket) + payload_size;
auto buffer = ByteBuffer::create_zeroed(buffer_size);
auto& tcp_packet = *(TCPPacket*)(buffer.data());
ASSERT(local_port());
VERIFY(local_port());
tcp_packet.set_source_port(local_port());
tcp_packet.set_destination_port(peer_port());
tcp_packet.set_window_size(1024);
@ -219,7 +219,7 @@ KResult TCPSocket::send_tcp_packet(u16 flags, const UserOrKernelBuffer* payload,
}
auto routing_decision = route_to(peer_address(), local_address(), bound_interface());
ASSERT(!routing_decision.is_zero());
VERIFY(!routing_decision.is_zero());
auto packet_buffer = UserOrKernelBuffer::for_kernel_buffer(buffer.data());
auto result = routing_decision.adapter->send_ipv4(
@ -236,7 +236,7 @@ KResult TCPSocket::send_tcp_packet(u16 flags, const UserOrKernelBuffer* payload,
void TCPSocket::send_outgoing_packets()
{
auto routing_decision = route_to(peer_address(), local_address(), bound_interface());
ASSERT(!routing_decision.is_zero());
VERIFY(!routing_decision.is_zero());
auto now = kgettimeofday();
@ -321,7 +321,7 @@ NetworkOrdered<u16> TCPSocket::compute_tcp_checksum(const IPv4Address& source, c
if (checksum > 0xffff)
checksum = (checksum >> 16) + (checksum & 0xffff);
}
ASSERT(packet.data_offset() * 4 == sizeof(TCPPacket));
VERIFY(packet.data_offset() * 4 == sizeof(TCPPacket));
w = (const NetworkOrdered<u16>*)packet.payload();
for (size_t i = 0; i < payload_size / sizeof(u16); ++i) {
checksum += w[i];
@ -391,7 +391,7 @@ KResult TCPSocket::protocol_connect(FileDescription& description, ShouldBlock sh
if (Thread::current()->block<Thread::ConnectBlocker>({}, description, unblock_flags).was_interrupted())
return EINTR;
locker.lock();
ASSERT(setup_state() == SetupState::Completed);
VERIFY(setup_state() == SetupState::Completed);
if (has_error()) { // TODO: check unblock_flags
m_role = Role::None;
return ECONNREFUSED;

View file

@ -58,7 +58,7 @@ SocketHandle<UDPSocket> UDPSocket::from_port(u16 port)
if (it == sockets_by_port().resource().end())
return {};
socket = (*it).value;
ASSERT(socket);
VERIFY(socket);
}
return { *socket };
}
@ -83,8 +83,8 @@ KResultOr<size_t> UDPSocket::protocol_receive(ReadonlyBytes raw_ipv4_packet, Use
{
auto& ipv4_packet = *(const IPv4Packet*)(raw_ipv4_packet.data());
auto& udp_packet = *static_cast<const UDPPacket*>(ipv4_packet.payload());
ASSERT(udp_packet.length() >= sizeof(UDPPacket)); // FIXME: This should be rejected earlier.
ASSERT(buffer_size >= (udp_packet.length() - sizeof(UDPPacket)));
VERIFY(udp_packet.length() >= sizeof(UDPPacket)); // FIXME: This should be rejected earlier.
VERIFY(buffer_size >= (udp_packet.length() - sizeof(UDPPacket)));
if (!buffer.write(udp_packet.payload(), udp_packet.length() - sizeof(UDPPacket)))
return EFAULT;
return udp_packet.length() - sizeof(UDPPacket);

View file

@ -44,7 +44,7 @@ inline u32 read32(Address address, u32 field) { return Access::the().read32_fiel
Access& Access::the()
{
if (s_access == nullptr) {
ASSERT_NOT_REACHED(); // We failed to initialize the PCI subsystem, so stop here!
VERIFY_NOT_REACHED(); // We failed to initialize the PCI subsystem, so stop here!
}
return *s_access;
}
@ -69,7 +69,7 @@ PhysicalID Access::get_physical_id(Address address) const
return physical_id;
}
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
u8 Access::early_read8_field(Address address, u32 field)
@ -110,7 +110,7 @@ void Access::enumerate_functions(int type, u8 bus, u8 device, u8 function, Funct
#if PCI_DEBUG
klog() << "PCI: Found secondary bus: " << secondary_bus;
#endif
ASSERT(secondary_bus != bus);
VERIFY(secondary_bus != bus);
enumerate_bus(type, secondary_bus, callback, recursive);
}
}
@ -188,7 +188,7 @@ Vector<Capability> get_capabilities(Address address)
void raw_access(Address address, u32 field, size_t access_size, u32 value)
{
ASSERT(access_size != 0);
VERIFY(access_size != 0);
if (access_size == 1) {
write8(address, field, value);
return;
@ -201,7 +201,7 @@ void raw_access(Address address, u32 field, size_t access_size, u32 value)
write32(address, field, value);
return;
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
ID get_id(Address address)
@ -303,7 +303,7 @@ void disable_bus_mastering(Address address)
size_t get_BAR_space_size(Address address, u8 bar_number)
{
// See PCI Spec 2.3, Page 222
ASSERT(bar_number < 6);
VERIFY(bar_number < 6);
u8 field = (PCI_BAR0 + (bar_number << 2));
u32 bar_reserved = read32(address, field);
write32(address, field, 0xFFFFFFFF);
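The hunk above shows the start of the classic BAR sizing sequence from PCI 2.3: save the BAR, write all ones, read back, and decode the size from the bits that stayed clear. A sketch of the decode step (illustrative only; the function's continuation in the kernel may differ in detail):

    #include <cstdint>
    #include <cstdio>

    uint32_t bar_space_size(uint32_t original_bar, uint32_t readback_after_all_ones)
    {
        // Bit 0 distinguishes I/O BARs from memory BARs, and the low bits
        // are flags rather than address bits, so mask them off first.
        uint32_t mask = (original_bar & 1) ? 0xfffffffc : 0xfffffff0;
        uint32_t size_bits = readback_after_all_ones & mask;
        return ~size_bits + 1; // two's complement yields the decoded size
    }

    int main()
    {
        // Example: a memory BAR that reads back 0xfffe0000 decodes to 128 KiB.
        printf("%#x\n", bar_space_size(0xfebf0000, 0xfffe0000)); // prints 0x20000
    }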

View file

@ -68,14 +68,14 @@ uint32_t MMIOAccess::segment_count() const
uint8_t MMIOAccess::segment_start_bus(u32 seg) const
{
auto segment = m_segments.get(seg);
ASSERT(segment.has_value());
VERIFY(segment.has_value());
return segment.value().get_start_bus();
}
uint8_t MMIOAccess::segment_end_bus(u32 seg) const
{
auto segment = m_segments.get(seg);
ASSERT(segment.has_value());
VERIFY(segment.has_value());
return segment.value().get_end_bus();
}
@ -153,7 +153,7 @@ UNMAP_AFTER_INIT Optional<VirtualAddress> MMIOAccess::get_device_configuration_s
u8 MMIOAccess::read8_field(Address address, u32 field)
{
InterruptDisabler disabler;
ASSERT(field <= 0xfff);
VERIFY(field <= 0xfff);
dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 8-bit field {:#08x} for {}", field, address);
return *((u8*)(get_device_configuration_space(address).value().get() + (field & 0xfff)));
}
@ -161,7 +161,7 @@ u8 MMIOAccess::read8_field(Address address, u32 field)
u16 MMIOAccess::read16_field(Address address, u32 field)
{
InterruptDisabler disabler;
ASSERT(field < 0xfff);
VERIFY(field < 0xfff);
dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 16-bit field {:#08x} for {}", field, address);
return *((u16*)(get_device_configuration_space(address).value().get() + (field & 0xfff)));
}
@ -169,7 +169,7 @@ u16 MMIOAccess::read16_field(Address address, u32 field)
u32 MMIOAccess::read32_field(Address address, u32 field)
{
InterruptDisabler disabler;
ASSERT(field <= 0xffc);
VERIFY(field <= 0xffc);
dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 32-bit field {:#08x} for {}", field, address);
return *((u32*)(get_device_configuration_space(address).value().get() + (field & 0xfff)));
}
@ -177,21 +177,21 @@ u32 MMIOAccess::read32_field(Address address, u32 field)
void MMIOAccess::write8_field(Address address, u32 field, u8 value)
{
InterruptDisabler disabler;
ASSERT(field <= 0xfff);
VERIFY(field <= 0xfff);
dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 8-bit field {:#08x}, value={:#02x} for {}", field, value, address);
*((u8*)(get_device_configuration_space(address).value().get() + (field & 0xfff))) = value;
}
void MMIOAccess::write16_field(Address address, u32 field, u16 value)
{
InterruptDisabler disabler;
ASSERT(field < 0xfff);
VERIFY(field < 0xfff);
dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 16-bit field {:#08x}, value={:#02x} for {}", field, value, address);
*((u16*)(get_device_configuration_space(address).value().get() + (field & 0xfff))) = value;
}
void MMIOAccess::write32_field(Address address, u32 field, u32 value)
{
InterruptDisabler disabler;
ASSERT(field <= 0xffc);
VERIFY(field <= 0xffc);
dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 32-bit field {:#08x}, value={:#02x} for {}", field, value, address);
*((u32*)(get_device_configuration_space(address).value().get() + (field & 0xfff))) = value;
}
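The field bounds VERIFYed above (at most 0xfff for narrow accesses, 0xffc for dwords) fall out of PCIe ECAM: each function owns a 4 KiB configuration window. For reference, the standard ECAM offset computation (a generic restatement of the spec, not this kernel's mapping code):

    #include <cstdint>
    #include <cstdio>

    // bus << 20 | device << 15 | function << 12 | register, per the PCIe spec.
    uint64_t ecam_offset(uint8_t bus, uint8_t device, uint8_t function, uint16_t field)
    {
        return ((uint64_t)bus << 20) | ((uint64_t)(device & 0x1f) << 15)
            | ((uint64_t)(function & 0x7) << 12) | (field & 0xfff);
    }

    int main()
    {
        printf("%#llx\n", (unsigned long long)ecam_offset(1, 2, 3, 0x10)); // prints 0x113010
    }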

View file

@ -104,7 +104,7 @@ KResult PerformanceEventBuffer::append_with_eip_and_ebp(u32 eip, u32 ebp, int ty
PerformanceEvent& PerformanceEventBuffer::at(size_t index)
{
ASSERT(index < capacity());
VERIFY(index < capacity());
auto* events = reinterpret_cast<PerformanceEvent*>(m_buffer->data());
return events[index];
}
@ -120,7 +120,7 @@ OwnPtr<KBuffer> PerformanceEventBuffer::to_json(ProcessID pid, const String& exe
bool PerformanceEventBuffer::to_json(KBufferBuilder& builder, ProcessID pid, const String& executable_path) const
{
auto process = Process::from_pid(pid);
ASSERT(process);
VERIFY(process);
ScopedSpinLock locker(process->space().get_lock());
JsonObjectSerializer object(builder);

View file

@ -238,7 +238,7 @@ Process::Process(RefPtr<Thread>& first_thread, const String& name, uid_t uid, gi
} else {
// NOTE: This non-forked code path is only taken when the kernel creates a process "manually" (at boot.)
auto thread_or_error = Thread::try_create(*this);
ASSERT(!thread_or_error.is_error());
VERIFY(!thread_or_error.is_error());
first_thread = thread_or_error.release_value();
first_thread->detach();
}
@ -246,8 +246,8 @@ Process::Process(RefPtr<Thread>& first_thread, const String& name, uid_t uid, gi
Process::~Process()
{
ASSERT(thread_count() == 0); // all threads should have been finalized
ASSERT(!m_alarm_timer);
VERIFY(thread_count() == 0); // all threads should have been finalized
VERIFY(!m_alarm_timer);
{
ScopedSpinLock processses_lock(g_processes_lock);
@ -304,9 +304,9 @@ void create_signal_trampoline()
void Process::crash(int signal, u32 eip, bool out_of_memory)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(!is_dead());
ASSERT(Process::current() == this);
VERIFY_INTERRUPTS_DISABLED();
VERIFY(!is_dead());
VERIFY(Process::current() == this);
if (out_of_memory) {
dbgln("\033[31;1mOut of memory\033[m, killing: {}", *this);
@ -322,12 +322,12 @@ void Process::crash(int signal, u32 eip, bool out_of_memory)
m_termination_signal = signal;
set_dump_core(!out_of_memory);
space().dump_regions();
ASSERT(is_user_process());
VERIFY(is_user_process());
die();
// We can not return from here, as there is nowhere
// to unwind to, so die right away.
Thread::current()->die_if_needed();
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
RefPtr<Process> Process::from_pid(ProcessID pid)
@ -431,8 +431,8 @@ KResultOr<String> Process::get_syscall_path_argument(const Syscall::StringArgume
bool Process::dump_core()
{
ASSERT(is_dumpable());
ASSERT(should_core_dump());
VERIFY(is_dumpable());
VERIFY(should_core_dump());
dbgln("Generating coredump for pid: {}", m_pid.value());
auto coredump_path = String::formatted("/tmp/coredump/{}_{}_{}", name(), m_pid.value(), RTC::now());
auto coredump = CoreDump::create(*this, coredump_path);
@ -443,8 +443,8 @@ bool Process::dump_core()
bool Process::dump_perfcore()
{
ASSERT(is_dumpable());
ASSERT(m_perf_event_buffer);
VERIFY(is_dumpable());
VERIFY(m_perf_event_buffer);
dbgln("Generating perfcore for pid: {}", m_pid.value());
auto description_or_error = VFS::the().open(String::formatted("perfcore.{}", m_pid.value()), O_CREAT | O_EXCL, 0400, current_directory(), UidAndGid { m_uid, m_gid });
if (description_or_error.is_error())
@ -460,7 +460,7 @@ bool Process::dump_perfcore()
void Process::finalize()
{
ASSERT(Thread::current() == g_finalizer);
VERIFY(Thread::current() == g_finalizer);
dbgln_if(PROCESS_DEBUG, "Finalizing process {}", *this);
@ -508,7 +508,7 @@ void Process::finalize()
m_space->remove_all_regions({});
ASSERT(ref_count() > 0);
VERIFY(ref_count() > 0);
// WaitBlockCondition::finalize will be in charge of dropping the last
// reference if there are still waiters around, or whenever the last
// waitable states are consumed. Unless there is no parent around
@ -545,9 +545,9 @@ void Process::die()
void Process::terminate_due_to_signal(u8 signal)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(signal < 32);
ASSERT(Process::current() == this);
VERIFY_INTERRUPTS_DISABLED();
VERIFY(signal < 32);
VERIFY(Process::current() == this);
dbgln("Terminating {} due to signal {}", *this, signal);
m_termination_status = 0;
m_termination_signal = signal;
@ -576,7 +576,7 @@ KResult Process::send_signal(u8 signal, Process* sender)
RefPtr<Thread> Process::create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, const String& name, u32 affinity, bool joinable)
{
ASSERT((priority >= THREAD_PRIORITY_MIN) && (priority <= THREAD_PRIORITY_MAX));
VERIFY((priority >= THREAD_PRIORITY_MIN) && (priority <= THREAD_PRIORITY_MAX));
// FIXME: Do something with guard pages?
@ -648,7 +648,7 @@ void Process::stop_tracing()
void Process::tracer_trap(Thread& thread, const RegisterState& regs)
{
ASSERT(m_tracer.ptr());
VERIFY(m_tracer.ptr());
m_tracer->set_regs(regs);
thread.send_urgent_signal_to_self(SIGTRAP);
}
@ -663,7 +663,7 @@ PerformanceEventBuffer& Process::ensure_perf_events()
bool Process::remove_thread(Thread& thread)
{
auto thread_cnt_before = m_thread_count.fetch_sub(1, AK::MemoryOrder::memory_order_acq_rel);
ASSERT(thread_cnt_before != 0);
VERIFY(thread_cnt_before != 0);
ScopedSpinLock thread_list_lock(m_thread_list_lock);
m_thread_list.remove(thread);
return thread_cnt_before == 1;

View file

@ -614,7 +614,7 @@ extern RecursiveSpinLock g_processes_lock;
template<typename Callback>
inline void Process::for_each(Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
VERIFY_INTERRUPTS_DISABLED();
ScopedSpinLock lock(g_processes_lock);
for (auto* process = g_processes->head(); process;) {
auto* next_process = process->next();
@ -627,7 +627,7 @@ inline void Process::for_each(Callback callback)
template<typename Callback>
inline void Process::for_each_child(Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
VERIFY_INTERRUPTS_DISABLED();
ProcessID my_pid = pid();
ScopedSpinLock lock(g_processes_lock);
for (auto* process = g_processes->head(); process;) {
@ -655,7 +655,7 @@ inline IterationDecision Process::for_each_thread(Callback callback) const
template<typename Callback>
inline void Process::for_each_in_pgrp(ProcessGroupID pgid, Callback callback)
{
ASSERT_INTERRUPTS_DISABLED();
VERIFY_INTERRUPTS_DISABLED();
ScopedSpinLock lock(g_processes_lock);
for (auto* process = g_processes->head(); process;) {
auto* next_process = process->next();
@ -698,7 +698,7 @@ inline const LogStream& operator<<(const LogStream& stream, const Process& proce
dbgln("Has made a promise"); \
cli(); \
Process::current()->crash(SIGABRT, 0); \
ASSERT_NOT_REACHED(); \
VERIFY_NOT_REACHED(); \
} \
} while (0)
@ -711,7 +711,7 @@ inline const LogStream& operator<<(const LogStream& stream, const Process& proce
Process::current()->coredump_metadata().set( \
"pledge_violation", #promise); \
Process::current()->crash(SIGABRT, 0); \
ASSERT_NOT_REACHED(); \
VERIFY_NOT_REACHED(); \
} \
} while (0)

View file

@ -97,7 +97,7 @@ void KernelRng::wait_for_entropy()
void KernelRng::wake_if_ready()
{
ASSERT(get_lock().is_locked());
VERIFY(get_lock().is_locked());
if (resource().is_ready()) {
m_seed_queue.wake_all();
}
@ -167,7 +167,7 @@ bool get_good_random_bytes(u8* buffer, size_t buffer_size, bool allow_wait, bool
// NOTE: The only case where this function should ever return false and
// not actually return random data is if fallback_to_fast == false and
// allow_wait == false and interrupts are enabled!
ASSERT(result || !fallback_to_fast);
VERIFY(result || !fallback_to_fast);
return result;
}
@ -176,7 +176,7 @@ void get_fast_random_bytes(u8* buffer, size_t buffer_size)
// Try to get good randomness, but don't block if we can't right now
// and allow falling back to fast randomness
auto result = get_good_random_bytes(buffer, buffer_size, false, true);
ASSERT(result);
VERIFY(result);
}
}

View file

@ -64,10 +64,10 @@ public:
this->reseed();
}
ASSERT(is_seeded());
VERIFY(is_seeded());
// FIXME: More than 2^20 bytes cannot be generated without refreshing the key.
ASSERT(n < (1 << 20));
VERIFY(n < (1 << 20));
typename CipherType::CTRMode cipher(m_key, KeySize, Crypto::Cipher::Intent::Encryption);
@ -98,7 +98,7 @@ public:
[[nodiscard]] bool is_ready() const
{
ASSERT(m_lock.is_locked());
VERIFY(m_lock.is_locked());
return is_seeded() || m_p0_len >= reseed_threshold;
}

View file

@ -81,11 +81,11 @@ static inline u32 thread_priority_to_priority_index(u32 thread_priority)
{
// Converts the priority in the range of THREAD_PRIORITY_MIN...THREAD_PRIORITY_MAX
// to an index into g_ready_queues where 0 is the highest priority bucket
ASSERT(thread_priority >= THREAD_PRIORITY_MIN && thread_priority <= THREAD_PRIORITY_MAX);
VERIFY(thread_priority >= THREAD_PRIORITY_MIN && thread_priority <= THREAD_PRIORITY_MAX);
constexpr u32 thread_priority_count = THREAD_PRIORITY_MAX - THREAD_PRIORITY_MIN + 1;
static_assert(thread_priority_count > 0);
auto priority_bucket = ((thread_priority_count - (thread_priority - THREAD_PRIORITY_MIN)) / thread_priority_count) * (g_ready_queue_buckets - 1);
ASSERT(priority_bucket < g_ready_queue_buckets);
VERIFY(priority_bucket < g_ready_queue_buckets);
return priority_bucket;
}
@ -97,10 +97,10 @@ Thread& Scheduler::pull_next_runnable_thread()
auto priority_mask = g_ready_queues_mask;
while (priority_mask != 0) {
auto priority = __builtin_ffsl(priority_mask);
ASSERT(priority > 0);
VERIFY(priority > 0);
auto& ready_queue = g_ready_queues[--priority];
for (auto& thread : ready_queue.thread_list) {
ASSERT(thread.m_runnable_priority == (int)priority);
VERIFY(thread.m_runnable_priority == (int)priority);
if (thread.is_active())
continue;
if (!(thread.affinity() & affinity_mask))
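The priority_mask scan above leans on __builtin_ffsl, which returns one plus the index of the least significant set bit (and 0 when no bit is set), which is why the code VERIFYs priority > 0 and then decrements before indexing. A standalone demonstration:

    #include <cstdio>

    int main()
    {
        long mask = 0b10100; // say buckets 2 and 4 are non-empty
        int priority = __builtin_ffsl(mask);
        if (priority == 0)
            return 1; // no runnable bucket at all
        printf("%d\n", priority - 1); // prints 2: the first non-empty bucket
    }

(__builtin_ffsl is a GCC/Clang builtin; the snippet assumes one of those compilers, as the kernel itself does.)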
@ -134,14 +134,14 @@ bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
ScopedSpinLock lock(g_ready_queues_lock);
auto priority = thread.m_runnable_priority;
if (priority < 0) {
ASSERT(!thread.m_ready_queue_node.is_in_list());
VERIFY(!thread.m_ready_queue_node.is_in_list());
return false;
}
if (check_affinity && !(thread.affinity() & (1 << Processor::current().id())))
return false;
ASSERT(g_ready_queues_mask & (1u << priority));
VERIFY(g_ready_queues_mask & (1u << priority));
auto& ready_queue = g_ready_queues[priority];
thread.m_runnable_priority = -1;
ready_queue.thread_list.remove(thread);
@ -152,15 +152,15 @@ bool Scheduler::dequeue_runnable_thread(Thread& thread, bool check_affinity)
void Scheduler::queue_runnable_thread(Thread& thread)
{
ASSERT(g_scheduler_lock.own_lock());
VERIFY(g_scheduler_lock.own_lock());
if (&thread == Processor::current().idle_thread())
return;
auto priority = thread_priority_to_priority_index(thread.priority());
ScopedSpinLock lock(g_ready_queues_lock);
ASSERT(thread.m_runnable_priority < 0);
VERIFY(thread.m_runnable_priority < 0);
thread.m_runnable_priority = (int)priority;
ASSERT(!thread.m_ready_queue_node.is_in_list());
VERIFY(!thread.m_ready_queue_node.is_in_list());
auto& ready_queue = g_ready_queues[priority];
bool was_empty = ready_queue.thread_list.is_empty();
ready_queue.thread_list.append(thread);
@ -170,7 +170,7 @@ void Scheduler::queue_runnable_thread(Thread& thread)
UNMAP_AFTER_INIT void Scheduler::start()
{
ASSERT_INTERRUPTS_DISABLED();
VERIFY_INTERRUPTS_DISABLED();
// We need to acquire our scheduler lock, which will be released
// by the idle thread once control transferred there
@ -178,23 +178,23 @@ UNMAP_AFTER_INIT void Scheduler::start()
auto& processor = Processor::current();
processor.set_scheduler_data(*new SchedulerPerProcessorData());
ASSERT(processor.is_initialized());
VERIFY(processor.is_initialized());
auto& idle_thread = *processor.idle_thread();
ASSERT(processor.current_thread() == &idle_thread);
ASSERT(processor.idle_thread() == &idle_thread);
VERIFY(processor.current_thread() == &idle_thread);
VERIFY(processor.idle_thread() == &idle_thread);
idle_thread.set_ticks_left(time_slice_for(idle_thread));
idle_thread.did_schedule();
idle_thread.set_initialized(true);
processor.init_context(idle_thread, false);
idle_thread.set_state(Thread::Running);
ASSERT(idle_thread.affinity() == (1u << processor.get_id()));
VERIFY(idle_thread.affinity() == (1u << processor.get_id()));
processor.initialize_context_switching(idle_thread);
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
bool Scheduler::pick_next()
{
ASSERT_INTERRUPTS_DISABLED();
VERIFY_INTERRUPTS_DISABLED();
auto current_thread = Thread::current();
@ -209,7 +209,7 @@ bool Scheduler::pick_next()
// We may be on a different processor after we got switched
// back to this thread!
auto& scheduler_data = Processor::current().get_scheduler_data();
ASSERT(scheduler_data.m_in_scheduler);
VERIFY(scheduler_data.m_in_scheduler);
scheduler_data.m_in_scheduler = false;
});
@ -305,7 +305,7 @@ bool Scheduler::yield()
auto current_thread = Thread::current();
dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: yielding thread {} in_irq={}", proc.get_id(), *current_thread, proc.in_irq());
ASSERT(current_thread != nullptr);
VERIFY(current_thread != nullptr);
if (proc.in_irq() || proc.in_critical()) {
// If we're handling an IRQ we can't switch context, or we're in
// a critical section where we don't want to switch contexts, then
@ -324,10 +324,10 @@ bool Scheduler::yield()
bool Scheduler::donate_to_and_switch(Thread* beneficiary, [[maybe_unused]] const char* reason)
{
ASSERT(g_scheduler_lock.own_lock());
VERIFY(g_scheduler_lock.own_lock());
auto& proc = Processor::current();
ASSERT(proc.in_critical() == 1);
VERIFY(proc.in_critical() == 1);
unsigned ticks_left = Thread::current()->ticks_left();
if (!beneficiary || beneficiary->state() != Thread::Runnable || ticks_left <= 1)
@ -342,7 +342,7 @@ bool Scheduler::donate_to_and_switch(Thread* beneficiary, [[maybe_unused]] const
bool Scheduler::donate_to(RefPtr<Thread>& beneficiary, const char* reason)
{
ASSERT(beneficiary);
VERIFY(beneficiary);
if (beneficiary == Thread::current())
return Scheduler::yield();
@ -359,11 +359,11 @@ bool Scheduler::donate_to(RefPtr<Thread>& beneficiary, const char* reason)
// We may be on a different processor after we got switched
// back to this thread!
auto& scheduler_data = Processor::current().get_scheduler_data();
ASSERT(scheduler_data.m_in_scheduler);
VERIFY(scheduler_data.m_in_scheduler);
scheduler_data.m_in_scheduler = false;
});
ASSERT(!proc.in_irq());
VERIFY(!proc.in_irq());
if (proc.in_critical() > 1) {
scheduler_data.m_pending_beneficiary = beneficiary; // Save the beneficiary
@ -413,7 +413,7 @@ bool Scheduler::context_switch(Thread* thread)
// NOTE: from_thread at this point reflects the thread we were
// switched from, and thread reflects Thread::current()
enter_current(*from_thread, false);
ASSERT(thread == Thread::current());
VERIFY(thread == Thread::current());
#if ARCH(I386)
if (thread->process().is_user_process()) {
@ -429,7 +429,7 @@ bool Scheduler::context_switch(Thread* thread)
void Scheduler::enter_current(Thread& prev_thread, bool is_first)
{
ASSERT(g_scheduler_lock.own_lock());
VERIFY(g_scheduler_lock.own_lock());
prev_thread.set_active(false);
if (prev_thread.state() == Thread::Dying) {
// If the thread we switched from is marked as dying, then notify
@ -457,7 +457,7 @@ void Scheduler::leave_on_first_switch(u32 flags)
// clean up and release locks manually here
g_scheduler_lock.unlock(flags);
auto& scheduler_data = Processor::current().get_scheduler_data();
ASSERT(scheduler_data.m_in_scheduler);
VERIFY(scheduler_data.m_in_scheduler);
scheduler_data.m_in_scheduler = false;
}
@ -465,9 +465,9 @@ void Scheduler::prepare_after_exec()
{
// This is called after exec() when doing a context "switch" into
// the new process. This is called from Processor::assume_context
ASSERT(g_scheduler_lock.own_lock());
VERIFY(g_scheduler_lock.own_lock());
auto& scheduler_data = Processor::current().get_scheduler_data();
ASSERT(!scheduler_data.m_in_scheduler);
VERIFY(!scheduler_data.m_in_scheduler);
scheduler_data.m_in_scheduler = true;
}
@ -475,22 +475,22 @@ void Scheduler::prepare_for_idle_loop()
{
// This is called when the CPU finished setting up the idle loop
// and is about to run it. We need to acquire the scheduler lock
ASSERT(!g_scheduler_lock.own_lock());
VERIFY(!g_scheduler_lock.own_lock());
g_scheduler_lock.lock();
auto& scheduler_data = Processor::current().get_scheduler_data();
ASSERT(!scheduler_data.m_in_scheduler);
VERIFY(!scheduler_data.m_in_scheduler);
scheduler_data.m_in_scheduler = true;
}
Process* Scheduler::colonel()
{
ASSERT(s_colonel_process);
VERIFY(s_colonel_process);
return s_colonel_process;
}
UNMAP_AFTER_INIT void Scheduler::initialize()
{
ASSERT(&Processor::current() != nullptr); // sanity check
VERIFY(&Processor::current() != nullptr); // sanity check
RefPtr<Thread> idle_thread;
g_finalizer_wait_queue = new WaitQueue;
@ -498,8 +498,8 @@ UNMAP_AFTER_INIT void Scheduler::initialize()
g_finalizer_has_work.store(false, AK::MemoryOrder::memory_order_release);
s_colonel_process = Process::create_kernel_process(idle_thread, "colonel", idle_loop, nullptr, 1).leak_ref();
ASSERT(s_colonel_process);
ASSERT(idle_thread);
VERIFY(s_colonel_process);
VERIFY(idle_thread);
idle_thread->set_priority(THREAD_PRIORITY_MIN);
idle_thread->set_name(StringView("idle thread #0"));
@ -514,28 +514,28 @@ UNMAP_AFTER_INIT void Scheduler::set_idle_thread(Thread* idle_thread)
UNMAP_AFTER_INIT Thread* Scheduler::create_ap_idle_thread(u32 cpu)
{
ASSERT(cpu != 0);
VERIFY(cpu != 0);
// This function is called on the bsp, but creates an idle thread for another AP
ASSERT(Processor::id() == 0);
VERIFY(Processor::id() == 0);
ASSERT(s_colonel_process);
VERIFY(s_colonel_process);
Thread* idle_thread = s_colonel_process->create_kernel_thread(idle_loop, nullptr, THREAD_PRIORITY_MIN, String::format("idle thread #%u", cpu), 1 << cpu, false);
ASSERT(idle_thread);
VERIFY(idle_thread);
return idle_thread;
}
void Scheduler::timer_tick(const RegisterState& regs)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Processor::current().in_irq());
VERIFY_INTERRUPTS_DISABLED();
VERIFY(Processor::current().in_irq());
auto current_thread = Processor::current_thread();
if (!current_thread)
return;
// Sanity checks
ASSERT(current_thread->current_trap());
ASSERT(current_thread->current_trap()->regs == &regs);
VERIFY(current_thread->current_trap());
VERIFY(current_thread->current_trap()->regs == &regs);
#if !SCHEDULE_ON_ALL_PROCESSORS
bool is_bsp = Processor::id() == 0;
@ -543,7 +543,7 @@ void Scheduler::timer_tick(const RegisterState& regs)
return; // TODO: This prevents scheduling on other CPUs!
#endif
if (current_thread->process().is_profiling()) {
ASSERT(current_thread->process().perf_events());
VERIFY(current_thread->process().perf_events());
auto& perf_events = *current_thread->process().perf_events();
[[maybe_unused]] auto rc = perf_events.append_with_eip_and_ebp(regs.eip, regs.ebp, PERF_EVENT_SAMPLE, 0, 0);
}
@ -551,16 +551,16 @@ void Scheduler::timer_tick(const RegisterState& regs)
if (current_thread->tick())
return;
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Processor::current().in_irq());
VERIFY_INTERRUPTS_DISABLED();
VERIFY(Processor::current().in_irq());
Processor::current().invoke_scheduler_async();
}
void Scheduler::invoke_async()
{
ASSERT_INTERRUPTS_DISABLED();
VERIFY_INTERRUPTS_DISABLED();
auto& proc = Processor::current();
ASSERT(!proc.in_irq());
VERIFY(!proc.in_irq());
// Since this function is called when leaving critical sections (such
// as a SpinLock), we need to check if we're not already doing this
@ -572,8 +572,8 @@ void Scheduler::invoke_async()
void Scheduler::yield_from_critical()
{
auto& proc = Processor::current();
ASSERT(proc.in_critical());
ASSERT(!proc.in_irq());
VERIFY(proc.in_critical());
VERIFY(!proc.in_irq());
yield(); // Flag a context switch
@ -594,14 +594,14 @@ void Scheduler::idle_loop(void*)
{
auto& proc = Processor::current();
dbgln("Scheduler[{}]: idle loop running", proc.get_id());
ASSERT(are_interrupts_enabled());
VERIFY(are_interrupts_enabled());
for (;;) {
proc.idle_begin();
asm("hlt");
proc.idle_end();
ASSERT_INTERRUPTS_ENABLED();
VERIFY_INTERRUPTS_ENABLED();
#if SCHEDULE_ON_ALL_PROCESSORS
yield();
#else

View file

@ -53,7 +53,7 @@ public:
ALWAYS_INLINE void unlock(u32 prev_flags)
{
ASSERT(is_locked());
VERIFY(is_locked());
m_lock.store(0, AK::memory_order_release);
Processor::current().leave_critical(prev_flags);
}
@ -98,8 +98,8 @@ public:
ALWAYS_INLINE void unlock(u32 prev_flags)
{
ASSERT(m_recursions > 0);
ASSERT(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
VERIFY(m_recursions > 0);
VERIFY(m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current()));
if (--m_recursions == 0)
m_lock.store(0, AK::memory_order_release);
Processor::current().leave_critical(prev_flags);
@ -137,7 +137,7 @@ public:
ScopedSpinLock(LockType& lock)
: m_lock(&lock)
{
ASSERT(m_lock);
VERIFY(m_lock);
m_prev_flags = m_lock->lock();
m_have_lock = true;
}
@ -161,16 +161,16 @@ public:
ALWAYS_INLINE void lock()
{
ASSERT(m_lock);
ASSERT(!m_have_lock);
VERIFY(m_lock);
VERIFY(!m_have_lock);
m_prev_flags = m_lock->lock();
m_have_lock = true;
}
ALWAYS_INLINE void unlock()
{
ASSERT(m_lock);
ASSERT(m_have_lock);
VERIFY(m_lock);
VERIFY(m_have_lock);
m_lock->unlock(m_prev_flags);
m_prev_flags = 0;
m_have_lock = false;
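In the ScopedSpinLock conversion above, lock() returns the previous processor flags and unlock() must hand them back, which is what m_prev_flags carries between the two. A stripped-down analogue of that token-passing shape (a userspace toy; a plain magic value stands in for the real saved interrupt/critical state):

    #include <atomic>
    #include <cassert>

    class SpinLock {
    public:
        unsigned lock()
        {
            while (m_flag.exchange(true, std::memory_order_acquire))
                ; // spin until we take the lock
            return 0xfeed; // stand-in for the saved processor flags
        }
        void unlock(unsigned prev_flags)
        {
            assert(prev_flags == 0xfeed); // caller must return the token from lock()
            m_flag.store(false, std::memory_order_release);
        }
    private:
        std::atomic<bool> m_flag { false };
    };

    int main()
    {
        SpinLock lock;
        auto prev = lock.lock();
        lock.unlock(prev);
    }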

View file

@ -110,7 +110,7 @@ Optional<bool> user_atomic_compare_exchange_relaxed(volatile u32* var, u32& expe
{
if (FlatPtr(var) & 3)
return {}; // not aligned!
ASSERT(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
VERIFY(!Kernel::is_user_range(VirtualAddress(&expected), sizeof(expected)));
bool is_user = Kernel::is_user_range(VirtualAddress(FlatPtr(var)), sizeof(*var));
if (!is_user)
return {};
@ -169,11 +169,11 @@ bool copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
bool is_user = Kernel::is_user_range(VirtualAddress(dest_ptr), n);
if (!is_user)
return false;
ASSERT(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
VERIFY(!Kernel::is_user_range(VirtualAddress(src_ptr), n));
Kernel::SmapDisabler disabler;
void* fault_at;
if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
ASSERT(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
VERIFY(VirtualAddress(fault_at) >= VirtualAddress(dest_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)dest_ptr + n));
klog() << "copy_to_user(" << dest_ptr << ", " << src_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
return false;
}
@ -185,11 +185,11 @@ bool copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
bool is_user = Kernel::is_user_range(VirtualAddress(src_ptr), n);
if (!is_user)
return false;
ASSERT(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
VERIFY(!Kernel::is_user_range(VirtualAddress(dest_ptr), n));
Kernel::SmapDisabler disabler;
void* fault_at;
if (!Kernel::safe_memcpy(dest_ptr, src_ptr, n, fault_at)) {
ASSERT(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
VERIFY(VirtualAddress(fault_at) >= VirtualAddress(src_ptr) && VirtualAddress(fault_at) <= VirtualAddress((FlatPtr)src_ptr + n));
klog() << "copy_from_user(" << dest_ptr << ", " << src_ptr << ", " << n << ") failed at " << VirtualAddress(fault_at);
return false;
}
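Both copies above VERIFY that the source and destination of a user copy sit on opposite sides of the user/kernel boundary. The underlying range predicate amounts to an overflow-safe bounds check; a compact illustration (the 3 GiB split below is an assumption chosen for the example, not necessarily the kernel's layout):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    constexpr uintptr_t kernel_base = 0xc0000000; // assumed user/kernel split

    bool is_user_range(uintptr_t base, size_t n)
    {
        return base + n >= base         // reject wrap-around
            && base + n <= kernel_base; // entirely below kernel memory
    }

    int main()
    {
        assert(is_user_range(0x10000000, 4096)); // user buffer: accepted
        assert(!is_user_range(kernel_base, 16)); // kernel memory: rejected
    }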
@ -361,22 +361,22 @@ extern "C" int __cxa_atexit(void (*)(void*), void*, void*);
[[noreturn]] void __stack_chk_fail()
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
[[noreturn]] void __stack_chk_fail_local()
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
extern "C" int __cxa_atexit(void (*)(void*), void*, void*)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
return 0;
}
[[noreturn]] void __cxa_pure_virtual()
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}

View file

@ -184,8 +184,8 @@ void IDEChannel::start_request(AsyncBlockDeviceRequest& request, bool use_dma, b
void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult result)
{
// NOTE: this may be called from the interrupt handler!
ASSERT(m_current_request);
ASSERT(m_request_lock.is_locked());
VERIFY(m_current_request);
VERIFY(m_request_lock.is_locked());
// Now schedule reading back the buffer as soon as we leave the irq handler.
// This is important so that we can safely write the buffer back,
@ -193,7 +193,7 @@ void IDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult resu
// before Processor::deferred_call_queue returns!
Processor::deferred_call_queue([this, result]() {
dbgln_if(PATA_DEBUG, "IDEChannel::complete_current_request result: {}", (int)result);
ASSERT(m_current_request);
VERIFY(m_current_request);
auto& request = *m_current_request;
m_current_request = nullptr;
@ -334,7 +334,7 @@ void IDEChannel::handle_irq(const RegisterState&)
dbgln_if(PATA_DEBUG, "IDEChannel: Wrote block {}/{}", m_current_request_block_index, m_current_request->block_count());
if (++m_current_request_block_index >= m_current_request->block_count()) {
// We read the last block, flush cache
ASSERT(!m_current_request_flushing_cache);
VERIFY(!m_current_request_flushing_cache);
m_current_request_flushing_cache = true;
m_io_group.io_base().offset(ATA_REG_COMMAND).out<u8>(ATA_CMD_CACHE_FLUSH);
} else {
@ -465,7 +465,7 @@ void IDEChannel::ata_access(Direction direction, bool slave_request, u32 lba, u8
u16 cylinder = 0;
if (lba >= 0x10000000) {
ASSERT(capabilities & ATA_CAP_LBA);
VERIFY(capabilities & ATA_CAP_LBA);
lba_mode = LBAMode::FortyEightBit;
head = 0;
} else if (capabilities & ATA_CAP_LBA) {
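The 0x10000000 threshold above is 2^28: 28-bit LBA addressing runs out at that many sectors, so anything at or beyond it needs the 48-bit mode. With 512-byte sectors that boundary works out to 128 GiB:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        uint64_t max_28bit_bytes = (uint64_t)1 << 28 << 9; // 2^28 sectors * 512 bytes each
        printf("%llu GiB\n", (unsigned long long)(max_28bit_bytes >> 30)); // prints 128 GiB
    }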
@ -532,7 +532,7 @@ void IDEChannel::ata_read_sectors_with_dma(bool slave_request, u16 capabilities)
prdt().offset = m_dma_buffer_page->paddr();
prdt().size = 512 * request.block_count();
ASSERT(prdt().size <= PAGE_SIZE);
VERIFY(prdt().size <= PAGE_SIZE);
// Stop bus master
m_io_group.bus_master_base().out<u8>(0);
@ -574,7 +574,7 @@ bool IDEChannel::ata_do_read_sector()
void IDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
{
auto& request = *m_current_request;
ASSERT(request.block_count() <= 256);
VERIFY(request.block_count() <= 256);
dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors");
auto lba = request.block_index();
@ -597,7 +597,7 @@ void IDEChannel::ata_write_sectors_with_dma(bool slave_request, u16 capabilities
return;
}
ASSERT(prdt().size <= PAGE_SIZE);
VERIFY(prdt().size <= PAGE_SIZE);
// Stop bus master
m_io_group.bus_master_base().out<u8>(0);
@ -623,7 +623,7 @@ void IDEChannel::ata_do_write_sector()
;
u8 status = m_io_group.control_base().in<u8>();
ASSERT(status & ATA_SR_DRQ);
VERIFY(status & ATA_SR_DRQ);
auto in_buffer = request.buffer().offset(m_current_request_block_index * 512);
dbgln_if(PATA_DEBUG, "IDEChannel: Writing 512 bytes (part {}) (status={:#02x})...", m_current_request_block_index, status);
@ -641,7 +641,7 @@ void IDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
{
auto& request = *m_current_request;
ASSERT(request.block_count() <= 256);
VERIFY(request.block_count() <= 256);
u32 start_sector = request.block_index();
u32 count = request.block_count();
dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", count, start_sector);

View file

@ -59,12 +59,12 @@ size_t IDEController::devices_count() const
void IDEController::start_request(const StorageDevice&, AsyncBlockDeviceRequest&)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void IDEController::complete_current_request(AsyncDeviceRequest::RequestResult)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
UNMAP_AFTER_INIT IDEController::IDEController(PCI::Address address, bool force_pio)
@ -108,7 +108,7 @@ RefPtr<StorageDevice> IDEController::device_by_channel_and_position(u32 index) c
case 3:
return m_channels[1].slave_device();
}
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
RefPtr<StorageDevice> IDEController::device(u32 index) const

View file

@ -40,12 +40,12 @@ DiskPartitionMetadata::PartitionType::PartitionType(Array<u8, 16> partition_type
}
UUID DiskPartitionMetadata::PartitionType::to_uuid() const
{
ASSERT(is_uuid());
VERIFY(is_uuid());
return m_partition_type;
}
u8 DiskPartitionMetadata::PartitionType::to_byte_indicator() const
{
ASSERT(!is_uuid());
VERIFY(!is_uuid());
return m_partition_type[0];
}
bool DiskPartitionMetadata::PartitionType::is_uuid() const
@ -63,7 +63,7 @@ DiskPartitionMetadata::DiskPartitionMetadata(u64 start_block, u64 end_block, u8
, m_type(partition_type)
{
ASSERT(m_type.is_valid());
VERIFY(m_type.is_valid());
}
DiskPartitionMetadata::DiskPartitionMetadata(u64 start_block, u64 end_block, Array<u8, 16> partition_type)
@ -72,7 +72,7 @@ DiskPartitionMetadata::DiskPartitionMetadata(u64 start_block, u64 end_block, Arr
, m_type(partition_type)
{
ASSERT(m_type.is_valid());
VERIFY(m_type.is_valid());
}
DiskPartitionMetadata::DiskPartitionMetadata(u64 start_block, u64 end_block, Array<u8, 16> partition_type, UUID unique_guid, u64 special_attributes, String name)
@ -83,8 +83,8 @@ DiskPartitionMetadata::DiskPartitionMetadata(u64 start_block, u64 end_block, Arr
, m_attributes(special_attributes)
, m_name(name)
{
ASSERT(m_type.is_valid());
ASSERT(!m_unique_guid.is_zero());
VERIFY(m_type.is_valid());
VERIFY(!m_unique_guid.is_zero());
}
DiskPartitionMetadata DiskPartitionMetadata::offset(u64 blocks_count) const

View file

@ -44,11 +44,11 @@ void EBRPartitionTable::search_extended_partition(const StorageDevice& device, M
if (limit == 0)
return;
// EBRs should not carry more than 2 partitions (because they need to form a linked list)
ASSERT(checked_ebr.partitions_count() <= 2);
VERIFY(checked_ebr.partitions_count() <= 2);
auto checked_logical_partition = checked_ebr.partition(0);
// If we are pointed to an invalid logical partition, something is seriously wrong.
ASSERT(checked_logical_partition.has_value());
VERIFY(checked_logical_partition.has_value());
m_partitions.append(checked_logical_partition.value().offset(current_block_offset));
if (!checked_ebr.contains_ebr())
return;
@ -66,7 +66,7 @@ EBRPartitionTable::EBRPartitionTable(const StorageDevice& device)
return;
m_valid = true;
ASSERT(partitions_count() == 0);
VERIFY(partitions_count() == 0);
auto& header = this->header();
for (size_t index = 0; index < 4; index++) {

View file

@ -79,7 +79,7 @@ GUIDPartitionTable::GUIDPartitionTable(const StorageDevice& device)
: MBRPartitionTable(device)
{
m_cached_header = ByteBuffer::create_zeroed(m_device->block_size());
ASSERT(partitions_count() == 0);
VERIFY(partitions_count() == 0);
if (!initialize())
m_valid = false;
}
@ -91,7 +91,7 @@ const GUIDPartitionHeader& GUIDPartitionTable::header() const
bool GUIDPartitionTable::initialize()
{
ASSERT(m_cached_header.data() != nullptr);
VERIFY(m_cached_header.data() != nullptr);
auto first_gpt_block = (m_device->block_size() == 512) ? 1 : 0;

View file

@ -53,12 +53,12 @@ size_t RamdiskController::devices_count() const
void RamdiskController::start_request(const StorageDevice&, AsyncBlockDeviceRequest&)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
void RamdiskController::complete_current_request(AsyncDeviceRequest::RequestResult)
{
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
RamdiskController::RamdiskController()

View file

@ -100,7 +100,7 @@ KResultOr<size_t> StorageDevice::read(FileDescription&, size_t offset, UserOrKer
return EIO;
case AsyncDeviceRequest::MemoryFault:
// This should never happen, we're writing to a kernel buffer!
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
default:
break;
}
@ -172,7 +172,7 @@ KResultOr<size_t> StorageDevice::write(FileDescription&, size_t offset, const Us
return EIO;
case AsyncDeviceRequest::MemoryFault:
// This should never happen, we're writing to a kernel buffer!
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
default:
break;
}
@ -193,7 +193,7 @@ KResultOr<size_t> StorageDevice::write(FileDescription&, size_t offset, const Us
return EIO;
case AsyncDeviceRequest::MemoryFault:
// This should never happen, we're writing to a kernel buffer!
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
default:
break;
}
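The switch statements above all follow the same shape: map an async request completion code to an error, treating MemoryFault as impossible because, per the comments, the buffer involved is kernel-owned. A compact stand-alone model (enumerator names mirror AsyncDeviceRequest, but the exact mapping is illustrative, not the kernel's):

```cpp
#include <cerrno>
#include <cstdio>

enum class RequestResult { Success, Failure, MemoryFault };

static int to_error(RequestResult result)
{
    switch (result) {
    case RequestResult::Failure:
        return EIO;
    case RequestResult::MemoryFault:
        // Impossible for kernel-owned buffers; the kernel hard-stops
        // here with VERIFY_NOT_REACHED() instead of returning.
        return EFAULT;
    default:
        return 0;
    }
}

int main()
{
    std::printf("failure -> errno %d\n", to_error(RequestResult::Failure));
}
```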

View file

@ -75,7 +75,7 @@ NonnullRefPtrVector<StorageController> StorageManagement::enumerate_controllers(
NonnullRefPtrVector<StorageDevice> StorageManagement::enumerate_storage_devices() const
{
ASSERT(!m_controllers.is_empty());
VERIFY(!m_controllers.is_empty());
NonnullRefPtrVector<StorageDevice> devices;
for (auto& controller : m_controllers) {
for (size_t device_index = 0; device_index < controller.devices_count(); device_index++) {
@ -110,7 +110,7 @@ OwnPtr<PartitionTable> StorageManagement::try_to_initialize_partition_table(cons
NonnullRefPtrVector<DiskPartition> StorageManagement::enumerate_disk_partitions() const
{
ASSERT(!m_storage_devices.is_empty());
VERIFY(!m_storage_devices.is_empty());
NonnullRefPtrVector<DiskPartition> partitions;
size_t device_index = 0;
for (auto& device : m_storage_devices) {
@ -133,7 +133,7 @@ NonnullRefPtrVector<DiskPartition> StorageManagement::enumerate_disk_partitions(
void StorageManagement::determine_boot_device()
{
ASSERT(!m_controllers.is_empty());
VERIFY(!m_controllers.is_empty());
if (m_boot_argument.starts_with("/dev/")) {
StringView device_name = m_boot_argument.substring_view(5);
Device::for_each([&](Device& device) {
@ -153,8 +153,8 @@ void StorageManagement::determine_boot_device()
void StorageManagement::determine_boot_device_with_partition_uuid()
{
ASSERT(!m_disk_partitions.is_empty());
ASSERT(m_boot_argument.starts_with("PARTUUID="));
VERIFY(!m_disk_partitions.is_empty());
VERIFY(m_boot_argument.starts_with("PARTUUID="));
auto partition_uuid = UUID(m_boot_argument.substring_view(strlen("PARTUUID=")));
@ -197,7 +197,7 @@ bool StorageManagement::initialized()
UNMAP_AFTER_INIT void StorageManagement::initialize(String root_device, bool force_pio)
{
ASSERT(!StorageManagement::initialized());
VERIFY(!StorageManagement::initialized());
s_the = new StorageManagement(root_device, force_pio);
}
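determine_boot_device() branches on the shape of the boot argument: a literal /dev/ path, or a PARTUUID=-prefixed partition UUID handled by determine_boot_device_with_partition_uuid(), which strips the prefix with substring_view. A stand-alone sketch of that parse (the UUID value below is made up):

```cpp
#include <cstdio>
#include <optional>
#include <string>

static std::optional<std::string> parse_partition_uuid(const std::string& boot_argument)
{
    static const std::string prefix = "PARTUUID=";
    if (boot_argument.compare(0, prefix.size(), prefix) != 0) // starts_with
        return std::nullopt;
    return boot_argument.substr(prefix.size());
}

int main()
{
    auto uuid = parse_partition_uuid("PARTUUID=01234567-89ab-cdef-0123-456789abcdef");
    if (uuid)
        std::printf("boot by partition UUID: %s\n", uuid->c_str());
}
```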

View file

@ -87,7 +87,7 @@ static Handler s_syscall_table[] = {
int handle(RegisterState& regs, u32 function, u32 arg1, u32 arg2, u32 arg3)
{
ASSERT_INTERRUPTS_ENABLED();
VERIFY_INTERRUPTS_ENABLED();
auto current_thread = Thread::current();
auto& process = current_thread->process();
current_thread->did_syscall();
@ -106,7 +106,7 @@ int handle(RegisterState& regs, u32 function, u32 arg1, u32 arg2, u32 arg3)
process.sys$exit((int)arg1);
else
process.sys$exit_thread(arg1);
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
return 0;
}
@ -138,7 +138,7 @@ void syscall_handler(TrapFrame* trap)
{
auto& regs = *trap->regs;
auto current_thread = Thread::current();
ASSERT(current_thread->previous_mode() == Thread::PreviousMode::UserMode);
VERIFY(current_thread->previous_mode() == Thread::PreviousMode::UserMode);
auto& process = current_thread->process();
if (auto tracer = process.tracer(); tracer && tracer->is_tracing_syscalls()) {
@ -211,12 +211,12 @@ void syscall_handler(TrapFrame* trap)
current_thread->check_dispatch_pending_signal();
// If the previous mode somehow changed, something is seriously messed up...
ASSERT(current_thread->previous_mode() == Thread::PreviousMode::UserMode);
VERIFY(current_thread->previous_mode() == Thread::PreviousMode::UserMode);
// Check if we're supposed to return to userspace or just die.
current_thread->die_if_needed();
ASSERT(!g_scheduler_lock.own_lock());
VERIFY(!g_scheduler_lock.own_lock());
}
}
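handle() dispatches through the s_syscall_table visible in the hunk header above. A toy model of that table dispatch, with the bounds check that has to precede the indexing (handler names and the error value are made up for the sketch):

```cpp
#include <cstdio>

using Handler = int (*)(int, int, int);

static int sys_getpid(int, int, int) { return 42; }
static int sys_noop(int, int, int) { return 0; }

static Handler s_table[] = { sys_getpid, sys_noop };
static constexpr int s_table_size = sizeof(s_table) / sizeof(s_table[0]);

static int handle(int function, int a1, int a2, int a3)
{
    // Reject out-of-range syscall numbers before indexing the table.
    if (function < 0 || function >= s_table_size)
        return -38; // ENOSYS-style rejection
    return s_table[function](a1, a2, a3);
}

int main()
{
    std::printf("syscall 0 -> %d\n", handle(0, 0, 0, 0));
}
```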

View file

@ -195,7 +195,7 @@ static KResultOr<RequiredLoadRange> get_required_load_range(FileDescription& pro
return IterationDecision::Continue;
});
ASSERT(range.end > range.start);
VERIFY(range.end > range.start);
return range;
};
@ -283,15 +283,15 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
FlatPtr load_base_address = 0;
String elf_name = object_description.absolute_path();
ASSERT(!Processor::current().in_critical());
VERIFY(!Processor::current().in_critical());
MemoryManager::enter_space(*new_space);
KResult ph_load_result = KSuccess;
elf_image.for_each_program_header([&](const ELF::Image::ProgramHeader& program_header) {
if (program_header.type() == PT_TLS) {
ASSERT(should_allocate_tls == ShouldAllocateTls::Yes);
ASSERT(program_header.size_in_memory());
VERIFY(should_allocate_tls == ShouldAllocateTls::Yes);
VERIFY(program_header.size_in_memory());
if (!elf_image.is_within_image(program_header.raw_data(), program_header.size_in_image())) {
dbgln("Shenanigans! ELF PT_TLS header sneaks outside of executable.");
@ -325,8 +325,8 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
if (program_header.is_writable()) {
// Writable section: create a copy in memory.
ASSERT(program_header.size_in_memory());
ASSERT(program_header.alignment() == PAGE_SIZE);
VERIFY(program_header.size_in_memory());
VERIFY(program_header.alignment() == PAGE_SIZE);
if (!elf_image.is_within_image(program_header.raw_data(), program_header.size_in_image())) {
dbgln("Shenanigans! Writable ELF PT_LOAD header sneaks outside of executable.");
@ -368,8 +368,8 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Space> new_space, Fil
}
// Non-writable section: map the executable itself in memory.
ASSERT(program_header.size_in_memory());
ASSERT(program_header.alignment() == PAGE_SIZE);
VERIFY(program_header.size_in_memory());
VERIFY(program_header.alignment() == PAGE_SIZE);
int prot = 0;
if (program_header.is_readable())
prot |= PROT_READ;
@ -454,17 +454,17 @@ KResultOr<LoadResult> Process::load(NonnullRefPtr<FileDescription> main_program_
return interpreter_load_result.error();
// TLS allocation will be done in userspace by the loader
ASSERT(!interpreter_load_result.value().tls_region);
ASSERT(!interpreter_load_result.value().tls_alignment);
ASSERT(!interpreter_load_result.value().tls_size);
VERIFY(!interpreter_load_result.value().tls_region);
VERIFY(!interpreter_load_result.value().tls_alignment);
VERIFY(!interpreter_load_result.value().tls_size);
return interpreter_load_result;
}
KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Vector<String> arguments, Vector<String> environment, RefPtr<FileDescription> interpreter_description, Thread*& new_main_thread, u32& prev_flags, const Elf32_Ehdr& main_program_header)
{
ASSERT(is_user_process());
ASSERT(!Processor::current().in_critical());
VERIFY(is_user_process());
VERIFY(!Processor::current().in_critical());
auto path = main_program_description->absolute_path();
dbgln_if(EXEC_DEBUG, "do_exec: {}", path);
@ -522,7 +522,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
auto signal_trampoline_region = m_space->allocate_region_with_vmobject(signal_trampoline_range.value(), g_signal_trampoline_region->vmobject(), 0, "Signal trampoline", PROT_READ | PROT_EXEC, true);
if (signal_trampoline_region.is_error()) {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
signal_trampoline_region.value()->set_syscall_region(true);
@ -557,7 +557,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
int main_program_fd = -1;
if (interpreter_description) {
main_program_fd = alloc_fd();
ASSERT(main_program_fd >= 0);
VERIFY(main_program_fd >= 0);
main_program_description->seek(0, SEEK_SET);
main_program_description->set_readable(true);
m_fds[main_program_fd].set(move(main_program_description), FD_CLOEXEC);
@ -572,7 +572,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
return IterationDecision::Break;
});
}
ASSERT(new_main_thread);
VERIFY(new_main_thread);
auto auxv = generate_auxiliary_vector(load_result.load_base, load_result.entry_eip, m_uid, m_euid, m_gid, m_egid, path, main_program_fd);
@ -604,7 +604,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
auto tsr_result = new_main_thread->make_thread_specific_region({});
if (tsr_result.is_error()) {
// FIXME: We cannot fail this late. Refactor this so the allocation happens before we commit to the new executable.
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
new_main_thread->reset_fpu_state();
@ -630,8 +630,8 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
}
u32 lock_count_to_restore;
[[maybe_unused]] auto rc = big_lock().force_unlock_if_locked(lock_count_to_restore);
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Processor::current().in_critical());
VERIFY_INTERRUPTS_DISABLED();
VERIFY(Processor::current().in_critical());
return KSuccess;
}
@ -727,7 +727,7 @@ KResultOr<RefPtr<FileDescription>> Process::find_elf_interpreter_for_executable(
auto interpreter_description = interp_result.value();
auto interp_metadata = interpreter_description->metadata();
ASSERT(interpreter_description->inode());
VERIFY(interpreter_description->inode());
// Validate the program interpreter as a valid elf binary.
// If your program interpreter is a #! file or something, it's time to stop playing games :)
@ -805,7 +805,7 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
if (metadata.size < 3)
return ENOEXEC;
ASSERT(description->inode());
VERIFY(description->inode());
// Read the first page of the program into memory so we can validate the binfmt of it
char first_page[PAGE_SIZE];
@ -856,20 +856,20 @@ KResult Process::exec(String path, Vector<String> arguments, Vector<String> envi
if (result.is_error())
return result;
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Processor::current().in_critical());
VERIFY_INTERRUPTS_DISABLED();
VERIFY(Processor::current().in_critical());
auto current_thread = Thread::current();
if (current_thread == new_main_thread) {
// We need to enter the scheduler lock before changing the state
// and it will be released after the context switch into that
// thread. We should also still be in our critical section
ASSERT(!g_scheduler_lock.own_lock());
ASSERT(Processor::current().in_critical() == 1);
VERIFY(!g_scheduler_lock.own_lock());
VERIFY(Processor::current().in_critical() == 1);
g_scheduler_lock.lock();
current_thread->set_state(Thread::State::Running);
Processor::assume_context(*current_thread, prev_flags);
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
Processor::current().leave_critical(prev_flags);
@ -926,7 +926,7 @@ int Process::sys$execve(Userspace<const Syscall::SC_execve_params*> user_params)
return -EFAULT;
auto result = exec(move(path), move(arguments), move(environment));
ASSERT(result.is_error()); // We should never continue after a successful exec!
VERIFY(result.is_error()); // We should never continue after a successful exec!
return result.error();
}
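The final VERIFY in sys$execve() encodes a contract rather than a local invariant: a successful exec replaces the program image and never returns, so any result that does come back must be an error. A stand-alone model of that contract (the KResult struct here is a simplified stand-in, not the kernel's type):

```cpp
#include <cassert>
#include <cstdio>

struct KResult {
    int error_code; // 0 would mean success
    bool is_error() const { return error_code != 0; }
    int error() const { return error_code; }
};

static KResult exec_stub(const char* path)
{
    // In the kernel, success would never come back to the caller.
    std::printf("pretending to exec %s and failing\n", path);
    return { 2 /* an ENOENT-style error, made up for the sketch */ };
}

int main()
{
    auto result = exec_stub("/bin/ls");
    assert(result.is_error()); // we should never continue after a successful exec
    return result.error();
}
```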

View file

@ -37,7 +37,7 @@ void Process::sys$exit(int status)
m_termination_signal = 0;
die();
Thread::current()->die_if_needed();
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
}
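The VERIFY_NOT_REACHED() after die_if_needed() marks a call sequence that must not return. In user-space C++ the same claim can be made checkable by the compiler with [[noreturn]], as in this sketch:

```cpp
#include <cstdio>
#include <cstdlib>

// Marking the terminating function [[noreturn]] lets the compiler
// verify that control really cannot continue past the call.
[[noreturn]] static void exit_current_process(int status)
{
    std::printf("exiting with status %d\n", status);
    std::exit(status);
}

int main()
{
    exit_current_process(0);
    // Control cannot reach this point; in the kernel, a
    // VERIFY_NOT_REACHED() here makes that claim executable.
}
```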

View file

@ -62,7 +62,7 @@ FutexQueue::~FutexQueue()
void FutexQueue::vmobject_deleted(VMObject& vmobject)
{
ASSERT(m_is_global); // If we got called we must be a global futex
VERIFY(m_is_global); // If we got called we must be a global futex
// Because we're taking ourselves out of the global queue, we need
// to make sure we have at least a reference until we're done
NonnullRefPtr<FutexQueue> own_ref(*this);
@ -88,7 +88,7 @@ void FutexQueue::vmobject_deleted(VMObject& vmobject)
dbgln("Futex @ {} unblocked {} waiters due to vmobject free", this, wake_count);
}
ASSERT(did_wake_all); // No one should be left behind...
VERIFY(did_wake_all); // No one should be left behind...
}
void Process::clear_futex_queues_on_exec()
@ -97,7 +97,7 @@ void Process::clear_futex_queues_on_exec()
for (auto& it : m_futex_queues) {
bool did_wake_all;
it.value->wake_all(did_wake_all);
ASSERT(did_wake_all); // No one should be left behind...
VERIFY(did_wake_all); // No one should be left behind...
}
m_futex_queues.clear();
}
@ -172,16 +172,16 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
if (create_if_not_found) {
// TODO: is there a better way than setting and finding it again?
auto result = global_queues.set(&vmobject, {});
ASSERT(result == AK::HashSetResult::InsertedNewEntry);
VERIFY(result == AK::HashSetResult::InsertedNewEntry);
it = global_queues.find(&vmobject);
ASSERT(it != global_queues.end());
VERIFY(it != global_queues.end());
return &it->value;
}
return nullptr;
};
auto find_futex_queue = [&](VMObject* vmobject, FlatPtr user_address_or_offset, bool create_if_not_found) -> RefPtr<FutexQueue> {
ASSERT(is_private || vmobject);
VERIFY(is_private || vmobject);
auto* queues = is_private ? &m_futex_queues : find_global_futex_queues(*vmobject, create_if_not_found);
if (!queues)
return {};
@ -191,7 +191,7 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
if (create_if_not_found) {
auto futex_queue = adopt(*new FutexQueue(user_address_or_offset, vmobject));
auto result = queues->set(user_address_or_offset, futex_queue);
ASSERT(result == AK::HashSetResult::InsertedNewEntry);
VERIFY(result == AK::HashSetResult::InsertedNewEntry);
return futex_queue;
}
return {};
@ -234,7 +234,7 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
auto futex_queue = find_futex_queue(vmobject.ptr(), user_address_or_offset, true);
ASSERT(futex_queue);
VERIFY(futex_queue);
// We need to release the lock before blocking. But we have a reference
// to the FutexQueue so that we can keep it alive.
@ -358,13 +358,13 @@ int Process::sys$futex(Userspace<const Syscall::SC_futex_params*> user_params)
return do_requeue(params.val3);
case FUTEX_WAIT_BITSET:
ASSERT(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAIT
VERIFY(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAIT
if (params.val3 == 0)
return -EINVAL;
return do_wait(params.val3);
case FUTEX_WAKE_BITSET:
ASSERT(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAKE
VERIFY(params.val3 != FUTEX_BITSET_MATCH_ANY); // we should have turned it into FUTEX_WAKE
if (params.val3 == 0)
return -EINVAL;
return do_wake(vmobject.ptr(), user_address_or_offset, params.val, params.val3);
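On the TODO above ("is there a better way than setting and finding it again?"): with the standard library, though not necessarily with AK's HashMap API of the time, try_emplace answers it, since a single call inserts the entry if absent and returns an iterator to it either way. A sketch under those assumptions:

```cpp
#include <cstdio>
#include <memory>
#include <unordered_map>

struct FutexQueue { };

int main()
{
    std::unordered_map<unsigned long, std::shared_ptr<FutexQueue>> queues;
    unsigned long user_address = 0x1234; // made-up key for the sketch
    // One lookup: inserts if absent, returns the existing entry otherwise.
    auto [it, inserted] = queues.try_emplace(user_address, std::make_shared<FutexQueue>());
    std::printf("inserted new entry: %s\n", inserted ? "yes" : "no");
    std::shared_ptr<FutexQueue> queue = it->second; // valid either way
    (void)queue;
}
```

Note the temporary queue argument is still constructed when the key already exists; a real fix would construct it lazily.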

View file

@ -36,7 +36,7 @@ int Process::sys$get_stack_bounds(FlatPtr* user_stack_base, size_t* user_stack_s
auto* stack_region = space().find_region_containing(Range { VirtualAddress(stack_pointer), 1 });
// The syscall handler should have killed us if we had an invalid stack pointer.
ASSERT(stack_region);
VERIFY(stack_region);
FlatPtr stack_base = stack_region->range().base().get();
size_t stack_size = stack_region->size();

View file

@ -47,7 +47,7 @@ KResult Process::do_killpg(ProcessGroupID pgrp, int signal)
{
InterruptDisabler disabler;
ASSERT(pgrp >= 0);
VERIFY(pgrp >= 0);
// Send the signal to all processes in the given group.
if (pgrp == 0) {
@ -136,7 +136,7 @@ int Process::sys$kill(pid_t pid_or_pgid, int signal)
if (pid_or_pgid == m_pid.value()) {
return do_killself(signal);
}
ASSERT(pid_or_pgid >= 0);
VERIFY(pid_or_pgid >= 0);
ScopedSpinLock lock(g_processes_lock);
auto peer = Process::from_pid(pid_or_pgid);
if (!peer)

View file

@ -465,7 +465,7 @@ int Process::sys$munmap(void* addr, size_t size)
if (!whole_region->is_mmap())
return -EPERM;
bool success = space().deallocate_region(*whole_region);
ASSERT(success);
VERIFY(success);
return 0;
}
@ -557,7 +557,7 @@ void* Process::sys$allocate_tls(size_t size)
main_thread = &thread;
return IterationDecision::Break;
});
ASSERT(main_thread);
VERIFY(main_thread);
auto range = space().allocate_range({}, size);
if (!range.has_value())

View file

@ -80,7 +80,7 @@ int Process::sys$module_load(Userspace<const char*> user_path, size_t path_lengt
if (!section.size())
return IterationDecision::Continue;
auto* section_storage = section_storage_by_name.get(section.name()).value_or(nullptr);
ASSERT(section_storage);
VERIFY(section_storage);
section.relocations().for_each_relocation([&](const ELF::Image::Relocation& relocation) {
auto& patch_ptr = *reinterpret_cast<ptrdiff_t*>(section_storage + relocation.offset());
switch (relocation.type()) {
@ -100,7 +100,7 @@ int Process::sys$module_load(Userspace<const char*> user_path, size_t path_lengt
if (relocation.symbol().bind() == STB_LOCAL) {
auto* section_storage_containing_symbol = section_storage_by_name.get(relocation.symbol().section().name()).value_or(nullptr);
ASSERT(section_storage_containing_symbol);
VERIFY(section_storage_containing_symbol);
u32 symbol_address = (ptrdiff_t)(section_storage_containing_symbol + relocation.symbol().value());
if (symbol_address == 0)
missing_symbols = true;
@ -113,7 +113,7 @@ int Process::sys$module_load(Userspace<const char*> user_path, size_t path_lengt
dbgln(" Symbol address: {:p}", symbol_address);
patch_ptr += symbol_address;
} else {
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
break;
}

View file

@ -205,7 +205,7 @@ KResult Process::poke_user_data(Userspace<u32*> address, u32 data)
if (region->is_shared()) {
// If the region is shared, we change its vmobject to a PrivateInodeVMObject
// to prevent the write operation from changing any shared inode data
ASSERT(region->vmobject().is_shared_inode());
VERIFY(region->vmobject().is_shared_inode());
region->set_vmobject(PrivateInodeVMObject::create_with_inode(static_cast<SharedInodeVMObject&>(region->vmobject()).inode()));
region->set_shared(false);
}

View file

@ -226,15 +226,15 @@ int Process::sys$poll(Userspace<const Syscall::SC_poll_params*> user_params)
pfd.revents |= POLLNVAL;
} else {
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::Read) {
ASSERT(pfd.events & POLLIN);
VERIFY(pfd.events & POLLIN);
pfd.revents |= POLLIN;
}
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::ReadPriority) {
ASSERT(pfd.events & POLLPRI);
VERIFY(pfd.events & POLLPRI);
pfd.revents |= POLLPRI;
}
if ((u32)fds_entry.unblocked_flags & (u32)Thread::FileBlocker::BlockFlags::Write) {
ASSERT(pfd.events & POLLOUT);
VERIFY(pfd.events & POLLOUT);
pfd.revents |= POLLOUT;
}
}

View file

@ -122,7 +122,7 @@ int Process::sys$accept(int accepting_socket_fd, Userspace<sockaddr*> user_addre
}
}
auto accepted_socket = socket.accept();
ASSERT(accepted_socket);
VERIFY(accepted_socket);
if (user_address) {
u8 address_buffer[sizeof(sockaddr_un)];
@ -263,7 +263,7 @@ ssize_t Process::sys$recvmsg(int sockfd, Userspace<struct msghdr*> user_msg, int
int msg_flags = 0;
if (result.value() > iovs[0].iov_len) {
ASSERT(socket.type() != SOCK_STREAM);
VERIFY(socket.type() != SOCK_STREAM);
msg_flags |= MSG_TRUNC;
}

View file

@ -102,7 +102,7 @@ void Process::sys$exit_thread(Userspace<void*> exit_value)
}
Thread::current()->exit(reinterpret_cast<void*>(exit_value.ptr()));
ASSERT_NOT_REACHED();
VERIFY_NOT_REACHED();
}
int Process::sys$detach_thread(pid_t tid)

View file

@ -125,7 +125,7 @@ int Process::sys$unveil(Userspace<const Syscall::SC_unveil_params*> user_params)
lexical_path.parts().end(),
{ new_unveiled_path, (UnveilAccess)new_permissions, true },
[](auto& parent, auto& it) -> Optional<UnveilMetadata> { return UnveilMetadata { String::formatted("{}/{}", parent.path(), *it), parent.permissions(), false, parent.permissions_inherited_from_root() }; });
ASSERT(m_veil_state != VeilState::Locked);
VERIFY(m_veil_state != VeilState::Locked);
m_veil_state = VeilState::Dropped;
return 0;
}
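The VERIFY(m_veil_state != VeilState::Locked) holds because a locked veil is presumably rejected earlier in sys$unveil (not shown in this hunk), so by this point the state can only move to Dropped. A toy model of that state machine (error handling simplified):

```cpp
#include <cassert>
#include <cstdio>

enum class VeilState { None, Dropped, Locked };

static bool try_unveil(VeilState& state)
{
    if (state == VeilState::Locked)
        return false; // the kernel rejects this with an error instead
    state = VeilState::Dropped;
    return true;
}

int main()
{
    VeilState state = VeilState::None;
    bool ok = try_unveil(state);
    assert(ok);
    state = VeilState::Locked;
    bool rejected = !try_unveil(state);
    assert(rejected);
    std::printf("veil locked; unveil() no longer permitted\n");
}
```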

Some files were not shown because too many files have changed in this diff.