
Everywhere: Rename ASSERT => VERIFY

(...and ASSERT_NOT_REACHED => VERIFY_NOT_REACHED)

Since all of these checks are done in release builds as well,
let's rename them to VERIFY to prevent confusion, as everyone is
used to assertions being compiled out in release.

We can introduce a new ASSERT macro that is specifically for debug
checks, but I'm doing this wholesale conversion first since we've
accumulated thousands of these already, and it's not immediately
obvious which ones are suitable for ASSERT.
Andreas Kling 2021-02-23 20:42:32 +01:00
parent b33a6a443e
commit 5d180d1f99
725 changed files with 3448 additions and 3448 deletions
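
The split the commit message describes can be sketched in C++ as follows. This is an illustrative sketch only: the helper name verification_failed and the NDEBUG gating are assumptions for the example, not the actual AK/Assertions.h definitions.

#include <cstdio>
#include <cstdlib>

// Illustrative only: not the real SerenityOS macros.
[[noreturn]] inline void verification_failed(char const* expr, char const* file, int line)
{
    // Report the failed check and abort, in debug and release builds alike.
    std::fprintf(stderr, "VERIFICATION FAILED: %s at %s:%d\n", expr, file, line);
    std::abort();
}

// VERIFY is always compiled in, so the checks below survive release builds.
#define VERIFY(expr)                                        \
    do {                                                    \
        if (!(expr))                                        \
            verification_failed(#expr, __FILE__, __LINE__); \
    } while (0)

#define VERIFY_NOT_REACHED() verification_failed("not reached", __FILE__, __LINE__)

// A possible future debug-only ASSERT: checked in debug builds, compiled out in release.
#ifdef NDEBUG
#    define ASSERT(expr) ((void)0)
#else
#    define ASSERT(expr) VERIFY(expr)
#endif

With definitions along these lines, every call site touched in the diff below keeps its check in release builds after the rename.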

@@ -56,7 +56,7 @@ public:
{
// To keep the alignment of the memory passed in, place the bitmap
// at the end of the memory block.
-ASSERT(m_total_chunks * CHUNK_SIZE + (m_total_chunks + 7) / 8 <= memory_size);
+VERIFY(m_total_chunks * CHUNK_SIZE + (m_total_chunks + 7) / 8 <= memory_size);
}
~Heap()
{
@@ -108,13 +108,13 @@ public:
if (!ptr)
return;
auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
-ASSERT((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
-ASSERT((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
+VERIFY((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
+VERIFY((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
FlatPtr start = ((FlatPtr)a - (FlatPtr)m_chunks) / CHUNK_SIZE;
m_bitmap.set_range(start, a->allocation_size_in_chunks, false);
-ASSERT(m_allocated_chunks >= a->allocation_size_in_chunks);
+VERIFY(m_allocated_chunks >= a->allocation_size_in_chunks);
m_allocated_chunks -= a->allocation_size_in_chunks;
if constexpr (HEAP_SCRUB_BYTE_FREE != 0) {
@@ -129,8 +129,8 @@ public:
return h.allocate(new_size);
auto* a = (AllocationHeader*)((((u8*)ptr) - sizeof(AllocationHeader)));
-ASSERT((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
-ASSERT((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
+VERIFY((u8*)a >= m_chunks && (u8*)ptr < m_chunks + m_total_chunks * CHUNK_SIZE);
+VERIFY((u8*)a + a->allocation_size_in_chunks * CHUNK_SIZE <= m_chunks + m_total_chunks * CHUNK_SIZE);
size_t old_size = a->allocation_size_in_chunks * CHUNK_SIZE;
@@ -319,7 +319,7 @@ public:
return;
}
}
-ASSERT_NOT_REACHED();
+VERIFY_NOT_REACHED();
}
void* reallocate(void* ptr, size_t new_size)
@@ -330,12 +330,12 @@ public:
if (subheap->heap.contains(ptr))
return subheap->heap.reallocate(ptr, new_size, *this);
}
-ASSERT_NOT_REACHED();
+VERIFY_NOT_REACHED();
}
HeapType& add_subheap(void* memory, size_t memory_size)
{
-ASSERT(memory_size > sizeof(SubHeap));
+VERIFY(memory_size > sizeof(SubHeap));
// Place the SubHeap structure at the beginning of the new memory block
memory_size -= sizeof(SubHeap);

@@ -86,7 +86,7 @@ public:
void dealloc(void* ptr)
{
-ASSERT(ptr);
+VERIFY(ptr);
if (ptr < m_base || ptr >= m_end) {
kfree(ptr);
return;
@@ -159,7 +159,7 @@ void* slab_alloc(size_t slab_size)
return s_slab_allocator_64.alloc();
if (slab_size <= 128)
return s_slab_allocator_128.alloc();
-ASSERT_NOT_REACHED();
+VERIFY_NOT_REACHED();
}
void slab_dealloc(void* ptr, size_t slab_size)
@@ -172,7 +172,7 @@ void slab_dealloc(void* ptr, size_t slab_size)
return s_slab_allocator_64.dealloc(ptr);
if (slab_size <= 128)
return s_slab_allocator_128.dealloc(ptr);
-ASSERT_NOT_REACHED();
+VERIFY_NOT_REACHED();
}
void slab_alloc_stats(Function<void(size_t slab_size, size_t allocated, size_t free)> callback)

@@ -71,7 +71,7 @@ struct KmallocGlobalHeap {
klog() << "kmalloc(): Cannot expand heap before MM is initialized!";
return false;
}
-ASSERT(!m_adding);
+VERIFY(!m_adding);
TemporaryChange change(m_adding, true);
// At this point we have very little memory left. Any attempt to
// kmalloc() could fail, so use our backup memory first, so we
@@ -231,7 +231,7 @@ void* kmalloc_eternal(size_t size)
ScopedSpinLock lock(s_lock);
void* ptr = s_next_eternal_ptr;
s_next_eternal_ptr += size;
-ASSERT(s_next_eternal_ptr < s_end_of_eternal_range);
+VERIFY(s_next_eternal_ptr < s_end_of_eternal_range);
g_kmalloc_bytes_eternal += size;
return ptr;
}