Everywhere: Rename ASSERT => VERIFY

(...and ASSERT_NOT_REACHED => VERIFY_NOT_REACHED)

Since all of these checks are done in release builds as well,
let's rename them to VERIFY to prevent confusion, as everyone is
used to assertions being compiled out in release.

We can introduce a new ASSERT macro that is specifically for debug
checks, but I'm doing this wholesale conversion first since we've
accumulated thousands of these already, and it's not immediately
obvious which ones are suitable for ASSERT.
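For context, the distinction the commit message draws can be sketched roughly as follows. This is a minimal illustration, not the actual definitions from AK/Assertions.h; in particular, __verify_failed is a hypothetical reporting hook, and the real macros differ in detail:

// Minimal sketch only; the real definitions live in AK/Assertions.h.
// __verify_failed is a hypothetical hook, not an actual Serenity function.
[[noreturn]] void __verify_failed(const char* expr, const char* file, unsigned line);

// VERIFY is compiled into every build, including release.
#define VERIFY(expr)                                        \
    do {                                                    \
        if (!static_cast<bool>(expr))                       \
            __verify_failed(#expr, __FILE__, __LINE__);     \
    } while (0)

#define VERIFY_NOT_REACHED() VERIFY(false)

// The debug-only ASSERT floated in the commit message would compile
// away entirely outside debug builds:
#ifdef DEBUG
#    define ASSERT(expr) VERIFY(expr)
#else
#    define ASSERT(expr) ((void)0)
#endif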
commit 5d180d1f99
parent b33a6a443e
Author: Andreas Kling
Date:   2021-02-23 20:42:32 +01:00

725 changed files with 3448 additions and 3448 deletions

Kernel/VM/PhysicalRegion.cpp

@@ -49,7 +49,7 @@ PhysicalRegion::PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper)
 void PhysicalRegion::expand(PhysicalAddress lower, PhysicalAddress upper)
 {
-    ASSERT(!m_pages);
+    VERIFY(!m_pages);
     m_lower = lower;
     m_upper = upper;
@@ -57,7 +57,7 @@ void PhysicalRegion::expand(PhysicalAddress lower, PhysicalAddress upper)
 unsigned PhysicalRegion::finalize_capacity()
 {
-    ASSERT(!m_pages);
+    VERIFY(!m_pages);
     m_pages = (m_upper.get() - m_lower.get()) / PAGE_SIZE;
     m_bitmap.grow(m_pages, false);
@@ -67,8 +67,8 @@ unsigned PhysicalRegion::finalize_capacity()
 NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment)
 {
-    ASSERT(m_pages);
-    ASSERT(m_used != m_pages);
+    VERIFY(m_pages);
+    VERIFY(m_used != m_pages);
     NonnullRefPtrVector<PhysicalPage> physical_pages;
     physical_pages.ensure_capacity(count);
@@ -82,11 +82,11 @@ NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(siz
 unsigned PhysicalRegion::find_contiguous_free_pages(size_t count, size_t physical_alignment)
 {
-    ASSERT(count != 0);
-    ASSERT(physical_alignment % PAGE_SIZE == 0);
+    VERIFY(count != 0);
+    VERIFY(physical_alignment % PAGE_SIZE == 0);
     // search from the last page we allocated
     auto range = find_and_allocate_contiguous_range(count, physical_alignment / PAGE_SIZE);
-    ASSERT(range.has_value());
+    VERIFY(range.has_value());
     return range.value();
 }
@@ -100,8 +100,8 @@ Optional<unsigned> PhysicalRegion::find_one_free_page()
             Checked<FlatPtr> local_offset = m_recently_returned[index].get();
             local_offset -= m_lower.get();
             m_recently_returned.remove(index);
-            ASSERT(!local_offset.has_overflow());
-            ASSERT(local_offset.value() < (FlatPtr)(m_pages * PAGE_SIZE));
+            VERIFY(!local_offset.has_overflow());
+            VERIFY(local_offset.value() < (FlatPtr)(m_pages * PAGE_SIZE));
             return local_offset.value() / PAGE_SIZE;
         }
         return {};
@@ -121,7 +121,7 @@ Optional<unsigned> PhysicalRegion::find_one_free_page()
 Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t count, unsigned alignment)
 {
-    ASSERT(count != 0);
+    VERIFY(count != 0);
     size_t found_pages_count = 0;
     // TODO: Improve how we deal with alignment != 1
     auto first_index = m_bitmap.find_longest_range_of_unset_bits(count + alignment - 1, found_pages_count);
@@ -146,7 +146,7 @@ Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t cou
 RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
 {
-    ASSERT(m_pages);
+    VERIFY(m_pages);
     auto free_index = find_one_free_page();
     if (!free_index.has_value())
@@ -157,16 +157,16 @@ RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
 void PhysicalRegion::free_page_at(PhysicalAddress addr)
 {
-    ASSERT(m_pages);
+    VERIFY(m_pages);
     if (m_used == 0) {
-        ASSERT_NOT_REACHED();
+        VERIFY_NOT_REACHED();
     }
     Checked<FlatPtr> local_offset = addr.get();
     local_offset -= m_lower.get();
-    ASSERT(!local_offset.has_overflow());
-    ASSERT(local_offset.value() < (FlatPtr)(m_pages * PAGE_SIZE));
+    VERIFY(!local_offset.has_overflow());
+    VERIFY(local_offset.value() < (FlatPtr)(m_pages * PAGE_SIZE));
     auto page = local_offset.value() / PAGE_SIZE;
     m_bitmap.set(page, false);