mirror of https://github.com/RGBCube/serenity (synced 2025-07-27 00:37:45 +00:00)
Everywhere: Rename ASSERT => VERIFY
(...and ASSERT_NOT_REACHED => VERIFY_NOT_REACHED)

Since all of these checks are done in release builds as well, let's rename them to VERIFY to prevent confusion, as everyone is used to assertions being compiled out in release.

We can introduce a new ASSERT macro that is specifically for debug checks, but I'm doing this wholesale conversion first since we've accumulated thousands of these already, and it's not immediately obvious which ones are suitable for ASSERT.
This commit is contained in:
parent b33a6a443e
commit 5d180d1f99
725 changed files with 3448 additions and 3448 deletions
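For context on the distinction the commit message draws, here is a minimal sketch of a debug-only ASSERT next to an always-on VERIFY. This is an illustration only, not the actual AK/Assertions.h definitions; the helper name verification_failed is made up for the example.

// Illustrative sketch: VERIFY fires in every build; ASSERT keeps the
// classic semantics of being compiled out when NDEBUG is defined.
#include <cstdio>
#include <cstdlib>

[[noreturn]] inline void verification_failed(const char* expr, const char* file, int line)
{
    std::fprintf(stderr, "VERIFICATION FAILED: %s at %s:%d\n", expr, file, line);
    std::abort();
}

#define VERIFY(expr)                                        \
    do {                                                    \
        if (!(expr))                                        \
            verification_failed(#expr, __FILE__, __LINE__); \
    } while (0)

#ifdef NDEBUG
#    define ASSERT(expr) ((void)0)
#else
#    define ASSERT(expr) VERIFY(expr)
#endif

Under this split, a check such as VERIFY(paddr.page_base() == paddr) keeps running in release kernels, which is exactly the behavior the renamed macro is meant to signal.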
@@ -114,7 +114,7 @@ AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
     : VMObject(size)
     , m_volatile_ranges_cache({ 0, page_count() })
 {
-    ASSERT(paddr.page_base() == paddr);
+    VERIFY(paddr.page_base() == paddr);
     for (size_t i = 0; i < page_count(); ++i)
         physical_pages()[i] = PhysicalPage::create(paddr.offset(i * PAGE_SIZE), false, false);
 }
@@ -136,7 +136,7 @@ AnonymousVMObject::AnonymousVMObject(const AnonymousVMObject& other)
     , m_shared_committed_cow_pages(other.m_shared_committed_cow_pages) // share the pool
 {
     // We can't really "copy" a spinlock. But we're holding it. Clear in the clone
-    ASSERT(other.m_lock.is_locked());
+    VERIFY(other.m_lock.is_locked());
     m_lock.initialize();

     // The clone also becomes COW
@@ -154,7 +154,7 @@ AnonymousVMObject::AnonymousVMObject(const AnonymousVMObject& other)
                     break;
             }
         }
-        ASSERT(m_unused_committed_pages == 0);
+        VERIFY(m_unused_committed_pages == 0);
     }
 }

@@ -173,7 +173,7 @@ int AnonymousVMObject::purge()

 int AnonymousVMObject::purge_with_interrupts_disabled(Badge<MemoryManager>)
 {
-    ASSERT_INTERRUPTS_DISABLED();
+    VERIFY_INTERRUPTS_DISABLED();
     if (m_paging_lock.is_locked())
         return 0;
     return purge_impl();
@@ -181,7 +181,7 @@ int AnonymousVMObject::purge_with_interrupts_disabled(Badge<MemoryManager>)

 void AnonymousVMObject::set_was_purged(const VolatilePageRange& range)
 {
-    ASSERT(m_lock.is_locked());
+    VERIFY(m_lock.is_locked());
     for (auto* purgeable_ranges : m_purgeable_ranges)
         purgeable_ranges->set_was_purged(range);
 }
@@ -196,7 +196,7 @@ int AnonymousVMObject::purge_impl()
         for (size_t i = range.base; i < range_end; i++) {
             auto& phys_page = m_physical_pages[i];
             if (phys_page && !phys_page->is_shared_zero_page()) {
-                ASSERT(!phys_page->is_lazy_committed_page());
+                VERIFY(!phys_page->is_lazy_committed_page());
                 ++purged_in_range;
             }
             phys_page = MM.shared_zero_page();
@@ -226,7 +226,7 @@ void AnonymousVMObject::register_purgeable_page_ranges(PurgeablePageRanges& purg
 {
     ScopedSpinLock lock(m_lock);
     purgeable_page_ranges.set_vmobject(this);
-    ASSERT(!m_purgeable_ranges.contains_slow(&purgeable_page_ranges));
+    VERIFY(!m_purgeable_ranges.contains_slow(&purgeable_page_ranges));
     m_purgeable_ranges.append(&purgeable_page_ranges);
 }

@@ -240,7 +240,7 @@ void AnonymousVMObject::unregister_purgeable_page_ranges(PurgeablePageRanges& pu
         m_purgeable_ranges.remove(i);
         return;
     }
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }

 bool AnonymousVMObject::is_any_volatile() const
@@ -256,7 +256,7 @@ bool AnonymousVMObject::is_any_volatile() const

 size_t AnonymousVMObject::remove_lazy_commit_pages(const VolatilePageRange& range)
 {
-    ASSERT(m_lock.is_locked());
+    VERIFY(m_lock.is_locked());

     size_t removed_count = 0;
     auto range_end = range.base + range.count;
@@ -265,7 +265,7 @@ size_t AnonymousVMObject::remove_lazy_commit_pages(const VolatilePageRange& rang
         if (phys_page && phys_page->is_lazy_committed_page()) {
             phys_page = MM.shared_zero_page();
             removed_count++;
-            ASSERT(m_unused_committed_pages > 0);
+            VERIFY(m_unused_committed_pages > 0);
             if (--m_unused_committed_pages == 0)
                 break;
         }
@@ -275,8 +275,8 @@ size_t AnonymousVMObject::remove_lazy_commit_pages(const VolatilePageRange& rang

 void AnonymousVMObject::update_volatile_cache()
 {
-    ASSERT(m_lock.is_locked());
-    ASSERT(m_volatile_ranges_cache_dirty);
+    VERIFY(m_lock.is_locked());
+    VERIFY(m_volatile_ranges_cache_dirty);

     m_volatile_ranges_cache.clear();
     for_each_nonvolatile_range([&](const VolatilePageRange& range) {
@@ -289,7 +289,7 @@ void AnonymousVMObject::update_volatile_cache()

 void AnonymousVMObject::range_made_volatile(const VolatilePageRange& range)
 {
-    ASSERT(m_lock.is_locked());
+    VERIFY(m_lock.is_locked());

     if (m_unused_committed_pages == 0)
         return;
@@ -322,14 +322,14 @@ void AnonymousVMObject::range_made_volatile(const VolatilePageRange& range)

 void AnonymousVMObject::range_made_nonvolatile(const VolatilePageRange&)
 {
-    ASSERT(m_lock.is_locked());
+    VERIFY(m_lock.is_locked());
     m_volatile_ranges_cache_dirty = true;
 }

 size_t AnonymousVMObject::count_needed_commit_pages_for_nonvolatile_range(const VolatilePageRange& range)
 {
-    ASSERT(m_lock.is_locked());
-    ASSERT(!range.is_empty());
+    VERIFY(m_lock.is_locked());
+    VERIFY(!range.is_empty());

     size_t need_commit_pages = 0;
     auto range_end = range.base + range.count;
@@ -346,9 +346,9 @@ size_t AnonymousVMObject::count_needed_commit_pages_for_nonvolatile_range(const

 size_t AnonymousVMObject::mark_committed_pages_for_nonvolatile_range(const VolatilePageRange& range, size_t mark_total)
 {
-    ASSERT(m_lock.is_locked());
-    ASSERT(!range.is_empty());
-    ASSERT(mark_total > 0);
+    VERIFY(m_lock.is_locked());
+    VERIFY(!range.is_empty());
+    VERIFY(mark_total > 0);

     size_t pages_updated = 0;
     auto range_end = range.base + range.count;
@@ -376,10 +376,10 @@ RefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(size_t page_inde
 {
     ScopedSpinLock lock(m_lock);

-    ASSERT(m_unused_committed_pages > 0);
+    VERIFY(m_unused_committed_pages > 0);

     // We shouldn't have any committed page tags in volatile regions
-    ASSERT([&]() {
+    VERIFY([&]() {
         for (auto* purgeable_ranges : m_purgeable_ranges) {
             if (purgeable_ranges->is_volatile(page_index))
                 return false;
@@ -438,7 +438,7 @@ bool AnonymousVMObject::is_nonvolatile(size_t page_index)

 PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr)
 {
-    ASSERT_INTERRUPTS_DISABLED();
+    VERIFY_INTERRUPTS_DISABLED();
     ScopedSpinLock lock(m_lock);
     auto& page_slot = physical_pages()[page_index];
     bool have_committed = m_shared_committed_cow_pages && is_nonvolatile(page_index);
@@ -484,7 +484,7 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
                 dbgln(" >> COW: error copying page {}/{} to {}/{}: failed to read from page at {}",
                     page_slot->paddr(), vaddr, page->paddr(), VirtualAddress(dest_ptr), VirtualAddress(fault_at));
             else
-                ASSERT_NOT_REACHED();
+                VERIFY_NOT_REACHED();
         }
     }
     page_slot = move(page);
@@ -62,7 +62,7 @@ public:
     template<typename F>
     IterationDecision for_each_volatile_range(F f) const
     {
-        ASSERT(m_lock.is_locked());
+        VERIFY(m_lock.is_locked());
         // This is a little ugly. Basically, we're trying to find the
         // volatile ranges that all share, because those are the only
         // pages we can actually purge
@@ -56,7 +56,7 @@ ContiguousVMObject::~ContiguousVMObject()

 RefPtr<VMObject> ContiguousVMObject::clone()
 {
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }

 }
@@ -53,7 +53,7 @@ InodeVMObject::~InodeVMObject()
 size_t InodeVMObject::amount_clean() const
 {
     size_t count = 0;
-    ASSERT(page_count() == m_dirty_pages.size());
+    VERIFY(page_count() == m_dirty_pages.size());
     for (size_t i = 0; i < page_count(); ++i) {
         if (!m_dirty_pages.get(i) && m_physical_pages[i])
             ++count;
@@ -91,7 +91,7 @@ void InodeVMObject::inode_size_changed(Badge<Inode>, size_t old_size, size_t new
 void InodeVMObject::inode_contents_changed(Badge<Inode>, off_t offset, [[maybe_unused]] ssize_t size, [[maybe_unused]] const UserOrKernelBuffer& data)
 {
     InterruptDisabler disabler;
-    ASSERT(offset >= 0);
+    VERIFY(offset >= 0);

     // FIXME: Only invalidate the parts that actually changed.
     for (auto& physical_page : m_physical_pages)
@@ -91,7 +91,7 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()

     // We're temporarily "committing" to two pages that we need to allocate below
     if (!commit_user_physical_pages(2))
-        ASSERT_NOT_REACHED();
+        VERIFY_NOT_REACHED();

     m_shared_zero_page = allocate_committed_user_physical_page();

@@ -158,7 +158,7 @@ void MemoryManager::unmap_memory_after_init()

 UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
 {
-    ASSERT(!m_physical_memory_ranges.is_empty());
+    VERIFY(!m_physical_memory_ranges.is_empty());
     ContiguousReservedMemoryRange range;
     for (auto& current_range : m_physical_memory_ranges) {
         if (current_range.type != PhysicalMemoryRangeType::Reserved) {
@@ -182,7 +182,7 @@ UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()

 bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, const Range& range) const
 {
-    ASSERT(!m_reserved_memory_ranges.is_empty());
+    VERIFY(!m_reserved_memory_ranges.is_empty());
     for (auto& current_range : m_reserved_memory_ranges) {
         if (!(current_range.start <= start_address))
             continue;
@@ -310,8 +310,8 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
         dmesgln("MM: User physical region: {} - {}", region.lower(), region.upper());
     }

-    ASSERT(m_super_physical_pages > 0);
-    ASSERT(m_user_physical_pages > 0);
+    VERIFY(m_super_physical_pages > 0);
+    VERIFY(m_user_physical_pages > 0);

     // We start out with no committed pages
     m_user_physical_pages_uncommitted = m_user_physical_pages.load();
@@ -323,9 +323,9 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()

 PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
-    ASSERT_INTERRUPTS_DISABLED();
-    ASSERT(s_mm_lock.own_lock());
-    ASSERT(page_directory.get_lock().own_lock());
+    VERIFY_INTERRUPTS_DISABLED();
+    VERIFY(s_mm_lock.own_lock());
+    VERIFY(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -340,9 +340,9 @@ PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress

 PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
 {
-    ASSERT_INTERRUPTS_DISABLED();
-    ASSERT(s_mm_lock.own_lock());
-    ASSERT(page_directory.get_lock().own_lock());
+    VERIFY_INTERRUPTS_DISABLED();
+    VERIFY(s_mm_lock.own_lock());
+    VERIFY(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -361,9 +361,9 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
             // of the purging process. So we need to re-map the pd in this case to ensure
             // we're writing to the correct underlying physical page
             pd = quickmap_pd(page_directory, page_directory_table_index);
-            ASSERT(&pde == &pd[page_directory_index]); // Sanity check
+            VERIFY(&pde == &pd[page_directory_index]); // Sanity check

-            ASSERT(!pde.is_present()); // Should have not changed
+            VERIFY(!pde.is_present()); // Should have not changed
         }
         pde.set_page_table_base(page_table->paddr().get());
         pde.set_user_allowed(true);
@@ -373,7 +373,7 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
         // Use page_directory_table_index and page_directory_index as key
         // This allows us to release the page table entry when no longer needed
         auto result = page_directory.m_page_tables.set(vaddr.get() & ~0x1fffff, move(page_table));
-        ASSERT(result == AK::HashSetResult::InsertedNewEntry);
+        VERIFY(result == AK::HashSetResult::InsertedNewEntry);
     }

     return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
@@ -381,9 +381,9 @@ PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual

 void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, bool is_last_release)
 {
-    ASSERT_INTERRUPTS_DISABLED();
-    ASSERT(s_mm_lock.own_lock());
-    ASSERT(page_directory.get_lock().own_lock());
+    VERIFY_INTERRUPTS_DISABLED();
+    VERIFY(s_mm_lock.own_lock());
+    VERIFY(page_directory.get_lock().own_lock());
     u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
     u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@@ -409,7 +409,7 @@ void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress va
                 pde.clear();

                 auto result = page_directory.m_page_tables.remove(vaddr.get() & ~0x1fffff);
-                ASSERT(result);
+                VERIFY(result);
             }
         }
     }
@@ -463,13 +463,13 @@ Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
     auto page_directory = PageDirectory::find_by_cr3(read_cr3());
     if (!page_directory)
         return nullptr;
-    ASSERT(page_directory->space());
+    VERIFY(page_directory->space());
     return user_region_from_vaddr(*page_directory->space(), vaddr);
 }

 PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
 {
-    ASSERT_INTERRUPTS_DISABLED();
+    VERIFY_INTERRUPTS_DISABLED();
     ScopedSpinLock lock(s_mm_lock);
     if (Processor::current().in_irq()) {
         dbgln("CPU[{}] BUG! Page fault while handling IRQ! code={}, vaddr={}, irq level: {}",
@@ -491,7 +491,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)

 OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, String name, u8 access, size_t physical_alignment, Region::Cacheable cacheable)
 {
-    ASSERT(!(size % PAGE_SIZE));
+    VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
@@ -502,7 +502,7 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, Str

 OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String name, u8 access, AllocationStrategy strategy, Region::Cacheable cacheable)
 {
-    ASSERT(!(size % PAGE_SIZE));
+    VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
@@ -515,7 +515,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, String name, u

 OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, String name, u8 access, Region::Cacheable cacheable)
 {
-    ASSERT(!(size % PAGE_SIZE));
+    VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
@@ -528,7 +528,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size

 OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, String name, u8 access, Region::Cacheable cacheable)
 {
-    ASSERT(!(size % PAGE_SIZE));
+    VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
     if (!range.has_value())
@@ -550,7 +550,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range&

 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, String name, u8 access, Region::Cacheable cacheable)
 {
-    ASSERT(!(size % PAGE_SIZE));
+    VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
@@ -560,7 +560,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo

 bool MemoryManager::commit_user_physical_pages(size_t page_count)
 {
-    ASSERT(page_count > 0);
+    VERIFY(page_count > 0);
     ScopedSpinLock lock(s_mm_lock);
     if (m_user_physical_pages_uncommitted < page_count)
         return false;
@@ -572,9 +572,9 @@ bool MemoryManager::commit_user_physical_pages(size_t page_count)

 void MemoryManager::uncommit_user_physical_pages(size_t page_count)
 {
-    ASSERT(page_count > 0);
+    VERIFY(page_count > 0);
     ScopedSpinLock lock(s_mm_lock);
-    ASSERT(m_user_physical_pages_committed >= page_count);
+    VERIFY(m_user_physical_pages_committed >= page_count);

     m_user_physical_pages_uncommitted += page_count;
     m_user_physical_pages_committed -= page_count;
@@ -598,16 +598,16 @@ void MemoryManager::deallocate_user_physical_page(const PhysicalPage& page)
     }

     dmesgln("MM: deallocate_user_physical_page couldn't figure out region for user page @ {}", page.paddr());
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }

 RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
 {
-    ASSERT(s_mm_lock.is_locked());
+    VERIFY(s_mm_lock.is_locked());
     RefPtr<PhysicalPage> page;
     if (committed) {
         // Draw from the committed pages pool. We should always have these pages available
-        ASSERT(m_user_physical_pages_committed > 0);
+        VERIFY(m_user_physical_pages_committed > 0);
         m_user_physical_pages_committed--;
     } else {
         // We need to make sure we don't touch pages that we have committed to
@@ -622,7 +622,7 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
             break;
         }
     }
-    ASSERT(!committed || !page.is_null());
+    VERIFY(!committed || !page.is_null());
     return page;
 }

@@ -655,7 +655,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
                 dbgln("MM: Purge saved the day! Purged {} pages from AnonymousVMObject", purged_page_count);
                 page = find_free_user_physical_page(false);
                 purged_pages = true;
-                ASSERT(page);
+                VERIFY(page);
                 return IterationDecision::Break;
             }
             return IterationDecision::Continue;
@@ -692,12 +692,12 @@ void MemoryManager::deallocate_supervisor_physical_page(const PhysicalPage& page
     }

     dbgln("MM: deallocate_supervisor_physical_page couldn't figure out region for super page @ {}", page.paddr());
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
 }

 NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size, size_t physical_alignment)
 {
-    ASSERT(!(size % PAGE_SIZE));
+    VERIFY(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     size_t count = ceil_div(size, PAGE_SIZE);
     NonnullRefPtrVector<PhysicalPage> physical_pages;
@@ -714,7 +714,7 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
     }

     dmesgln("MM: no super physical pages available");
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
     return {};
 }

@@ -741,7 +741,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
     }

     dmesgln("MM: no super physical pages available");
-    ASSERT_NOT_REACHED();
+    VERIFY_NOT_REACHED();
     return {};
 }

@@ -758,7 +758,7 @@ void MemoryManager::enter_process_paging_scope(Process& process)
 void MemoryManager::enter_space(Space& space)
 {
     auto current_thread = Thread::current();
-    ASSERT(current_thread != nullptr);
+    VERIFY(current_thread != nullptr);
     ScopedSpinLock lock(s_mm_lock);

     current_thread->tss().cr3 = space.page_directory().cr3();
@@ -779,7 +779,7 @@ extern "C" PageTableEntry boot_pd3_pt1023[1024];

 PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
 {
-    ASSERT(s_mm_lock.own_lock());
+    VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
     auto& pte = boot_pd3_pt1023[4];
     auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
@@ -805,7 +805,7 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t

 PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
 {
-    ASSERT(s_mm_lock.own_lock());
+    VERIFY(s_mm_lock.own_lock());
     auto& mm_data = get_data();
     auto& pte = boot_pd3_pt1023[0];
     if (pte.physical_page_base() != pt_paddr.as_ptr()) {
@@ -830,7 +830,7 @@ PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)

 u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
 {
-    ASSERT_INTERRUPTS_DISABLED();
+    VERIFY_INTERRUPTS_DISABLED();
     auto& mm_data = get_data();
     mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
     ScopedSpinLock lock(s_mm_lock);
@@ -851,10 +851,10 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)

 void MemoryManager::unquickmap_page()
 {
-    ASSERT_INTERRUPTS_DISABLED();
+    VERIFY_INTERRUPTS_DISABLED();
     ScopedSpinLock lock(s_mm_lock);
     auto& mm_data = get_data();
-    ASSERT(mm_data.m_quickmap_in_use.is_locked());
+    VERIFY(mm_data.m_quickmap_in_use.is_locked());
     u32 pte_idx = 8 + Processor::id();
     VirtualAddress vaddr(0xffe00000 + pte_idx * PAGE_SIZE);
     auto& pte = boot_pd3_pt1023[pte_idx];
@@ -48,7 +48,7 @@ constexpr FlatPtr page_round_up(FlatPtr x)
 {
     FlatPtr rounded = (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
     // Rounding up >0xffff0000 wraps back to 0. That's never what we want.
-    ASSERT(x == 0 || rounded != 0);
+    VERIFY(x == 0 || rounded != 0);
     return rounded;
 }

@@ -40,7 +40,7 @@ static AK::Singleton<HashMap<u32, PageDirectory*>> s_cr3_map;

 static HashMap<u32, PageDirectory*>& cr3_map()
 {
-    ASSERT_INTERRUPTS_DISABLED();
+    VERIFY_INTERRUPTS_DISABLED();
     return *s_cr3_map;
 }

@@ -125,10 +125,10 @@ PageDirectory::PageDirectory(const RangeAllocator* parent_range_allocator)
         // when writing out the PDPT pointer to CR3.
         // The reason we're not checking the page directory's physical address directly is because
        // we're checking for sign extension when putting it into a PDPTE. See issue #4584.
-        ASSERT((table.raw[0] & ~pdpte_bit_flags) <= max_physical_address);
-        ASSERT((table.raw[1] & ~pdpte_bit_flags) <= max_physical_address);
-        ASSERT((table.raw[2] & ~pdpte_bit_flags) <= max_physical_address);
-        ASSERT((table.raw[3] & ~pdpte_bit_flags) <= max_physical_address);
+        VERIFY((table.raw[0] & ~pdpte_bit_flags) <= max_physical_address);
+        VERIFY((table.raw[1] & ~pdpte_bit_flags) <= max_physical_address);
+        VERIFY((table.raw[2] & ~pdpte_bit_flags) <= max_physical_address);
+        VERIFY((table.raw[3] & ~pdpte_bit_flags) <= max_physical_address);

         MM.unquickmap_page();
     }
@@ -44,7 +44,7 @@ PhysicalPage::PhysicalPage(PhysicalAddress paddr, bool supervisor, bool may_retu

 void PhysicalPage::return_to_freelist() const
 {
-    ASSERT((paddr().get() & ~PAGE_MASK) == 0);
+    VERIFY((paddr().get() & ~PAGE_MASK) == 0);

     if (m_supervisor)
         MM.deallocate_supervisor_physical_page(*this);
@@ -49,7 +49,7 @@ PhysicalRegion::PhysicalRegion(PhysicalAddress lower, PhysicalAddress upper)

 void PhysicalRegion::expand(PhysicalAddress lower, PhysicalAddress upper)
 {
-    ASSERT(!m_pages);
+    VERIFY(!m_pages);

     m_lower = lower;
     m_upper = upper;
@@ -57,7 +57,7 @@ void PhysicalRegion::expand(PhysicalAddress lower, PhysicalAddress upper)

 unsigned PhysicalRegion::finalize_capacity()
 {
-    ASSERT(!m_pages);
+    VERIFY(!m_pages);

     m_pages = (m_upper.get() - m_lower.get()) / PAGE_SIZE;
     m_bitmap.grow(m_pages, false);
@@ -67,8 +67,8 @@ unsigned PhysicalRegion::finalize_capacity()

 NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(size_t count, bool supervisor, size_t physical_alignment)
 {
-    ASSERT(m_pages);
-    ASSERT(m_used != m_pages);
+    VERIFY(m_pages);
+    VERIFY(m_used != m_pages);

     NonnullRefPtrVector<PhysicalPage> physical_pages;
     physical_pages.ensure_capacity(count);
@@ -82,11 +82,11 @@ NonnullRefPtrVector<PhysicalPage> PhysicalRegion::take_contiguous_free_pages(siz

 unsigned PhysicalRegion::find_contiguous_free_pages(size_t count, size_t physical_alignment)
 {
-    ASSERT(count != 0);
-    ASSERT(physical_alignment % PAGE_SIZE == 0);
+    VERIFY(count != 0);
+    VERIFY(physical_alignment % PAGE_SIZE == 0);
     // search from the last page we allocated
     auto range = find_and_allocate_contiguous_range(count, physical_alignment / PAGE_SIZE);
-    ASSERT(range.has_value());
+    VERIFY(range.has_value());
     return range.value();
 }

@@ -100,8 +100,8 @@ Optional<unsigned> PhysicalRegion::find_one_free_page()
         Checked<FlatPtr> local_offset = m_recently_returned[index].get();
         local_offset -= m_lower.get();
         m_recently_returned.remove(index);
-        ASSERT(!local_offset.has_overflow());
-        ASSERT(local_offset.value() < (FlatPtr)(m_pages * PAGE_SIZE));
+        VERIFY(!local_offset.has_overflow());
+        VERIFY(local_offset.value() < (FlatPtr)(m_pages * PAGE_SIZE));
         return local_offset.value() / PAGE_SIZE;
     }
     return {};
@@ -121,7 +121,7 @@ Optional<unsigned> PhysicalRegion::find_one_free_page()

 Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t count, unsigned alignment)
 {
-    ASSERT(count != 0);
+    VERIFY(count != 0);
     size_t found_pages_count = 0;
     // TODO: Improve how we deal with alignment != 1
     auto first_index = m_bitmap.find_longest_range_of_unset_bits(count + alignment - 1, found_pages_count);
@@ -146,7 +146,7 @@ Optional<unsigned> PhysicalRegion::find_and_allocate_contiguous_range(size_t cou

 RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)
 {
-    ASSERT(m_pages);
+    VERIFY(m_pages);

     auto free_index = find_one_free_page();
     if (!free_index.has_value())
@@ -157,16 +157,16 @@ RefPtr<PhysicalPage> PhysicalRegion::take_free_page(bool supervisor)

 void PhysicalRegion::free_page_at(PhysicalAddress addr)
 {
-    ASSERT(m_pages);
+    VERIFY(m_pages);

     if (m_used == 0) {
-        ASSERT_NOT_REACHED();
+        VERIFY_NOT_REACHED();
     }

     Checked<FlatPtr> local_offset = addr.get();
     local_offset -= m_lower.get();
-    ASSERT(!local_offset.has_overflow());
-    ASSERT(local_offset.value() < (FlatPtr)(m_pages * PAGE_SIZE));
+    VERIFY(!local_offset.has_overflow());
+    VERIFY(local_offset.value() < (FlatPtr)(m_pages * PAGE_SIZE));

     auto page = local_offset.value() / PAGE_SIZE;
     m_bitmap.set(page, false);
@@ -31,7 +31,7 @@ namespace Kernel {

 ProcessPagingScope::ProcessPagingScope(Process& process)
 {
-    ASSERT(Thread::current() != nullptr);
+    VERIFY(Thread::current() != nullptr);
     m_previous_cr3 = read_cr3();
     MM.enter_process_paging_scope(process);
 }
@@ -165,7 +165,7 @@ bool VolatilePageRanges::remove(const VolatilePageRange& range, bool& was_purged
         m_ranges.remove(nearby_index);
     } else {
         // See if we need to remove any of the following ranges
-        ASSERT(existing_range == &m_ranges[nearby_index]); // sanity check
+        VERIFY(existing_range == &m_ranges[nearby_index]); // sanity check
         while (nearby_index < m_ranges.size()) {
             existing_range = &m_ranges[nearby_index];
             if (!existing_range->intersects(range))
@@ -228,7 +228,7 @@ auto PurgeablePageRanges::remove_volatile_range(const VolatilePageRange& range,
     }
     ScopedSpinLock vmobject_lock(m_vmobject->m_lock); // see comment in add_volatile_range
     ScopedSpinLock lock(m_volatile_ranges_lock);
-    ASSERT(m_vmobject);
+    VERIFY(m_vmobject);

     // Before we actually remove this range, we need to check if we need
     // to commit any pages, which may fail. If it fails, we don't actually
@@ -259,7 +259,7 @@ auto PurgeablePageRanges::remove_volatile_range(const VolatilePageRange& range,
         return RemoveVolatileError::Success;
     }

-    ASSERT(need_commit_pages == 0); // We should have not touched anything
+    VERIFY(need_commit_pages == 0); // We should have not touched anything
     return RemoveVolatileError::SuccessNoChange;
 }

@@ -287,10 +287,10 @@ void PurgeablePageRanges::set_vmobject(AnonymousVMObject* vmobject)
 {
     // No lock needed here
     if (vmobject) {
-        ASSERT(!m_vmobject);
+        VERIFY(!m_vmobject);
         m_vmobject = vmobject;
     } else {
-        ASSERT(m_vmobject);
+        VERIFY(m_vmobject);
         m_vmobject = nullptr;
     }
 }
@@ -309,7 +309,7 @@ CommittedCowPages::~CommittedCowPages()

 NonnullRefPtr<PhysicalPage> CommittedCowPages::allocate_one()
 {
-    ASSERT(m_committed_pages > 0);
+    VERIFY(m_committed_pages > 0);
     m_committed_pages--;

     return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
@@ -317,7 +317,7 @@ NonnullRefPtr<PhysicalPage> CommittedCowPages::allocate_one()

 bool CommittedCowPages::return_one()
 {
-    ASSERT(m_committed_pages > 0);
+    VERIFY(m_committed_pages > 0);
     m_committed_pages--;

     MM.uncommit_user_physical_pages(1);
@@ -65,7 +65,7 @@ struct VolatilePageRange {

     void combine_intersecting_or_adjacent(const VolatilePageRange& other)
     {
-        ASSERT(intersects_or_adjacent(other));
+        VERIFY(intersects_or_adjacent(other));
         if (base <= other.base) {
             count = (other.base - base) + other.count;
         } else {
@@ -165,7 +165,7 @@ public:

             if (existing_range->range_equals(r))
                 return f(r);
-            ASSERT(existing_range == &m_ranges[nearby_index]); // sanity check
+            VERIFY(existing_range == &m_ranges[nearby_index]); // sanity check
             while (nearby_index < m_ranges.size()) {
                 existing_range = &m_ranges[nearby_index];
                 if (!existing_range->intersects(range))
@@ -59,7 +59,7 @@ RangeAllocator::~RangeAllocator()

 void RangeAllocator::dump() const
 {
-    ASSERT(m_lock.is_locked());
+    VERIFY(m_lock.is_locked());
     dbgln("RangeAllocator({})", this);
     for (auto& range : m_available_ranges) {
         dbgln(" {:x} -> {:x}", range.base().get(), range.end().get() - 1);
@@ -68,7 +68,7 @@ void RangeAllocator::dump() const

 Vector<Range, 2> Range::carve(const Range& taken)
 {
-    ASSERT((taken.size() % PAGE_SIZE) == 0);
+    VERIFY((taken.size() % PAGE_SIZE) == 0);

     Vector<Range, 2> parts;
     if (taken == *this)
@@ -82,13 +82,13 @@ Vector<Range, 2> Range::carve(const Range& taken)

 void RangeAllocator::carve_at_index(int index, const Range& range)
 {
-    ASSERT(m_lock.is_locked());
+    VERIFY(m_lock.is_locked());
     auto remaining_parts = m_available_ranges[index].carve(range);
-    ASSERT(remaining_parts.size() >= 1);
-    ASSERT(m_total_range.contains(remaining_parts[0]));
+    VERIFY(remaining_parts.size() >= 1);
+    VERIFY(m_total_range.contains(remaining_parts[0]));
     m_available_ranges[index] = remaining_parts[0];
     if (remaining_parts.size() == 2) {
-        ASSERT(m_total_range.contains(remaining_parts[1]));
+        VERIFY(m_total_range.contains(remaining_parts[1]));
         m_available_ranges.insert(index + 1, move(remaining_parts[1]));
     }
 }
@@ -98,8 +98,8 @@ Optional<Range> RangeAllocator::allocate_randomized(size_t size, size_t alignmen
     if (!size)
         return {};

-    ASSERT((size % PAGE_SIZE) == 0);
-    ASSERT((alignment % PAGE_SIZE) == 0);
+    VERIFY((size % PAGE_SIZE) == 0);
+    VERIFY((alignment % PAGE_SIZE) == 0);

     // FIXME: I'm sure there's a smarter way to do this.
     static constexpr size_t maximum_randomization_attempts = 1000;
@@ -122,8 +122,8 @@ Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
     if (!size)
         return {};

-    ASSERT((size % PAGE_SIZE) == 0);
-    ASSERT((alignment % PAGE_SIZE) == 0);
+    VERIFY((size % PAGE_SIZE) == 0);
+    VERIFY((alignment % PAGE_SIZE) == 0);

 #ifdef VM_GUARD_PAGES
     // NOTE: We pad VM allocations with a guard page on each side.
@@ -151,7 +151,7 @@ Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
         FlatPtr aligned_base = round_up_to_power_of_two(initial_base, alignment);

         Range allocated_range(VirtualAddress(aligned_base), size);
-        ASSERT(m_total_range.contains(allocated_range));
+        VERIFY(m_total_range.contains(allocated_range));

         if (available_range == allocated_range) {
             m_available_ranges.remove(i);
@@ -169,14 +169,14 @@ Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t si
     if (!size)
         return {};

-    ASSERT(base.is_page_aligned());
-    ASSERT((size % PAGE_SIZE) == 0);
+    VERIFY(base.is_page_aligned());
+    VERIFY((size % PAGE_SIZE) == 0);

     Range allocated_range(base, size);
     ScopedSpinLock lock(m_lock);
     for (size_t i = 0; i < m_available_ranges.size(); ++i) {
         auto& available_range = m_available_ranges[i];
-        ASSERT(m_total_range.contains(allocated_range));
+        VERIFY(m_total_range.contains(allocated_range));
         if (!available_range.contains(base, size))
             continue;
         if (available_range == allocated_range) {
@@ -192,11 +192,11 @@ Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t si
 void RangeAllocator::deallocate(const Range& range)
 {
     ScopedSpinLock lock(m_lock);
-    ASSERT(m_total_range.contains(range));
-    ASSERT(range.size());
-    ASSERT((range.size() % PAGE_SIZE) == 0);
-    ASSERT(range.base() < range.end());
-    ASSERT(!m_available_ranges.is_empty());
+    VERIFY(m_total_range.contains(range));
+    VERIFY(range.size());
+    VERIFY((range.size() % PAGE_SIZE) == 0);
+    VERIFY(range.base() < range.end());
+    VERIFY(!m_available_ranges.is_empty());

     size_t nearby_index = 0;
     auto* existing_range = binary_search(
@@ -49,9 +49,9 @@ Region::Region(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offs
     , m_shared(shared)
     , m_cacheable(cacheable == Cacheable::Yes)
 {
-    ASSERT(m_range.base().is_page_aligned());
-    ASSERT(m_range.size());
-    ASSERT((m_range.size() % PAGE_SIZE) == 0);
+    VERIFY(m_range.base().is_page_aligned());
+    VERIFY(m_range.size());
+    VERIFY((m_range.size() % PAGE_SIZE) == 0);

     m_vmobject->ref_region();
     register_purgeable_page_ranges();
@@ -69,7 +69,7 @@ Region::~Region()
     ScopedSpinLock lock(s_mm_lock);
     if (m_page_directory) {
         unmap(ShouldDeallocateVirtualMemoryRange::Yes);
-        ASSERT(!m_page_directory);
+        VERIFY(!m_page_directory);
     }

     MM.unregister_region(*this);
@@ -93,14 +93,14 @@ void Region::unregister_purgeable_page_ranges()

 OwnPtr<Region> Region::clone(Process& new_owner)
 {
-    ASSERT(Process::current());
+    VERIFY(Process::current());

     ScopedSpinLock lock(s_mm_lock);

     if (m_shared) {
-        ASSERT(!m_stack);
+        VERIFY(!m_stack);
         if (vmobject().is_inode())
-            ASSERT(vmobject().is_shared_inode());
+            VERIFY(vmobject().is_shared_inode());

         // Create a new region backed by the same VMObject.
         auto region = Region::create_user_accessible(
@@ -114,7 +114,7 @@ OwnPtr<Region> Region::clone(Process& new_owner)
     }

     if (vmobject().is_inode())
-        ASSERT(vmobject().is_private_inode());
+        VERIFY(vmobject().is_private_inode());

     auto vmobject_clone = vmobject().clone();
     if (!vmobject_clone)
@@ -127,9 +127,9 @@ OwnPtr<Region> Region::clone(Process& new_owner)
     if (m_vmobject->is_anonymous())
         clone_region->copy_purgeable_page_ranges(*this);
     if (m_stack) {
-        ASSERT(is_readable());
-        ASSERT(is_writable());
-        ASSERT(vmobject().is_anonymous());
+        VERIFY(is_readable());
+        VERIFY(is_writable());
+        VERIFY(vmobject().is_anonymous());
         clone_region->set_stack(true);
     }
     clone_region->set_syscall_region(is_syscall_region());
@@ -250,14 +250,14 @@ bool Region::should_cow(size_t page_index) const

 void Region::set_should_cow(size_t page_index, bool cow)
 {
-    ASSERT(!m_shared);
+    VERIFY(!m_shared);
     if (vmobject().is_anonymous())
         static_cast<AnonymousVMObject&>(vmobject()).set_should_cow(first_page_index() + page_index, cow);
 }

 bool Region::map_individual_page_impl(size_t page_index)
 {
-    ASSERT(m_page_directory->get_lock().own_lock());
+    VERIFY(m_page_directory->get_lock().own_lock());
     auto page_vaddr = vaddr_from_page_index(page_index);

     bool user_allowed = page_vaddr.get() >= 0x00800000 && is_user_address(page_vaddr);
@@ -289,7 +289,7 @@ bool Region::map_individual_page_impl(size_t page_index)
 bool Region::do_remap_vmobject_page_range(size_t page_index, size_t page_count)
 {
     bool success = true;
-    ASSERT(s_mm_lock.own_lock());
+    VERIFY(s_mm_lock.own_lock());
     if (!m_page_directory)
         return success; // not an error, region may have not yet mapped it
     if (!translate_vmobject_page_range(page_index, page_count))
@@ -333,7 +333,7 @@ bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
     if (!translate_vmobject_page(page_index))
         return true; // not an error, region doesn't map this page
     ScopedSpinLock page_lock(m_page_directory->get_lock());
-    ASSERT(physical_page(page_index));
+    VERIFY(physical_page(page_index));
     bool success = map_individual_page_impl(page_index);
     if (with_flush)
         MM.flush_tlb(m_page_directory, vaddr_from_page_index(page_index));
@@ -380,8 +380,8 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)

 void Region::set_page_directory(PageDirectory& page_directory)
 {
-    ASSERT(!m_page_directory || m_page_directory == &page_directory);
-    ASSERT(s_mm_lock.own_lock());
+    VERIFY(!m_page_directory || m_page_directory == &page_directory);
+    VERIFY(s_mm_lock.own_lock());
     m_page_directory = page_directory;
 }

@@ -392,7 +392,7 @@ bool Region::map(PageDirectory& page_directory)

     // FIXME: Find a better place for this sanity check(?)
     if (is_user() && !is_shared()) {
-        ASSERT(!vmobject().is_shared_inode());
+        VERIFY(!vmobject().is_shared_inode());
     }

     set_page_directory(page_directory);
@@ -411,7 +411,7 @@ bool Region::map(PageDirectory& page_directory)

 void Region::remap()
 {
-    ASSERT(m_page_directory);
+    VERIFY(m_page_directory);
     map(*m_page_directory);
 }

@@ -451,7 +451,7 @@ PageFaultResponse Region::handle_fault(const PageFault& fault, ScopedSpinLock<Re
         return PageFaultResponse::ShouldCrash;
 #endif
     }
-    ASSERT(fault.type() == PageFault::Type::ProtectionViolation);
+    VERIFY(fault.type() == PageFault::Type::ProtectionViolation);
     if (fault.access() == PageFault::Access::Write && is_writable() && should_cow(page_index_in_region)) {
         dbgln_if(PAGE_FAULT_DEBUG, "PV(cow) fault in Region({})[{}] at {}", this, page_index_in_region, fault.vaddr());
         auto* phys_page = physical_page(page_index_in_region);
@@ -467,8 +467,8 @@ PageFaultResponse Region::handle_fault(const PageFault& fault, ScopedSpinLock<Re

 PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
 {
-    ASSERT_INTERRUPTS_DISABLED();
-    ASSERT(vmobject().is_anonymous());
+    VERIFY_INTERRUPTS_DISABLED();
+    VERIFY(vmobject().is_anonymous());

     LOCKER(vmobject().m_paging_lock);

@@ -509,7 +509,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)

 PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
 {
-    ASSERT_INTERRUPTS_DISABLED();
+    VERIFY_INTERRUPTS_DISABLED();
     auto current_thread = Thread::current();
     if (current_thread)
         current_thread->did_cow_fault();
@@ -526,18 +526,18 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)

 PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region, ScopedSpinLock<RecursiveSpinLock>& mm_lock)
 {
-    ASSERT_INTERRUPTS_DISABLED();
-    ASSERT(vmobject().is_inode());
+    VERIFY_INTERRUPTS_DISABLED();
+    VERIFY(vmobject().is_inode());

     mm_lock.unlock();
-    ASSERT(!s_mm_lock.own_lock());
-    ASSERT(!g_scheduler_lock.own_lock());
+    VERIFY(!s_mm_lock.own_lock());
+    VERIFY(!g_scheduler_lock.own_lock());

     LOCKER(vmobject().m_paging_lock);

     mm_lock.lock();

-    ASSERT_INTERRUPTS_DISABLED();
+    VERIFY_INTERRUPTS_DISABLED();
     auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject());
     auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
     auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject];
@@ -589,7 +589,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region, Scoped
                     VirtualAddress(dest_ptr),
                     VirtualAddress(fault_at));
             else
-                ASSERT_NOT_REACHED();
+                VERIFY_NOT_REACHED();
         }
     }
     MM.unquickmap_page();
@@ -181,13 +181,13 @@ public:

     const PhysicalPage* physical_page(size_t index) const
     {
-        ASSERT(index < page_count());
+        VERIFY(index < page_count());
         return vmobject().physical_pages()[first_page_index() + index];
     }

     RefPtr<PhysicalPage>& physical_page_slot(size_t index)
     {
-        ASSERT(index < page_count());
+        VERIFY(index < page_count());
         return vmobject().physical_pages()[first_page_index() + index];
     }

@@ -80,7 +80,7 @@ Region& Space::allocate_split_region(const Region& source_region, const Range& r

 KResultOr<Region*> Space::allocate_region(const Range& range, const String& name, int prot, AllocationStrategy strategy)
 {
-    ASSERT(range.is_valid());
+    VERIFY(range.is_valid());
     auto vmobject = AnonymousVMObject::create_with_size(range.size(), strategy);
     if (!vmobject)
         return ENOMEM;
@@ -92,7 +92,7 @@ KResultOr<Region*> Space::allocate_region(const Range& range, const String& name

 KResultOr<Region*> Space::allocate_region_with_vmobject(const Range& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, const String& name, int prot, bool shared)
 {
-    ASSERT(range.is_valid());
+    VERIFY(range.is_valid());
     size_t end_in_vmobject = offset_in_vmobject + range.size();
     if (end_in_vmobject <= offset_in_vmobject) {
         dbgln("allocate_region_with_vmobject: Overflow (offset + size)");
@@ -172,9 +172,9 @@ Vector<Region*, 2> Space::split_region_around_range(const Region& source_region,
     Range old_region_range = source_region.range();
     auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);

-    ASSERT(!remaining_ranges_after_unmap.is_empty());
+    VERIFY(!remaining_ranges_after_unmap.is_empty());
     auto make_replacement_region = [&](const Range& new_range) -> Region& {
-        ASSERT(old_region_range.contains(new_range));
+        VERIFY(old_region_range.contains(new_range));
         size_t new_range_offset_in_vmobject = source_region.offset_in_vmobject() + (new_range.base().get() - old_region_range.base().get());
         return allocate_split_region(source_region, new_range, new_range_offset_in_vmobject);
     };
@@ -51,7 +51,7 @@ VMObject::~VMObject()
     }

     MM.unregister_vmobject(*this);
-    ASSERT(m_regions_count.load(AK::MemoryOrder::memory_order_relaxed) == 0);
+    VERIFY(m_regions_count.load(AK::MemoryOrder::memory_order_relaxed) == 0);
 }

 }