AK+Kernel+LibELF: Remove the need for IterationDecision::Continue
By constraining two implementations, the compiler will select the best-fitting one. All this requires is duplicating the implementation and simplifying it for the `void` case.

This constraining also informs both the caller and the compiler by passing the callback parameter types as part of the constraint (e.g. `IteratorFunction<int>`).

Some `for_each` functions in LibELF only take functions which return `void`. This is a minimal correctness check, as it removes one way for a function to incompletely do something.

There seems to be a possible idiom here: inside such a lambda, a bare `return;` behaves like `continue;` in a for-loop.
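To make the mechanism concrete, here is a minimal, self-contained sketch of the same overload-selection trick in standard C++20. The `IterationDecision` enum, the two concepts, and the `Numbers::for_each` container below are illustrative stand-ins, not the actual AK or Kernel definitions. The point is that the two `for_each` overloads are constrained on the callback's return type, so a `void` callback no longer has to end with `return IterationDecision::Continue;`, and a bare `return;` inside it acts like `continue;` in a for-loop.

// Sketch only: stand-ins for the AK concepts and kernel containers, built
// from standard C++20 facilities. Compile with -std=c++20.
#include <concepts>
#include <cstdio>
#include <vector>

enum class IterationDecision {
    Continue,
    Break,
};

// Satisfied when the callback, invoked with Args..., returns IterationDecision.
template<typename Func, typename... Args>
concept IteratorFunction = requires(Func func, Args... args) {
    { func(args...) } -> std::same_as<IterationDecision>;
};

// Satisfied when the callback, invoked with Args..., returns void.
template<typename Func, typename... Args>
concept VoidFunction = requires(Func func, Args... args) {
    { func(args...) } -> std::same_as<void>;
};

struct Numbers {
    std::vector<int> values { 1, 2, 3, 4 };

    // Full-featured overload: the callback can stop iteration early.
    template<IteratorFunction<int> Callback>
    IterationDecision for_each(Callback callback) const
    {
        for (int value : values) {
            if (callback(value) == IterationDecision::Break)
                return IterationDecision::Break;
        }
        return IterationDecision::Continue;
    }

    // Simplified overload for void callbacks: wrap the callback so the
    // boilerplate `return IterationDecision::Continue;` lives here, once.
    template<VoidFunction<int> Callback>
    IterationDecision for_each(Callback callback) const
    {
        return for_each([&](int value) {
            callback(value);
            return IterationDecision::Continue;
        });
    }
};

int main()
{
    Numbers numbers;

    // void case: just do the work; a bare `return;` acts like `continue;`.
    numbers.for_each([](int value) {
        if (value == 2)
            return; // skip 2, keep iterating
        std::printf("%d\n", value);
    });

    // IterationDecision case: stop as soon as we have seen 3.
    numbers.for_each([](int value) {
        std::printf("saw %d\n", value);
        return value == 3 ? IterationDecision::Break : IterationDecision::Continue;
    });
    return 0;
}

Because a callback can satisfy only one of the two constraints, overload resolution is unambiguous, and the caller's intent (early-exit capable or not) is visible in the signature it matches.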
parent bbaa463032
commit aa4d41fe2c
25 changed files with 311 additions and 127 deletions
@@ -23,13 +23,11 @@ RefPtr<VMObject> AnonymousVMObject::clone()
     // so that the parent is still guaranteed to be able to have all
     // non-volatile memory available.
     size_t need_cow_pages = 0;
-    {
-        // We definitely need to commit non-volatile areas
-        for_each_nonvolatile_range([&](const VolatilePageRange& nonvolatile_range) {
-            need_cow_pages += nonvolatile_range.count;
-            return IterationDecision::Continue;
-        });
-    }
+
+    // We definitely need to commit non-volatile areas
+    for_each_nonvolatile_range([&](const VolatilePageRange& nonvolatile_range) {
+        need_cow_pages += nonvolatile_range.count;
+    });
 
     dbgln_if(COMMIT_DEBUG, "Cloning {:p}, need {} committed cow pages", this, need_cow_pages);
@@ -220,7 +218,6 @@ int AnonymousVMObject::purge_impl()
                 }
             });
         }
-        return IterationDecision::Continue;
     });
     return purged_page_count;
 }
@@ -284,7 +281,6 @@ void AnonymousVMObject::update_volatile_cache()
     m_volatile_ranges_cache.clear();
    for_each_nonvolatile_range([&](const VolatilePageRange& range) {
        m_volatile_ranges_cache.add_unchecked(range);
-        return IterationDecision::Continue;
    });
 
    m_volatile_ranges_cache_dirty = false;
@@ -40,7 +40,7 @@ public:
 
     bool is_any_volatile() const;
 
-    template<typename F>
+    template<IteratorFunction<const VolatilePageRange&> F>
     IterationDecision for_each_volatile_range(F f) const
     {
         VERIFY(m_lock.is_locked());
@@ -78,24 +78,42 @@ public:
             return IterationDecision::Continue;
     }
 
-    template<typename F>
+    template<IteratorFunction<const VolatilePageRange&> F>
     IterationDecision for_each_nonvolatile_range(F f) const
     {
         size_t base = 0;
         for_each_volatile_range([&](const VolatilePageRange& volatile_range) {
             if (volatile_range.base == base)
                 return IterationDecision::Continue;
-            IterationDecision decision = f({ base, volatile_range.base - base });
+            IterationDecision decision = f(VolatilePageRange { base, volatile_range.base - base });
             if (decision != IterationDecision::Continue)
                 return decision;
             base = volatile_range.base + volatile_range.count;
             return IterationDecision::Continue;
         });
         if (base < page_count())
-            return f({ base, page_count() - base });
+            return f(VolatilePageRange { base, page_count() - base });
         return IterationDecision::Continue;
     }
 
+    template<VoidFunction<const VolatilePageRange&> F>
+    IterationDecision for_each_volatile_range(F f) const
+    {
+        return for_each_volatile_range([&](auto& range) {
+            f(range);
+            return IterationDecision::Continue;
+        });
+    }
+
+    template<VoidFunction<const VolatilePageRange&> F>
+    IterationDecision for_each_nonvolatile_range(F f) const
+    {
+        return for_each_nonvolatile_range([&](auto range) {
+            f(move(range));
+            return IterationDecision::Continue;
+        });
+    }
+
 private:
     explicit AnonymousVMObject(size_t, AllocationStrategy);
     explicit AnonymousVMObject(PhysicalAddress, size_t);
@@ -6,6 +6,7 @@
 
 #pragma once
 
+#include <AK/Concepts.h>
 #include <AK/HashTable.h>
 #include <AK/NonnullRefPtrVector.h>
 #include <AK/String.h>
@@ -157,7 +158,7 @@ public:
     unsigned super_physical_pages() const { return m_super_physical_pages; }
     unsigned super_physical_pages_used() const { return m_super_physical_pages_used; }
 
-    template<typename Callback>
+    template<IteratorFunction<VMObject&> Callback>
     static void for_each_vmobject(Callback callback)
     {
         for (auto& vmobject : MM.m_vmobjects) {
@@ -166,6 +167,13 @@ public:
         }
     }
 
+    template<VoidFunction<VMObject&> Callback>
+    static void for_each_vmobject(Callback callback)
+    {
+        for (auto& vmobject : MM.m_vmobjects)
+            callback(vmobject);
+    }
+
     static Region* find_region_from_vaddr(Space&, VirtualAddress);
     static Region* find_user_region_from_vaddr(Space&, VirtualAddress);
 