mirror of
https://github.com/RGBCube/serenity
synced 2025-06-01 11:08:12 +00:00
UserspaceEmulator: Remove hand-rolled is_foo() helpers in favor of RTTI
This commit is contained in:
parent
db790dda62
commit
febc8a5ac7
7 changed files with 13 additions and 17 deletions
|
@ -280,7 +280,7 @@ const MmapRegion* Emulator::find_text_region(FlatPtr address)
|
||||||
{
|
{
|
||||||
const MmapRegion* matching_region = nullptr;
|
const MmapRegion* matching_region = nullptr;
|
||||||
mmu().for_each_region([&](auto& region) {
|
mmu().for_each_region([&](auto& region) {
|
||||||
if (!region.is_mmap())
|
if (!is<MmapRegion>(region))
|
||||||
return IterationDecision::Continue;
|
return IterationDecision::Continue;
|
||||||
const auto& mmap_region = static_cast<const MmapRegion&>(region);
|
const auto& mmap_region = static_cast<const MmapRegion&>(region);
|
||||||
if (!(mmap_region.is_executable() && address >= mmap_region.base() && address < mmap_region.base() + mmap_region.size()))
|
if (!(mmap_region.is_executable() && address >= mmap_region.base() && address < mmap_region.base() + mmap_region.size()))
|
||||||
|
@ -1045,7 +1045,7 @@ FlatPtr Emulator::virt$mremap(FlatPtr params_addr)
|
||||||
mmu().copy_from_vm(&params, params_addr, sizeof(params));
|
mmu().copy_from_vm(&params, params_addr, sizeof(params));
|
||||||
|
|
||||||
if (auto* region = mmu().find_region({ m_cpu.ds(), params.old_address })) {
|
if (auto* region = mmu().find_region({ m_cpu.ds(), params.old_address })) {
|
||||||
if (!region->is_mmap())
|
if (!is<MmapRegion>(*region))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
ASSERT(region->size() == params.old_size);
|
ASSERT(region->size() == params.old_size);
|
||||||
auto& mmap_region = *(MmapRegion*)region;
|
auto& mmap_region = *(MmapRegion*)region;
|
||||||
|
@ -1094,7 +1094,7 @@ u32 Emulator::virt$unveil(u32)
|
||||||
u32 Emulator::virt$mprotect(FlatPtr base, size_t size, int prot)
|
u32 Emulator::virt$mprotect(FlatPtr base, size_t size, int prot)
|
||||||
{
|
{
|
||||||
if (auto* region = mmu().find_region({ m_cpu.ds(), base })) {
|
if (auto* region = mmu().find_region({ m_cpu.ds(), base })) {
|
||||||
if (!region->is_mmap())
|
if (!is<MmapRegion>(*region))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
ASSERT(region->size() == size);
|
ASSERT(region->size() == size);
|
||||||
auto& mmap_region = *(MmapRegion*)region;
|
auto& mmap_region = *(MmapRegion*)region;
|
||||||
|
|
|
@ -45,7 +45,7 @@ template<typename Callback>
|
||||||
inline void MallocTracer::for_each_mallocation(Callback callback) const
|
inline void MallocTracer::for_each_mallocation(Callback callback) const
|
||||||
{
|
{
|
||||||
m_emulator.mmu().for_each_region([&](auto& region) {
|
m_emulator.mmu().for_each_region([&](auto& region) {
|
||||||
if (region.is_mmap() && static_cast<const MmapRegion&>(region).is_malloc_block()) {
|
if (is<MmapRegion>(region) && static_cast<const MmapRegion&>(region).is_malloc_block()) {
|
||||||
auto* malloc_data = static_cast<MmapRegion&>(region).malloc_metadata();
|
auto* malloc_data = static_cast<MmapRegion&>(region).malloc_metadata();
|
||||||
for (auto& mallocation : malloc_data->mallocations) {
|
for (auto& mallocation : malloc_data->mallocations) {
|
||||||
if (mallocation.used && callback(mallocation) == IterationDecision::Break)
|
if (mallocation.used && callback(mallocation) == IterationDecision::Break)
|
||||||
|
@ -62,7 +62,7 @@ void MallocTracer::target_did_malloc(Badge<SoftCPU>, FlatPtr address, size_t siz
|
||||||
return;
|
return;
|
||||||
auto* region = m_emulator.mmu().find_region({ 0x23, address });
|
auto* region = m_emulator.mmu().find_region({ 0x23, address });
|
||||||
ASSERT(region);
|
ASSERT(region);
|
||||||
ASSERT(region->is_mmap());
|
ASSERT(is<MmapRegion>(*region));
|
||||||
auto& mmap_region = static_cast<MmapRegion&>(*region);
|
auto& mmap_region = static_cast<MmapRegion&>(*region);
|
||||||
|
|
||||||
// Mark the containing mmap region as a malloc block!
|
// Mark the containing mmap region as a malloc block!
|
||||||
|
@ -145,7 +145,7 @@ void MallocTracer::target_did_realloc(Badge<SoftCPU>, FlatPtr address, size_t si
|
||||||
return;
|
return;
|
||||||
auto* region = m_emulator.mmu().find_region({ 0x23, address });
|
auto* region = m_emulator.mmu().find_region({ 0x23, address });
|
||||||
ASSERT(region);
|
ASSERT(region);
|
||||||
ASSERT(region->is_mmap());
|
ASSERT(is<MmapRegion>(*region));
|
||||||
auto& mmap_region = static_cast<MmapRegion&>(*region);
|
auto& mmap_region = static_cast<MmapRegion&>(*region);
|
||||||
|
|
||||||
ASSERT(mmap_region.is_malloc_block());
|
ASSERT(mmap_region.is_malloc_block());
|
||||||
|
@ -334,7 +334,7 @@ bool MallocTracer::is_reachable(const Mallocation& mallocation) const
|
||||||
if (!region.is_readable())
|
if (!region.is_readable())
|
||||||
return IterationDecision::Continue;
|
return IterationDecision::Continue;
|
||||||
// Skip malloc blocks
|
// Skip malloc blocks
|
||||||
if (region.is_mmap() && static_cast<const MmapRegion&>(region).is_malloc_block())
|
if (is<MmapRegion>(region) && static_cast<const MmapRegion&>(region).is_malloc_block())
|
||||||
return IterationDecision::Continue;
|
return IterationDecision::Continue;
|
||||||
|
|
||||||
size_t pointers_in_region = region.size() / sizeof(u32);
|
size_t pointers_in_region = region.size() / sizeof(u32);
|
||||||
|
|
|
@ -95,7 +95,7 @@ private:
|
||||||
|
|
||||||
ALWAYS_INLINE Mallocation* MallocTracer::find_mallocation(const Region& region, FlatPtr address)
|
ALWAYS_INLINE Mallocation* MallocTracer::find_mallocation(const Region& region, FlatPtr address)
|
||||||
{
|
{
|
||||||
if (!region.is_mmap())
|
if (!is<MmapRegion>(region))
|
||||||
return nullptr;
|
return nullptr;
|
||||||
if (!static_cast<const MmapRegion&>(region).is_malloc_block())
|
if (!static_cast<const MmapRegion&>(region).is_malloc_block())
|
||||||
return nullptr;
|
return nullptr;
|
||||||
|
|
|
@ -65,7 +65,6 @@ public:
|
||||||
|
|
||||||
private:
|
private:
|
||||||
MmapRegion(u32 base, u32 size, int prot);
|
MmapRegion(u32 base, u32 size, int prot);
|
||||||
virtual bool is_mmap() const override { return true; }
|
|
||||||
|
|
||||||
u8* m_data { nullptr };
|
u8* m_data { nullptr };
|
||||||
u8* m_shadow_data { nullptr };
|
u8* m_shadow_data { nullptr };
|
||||||
|
|
|
@ -27,6 +27,7 @@
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include "ValueWithShadow.h"
|
#include "ValueWithShadow.h"
|
||||||
|
#include <AK/TypeCasts.h>
|
||||||
#include <AK/Types.h>
|
#include <AK/Types.h>
|
||||||
|
|
||||||
namespace UserspaceEmulator {
|
namespace UserspaceEmulator {
|
||||||
|
@ -54,8 +55,6 @@ public:
|
||||||
virtual ValueWithShadow<u64> read64(u32 offset) = 0;
|
virtual ValueWithShadow<u64> read64(u32 offset) = 0;
|
||||||
|
|
||||||
virtual u8* cacheable_ptr([[maybe_unused]] u32 offset) { return nullptr; }
|
virtual u8* cacheable_ptr([[maybe_unused]] u32 offset) { return nullptr; }
|
||||||
virtual bool is_shared_buffer() const { return false; }
|
|
||||||
virtual bool is_mmap() const { return false; }
|
|
||||||
|
|
||||||
bool is_stack() const { return m_stack; }
|
bool is_stack() const { return m_stack; }
|
||||||
void set_stack(bool b) { m_stack = b; }
|
void set_stack(bool b) { m_stack = b; }
|
||||||
|
|
|
@ -49,8 +49,6 @@ public:
|
||||||
virtual u8* data() override { return m_data; }
|
virtual u8* data() override { return m_data; }
|
||||||
virtual u8* shadow_data() override { return m_shadow_data; }
|
virtual u8* shadow_data() override { return m_shadow_data; }
|
||||||
|
|
||||||
bool is_shared_buffer() const override { return true; }
|
|
||||||
|
|
||||||
int shbuf_id() const { return m_shbuf_id; }
|
int shbuf_id() const { return m_shbuf_id; }
|
||||||
|
|
||||||
int allow_all();
|
int allow_all();
|
||||||
|
|
|
@ -44,7 +44,7 @@ void SoftMMU::add_region(NonnullOwnPtr<Region> region)
|
||||||
ASSERT(!find_region({ 0x23, region->base() }));
|
ASSERT(!find_region({ 0x23, region->base() }));
|
||||||
|
|
||||||
// FIXME: More sanity checks pls
|
// FIXME: More sanity checks pls
|
||||||
if (region->is_shared_buffer())
|
if (is<SharedBufferRegion>(*region))
|
||||||
m_shbuf_regions.set(static_cast<SharedBufferRegion*>(region.ptr())->shbuf_id(), region.ptr());
|
m_shbuf_regions.set(static_cast<SharedBufferRegion*>(region.ptr())->shbuf_id(), region.ptr());
|
||||||
|
|
||||||
size_t first_page_in_region = region->base() / PAGE_SIZE;
|
size_t first_page_in_region = region->base() / PAGE_SIZE;
|
||||||
|
@ -63,7 +63,7 @@ void SoftMMU::remove_region(Region& region)
|
||||||
m_page_to_region_map[first_page_in_region + i] = nullptr;
|
m_page_to_region_map[first_page_in_region + i] = nullptr;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (region.is_shared_buffer())
|
if (is<SharedBufferRegion>(region))
|
||||||
m_shbuf_regions.remove(static_cast<SharedBufferRegion&>(region).shbuf_id());
|
m_shbuf_regions.remove(static_cast<SharedBufferRegion&>(region).shbuf_id());
|
||||||
m_regions.remove_first_matching([&](auto& entry) { return entry.ptr() == &region; });
|
m_regions.remove_first_matching([&](auto& entry) { return entry.ptr() == &region; });
|
||||||
}
|
}
|
||||||
|
@ -253,7 +253,7 @@ bool SoftMMU::fast_fill_memory8(X86::LogicalAddress address, size_t size, ValueW
|
||||||
if (!region->contains(address.offset() + size - 1))
|
if (!region->contains(address.offset() + size - 1))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (region->is_mmap() && static_cast<const MmapRegion&>(*region).is_malloc_block()) {
|
if (is<MmapRegion>(*region) && static_cast<const MmapRegion&>(*region).is_malloc_block()) {
|
||||||
if (auto* tracer = m_emulator.malloc_tracer()) {
|
if (auto* tracer = m_emulator.malloc_tracer()) {
|
||||||
// FIXME: Add a way to audit an entire range of memory instead of looping here!
|
// FIXME: Add a way to audit an entire range of memory instead of looping here!
|
||||||
for (size_t i = 0; i < size; ++i) {
|
for (size_t i = 0; i < size; ++i) {
|
||||||
|
@ -278,7 +278,7 @@ bool SoftMMU::fast_fill_memory32(X86::LogicalAddress address, size_t count, Valu
|
||||||
if (!region->contains(address.offset() + (count * sizeof(u32)) - 1))
|
if (!region->contains(address.offset() + (count * sizeof(u32)) - 1))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (region->is_mmap() && static_cast<const MmapRegion&>(*region).is_malloc_block()) {
|
if (is<MmapRegion>(*region) && static_cast<const MmapRegion&>(*region).is_malloc_block()) {
|
||||||
if (auto* tracer = m_emulator.malloc_tracer()) {
|
if (auto* tracer = m_emulator.malloc_tracer()) {
|
||||||
// FIXME: Add a way to audit an entire range of memory instead of looping here!
|
// FIXME: Add a way to audit an entire range of memory instead of looping here!
|
||||||
for (size_t i = 0; i < count; ++i) {
|
for (size_t i = 0; i < count; ++i) {
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue