
Emulator: Use libc.so bounds to fast-reject non-malloc addresses

The auditing code always starts by checking if we're in one of the
ignored code ranges (malloc, free, realloc, syscall, etc.).

To reduce the number of checks needed, we can cache the bounds of
the LibC text segment. This allows us to fast-reject addresses that
cannot possibly be a LibC function.
Author: Andreas Kling
Date:   2021-03-09 14:59:41 +01:00
Parent: 0c46918b73
Commit: c192b6c61d

2 changed files with 12 additions and 0 deletions
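In isolation, the idea looks roughly like the sketch below. The Range and AuditRanges types and all member names are placeholders invented for illustration; the emulator's actual fields appear in the diffs that follow.

    // Minimal standalone sketch of the fast-reject pattern; all names here are
    // placeholders for illustration, not the emulator's real members.
    #include <cstdint>

    struct Range {
        std::uintptr_t start { 0 };
        std::uintptr_t end { 0 }; // exclusive
        bool contains(std::uintptr_t address) const { return address >= start && address < end; }
    };

    struct AuditRanges {
        Range libc_text;     // cached once, when the "libc.so: .text" region is mmapped
        Range malloc_symbol; // per-symbol ranges resolved when the region is mapped
        Range free_symbol;
        Range realloc_symbol;

        bool is_in_malloc_or_free(std::uintptr_t eip) const
        {
            // Fast reject: an address outside libc.so's .text cannot be inside any
            // malloc-family symbol, so the per-symbol comparisons are skipped.
            if (!libc_text.contains(eip))
                return false;
            return malloc_symbol.contains(eip)
                || free_symbol.contains(eip)
                || realloc_symbol.contains(eip);
        }
    };

The same early return shows up in is_in_malloc_or_free() in the second diff below; the only extra state needed is the cached [start, end) range of the LibC text segment.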


@@ -1054,6 +1054,8 @@ u32 Emulator::virt$mmap(u32 params_addr)
     } else {
         auto region = MmapRegion::create_file_backed(final_address, final_size, params.prot, params.flags, params.fd, params.offset, move(name_str));
         if (region->name() == "libc.so: .text") {
+            m_libc_start = final_address;
+            m_libc_end = final_address + final_size;
             bool rc = find_malloc_symbols(*region);
             VERIFY(rc);
         }


@@ -65,6 +65,7 @@ public:
     bool is_in_malloc_or_free() const;
     bool is_in_loader_code() const;
     bool is_in_libsystem() const;
+    bool is_in_libc() const;
     void did_receive_signal(int signum) { m_pending_signals |= (1 << signum); }
@@ -199,6 +200,8 @@ private:
     FlatPtr m_malloc_size_symbol_start { 0 };
     FlatPtr m_malloc_size_symbol_end { 0 };
+    FlatPtr m_libc_start { 0 };
+    FlatPtr m_libc_end { 0 };
     FlatPtr m_libsystem_start { 0 };
     FlatPtr m_libsystem_end { 0 };
@@ -226,6 +229,11 @@ private:
     RangeAllocator m_range_allocator;
 };
 
+ALWAYS_INLINE bool Emulator::is_in_libc() const
+{
+    return m_cpu.base_eip() >= m_libc_start && m_cpu.base_eip() < m_libc_end;
+}
+
 ALWAYS_INLINE bool Emulator::is_in_libsystem() const
 {
     return m_cpu.base_eip() >= m_libsystem_start && m_cpu.base_eip() < m_libsystem_end;
@@ -233,6 +241,8 @@ ALWAYS_INLINE bool Emulator::is_in_libsystem() const
 ALWAYS_INLINE bool Emulator::is_in_malloc_or_free() const
 {
+    if (!is_in_libc())
+        return false;
     return (m_cpu.base_eip() >= m_malloc_symbol_start && m_cpu.base_eip() < m_malloc_symbol_end)
         || (m_cpu.base_eip() >= m_free_symbol_start && m_cpu.base_eip() < m_free_symbol_end)
         || (m_cpu.base_eip() >= m_realloc_symbol_start && m_cpu.base_eip() < m_realloc_symbol_end)