From 2fceffff6fd243fee4ee53defb752c10baf197dc Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Sat, 14 Nov 2020 18:27:08 +0100
Subject: [PATCH] UserspaceEmulator: Track malloc ChunkedBlocks for faster
 auditing

Instead of doing an O(n) scan over all the mallocations whenever we're
doing a read/write audit, UE now keeps track of ChunkedBlocks and their
chunks. Both the block lookup and the chunk lookup are O(1).

We know what ChunkedBlocks look like via mallocdefs.h from LibC.

Note that the old linear scan is still in use for big mallocations,
but the vast majority of mallocations are chunked, so this helps
a lot. This makes malloc auditing significantly faster! :^)
---
 DevTools/UserspaceEmulator/MallocTracer.cpp | 107 ++++++++++++++------
 DevTools/UserspaceEmulator/MallocTracer.h   |  28 ++++-
 2 files changed, 103 insertions(+), 32 deletions(-)

diff --git a/DevTools/UserspaceEmulator/MallocTracer.cpp b/DevTools/UserspaceEmulator/MallocTracer.cpp
index 5746d765a2..440d85b7b6 100644
--- a/DevTools/UserspaceEmulator/MallocTracer.cpp
+++ b/DevTools/UserspaceEmulator/MallocTracer.cpp
@@ -29,6 +29,7 @@
 #include "MmapRegion.h"
 #include <AK/LogStream.h>
 #include <AK/TemporaryChange.h>
+#include <mallocdefs.h>
 #include <string.h>

 //#define REACHABLE_DEBUG
@@ -60,7 +61,29 @@ void MallocTracer::target_did_malloc(Badge<Emulator>, FlatPtr address, size_t siz
         existing_mallocation->free_backtrace.clear();
         return;
     }
-    m_mallocations.append({ address, size, false, Emulator::the().raw_backtrace(), Vector<FlatPtr>() });
+
+    if (size <= size_classes[num_size_classes - 1]) {
+        FlatPtr chunked_block_address = address & ChunkedBlock::block_mask;
+        // FIXME: Don't do a double hash lookup here.
+        auto tracked_chunked_block = m_chunked_blocks.get(chunked_block_address);
+        if (!tracked_chunked_block.has_value()) {
+            m_chunked_blocks.set(chunked_block_address, make<TrackedChunkedBlock>());
+            tracked_chunked_block = m_chunked_blocks.get(chunked_block_address);
+            auto& block = const_cast<TrackedChunkedBlock&>(*tracked_chunked_block.value());
+            block.address = chunked_block_address;
+            block.chunk_size = mmap_region.read32(offsetof(CommonHeader, m_size)).value();
+            auto chunk_count = (ChunkedBlock::block_size - sizeof(ChunkedBlock)) / block.chunk_size;
+            block.mallocations.resize(chunk_count);
+            dbgln("Tracking ChunkedBlock @ {:p} with chunk_size={}, chunk_count={}", block.address, block.chunk_size, chunk_count);
+        }
+        ASSERT(tracked_chunked_block.has_value());
+        auto& block = const_cast<TrackedChunkedBlock&>(*tracked_chunked_block.value());
+        auto chunk_offset = address - (block.address + sizeof(ChunkedBlock));
+        auto chunk_index = chunk_offset / block.chunk_size;
+        block.mallocations[chunk_index] = { address, size, true, false, Emulator::the().raw_backtrace(), Vector<FlatPtr>() };
+    } else {
+        m_big_mallocations.append({ address, size, true, false, Emulator::the().raw_backtrace(), Vector<FlatPtr>() });
+    }
 }

 void MallocTracer::target_did_free(Badge<Emulator>, FlatPtr address)
@@ -68,18 +91,16 @@ void MallocTracer::target_did_free(Badge<Emulator>, FlatPtr address)
     if (!address)
         return;

-    for (auto& mallocation : m_mallocations) {
-        if (mallocation.address == address) {
-            if (mallocation.freed) {
-                reportln("\n=={}== \033[31;1mDouble free()\033[0m, {:p}", getpid(), address);
-                reportln("=={}== Address {} has already been passed to free()", getpid(), address);
-                Emulator::the().dump_backtrace();
-            } else {
-                mallocation.freed = true;
-                mallocation.free_backtrace = Emulator::the().raw_backtrace();
-            }
-            return;
+    if (auto* mallocation = find_mallocation(address)) {
+        if (mallocation->freed) {
+            reportln("\n=={}== \033[31;1mDouble free()\033[0m, {:p}", getpid(), address);
+            reportln("=={}== Address {} has already been passed to free()", getpid(), address);
+            Emulator::the().dump_backtrace();
+        } else {
+            mallocation->freed = true;
+            mallocation->free_backtrace = Emulator::the().raw_backtrace();
         }
+        return;
     }

     reportln("\n=={}== \033[31;1mInvalid free()\033[0m, {:p}", getpid(), address);
@@ -117,34 +138,52 @@ void MallocTracer::target_did_realloc(Badge<Emulator>, FlatPtr address, size_t si

 MallocTracer::Mallocation* MallocTracer::find_mallocation(FlatPtr address)
 {
-    for (auto& mallocation : m_mallocations) {
+    FlatPtr possible_chunked_block = address & ChunkedBlock::block_mask;
+
+    auto chunked_block = m_chunked_blocks.get(possible_chunked_block);
+    if (chunked_block.has_value()) {
+        auto& block = *chunked_block.value();
+        auto chunk_offset = address - (block.address + sizeof(ChunkedBlock));
+        auto chunk_index = chunk_offset / block.chunk_size;
+        auto& mallocation = block.mallocations[chunk_index];
+        if (mallocation.used) {
+            ASSERT(mallocation.contains(address));
+            return const_cast<Mallocation*>(&mallocation);
+        }
+        return nullptr;
+    }
+
+    for (auto& mallocation : m_big_mallocations) {
         if (mallocation.contains(address))
             return &mallocation;
     }
+
     return nullptr;
 }

 MallocTracer::Mallocation* MallocTracer::find_mallocation_before(FlatPtr address)
 {
     Mallocation* found_mallocation = nullptr;
-    for (auto& mallocation : m_mallocations) {
+    for_each_mallocation([&](auto& mallocation) {
         if (mallocation.address >= address)
-            continue;
+            return IterationDecision::Continue;
         if (!found_mallocation || (mallocation.address > found_mallocation->address))
-            found_mallocation = &mallocation;
-    }
+            found_mallocation = const_cast<Mallocation*>(&mallocation);
+        return IterationDecision::Continue;
+    });
     return found_mallocation;
 }

 MallocTracer::Mallocation* MallocTracer::find_mallocation_after(FlatPtr address)
 {
     Mallocation* found_mallocation = nullptr;
-    for (auto& mallocation : m_mallocations) {
+    for_each_mallocation([&](auto& mallocation) {
         if (mallocation.address <= address)
-            continue;
+            return IterationDecision::Continue;
         if (!found_mallocation || (mallocation.address < found_mallocation->address))
-            found_mallocation = &mallocation;
-    }
+            found_mallocation = const_cast<Mallocation*>(&mallocation);
+        return IterationDecision::Continue;
+    });
     return found_mallocation;
 }

@@ -235,12 +274,14 @@ bool MallocTracer::is_reachable(const Mallocation& mallocation) const
 {
     ASSERT(!mallocation.freed);

+    bool reachable = false;
+
     // 1. Search in active (non-freed) mallocations for pointers to this mallocation
-    for (auto& other_mallocation : m_mallocations) {
+    for_each_mallocation([&](auto& other_mallocation) {
         if (&mallocation == &other_mallocation)
-            continue;
+            return IterationDecision::Continue;
         if (other_mallocation.freed)
-            continue;
+            return IterationDecision::Continue;
         size_t pointers_in_mallocation = other_mallocation.size / sizeof(u32);
         for (size_t i = 0; i < pointers_in_mallocation; ++i) {
             auto value = Emulator::the().mmu().read32({ 0x20, other_mallocation.address + i * sizeof(u32) });
@@ -248,12 +289,15 @@ bool MallocTracer::is_reachable(const Mallocation& mallocation) const
 #ifdef REACHABLE_DEBUG
                 reportln("mallocation {:p} is reachable from other mallocation {:p}", mallocation.address, other_mallocation.address);
 #endif
-                return true;
+                reachable = true;
+                return IterationDecision::Break;
             }
         }
-    }
+        return IterationDecision::Continue;
+    });

-    bool reachable = false;
+    if (reachable)
+        return true;

     // 2. Search in other memory regions for pointers to this mallocation
     Emulator::the().mmu().for_each_region([&](auto& region) {
@@ -288,16 +332,17 @@ void MallocTracer::dump_leak_report()

     size_t bytes_leaked = 0;
     size_t leaks_found = 0;
-    for (auto& mallocation : m_mallocations) {
+    for_each_mallocation([&](auto& mallocation) {
         if (mallocation.freed)
-            continue;
+            return IterationDecision::Continue;
         if (is_reachable(mallocation))
-            continue;
+            return IterationDecision::Continue;
         ++leaks_found;
         bytes_leaked += mallocation.size;
         reportln("\n=={}== \033[31;1mLeak\033[0m, {}-byte allocation at address {:p}", getpid(), mallocation.size, mallocation.address);
         Emulator::the().dump_backtrace(mallocation.malloc_backtrace);
-    }
+        return IterationDecision::Continue;
+    });

     if (!leaks_found)
         reportln("\n=={}== \033[32;1mNo leaks found!\033[0m", getpid());
diff --git a/DevTools/UserspaceEmulator/MallocTracer.h b/DevTools/UserspaceEmulator/MallocTracer.h
index e122c1069e..4a90405fe6 100644
--- a/DevTools/UserspaceEmulator/MallocTracer.h
+++ b/DevTools/UserspaceEmulator/MallocTracer.h
@@ -27,6 +27,8 @@
 #pragma once

 #include <AK/Badge.h>
+#include <AK/HashMap.h>
+#include <AK/NonnullOwnPtr.h>
 #include <AK/Types.h>
 #include <AK/Vector.h>

@@ -56,18 +58,42 @@ private:

         FlatPtr address { 0 };
         size_t size { 0 };
+        bool used { false };
         bool freed { false };

         Vector<FlatPtr> malloc_backtrace;
         Vector<FlatPtr> free_backtrace;
     };

+    struct TrackedChunkedBlock {
+        FlatPtr address { 0 };
+        size_t chunk_size { 0 };
+
+        Vector<Mallocation> mallocations;
+    };
+
+    template<typename Callback>
+    void for_each_mallocation(Callback callback) const
+    {
+        for (auto& it : m_chunked_blocks) {
+            for (auto& mallocation : it.value->mallocations) {
+                if (mallocation.used && callback(mallocation) == IterationDecision::Break)
+                    return;
+            }
+        }
+        for (auto& big_mallocation : m_big_mallocations) {
+            if (callback(big_mallocation) == IterationDecision::Break)
+                return;
+        }
+    }
+
     Mallocation* find_mallocation(FlatPtr);
     Mallocation* find_mallocation_before(FlatPtr);
     Mallocation* find_mallocation_after(FlatPtr);
     bool is_reachable(const Mallocation&) const;

-    Vector<Mallocation> m_mallocations;
+    HashMap<FlatPtr, NonnullOwnPtr<TrackedChunkedBlock>> m_chunked_blocks;
+    Vector<Mallocation> m_big_mallocations;

     bool m_auditing_enabled { true };
 };
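
For readers unfamiliar with LibC's malloc layout, the following standalone sketch illustrates the O(1) lookup the patch performs in find_mallocation(): mask the pointer down to its block base, then divide the in-block offset by the chunk size. It is illustrative only; the block size, mask, header size, and addresses below are assumptions standing in for the real ChunkedBlock constants in LibC's mallocdefs.h.

// Standalone illustration (not part of the patch) of the O(1) chunk lookup.
// Assumption: a chunked block is a 64 KiB-aligned slab that starts with a
// fixed-size header; the real layout lives in LibC's mallocdefs.h.
#include <cstdint>
#include <cstdio>
#include <vector>

using FlatPtr = uintptr_t;

static constexpr FlatPtr block_size = 64 * 1024;         // assumed block span
static constexpr FlatPtr block_mask = ~(block_size - 1); // clears in-block offset bits
static constexpr size_t header_size = 64;                // stand-in for sizeof(ChunkedBlock)

struct TrackedChunkedBlock {
    FlatPtr address { 0 };
    size_t chunk_size { 0 };
    std::vector<bool> used; // one slot per chunk, like the patch's Vector<Mallocation>
};

// Mirrors the fast path: which block (mask), then which chunk (divide).
static bool is_tracked(const TrackedChunkedBlock& block, FlatPtr address)
{
    FlatPtr block_base = address & block_mask; // O(1) block lookup
    if (block_base != block.address)
        return false;
    size_t chunk_offset = address - (block.address + header_size);
    size_t chunk_index = chunk_offset / block.chunk_size; // O(1) chunk lookup
    return chunk_index < block.used.size() && block.used[chunk_index];
}

int main()
{
    TrackedChunkedBlock block;
    block.address = 0x20010000; // hypothetical 64 KiB-aligned block base
    block.chunk_size = 32;
    block.used.assign((block_size - header_size) / block.chunk_size, false);

    FlatPtr allocation = block.address + header_size + 5 * block.chunk_size;
    block.used[5] = true; // mark the sixth chunk as allocated

    printf("tracked: %d\n", is_tracked(block, allocation));      // 1
    printf("tracked: %d\n", is_tracked(block, allocation + 64)); // 0 (chunk 7 unused)
}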
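
The new for_each_mallocation() helper funnels both chunked and big mallocations through one callback that follows AK's IterationDecision convention: return Break to stop early, Continue otherwise. Below is a minimal self-contained sketch of that pattern; the Tracker type and its data are hypothetical stand-ins, not the real MallocTracer API.

// Sketch of the IterationDecision callback pattern used by
// for_each_mallocation(). AK's real enum lives in AK/IterationDecision.h;
// everything else here is a simplified stand-in.
#include <cstdio>
#include <vector>

enum class IterationDecision { Continue, Break };

struct Mallocation {
    unsigned long address { 0 };
    bool freed { false };
};

struct Tracker {
    std::vector<Mallocation> mallocations;

    // Visits every mallocation until the callback asks to stop.
    template<typename Callback>
    void for_each_mallocation(Callback callback) const
    {
        for (auto& mallocation : mallocations) {
            if (callback(mallocation) == IterationDecision::Break)
                return;
        }
    }
};

int main()
{
    Tracker tracker;
    tracker.mallocations = { { 0x1000, false }, { 0x2000, true }, { 0x3000, false } };

    // Find the first freed mallocation, then stop iterating early.
    const Mallocation* first_freed = nullptr;
    tracker.for_each_mallocation([&](const Mallocation& mallocation) {
        if (!mallocation.freed)
            return IterationDecision::Continue;
        first_freed = &mallocation;
        return IterationDecision::Break;
    });

    if (first_freed)
        printf("first freed allocation at %#lx\n", first_freed->address);
}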