UserspaceEmulator: Hang malloc metadata on malloc block MmapRegions
Instead of tracking known malloc blocks in a separate hash table, add an optional malloc metadata pointer to MmapRegion. This makes finding the malloc metadata for a given pointer extremely fast since it can piggyback on the page table array. :^)
parent 395313039d
commit f41b9946e2

4 changed files with 77 additions and 83 deletions
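The heart of the change: malloc-block lookups go from a hash-table probe (mask the address, hash it, walk buckets) to a flat array index plus a pointer read. Below is a minimal stand-alone sketch of the two lookup strategies. It is not SerenityOS code: Region, Metadata, find_via_hash_table, find_via_region, and the page-to-region array are hypothetical stand-ins for MmapRegion, MallocRegionMetadata, and SoftMMU's page table array.

// Sketch only: hypothetical stand-ins, not the SerenityOS API.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <vector>

struct Metadata {
    size_t chunk_size { 0 }; // per-block malloc bookkeeping lives here
};

struct Region {
    uintptr_t base { 0 };
    size_t size { 0 };
    std::unique_ptr<Metadata> malloc_metadata; // hangs off the region itself
};

// Before: a separate hash table keyed by block base address; every lookup
// pays for masking, hashing and bucket probing.
Metadata* find_via_hash_table(const std::unordered_map<uintptr_t, std::unique_ptr<Metadata>>& blocks, uintptr_t address, uintptr_t block_mask)
{
    auto it = blocks.find(address & block_mask);
    return it == blocks.end() ? nullptr : it->second.get();
}

// After: the emulator already resolves every address to its Region through a
// flat page-indexed array, so the metadata pointer comes along for free.
Metadata* find_via_region(const std::vector<Region*>& page_to_region, uintptr_t address)
{
    constexpr uintptr_t page_size = 4096;
    Region* region = page_to_region[address / page_size]; // O(1) array index
    return region ? region->malloc_metadata.get() : nullptr;
}

int main()
{
    std::vector<Region*> page_to_region(1024, nullptr);
    Region region { 0x10000, 4096, std::make_unique<Metadata>() };
    page_to_region[region.base / 4096] = &region;
    return find_via_region(page_to_region, 0x10042) ? 0 : 1;
}

The diff below does the real version of this: MallocTracer::find_mallocation() now asks the MMU for the region containing the address and reads the metadata pointer straight off the MmapRegion, instead of consulting the old m_chunked_blocks hash map.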
MallocTracer.cpp

@@ -40,12 +40,23 @@ MallocTracer::MallocTracer()
 {
 }
 
-void MallocTracer::notify_malloc_block_was_released(Badge<MmapRegion>, MmapRegion& region)
+template<typename Callback>
+inline void MallocTracer::for_each_mallocation(Callback callback) const
 {
-    // FIXME: It's sad that we may lose a bunch of free() backtraces here,
-    //        but if the address is reused for a new ChunkedBlock, things will
-    //        get extremely confused.
-    m_chunked_blocks.remove(region.base());
+    Emulator::the().mmu().for_each_region([&](auto& region) {
+        if (region.is_mmap() && static_cast<const MmapRegion&>(region).is_malloc_block()) {
+            auto* malloc_data = static_cast<MmapRegion&>(region).malloc_metadata();
+            for (auto& mallocation : malloc_data->mallocations) {
+                if (mallocation.used && callback(mallocation) == IterationDecision::Break)
+                    return IterationDecision::Break;
+            }
+        }
+        return IterationDecision::Continue;
+    });
+    for (auto& big_mallocation : m_big_mallocations) {
+        if (callback(big_mallocation) == IterationDecision::Break)
+            return;
+    }
 }
 
 void MallocTracer::target_did_malloc(Badge<SoftCPU>, FlatPtr address, size_t size)
@@ -70,35 +81,30 @@ void MallocTracer::target_did_malloc(Badge<SoftCPU>, FlatPtr address, size_t size)
         return;
     }
 
-    if (size <= size_classes[num_size_classes - 1]) {
-        FlatPtr chunked_block_address = address & ChunkedBlock::block_mask;
-        TrackedChunkedBlock* block = nullptr;
-        auto tracked_chunked_block = m_chunked_blocks.get(chunked_block_address);
-        if (!tracked_chunked_block.has_value()) {
-            auto new_block = make<TrackedChunkedBlock>();
-            block = new_block.ptr();
-            m_chunked_blocks.set(chunked_block_address, move(new_block));
-            tracked_chunked_block = m_chunked_blocks.get(chunked_block_address);
-            auto& block = const_cast<TrackedChunkedBlock&>(*tracked_chunked_block.value());
-            block.address = chunked_block_address;
-            block.chunk_size = mmap_region.read32(offsetof(CommonHeader, m_size)).value();
-            block.mallocations.resize((ChunkedBlock::block_size - sizeof(ChunkedBlock)) / block.chunk_size);
-            dbgln("Tracking ChunkedBlock @ {:p} with chunk_size={}, chunk_count={}", block.address, block.chunk_size, block.mallocations.size());
-        } else {
-            block = const_cast<TrackedChunkedBlock*>(tracked_chunked_block.value());
-        }
-        block->mallocation_for_address(address) = { address, size, true, false, Emulator::the().raw_backtrace(), Vector<FlatPtr>() };
+    bool is_chunked_allocation = size <= size_classes[num_size_classes - 1];
+    if (is_chunked_allocation) {
+        MallocRegionMetadata* malloc_data = static_cast<MmapRegion&>(*region).malloc_metadata();
+        if (!malloc_data) {
+            auto new_malloc_data = make<MallocRegionMetadata>();
+            malloc_data = new_malloc_data.ptr();
+            static_cast<MmapRegion&>(*region).set_malloc_metadata({}, move(new_malloc_data));
+            malloc_data->address = region->base();
+            malloc_data->chunk_size = mmap_region.read32(offsetof(CommonHeader, m_size)).value();
+            malloc_data->mallocations.resize((ChunkedBlock::block_size - sizeof(ChunkedBlock)) / malloc_data->chunk_size);
+            dbgln("Tracking ChunkedBlock @ {:p} with chunk_size={}, chunk_count={}", malloc_data->address, malloc_data->chunk_size, malloc_data->mallocations.size());
+        }
+        malloc_data->mallocation_for_address(address) = { address, size, true, false, Emulator::the().raw_backtrace(), Vector<FlatPtr>() };
     } else {
         m_big_mallocations.append({ address, size, true, false, Emulator::the().raw_backtrace(), Vector<FlatPtr>() });
     }
 }
 
-ALWAYS_INLINE MallocTracer::Mallocation& MallocTracer::TrackedChunkedBlock::mallocation_for_address(FlatPtr address) const
+ALWAYS_INLINE Mallocation& MallocRegionMetadata::mallocation_for_address(FlatPtr address) const
 {
     return const_cast<Mallocation&>(this->mallocations[chunk_index_for_address(address)]);
 }
 
-ALWAYS_INLINE size_t MallocTracer::TrackedChunkedBlock::chunk_index_for_address(FlatPtr address) const
+ALWAYS_INLINE size_t MallocRegionMetadata::chunk_index_for_address(FlatPtr address) const
 {
     auto chunk_offset = address - (this->address + sizeof(ChunkedBlock));
     return chunk_offset / this->chunk_size;
@@ -154,19 +160,18 @@ void MallocTracer::target_did_realloc(Badge<SoftCPU>, FlatPtr address, size_t size)
     existing_mallocation->malloc_backtrace = Emulator::the().raw_backtrace();
 }
 
-MallocTracer::Mallocation* MallocTracer::find_mallocation(FlatPtr address)
+Mallocation* MallocTracer::find_mallocation(FlatPtr address)
 {
-    FlatPtr possible_chunked_block = address & ChunkedBlock::block_mask;
-
-    auto chunked_block = m_chunked_blocks.get(possible_chunked_block);
-    if (chunked_block.has_value()) {
-        auto& block = *chunked_block.value();
-        auto& mallocation = block.mallocation_for_address(address);
-        if (mallocation.used) {
-            ASSERT(mallocation.contains(address));
-            return const_cast<Mallocation*>(&mallocation);
+    if (auto* region = Emulator::the().mmu().find_region({ 0x23, address })) {
+        if (region->is_mmap() && static_cast<MmapRegion&>(*region).malloc_metadata()) {
+            auto& malloc_data = *static_cast<MmapRegion&>(*region).malloc_metadata();
+            auto& mallocation = malloc_data.mallocation_for_address(address);
+            if (mallocation.used) {
+                ASSERT(mallocation.contains(address));
+                return &mallocation;
+            }
+            return nullptr;
         }
-        return nullptr;
     }
 
     for (auto& mallocation : m_big_mallocations) {
@@ -177,7 +182,7 @@ MallocTracer::Mallocation* MallocTracer::find_mallocation(FlatPtr address)
     return nullptr;
 }
 
-MallocTracer::Mallocation* MallocTracer::find_mallocation_before(FlatPtr address)
+Mallocation* MallocTracer::find_mallocation_before(FlatPtr address)
 {
     Mallocation* found_mallocation = nullptr;
     for_each_mallocation([&](auto& mallocation) {
@@ -190,7 +195,7 @@ MallocTracer::Mallocation* MallocTracer::find_mallocation_before(FlatPtr address)
    return found_mallocation;
 }
 
-MallocTracer::Mallocation* MallocTracer::find_mallocation_after(FlatPtr address)
+Mallocation* MallocTracer::find_mallocation_after(FlatPtr address)
 {
     Mallocation* found_mallocation = nullptr;
     for_each_mallocation([&](auto& mallocation) {
MallocTracer.h

@@ -37,6 +37,32 @@ namespace UserspaceEmulator {
 class MmapRegion;
 class SoftCPU;
 
+struct Mallocation {
+    bool contains(FlatPtr a) const
+    {
+        return a >= address && a < (address + size);
+    }
+
+    FlatPtr address { 0 };
+    size_t size { 0 };
+    bool used { false };
+    bool freed { false };
+
+    Vector<FlatPtr> malloc_backtrace;
+    Vector<FlatPtr> free_backtrace;
+};
+
+class MallocRegionMetadata {
+public:
+    FlatPtr address { 0 };
+    size_t chunk_size { 0 };
+
+    size_t chunk_index_for_address(FlatPtr) const;
+    Mallocation& mallocation_for_address(FlatPtr) const;
+
+    Vector<Mallocation> mallocations;
+};
+
 class MallocTracer {
 public:
     MallocTracer();
@@ -45,60 +71,20 @@ public:
     void target_did_free(Badge<SoftCPU>, FlatPtr address);
     void target_did_realloc(Badge<SoftCPU>, FlatPtr address, size_t);
 
-    void notify_malloc_block_was_released(Badge<MmapRegion>, MmapRegion&);
-
     void audit_read(FlatPtr address, size_t);
     void audit_write(FlatPtr address, size_t);
 
     void dump_leak_report();
 
 private:
-    struct Mallocation {
-        bool contains(FlatPtr a) const
-        {
-            return a >= address && a < (address + size);
-        }
-
-        FlatPtr address { 0 };
-        size_t size { 0 };
-        bool used { false };
-        bool freed { false };
-
-        Vector<FlatPtr> malloc_backtrace;
-        Vector<FlatPtr> free_backtrace;
-    };
-
-    struct TrackedChunkedBlock {
-        FlatPtr address { 0 };
-        size_t chunk_size { 0 };
-
-        size_t chunk_index_for_address(FlatPtr) const;
-        Mallocation& mallocation_for_address(FlatPtr) const;
-
-        Vector<Mallocation> mallocations;
-    };
-
     template<typename Callback>
-    void for_each_mallocation(Callback callback) const
-    {
-        for (auto& it : m_chunked_blocks) {
-            for (auto& mallocation : it.value->mallocations) {
-                if (mallocation.used && callback(mallocation) == IterationDecision::Break)
-                    return;
-            }
-        }
-        for (auto& big_mallocation : m_big_mallocations) {
-            if (callback(big_mallocation) == IterationDecision::Break)
-                return;
-        }
-    }
+    void for_each_mallocation(Callback callback) const;
 
     Mallocation* find_mallocation(FlatPtr);
     Mallocation* find_mallocation_before(FlatPtr);
     Mallocation* find_mallocation_after(FlatPtr);
     bool is_reachable(const Mallocation&) const;
 
-    HashMap<FlatPtr, NonnullOwnPtr<TrackedChunkedBlock>> m_chunked_blocks;
     Vector<Mallocation> m_big_mallocations;
 
     bool m_auditing_enabled { true };
MmapRegion.cpp

@@ -58,11 +58,6 @@ MmapRegion::MmapRegion(u32 base, u32 size, int prot)
 
 MmapRegion::~MmapRegion()
 {
-    if (is_malloc_block()) {
-        if (auto* tracer = Emulator::the().malloc_tracer())
-            tracer->notify_malloc_block_was_released({}, *this);
-    }
-
     free(m_shadow_data);
     if (m_file_backed)
         munmap(m_data, size());
MmapRegion.h

@@ -31,6 +31,9 @@
 
 namespace UserspaceEmulator {
 
+class MallocRegionMetadata;
+class MallocTracer;
+
 class MmapRegion final : public SoftMMU::Region {
 public:
     static NonnullOwnPtr<MmapRegion> create_anonymous(u32 base, u32 size, u32 prot);
@@ -59,6 +62,9 @@ public:
 
     void set_prot(int prot) { m_prot = prot; }
 
+    MallocRegionMetadata* malloc_metadata() { return m_malloc_metadata; }
+    void set_malloc_metadata(Badge<MallocTracer>, NonnullOwnPtr<MallocRegionMetadata> metadata) { m_malloc_metadata = move(metadata); }
+
 private:
     MmapRegion(u32 base, u32 size, int prot);
     virtual bool is_mmap() const override { return true; }
@@ -68,6 +74,8 @@ private:
     int m_prot { 0 };
     bool m_file_backed { false };
     bool m_malloc { false };
+
+    OwnPtr<MallocRegionMetadata> m_malloc_metadata;
 };
 
 }