
UserspaceEmulator: Don't assume entire malloc block is chunked

Accesses in the header (or trailing padding) of a malloc block should
not be associated with any mallocation since only the chunk-sized slots
actually get returned by malloc.
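
To make that layout concrete, here is a minimal standalone sketch (not the emulator's actual types; the header struct and the sizes are made up) of how a chunked malloc block is carved up, showing that only the chunk-sized slots after the header are ever handed out:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the allocator's per-block header (ChunkedBlock
// in the real code); only its size matters for this illustration.
struct FakeBlockHeader {
    char opaque[32];
};

int main()
{
    const uintptr_t block_base = 0x10000; // start of the mmap'd malloc block
    const size_t block_size = 4096;       // whole block, header included
    const size_t chunk_size = 64;         // size class served by this block

    // malloc() only returns addresses inside the chunk slots; the header and
    // any trailing remainder are never returned to the program.
    const uintptr_t first_chunk = block_base + sizeof(FakeBlockHeader);
    const size_t chunk_count = (block_size - sizeof(FakeBlockHeader)) / chunk_size;

    printf("header:  [%#zx, %#zx)\n", (size_t)block_base, (size_t)first_chunk);
    printf("chunks:  [%#zx, %#zx) in %zu slots of %zu bytes\n",
        (size_t)first_chunk, (size_t)(first_chunk + chunk_count * chunk_size),
        chunk_count, chunk_size);
    printf("padding: [%#zx, %#zx)\n",
        (size_t)(first_chunk + chunk_count * chunk_size),
        (size_t)(block_base + block_size));
    return 0;
}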

Basically, allow address-to-chunk lookup to fail, and handle such
failures gracefully at call sites.
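
As a rough sketch of that pattern (simplified, with illustrative names rather than the emulator's actual classes), the lookup returns an empty Optional for addresses that map to no chunk, and the call site treats that as "no mallocation" instead of asserting:

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Simplified, hypothetical model of per-block malloc metadata.
struct Record {
    uintptr_t address { 0 };
    size_t size { 0 };
    bool used { false };
};

struct BlockMetadata {
    uintptr_t base { 0 };
    size_t header_size { 0 };
    size_t chunk_size { 0 };
    std::vector<Record> records; // one per chunk slot

    // Address-to-chunk lookup that is allowed to fail: addresses in the
    // header or beyond the last chunk map to no record at all.
    std::optional<size_t> chunk_index_for(uintptr_t address) const
    {
        if (address < base + header_size)
            return std::nullopt;
        size_t index = (address - base - header_size) / chunk_size;
        if (index >= records.size())
            return std::nullopt;
        return index;
    }

    // Call site handles the failure gracefully instead of asserting.
    const Record* record_for(uintptr_t address) const
    {
        auto index = chunk_index_for(address);
        if (!index.has_value())
            return nullptr;
        return &records[index.value()];
    }
};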

Fixes #5706.
Author: Andreas Kling, 2021-03-09 13:27:05 +01:00
Commit: 1381720d1d (parent: 38fc522f5d)
2 changed files with 27 additions and 13 deletions

MallocTracer.cpp

@@ -101,24 +101,34 @@ void MallocTracer::target_did_malloc(Badge<Emulator>, FlatPtr address, size_t size)
         dbgln("Tracking malloc block @ {:p} with chunk_size={}, chunk_count={}", malloc_data.address, malloc_data.chunk_size, malloc_data.mallocations.size());
     }
-    mmap_region.malloc_metadata()->mallocation_for_address(address) = { address, size, true, false, m_emulator.raw_backtrace(), Vector<FlatPtr>() };
+    auto* mallocation = mmap_region.malloc_metadata()->mallocation_for_address(address);
+    VERIFY(mallocation);
+    *mallocation = { address, size, true, false, m_emulator.raw_backtrace(), Vector<FlatPtr>() };
 }

-ALWAYS_INLINE Mallocation& MallocRegionMetadata::mallocation_for_address(FlatPtr address) const
+ALWAYS_INLINE Mallocation* MallocRegionMetadata::mallocation_for_address(FlatPtr address) const
 {
-    return const_cast<Mallocation&>(this->mallocations[chunk_index_for_address(address)]);
+    auto index = chunk_index_for_address(address);
+    if (!index.has_value())
+        return nullptr;
+    return &const_cast<Mallocation&>(this->mallocations[index.value()]);
 }

-ALWAYS_INLINE size_t MallocRegionMetadata::chunk_index_for_address(FlatPtr address) const
+ALWAYS_INLINE Optional<size_t> MallocRegionMetadata::chunk_index_for_address(FlatPtr address) const
 {
     bool is_chunked_block = chunk_size <= size_classes[num_size_classes - 1];
     if (!is_chunked_block) {
         // This is a BigAllocationBlock
         return 0;
     }
-    auto chunk_offset = address - (this->address + sizeof(ChunkedBlock));
     VERIFY(this->chunk_size);
-    return chunk_offset / this->chunk_size;
+    auto offset_into_block = address - this->address;
+    if (offset_into_block < sizeof(ChunkedBlock))
+        return 0;
+    auto chunk_offset = offset_into_block - sizeof(ChunkedBlock);
+    auto chunk_index = chunk_offset / this->chunk_size;
+    if (chunk_index >= mallocations.size())
+        return {};
+    return chunk_index;
 }

 void MallocTracer::target_did_free(Badge<Emulator>, FlatPtr address)
@@ -382,4 +392,5 @@ void MallocTracer::dump_leak_report()
     else
         reportln("\n=={}== \033[31;1m{} leak(s) found: {} byte(s) leaked\033[0m", getpid(), leaks_found, bytes_leaked);
 }
+
 }

MallocTracer.h

@@ -60,8 +60,8 @@ public:
     FlatPtr address { 0 };
     size_t chunk_size { 0 };
-    size_t chunk_index_for_address(FlatPtr) const;
-    Mallocation& mallocation_for_address(FlatPtr) const;
+    Optional<size_t> chunk_index_for_address(FlatPtr) const;
+    Mallocation* mallocation_for_address(FlatPtr) const;
     Vector<Mallocation> mallocations;
 };
@@ -103,11 +103,14 @@ ALWAYS_INLINE Mallocation* MallocTracer::find_mallocation(const Region& region, FlatPtr address)
     auto* malloc_data = static_cast<MmapRegion&>(const_cast<Region&>(region)).malloc_metadata();
     if (!malloc_data)
         return nullptr;
-    auto& mallocation = malloc_data->mallocation_for_address(address);
-    if (!mallocation.used)
+    auto* mallocation = malloc_data->mallocation_for_address(address);
+    if (!mallocation)
         return nullptr;
-    VERIFY(mallocation.contains(address));
-    return &mallocation;
+    if (!mallocation->used)
+        return nullptr;
+    if (!mallocation->contains(address))
+        return nullptr;
+    return mallocation;
 }

 }