1
Fork 0
mirror of https://github.com/RGBCube/serenity synced 2025-07-27 05:37:34 +00:00

LibCoredump: Don't copy uncompressed coredumps into a ByteBuffer

This was completely unnecessary and accounted for 6% of the total time
spent when loading a WebContent coredump into CrashReporter.
This commit is contained in:
Andreas Kling 2021-09-22 00:33:43 +02:00
parent 1da8faebf5
commit 784ab75d2d
2 changed files with 31 additions and 6 deletions

View file

@@ -18,15 +18,33 @@ OwnPtr<Reader> Reader::create(const String& path)
     auto file_or_error = MappedFile::map(path);
     if (file_or_error.is_error())
        return {};
+
+    if (!Compress::GzipDecompressor::is_likely_compressed(file_or_error.value()->bytes())) {
+        // It's an uncompressed coredump.
+        return AK::adopt_own_if_nonnull(new (nothrow) Reader(file_or_error.release_value()));
+    }
+
     auto decompressed_data = decompress_coredump(file_or_error.value()->bytes());
     if (!decompressed_data.has_value())
         return {};
     return adopt_own_if_nonnull(new (nothrow) Reader(decompressed_data.release_value()));
 }
 
-Reader::Reader(ByteBuffer coredump_data)
-    : m_coredump_buffer(move(coredump_data))
-    , m_coredump_image(m_coredump_buffer.bytes())
+Reader::Reader(ByteBuffer buffer)
+    : Reader(buffer.bytes())
+{
+    m_coredump_buffer = move(buffer);
+}
+
+Reader::Reader(NonnullRefPtr<MappedFile> file)
+    : Reader(file->bytes())
+{
+    m_mapped_file = move(file);
+}
+
+Reader::Reader(ReadonlyBytes coredump_bytes)
+    : m_coredump_bytes(coredump_bytes)
+    , m_coredump_image(m_coredump_bytes)
 {
     size_t index = 0;
     m_coredump_image.for_each_program_header([this, &index](auto pheader) {
@@ -42,8 +60,6 @@ Reader::Reader(ByteBuffer coredump_data)
 
 Optional<ByteBuffer> Reader::decompress_coredump(const ReadonlyBytes& raw_coredump)
 {
-    if (!Compress::GzipDecompressor::is_likely_compressed(raw_coredump))
-        return ByteBuffer::copy(raw_coredump); // handle old format coredumps (uncompressed)
     auto decompressed_coredump = Compress::GzipDecompressor::decompress_all(raw_coredump);
     if (!decompressed_coredump.has_value())
         return ByteBuffer::copy(raw_coredump); // if we didn't manage to decompress it, try and parse it as decompressed coredump

View file

@@ -51,7 +51,9 @@ public:
     HashMap<String, String> metadata() const;
 
 private:
-    Reader(ByteBuffer);
+    explicit Reader(ReadonlyBytes);
+    explicit Reader(ByteBuffer);
+    explicit Reader(NonnullRefPtr<MappedFile>);
 
     static Optional<ByteBuffer> decompress_coredump(const ReadonlyBytes&);
@@ -75,7 +77,14 @@ private:
     // as getters with the appropriate (non-JsonValue) types.
     const JsonObject process_info() const;
 
+    // For uncompressed coredumps, we keep the MappedFile
+    RefPtr<MappedFile> m_mapped_file;
+
+    // For compressed coredumps, we decompress them into a ByteBuffer
     ByteBuffer m_coredump_buffer;
+
+    ReadonlyBytes m_coredump_bytes;
+
     ELF::Image m_coredump_image;
     ssize_t m_notes_segment_index { -1 };
 };