LibCoreDump+CrashDaemon: Compress coredumps
Most coredumps contain large amounts of consecutive null bytes, which makes them prime candidates for compression. This commit makes CrashDaemon compress coredump files once the kernel finishes emitting them, and adds the functionality needed in LibCoreDump to parse the compressed files.
parent b8f462a78b
commit 9f656b6fa9
8 changed files with 70 additions and 15 deletions
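For context, the CrashDaemon half of this change is not shown in the hunks below. The following is a minimal sketch of what the compression step could look like, assuming LibCompress's GzipCompressor::compress_all() API of the era; the compress_coredump() helper name and the in-place overwrite are illustrative assumptions, not the commit's actual code:

#include <AK/ByteBuffer.h>
#include <AK/MappedFile.h>
#include <AK/String.h>
#include <LibCompress/Gzip.h>
#include <LibCore/File.h>

// Hypothetical helper; the commit's actual CrashDaemon code may differ.
static bool compress_coredump(const String& coredump_path)
{
    // Map the raw coredump emitted by the kernel.
    auto file_or_error = MappedFile::map(coredump_path);
    if (file_or_error.is_error())
        return false;
    // Gzip the whole thing; long runs of null bytes compress extremely well.
    auto compressed = Compress::GzipCompressor::compress_all(file_or_error.value()->bytes());
    if (!compressed.has_value())
        return false;
    // Overwrite the on-disk dump with its gzipped equivalent. The reader side
    // detects compression by sniffing the gzip magic, so the path can stay the same.
    auto output_or_error = Core::File::open(coredump_path, Core::IODevice::WriteOnly);
    if (output_or_error.is_error())
        return false;
    return output_or_error.value()->write(compressed.value());
}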
LibCoreDump/CMakeLists.txt

@@ -4,4 +4,4 @@ set(SOURCES
 )
 
 serenity_lib(LibCoreDump coredump)
-target_link_libraries(LibCoreDump LibC LibCore LibDebug)
+target_link_libraries(LibCoreDump LibC LibCompress LibCore LibDebug)
LibCoreDump/Reader.cpp

@@ -26,6 +26,7 @@
 #include <AK/JsonObject.h>
 #include <AK/JsonValue.h>
+#include <LibCompress/Gzip.h>
 #include <LibCoreDump/Reader.h>
 #include <signal_numbers.h>
 #include <string.h>

@@ -37,12 +38,12 @@ OwnPtr<Reader> Reader::create(const String& path)
     auto file_or_error = MappedFile::map(path);
     if (file_or_error.is_error())
         return {};
-    return adopt_own(*new Reader(file_or_error.release_value()));
+    return adopt_own(*new Reader(file_or_error.value()->bytes()));
 }
 
-Reader::Reader(NonnullRefPtr<MappedFile> coredump_file)
-    : m_coredump_file(move(coredump_file))
-    , m_coredump_image(m_coredump_file->bytes())
+Reader::Reader(ReadonlyBytes coredump_bytes)
+    : m_coredump_buffer(decompress_coredump(coredump_bytes))
+    , m_coredump_image(m_coredump_buffer.bytes())
 {
     size_t index = 0;
     m_coredump_image.for_each_program_header([this, &index](auto pheader) {

@@ -56,6 +57,16 @@ Reader::Reader(NonnullRefPtr<MappedFile> coredump_file)
     VERIFY(m_notes_segment_index != -1);
 }
 
+ByteBuffer Reader::decompress_coredump(const ReadonlyBytes& raw_coredump)
+{
+    if (!Compress::GzipDecompressor::is_likely_compressed(raw_coredump))
+        return ByteBuffer::copy(raw_coredump); // handle old-format core dumps (uncompressed)
+    auto decompressed_coredump = Compress::GzipDecompressor::decompress_all(raw_coredump);
+    if (!decompressed_coredump.has_value())
+        return ByteBuffer::copy(raw_coredump); // if we didn't manage to decompress it, try to parse it as an uncompressed core dump
+    return decompressed_coredump.value();
+}
+
 Reader::~Reader()
 {
 }
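With decompression handled inside the Reader, callers are unchanged. A hedged usage sketch (the coredump path is hypothetical, and the CoreDump namespace is assumed from LibCoreDump's conventions):

#include <LibCoreDump/Reader.h>

// Illustrative only; the path below is hypothetical.
auto reader = CoreDump::Reader::create("/tmp/coredump/Crashy_123");
if (reader) {
    // Works for both new gzip-compressed dumps and old-format raw dumps,
    // because decompress_coredump() falls back to a plain copy of the bytes.
    auto metadata = reader->metadata();
}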
LibCoreDump/Reader.h

@@ -70,7 +70,9 @@ public:
     HashMap<String, String> metadata() const;
 
 private:
-    Reader(NonnullRefPtr<MappedFile>);
+    Reader(ReadonlyBytes);
+
+    static ByteBuffer decompress_coredump(const ReadonlyBytes&);
 
     class NotesEntryIterator {
     public:

@@ -92,7 +94,7 @@ private:
     // as getters with the appropriate (non-JsonValue) types.
     const JsonObject process_info() const;
 
-    NonnullRefPtr<MappedFile> m_coredump_file;
+    ByteBuffer m_coredump_buffer;
     ELF::Image m_coredump_image;
     ssize_t m_notes_segment_index { -1 };
 };
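Design note: the Reader previously had to keep the MappedFile alive so the ELF::Image could point into the mapping. Since a decompressed image has to live somewhere, the Reader now owns a ByteBuffer (m_coredump_buffer) and points the ELF::Image at that instead; the uncompressed fallback path copies the mapped bytes into the buffer, so ownership is uniform whether or not the dump on disk was gzipped.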