CrashDaemon+CrashReporter: Streamline crash reporting a little bit

Before this patch, this is what would happen after something crashed:

1. CrashDaemon finds a new coredump in /tmp
2. CrashDaemon compresses the new coredump (gzip)
3. CrashDaemon parses the uncompressed coredump and prints a backtrace
4. CrashDaemon launches CrashReporter
5. CrashReporter parses the uncompressed coredump (again)
6. CrashReporter unlinks the uncompressed coredump
7. CrashReporter displays a GUI

This was taking quite a long time when a large program crashed
(like one of Browser's WebContent processes).

The new flow:

1. CrashDaemon finds a new coredump in /tmp
2. CrashDaemon mmap()'s the (uncompressed) coredump
3. CrashDaemon launches CrashReporter
4. CrashDaemon goes to sleep for 3 seconds (hack alert!)
5. CrashReporter parses the (uncompressed) coredump
6. CrashReporter unlinks the (uncompressed) coredump
7. CrashReporter displays a GUI
8. CrashDaemon wakes up (after step 4)
9. CrashDaemon compresses the coredump (gzip)

TL;DR: we no longer parse the coredumps twice, and we also prioritize
launching the CrashReporter GUI immediately when a new coredump shows
up, instead of compressing and parsing it in CrashDaemon first.

The net effect of this is that you get a backtrace on screen much
sooner. That's pretty nice. :^)
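
For reference, here is the daemon-side ordering from the list above, condensed
into a sketch. This is not the literal patch: handle_new_coredump() is a made-up
wrapper for illustration, while the helpers, the MappedFile trick, and the
sleep(3) hack come straight from the diff further down.

    // Condensed sketch of the new per-coredump handling in CrashDaemon.
    static void handle_new_coredump(String const& coredump_path)
    {
        auto file_or_error = MappedFile::map(coredump_path); // the mapping keeps the data alive even after unlink
        if (file_or_error.is_error())
            return;
        launch_crash_reporter(coredump_path, true);          // GUI first; CrashReporter unlinks the file when done
        sleep(3);                                            // hack: give CrashReporter time to parse it
        compress_coredump(coredump_path, file_or_error.release_value()); // then gzip from the mapping
    }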
Author: Andreas Kling
Date:   2021-09-21 18:15:43 +02:00
Commit: 4f224b15ed (parent 38b0200151)
2 changed files with 28 additions and 32 deletions

CrashReporter:

@@ -38,9 +38,12 @@ struct TitleAndText {
 
 static TitleAndText build_backtrace(Coredump::Reader const& coredump, ELF::Core::ThreadInfo const& thread_info, size_t thread_index)
 {
+    auto timer = Core::ElapsedTimer::start_new();
     Coredump::Backtrace backtrace(coredump, thread_info);
     auto metadata = coredump.metadata();
 
+    dbgln("Generating backtrace took {} ms", timer.elapsed());
+
     StringBuilder builder;
 
     auto prepend_metadata = [&](auto& key, StringView fmt) {
@@ -73,6 +76,11 @@ static TitleAndText build_backtrace(Coredump::Reader const& coredump, ELF::Core:
         builder.append(entry.to_string());
     }
 
+    dbgln("--- Backtrace for thread #{} (TID {}) ---", thread_index, thread_info.tid);
+    for (auto& entry : backtrace.entries()) {
+        dbgln("{}", entry.to_string(true));
+    }
+
     return {
         String::formatted("Thread #{} (TID {})", thread_index, thread_info.tid),
         builder.build()
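
(The timing added above follows the usual Core::ElapsedTimer pattern; a minimal
sketch, where do_expensive_work() is just a made-up placeholder:)

    auto timer = Core::ElapsedTimer::start_new(); // starts counting immediately
    do_expensive_work();                          // hypothetical placeholder for the work being measured
    dbgln("Work took {} ms", timer.elapsed());    // elapsed() reports milliseconds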

CrashDaemon:

@@ -8,6 +8,7 @@
 #include <AK/MappedFile.h>
 #include <Kernel/API/InodeWatcherEvent.h>
 #include <LibCompress/Gzip.h>
+#include <LibCore/ElapsedTimer.h>
 #include <LibCore/File.h>
 #include <LibCore/FileWatcher.h>
 #include <LibCoredump/Backtrace.h>
@@ -33,14 +34,8 @@ static void wait_until_coredump_is_ready(const String& coredump_path)
     }
 }
 
-static bool compress_coredump(const String& coredump_path)
+static bool compress_coredump(String const& coredump_path, NonnullRefPtr<MappedFile> coredump_file)
 {
-    auto file_or_error = MappedFile::map(coredump_path);
-    if (file_or_error.is_error()) {
-        dbgln("Could not open coredump '{}': {}", coredump_path, file_or_error.error());
-        return false;
-    }
-    auto coredump_file = file_or_error.value();
     auto compressed_coredump = Compress::GzipCompressor::compress_all(coredump_file->bytes());
     if (!compressed_coredump.has_value()) {
         dbgln("Could not compress coredump '{}'", coredump_path);
@@ -60,28 +55,6 @@ static bool compress_coredump(const String& coredump_path)
     return true;
 }
 
-static void print_backtrace(const String& coredump_path)
-{
-    auto coredump = Coredump::Reader::create(coredump_path);
-    if (!coredump) {
-        dbgln("Could not open coredump '{}'", coredump_path);
-        return;
-    }
-
-    size_t thread_index = 0;
-    coredump->for_each_thread_info([&](auto& thread_info) {
-        Coredump::Backtrace backtrace(*coredump, thread_info);
-        if (thread_index > 0)
-            dbgln();
-        dbgln("--- Backtrace for thread #{} (TID {}) ---", thread_index, thread_info.tid);
-        for (auto& entry : backtrace.entries()) {
-            dbgln("{}", entry.to_string(true));
-        }
-        ++thread_index;
-        return IterationDecision::Continue;
-    });
-}
-
 static void launch_crash_reporter(const String& coredump_path, bool unlink_after_use)
 {
     pid_t child;
@@ -126,8 +99,23 @@ int main()
             continue; // stops compress_coredump from accidentally triggering us
         dbgln("New coredump file: {}", coredump_path);
         wait_until_coredump_is_ready(coredump_path);
-        auto compressed = compress_coredump(coredump_path);
-        print_backtrace(coredump_path);
-        launch_crash_reporter(coredump_path, compressed);
+
+        auto file_or_error = MappedFile::map(coredump_path);
+        if (file_or_error.is_error()) {
+            dbgln("Unable to map coredump {}: {}", coredump_path, file_or_error.error().string());
+            continue;
+        }
+
+        launch_crash_reporter(coredump_path, true);
+
+        // FIXME: This is a hack to give CrashReporter time to parse the coredump
+        //        before we start compressing it.
+        //        I'm sure there's a much smarter approach to this whole thing,
+        //        and we should find it. :^)
+        sleep(3);
+
+        auto compress_timer = Core::ElapsedTimer::start_new();
+        if (compress_coredump(coredump_path, file_or_error.release_value()))
+            dbgln("Compressing coredump took {} ms", compress_timer.elapsed());
     }
 }
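
One possible direction for the FIXME above (purely a sketch, not part of this
patch): since CrashReporter unlinks the uncompressed coredump once it has parsed
it, and the daemon's mmap() keeps the bytes readable regardless, CrashDaemon
could wait for that unlink (with an upper bound) instead of sleeping a fixed
3 seconds. The helper name and timeout below are invented for illustration.

    #include <AK/String.h>
    #include <unistd.h>

    // Hypothetical replacement for sleep(3): wait (bounded) until CrashReporter
    // has unlinked the uncompressed coredump, which it does after parsing it.
    // The MappedFile held by CrashDaemon keeps the data available for gzip.
    static void wait_until_coredump_is_unlinked(String const& coredump_path)
    {
        for (int i = 0; i < 100; ++i) {                        // give up after ~10 seconds
            if (access(coredump_path.characters(), F_OK) != 0)
                return;                                        // gone; CrashReporter is done with it
            usleep(100'000);                                   // poll every 100 ms
        }
    }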