diff --git a/runtime/tools/verbose_gc_to_bmu.dart b/runtime/tools/verbose_gc_to_bmu.dart
index 20030fa3a4a..fa4d255c48e 100644
--- a/runtime/tools/verbose_gc_to_bmu.dart
+++ b/runtime/tools/verbose_gc_to_bmu.dart
@@ -78,9 +78,11 @@ class Timeline {
 Interval parseVerboseGCLine(String line) {
   var fields = line.split(',');
   // Update this (and indices below, if needed) when logging format changes.
-  if (fields.length != 25) {
-    assert(line.startsWith('[    GC    |  space  | count | start | gc time') ||
-        line.startsWith('[ (isolate)| (reason)|       |  (s)  |   (ms)  '));
+  if (fields.length < 10) {
+    // Ignore the lines that just specify column names, separated by '|'.
+    // We assume these have very few commas in them, so that fields.length
+    // is < 10.
+    assert(line.contains("|"));
     return new Interval(0, 0);
   }
   var begin = (1e6 * double.parse(fields[2])).floor();
diff --git a/runtime/vm/compiler.cc b/runtime/vm/compiler.cc
index 874a2aebfb3..231202b5782 100644
--- a/runtime/vm/compiler.cc
+++ b/runtime/vm/compiler.cc
@@ -838,7 +838,12 @@ RawCode* CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {

       JitOptimizer optimizer(flow_graph);

-      optimizer.ApplyICData();
+      {
+        NOT_IN_PRODUCT(TimelineDurationScope tds(thread(), compiler_timeline,
+                                                 "ApplyICData"));
+        optimizer.ApplyICData();
+        thread()->CheckForSafepoint();
+      }
       DEBUG_ASSERT(flow_graph->VerifyUseLists());

       // Optimize (a << b) & c patterns, merge operations.
@@ -869,6 +874,7 @@ RawCode* CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
         inliner.Inline();
         // Use lists are maintained and validated by the inliner.
         DEBUG_ASSERT(flow_graph->VerifyUseLists());
+        thread()->CheckForSafepoint();
       }

       // Propagate types and eliminate more type tests.
@@ -918,6 +924,7 @@ RawCode* CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
         // propagation.
         ConstantPropagator::Optimize(flow_graph);
         DEBUG_ASSERT(flow_graph->VerifyUseLists());
+        thread()->CheckForSafepoint();
       }

       // Optimistically convert loop phis that have a single non-smi input
@@ -981,6 +988,7 @@ RawCode* CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
           DEBUG_ASSERT(flow_graph->VerifyUseLists());
         }
         flow_graph->RemoveRedefinitions();
+        thread()->CheckForSafepoint();
       }

       // Optimize (a << b) & c patterns, merge operations.
@@ -1060,6 +1068,7 @@ RawCode* CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
           // TODO(fschneider): Support allocation sinking with try-catch.
           sinking = new AllocationSinking(flow_graph);
           sinking->Optimize();
+          thread()->CheckForSafepoint();
         }
         DEBUG_ASSERT(flow_graph->VerifyUseLists());

@@ -1109,6 +1118,7 @@ RawCode* CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
         // Perform register allocation on the SSA graph.
         FlowGraphAllocator allocator(*flow_graph);
         allocator.AllocateRegisters();
+        thread()->CheckForSafepoint();
       }

       if (reorder_blocks) {
diff --git a/runtime/vm/flow_graph.cc b/runtime/vm/flow_graph.cc
index c14c18e5e79..b69c1af20e9 100644
--- a/runtime/vm/flow_graph.cc
+++ b/runtime/vm/flow_graph.cc
@@ -1316,6 +1316,7 @@ void FlowGraph::RemoveRedefinitions() {
   // Remove redefinition instructions inserted to inhibit hoisting.
   for (BlockIterator block_it = reverse_postorder_iterator();
        !block_it.Done(); block_it.Advance()) {
+    thread()->CheckForSafepoint();
     for (ForwardInstructionIterator instr_it(block_it.Current());
          !instr_it.Done(); instr_it.Advance()) {
       RedefinitionInstr* redefinition = instr_it.Current()->AsRedefinition();
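Every compiler.cc and flow_graph.cc hunk above adds the same call, thread()->CheckForSafepoint(), at the boundary of a long-running optimizer pass, so that a compiling thread periodically offers to park itself while another thread (typically the GC) runs a safepoint operation. The sketch below is a minimal, self-contained illustration of that cooperative protocol; SafepointFlag, RequestAndWait, and Release are invented names for this example, not the VM's API.

#include <atomic>
#include <condition_variable>
#include <mutex>

// Toy cooperative-safepoint protocol: mutators poll, a requester parks them.
class SafepointFlag {
 public:
  // Called by long-running work (e.g. a compiler pass) at loop boundaries.
  // Cheap when no safepoint is pending: a single atomic load.
  void CheckForSafepoint() {
    if (requested_.load(std::memory_order_acquire)) {
      std::unique_lock<std::mutex> lock(mutex_);
      ++parked_;
      cv_.notify_all();  // Tell the requester one more thread has parked.
      cv_.wait(lock, [&] { return !requested_.load(); });
      --parked_;
    }
  }

  // Called by the thread that needs all mutators stopped (e.g. for GC).
  void RequestAndWait(int mutator_count) {
    std::unique_lock<std::mutex> lock(mutex_);
    requested_.store(true, std::memory_order_release);
    cv_.wait(lock, [&] { return parked_ == mutator_count; });
  }

  void Release() {
    std::lock_guard<std::mutex> lock(mutex_);
    requested_.store(false, std::memory_order_release);
    cv_.notify_all();
  }

 private:
  std::atomic<bool> requested_{false};
  int parked_ = 0;
  std::mutex mutex_;
  std::condition_variable cv_;
};

The cheap-when-idle poll is what makes it affordable to place the check inside per-block loops, as the flow_graph.cc hunk does.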
diff --git a/runtime/vm/flow_graph_inliner.cc b/runtime/vm/flow_graph_inliner.cc
index 1d3937704a3..0d2e73d2202 100644
--- a/runtime/vm/flow_graph_inliner.cc
+++ b/runtime/vm/flow_graph_inliner.cc
@@ -621,6 +621,7 @@ class CallSiteInliner : public ValueObject {
         ++inlining_recursion_depth_;
         inlined_recursive_call_ = false;
       }
+      thread()->CheckForSafepoint();
     }

     collected_call_sites_ = NULL;
diff --git a/runtime/vm/heap.cc b/runtime/vm/heap.cc
index 15bc62fc5ae..4998b881cfd 100644
--- a/runtime/vm/heap.cc
+++ b/runtime/vm/heap.cc
@@ -700,14 +700,10 @@ void Heap::RecordBeforeGC(Space space, GCReason reason) {
   stats_.before_.micros_ = OS::GetCurrentMonotonicMicros();
   stats_.before_.new_ = new_space_.GetCurrentUsage();
   stats_.before_.old_ = old_space_.GetCurrentUsage();
-  stats_.times_[0] = 0;
-  stats_.times_[1] = 0;
-  stats_.times_[2] = 0;
-  stats_.times_[3] = 0;
-  stats_.data_[0] = 0;
-  stats_.data_[1] = 0;
-  stats_.data_[2] = 0;
-  stats_.data_[3] = 0;
+  for (int i = 0; i < GCStats::kTimeEntries; i++)
+    stats_.times_[i] = 0;
+  for (int i = 0; i < GCStats::kDataEntries; i++)
+    stats_.data_[i] = 0;
 }


@@ -742,26 +738,34 @@ void Heap::PrintStats() {
   if ((FLAG_verbose_gc_hdr != 0) &&
       (((stats_.num_ - 1) % FLAG_verbose_gc_hdr) == 0)) {
     OS::PrintErr(
-        "[    GC    |  space  | count | start | gc time | "
-        "new gen (KB) | old gen (KB) | timers | data ]\n"
-        "[ (isolate)| (reason)|       |  (s)  |   (ms)  | "
-        "used,cap,ext | used,cap,ext |  (ms)  |      ]\n");
+        "[              |                          |     |       |      "
+        "| new gen     | new gen     | new gen "
+        "| old gen       | old gen       | old gen     "
+        "| sweep | safe- | roots/| stbuf/| tospc/| weaks/|              ]\n"
+        "[ GC isolate   | space (reason)           | GC# | start | time "
+        "| used (kB)   | capacity kB | external"
+        "| used (kB)     | capacity (kB) | external kB "
+        "| thread| point |marking| reset | sweep |swplrge| data ]\n"
+        "[              |                          |     |  (s)  | (ms) "
+        "|before| after|before| after| b4 |aftr"
+        "| before| after | before| after |before| after"
+        "| (ms)  | (ms)  | (ms)  | (ms)  | (ms)  | (ms)  |              ]\n");
   }

   // clang-format off
   const char* space_str = stats_.space_ == kNew ? "Scavenge" : "Mark-Sweep";
   OS::PrintErr(
-      "[ GC(%" Pd64 "): %s(%s), "  // GC(isolate), space(reason)
-      "%" Pd ", "                  // count
-      "%.3f, "                     // start time
-      "%.3f, "                     // total time
-      "%" Pd ", %" Pd ", "         // new gen: in use before/after
-      "%" Pd ", %" Pd ", "         // new gen: capacity before/after
-      "%" Pd ", %" Pd ", "         // new gen: external before/after
-      "%" Pd ", %" Pd ", "         // old gen: in use before/after
-      "%" Pd ", %" Pd ", "         // old gen: capacity before/after
-      "%" Pd ", %" Pd ", "         // old gen: external before/after
-      "%.3f, %.3f, %.3f, %.3f, "   // times
+      "[ GC %9" Pd64 " : %10s(%9s), "  // GC(isolate), space(reason)
+      "%4" Pd ", "                     // count
+      "%6.2f, "                        // start time
+      "%5.1f, "                        // total time
+      "%5" Pd ", %5" Pd ", "           // new gen: in use before/after
+      "%5" Pd ", %5" Pd ", "           // new gen: capacity before/after
+      "%3" Pd ", %3" Pd ", "           // new gen: external before/after
+      "%6" Pd ", %6" Pd ", "           // old gen: in use before/after
+      "%6" Pd ", %6" Pd ", "           // old gen: capacity before/after
+      "%5" Pd ", %5" Pd ", "           // old gen: external before/after
+      "%6.2f, %6.2f, %6.2f, %6.2f, %6.2f, %6.2f, "  // times
       "%" Pd ", %" Pd ", %" Pd ", %" Pd ", "  // data
       "]\n",  // End with a comma to make it easier to import in spreadsheets.
       isolate()->main_port(), space_str, GCReasonToString(stats_.reason_),
@@ -785,6 +789,8 @@ void Heap::PrintStats() {
       MicrosecondsToMilliseconds(stats_.times_[1]),
       MicrosecondsToMilliseconds(stats_.times_[2]),
       MicrosecondsToMilliseconds(stats_.times_[3]),
+      MicrosecondsToMilliseconds(stats_.times_[4]),
+      MicrosecondsToMilliseconds(stats_.times_[5]),
       stats_.data_[0],
       stats_.data_[1],
       stats_.data_[2],
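The heap.cc hunks above replace hand-unrolled zeroing with loops bounded by GCStats::kTimeEntries and GCStats::kDataEntries, and the heap.h hunk below fixes RecordTime() to assert against kTimeEntries instead of kDataEntries, without which the two new time ids (4 and 5) would trip the assert. A minimal sketch of the resulting pattern, with names trimmed down from the VM's real GCStats:

#include <cassert>
#include <cstdint>

struct GCStats {
  enum { kTimeEntries = 6 };
  enum { kDataEntries = 4 };
  int64_t times_[kTimeEntries];
  intptr_t data_[kDataEntries];
};

class Heap {
 public:
  void RecordTime(int id, int64_t micros) {
    // Bound the index by the matching constant: with 6 time slots and only
    // 4 data slots, checking against kDataEntries would reject ids 4 and 5.
    assert((id >= 0) && (id < GCStats::kTimeEntries));
    stats_.times_[id] = micros;
  }

  void RecordBeforeGC() {
    // Loops keyed to the enum sizes stay correct when entries are added.
    for (int i = 0; i < GCStats::kTimeEntries; i++) stats_.times_[i] = 0;
    for (int i = 0; i < GCStats::kDataEntries; i++) stats_.data_[i] = 0;
  }

 private:
  GCStats stats_;
};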
"Scavenge" : "Mark-Sweep"; OS::PrintErr( - "[ GC(%" Pd64 "): %s(%s), " // GC(isolate), space(reason) - "%" Pd ", " // count - "%.3f, " // start time - "%.3f, " // total time - "%" Pd ", %" Pd ", " // new gen: in use before/after - "%" Pd ", %" Pd ", " // new gen: capacity before/after - "%" Pd ", %" Pd ", " // new gen: external before/after - "%" Pd ", %" Pd ", " // old gen: in use before/after - "%" Pd ", %" Pd ", " // old gen: capacity before/after - "%" Pd ", %" Pd ", " // old gen: external before/after - "%.3f, %.3f, %.3f, %.3f, " // times + "[ GC %9" Pd64 " : %10s(%9s), " // GC(isolate), space(reason) + "%4" Pd ", " // count + "%6.2f, " // start time + "%5.1f, " // total time + "%5" Pd ", %5" Pd ", " // new gen: in use before/after + "%5" Pd ", %5" Pd ", " // new gen: capacity before/after + "%3" Pd ", %3" Pd ", " // new gen: external before/after + "%6" Pd ", %6" Pd ", " // old gen: in use before/after + "%6" Pd ", %6" Pd ", " // old gen: capacity before/after + "%5" Pd ", %5" Pd ", " // old gen: external before/after + "%6.2f, %6.2f, %6.2f, %6.2f, %6.2f, %6.2f, " // times "%" Pd ", %" Pd ", %" Pd ", %" Pd ", " // data "]\n", // End with a comma to make it easier to import in spreadsheets. isolate()->main_port(), space_str, GCReasonToString(stats_.reason_), @@ -785,6 +789,8 @@ void Heap::PrintStats() { MicrosecondsToMilliseconds(stats_.times_[1]), MicrosecondsToMilliseconds(stats_.times_[2]), MicrosecondsToMilliseconds(stats_.times_[3]), + MicrosecondsToMilliseconds(stats_.times_[4]), + MicrosecondsToMilliseconds(stats_.times_[5]), stats_.data_[0], stats_.data_[1], stats_.data_[2], diff --git a/runtime/vm/heap.h b/runtime/vm/heap.h index 5ae4361e78a..64b7499d796 100644 --- a/runtime/vm/heap.h +++ b/runtime/vm/heap.h @@ -46,12 +46,8 @@ class Heap { kGCTestCase, }; -#if defined(DEBUG) // Pattern for unused new space and swept old space. - static const uint64_t kZap64Bits = 0xf3f3f3f3f3f3f3f3; - static const uint32_t kZap32Bits = 0xf3f3f3f3; static const uint8_t kZapByte = 0xf3; -#endif // DEBUG ~Heap(); @@ -216,7 +212,7 @@ class Heap { // Stats collection. void RecordTime(int id, int64_t micros) { - ASSERT((id >= 0) && (id < GCStats::kDataEntries)); + ASSERT((id >= 0) && (id < GCStats::kTimeEntries)); stats_.times_[id] = micros; } @@ -268,11 +264,12 @@ class Heap { DISALLOW_COPY_AND_ASSIGN(Data); }; + enum { kTimeEntries = 6 }; enum { kDataEntries = 4 }; Data before_; Data after_; - int64_t times_[kDataEntries]; + int64_t times_[kTimeEntries]; intptr_t data_[kDataEntries]; private: diff --git a/runtime/vm/jit_optimizer.cc b/runtime/vm/jit_optimizer.cc index 686adc5110a..f85d9c46fb3 100644 --- a/runtime/vm/jit_optimizer.cc +++ b/runtime/vm/jit_optimizer.cc @@ -62,6 +62,7 @@ void JitOptimizer::ApplyClassIds() { ASSERT(current_iterator_ == NULL); for (BlockIterator block_it = flow_graph_->reverse_postorder_iterator(); !block_it.Done(); block_it.Advance()) { + thread()->CheckForSafepoint(); ForwardInstructionIterator it(block_it.Current()); current_iterator_ = ⁢ for (; !it.Done(); it.Advance()) { diff --git a/runtime/vm/pages.cc b/runtime/vm/pages.cc index 9768bc5f251..ce8bf42aad3 100644 --- a/runtime/vm/pages.cc +++ b/runtime/vm/pages.cc @@ -849,6 +849,8 @@ void PageSpace::MarkSweep(bool invoke_api_callbacks) { Isolate* isolate = heap_->isolate(); ASSERT(isolate == Isolate::Current()); + const int64_t pre_wait_for_sweepers = OS::GetCurrentMonotonicMicros(); + // Wait for pending tasks to complete and then account for the driver task. 
diff --git a/runtime/vm/pages.h b/runtime/vm/pages.h
index 0cf4388cbfd..7699625cc97 100644
--- a/runtime/vm/pages.h
+++ b/runtime/vm/pages.h
@@ -324,10 +324,12 @@ class PageSpace {
   // Ids for time and data records in Heap::GCStats.
   enum {
     // Time
-    kMarkObjects = 0,
-    kResetFreeLists = 1,
-    kSweepPages = 2,
-    kSweepLargePages = 3,
+    kConcurrentSweep = 0,
+    kSafePoint = 1,
+    kMarkObjects = 2,
+    kResetFreeLists = 3,
+    kSweepPages = 4,
+    kSweepLargePages = 5,
     // Data
     kGarbageRatio = 0,
     kGCTimeFraction = 1,
diff --git a/runtime/vm/precompiler.cc b/runtime/vm/precompiler.cc
index f52d943ba4f..dc6c1fcf16f 100644
--- a/runtime/vm/precompiler.cc
+++ b/runtime/vm/precompiler.cc
@@ -3201,7 +3201,7 @@ bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
       {
 #ifndef PRODUCT
         TimelineDurationScope tds2(thread(), compiler_timeline,
-                                   "CommonSubexpressionElinination");
+                                   "CommonSubexpressionElimination");
 #endif  // !PRODUCT
         if (FLAG_common_subexpression_elimination ||
             FLAG_loop_invariant_code_motion) {
diff --git a/runtime/vm/redundancy_elimination.cc b/runtime/vm/redundancy_elimination.cc
index 66a287cc15d..1cabf787c2e 100644
--- a/runtime/vm/redundancy_elimination.cc
+++ b/runtime/vm/redundancy_elimination.cc
@@ -2402,6 +2402,9 @@ bool DominatorBasedCSE::OptimizeRecursive(FlowGraph* graph,

   // Process children in the dominator tree recursively.
   intptr_t num_children = block->dominated_blocks().length();
+  if (num_children != 0) {
+    graph->thread()->CheckForSafepoint();
+  }
   for (intptr_t i = 0; i < num_children; ++i) {
     BlockEntryInstr* child = block->dominated_blocks()[i];
     if (i < num_children - 1) {
diff --git a/runtime/vm/scavenger.cc b/runtime/vm/scavenger.cc
index f728f6b967a..4fa81d926f6 100644
--- a/runtime/vm/scavenger.cc
+++ b/runtime/vm/scavenger.cc
@@ -517,6 +517,7 @@ void Scavenger::IterateRoots(Isolate* isolate, ScavengerVisitor* visitor) {
   heap_->RecordData(kToKBAfterStoreBuffer, RoundWordsToKB(UsedInWords()));
   heap_->RecordTime(kVisitIsolateRoots, middle - start);
   heap_->RecordTime(kIterateStoreBuffers, end - middle);
+  heap_->RecordTime(kDummyScavengeTime, 0);
 }


@@ -781,6 +782,9 @@ void Scavenger::Scavenge(bool invoke_api_callbacks) {
   // will continue with its scavenge after waiting for the winner to complete.
   // TODO(koda): Consider moving SafepointThreads into allocation failure/retry
   // logic to avoid needless collections.
+
+  int64_t pre_safe_point = OS::GetCurrentMonotonicMicros();
+
   Thread* thread = Thread::Current();
   SafepointOperationScope safepoint_scope(thread);

@@ -791,6 +795,9 @@ void Scavenger::Scavenge(bool invoke_api_callbacks) {
   PageSpace* page_space = heap_->old_space();
   NoSafepointScope no_safepoints;

+  int64_t post_safe_point = OS::GetCurrentMonotonicMicros();
+  heap_->RecordTime(kSafePoint, post_safe_point - pre_safe_point);
+
   // TODO(koda): Make verification more compatible with concurrent sweep.
   if (FLAG_verify_before_gc && !FLAG_concurrent_sweep) {
     OS::PrintErr("Verifying before Scavenge...");
diff --git a/runtime/vm/scavenger.h b/runtime/vm/scavenger.h
index 9bffb215b80..7fa3950f701 100644
--- a/runtime/vm/scavenger.h
+++ b/runtime/vm/scavenger.h
@@ -198,10 +198,12 @@ class Scavenger {
   // Ids for time and data records in Heap::GCStats.
   enum {
     // Time
-    kVisitIsolateRoots = 0,
-    kIterateStoreBuffers = 1,
-    kProcessToSpace = 2,
-    kIterateWeaks = 3,
+    kDummyScavengeTime = 0,
+    kSafePoint = 1,
+    kVisitIsolateRoots = 2,
+    kIterateStoreBuffers = 3,
+    kProcessToSpace = 4,
+    kIterateWeaks = 5,
     // Data
     kStoreBufferEntries = 0,
     kDataUnused1 = 1,
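scavenger.h above pads the scavenger's time ids with kDummyScavengeTime = 0 (always recorded as zero in IterateRoots), so both collectors fill the same six times_[] slots and Heap::PrintStats() can emit one fixed set of time columns, with the safepoint wait always in slot 1. A toy illustration of that layout convention; the second enum's safepoint constant is renamed here only so the example compiles in one scope:

#include <cstdint>
#include <cstdio>

// Mark-sweep fills all six slots with real phase times...
enum MarkSweepTimes { kConcurrentSweep, kSafePoint, kMarkObjects,
                      kResetFreeLists, kSweepPages, kSweepLargePages };
// ...while the scavenger records 0 into slot 0, which it has no use for.
enum ScavengeTimes { kDummyScavengeTime, kScavengeSafePoint,
                     kVisitIsolateRoots, kIterateStoreBuffers,
                     kProcessToSpace, kIterateWeaks };

// One printing routine serves both collectors because the id spaces agree
// on the slot count and on the meaning of the shared columns.
void PrintTimesMs(const int64_t times_micros[6]) {
  for (int i = 0; i < 6; i++) {
    std::printf("%6.2f%s", times_micros[i] / 1000.0, (i == 5) ? "\n" : ", ");
  }
}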