From 8853472dd6e0add7f7f12afec0920e58c3b990de Mon Sep 17 00:00:00 2001
From: Ryan Macnak
Date: Wed, 28 Jun 2023 16:49:07 +0000
Subject: [PATCH] [vm, gc] Pause concurrent marking during scavenge.

TEST=ci
Change-Id: I7c3deaf4383b993c4c0253a4f94a1d503b2d9a92
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/309802
Commit-Queue: Ryan Macnak
Reviewed-by: Siva Annamalai
---
 runtime/vm/heap/marker.cc    | 33 ++++++++++++++++++++++++++++++++-
 runtime/vm/heap/pages.cc     | 33 +++++++++++++++++++++++++++++++++
 runtime/vm/heap/pages.h      | 22 +++++++++++++++++++++-
 runtime/vm/heap/scavenger.cc |  2 ++
 4 files changed, 88 insertions(+), 2 deletions(-)

diff --git a/runtime/vm/heap/marker.cc b/runtime/vm/heap/marker.cc
index ae22911cae2..db01a466b85 100644
--- a/runtime/vm/heap/marker.cc
+++ b/runtime/vm/heap/marker.cc
@@ -83,6 +83,33 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     return more_to_mark;
   }

+  void DrainMarkingStackWithPauseChecks() {
+    do {
+      ObjectPtr raw_obj;
+      while (work_list_.Pop(&raw_obj)) {
+        const intptr_t class_id = raw_obj->GetClassId();
+
+        intptr_t size;
+        if (class_id == kWeakPropertyCid) {
+          size = ProcessWeakProperty(static_cast<WeakPropertyPtr>(raw_obj));
+        } else if (class_id == kWeakReferenceCid) {
+          size = ProcessWeakReference(static_cast<WeakReferencePtr>(raw_obj));
+        } else if (class_id == kWeakArrayCid) {
+          size = ProcessWeakArray(static_cast<WeakArrayPtr>(raw_obj));
+        } else if (class_id == kFinalizerEntryCid) {
+          size = ProcessFinalizerEntry(static_cast<FinalizerEntryPtr>(raw_obj));
+        } else {
+          size = raw_obj->untag()->VisitPointersNonvirtual(this);
+        }
+        marked_bytes_ += size;
+
+        if (UNLIKELY(page_space_->pause_concurrent_marking())) {
+          page_space_->YieldConcurrentMarking();
+        }
+      }
+    } while (ProcessPendingWeakProperties());
+  }
+
   void DrainMarkingStack() {
     while (ProcessMarkingStack(kIntptrMax)) {
     }
@@ -809,7 +836,7 @@ class ConcurrentMarkTask : public ThreadPool::Task {

       marker_->IterateRoots(visitor_);

-      visitor_->DrainMarkingStack();
+      visitor_->DrainMarkingStackWithPauseChecks();
       int64_t stop = OS::GetCurrentMonotonicMicros();
       visitor_->AddMicros(stop - start);
       if (FLAG_log_marker_tasks) {
@@ -826,6 +853,8 @@
       page_space_->set_tasks(page_space_->tasks() - 1);
       page_space_->set_concurrent_marker_tasks(
           page_space_->concurrent_marker_tasks() - 1);
+      page_space_->set_concurrent_marker_tasks_active(
+          page_space_->concurrent_marker_tasks_active() - 1);
       ASSERT(page_space_->phase() == PageSpace::kMarking);
       if (page_space_->concurrent_marker_tasks() == 0) {
         page_space_->set_phase(PageSpace::kAwaitingFinalization);
@@ -904,6 +933,8 @@ void GCMarker::StartConcurrentMark(PageSpace* page_space) {
     page_space->set_tasks(page_space->tasks() + num_tasks);
     page_space->set_concurrent_marker_tasks(
         page_space->concurrent_marker_tasks() + num_tasks);
+    page_space->set_concurrent_marker_tasks_active(
+        page_space->concurrent_marker_tasks_active() + num_tasks);
   }

   ResetSlices();
diff --git a/runtime/vm/heap/pages.cc b/runtime/vm/heap/pages.cc
index 85ee9410d9e..230130ca2c8 100644
--- a/runtime/vm/heap/pages.cc
+++ b/runtime/vm/heap/pages.cc
@@ -62,6 +62,8 @@ PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words)
       tasks_lock_(),
       tasks_(0),
       concurrent_marker_tasks_(0),
+      concurrent_marker_tasks_active_(0),
+      pause_concurrent_marking_(false),
       phase_(kDone),
 #if defined(DEBUG)
       iterating_thread_(nullptr),
@@ -434,6 +436,37 @@ void PageSpace::ReleaseLock(FreeList* freelist) {
   freelist->mutex()->Unlock();
 }

+void PageSpace::PauseConcurrentMarking() {
+  MonitorLocker ml(&tasks_lock_);
+  ASSERT(!pause_concurrent_marking_);
+  pause_concurrent_marking_ = true;
+  while (concurrent_marker_tasks_active_ != 0) {
+    ml.Wait();
+  }
+}
+
+void PageSpace::ResumeConcurrentMarking() {
+  MonitorLocker ml(&tasks_lock_);
+  ASSERT(pause_concurrent_marking_);
+  pause_concurrent_marking_ = false;
+  ml.NotifyAll();
+}
+
+void PageSpace::YieldConcurrentMarking() {
+  MonitorLocker ml(&tasks_lock_);
+  if (pause_concurrent_marking_) {
+    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "Pause");
+    concurrent_marker_tasks_active_--;
+    if (concurrent_marker_tasks_active_ == 0) {
+      ml.NotifyAll();
+    }
+    while (pause_concurrent_marking_) {
+      ml.Wait();
+    }
+    concurrent_marker_tasks_active_++;
+  }
+}
+
 class BasePageIterator : ValueObject {
  public:
   explicit BasePageIterator(const PageSpace* space) : space_(space) {}
diff --git a/runtime/vm/heap/pages.h b/runtime/vm/heap/pages.h
index 3ed6f288d13..38eed3ee96d 100644
--- a/runtime/vm/heap/pages.h
+++ b/runtime/vm/heap/pages.h
@@ -298,17 +298,35 @@ class PageSpace {
   void AcquireLock(FreeList* freelist);
   void ReleaseLock(FreeList* freelist);

+  void PauseConcurrentMarking();
+  void ResumeConcurrentMarking();
+  void YieldConcurrentMarking();
+
   Monitor* tasks_lock() const { return &tasks_lock_; }
   intptr_t tasks() const { return tasks_; }
   void set_tasks(intptr_t val) {
     ASSERT(val >= 0);
     tasks_ = val;
   }
-  intptr_t concurrent_marker_tasks() const { return concurrent_marker_tasks_; }
+  intptr_t concurrent_marker_tasks() const {
+    DEBUG_ASSERT(tasks_lock_.IsOwnedByCurrentThread());
+    return concurrent_marker_tasks_;
+  }
   void set_concurrent_marker_tasks(intptr_t val) {
     ASSERT(val >= 0);
+    DEBUG_ASSERT(tasks_lock_.IsOwnedByCurrentThread());
     concurrent_marker_tasks_ = val;
   }
+  intptr_t concurrent_marker_tasks_active() const {
+    DEBUG_ASSERT(tasks_lock_.IsOwnedByCurrentThread());
+    return concurrent_marker_tasks_active_;
+  }
+  void set_concurrent_marker_tasks_active(intptr_t val) {
+    ASSERT(val >= 0);
+    DEBUG_ASSERT(tasks_lock_.IsOwnedByCurrentThread());
+    concurrent_marker_tasks_active_ = val;
+  }
+  bool pause_concurrent_marking() const { return pause_concurrent_marking_; }

   Phase phase() const { return phase_; }
   void set_phase(Phase val) { phase_ = val; }
@@ -444,6 +462,8 @@
   mutable Monitor tasks_lock_;
   intptr_t tasks_;
   intptr_t concurrent_marker_tasks_;
+  intptr_t concurrent_marker_tasks_active_;
+  RelaxedAtomic<bool> pause_concurrent_marking_;
   Phase phase_;

 #if defined(DEBUG)
diff --git a/runtime/vm/heap/scavenger.cc b/runtime/vm/heap/scavenger.cc
index 545da00ab46..aba049dcbe8 100644
--- a/runtime/vm/heap/scavenger.cc
+++ b/runtime/vm/heap/scavenger.cc
@@ -1713,6 +1713,7 @@ void Scavenger::Scavenge(Thread* thread, GCType type, GCReason reason) {
     }
     promo_candidate_words += page->promo_candidate_words();
   }
+  heap_->old_space()->PauseConcurrentMarking();
   SemiSpace* from = Prologue(reason);

   intptr_t bytes_promoted;
@@ -1747,6 +1748,7 @@ void Scavenger::Scavenge(Thread* thread, GCType type, GCReason reason) {
   MournWeakHandles();
   MournWeakTables();
   heap_->old_space()->ResetProgressBars();
+  heap_->old_space()->ResumeConcurrentMarking();

   // Restore write-barrier assumptions.
   heap_->isolate_group()->RememberLiveTemporaries();
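
Note (not part of the patch): PauseConcurrentMarking, ResumeConcurrentMarking, and YieldConcurrentMarking form a condition-variable handshake over tasks_lock_. The scavenger raises pause_concurrent_marking_ and waits until every concurrent marker task has parked; each marker task parks at its next yield check, and the last one to park wakes the scavenger; Resume lowers the flag and wakes the parked markers. The standalone sketch below mirrors that protocol with std::mutex and std::condition_variable instead of the VM's Monitor/MonitorLocker; the class and member names here are illustrative only, not Dart VM identifiers.

// Illustrative sketch of the pause/resume handshake; not Dart VM code.
#include <condition_variable>
#include <mutex>

class PauseHandshake {
 public:
  explicit PauseHandshake(int marker_tasks) : active_markers_(marker_tasks) {}

  // Scavenger side: request a pause and wait until all markers have parked.
  void Pause() {
    std::unique_lock<std::mutex> ml(lock_);
    pause_requested_ = true;
    cv_.wait(ml, [this] { return active_markers_ == 0; });
  }

  // Scavenger side: clear the request and wake the parked markers.
  void Resume() {
    std::unique_lock<std::mutex> ml(lock_);
    pause_requested_ = false;
    cv_.notify_all();
  }

  // Marker side: called periodically between objects while draining the
  // marking stack.
  void Yield() {
    std::unique_lock<std::mutex> ml(lock_);
    if (!pause_requested_) return;
    if (--active_markers_ == 0) {
      cv_.notify_all();  // Last marker to park releases the scavenger.
    }
    cv_.wait(ml, [this] { return !pause_requested_; });
    ++active_markers_;
  }

 private:
  std::mutex lock_;
  std::condition_variable cv_;
  int active_markers_;
  bool pause_requested_ = false;
};

In the patch itself, the marker keeps the common case cheap: DrainMarkingStackWithPauseChecks reads the RelaxedAtomic<bool> pause_concurrent_marking_ flag without taking tasks_lock_ and only enters YieldConcurrentMarking (and the lock) when a pause has actually been requested.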