[vm, gc] Run weak processing in parallel for new-space GCs.

Allow updating weak handles, etc. to happen in parallel, with work-stealing.

Also make LocalBlockWorkList more symmetric with BlockWorkList.

Cf. 4495c2b30a

TEST=ci
Change-Id: Id58fe16be92028b1aa4dd8f097769b4107f2a3f0
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/316043
Reviewed-by: Siva Annamalai <asiva@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
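
For context, the parallelization here uses a slice-claiming idiom: each scavenger worker repeatedly bumps an atomic counter and runs whichever weak-processing slice it wins, so every slice executes exactly once no matter how many workers participate. The standalone sketch below illustrates that idiom with plain std::atomic and std::thread; it is a simplified stand-in for Scavenger::IterateWeak in the diff, not VM code.

// Illustrative stand-in, not VM code: every worker races on an atomic
// counter; whichever worker claims a slice runs it, so each slice is
// executed exactly once regardless of how many workers join in.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

enum WeakSlices {
  kWeakHandles = 0,
  kWeakTables,
  kProgressBars,
  kNumWeakSlices,
};

std::atomic<int> weak_slices_started{0};

void IterateWeakSketch(int worker_id) {
  for (;;) {
    int slice = weak_slices_started.fetch_add(1);
    if (slice >= kNumWeakSlices) break;  // No more slices.
    std::printf("worker %d runs slice %d\n", worker_id, slice);
  }
}

int main() {
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) workers.emplace_back(IterateWeakSketch, i);
  for (auto& w : workers) w.join();
  return 0;
}

Because fetch_add hands out each index only once, adding or removing workers changes only which worker runs a slice, never whether it runs.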


@@ -2,6 +2,9 @@
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// VMOptions=
// VMOptions=--scavenger_tasks=0
import "dart:io";
import "package:expect/expect.dart";


@@ -308,21 +308,7 @@ template <int Size, typename T>
class LocalBlockWorkList : public ValueObject {
public:
LocalBlockWorkList() { head_ = new PointerBlock<Size>(); }
~LocalBlockWorkList() { delete head_; }
template <typename Lambda>
DART_FORCE_INLINE void Finalize(Lambda action) {
auto* block = head_;
head_ = nullptr;
while (block != nullptr) {
while (!block->IsEmpty()) {
action(static_cast<T>(block->Pop()));
}
auto* next = block->next();
delete block;
block = next;
}
}
~LocalBlockWorkList() { ASSERT(head_ == nullptr); }
template <typename Lambda>
DART_FORCE_INLINE void Process(Lambda action) {
@@ -347,12 +333,22 @@ class LocalBlockWorkList : public ValueObject {
head_->Push(obj);
}
void Finalize() {
ASSERT(head_ != nullptr);
ASSERT(head_->IsEmpty());
delete head_;
head_ = nullptr;
}
void AbandonWork() {
while (head_ != nullptr) {
PointerBlock<Size>* next = head_->next_;
head_->Reset();
delete head_;
head_ = next;
ASSERT(head_ != nullptr);
auto* block = head_;
head_ = nullptr;
while (block != nullptr) {
auto* next = block->next_;
block->Reset();
delete block;
block = next;
}
}

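The pointer_block.h change above gives LocalBlockWorkList the same three-phase lifecycle as BlockWorkList: Process() drains entries while keeping the list usable, Finalize() may only run once everything has been drained, and AbandonWork() discards leftover work when a scavenge aborts. A minimal sketch of that contract, assuming a std::vector in place of the PointerBlock chain (LocalWorkListSketch is an illustrative name, not the VM class):

#include <cassert>
#include <cstdio>
#include <vector>

template <typename T>
class LocalWorkListSketch {
 public:
  LocalWorkListSketch() : head_(new std::vector<T>()) {}
  ~LocalWorkListSketch() { assert(head_ == nullptr); }

  void Push(T value) { head_->push_back(value); }

  // Drain all entries through `action`; the list remains usable afterwards.
  template <typename Lambda>
  void Process(Lambda action) {
    while (!head_->empty()) {
      T value = head_->back();
      head_->pop_back();
      action(value);
    }
  }

  // Success path: the list must already be empty.
  void Finalize() {
    assert(head_ != nullptr && head_->empty());
    delete head_;
    head_ = nullptr;
  }

  // Abort path: discard any remaining work.
  void AbandonWork() {
    assert(head_ != nullptr);
    delete head_;
    head_ = nullptr;
  }

 private:
  std::vector<T>* head_;
};

int main() {
  LocalWorkListSketch<int> list;
  list.Push(1);
  list.Push(2);
  list.Process([](int v) { std::printf("processed %d\n", v); });
  list.Finalize();  // OK: everything was drained by Process().
  return 0;
}

The success path must end in Finalize() and the abort path in AbandonWork(); the destructor's assert catches callers that forget either one.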

@@ -275,6 +275,7 @@ class ScavengerVisitorBase : public ObjectPointerVisitor {
}
void ProcessAll() {
TIMELINE_FUNCTION_GC_DURATION(thread_, "ProcessToSpace");
LongJumpScope jump(thread_);
if (setjmp(*jump.Set()) == 0) {
do {
@@ -308,7 +309,7 @@ class ScavengerVisitorBase : public ObjectPointerVisitor {
return promoted_list_.WaitForWork(num_busy);
}
void Finalize() {
void ProcessWeak() {
if (!scavenger_->abort_) {
ASSERT(!HasWork());
@@ -321,19 +322,26 @@ class ScavengerVisitorBase : public ObjectPointerVisitor {
MournWeakReferences();
MournWeakArrays();
MournFinalizerEntries();
scavenger_->IterateWeak();
}
page_space_->ReleaseLock(freelist_);
thread_ = nullptr;
}
void FinalizePromotion() { promoted_list_.Finalize(); }
void AbandonWork() {
promoted_list_.AbandonWork();
weak_array_list_.AbandonWork();
weak_property_list_.AbandonWork();
weak_reference_list_.AbandonWork();
finalizer_entry_list_.AbandonWork();
void Finalize() {
if (!scavenger_->abort_) {
promoted_list_.Finalize();
weak_array_list_.Finalize();
weak_property_list_.Finalize();
weak_reference_list_.Finalize();
finalizer_entry_list_.Finalize();
} else {
promoted_list_.AbandonWork();
weak_array_list_.AbandonWork();
weak_property_list_.AbandonWork();
weak_reference_list_.AbandonWork();
finalizer_entry_list_.AbandonWork();
}
}
Page* head() const {
@@ -550,21 +558,21 @@ class ScavengerVisitorBase : public ObjectPointerVisitor {
void ProcessWeakPropertiesScoped();
void MournWeakProperties() {
weak_property_list_.Finalize([](WeakPropertyPtr weak_property) {
weak_property_list_.Process([](WeakPropertyPtr weak_property) {
weak_property->untag()->key_ = Object::null();
weak_property->untag()->value_ = Object::null();
});
}
void MournWeakReferences() {
weak_reference_list_.Finalize([](WeakReferencePtr weak_reference) {
weak_reference_list_.Process([](WeakReferencePtr weak_reference) {
ForwardOrSetNullIfCollected(weak_reference,
&weak_reference->untag()->target_);
});
}
void MournWeakArrays() {
weak_array_list_.Finalize([](WeakArrayPtr weak_array) {
weak_array_list_.Process([](WeakArrayPtr weak_array) {
intptr_t length = Smi::Value(weak_array->untag()->length());
for (intptr_t i = 0; i < length; i++) {
ForwardOrSetNullIfCollected(weak_array,
@@ -574,7 +582,7 @@ class ScavengerVisitorBase : public ObjectPointerVisitor {
}
void MournFinalizerEntries() {
finalizer_entry_list_.Finalize([&](FinalizerEntryPtr finalizer_entry) {
finalizer_entry_list_.Process([&](FinalizerEntryPtr finalizer_entry) {
MournFinalizerEntry(this, finalizer_entry);
});
}
@@ -711,7 +719,7 @@ class ParallelScavengerTask : public ThreadPool::Task {
ASSERT(!visitor_->HasWork());
// Phase 2: Weak processing, statistics.
visitor_->Finalize();
visitor_->ProcessWeak();
}
private:
@@ -1236,6 +1244,41 @@ void Scavenger::IterateRoots(ScavengerVisitorBase<parallel>* visitor) {
IterateRememberedCards(visitor);
}
enum WeakSlices {
kWeakHandles = 0,
kWeakTables,
kProgressBars,
kRememberLiveTemporaries,
kNumWeakSlices,
};
void Scavenger::IterateWeak() {
for (;;) {
intptr_t slice = weak_slices_started_.fetch_add(1);
if (slice >= kNumWeakSlices) {
break; // No more slices.
}
switch (slice) {
case kWeakHandles:
MournWeakHandles();
break;
case kWeakTables:
MournWeakTables();
break;
case kProgressBars:
heap_->old_space()->ResetProgressBars();
break;
case kRememberLiveTemporaries:
// Restore write-barrier assumptions.
heap_->isolate_group()->RememberLiveTemporaries();
break;
default:
UNREACHABLE();
}
}
}
void Scavenger::MournWeakHandles() {
Thread* thread = Thread::Current();
TIMELINE_FUNCTION_GC_DURATION(thread, "MournWeakHandles");
@@ -1611,6 +1654,7 @@ void Scavenger::Scavenge(Thread* thread, GCType type, GCReason reason) {
failed_to_promote_ = false;
abort_ = false;
root_slices_started_ = 0;
weak_slices_started_ = 0;
intptr_t abandoned_bytes = 0; // TODO(rmacnak): Count fragmentation?
SpaceUsage usage_before = GetCurrentUsage();
intptr_t promo_candidate_words = 0;
@@ -1653,14 +1697,8 @@ void Scavenger::Scavenge(Thread* thread, GCType type, GCReason reason) {
}
}
ASSERT(promotion_stack_.IsEmpty());
MournWeakHandles();
MournWeakTables();
heap_->old_space()->ResetProgressBars();
heap_->old_space()->ResumeConcurrentMarking();
// Restore write-barrier assumptions.
heap_->isolate_group()->RememberLiveTemporaries();
// Scavenge finished. Run accounting.
int64_t end = OS::GetCurrentMonotonicMicros();
stats_history_.Add(ScavengeStats(
@@ -1689,13 +1727,9 @@ intptr_t Scavenger::SerialScavenge(SemiSpace* from) {
SerialScavengerVisitor visitor(heap_->isolate_group(), this, from, freelist,
&promotion_stack_);
visitor.ProcessRoots();
{
TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ProcessToSpace");
visitor.ProcessAll();
}
visitor.ProcessAll();
visitor.ProcessWeak();
visitor.Finalize();
visitor.FinalizePromotion();
to_->AddList(visitor.head(), visitor.tail());
return visitor.bytes_promoted();
}
@@ -1731,11 +1765,7 @@ intptr_t Scavenger::ParallelScavenge(SemiSpace* from) {
for (intptr_t i = 0; i < num_tasks; i++) {
ParallelScavengerVisitor* visitor = visitors[i];
if (abort_) {
visitor->AbandonWork();
} else {
visitor->FinalizePromotion();
}
visitor->Finalize();
to_->AddList(visitor->head(), visitor->tail());
bytes_promoted += visitor->bytes_promoted();
delete visitor;

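With weak processing split out of Finalize(), the serial and parallel scavenge paths above converge on the same phase order: roots, to-space copying, weak processing, then a single Finalize() that either retires or abandons the per-visitor work lists depending on whether the scavenge aborted. The sketch below is an illustrative stand-in for that ordering only; VisitorSketch is not ScavengerVisitorBase.

#include <cstdio>

struct VisitorSketch {
  bool abort = false;

  void ProcessRoots() { std::printf("roots\n"); }
  void ProcessAll() { std::printf("to-space\n"); }
  void ProcessWeak() {
    // Weak handles, tables, etc. are only updated on the success path.
    if (!abort) std::printf("weak handles/tables/etc.\n");
  }
  void Finalize() {
    // On success the work lists must be empty; on abort they are discarded.
    std::printf(abort ? "abandon work lists\n" : "finalize work lists\n");
  }
};

int main() {
  VisitorSketch visitor;
  visitor.ProcessRoots();
  visitor.ProcessAll();
  visitor.ProcessWeak();
  visitor.Finalize();
  return 0;
}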

@@ -270,7 +270,9 @@ class Scavenger {
void IterateObjectIdTable(ObjectPointerVisitor* visitor);
template <bool parallel>
void IterateRoots(ScavengerVisitorBase<parallel>* visitor);
void IterateWeak();
void MournWeakHandles();
void MournWeakTables();
void Epilogue(SemiSpace* from);
void VerifyStoreBuffers(const char* msg);
@@ -278,8 +280,6 @@
void UpdateMaxHeapCapacity();
void UpdateMaxHeapUsage();
void MournWeakTables();
intptr_t NewSizeInWords(intptr_t old_size_in_words, GCReason reason) const;
Heap* heap_;
@@ -294,6 +294,7 @@
bool scavenging_;
bool early_tenure_ = false;
RelaxedAtomic<intptr_t> root_slices_started_;
RelaxedAtomic<intptr_t> weak_slices_started_;
StoreBufferBlock* blocks_ = nullptr;
int64_t gc_time_micros_;