diff --git a/runtime/vm/heap/heap.cc b/runtime/vm/heap/heap.cc
index a57bcfc59e4..713bf9769a6 100644
--- a/runtime/vm/heap/heap.cc
+++ b/runtime/vm/heap/heap.cc
@@ -79,7 +79,7 @@ uword Heap::AllocateNew(intptr_t size) {
     // This call to CollectGarbage might end up "reusing" a collection spawned
     // from a different thread and will be racing to allocate the requested
     // memory with other threads being released after the collection.
-    CollectGarbage(kNew);
+    CollectGarbage(thread, GCType::kScavenge, GCReason::kNewSpace);
 
     addr = new_space_.TryAllocate(thread, size);
     if (LIKELY(addr != 0)) {
@@ -184,16 +184,16 @@ void Heap::CheckExternalGC(Thread* thread) {
   if (new_space_.ExternalInWords() >= (4 * new_space_.CapacityInWords())) {
     // Attempt to free some external allocation by a scavenge. (If the total
     // remains above the limit, next external alloc will trigger another.)
-    CollectGarbage(GCType::kScavenge, GCReason::kExternal);
+    CollectGarbage(thread, GCType::kScavenge, GCReason::kExternal);
     // Promotion may have pushed old space over its limit. Fall through for old
     // space GC check.
   }
 
   if (old_space_.ReachedHardThreshold()) {
     if (last_gc_was_old_space_) {
-      CollectNewSpaceGarbage(thread, GCReason::kFull);
+      CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kFull);
     }
-    CollectGarbage(GCType::kMarkSweep, GCReason::kExternal);
+    CollectGarbage(thread, GCType::kMarkSweep, GCReason::kExternal);
   } else {
     CheckStartConcurrentMarking(thread, GCReason::kExternal);
   }
@@ -377,7 +377,7 @@ void Heap::NotifyIdle(int64_t deadline) {
   // first to shrink the root set (make old-space GC faster) and avoid
   // intergenerational garbage (make old-space GC free more memory).
   if (new_space_.ShouldPerformIdleScavenge(deadline)) {
-    CollectNewSpaceGarbage(thread, GCReason::kIdle);
+    CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kIdle);
   }
 
   // Check if we want to collect old-space, in decreasing order of cost.
@@ -425,34 +425,9 @@ void Heap::NotifyIdle(int64_t deadline) {
   }
 }
 
-void Heap::EvacuateNewSpace(Thread* thread, GCReason reason) {
-  ASSERT(reason != GCReason::kPromotion);
-  ASSERT(reason != GCReason::kFinalize);
-  if (thread->isolate_group() == Dart::vm_isolate_group()) {
-    // The vm isolate cannot safely collect garbage due to unvisited read-only
-    // handles and slots bootstrapped with RAW_NULL. Ignore GC requests to
-    // trigger a nice out-of-memory message instead of a crash in the middle of
-    // visiting pointers.
-    return;
-  }
-  {
-    GcSafepointOperationScope safepoint_operation(thread);
-    RecordBeforeGC(GCType::kScavenge, reason);
-    VMTagScope tagScope(thread, reason == GCReason::kIdle
-                                    ? VMTag::kGCIdleTagId
-                                    : VMTag::kGCNewSpaceTagId);
-    TIMELINE_FUNCTION_GC_DURATION(thread, "EvacuateNewGeneration");
-    new_space_.Evacuate(reason);
-    RecordAfterGC(GCType::kScavenge);
-    PrintStats();
-#if defined(SUPPORT_TIMELINE)
-    PrintStatsToTimeline(&tbes, reason);
-#endif
-    last_gc_was_old_space_ = false;
-  }
-}
-
-void Heap::CollectNewSpaceGarbage(Thread* thread, GCReason reason) {
+void Heap::CollectNewSpaceGarbage(Thread* thread,
+                                  GCType type,
+                                  GCReason reason) {
   NoActiveIsolateScope no_active_isolate_scope;
   ASSERT(reason != GCReason::kPromotion);
   ASSERT(reason != GCReason::kFinalize);
@@ -465,21 +440,21 @@ void Heap::CollectNewSpaceGarbage(Thread* thread, GCReason reason) {
   }
   {
     GcSafepointOperationScope safepoint_operation(thread);
-    RecordBeforeGC(GCType::kScavenge, reason);
+    RecordBeforeGC(type, reason);
     {
       VMTagScope tagScope(thread, reason == GCReason::kIdle
                                       ? VMTag::kGCIdleTagId
                                       : VMTag::kGCNewSpaceTagId);
       TIMELINE_FUNCTION_GC_DURATION(thread, "CollectNewGeneration");
-      new_space_.Scavenge(reason);
-      RecordAfterGC(GCType::kScavenge);
+      new_space_.Scavenge(thread, type, reason);
+      RecordAfterGC(type);
       PrintStats();
 #if defined(SUPPORT_TIMELINE)
       PrintStatsToTimeline(&tbes, reason);
 #endif
       last_gc_was_old_space_ = false;
     }
-    if (reason == GCReason::kNewSpace) {
+    if (type == GCType::kScavenge && reason == GCReason::kNewSpace) {
       if (old_space_.ReachedHardThreshold()) {
         CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
                                GCReason::kPromotion);
@@ -542,11 +517,11 @@
   }
 }
 
-void Heap::CollectGarbage(GCType type, GCReason reason) {
-  Thread* thread = Thread::Current();
+void Heap::CollectGarbage(Thread* thread, GCType type, GCReason reason) {
   switch (type) {
     case GCType::kScavenge:
-      CollectNewSpaceGarbage(thread, reason);
+    case GCType::kEvacuate:
+      CollectNewSpaceGarbage(thread, type, reason);
       break;
     case GCType::kMarkSweep:
     case GCType::kMarkCompact:
@@ -557,19 +532,9 @@
   }
 }
 
-void Heap::CollectGarbage(Space space) {
-  Thread* thread = Thread::Current();
-  if (space == kOld) {
-    CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kOldSpace);
-  } else {
-    ASSERT(space == kNew);
-    CollectNewSpaceGarbage(thread, GCReason::kNewSpace);
-  }
-}
-
 void Heap::CollectMostGarbage(GCReason reason, bool compact) {
   Thread* thread = Thread::Current();
-  CollectNewSpaceGarbage(thread, reason);
+  CollectNewSpaceGarbage(thread, GCType::kScavenge, reason);
   CollectOldSpaceGarbage(
       thread, compact ? GCType::kMarkCompact : GCType::kMarkSweep, reason);
 }
@@ -579,7 +544,7 @@ void Heap::CollectAllGarbage(GCReason reason, bool compact) {
 
   // New space is evacuated so this GC will collect all dead objects
   // kept alive by a cross-generational pointer.
-  EvacuateNewSpace(thread, reason);
+  CollectNewSpaceGarbage(thread, GCType::kEvacuate, reason);
   if (thread->is_marking()) {
     // If incremental marking is happening, we need to finish the GC cycle
     // and perform a follow-up GC to purge any "floating garbage" that may be
@@ -610,7 +575,7 @@ void Heap::CheckStartConcurrentMarking(Thread* thread, GCReason reason) {
     // new-space GC. This check is the concurrent-marking equivalent to the
    // new-space GC before synchronous-marking in CollectMostGarbage.
     if (last_gc_was_old_space_) {
-      CollectNewSpaceGarbage(thread, GCReason::kFull);
+      CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kFull);
     }
 
     StartConcurrentMarking(thread, reason);
@@ -858,6 +823,8 @@ const char* Heap::GCTypeToString(GCType type) {
   switch (type) {
     case GCType::kScavenge:
       return "Scavenge";
+    case GCType::kEvacuate:
+      return "Evacuate";
     case GCType::kStartConcurrentMark:
       return "StartCMark";
     case GCType::kMarkSweep:
diff --git a/runtime/vm/heap/heap.h b/runtime/vm/heap/heap.h
index 112c59ffc3f..705e5a9ac6b 100644
--- a/runtime/vm/heap/heap.h
+++ b/runtime/vm/heap/heap.h
@@ -108,8 +108,7 @@ class Heap {
   void NotifyIdle(int64_t deadline);
 
   // Collect a single generation.
-  void CollectGarbage(Space space);
-  void CollectGarbage(GCType type, GCReason reason);
+  void CollectGarbage(Thread* thread, GCType type, GCReason reason);
 
   // Collect both generations by performing a scavenge followed by a
   // mark-sweep. This function may not collect all unreachable objects. Because
@@ -344,9 +343,8 @@ class Heap {
   bool VerifyGC(MarkExpectation mark_expectation = kForbidMarked);
 
   // Helper functions for garbage collection.
-  void CollectNewSpaceGarbage(Thread* thread, GCReason reason);
+  void CollectNewSpaceGarbage(Thread* thread, GCType type, GCReason reason);
   void CollectOldSpaceGarbage(Thread* thread, GCType type, GCReason reason);
-  void EvacuateNewSpace(Thread* thread, GCReason reason);
 
   // GC stats collection.
   void RecordBeforeGC(GCType type, GCReason reason);
@@ -463,7 +461,8 @@ class GCTestHelper : public AllStatic {
   static void CollectNewSpace() {
     Thread* thread = Thread::Current();
     ASSERT(thread->execution_state() == Thread::kThreadInVM);
-    thread->heap()->CollectNewSpaceGarbage(thread, GCReason::kDebugging);
+    thread->heap()->CollectGarbage(thread, GCType::kScavenge,
+                                   GCReason::kDebugging);
   }
 
   // Fully collect old gen and wait for the sweeper to finish. The normal call
@@ -474,9 +473,11 @@ class GCTestHelper : public AllStatic {
     Thread* thread = Thread::Current();
     ASSERT(thread->execution_state() == Thread::kThreadInVM);
     if (thread->is_marking()) {
-      thread->heap()->CollectGarbage(GCType::kMarkSweep, GCReason::kDebugging);
+      thread->heap()->CollectGarbage(thread, GCType::kMarkSweep,
+                                     GCReason::kDebugging);
     }
-    thread->heap()->CollectGarbage(GCType::kMarkSweep, GCReason::kDebugging);
+    thread->heap()->CollectGarbage(thread, GCType::kMarkSweep,
+                                   GCReason::kDebugging);
     WaitForGCTasks();
   }
diff --git a/runtime/vm/heap/heap_test.cc b/runtime/vm/heap/heap_test.cc
index c45765c7d67..d2e2c41916e 100644
--- a/runtime/vm/heap/heap_test.cc
+++ b/runtime/vm/heap/heap_test.cc
@@ -512,7 +512,8 @@ ISOLATE_UNIT_TEST_CASE(ExternalPromotion) {
 class HeapTestHelper {
  public:
   static void Scavenge(Thread* thread) {
-    thread->heap()->CollectNewSpaceGarbage(thread, GCReason::kDebugging);
+    thread->heap()->CollectNewSpaceGarbage(thread, GCType::kScavenge,
+                                           GCReason::kDebugging);
   }
   static void MarkSweep(Thread* thread) {
     thread->heap()->CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
diff --git a/runtime/vm/heap/safepoint.cc b/runtime/vm/heap/safepoint.cc
index c2e8d9a903d..09e0c04168f 100644
--- a/runtime/vm/heap/safepoint.cc
+++ b/runtime/vm/heap/safepoint.cc
@@ -63,7 +63,7 @@ ForceGrowthSafepointOperationScope::~ForceGrowthSafepointOperationScope() {
     ASSERT(T->CanCollectGarbage());
     // Check if we passed the growth limit during the scope.
     if (heap->old_space()->ReachedHardThreshold()) {
-      heap->CollectGarbage(GCType::kMarkSweep, GCReason::kOldSpace);
+      heap->CollectGarbage(T, GCType::kMarkSweep, GCReason::kOldSpace);
     } else {
       heap->CheckStartConcurrentMarking(T, GCReason::kOldSpace);
     }
diff --git a/runtime/vm/heap/scavenger.cc b/runtime/vm/heap/scavenger.cc
index c94c042888d..0648d600883 100644
--- a/runtime/vm/heap/scavenger.cc
+++ b/runtime/vm/heap/scavenger.cc
@@ -1731,21 +1731,20 @@ uword ScavengerVisitorBase<parallel>::TryAllocateCopySlow(intptr_t size) {
   return tail_->TryAllocateGC(size);
 }
 
-void Scavenger::Scavenge(GCReason reason) {
+void Scavenger::Scavenge(Thread* thread, GCType type, GCReason reason) {
   int64_t start = OS::GetCurrentMonotonicMicros();
 
-  // Ensure that all threads for this isolate are at a safepoint (either stopped
-  // or in native code). If two threads are racing at this point, the loser
-  // will continue with its scavenge after waiting for the winner to complete.
-  // TODO(koda): Consider moving SafepointThreads into allocation failure/retry
-  // logic to avoid needless collections.
-  Thread* thread = Thread::Current();
-  GcSafepointOperationScope safepoint_scope(thread);
+  ASSERT(thread->IsAtSafepoint());
 
   // Scavenging is not reentrant. Make sure that is the case.
   ASSERT(!scavenging_);
   scavenging_ = true;
 
+  if (type == GCType::kEvacuate) {
+    // Forces the next scavenge to promote all the objects in the new space.
+    early_tenure_ = true;
+  }
+
   if (FLAG_verify_before_gc) {
     OS::PrintErr("Verifying before Scavenge...");
     heap_->WaitForSweeperTasksAtSafepoint(thread);
@@ -1807,6 +1806,11 @@ void Scavenger::Scavenge(GCReason reason) {
   // Done scavenging. Reset the marker.
   ASSERT(scavenging_);
   scavenging_ = false;
+
+  // It is possible for objects to stay in the new space
+  // if the VM cannot create more pages for these objects.
+  ASSERT((type != GCType::kEvacuate) || (UsedInWords() == 0) ||
+         failed_to_promote_);
 }
 
 intptr_t Scavenger::SerialScavenge(SemiSpace* from) {
@@ -1970,23 +1974,4 @@ void Scavenger::PrintToJSONObject(JSONObject* object) const {
 }
 #endif  // !PRODUCT
 
-void Scavenger::Evacuate(GCReason reason) {
-  // We need a safepoint here to prevent allocation right before or right after
-  // the scavenge.
-  // The former can introduce an object that we might fail to collect.
-  // The latter means even if the scavenge promotes every object in the new
-  // space, the new allocation means the space is not empty,
-  // causing the assertion below to fail.
-  GcSafepointOperationScope scope(Thread::Current());
-
-  // Forces the next scavenge to promote all the objects in the new space.
-  early_tenure_ = true;
-
-  Scavenge(reason);
-
-  // It is possible for objects to stay in the new space
-  // if the VM cannot create more pages for these objects.
-  ASSERT((UsedInWords() == 0) || failed_to_promote_);
-}
-
 }  // namespace dart
diff --git a/runtime/vm/heap/scavenger.h b/runtime/vm/heap/scavenger.h
index 5a93aec8f68..229912ac956 100644
--- a/runtime/vm/heap/scavenger.h
+++ b/runtime/vm/heap/scavenger.h
@@ -277,10 +277,7 @@ class Scavenger {
   void AbandonRemainingTLABForDebugging(Thread* thread);
 
   // Collect the garbage in this scavenger.
-  void Scavenge(GCReason reason);
-
-  // Promote all live objects.
-  void Evacuate(GCReason reason);
+  void Scavenge(Thread* thread, GCType type, GCReason reason);
 
   int64_t UsedInWords() const {
     MutexLocker ml(&space_lock_);
diff --git a/runtime/vm/heap/spaces.h b/runtime/vm/heap/spaces.h
index c1c0663d8d0..2b9c00f8aa1 100644
--- a/runtime/vm/heap/spaces.h
+++ b/runtime/vm/heap/spaces.h
@@ -31,6 +31,7 @@ class SpaceUsage {
 
 enum class GCType {
   kScavenge,
+  kEvacuate,
   kStartConcurrentMark,
   kMarkSweep,
   kMarkCompact,
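
The scavenger.cc, scavenger.h, and spaces.h hunks above fold the old Scavenger::Evacuate() entry point into Scavenge(): a GCType::kEvacuate request is an ordinary scavenge that first sets early_tenure_ so every survivor is promoted, and the closing assertion only tolerates a non-empty new space when promotion failed. Evacuation therefore no longer needs its own safepoint scope; the caller is already at a safepoint and the two modes differ only in tenuring policy and post-condition. Below is a minimal standalone sketch of that control flow; SimpleScavenger and all of its members are invented for illustration and are not the VM's types.

    #include <cassert>
    #include <cstdio>

    // Stand-ins mirroring the enum names in the patch; everything else here is
    // illustrative only.
    enum class GCType { kScavenge, kEvacuate };
    enum class GCReason { kNewSpace, kFull, kDebugging };

    class SimpleScavenger {
     public:
      // Mirrors the shape of Scavenger::Scavenge(Thread*, GCType, GCReason):
      // an evacuating collection is a scavenge that forces tenuring first.
      void Scavenge(GCType type, GCReason reason) {
        (void)reason;  // Reason only affects accounting in the real VM.
        assert(!scavenging_);  // Scavenging is not reentrant.
        scavenging_ = true;

        if (type == GCType::kEvacuate) {
          // Force this scavenge to promote all objects in the new space.
          early_tenure_ = true;
        }

        CopySurvivors();

        scavenging_ = false;
        // After an evacuating scavenge the new space should be empty unless
        // the allocator could not create pages for the promoted objects.
        assert(type != GCType::kEvacuate || used_in_words_ == 0 ||
               failed_to_promote_);
      }

     private:
      void CopySurvivors() {
        if (early_tenure_) {
          used_in_words_ = 0;  // Model successful promotion of everything.
          early_tenure_ = false;
        } else {
          used_in_words_ /= 2;  // A normal scavenge keeps young survivors.
        }
      }

      bool scavenging_ = false;
      bool early_tenure_ = false;
      bool failed_to_promote_ = false;
      int used_in_words_ = 128;
    };

    int main() {
      SimpleScavenger scavenger;
      scavenger.Scavenge(GCType::kScavenge, GCReason::kNewSpace);  // partial copy
      scavenger.Scavenge(GCType::kEvacuate, GCReason::kFull);      // promote all
      std::puts("evacuation sketch ran");
      return 0;
    }
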
diff --git a/runtime/vm/isolate.h b/runtime/vm/isolate.h
index 5b7eaad55a1..a71c5f5d070 100644
--- a/runtime/vm/isolate.h
+++ b/runtime/vm/isolate.h
@@ -1807,9 +1807,9 @@ class EnterIsolateGroupScope {
 // an individual isolate.
 class NoActiveIsolateScope : public StackResource {
  public:
-  NoActiveIsolateScope()
-      : StackResource(Thread::Current()),
-        thread_(static_cast<Thread*>(thread())) {
+  NoActiveIsolateScope() : NoActiveIsolateScope(Thread::Current()) {}
+  explicit NoActiveIsolateScope(Thread* thread)
+      : StackResource(thread), thread_(thread) {
     saved_isolate_ = thread_->isolate_;
     thread_->isolate_ = nullptr;
   }
diff --git a/runtime/vm/metrics_test.cc b/runtime/vm/metrics_test.cc
index 5a9ebf70195..f87ba5deb05 100644
--- a/runtime/vm/metrics_test.cc
+++ b/runtime/vm/metrics_test.cc
@@ -78,7 +78,7 @@ VM_UNIT_TEST_CASE(Metric_OnDemand) {
 
 ISOLATE_UNIT_TEST_CASE(Metric_EmbedderAPI) {
   {
-    TransitionVMToNative transition(Thread::Current());
+    TransitionVMToNative transition(thread);
     const char* kScript = "void main() {}";
     Dart_Handle api_lib = TestCase::LoadTestScript(
@@ -88,15 +88,16 @@ ISOLATE_UNIT_TEST_CASE(Metric_EmbedderAPI) {
 
   // Ensure we've done new/old GCs to ensure max metrics are initialized.
   String::New("", Heap::kNew);
-  IsolateGroup::Current()->heap()->new_space()->Scavenge(GCReason::kDebugging);
-  IsolateGroup::Current()->heap()->CollectAllGarbage(GCReason::kDebugging,
-                                                     /*compact=*/ true);
+  thread->heap()->CollectGarbage(thread, GCType::kScavenge,
+                                 GCReason::kDebugging);
+  thread->heap()->CollectGarbage(thread, GCType::kMarkCompact,
+                                 GCReason::kDebugging);
 
   // Ensure we've something live in new space.
   String::New("", Heap::kNew);
 
   {
-    TransitionVMToNative transition(Thread::Current());
+    TransitionVMToNative transition(thread);
     Dart_Isolate isolate = Dart_CurrentIsolate();
 
 #if !defined(PRODUCT)
diff --git a/runtime/vm/service.cc b/runtime/vm/service.cc
index 72b9021d1eb..4d3fa4da9e7 100644
--- a/runtime/vm/service.cc
+++ b/runtime/vm/service.cc
@@ -4473,13 +4473,13 @@ static void GetHeapMap(Thread* thread, JSONStream* js) {
   auto isolate_group = thread->isolate_group();
   if (js->HasParam("gc")) {
     if (js->ParamIs("gc", "scavenge")) {
-      isolate_group->heap()->CollectGarbage(GCType::kScavenge,
+      isolate_group->heap()->CollectGarbage(thread, GCType::kScavenge,
                                             GCReason::kDebugging);
     } else if (js->ParamIs("gc", "mark-sweep")) {
-      isolate_group->heap()->CollectGarbage(GCType::kMarkSweep,
+      isolate_group->heap()->CollectGarbage(thread, GCType::kMarkSweep,
                                             GCReason::kDebugging);
     } else if (js->ParamIs("gc", "mark-compact")) {
-      isolate_group->heap()->CollectGarbage(GCType::kMarkCompact,
+      isolate_group->heap()->CollectGarbage(thread, GCType::kMarkCompact,
                                             GCReason::kDebugging);
     } else {
       PrintInvalidParamError(js, "gc");
diff --git a/runtime/vm/thread.cc b/runtime/vm/thread.cc
index e21a66c050a..9f3a5bf67f3 100644
--- a/runtime/vm/thread.cc
+++ b/runtime/vm/thread.cc
@@ -437,7 +437,7 @@ ErrorPtr Thread::HandleInterrupts() {
   if ((interrupt_bits & kVMInterrupt) != 0) {
     CheckForSafepoint();
     if (isolate_group()->store_buffer()->Overflowed()) {
-      heap()->CollectGarbage(GCType::kScavenge, GCReason::kStoreBuffer);
+      heap()->CollectGarbage(this, GCType::kScavenge, GCReason::kStoreBuffer);
     }
 
 #if !defined(PRODUCT)
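
Taken together, the heap.h and heap.cc hunks collapse the CollectGarbage(Space) and CollectGarbage(GCType, GCReason) overloads into a single CollectGarbage(Thread*, GCType, GCReason), and every call site (safepoint.cc, service.cc, thread.cc, metrics_test.cc, and the test helpers) now passes the acting thread explicitly instead of looking up Thread::Current() inside the heap, with GCType::kEvacuate routed through the same new-space path as GCType::kScavenge. A compilable toy model of that dispatch follows; MiniHeap, the Thread struct, and the printf bodies are stand-ins for illustration, not the VM's implementation.

    #include <cstdio>

    // Hypothetical mini-model of the unified entry point: callers pass the
    // acting thread and a GCType, and kEvacuate shares the new-space path.
    enum class GCType { kScavenge, kEvacuate, kMarkSweep, kMarkCompact };
    enum class GCReason { kNewSpace, kOldSpace, kFull, kDebugging };

    struct Thread {
      const char* name;
    };

    class MiniHeap {
     public:
      void CollectGarbage(Thread* thread, GCType type, GCReason reason) {
        switch (type) {
          case GCType::kScavenge:
          case GCType::kEvacuate:
            CollectNewSpaceGarbage(thread, type, reason);
            break;
          case GCType::kMarkSweep:
          case GCType::kMarkCompact:
            CollectOldSpaceGarbage(thread, type, reason);
            break;
        }
      }

     private:
      void CollectNewSpaceGarbage(Thread* thread, GCType type, GCReason reason) {
        std::printf("[%s] new-space GC, evacuate=%d, reason=%d\n", thread->name,
                    type == GCType::kEvacuate, static_cast<int>(reason));
      }
      void CollectOldSpaceGarbage(Thread* thread, GCType type, GCReason reason) {
        std::printf("[%s] old-space GC, compact=%d, reason=%d\n", thread->name,
                    type == GCType::kMarkCompact, static_cast<int>(reason));
      }
    };

    int main() {
      Thread mutator{"mutator"};
      MiniHeap heap;
      // What used to be separate CollectGarbage(kNew) / CollectGarbage(type,
      // reason) calls is now a single overload taking the thread explicitly.
      heap.CollectGarbage(&mutator, GCType::kScavenge, GCReason::kNewSpace);
      heap.CollectGarbage(&mutator, GCType::kEvacuate, GCReason::kFull);
      heap.CollectGarbage(&mutator, GCType::kMarkSweep, GCReason::kOldSpace);
      return 0;
    }
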