Mirror of https://github.com/dart-lang/sdk (synced 2024-11-02 08:44:27 +00:00)
[vm, gc] Avoid unnecessary nested GcSafepointOperationScopes.
TEST=ci
Change-Id: Ic12d6d1766cf90808716d301abfa216eb597cb1f
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/241201
Reviewed-by: Siva Annamalai <asiva@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Parent: 8f7cdd250e
Commit: fd10f5ab28

11 changed files with 58 additions and 105 deletions
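The theme of the diff below: GcSafepointOperationScope is the VM's stop-the-world rendezvous, and before this change several paths could open a second scope while the caller already held one (for example, Scavenger::Scavenge opened its own inside the one held by Heap::CollectNewSpaceGarbage). The fix threads the Thread* through from the caller and lets the inner code assert it is already at a safepoint. A minimal sketch of the RAII shape, assuming hypothetical SafepointAll/ResumeAll helpers rather than the VM's real rendezvous machinery:

// Illustrative sketch only: SafepointAll()/ResumeAll() are hypothetical
// stand-ins for the VM's actual rendezvous machinery.
struct Thread {};
void SafepointAll(Thread*) { /* bring every mutator thread to a stop */ }
void ResumeAll(Thread*) { /* let the mutator threads run again */ }

class GcSafepointOperationScope {
 public:
  explicit GcSafepointOperationScope(Thread* thread) : thread_(thread) {
    SafepointAll(thread_);  // One stop-the-world rendezvous per scope.
  }
  ~GcSafepointOperationScope() { ResumeAll(thread_); }

 private:
  Thread* thread_;
};

void CollectNewSpaceGarbage(Thread* thread) {
  GcSafepointOperationScope scope(thread);  // The only rendezvous.
  // Before this change the scavenger opened a second scope of its own here;
  // afterwards it simply asserts the caller already holds one.
}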
@@ -79,7 +79,7 @@ uword Heap::AllocateNew(intptr_t size) {
     // This call to CollectGarbage might end up "reusing" a collection spawned
     // from a different thread and will be racing to allocate the requested
     // memory with other threads being released after the collection.
-    CollectGarbage(kNew);
+    CollectGarbage(thread, GCType::kScavenge, GCReason::kNewSpace);

     addr = new_space_.TryAllocate(thread, size);
     if (LIKELY(addr != 0)) {
@@ -184,16 +184,16 @@ void Heap::CheckExternalGC(Thread* thread) {
   if (new_space_.ExternalInWords() >= (4 * new_space_.CapacityInWords())) {
     // Attempt to free some external allocation by a scavenge. (If the total
     // remains above the limit, next external alloc will trigger another.)
-    CollectGarbage(GCType::kScavenge, GCReason::kExternal);
+    CollectGarbage(thread, GCType::kScavenge, GCReason::kExternal);
     // Promotion may have pushed old space over its limit. Fall through for old
     // space GC check.
   }

   if (old_space_.ReachedHardThreshold()) {
     if (last_gc_was_old_space_) {
-      CollectNewSpaceGarbage(thread, GCReason::kFull);
+      CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kFull);
     }
-    CollectGarbage(GCType::kMarkSweep, GCReason::kExternal);
+    CollectGarbage(thread, GCType::kMarkSweep, GCReason::kExternal);
   } else {
     CheckStartConcurrentMarking(thread, GCReason::kExternal);
   }
@@ -377,7 +377,7 @@ void Heap::NotifyIdle(int64_t deadline) {
   // first to shrink the root set (make old-space GC faster) and avoid
   // intergenerational garbage (make old-space GC free more memory).
   if (new_space_.ShouldPerformIdleScavenge(deadline)) {
-    CollectNewSpaceGarbage(thread, GCReason::kIdle);
+    CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kIdle);
   }

   // Check if we want to collect old-space, in decreasing order of cost.
@@ -425,34 +425,9 @@ void Heap::NotifyIdle(int64_t deadline) {
   }
 }

-void Heap::EvacuateNewSpace(Thread* thread, GCReason reason) {
-  ASSERT(reason != GCReason::kPromotion);
-  ASSERT(reason != GCReason::kFinalize);
-  if (thread->isolate_group() == Dart::vm_isolate_group()) {
-    // The vm isolate cannot safely collect garbage due to unvisited read-only
-    // handles and slots bootstrapped with RAW_NULL. Ignore GC requests to
-    // trigger a nice out-of-memory message instead of a crash in the middle of
-    // visiting pointers.
-    return;
-  }
-  {
-    GcSafepointOperationScope safepoint_operation(thread);
-    RecordBeforeGC(GCType::kScavenge, reason);
-    VMTagScope tagScope(thread, reason == GCReason::kIdle
-                                    ? VMTag::kGCIdleTagId
-                                    : VMTag::kGCNewSpaceTagId);
-    TIMELINE_FUNCTION_GC_DURATION(thread, "EvacuateNewGeneration");
-    new_space_.Evacuate(reason);
-    RecordAfterGC(GCType::kScavenge);
-    PrintStats();
-#if defined(SUPPORT_TIMELINE)
-    PrintStatsToTimeline(&tbes, reason);
-#endif
-    last_gc_was_old_space_ = false;
-  }
-}
-
-void Heap::CollectNewSpaceGarbage(Thread* thread, GCReason reason) {
+void Heap::CollectNewSpaceGarbage(Thread* thread,
+                                  GCType type,
+                                  GCReason reason) {
   NoActiveIsolateScope no_active_isolate_scope;
   ASSERT(reason != GCReason::kPromotion);
   ASSERT(reason != GCReason::kFinalize);
@@ -465,21 +440,21 @@ void Heap::CollectNewSpaceGarbage(Thread* thread, GCReason reason) {
   }
   {
     GcSafepointOperationScope safepoint_operation(thread);
-    RecordBeforeGC(GCType::kScavenge, reason);
+    RecordBeforeGC(type, reason);
     {
       VMTagScope tagScope(thread, reason == GCReason::kIdle
                                       ? VMTag::kGCIdleTagId
                                       : VMTag::kGCNewSpaceTagId);
       TIMELINE_FUNCTION_GC_DURATION(thread, "CollectNewGeneration");
-      new_space_.Scavenge(reason);
-      RecordAfterGC(GCType::kScavenge);
+      new_space_.Scavenge(thread, type, reason);
+      RecordAfterGC(type);
       PrintStats();
 #if defined(SUPPORT_TIMELINE)
       PrintStatsToTimeline(&tbes, reason);
 #endif
       last_gc_was_old_space_ = false;
     }
-    if (reason == GCReason::kNewSpace) {
+    if (type == GCType::kScavenge && reason == GCReason::kNewSpace) {
       if (old_space_.ReachedHardThreshold()) {
         CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
                                GCReason::kPromotion);
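The two hunks above are the heart of the change: Heap::EvacuateNewSpace is deleted outright, and CollectNewSpaceGarbage gains a GCType parameter so evacuation becomes just another mode of the same function, running under the single GcSafepointOperationScope it already opens. The resulting call shapes, as they appear elsewhere in this diff:

// Ordinary scavenge (reached via Heap::CollectGarbage from the allocation
// slow path):
CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kNewSpace);
// Evacuating scavenge that promotes all survivors (from CollectAllGarbage):
CollectNewSpaceGarbage(thread, GCType::kEvacuate, reason);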
@@ -542,11 +517,11 @@ void Heap::CollectOldSpaceGarbage(Thread* thread,
   }
 }

-void Heap::CollectGarbage(GCType type, GCReason reason) {
-  Thread* thread = Thread::Current();
+void Heap::CollectGarbage(Thread* thread, GCType type, GCReason reason) {
   switch (type) {
     case GCType::kScavenge:
-      CollectNewSpaceGarbage(thread, reason);
+    case GCType::kEvacuate:
+      CollectNewSpaceGarbage(thread, type, reason);
       break;
     case GCType::kMarkSweep:
     case GCType::kMarkCompact:
@@ -557,19 +532,9 @@ void Heap::CollectGarbage(GCType type, GCReason reason) {
   }
 }

-void Heap::CollectGarbage(Space space) {
-  Thread* thread = Thread::Current();
-  if (space == kOld) {
-    CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kOldSpace);
-  } else {
-    ASSERT(space == kNew);
-    CollectNewSpaceGarbage(thread, GCReason::kNewSpace);
-  }
-}
-
 void Heap::CollectMostGarbage(GCReason reason, bool compact) {
   Thread* thread = Thread::Current();
-  CollectNewSpaceGarbage(thread, reason);
+  CollectNewSpaceGarbage(thread, GCType::kScavenge, reason);
   CollectOldSpaceGarbage(
       thread, compact ? GCType::kMarkCompact : GCType::kMarkSweep, reason);
 }
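With the Space-based overload gone, every caller now names the thread, the collection type, and the reason explicitly. The mechanical migration, visible at call sites throughout this diff, is roughly:

// Before (overloads removed by this change):
heap->CollectGarbage(Heap::kNew);
heap->CollectGarbage(GCType::kMarkSweep, GCReason::kDebugging);
// After:
heap->CollectGarbage(thread, GCType::kScavenge, GCReason::kNewSpace);
heap->CollectGarbage(thread, GCType::kMarkSweep, GCReason::kDebugging);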
@@ -579,7 +544,7 @@ void Heap::CollectAllGarbage(GCReason reason, bool compact) {

   // New space is evacuated so this GC will collect all dead objects
   // kept alive by a cross-generational pointer.
-  EvacuateNewSpace(thread, reason);
+  CollectNewSpaceGarbage(thread, GCType::kEvacuate, reason);
   if (thread->is_marking()) {
     // If incremental marking is happening, we need to finish the GC cycle
     // and perform a follow-up GC to purge any "floating garbage" that may be
@@ -610,7 +575,7 @@ void Heap::CheckStartConcurrentMarking(Thread* thread, GCReason reason) {
   // new-space GC. This check is the concurrent-marking equivalent to the
   // new-space GC before synchronous-marking in CollectMostGarbage.
   if (last_gc_was_old_space_) {
-    CollectNewSpaceGarbage(thread, GCReason::kFull);
+    CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kFull);
   }

   StartConcurrentMarking(thread, reason);
@@ -858,6 +823,8 @@ const char* Heap::GCTypeToString(GCType type) {
   switch (type) {
     case GCType::kScavenge:
       return "Scavenge";
+    case GCType::kEvacuate:
+      return "Evacuate";
     case GCType::kStartConcurrentMark:
       return "StartCMark";
     case GCType::kMarkSweep:
@@ -108,8 +108,7 @@ class Heap {
   void NotifyIdle(int64_t deadline);

   // Collect a single generation.
-  void CollectGarbage(Space space);
-  void CollectGarbage(GCType type, GCReason reason);
+  void CollectGarbage(Thread* thread, GCType type, GCReason reason);

   // Collect both generations by performing a scavenge followed by a
   // mark-sweep. This function may not collect all unreachable objects. Because
@@ -344,9 +343,8 @@ class Heap {
   bool VerifyGC(MarkExpectation mark_expectation = kForbidMarked);

   // Helper functions for garbage collection.
-  void CollectNewSpaceGarbage(Thread* thread, GCReason reason);
+  void CollectNewSpaceGarbage(Thread* thread, GCType type, GCReason reason);
   void CollectOldSpaceGarbage(Thread* thread, GCType type, GCReason reason);
-  void EvacuateNewSpace(Thread* thread, GCReason reason);

   // GC stats collection.
   void RecordBeforeGC(GCType type, GCReason reason);
@@ -463,7 +461,8 @@ class GCTestHelper : public AllStatic {
   static void CollectNewSpace() {
     Thread* thread = Thread::Current();
     ASSERT(thread->execution_state() == Thread::kThreadInVM);
-    thread->heap()->CollectNewSpaceGarbage(thread, GCReason::kDebugging);
+    thread->heap()->CollectGarbage(thread, GCType::kScavenge,
+                                   GCReason::kDebugging);
   }

   // Fully collect old gen and wait for the sweeper to finish. The normal call
@@ -474,9 +473,11 @@ class GCTestHelper : public AllStatic {
     Thread* thread = Thread::Current();
     ASSERT(thread->execution_state() == Thread::kThreadInVM);
     if (thread->is_marking()) {
-      thread->heap()->CollectGarbage(GCType::kMarkSweep, GCReason::kDebugging);
+      thread->heap()->CollectGarbage(thread, GCType::kMarkSweep,
+                                     GCReason::kDebugging);
     }
-    thread->heap()->CollectGarbage(GCType::kMarkSweep, GCReason::kDebugging);
+    thread->heap()->CollectGarbage(thread, GCType::kMarkSweep,
+                                   GCReason::kDebugging);
     WaitForGCTasks();
   }

@@ -512,7 +512,8 @@ ISOLATE_UNIT_TEST_CASE(ExternalPromotion) {
 class HeapTestHelper {
  public:
  static void Scavenge(Thread* thread) {
-    thread->heap()->CollectNewSpaceGarbage(thread, GCReason::kDebugging);
+    thread->heap()->CollectNewSpaceGarbage(thread, GCType::kScavenge,
+                                           GCReason::kDebugging);
  }
  static void MarkSweep(Thread* thread) {
    thread->heap()->CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
@@ -63,7 +63,7 @@ ForceGrowthSafepointOperationScope::~ForceGrowthSafepointOperationScope() {
   ASSERT(T->CanCollectGarbage());
   // Check if we passed the growth limit during the scope.
   if (heap->old_space()->ReachedHardThreshold()) {
-    heap->CollectGarbage(GCType::kMarkSweep, GCReason::kOldSpace);
+    heap->CollectGarbage(T, GCType::kMarkSweep, GCReason::kOldSpace);
   } else {
     heap->CheckStartConcurrentMarking(T, GCReason::kOldSpace);
   }
@@ -1731,21 +1731,20 @@ uword ScavengerVisitorBase<parallel>::TryAllocateCopySlow(intptr_t size) {
   return tail_->TryAllocateGC(size);
 }

-void Scavenger::Scavenge(GCReason reason) {
+void Scavenger::Scavenge(Thread* thread, GCType type, GCReason reason) {
   int64_t start = OS::GetCurrentMonotonicMicros();

-  // Ensure that all threads for this isolate are at a safepoint (either stopped
-  // or in native code). If two threads are racing at this point, the loser
-  // will continue with its scavenge after waiting for the winner to complete.
-  // TODO(koda): Consider moving SafepointThreads into allocation failure/retry
-  // logic to avoid needless collections.
-  Thread* thread = Thread::Current();
-  GcSafepointOperationScope safepoint_scope(thread);
+  ASSERT(thread->IsAtSafepoint());

   // Scavenging is not reentrant. Make sure that is the case.
   ASSERT(!scavenging_);
   scavenging_ = true;

+  if (type == GCType::kEvacuate) {
+    // Forces the next scavenge to promote all the objects in the new space.
+    early_tenure_ = true;
+  }
+
   if (FLAG_verify_before_gc) {
     OS::PrintErr("Verifying before Scavenge...");
     heap_->WaitForSweeperTasksAtSafepoint(thread);
@@ -1807,6 +1806,11 @@ void Scavenger::Scavenge(GCReason reason) {
   // Done scavenging. Reset the marker.
   ASSERT(scavenging_);
   scavenging_ = false;
+
+  // It is possible for objects to stay in the new space
+  // if the VM cannot create more pages for these objects.
+  ASSERT((type != GCType::kEvacuate) || (UsedInWords() == 0) ||
+         failed_to_promote_);
 }

 intptr_t Scavenger::SerialScavenge(SemiSpace* from) {
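Evacuation now rides on the regular scavenge path: Scavenge sets early_tenure_ for GCType::kEvacuate, which makes the scavenger promote every surviving object, and the relocated assertion above checks that an evacuating scavenge really emptied new space unless page allocation failed (failed_to_promote_). A toy version of such a tenuring decision, with hypothetical names and policy rather than the VM's actual scavenger state:

// Hypothetical sketch of an early-tenure policy.
bool ShouldPromote(bool early_tenure, int survived_scavenges,
                   int tenure_threshold) {
  // An evacuating scavenge promotes every survivor, emptying new space;
  // a normal scavenge promotes only objects that have survived enough
  // prior scavenges.
  return early_tenure || survived_scavenges >= tenure_threshold;
}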
@@ -1970,23 +1974,4 @@ void Scavenger::PrintToJSONObject(JSONObject* object) const {
 }
 #endif  // !PRODUCT

-void Scavenger::Evacuate(GCReason reason) {
-  // We need a safepoint here to prevent allocation right before or right after
-  // the scavenge.
-  // The former can introduce an object that we might fail to collect.
-  // The latter means even if the scavenge promotes every object in the new
-  // space, the new allocation means the space is not empty,
-  // causing the assertion below to fail.
-  GcSafepointOperationScope scope(Thread::Current());
-
-  // Forces the next scavenge to promote all the objects in the new space.
-  early_tenure_ = true;
-
-  Scavenge(reason);
-
-  // It is possible for objects to stay in the new space
-  // if the VM cannot create more pages for these objects.
-  ASSERT((UsedInWords() == 0) || failed_to_promote_);
-}
-
 }  // namespace dart
@@ -277,10 +277,7 @@ class Scavenger {
   void AbandonRemainingTLABForDebugging(Thread* thread);

   // Collect the garbage in this scavenger.
-  void Scavenge(GCReason reason);
-
-  // Promote all live objects.
-  void Evacuate(GCReason reason);
+  void Scavenge(Thread* thread, GCType type, GCReason reason);

   int64_t UsedInWords() const {
     MutexLocker ml(&space_lock_);
@@ -31,6 +31,7 @@ class SpaceUsage {

 enum class GCType {
   kScavenge,
+  kEvacuate,
   kStartConcurrentMark,
   kMarkSweep,
   kMarkCompact,
@@ -1807,9 +1807,9 @@ class EnterIsolateGroupScope {
 // an individual isolate.
 class NoActiveIsolateScope : public StackResource {
  public:
-  NoActiveIsolateScope()
-      : StackResource(Thread::Current()),
-        thread_(static_cast<Thread*>(thread())) {
+  NoActiveIsolateScope() : NoActiveIsolateScope(Thread::Current()) {}
+  explicit NoActiveIsolateScope(Thread* thread)
+      : StackResource(thread), thread_(thread) {
    saved_isolate_ = thread_->isolate_;
    thread_->isolate_ = nullptr;
  }
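The NoActiveIsolateScope hunk above uses a C++11 delegating constructor: the zero-argument convenience form forwards to a new explicit Thread* form, so callers that already hold the thread (like the GC paths in this diff) skip the Thread::Current() lookup. The same shape in miniature, with hypothetical stand-in names:

// Illustrative only; Widget/CurrentWidget() stand in for Thread and
// Thread::Current().
struct Widget {};
Widget* CurrentWidget() { static Widget w; return &w; }  // pretend lookup

class Scope {
 public:
  Scope() : Scope(CurrentWidget()) {}   // convenience form pays for the lookup
  explicit Scope(Widget* w) : w_(w) {}  // cheap form when the caller has one

 private:
  Widget* w_;
};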
@@ -78,7 +78,7 @@ VM_UNIT_TEST_CASE(Metric_OnDemand) {

 ISOLATE_UNIT_TEST_CASE(Metric_EmbedderAPI) {
   {
-    TransitionVMToNative transition(Thread::Current());
+    TransitionVMToNative transition(thread);

     const char* kScript = "void main() {}";
     Dart_Handle api_lib = TestCase::LoadTestScript(
@@ -88,15 +88,16 @@ ISOLATE_UNIT_TEST_CASE(Metric_EmbedderAPI) {

   // Ensure we've done new/old GCs to ensure max metrics are initialized.
   String::New("<land-in-new-space>", Heap::kNew);
-  IsolateGroup::Current()->heap()->new_space()->Scavenge(GCReason::kDebugging);
-  IsolateGroup::Current()->heap()->CollectAllGarbage(GCReason::kDebugging,
-                                                     /*compact=*/ true);
+  thread->heap()->CollectGarbage(thread, GCType::kScavenge,
+                                 GCReason::kDebugging);
+  thread->heap()->CollectGarbage(thread, GCType::kMarkCompact,
+                                 GCReason::kDebugging);

   // Ensure we've something live in new space.
   String::New("<land-in-new-space2>", Heap::kNew);

   {
-    TransitionVMToNative transition(Thread::Current());
+    TransitionVMToNative transition(thread);

     Dart_Isolate isolate = Dart_CurrentIsolate();
 #if !defined(PRODUCT)
@@ -4473,13 +4473,13 @@ static void GetHeapMap(Thread* thread, JSONStream* js) {
   auto isolate_group = thread->isolate_group();
   if (js->HasParam("gc")) {
     if (js->ParamIs("gc", "scavenge")) {
-      isolate_group->heap()->CollectGarbage(GCType::kScavenge,
+      isolate_group->heap()->CollectGarbage(thread, GCType::kScavenge,
                                             GCReason::kDebugging);
     } else if (js->ParamIs("gc", "mark-sweep")) {
-      isolate_group->heap()->CollectGarbage(GCType::kMarkSweep,
+      isolate_group->heap()->CollectGarbage(thread, GCType::kMarkSweep,
                                             GCReason::kDebugging);
     } else if (js->ParamIs("gc", "mark-compact")) {
-      isolate_group->heap()->CollectGarbage(GCType::kMarkCompact,
+      isolate_group->heap()->CollectGarbage(thread, GCType::kMarkCompact,
                                             GCReason::kDebugging);
     } else {
       PrintInvalidParamError(js, "gc");
@@ -437,7 +437,7 @@ ErrorPtr Thread::HandleInterrupts() {
   if ((interrupt_bits & kVMInterrupt) != 0) {
     CheckForSafepoint();
     if (isolate_group()->store_buffer()->Overflowed()) {
-      heap()->CollectGarbage(GCType::kScavenge, GCReason::kStoreBuffer);
+      heap()->CollectGarbage(this, GCType::kScavenge, GCReason::kStoreBuffer);
     }

 #if !defined(PRODUCT)
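A last instance of the pattern: Thread::HandleInterrupts already runs on the interrupted thread, so it passes this instead of re-deriving the current thread. In miniature (hypothetical types, not the VM's declarations):

class Thread {
 public:
  void HandleInterrupts() {
    heap_.CollectGarbage(this);  // already the current thread; no lookup
  }

 private:
  struct Heap {
    void CollectGarbage(Thread* thread) { /* uses the supplied thread */ }
  } heap_;
};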