From 1c1e11b0e76e489a4ef9091999687b959a2d921c Mon Sep 17 00:00:00 2001
From: Ryan Macnak
Date: Wed, 10 May 2023 17:24:50 +0000
Subject: [PATCH] [vm, gc] Retry allocation under the safepoint before performing GC.

This avoids back-to-back GCs where multiple threads race to enter a
safepoint operation to perform GC, and the losers of the race perform
GC after they enter their own safepoint operation.

TEST=ci
Bug: https://github.com/dart-lang/sdk/issues/29415
Change-Id: Ic9be830b0c4438f9b57ea9f54464b342872175f8
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/300860
Reviewed-by: Siva Annamalai
Commit-Queue: Ryan Macnak
---
 runtime/vm/heap/heap.cc | 26 ++++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/runtime/vm/heap/heap.cc b/runtime/vm/heap/heap.cc
index 302cdfccf66..224614eb3d5 100644
--- a/runtime/vm/heap/heap.cc
+++ b/runtime/vm/heap/heap.cc
@@ -86,9 +86,16 @@ uword Heap::AllocateNew(Thread* thread, intptr_t size) {
     return addr;
   }
   if (!assume_scavenge_will_fail_ && !thread->force_growth()) {
-    // This call to CollectGarbage might end up "reusing" a collection spawned
-    // from a different thread and will be racing to allocate the requested
-    // memory with other threads being released after the collection.
+    GcSafepointOperationScope safepoint_operation(thread);
+
+    // Another thread may have won the race to the safepoint and performed a GC
+    // before this thread acquired the safepoint. Retry the allocation under the
+    // safepoint to avoid back-to-back GC.
+    addr = new_space_.TryAllocate(thread, size);
+    if (addr != 0) {
+      return addr;
+    }
+
     CollectGarbage(thread, GCType::kScavenge, GCReason::kNewSpace);
 
     addr = new_space_.TryAllocate(thread, size);
@@ -123,6 +130,14 @@ uword Heap::AllocateOld(Thread* thread, intptr_t size, bool is_exec) {
     if (addr != 0) {
       return addr;
     }
+    GcSafepointOperationScope safepoint_operation(thread);
+    // Another thread may have won the race to the safepoint and performed a GC
+    // before this thread acquired the safepoint. Retry the allocation under the
+    // safepoint to avoid back-to-back GC.
+    addr = old_space_.TryAllocate(size, is_exec);
+    if (addr != 0) {
+      return addr;
+    }
     // All GC tasks finished without allocating successfully. Collect both
     // generations.
     CollectMostGarbage(GCReason::kOldSpace, /*compact=*/false);
@@ -131,7 +146,7 @@ uword Heap::AllocateOld(Thread* thread, intptr_t size, bool is_exec) {
       return addr;
     }
     // Wait for all of the concurrent tasks to finish before giving up.
-    WaitForSweeperTasks(thread);
+    WaitForSweeperTasksAtSafepoint(thread);
     addr = old_space_.TryAllocate(size, is_exec);
     if (addr != 0) {
       return addr;
@@ -143,7 +158,7 @@ uword Heap::AllocateOld(Thread* thread, intptr_t size, bool is_exec) {
     }
     // Before throwing an out-of-memory error try a synchronous GC.
     CollectAllGarbage(GCReason::kOldSpace, /*compact=*/true);
-    WaitForSweeperTasks(thread);
+    WaitForSweeperTasksAtSafepoint(thread);
   }
   uword addr = old_space_.TryAllocate(size, is_exec, PageSpace::kForceGrowth);
   if (addr != 0) {
@@ -573,7 +588,6 @@ void Heap::CollectAllGarbage(GCReason reason, bool compact) {
   }
   CollectOldSpaceGarbage(
       thread, compact ? GCType::kMarkCompact : GCType::kMarkSweep, reason);
-  WaitForSweeperTasks(thread);
 }
 
 void Heap::CheckCatchUp(Thread* thread) {
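
The change is an instance of a double-checked retry: when several threads race to perform an expensive exclusive operation, the losers recheck whether the work is still needed once they acquire the exclusive scope, instead of repeating it. The standalone C++ sketch below shows only that control flow; TryAllocate, CollectGarbage, and the mutex (playing the role of GcSafepointOperationScope) are toy names invented for illustration, not Dart VM APIs.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// Toy stand-ins for the VM's allocator and collector: the "heap" is a
// counter of free bytes and "GC" refills it. None of these names are
// Dart VM API; only the control flow mirrors the patch.
std::atomic<std::size_t> free_bytes{0};
std::atomic<int> gc_count{0};
std::mutex safepoint_mutex;  // stands in for GcSafepointOperationScope

std::uintptr_t TryAllocate(std::size_t size) {
  std::size_t old = free_bytes.load();
  while (old >= size) {
    if (free_bytes.compare_exchange_weak(old, old - size)) {
      return 1;  // nonzero means success; a real allocator returns an address
    }
  }
  return 0;  // allocation failed; caller must collect and retry
}

void CollectGarbage() {
  free_bytes += 1024;  // pretend the collection freed some memory
  gc_count++;
}

std::uintptr_t AllocateWithRetry(std::size_t size) {
  std::uintptr_t addr = TryAllocate(size);
  if (addr != 0) {
    return addr;
  }

  // Only one thread at a time runs the expensive operation; the others
  // block here, like losers of the race to the safepoint.
  std::lock_guard<std::mutex> lock(safepoint_mutex);

  // Retry under the lock: the winner may already have collected, so this
  // allocation usually succeeds without a second, back-to-back GC.
  addr = TryAllocate(size);
  if (addr != 0) {
    return addr;
  }

  CollectGarbage();
  return TryAllocate(size);
}

int main() {
  std::vector<std::thread> threads;
  for (int i = 0; i < 8; i++) {
    threads.emplace_back([] { AllocateWithRetry(64); });
  }
  for (auto& t : threads) {
    t.join();
  }
  std::printf("GCs performed: %d\n", gc_count.load());
  return 0;
}

With the retry under the lock, the sketch performs a single collection for eight racing threads; removing the retry reproduces the back-to-back GCs the patch eliminates, since each loser collects again after acquiring the lock.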