[vm, gc] Evaluate old-gen GC on each new-gen page.

This gives starting or finalizing concurrent marking a chance to run at points other than immediately after a scavenge.

TEST=ci
Change-Id: Iec1ba7b7440045cc18e01637af1f865d588b24c2
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/246163
Reviewed-by: Siva Annamalai <asiva@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Ryan Macnak, 2022-06-07 21:56:41 +00:00, committed by Commit Bot
parent 128a7e1641
commit 88ca1e95b8
8 changed files with 69 additions and 58 deletions
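
In outline: acquiring a fresh new-generation page (a TLAB in the scavenger) now also evaluates old-generation GC, so a pending marking cycle can be finalized, or a new one started, without waiting for the next scavenge. The condensed sketch below is illustrative only; the trimmed-down Heap, Phase, and threshold logic are assumptions of the sketch, and only the phase names and the dispatch shape follow the diff.

// Standalone sketch, not the VM sources.
#include <cstdio>

enum class Phase {
  kDone,
  kMarking,
  kSweepingLarge,
  kSweepingRegular,
  kAwaitingFinalization
};

class Heap {
 public:
  explicit Heap(Phase phase) : phase_(phase) {}

  // Evaluated on every attempt to take a fresh new-gen page/TLAB, not only
  // right after a scavenge, so finalizing or starting marking is not starved.
  void CheckConcurrentMarking() {
    switch (phase_) {
      case Phase::kMarking:
      case Phase::kSweepingLarge:
      case Phase::kSweepingRegular:
        return;  // Busy; concurrent work already in flight.
      case Phase::kAwaitingFinalization:
        FinalizeMarking();  // Finish the old-gen collection now.
        return;
      case Phase::kDone:
        if (ReachedSoftThreshold()) StartConcurrentMarking();
        return;
    }
  }

  bool TryAllocateNewPage() {
    CheckConcurrentMarking();  // The hook this commit adds to the page path.
    return true;               // Pretend the page allocation succeeded.
  }

 private:
  bool ReachedSoftThreshold() const { return true; }  // Always "yes" for the demo.
  void FinalizeMarking() { std::puts("finalize marking"); phase_ = Phase::kDone; }
  void StartConcurrentMarking() { std::puts("start marking"); phase_ = Phase::kMarking; }

  Phase phase_;
};

int main() {
  Heap heap(Phase::kAwaitingFinalization);
  heap.TryAllocateNewPage();  // Finalizes old-gen marking instead of waiting.
  heap.TryAllocateNewPage();  // Now kDone: may kick off a new marking cycle.
  return 0;
}

The real CheckConcurrentMarking additionally scavenges first when the previous GC was an old-space one, as the comment in the heap.cc hunk below explains.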

@@ -193,7 +193,7 @@ void Heap::CheckExternalGC(Thread* thread) {
}
CollectGarbage(thread, GCType::kMarkSweep, GCReason::kExternal);
} else {
CheckStartConcurrentMarking(thread, GCReason::kExternal);
CheckConcurrentMarking(thread, GCReason::kExternal);
}
}
@@ -457,7 +457,7 @@ void Heap::CollectNewSpaceGarbage(Thread* thread,
CollectOldSpaceGarbage(thread, GCType::kMarkSweep,
GCReason::kPromotion);
} else {
CheckStartConcurrentMarking(thread, GCReason::kPromotion);
CheckConcurrentMarking(thread, GCReason::kPromotion);
}
}
}
@@ -554,29 +554,42 @@ void Heap::CollectAllGarbage(GCReason reason, bool compact) {
WaitForSweeperTasks(thread);
}
void Heap::CheckStartConcurrentMarking(Thread* thread, GCReason reason) {
void Heap::CheckConcurrentMarking(Thread* thread, GCReason reason) {
PageSpace::Phase phase;
{
MonitorLocker ml(old_space_.tasks_lock());
if (old_space_.phase() != PageSpace::kDone) {
return; // Busy.
}
phase = old_space_.phase();
}
if (old_space_.ReachedSoftThreshold()) {
// New-space objects are roots during old-space GC. This means that even
// unreachable new-space objects prevent old-space objects they reference
// from being collected during an old-space GC. Normally this is not an
// issue because new-space GCs run much more frequently than old-space GCs.
// If new-space allocation is low and direct old-space allocation is high,
// which can happen in a program that allocates large objects and little
// else, old-space can fill up with unreachable objects until the next
// new-space GC. This check is the concurrent-marking equivalent to the
// new-space GC before synchronous-marking in CollectMostGarbage.
if (last_gc_was_old_space_) {
CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kFull);
}
StartConcurrentMarking(thread, reason);
switch (phase) {
case PageSpace::kMarking:
// TODO(rmacnak): Have this thread help with marking.
case PageSpace::kSweepingLarge:
case PageSpace::kSweepingRegular:
return; // Busy.
case PageSpace::kAwaitingFinalization:
CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
return;
case PageSpace::kDone:
if (old_space_.ReachedSoftThreshold()) {
// New-space objects are roots during old-space GC. This means that even
// unreachable new-space objects prevent old-space objects they
// reference from being collected during an old-space GC. Normally this
// is not an issue because new-space GCs run much more frequently than
// old-space GCs. If new-space allocation is low and direct old-space
// allocation is high, which can happen in a program that allocates
// large objects and little else, old-space can fill up with unreachable
// objects until the next new-space GC. This check is the
// concurrent-marking equivalent to the new-space GC before
// synchronous-marking in CollectMostGarbage.
if (last_gc_was_old_space_) {
CollectNewSpaceGarbage(thread, GCType::kScavenge, GCReason::kFull);
}
StartConcurrentMarking(thread, reason);
}
return;
default:
UNREACHABLE();
}
}
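
The comment above captures the subtle part: new-space objects are treated as roots while old space is collected, so even an unreachable new-space object pins the old-space objects it references until a scavenge discards it. A toy reachability model (the Obj and MarkOldSpace names are invented for this sketch and are not VM code) makes the effect concrete:

#include <cstdio>
#include <set>
#include <vector>

struct Obj {
  bool in_new_space;
  std::vector<Obj*> refs;
};

// Old-space marking, where (for this sketch) the roots are exactly the
// new-space objects still sitting in the nursery.
std::set<Obj*> MarkOldSpace(const std::vector<Obj*>& heap) {
  std::set<Obj*> live;
  std::vector<Obj*> work;
  for (Obj* obj : heap) {
    if (obj->in_new_space) work.push_back(obj);  // Treat as a root.
  }
  while (!work.empty()) {
    Obj* obj = work.back();
    work.pop_back();
    if (!live.insert(obj).second) continue;  // Already marked.
    for (Obj* ref : obj->refs) work.push_back(ref);
  }
  return live;
}

int main() {
  Obj big_old{/*in_new_space=*/false, {}};            // Large old-space object.
  Obj dead_young{/*in_new_space=*/true, {&big_old}};  // Unreachable, but still scanned.
  std::vector<Obj*> heap = {&big_old, &dead_young};

  bool pinned = MarkOldSpace(heap).count(&big_old) > 0;
  std::printf("before scavenge, old object survives marking: %d\n", pinned);  // 1

  heap = {&big_old};  // A scavenge discards the dead new-space object.
  pinned = MarkOldSpace(heap).count(&big_old) > 0;
  std::printf("after scavenge, old object survives marking: %d\n", pinned);   // 0
  return 0;
}

Because scavenges normally run far more often than old-space collections, the pinning window is usually short; the last_gc_was_old_space_ check above covers the case where two old-space collections would otherwise run back to back with no scavenge in between.
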
@@ -595,17 +608,6 @@ void Heap::StartConcurrentMarking(Thread* thread, GCReason reason) {
#endif
}
void Heap::CheckFinishConcurrentMarking(Thread* thread) {
bool ready;
{
MonitorLocker ml(old_space_.tasks_lock());
ready = old_space_.phase() == PageSpace::kAwaitingFinalization;
}
if (ready) {
CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
}
}
void Heap::WaitForMarkerTasks(Thread* thread) {
MonitorLocker ml(old_space_.tasks_lock());
while ((old_space_.phase() == PageSpace::kMarking) ||

@@ -125,9 +125,8 @@ class Heap {
void CollectAllGarbage(GCReason reason = GCReason::kFull,
bool compact = false);
void CheckStartConcurrentMarking(Thread* thread, GCReason reason);
void CheckConcurrentMarking(Thread* thread, GCReason reason);
void StartConcurrentMarking(Thread* thread, GCReason reason);
void CheckFinishConcurrentMarking(Thread* thread);
void WaitForMarkerTasks(Thread* thread);
void WaitForSweeperTasks(Thread* thread);
void WaitForSweeperTasksAtSafepoint(Thread* thread);

@@ -465,19 +465,6 @@ void PageSpace::FreePages(OldPage* pages) {
}
}
void PageSpace::EvaluateConcurrentMarking(GrowthPolicy growth_policy) {
if (growth_policy != kForceGrowth) {
ASSERT(GrowthControlState());
if (heap_ != NULL) { // Some unit tests.
Thread* thread = Thread::Current();
if (thread->CanCollectGarbage()) {
heap_->CheckFinishConcurrentMarking(thread);
heap_->CheckStartConcurrentMarking(thread, GCReason::kOldSpace);
}
}
}
}
uword PageSpace::TryAllocateInFreshPage(intptr_t size,
FreeList* freelist,
OldPage::PageType type,
@@ -485,7 +472,12 @@ uword PageSpace::TryAllocateInFreshPage(intptr_t size,
bool is_locked) {
ASSERT(Heap::IsAllocatableViaFreeLists(size));
EvaluateConcurrentMarking(growth_policy);
if (growth_policy != kForceGrowth) {
ASSERT(GrowthControlState());
if (heap_ != nullptr) { // Some unit tests.
heap_->CheckConcurrentMarking(Thread::Current(), GCReason::kOldSpace);
}
}
uword result = 0;
SpaceUsage after_allocation = GetCurrentUsage();
@@ -521,7 +513,12 @@ uword PageSpace::TryAllocateInFreshLargePage(intptr_t size,
GrowthPolicy growth_policy) {
ASSERT(!Heap::IsAllocatableViaFreeLists(size));
EvaluateConcurrentMarking(growth_policy);
if (growth_policy != kForceGrowth) {
ASSERT(GrowthControlState());
if (heap_ != nullptr) { // Some unit tests.
heap_->CheckConcurrentMarking(Thread::Current(), GCReason::kOldSpace);
}
}
intptr_t page_size_in_words = LargePageSizeInWordsFor(size);
if ((page_size_in_words << kWordSizeLog2) < size) {

@@ -553,8 +553,6 @@ class PageSpace {
OldPage::PageType type,
GrowthPolicy growth_policy);
void EvaluateConcurrentMarking(GrowthPolicy growth_policy);
// Makes bump block walkable; do not call concurrently with mutator.
void MakeIterable() const;

@@ -65,7 +65,7 @@ ForceGrowthSafepointOperationScope::~ForceGrowthSafepointOperationScope() {
if (heap->old_space()->ReachedHardThreshold()) {
heap->CollectGarbage(T, GCType::kMarkSweep, GCReason::kOldSpace);
} else {
heap->CheckStartConcurrentMarking(T, GCReason::kOldSpace);
heap->CheckConcurrentMarking(T, GCReason::kOldSpace);
}
}
}

@@ -1672,12 +1672,19 @@ ObjectPtr Scavenger::FindObject(FindObjectVisitor* visitor) {
return Object::null();
}
void Scavenger::TryAllocateNewTLAB(Thread* thread, intptr_t min_size) {
void Scavenger::TryAllocateNewTLAB(Thread* thread,
intptr_t min_size,
bool can_safepoint) {
ASSERT(heap_ != Dart::vm_isolate_group()->heap());
ASSERT(!scavenging_);
AbandonRemainingTLAB(thread);
if (can_safepoint) {
ASSERT(thread->no_safepoint_scope_depth() == 0);
heap_->CheckConcurrentMarking(thread, GCReason::kNewSpace);
}
MutexLocker ml(&space_lock_);
for (NewPage* page = to_->head(); page != nullptr; page = page->next()) {
if (page->owner() != nullptr) continue;
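
Note why the new hook is guarded by can_safepoint: finalizing marking is a stop-the-world collection, so only allocation paths that may reach a safepoint are allowed to trigger it, and callers inside a no-safepoint scope (such as the fast object-graph copy below, which switches to TryAllocateNoSafepoint) must skip the check entirely. A simplified version of the guard, with stand-in Thread and Heap types that are assumptions of this sketch:

#include <cassert>
#include <cstdint>
#include <cstdio>

struct Thread {
  int no_safepoint_scope_depth = 0;
};

struct Heap {
  // Stand-in: may finalize marking, i.e. run a stop-the-world collection.
  void CheckConcurrentMarking(Thread* /*thread*/) { std::puts("GC check"); }
};

struct Scavenger {
  Heap* heap_;

  uintptr_t TryAllocateNewTLAB(Thread* thread, intptr_t min_size, bool can_safepoint) {
    (void)min_size;  // TLAB sizing elided in this sketch.
    if (can_safepoint) {
      // A caller holding raw pointers inside a no-safepoint scope must never
      // reach this branch; it has to use the no-safepoint entry point instead.
      assert(thread->no_safepoint_scope_depth == 0);
      heap_->CheckConcurrentMarking(thread);
    }
    return 0;  // Actual page acquisition elided.
  }
};

int main() {
  Heap heap;
  Scavenger scavenger{&heap};
  Thread thread;

  scavenger.TryAllocateNewTLAB(&thread, 256, /*can_safepoint=*/true);   // Normal mutator path.
  thread.no_safepoint_scope_depth = 1;  // E.g. during a fast object-graph copy.
  scavenger.TryAllocateNewTLAB(&thread, 256, /*can_safepoint=*/false);  // Skips the GC check.
  return 0;
}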

@@ -283,7 +283,15 @@ class Scavenger {
if (LIKELY(addr != 0)) {
return addr;
}
TryAllocateNewTLAB(thread, size);
TryAllocateNewTLAB(thread, size, true);
return TryAllocateFromTLAB(thread, size);
}
uword TryAllocateNoSafepoint(Thread* thread, intptr_t size) {
uword addr = TryAllocateFromTLAB(thread, size);
if (LIKELY(addr != 0)) {
return addr;
}
TryAllocateNewTLAB(thread, size, false);
return TryAllocateFromTLAB(thread, size);
}
void AbandonRemainingTLAB(Thread* thread);
@@ -393,7 +401,7 @@ class Scavenger {
thread->set_top(result + size);
return result;
}
void TryAllocateNewTLAB(Thread* thread, intptr_t size);
void TryAllocateNewTLAB(Thread* thread, intptr_t size, bool can_safepoint);
SemiSpace* Prologue(GCReason reason);
intptr_t ParallelScavenge(SemiSpace* from);

@@ -788,7 +788,7 @@ class FastObjectCopyBase : public ObjectCopyBase {
const uword size =
header_size != 0 ? header_size : from.untag()->HeapSize();
if (Heap::IsAllocatableInNewSpace(size)) {
const uword alloc = new_space_->TryAllocate(thread_, size);
const uword alloc = new_space_->TryAllocateNoSafepoint(thread_, size);
if (alloc != 0) {
ObjectPtr to(reinterpret_cast<UntaggedObject*>(alloc));
fast_forward_map_.Insert(from, to, size);
@@ -1353,7 +1353,7 @@ class ObjectCopy : public Base {
auto raw_from = from.ptr().untag();
auto raw_to = to.ptr().untag();
const intptr_t cid = Types::GetTypedDataPtr(from)->GetClassId();
raw_to->length_ = raw_from->length_;
ASSERT(raw_to->length_ == raw_from->length_);
raw_to->RecomputeDataField();
const intptr_t length =
TypedData::ElementSizeInBytes(cid) * Smi::Value(raw_from->length_);
@@ -1605,7 +1605,7 @@ class FastObjectCopy : public ObjectCopy<FastObjectCopyBase> {
if (length == 0) return Object::null();
const intptr_t size = Array::InstanceSize(length);
const uword array_addr = new_space_->TryAllocate(thread_, size);
const uword array_addr = new_space_->TryAllocateNoSafepoint(thread_, size);
if (array_addr == 0) {
exception_msg_ = kFastAllocationFailed;
return Marker();