[vm, gc] Use idle time to deallocate cached new-space pages.

New-space pages are cached because deallocation, not allocation, is slow.

TEST=ci
Change-Id: I8439aa0477518a1fb7ac643a435de53bf6679ac7
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/216687
Reviewed-by: Liam Appelbe <liama@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Authored by Ryan Macnak 2021-10-14 18:58:35 +00:00; committed by commit-bot@chromium.org
parent fc868e7ff8
commit 63177bc4fc
3 changed files with 59 additions and 50 deletions
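
Before the diffs, a minimal standalone sketch of the pattern this change completes. It is illustrative only; PageCache, Acquire, Release, and the malloc/free stand-ins are assumptions, not the VM's implementation. Freed pages land in a small bounded LIFO cache so the next scavenge can reuse them without asking the OS for new mappings, and the genuinely slow deallocation is deferred to an explicit drain that runs only when the caller reports idle time.

// Illustrative sketch, not Dart VM code: a bounded page cache whose
// expensive teardown is deferred to an idle-time drain.
#include <cstdlib>
#include <mutex>
#include <vector>

class PageCache {
 public:
  // Hot path: prefer a cached page over a fresh allocation.
  void* Acquire(size_t page_size) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (!pages_.empty()) {
      void* page = pages_.back();
      pages_.pop_back();
      return page;
    }
    return std::malloc(page_size);  // stand-in for a virtual-memory mapping
  }

  // Hot path: cache the page instead of deallocating, which is the slow part.
  void Release(void* page) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (pages_.size() < kCapacity) {
      pages_.push_back(page);
      return;
    }
    std::free(page);  // cache full; pay the deallocation cost now
  }

  // Run only when the embedder says we are idle.
  void Drain() {
    std::lock_guard<std::mutex> lock(mutex_);
    while (!pages_.empty()) {
      std::free(pages_.back());
      pages_.pop_back();
    }
  }

 private:
  static constexpr size_t kCapacity = 8;
  std::mutex mutex_;
  std::vector<void*> pages_;
};

The actual change below adds exactly such a drain hook, SemiSpace::DrainCache(), and calls it from Heap::NotifyIdle() when the idle deadline has not yet passed.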

runtime/vm/heap/heap.cc

@@ -365,51 +365,57 @@ void Heap::HintFreed(intptr_t size) {
 void Heap::NotifyIdle(int64_t deadline) {
   Thread* thread = Thread::Current();
   TIMELINE_FUNCTION_GC_DURATION(thread, "NotifyIdle");
-  GcSafepointOperationScope safepoint_operation(thread);
+  {
+    GcSafepointOperationScope safepoint_operation(thread);

-  // Check if we want to collect new-space first, because if we want to collect
-  // both new-space and old-space, the new-space collection should run first
-  // to shrink the root set (make old-space GC faster) and avoid
-  // intergenerational garbage (make old-space GC free more memory).
-  if (new_space_.ShouldPerformIdleScavenge(deadline)) {
-    CollectNewSpaceGarbage(thread, GCReason::kIdle);
-  }
+    // Check if we want to collect new-space first, because if we want to
+    // collect both new-space and old-space, the new-space collection should run
+    // first to shrink the root set (make old-space GC faster) and avoid
+    // intergenerational garbage (make old-space GC free more memory).
+    if (new_space_.ShouldPerformIdleScavenge(deadline)) {
+      CollectNewSpaceGarbage(thread, GCReason::kIdle);
+    }

-  // Check if we want to collect old-space, in decreasing order of cost.
-  // Because we use a deadline instead of a timeout, we automatically take any
-  // time used up by a scavenge into account when deciding if we can complete
-  // a mark-sweep on time.
-  if (old_space_.ShouldPerformIdleMarkCompact(deadline)) {
-    // We prefer mark-compact over other old space GCs if we have enough time,
-    // since it removes old space fragmentation and frees up most memory.
-    // Blocks for O(heap), roughly twice as costly as mark-sweep.
-    CollectOldSpaceGarbage(thread, GCType::kMarkCompact, GCReason::kIdle);
-  } else if (old_space_.ReachedHardThreshold()) {
-    // Even though the following GC may exceed our idle deadline, we need to
-    // ensure that promotions during idle scavenges do not lead to
-    // unbounded growth of old space. If a program is allocating only in new
-    // space and all scavenges happen during idle time, then NotifyIdle will
-    // be the only place that checks the old space allocation limit.
-    // Compare the tail end of Heap::CollectNewSpaceGarbage.
-    // Blocks for O(heap).
-    CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kIdle);
-  } else if (old_space_.ShouldStartIdleMarkSweep(deadline) ||
-             old_space_.ReachedSoftThreshold()) {
-    // If we have both work to do and enough time, start or finish GC.
-    // If we have crossed the soft threshold, ignore time; the next old-space
-    // allocation will trigger this work anyway, so we try to pay at least
-    // some of that cost with idle time.
-    // Blocks for O(roots).
-    PageSpace::Phase phase;
-    {
-      MonitorLocker ml(old_space_.tasks_lock());
-      phase = old_space_.phase();
-    }
-    if (phase == PageSpace::kAwaitingFinalization) {
-      CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
-    } else if (phase == PageSpace::kDone) {
-      StartConcurrentMarking(thread);
-    }
-  }
+    // Check if we want to collect old-space, in decreasing order of cost.
+    // Because we use a deadline instead of a timeout, we automatically take any
+    // time used up by a scavenge into account when deciding if we can complete
+    // a mark-sweep on time.
+    if (old_space_.ShouldPerformIdleMarkCompact(deadline)) {
+      // We prefer mark-compact over other old space GCs if we have enough time,
+      // since it removes old space fragmentation and frees up most memory.
+      // Blocks for O(heap), roughly twice as costly as mark-sweep.
+      CollectOldSpaceGarbage(thread, GCType::kMarkCompact, GCReason::kIdle);
+    } else if (old_space_.ReachedHardThreshold()) {
+      // Even though the following GC may exceed our idle deadline, we need to
+      // ensure that promotions during idle scavenges do not lead to
+      // unbounded growth of old space. If a program is allocating only in new
+      // space and all scavenges happen during idle time, then NotifyIdle will be
+      // the only place that checks the old space allocation limit.
+      // Compare the tail end of Heap::CollectNewSpaceGarbage.
+      // Blocks for O(heap).
+      CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kIdle);
+    } else if (old_space_.ShouldStartIdleMarkSweep(deadline) ||
+               old_space_.ReachedSoftThreshold()) {
+      // If we have both work to do and enough time, start or finish GC.
+      // If we have crossed the soft threshold, ignore time; the next old-space
+      // allocation will trigger this work anyway, so we try to pay at least some
+      // of that cost with idle time.
+      // Blocks for O(roots).
+      PageSpace::Phase phase;
+      {
+        MonitorLocker ml(old_space_.tasks_lock());
+        phase = old_space_.phase();
+      }
+      if (phase == PageSpace::kAwaitingFinalization) {
+        CollectOldSpaceGarbage(thread, GCType::kMarkSweep, GCReason::kFinalize);
+      } else if (phase == PageSpace::kDone) {
+        StartConcurrentMarking(thread);
+      }
+    }
+  }
+
+  if (OS::GetCurrentMonotonicMicros() < deadline) {
+    SemiSpace::DrainCache();
+  }
 }
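
For orientation, a hypothetical embedder-side sketch of where such a deadline could come from. HeapLike, OnFrameEnd, NowMicros, and the 16 ms frame budget are assumptions; only the NotifyIdle(int64_t deadline) shape and its comparison against a monotonic microsecond clock come from the code above.

// Hypothetical embedder loop, not SDK code: compute an absolute monotonic
// deadline from a frame budget and hand any leftover time to the heap.
#include <chrono>
#include <cstdint>

struct HeapLike {
  void NotifyIdle(int64_t deadline_micros);  // stand-in for the VM heap
};

static int64_t NowMicros() {
  using namespace std::chrono;
  return duration_cast<microseconds>(steady_clock::now().time_since_epoch())
      .count();
}

// After a frame's real work is done, pass along whatever time remains.
void OnFrameEnd(HeapLike* heap, int64_t frame_start_micros) {
  constexpr int64_t kFrameBudgetMicros = 16000;  // ~60 fps frame budget
  const int64_t deadline = frame_start_micros + kFrameBudgetMicros;
  if (NowMicros() < deadline) {
    heap->NotifyIdle(deadline);  // idle phases re-check this deadline
  }
}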

runtime/vm/heap/scavenger.cc

@@ -651,15 +651,17 @@ void SemiSpace::Init() {
   page_cache_mutex = new Mutex(NOT_IN_PRODUCT("page_cache_mutex"));
 }

-void SemiSpace::Cleanup() {
-  {
-    MutexLocker ml(page_cache_mutex);
-    ASSERT(page_cache_size >= 0);
-    ASSERT(page_cache_size <= kPageCacheCapacity);
-    while (page_cache_size > 0) {
-      delete page_cache[--page_cache_size];
-    }
-  }
+void SemiSpace::DrainCache() {
+  MutexLocker ml(page_cache_mutex);
+  ASSERT(page_cache_size >= 0);
+  ASSERT(page_cache_size <= kPageCacheCapacity);
+  while (page_cache_size > 0) {
+    delete page_cache[--page_cache_size];
+  }
+}
+
+void SemiSpace::Cleanup() {
+  DrainCache();
   delete page_cache_mutex;
   page_cache_mutex = nullptr;
 }
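
A hypothetical call-order sketch (VmLifecycleSketch is not SDK code) of what this split enables: DrainCache() may now run any number of times while the VM is alive, and Cleanup() reuses it once at shutdown before destroying the mutex.

// Illustrative driver showing the call order the refactor permits.
void VmLifecycleSketch() {
  SemiSpace::Init();        // creates page_cache_mutex
  // ... scavenges fill and reuse the page cache ...
  SemiSpace::DrainCache();  // idle time: return cached pages to the OS
  // ... later scavenges may refill the cache ...
  SemiSpace::Cleanup();     // shutdown: drains once more, then deletes the mutex
}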

runtime/vm/heap/scavenger.h

@@ -168,6 +168,7 @@ class NewPage {
 class SemiSpace {
  public:
   static void Init();
+  static void DrainCache();
   static void Cleanup();
   static intptr_t CachedSize();