[vm, gc] Remove the evacuating compactor.

This was a temporary measure to verify the VM could handle moving old-space objects before we had the sliding compactor.

Bug: https://github.com/dart-lang/sdk/issues/30978
Change-Id: I4ffec413918481c0af4828d126930455f620935d
Reviewed-on: https://dart-review.googlesource.com/22663
Reviewed-by: Siva Annamalai <asiva@google.com>
Reviewed-by: Erik Corry <erikcorry@google.com>
This commit is contained in:
Ryan Macnak 2017-11-22 18:12:02 +00:00
parent 8ae3c5d450
commit e4d01aa323
7 changed files with 9 additions and 89 deletions

View file

@@ -2,8 +2,7 @@
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// VMOptions=--error_on_bad_type --error_on_bad_override
// VMOptions=--use_compactor_evacuating
// VMOptions=--use_compactor_sliding
// VMOptions=--use_compactor
import 'package:observatory/heap_snapshot.dart';
import 'package:observatory/models.dart' as M;

View file

@@ -2,8 +2,7 @@
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// VMOptions=--error_on_bad_type --error_on_bad_override
// VMOptions=--use_compactor_evacuating
// VMOptions=--use_compactor_sliding
// VMOptions=--use_compactor
import 'package:observatory/heap_snapshot.dart';
import 'package:observatory/models.dart' as M;

View file

@@ -175,10 +175,7 @@
D(trace_zones, bool, false, "Traces allocation sizes in the zone.") \
P(truncating_left_shift, bool, true, \
"Optimize left shift to truncate if possible") \
R(use_compactor_evacuating, false, bool, false, \
"Compact the heap during old-space GC.") \
R(use_compactor_sliding, false, bool, false, \
"Compact the heap during old-space GC.") \
P(use_compactor, bool, false, "Compact the heap during old-space GC.") \
P(use_cha_deopt, bool, true, \
"Use class hierarchy analysis even if it can cause deoptimization.") \
P(use_field_guards, bool, !USING_DBC, \

View file

@@ -414,44 +414,4 @@ void GCCompactor::ForwardPointersForSliding() {
heap_->ForwardWeakTables(this);
}
// Moves live objects to fresh pages. Returns the number of bytes moved.
intptr_t GCCompactor::EvacuatePages(HeapPage* pages) {
TIMELINE_FUNCTION_GC_DURATION(thread(), "EvacuatePages");
intptr_t moved_bytes = 0;
for (HeapPage* page = pages; page != NULL; page = page->next()) {
uword old_addr = page->object_start();
uword end = page->object_end();
while (old_addr < end) {
RawObject* old_obj = RawObject::FromAddr(old_addr);
const intptr_t size = old_obj->Size();
if (old_obj->IsMarked()) {
ASSERT(!old_obj->IsFreeListElement());
ASSERT(!old_obj->IsForwardingCorpse());
uword new_addr = heap_->old_space()->TryAllocateDataBumpLocked(
size, PageSpace::kForceGrowth);
if (new_addr == 0) {
OUT_OF_MEMORY();
}
memmove(reinterpret_cast<void*>(new_addr),
reinterpret_cast<void*>(old_addr), size);
RawObject* new_obj = RawObject::FromAddr(new_addr);
new_obj->ClearMarkBit();
ForwardingCorpse* forwarder =
ForwardingCorpse::AsForwarder(old_addr, size);
forwarder->set_target(new_obj);
heap_->ForwardWeakEntries(old_obj, new_obj);
moved_bytes += size;
}
old_addr += size;
}
}
return moved_bytes;
}
} // namespace dart

View file

@@ -18,7 +18,7 @@ class Heap;
class HeapPage;
class RawObject;
// Implements an evacuating compactor and a sliding compactor.
// Implements a sliding compactor.
class GCCompactor : public ValueObject,
private HandleVisitor,
private ObjectPointerVisitor {
@@ -31,8 +31,6 @@ class GCCompactor : public ValueObject,
void CompactBySliding(HeapPage* pages, FreeList* freelist, Mutex* mutex);
intptr_t EvacuatePages(HeapPage* page);
private:
void SlidePage(HeapPage* page);
uword SlideBlock(uword first_object, ForwardingPage* forwarding_page);

View file

@@ -984,10 +984,8 @@ void PageSpace::MarkSweep() {
mid3 = OS::GetCurrentMonotonicMicros();
if (FLAG_use_compactor_evacuating) {
EvacuatingCompact(thread);
} else if (FLAG_use_compactor_sliding) {
SlidingCompact(thread);
if (FLAG_use_compactor) {
Compact(thread);
} else if (FLAG_concurrent_sweep) {
ConcurrentSweep(isolate);
} else {
@@ -1041,7 +1039,7 @@ void PageSpace::MarkSweep() {
ml.NotifyAll();
}
if (FLAG_use_compactor_evacuating || FLAG_use_compactor_sliding) {
if (FLAG_use_compactor) {
// Const object tables are hashed by address: rehash.
SafepointOperationScope safepoint(thread);
thread->isolate()->RehashConstants();
@@ -1078,37 +1076,7 @@ void PageSpace::ConcurrentSweep(Isolate* isolate) {
&freelist_[HeapPage::kData]);
}
void PageSpace::EvacuatingCompact(Thread* thread) {
thread->isolate()->set_compaction_in_progress(true);
HeapPage* pages_to_evacuate = pages_;
pages_ = pages_tail_ = NULL;
GCCompactor compactor(thread, heap_);
intptr_t moved_bytes = compactor.EvacuatePages(pages_to_evacuate);
usage_.used_in_words -= (moved_bytes / kWordSize);
Become::FollowForwardingPointers(thread);
{
MutexLocker ml(pages_lock_);
HeapPage* page = pages_to_evacuate;
while (page != NULL) {
HeapPage* next = page->next();
IncreaseCapacityInWordsLocked(-(page->memory_->size() >> kWordSizeLog2));
page->Deallocate();
page = next;
}
}
thread->isolate()->set_compaction_in_progress(false);
if (FLAG_verify_after_gc) {
OS::PrintErr("Verifying after compacting...");
heap_->VerifyGC(kForbidMarked);
OS::PrintErr(" done.\n");
}
}
void PageSpace::SlidingCompact(Thread* thread) {
void PageSpace::Compact(Thread* thread) {
thread->isolate()->set_compaction_in_progress(true);
GCCompactor compactor(thread, heap_);
compactor.CompactBySliding(pages_, &freelist_[HeapPage::kData], pages_lock_);

View file

@@ -389,8 +389,7 @@ class PageSpace {
void BlockingSweep();
void ConcurrentSweep(Isolate* isolate);
void EvacuatingCompact(Thread* thread);
void SlidingCompact(Thread* thread);
void Compact(Thread* thread);
static intptr_t LargePageSizeInWordsFor(intptr_t size);