[vm, gc] Don't rebuild the remembered set during marking.

Instead, only filter dead objects from the remembered set at the end of marking.

This allows scavenges to occur during concurrent marking, and avoids having to change the mutator to handle a race with the concurrent marker over the remembered bit.
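For context, the remembered set records old-space objects that may hold pointers into new space, and scavenges use it as part of their root set. A minimal sketch of the before/after strategies, using an ordinary container and field names invented for illustration rather than the VM's StoreBuffer blocks and header bits:

    #include <iterator>
    #include <unordered_set>

    struct Object {
      bool is_marked = false;  // Set by the marker once the object is proven live.
    };
    using RememberedSet = std::unordered_set<Object*>;

    // Before this change: marking reset the set up front and re-inserted
    // surviving entries while visiting objects, so a concurrently running
    // mutator would race with the marker over the remembered bit.
    //
    // After this change: marking leaves the set untouched, and entries whose
    // objects were never marked (i.e. garbage) are dropped once marking ends:
    void FilterAfterMarking(RememberedSet* set) {
      for (auto it = set->begin(); it != set->end();) {
        it = (*it)->is_marked ? std::next(it) : set->erase(it);
      }
    }

The new GCMarker::Epilogue in the diff below does the same thing block by block over the store buffer.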

Bug: https://github.com/dart-lang/sdk/issues/34002
Change-Id: I116d7acc8fb57b8c3088febe6c196030a40fb319
Reviewed-on: https://dart-review.googlesource.com/67841
Reviewed-by: Siva Annamalai <asiva@google.com>
Ryan Macnak 2018-08-08 01:03:42 +00:00
parent c712470bb9
commit 9b5a4241de

@@ -152,7 +152,6 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
         page_space_(page_space),
         work_list_(marking_stack),
         delayed_weak_properties_(NULL),
-        visiting_old_object_(NULL),
         skipped_code_functions_(skipped_code_functions),
         marked_bytes_(0) {
     ASSERT(thread_->isolate() == isolate);
@@ -191,7 +190,6 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
         // The key is marked so we make sure to properly visit all pointers
         // originating from this weak property.
-        VisitingOldObject(cur_weak);
         cur_weak->VisitPointersNonvirtual(this);
       } else {
         // Requeue this weak property to be handled later.
@@ -200,7 +198,6 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
       // Advance to next weak property in the queue.
       cur_weak = reinterpret_cast<RawWeakProperty*>(next_weak);
     }
-    VisitingOldObject(NULL);
     return marked;
   }
@@ -211,13 +208,12 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     }
     if (raw_obj == NULL) {
-      ASSERT(visiting_old_object_ == NULL);
       return;
     }
     do {
       do {
         // First drain the marking stacks.
-        VisitingOldObject(raw_obj);
         const intptr_t class_id = raw_obj->GetClassId();
         intptr_t size;
@@ -241,12 +237,11 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
       // by the handling of weak properties.
       raw_obj = work_list_.Pop();
     } while (raw_obj != NULL);
-    VisitingOldObject(NULL);
   }

   void VisitPointers(RawObject** first, RawObject** last) {
     for (RawObject** current = first; current <= last; current++) {
-      MarkObject(*current, current);
+      MarkObject(*current);
     }
   }
@@ -302,11 +297,6 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     }
   }

-  void VisitingOldObject(RawObject* obj) {
-    ASSERT((obj == NULL) || obj->IsOldObject());
-    visiting_old_object_ = obj;
-  }
-
  private:
   void PushMarked(RawObject* raw_obj) {
     ASSERT(raw_obj->IsHeapObject());
@@ -316,41 +306,26 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     // Push the marked object on the marking stack.
     ASSERT(raw_obj->IsMarked());
-    // We acquired the mark bit => no other task is modifying the header.
-    // TODO(koda): For concurrent mutator, this needs synchronization. Consider
-    // clearing these bits already in the CAS for the mark bit.
-    raw_obj->ClearRememberedBitUnsynchronized();
     work_list_.Push(raw_obj);
   }

   static bool TryAcquireMarkBit(RawObject* raw_obj) {
-    if (!sync) {
-      if (raw_obj->IsMarked()) return false;
-      raw_obj->SetMarkBitUnsynchronized();
-      return true;
-    } else {
-      return raw_obj->TryAcquireMarkBit();
-    }
+    // While it might seem this is redundant with TryAcquireMarkBit, we must
+    // do this check first to avoid attempting an AMO on the read-only
+    // vm-isolate or image pages.
+    if (raw_obj->IsMarked()) return false;
+
+    return raw_obj->TryAcquireMarkBit();
   }

-  void MarkObject(RawObject* raw_obj, RawObject** p) {
-    // Fast exit if the raw object is a Smi.
-    if (!raw_obj->IsHeapObject()) {
-      return;
-    }
-    // Fast exit if the raw object is marked.
-    if (raw_obj->IsMarked()) {
-      return;
-    }
-    // TODO(koda): Investigate performance impact of alternative branching:
-    // if (smi or new) <-- can be done as single compare + conditional jump
-    // if (smi) return;
-    // else ...
-    // if (marked) return;
-    // ...
-    if (raw_obj->IsNewObject()) {
-      ProcessNewSpaceObject(raw_obj, p);
+  void MarkObject(RawObject* raw_obj) {
+    if (raw_obj->IsSmiOrNewObject()) {
       return;
     }
@@ -362,31 +337,6 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
     PushMarked(raw_obj);
   }

-  static bool TryAcquireRememberedBit(RawObject* raw_obj) {
-    if (!sync) {
-      if (raw_obj->IsRemembered()) return false;
-      raw_obj->SetRememberedBitUnsynchronized();
-      return true;
-    }
-    return raw_obj->TryAcquireRememberedBit();
-  }
-
-  void ProcessNewSpaceObject(RawObject* raw_obj, RawObject** p) {
-    // TODO(iposva): Add consistency check.
-    if ((visiting_old_object_ != NULL) &&
-        TryAcquireRememberedBit(visiting_old_object_)) {
-      // NOTE: We pass in the pointer to the address we are visiting
-      // allows us to get a distance from the object start. At some
-      // point we might want to store exact addresses in store buffers
-      // for locations far enough from the header, so that we do not
-      // need to walk big objects only to find the single new
-      // reference in the last word during scavenge. This doesn't seem
-      // to be a problem though currently.
-      ASSERT(p != NULL);
-      thread_->StoreBufferAddObjectGC(visiting_old_object_);
-    }
-  }
-
 #ifndef PRODUCT
   void UpdateLiveOld(intptr_t class_id, intptr_t size) {
     // TODO(koda): Support growing the array once mutator runs concurrently.
@@ -404,7 +354,6 @@ class MarkingVisitorBase : public ObjectPointerVisitor {
   PageSpace* page_space_;
   MarkerWorkList work_list_;
   RawWeakProperty* delayed_weak_properties_;
-  RawObject* visiting_old_object_;
   SkippedCodeFunctions* skipped_code_functions_;
   uintptr_t marked_bytes_;
@@ -459,11 +408,36 @@ class MarkingWeakVisitor : public HandleVisitor {
 void GCMarker::Prologue(Isolate* isolate) {
   isolate->PrepareForGC();
-  // The store buffers will be rebuilt as part of marking, reset them now.
-  isolate->store_buffer()->Reset();
 }

-void GCMarker::Epilogue(Isolate* isolate) {}
+void GCMarker::Epilogue(Isolate* isolate) {
+  // Filter collected objects from the remembered set.
+  StoreBuffer* store_buffer = isolate->store_buffer();
+  StoreBufferBlock* reading = store_buffer->Blocks();
+  StoreBufferBlock* writing = store_buffer->PopNonFullBlock();
+  while (reading != NULL) {
+    StoreBufferBlock* next = reading->next();
+    // Generated code appends to store buffers; tell MemorySanitizer.
+    MSAN_UNPOISON(reading, sizeof(*reading));
+    while (!reading->IsEmpty()) {
+      RawObject* raw_object = reading->Pop();
+      ASSERT(!raw_object->IsForwardingCorpse());
+      ASSERT(raw_object->IsRemembered());
+      if (raw_object->IsMarked()) {
+        writing->Push(raw_object);
+        if (writing->IsFull()) {
+          store_buffer->PushBlock(writing, StoreBuffer::kIgnoreThreshold);
+          writing = store_buffer->PopNonFullBlock();
+        }
+      }
+    }
+    reading->Reset();
+    // Return the emptied block for recycling (no need to check threshold).
+    store_buffer->PushBlock(reading, StoreBuffer::kIgnoreThreshold);
+    reading = next;
+  }
+  store_buffer->PushBlock(writing, StoreBuffer::kIgnoreThreshold);
+}

 void GCMarker::IterateRoots(Isolate* isolate,
                             ObjectPointerVisitor* visitor,
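Design note: the new Epilogue is the consumer side of the remembered set. The producer side is the mutator's write barrier, which remembers an old-space object the first time a pointer to a new-space object is stored into it. A rough sketch of that barrier, with simplified control flow and approximate helper names (the real barrier is generated code plus runtime helpers, not this function):

    // Illustrative only; names approximate the VM's RawObject/Thread APIs.
    void StorePointer(Thread* thread, RawObject* container, RawObject** slot,
                      RawObject* value) {
      *slot = value;  // The actual store.
      // Remember old->new stores exactly once; the remembered bit dedups
      // store-buffer entries.
      if (container->IsOldObject() && value->IsHeapObject() &&
          value->IsNewObject() && !container->IsRemembered()) {
        container->SetRememberedBit();
        thread->StoreBufferAddObject(container);
      }
    }

Because marking no longer clears remembered bits or resets the store buffer, this barrier (and the scavenger that drains the buffer) can keep running unmodified while the concurrent marker works; the only added cost is the filtering pass in the Epilogue above.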