diff --git a/runtime/vm/heap/gc_shared.h b/runtime/vm/heap/gc_shared.h index 1a0d98d79b9..a3112ffd605 100644 --- a/runtime/vm/heap/gc_shared.h +++ b/runtime/vm/heap/gc_shared.h @@ -233,6 +233,18 @@ void MournFinalized(GCVisitorType* visitor) { finalizer_dart->untag()->exchange_entries_collected(current_entry); current_entry->untag()->set_next(previous_head); const bool first_entry = previous_head.IsRawNull(); + + // If we're in the marker, we need to ensure that we release the store + // buffer afterwards. + // If we're in the scavenger and have the finalizer in old space and + // a new space entry, we don't need to release the store buffer. + if (!first_entry && previous_head->IsNewObject() && + current_entry->IsOldObject()) { + TRACE_FINALIZER("Entry %p (old) next is %p (new)", + current_entry->untag(), previous_head->untag()); + // We must release the thread's store buffer block. + } + // Schedule calling Dart finalizer. if (first_entry) { Isolate* isolate = finalizer->untag()->isolate_; diff --git a/runtime/vm/heap/marker.cc b/runtime/vm/heap/marker.cc index 2b77843a9fa..452e0d48413 100644 --- a/runtime/vm/heap/marker.cc +++ b/runtime/vm/heap/marker.cc @@ -273,6 +273,15 @@ class MarkingVisitorBase : public ObjectPointerVisitor { work_list_.Finalize(); deferred_work_list_.Finalize(); MournFinalized(this); + // MournFinalized inserts newly discovered dead entries into the + // linked list attached to the Finalizer. This might create + // cross-generational references which might be added to the store + // buffer. Release the store buffer to satisfy the invariant that + // thread local store buffer is empty after marking and all references + // are processed. + // TODO(http://dartbug.com/48957): `thread_` can differ from + // `Thread::Current()`. 
+ Thread::Current()->ReleaseStoreBuffer(); } void MournWeakProperties() { diff --git a/runtime/vm/object_test.cc b/runtime/vm/object_test.cc index e042e9320ba..207a9a447be 100644 --- a/runtime/vm/object_test.cc +++ b/runtime/vm/object_test.cc @@ -5043,6 +5043,87 @@ REPEAT_512(FINALIZER_CROSS_GEN_TEST_CASE) #undef FINALIZER_CROSS_GEN_TEST_CASE +// Force the marker to add a FinalizerEntry to the store buffer during marking. +// +// This test requires two entries, one in new space, one in old space. +// The scavenger should run first, adding the entry to collected_entries. +// The marker runs right after, swapping the collected_entries with the entry +// in old space, _and_ setting the next field to the entry in new space. +// This forces the entry to be added to the store-buffer _during_ marking. +// +// Then, the compactor needs to be used, which will move the entry in old +// space. +// +// If the thread's store buffer block is not released after that, the compactor +// will not update it, causing an outdated address to be released to the store +// buffer later. +// +// This causes two types of errors to trigger with --verify-store-buffer: +// 1. We see the address in the store buffer, but the object at that address +// is not an entry. +// Also can cause segfaults on reading garbage or unallocated memory. +// 2. We see the entry has a marked bit, but can't find it in the store buffer. 
+ISOLATE_UNIT_TEST_CASE(Finalizer_Regress_48843) { +#ifdef DEBUG + SetFlagScope sfs(&FLAG_trace_finalizers, true); + SetFlagScope sfs2(&FLAG_verify_store_buffer, true); +#endif + SetFlagScope sfs3(&FLAG_use_compactor, true); + + const auto& finalizer = Finalizer::Handle(Finalizer::New(Heap::kOld)); + finalizer.set_isolate(thread->isolate()); + + const auto& detach1 = + String::Handle(OneByteString::New("detach1", Heap::kNew)); + const auto& token1 = String::Handle(OneByteString::New("token1", Heap::kNew)); + const auto& detach2 = + String::Handle(OneByteString::New("detach2", Heap::kOld)); + const auto& token2 = String::Handle(OneByteString::New("token2", Heap::kOld)); + + { + HANDLESCOPE(thread); + const auto& entry1 = + FinalizerEntry::Handle(FinalizerEntry::New(finalizer, Heap::kNew)); + entry1.set_detach(detach1); + entry1.set_token(token1); + + const auto& entry2 = + FinalizerEntry::Handle(FinalizerEntry::New(finalizer, Heap::kOld)); + entry2.set_detach(detach2); + entry2.set_token(token2); + + { + HANDLESCOPE(thread); + const auto& value1 = + String::Handle(OneByteString::New("value1", Heap::kNew)); + entry1.set_value(value1); + const auto& value2 = + String::Handle(OneByteString::New("value2", Heap::kOld)); + entry2.set_value(value2); + // Lose both values. + } + + // First collect new space. + GCTestHelper::CollectNewSpace(); + // Then old space, this will make the old space entry point to the new + // space entry. + // Also, this must be a mark compact, not a mark sweep, to move the entry. + GCTestHelper::CollectOldSpace(); + } + + // Imagine callbacks running. + // Entries themselves become unreachable. + finalizer.set_entries_collected( + FinalizerEntry::Handle(FinalizerEntry::null())); + + // There should be a single entry in the store buffer. + // And it should crash when seeing the address in the buffer. + GCTestHelper::CollectNewSpace(); + + // We should no longer be processing the entries. 
+ GCTestHelper::CollectOldSpace(); + GCTestHelper::CollectNewSpace(); +} + void NativeFinalizer_TwoEntriesCrossGen_Finalizer(void* peer) { intptr_t* token = reinterpret_cast<intptr_t*>(peer); (*token)++; diff --git a/runtime/vm/thread.cc b/runtime/vm/thread.cc index 573e775d5a4..2a6495bdbe1 100644 --- a/runtime/vm/thread.cc +++ b/runtime/vm/thread.cc @@ -380,8 +380,10 @@ void Thread::ExitIsolateGroupAsHelper(bool bypass_safepoint) { void Thread::ReleaseStoreBuffer() { ASSERT(IsAtSafepoint()); + if (store_buffer_block_ == nullptr || store_buffer_block_->IsEmpty()) { + return; // Nothing to release. + } // Prevent scheduling another GC by ignoring the threshold. - ASSERT(store_buffer_block_ != nullptr); StoreBufferRelease(StoreBuffer::kIgnoreThreshold); // Make sure to get an *empty* block; the isolate needs all entries // at GC time.