[vm/aot] Scan deferred pools for code entries

When generating a snapshot with loading units, trace deferred
pools for code objects as well; otherwise we might miss code
references from one deferred unit to another (e.g. when the
only remaining reference to some code object in the root unit
actually comes from a deferred unit).
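
For illustration, a minimal standalone sketch (a toy model, not
VM code; the CodeObj type, Trace helper and unit constants below
are hypothetical) of how a root-unit code object that is
referenced only from a deferred unit's pool drops out of the
trace unless deferred pools are scanned too:

  // Toy model of the reachability argument above (not VM code).
  #include <cstdio>
  #include <set>
  #include <vector>

  struct CodeObj {
    int unit;                    // loading unit this code was assigned to
    std::vector<CodeObj*> pool;  // code objects referenced from its pool
  };

  // Collect everything discovered starting from a root-unit entry point.
  void Trace(CodeObj* code, bool scan_deferred_pools, int current_unit,
             std::set<CodeObj*>* reachable) {
    if (!reachable->insert(code).second) return;  // already visited
    const bool is_deferred = code->unit != current_unit;
    // Without the fix, the pool of a deferred code object is never
    // scanned, so anything reachable only through it is silently lost.
    if (is_deferred && !scan_deferred_pools) return;
    for (CodeObj* target : code->pool) {
      Trace(target, scan_deferred_pools, current_unit, reachable);
    }
  }

  int main() {
    constexpr int kRoot = 0, kDeferred = 1;
    CodeObj root_only{kRoot, {}};               // referenced only from...
    CodeObj deferred{kDeferred, {&root_only}};  // ...this deferred code
    CodeObj entry{kRoot, {&deferred}};

    std::set<CodeObj*> before, after;
    Trace(&entry, /*scan_deferred_pools=*/false, kRoot, &before);
    Trace(&entry, /*scan_deferred_pools=*/true, kRoot, &after);
    std::printf("without scanning deferred pools: %zu reachable\n", before.size());
    std::printf("with scanning deferred pools:    %zu reachable\n", after.size());
    return 0;
  }

The counts come out 2 vs. 3: root_only is only retained once the
pool of the deferred code object is also traced for code entries.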

Fixes a number of deferred-library tests (Crash -> Pass):

- language_2/deferred/split_constants_canonicalization_test/1
- vm/dart_2/deferred_isolate_test
- vm/dart_2/deferred_loading_and_weak_serialization_references_test/0
- vm/dart_2/deferred_loading_and_weak_serialization_references_test/1
- vm/dart_2/deferred_loading_call_modes_test/2

Fixes issue https://github.com/dart-lang/sdk/issues/45917

TEST=ci

Fixed: 45917
Cq-Include-Trybots: luci.dart.try:vm-kernel-precomp-dwarf-linux-product-x64-try,vm-kernel-precomp-linux-debug-x64-try,vm-kernel-precomp-linux-release-x64-try,vm-kernel-precomp-linux-release-simarm-try
Change-Id: Iccd3efcab6a5396d4b6f70968d9176ff18d7147c
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/198405
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Vyacheslav Egorov <vegorov@google.com>
Vyacheslav Egorov, 2021-05-11 23:08:18 +00:00 (committed by commit-bot@chromium.org)
commit 20526d4368 (parent 460887d814)
2 changed files with 48 additions and 32 deletions

@@ -1812,25 +1812,24 @@ class CodeSerializationCluster : public SerializationCluster {
   void Trace(Serializer* s, ObjectPtr object) {
     CodePtr code = Code::RawCast(object);
-    if (s->InCurrentLoadingUnit(code, /*record*/ true)) {
+    const bool is_deferred = !s->InCurrentLoadingUnitOrRoot(code);
+    if (is_deferred) {
+      s->RecordDeferredCode(code);
+    } else {
       objects_.Add(code);
     }
+    // Even if this code object is itself deferred we still need to scan
+    // the pool for references to other code objects (which might reside
+    // in the current loading unit).
+    ObjectPoolPtr pool = code->untag()->object_pool_;
     if (s->kind() == Snapshot::kFullAOT && FLAG_use_bare_instructions) {
-      ObjectPoolPtr pool = code->untag()->object_pool_;
-      if ((pool != ObjectPool::null()) && s->InCurrentLoadingUnit(code)) {
-        const intptr_t length = pool->untag()->length_;
-        uint8_t* entry_bits = pool->untag()->entry_bits();
-        for (intptr_t i = 0; i < length; i++) {
-          auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
-          if (entry_type == ObjectPool::EntryType::kTaggedObject) {
-            s->Push(pool->untag()->data()[i].raw_obj_);
-          }
-        }
-      }
+      TracePool(s, pool, /*only_code=*/is_deferred);
     } else {
-      if (s->InCurrentLoadingUnit(code->untag()->object_pool_)) {
-        s->Push(code->untag()->object_pool_);
+      if (s->InCurrentLoadingUnitOrRoot(pool)) {
+        s->Push(pool);
+      } else {
+        TracePool(s, pool, /*only_code=*/true);
       }
     }
@@ -1868,7 +1867,7 @@ class CodeSerializationCluster : public SerializationCluster {
 #endif
     }
-    if (s->InCurrentLoadingUnit(code->untag()->compressed_stackmaps_)) {
+    if (s->InCurrentLoadingUnitOrRoot(code->untag()->compressed_stackmaps_)) {
       s->Push(code->untag()->compressed_stackmaps_);
     }
@@ -1886,7 +1885,7 @@ class CodeSerializationCluster : public SerializationCluster {
     s->Push(code->untag()->catch_entry_);
     if (!FLAG_precompiled_mode || !FLAG_dwarf_stack_traces_mode) {
       s->Push(code->untag()->inlined_id_to_function_);
-      if (s->InCurrentLoadingUnit(code->untag()->code_source_map_)) {
+      if (s->InCurrentLoadingUnitOrRoot(code->untag()->code_source_map_)) {
         s->Push(code->untag()->code_source_map_);
       }
     }
@@ -1898,6 +1897,24 @@ class CodeSerializationCluster : public SerializationCluster {
 #endif
   }
 
+  void TracePool(Serializer* s, ObjectPoolPtr pool, bool only_code) {
+    if (pool == ObjectPool::null()) {
+      return;
+    }
+
+    const intptr_t length = pool->untag()->length_;
+    uint8_t* entry_bits = pool->untag()->entry_bits();
+    for (intptr_t i = 0; i < length; i++) {
+      auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
+      if (entry_type == ObjectPool::EntryType::kTaggedObject) {
+        const ObjectPtr target = pool->untag()->data()[i].raw_obj_;
+        if (!only_code || target->IsCode()) {
+          s->Push(target);
+        }
+      }
+    }
+  }
+
   struct CodeOrderInfo {
     CodePtr code;
     intptr_t order;
@@ -2074,7 +2091,7 @@ class CodeSerializationCluster : public SerializationCluster {
       // No need to write object pool out if we are producing full AOT
       // snapshot with bare instructions.
       if (!(kind == Snapshot::kFullAOT && FLAG_use_bare_instructions)) {
-        if (s->InCurrentLoadingUnit(code->untag()->object_pool_)) {
+        if (s->InCurrentLoadingUnitOrRoot(code->untag()->object_pool_)) {
           WriteField(code, object_pool_);
         } else {
           WriteFieldValue(object_pool_, ObjectPool::null());
@@ -2084,7 +2101,7 @@ class CodeSerializationCluster : public SerializationCluster {
       WriteField(code, exception_handlers_);
       WriteField(code, pc_descriptors_);
       WriteField(code, catch_entry_);
-      if (s->InCurrentLoadingUnit(code->untag()->compressed_stackmaps_)) {
+      if (s->InCurrentLoadingUnitOrRoot(code->untag()->compressed_stackmaps_)) {
        WriteField(code, compressed_stackmaps_);
      } else {
        WriteFieldValue(compressed_stackmaps_, CompressedStackMaps::null());
@@ -2094,7 +2111,7 @@ class CodeSerializationCluster : public SerializationCluster {
         WriteFieldValue(code_source_map_, CodeSourceMap::null());
       } else {
         WriteField(code, inlined_id_to_function_);
-        if (s->InCurrentLoadingUnit(code->untag()->code_source_map_)) {
+        if (s->InCurrentLoadingUnitOrRoot(code->untag()->code_source_map_)) {
           WriteField(code, code_source_map_);
         } else {
           WriteFieldValue(code_source_map_, CodeSourceMap::null());
@@ -6701,7 +6718,7 @@ SerializationCluster* Serializer::NewClusterForClass(intptr_t cid,
 #endif  // !DART_PRECOMPILED_RUNTIME
 }
 
-bool Serializer::InCurrentLoadingUnit(ObjectPtr obj, bool record) {
+bool Serializer::InCurrentLoadingUnitOrRoot(ObjectPtr obj) {
   if (loading_units_ == nullptr) return true;
 
   intptr_t unit_id = heap_->GetLoadingUnit(obj);
@@ -6709,17 +6726,15 @@ bool Serializer::InCurrentLoadingUnit(ObjectPtr obj, bool record) {
     // Not found in early assignment. Conservatively choose the root.
     // TODO(41974): Are these always type testing stubs?
     unit_id = LoadingUnit::kRootId;
     heap_->SetLoadingUnit(obj, unit_id);
   }
-  if (unit_id == LoadingUnit::kRootId) {
-    return true;
-  }
-  if (unit_id != current_loading_unit_id_) {
-    if (record) {
-      (*loading_units_)[unit_id]->AddDeferredObject(static_cast<CodePtr>(obj));
-    }
-    return false;
-  }
-  return true;
+  return unit_id == LoadingUnit::kRootId || unit_id == current_loading_unit_id_;
 }
+
+void Serializer::RecordDeferredCode(CodePtr code) {
+  const intptr_t unit_id = heap_->GetLoadingUnit(code);
+  ASSERT(unit_id != WeakTable::kNoValue && unit_id != LoadingUnit::kRootId);
+  (*loading_units_)[unit_id]->AddDeferredObject(code);
+}
+
 #if !defined(DART_PRECOMPILED_RUNTIME)
@@ -6786,7 +6801,7 @@ void Serializer::WriteInstructions(InstructionsPtr instr,
                                     bool deferred) {
   ASSERT(code != Code::null());
-  ASSERT(InCurrentLoadingUnit(code) != deferred);
+  ASSERT(InCurrentLoadingUnitOrRoot(code) != deferred);
   if (deferred) {
     return;
   }

@@ -434,7 +434,8 @@ class Serializer : public ThreadStackResource {
   // Returns true if [obj] has an artificial profile node associated with it.
   bool CreateArtificialNodeIfNeeded(ObjectPtr obj);
 
-  bool InCurrentLoadingUnit(ObjectPtr obj, bool record = false);
+  bool InCurrentLoadingUnitOrRoot(ObjectPtr obj);
+  void RecordDeferredCode(CodePtr ptr);
   GrowableArray<LoadingUnitSerializationData*>* loading_units() const {
     return loading_units_;
   }