dart-sdk/runtime/vm/object_reload.cc

// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/object.h"
#include "platform/unaligned.h"
#include "vm/code_patcher.h"
#include "vm/dart_entry.h"
#include "vm/hash_table.h"
#include "vm/isolate_reload.h"
#include "vm/log.h"
#include "vm/object_store.h"
#include "vm/resolver.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
namespace dart {
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
DECLARE_FLAG(bool, trace_reload);
DECLARE_FLAG(bool, trace_reload_verbose);
DECLARE_FLAG(bool, two_args_smi_icd);
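// Zeroes the edge counters stored in |function|'s ic_data_array. Edge
// counters record back-edge executions in unoptimized code, so clearing
// them on reload keeps stale counts from skewing later optimization
// decisions for the reloaded function.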
void CallSiteResetter::ZeroEdgeCounters(const Function& function) {
ic_data_array_ = function.ic_data_array();
if (ic_data_array_.IsNull()) {
return;
}
ASSERT(ic_data_array_.Length() > 0);
edge_counters_ ^=
ic_data_array_.At(Function::ICDataArrayIndices::kEdgeCounters);
if (edge_counters_.IsNull()) {
return;
}
// Fill edge counters array with zeros.
for (intptr_t i = 0; i < edge_counters_.Length(); i++) {
edge_counters_.SetAt(i, Object::smi_zero());
}
}
CallSiteResetter::CallSiteResetter(Zone* zone)
: zone_(zone),
instrs_(Instructions::Handle(zone)),
pool_(ObjectPool::Handle(zone)),
object_(Object::Handle(zone)),
name_(String::Handle(zone)),
new_cls_(Class::Handle(zone)),
new_lib_(Library::Handle(zone)),
new_function_(Function::Handle(zone)),
new_field_(Field::Handle(zone)),
entries_(Array::Handle(zone)),
old_target_(Function::Handle(zone)),
new_target_(Function::Handle(zone)),
caller_(Function::Handle(zone)),
args_desc_array_(Array::Handle(zone)),
ic_data_array_(Array::Handle(zone)),
edge_counters_(Array::Handle(zone)),
descriptors_(PcDescriptors::Handle(zone)),
ic_data_(ICData::Handle(zone)) {}
void CallSiteResetter::ResetCaches(const Code& code) {
// Iterate over the Code's object pool and reset all ICDatas.
// SubtypeTestCaches are reset during the same heap traversal as type
// testing stub deoptimization.
#ifdef TARGET_ARCH_IA32
// IA32 does not have an object pool, but we can iterate over all
// embedded objects by using the variable length data section.
if (!code.is_alive()) {
return;
}
instrs_ = code.instructions();
ASSERT(!instrs_.IsNull());
uword base_address = instrs_.PayloadStart();
intptr_t offsets_length = code.pointer_offsets_length();
const int32_t* offsets = code.untag()->data();
for (intptr_t i = 0; i < offsets_length; i++) {
int32_t offset = offsets[i];
ObjectPtr* object_ptr = reinterpret_cast<ObjectPtr*>(base_address + offset);
ObjectPtr raw_object = LoadUnaligned(object_ptr);
if (!raw_object->IsHeapObject()) {
continue;
}
object_ = raw_object;
if (object_.IsICData()) {
Reset(ICData::Cast(object_));
}
}
#else
pool_ = code.object_pool();
ASSERT(!pool_.IsNull());
ResetCaches(pool_);
#endif
}
static void FindICData(const Array& ic_data_array,
intptr_t deopt_id,
ICData* ic_data) {
// ic_data_array is sorted because of how it is constructed in
// Function::SaveICDataMap.
intptr_t lo = Function::ICDataArrayIndices::kFirstICData;
intptr_t hi = ic_data_array.Length() - 1;
while (lo <= hi) {
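// Upper midpoint; equal to (lo + hi + 1) / 2, but written without first
// computing lo + hi.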
intptr_t mid = (hi - lo + 1) / 2 + lo;
ASSERT(mid >= lo);
ASSERT(mid <= hi);
*ic_data ^= ic_data_array.At(mid);
if (ic_data->deopt_id() == deopt_id) {
return;
} else if (ic_data->deopt_id() > deopt_id) {
hi = mid - 1;
} else {
lo = mid + 1;
}
}
FATAL1("Missing deopt id %" Pd "\n", deopt_id);
}
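// Rewrites the instance call sites in unoptimized |code| back to the generic
// inline-cache stub. Monomorphic and megamorphic call sites may cache targets
// from the old program, so on hot reload they are reset to the
// polymorphic/unlinked state via the ICData patching below.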
void CallSiteResetter::ResetSwitchableCalls(const Code& code) {
if (code.is_optimized()) {
return; // No switchable calls in optimized code.
}
object_ = code.owner();
if (!object_.IsFunction()) {
return; // No switchable calls in stub code.
}
const Function& function = Function::Cast(object_);
if (function.kind() == UntaggedFunction::kIrregexpFunction) {
// Regex matchers do not support breakpoints or stepping, and they only call
// core library functions that cannot change due to reload. As a performance
// optimization, avoid this matching of ICData to PCs for these functions'
// large number of instance calls.
ASSERT(!function.is_debuggable());
return;
}
ic_data_array_ = function.ic_data_array();
if (ic_data_array_.IsNull()) {
// The megamorphic miss stub and some recognized functions don't populate
// their ic_data_array. Check that this only happens for functions without
// IC calls.
#if defined(DEBUG)
descriptors_ = code.pc_descriptors();
PcDescriptors::Iterator iter(descriptors_, UntaggedPcDescriptors::kIcCall);
while (iter.MoveNext()) {
FATAL1("%s has IC calls but no ic_data_array\n",
function.ToFullyQualifiedCString());
}
#endif
return;
}
descriptors_ = code.pc_descriptors();
PcDescriptors::Iterator iter(descriptors_, UntaggedPcDescriptors::kIcCall);
while (iter.MoveNext()) {
uword pc = code.PayloadStart() + iter.PcOffset();
CodePatcher::GetInstanceCallAt(pc, code, &object_);
// This check both avoids unnecessary patching to reduce log spam and
// prevents patching over breakpoint stubs.
if (!object_.IsICData()) {
FindICData(ic_data_array_, iter.DeoptId(), &ic_data_);
ASSERT(ic_data_.rebind_rule() == ICData::kInstance);
ASSERT(ic_data_.NumArgsTested() == 1);
const Code& stub =
ic_data_.is_tracking_exactness()
? StubCode::OneArgCheckInlineCacheWithExactnessCheck()
: StubCode::OneArgCheckInlineCache();
CodePatcher::PatchInstanceCallAt(pc, code, ic_data_, stub);
if (FLAG_trace_ic) {
OS::PrintErr("Instance call at %" Px
" resetting to polymorphic dispatch, %s\n",
pc, ic_data_.ToCString());
}
}
}
}
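// Resets every ICData referenced from |pool| back to its initial state.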
void CallSiteResetter::ResetCaches(const ObjectPool& pool) {
for (intptr_t i = 0; i < pool.Length(); i++) {
ObjectPool::EntryType entry_type = pool.TypeAt(i);
if (entry_type != ObjectPool::EntryType::kTaggedObject) {
continue;
}
object_ = pool.ObjectAt(i);
if (object_.IsICData()) {
Reset(ICData::Cast(object_));
}
}
}
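// Migrates field state from |old_cls| to this replacement class, matching
// fields by name: a non-const static field reuses the old field's storage so
// old and new code observe the same value, and an instance field inherits the
// old field's load-guard state.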
void Class::CopyStaticFieldValues(ProgramReloadContext* reload_context,
const Class& old_cls) const {
const Array& old_field_list = Array::Handle(old_cls.fields());
Field& old_field = Field::Handle();
String& old_name = String::Handle();
const Array& field_list = Array::Handle(fields());
Field& field = Field::Handle();
String& name = String::Handle();
for (intptr_t i = 0; i < field_list.Length(); i++) {
field = Field::RawCast(field_list.At(i));
name = field.name();
// Find the corresponding old field, if it exists, and migrate
// over the field value.
for (intptr_t j = 0; j < old_field_list.Length(); j++) {
old_field = Field::RawCast(old_field_list.At(j));
old_name = old_field.name();
if (name.Equals(old_name)) {
if (field.is_static()) {
// We only copy values if requested and if the field is not a const
// field. We let const fields be updated with a reload.
if (!field.is_const()) {
// Make the new field point to the old field value so that both
// old and new code see and update the same value.
reload_context->isolate_group()->FreeStaticField(field);
field.set_field_id_unsafe(old_field.field_id());
}
reload_context->AddStaticFieldMapping(old_field, field);
} else {
if (old_field.needs_load_guard()) {
ASSERT(!old_field.is_unboxing_candidate());
field.set_needs_load_guard(true);
field.set_is_unboxing_candidate_unsafe(false);
}
}
}
}
}
}
void Class::CopyCanonicalConstants(const Class& old_cls) const {
if (is_enum_class()) {
// We do not copy enum classes' canonical constants because we explicitly
// 'become' the old enum values to the new enum values.
return;
}
#if defined(DEBUG)
{
// Class has no canonical constants allocated.
const Array& my_constants = Array::Handle(constants());
ASSERT(my_constants.IsNull() || my_constants.Length() == 0);
}
#endif // defined(DEBUG).
// Copy old constants into new class.
const Array& old_constants = Array::Handle(old_cls.constants());
if (old_constants.IsNull() || old_constants.Length() == 0) {
return;
}
TIR_Print("Copied %" Pd " canonical constants for class `%s`\n",
old_constants.Length(), ToCString());
set_constants(old_constants);
}
void Class::CopyDeclarationType(const Class& old_cls) const {
const Type& old_declaration_type = Type::Handle(old_cls.declaration_type());
if (old_declaration_type.IsNull()) {
return;
}
set_declaration_type(old_declaration_type);
}
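// Hash table traits for a map keyed by enum value name (a String), used by
// Class::ReplaceEnum below to pair old and new enum instances.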
class EnumMapTraits {
public:
static bool ReportStats() { return false; }
static const char* Name() { return "EnumMapTraits"; }
static bool IsMatch(const Object& a, const Object& b) {
return a.ptr() == b.ptr();
}
static uword Hash(const Object& obj) {
ASSERT(obj.IsString());
return String::Cast(obj).Hash();
}
};
// Given an old enum class, add become mappings from old values to new values.
// Some notes about how we reload enums below:
//
// When an enum is reloaded the following three things can happen, possibly
// simultaneously.
//
// 1) A new enum value is added.
// This case is handled automatically.
// 2) Enum values are reordered.
// We pair old and new enums and the old enums 'become' the new ones so
// the ordering is always correct (i.e. enum indices match slots in values
// array).
// 3) An existing enum value is removed.
// Each enum class has a canonical 'deleted' enum sentinel instance.
// When an enum value is deleted, we 'become' all references to the 'deleted'
// sentinel value. The index value is -1.
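//
// Illustrative example (hypothetical source): reloading
//   enum Color { red, green, blue }
// as
//   enum Color { red, blue, yellow }
// pairs 'red' and 'blue' with their new instances (case 2), leaves the new
// value 'yellow' to be handled automatically (case 1), and maps the removed
// 'green' to the deleted enum sentinel (case 3).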
//
void Class::ReplaceEnum(ProgramReloadContext* reload_context,
const Class& old_enum) const {
// We only do this for finalized enum classes.
ASSERT(is_enum_class());
ASSERT(old_enum.is_enum_class());
ASSERT(is_finalized());
ASSERT(old_enum.is_finalized());
Thread* thread = Thread::Current();
Zone* zone = thread->zone();
ObjectStore* object_store = thread->isolate_group()->object_store();
Field& field = Field::Handle(zone);
String& enum_ident = String::Handle();
Instance& old_enum_value = Instance::Handle(zone);
Instance& enum_value = Instance::Handle(zone);
// The E.values array.
Array& old_enum_values = Array::Handle(zone);
// The E.values array.
Array& enum_values = Array::Handle(zone);
// The E._deleted_enum_sentinel instance.
Instance& old_deleted_enum_sentinel = Instance::Handle(zone);
// The E._deleted_enum_sentinel instance.
Instance& deleted_enum_sentinel = Instance::Handle(zone);
Array& enum_map_storage =
Array::Handle(zone, HashTables::New<UnorderedHashMap<EnumMapTraits> >(4));
ASSERT(!enum_map_storage.IsNull());
TIR_Print("Replacing enum `%s`\n", String::Handle(Name()).ToCString());
{
field = old_enum.LookupStaticField(Symbols::Values());
if (!field.IsNull()) {
ASSERT(field.is_static() && field.is_const());
old_enum_values ^= field.StaticConstFieldValue();
ASSERT(!old_enum_values.IsNull());
} else {
old_enum_values = Array::empty_array().ptr();
}
field = old_enum.LookupStaticField(Symbols::_DeletedEnumSentinel());
ASSERT(!field.IsNull() && field.is_static() && field.is_const());
old_deleted_enum_sentinel ^= field.StaticConstFieldValue();
ASSERT(!old_deleted_enum_sentinel.IsNull());
field = object_store->enum_name_field();
ASSERT(!field.IsNull());
UnorderedHashMap<EnumMapTraits> enum_map(enum_map_storage.ptr());
// Build a map of all enum name -> old enum instance.
for (intptr_t i = 0, n = old_enum_values.Length(); i < n; ++i) {
old_enum_value ^= old_enum_values.At(i);
ASSERT(!old_enum_value.IsNull());
enum_ident ^= old_enum_value.GetField(field);
VTIR_Print("Element %s being added to mapping\n", enum_ident.ToCString());
bool update = enum_map.UpdateOrInsert(enum_ident, old_enum_value);
VTIR_Print("Element %s added to mapping\n", enum_ident.ToCString());
ASSERT(!update);
}
// The storage given to the map may have been reallocated; remember the new
// address.
enum_map_storage = enum_map.Release().ptr();
}
bool enums_deleted = false;
{
field = LookupStaticField(Symbols::Values());
if (!field.IsNull()) {
ASSERT(field.is_static() && field.is_const());
enum_values ^= field.StaticConstFieldValue();
ASSERT(!enum_values.IsNull());
} else {
enum_values = Array::empty_array().ptr();
}
field = LookupStaticField(Symbols::_DeletedEnumSentinel());
ASSERT(!field.IsNull() && field.is_static() && field.is_const());
deleted_enum_sentinel ^= field.StaticConstFieldValue();
ASSERT(!deleted_enum_sentinel.IsNull());
field = object_store->enum_name_field();
ASSERT(!field.IsNull());
UnorderedHashMap<EnumMapTraits> enum_map(enum_map_storage.ptr());
// Add a become mapping from the old instances to the new instances.
for (intptr_t i = 0, n = enum_values.Length(); i < n; ++i) {
enum_value ^= enum_values.At(i);
ASSERT(!enum_value.IsNull());
enum_ident ^= enum_value.GetField(field);
old_enum_value ^= enum_map.GetOrNull(enum_ident);
if (old_enum_value.IsNull()) {
VTIR_Print("New element %s was not found in mapping\n",
enum_ident.ToCString());
} else {
VTIR_Print("Adding element `%s` to become mapping\n",
enum_ident.ToCString());
bool removed = enum_map.Remove(enum_ident);
ASSERT(removed);
reload_context->AddBecomeMapping(old_enum_value, enum_value);
}
}
enums_deleted = enum_map.NumOccupied() > 0;
// The storage given to the map may have been reallocated; remember the new
// address.
enum_map_storage = enum_map.Release().ptr();
}
// Map the old E.values array to the new E.values array.
reload_context->AddBecomeMapping(old_enum_values, enum_values);
// Map the old E._deleted_enum_sentinel to the new E._deleted_enum_sentinel.
reload_context->AddBecomeMapping(old_deleted_enum_sentinel,
deleted_enum_sentinel);
if (enums_deleted) {
// Map all deleted enums to the deleted enum sentinel value.
// TODO(johnmccutchan): Add this to the reload 'notices' list.
VTIR_Print(
"The following enum values were deleted from %s and will become the "
"deleted enum sentinel:\n",
old_enum.ToCString());
UnorderedHashMap<EnumMapTraits> enum_map(enum_map_storage.ptr());
UnorderedHashMap<EnumMapTraits>::Iterator it(&enum_map);
while (it.MoveNext()) {
const intptr_t entry = it.Current();
enum_ident = String::RawCast(enum_map.GetKey(entry));
ASSERT(!enum_ident.IsNull());
old_enum_value ^= enum_map.GetOrNull(enum_ident);
VTIR_Print("Element `%s` was deleted\n", enum_ident.ToCString());
reload_context->AddBecomeMapping(old_enum_value, deleted_enum_sentinel);
}
enum_map.Release();
}
}
void Class::PatchFieldsAndFunctions() const {
// Move all old functions and fields to a patch class so that they
// still refer to their original script.
const PatchClass& patch =
PatchClass::Handle(PatchClass::New(*this, Script::Handle(script())));
ASSERT(!patch.IsNull());
const Library& lib = Library::Handle(library());
patch.set_library_kernel_data(ExternalTypedData::Handle(lib.kernel_data()));
patch.set_library_kernel_offset(lib.kernel_offset());
const Array& funcs = Array::Handle(current_functions());
Function& func = Function::Handle();
Object& owner = Object::Handle();
for (intptr_t i = 0; i < funcs.Length(); i++) {
func = Function::RawCast(funcs.At(i));
if ((func.token_pos() == TokenPosition::kMinSource) ||
func.IsClosureFunction()) {
// Eval functions do not need to have their script updated.
//
// Closure functions refer to the parent's script which we can
// rely on being updated for us, if necessary.
continue;
}
// If the source for this function is already patched, leave it alone.
owner = func.RawOwner();
ASSERT(!owner.IsNull());
if (!owner.IsPatchClass()) {
ASSERT(owner.ptr() == this->ptr());
func.set_owner(patch);
}
}
Thread* thread = Thread::Current();
SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
const Array& field_list = Array::Handle(fields());
Field& field = Field::Handle();
for (intptr_t i = 0; i < field_list.Length(); i++) {
field = Field::RawCast(field_list.At(i));
owner = field.RawOwner();
ASSERT(!owner.IsNull());
if (!owner.IsPatchClass()) {
ASSERT(owner.ptr() == this->ptr());
field.set_owner(patch);
}
field.ForceDynamicGuardedCidAndLength();
}
}
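// For each static function in this class that has a tear-off closure, adds a
// become mapping from the old closure to the corresponding closure in
// |new_cls|, so existing tear-off references keep working across the reload.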
void Class::MigrateImplicitStaticClosures(ProgramReloadContext* irc,
const Class& new_cls) const {
const Array& funcs = Array::Handle(current_functions());
Thread* thread = Thread::Current();
Function& old_func = Function::Handle();
String& selector = String::Handle();
Function& new_func = Function::Handle();
Closure& old_closure = Closure::Handle();
Closure& new_closure = Closure::Handle();
for (intptr_t i = 0; i < funcs.Length(); i++) {
old_func ^= funcs.At(i);
if (old_func.is_static() && old_func.HasImplicitClosureFunction()) {
selector = old_func.name();
new_func = Resolver::ResolveFunction(thread->zone(), new_cls, selector);
if (!new_func.IsNull() && new_func.is_static()) {
old_func = old_func.ImplicitClosureFunction();
old_closure = old_func.ImplicitStaticClosure();
new_func = new_func.ImplicitClosureFunction();
new_closure = new_func.ImplicitStaticClosure();
if (old_closure.IsCanonical()) {
new_closure.SetCanonical();
}
irc->AddBecomeMapping(old_closure, new_closure);
}
}
}
}
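// The following ClassReasonForCancelling subclasses describe class-level
// changes that hot reload cannot apply; adding one to the reload context
// causes the whole reload to be rejected and rolled back.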
class EnumClassConflict : public ClassReasonForCancelling {
public:
EnumClassConflict(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
StringPtr ToString() {
return String::NewFormatted(
from_.is_enum_class()
? "Enum class cannot be redefined to be a non-enum class: %s"
: "Class cannot be redefined to be a enum class: %s",
from_.ToCString());
}
};
class EnsureFinalizedError : public ClassReasonForCancelling {
public:
EnsureFinalizedError(Zone* zone,
const Class& from,
const Class& to,
const Error& error)
: ClassReasonForCancelling(zone, from, to), error_(error) {}
private:
const Error& error_;
ErrorPtr ToError() { return error_.ptr(); }
StringPtr ToString() { return String::New(error_.ToErrorCString()); }
};
class ConstToNonConstClass : public ClassReasonForCancelling {
public:
ConstToNonConstClass(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
private:
StringPtr ToString() {
return String::NewFormatted("Const class cannot become non-const: %s",
from_.ToCString());
}
};
class ConstClassFieldRemoved : public ClassReasonForCancelling {
public:
ConstClassFieldRemoved(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
private:
StringPtr ToString() {
return String::NewFormatted("Const class cannot remove fields: %s",
from_.ToCString());
}
};
class NativeFieldsConflict : public ClassReasonForCancelling {
public:
NativeFieldsConflict(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
private:
StringPtr ToString() {
return String::NewFormatted("Number of native fields changed in %s",
from_.ToCString());
}
};
class TypeParametersChanged : public ClassReasonForCancelling {
public:
TypeParametersChanged(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
StringPtr ToString() {
return String::NewFormatted(
"Limitation: type parameters have changed for %s", from_.ToCString());
}
void AppendTo(JSONArray* array) {
JSONObject jsobj(array);
jsobj.AddProperty("type", "ReasonForCancellingReload");
jsobj.AddProperty("kind", "TypeParametersChanged");
jsobj.AddProperty("class", to_);
jsobj.AddProperty("message",
"Limitation: changing type parameters "
"does not work with hot reload.");
}
};
class PreFinalizedConflict : public ClassReasonForCancelling {
public:
PreFinalizedConflict(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
private:
StringPtr ToString() {
return String::NewFormatted(
"Original class ('%s') is prefinalized and replacement class "
"('%s') is not ",
from_.ToCString(), to_.ToCString());
}
};
class InstanceSizeConflict : public ClassReasonForCancelling {
public:
InstanceSizeConflict(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
private:
StringPtr ToString() {
return String::NewFormatted("Instance size mismatch between '%s' (%" Pd
") and replacement "
"'%s' ( %" Pd ")",
from_.ToCString(), from_.host_instance_size(),
to_.ToCString(), to_.host_instance_size());
}
};
// This is executed before iterating over the instances.
void Class::CheckReload(const Class& replacement,
ProgramReloadContext* context) const {
ASSERT(ProgramReloadContext::IsSameClass(*this, replacement));
if (!is_declaration_loaded()) {
// The old class hasn't been used in any meaningful way, so the VM is okay
// with any change.
return;
}
// Ensure is_enum_class etc have been set.
replacement.EnsureDeclarationLoaded();
// Class cannot change enum property.
if (is_enum_class() != replacement.is_enum_class()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
EnumClassConflict(context->zone(), *this, replacement));
return;
}
if (is_finalized()) {
// Make sure the declaration type's parameter count matches for the two
// classes, e.g. class A<int, B> {} cannot be replaced with class A<B> {}.
auto group_context = context->group_reload_context();
if (NumTypeParameters() != replacement.NumTypeParameters()) {
group_context->AddReasonForCancelling(
new (context->zone())
TypeParametersChanged(context->zone(), *this, replacement));
return;
}
}
if (is_finalized() || is_allocate_finalized()) {
auto thread = Thread::Current();
// Ensure the replacement class is also finalized.
const Error& error = Error::Handle(
is_allocate_finalized() ? replacement.EnsureIsAllocateFinalized(thread)
: replacement.EnsureIsFinalized(thread));
if (!error.IsNull()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
EnsureFinalizedError(context->zone(), *this, replacement, error));
return; // No reason to check other properties.
}
ASSERT(replacement.is_finalized());
TIR_Print("Finalized replacement class for %s\n", ToCString());
}
if (is_finalized() && is_const() && (constants() != Array::null()) &&
(Array::LengthOf(constants()) > 0)) {
// Consts can't become non-consts.
if (!replacement.is_const()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
ConstToNonConstClass(context->zone(), *this, replacement));
return;
}
// Consts can't lose fields.
bool field_removed = false;
const Array& old_fields =
Array::Handle(OffsetToFieldMap(true /* original classes */));
const Array& new_fields = Array::Handle(replacement.OffsetToFieldMap());
if (new_fields.Length() < old_fields.Length()) {
field_removed = true;
} else {
Field& old_field = Field::Handle();
Field& new_field = Field::Handle();
String& old_name = String::Handle();
String& new_name = String::Handle();
for (intptr_t i = 0, n = old_fields.Length(); i < n; i++) {
old_field ^= old_fields.At(i);
new_field ^= new_fields.At(i);
if (old_field.IsNull() != new_field.IsNull()) {
field_removed = true;
break;
}
if (!old_field.IsNull()) {
old_name = old_field.name();
new_name = new_field.name();
if (!old_name.Equals(new_name)) {
field_removed = true;
break;
}
}
}
}
if (field_removed) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
ConstClassFieldRemoved(context->zone(), *this, replacement));
return;
}
}
// Native field count cannot change.
if (num_native_fields() != replacement.num_native_fields()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
NativeFieldsConflict(context->zone(), *this, replacement));
return;
}
// Just checking.
ASSERT(is_enum_class() == replacement.is_enum_class());
ASSERT(num_native_fields() == replacement.num_native_fields());
if (is_finalized()) {
if (!CanReloadFinalized(replacement, context)) return;
}
if (is_prefinalized()) {
if (!CanReloadPreFinalized(replacement, context)) return;
}
TIR_Print("Class `%s` can be reloaded (%" Pd " and %" Pd ")\n", ToCString(),
id(), replacement.id());
}
bool Class::RequiresInstanceMorphing(const Class& replacement) const {
// Get the field maps for both classes. These field maps walk the class
// hierarchy.
const Array& fields =
Array::Handle(OffsetToFieldMap(true /* original classes */));
const Array& replacement_fields =
Array::Handle(replacement.OffsetToFieldMap());
// Check that the size of the instance is the same.
if (fields.Length() != replacement_fields.Length()) return true;
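// E.g. adding or removing an instance field usually changes the map length
// and therefore forces morphing of all existing instances (though alignment
// can mask a size change; see the next check).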
// Check that we have the same next field offset. This check is not
// redundant with the one above because the instance OffsetToFieldMap
// array length is based on the instance size (which may be aligned up).
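// For instance (hypothetical layout: 8-byte header and slots, 16-byte
// allocation alignment), a class with two fields (24 bytes, aligned to 32)
// and one with three fields (32 bytes) have equal field map lengths but
// next field offsets of 24 vs. 32.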
if (host_next_field_offset() != replacement.host_next_field_offset()) {
return true;
}
// Verify that field names / offsets match across the entire hierarchy.
Field& field = Field::Handle();
String& field_name = String::Handle();
Field& replacement_field = Field::Handle();
String& replacement_field_name = String::Handle();
for (intptr_t i = 0; i < fields.Length(); i++) {
if (fields.At(i) == Field::null()) {
ASSERT(replacement_fields.At(i) == Field::null());
continue;
}
field = Field::RawCast(fields.At(i));
replacement_field = Field::RawCast(replacement_fields.At(i));
field_name = field.name();
replacement_field_name = replacement_field.name();
if (!field_name.Equals(replacement_field_name)) return true;
}
return false;
}
bool Class::CanReloadFinalized(const Class& replacement,
ProgramReloadContext* context) const {
// Make sure the declaration type's number of type arguments matches for
// the two classes, e.g. class A<T, B> {} cannot be replaced with
// class A<B> {}.
auto group_context = context->group_reload_context();
auto shared_class_table =
group_context->isolate_group()->shared_class_table();
if (NumTypeArguments() != replacement.NumTypeArguments()) {
group_context->AddReasonForCancelling(
new (context->zone())
TypeParametersChanged(context->zone(), *this, replacement));
return false;
}
if (RequiresInstanceMorphing(replacement)) {
ASSERT(id() == replacement.id());
const classid_t cid = id();
// We unconditionally create an instance morpher. As a side effect of
// building the morpher, we will mark all new fields as late.
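// (New fields have no value in already-allocated instances; marking them
// late defers initialization until the first read after the reload.)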
auto instance_morpher = InstanceMorpher::CreateFromClassDescriptors(
context->zone(), shared_class_table, *this, replacement);
group_context->EnsureHasInstanceMorpherFor(cid, instance_morpher);
}
return true;
}
bool Class::CanReloadPreFinalized(const Class& replacement,
ProgramReloadContext* context) const {
// The replacement class must also be prefinalized.
if (!replacement.is_prefinalized()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
PreFinalizedConflict(context->zone(), *this, replacement));
return false;
}
// Check the instance sizes are equal.
if (host_instance_size() != replacement.host_instance_size()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
InstanceSizeConflict(context->zone(), *this, replacement));
return false;
}
return true;
}
void Library::CheckReload(const Library& replacement,
ProgramReloadContext* context) const {
// Carry over the loaded bit of any deferred prefixes.
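// Hypothetical Dart source being reloaded:
//   import 'util.dart' deferred as util;
// If `util.loadLibrary()` already completed, the replacement prefix must
// still report loaded, or post-reload uses of `util` members would fail.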
Reland "[vm] Check prefix.loadLibrary is called and returns before prefix members are used." This reverts commit 9a87cf91744975e4c6d305d635697fa76ed2c0b7. Reason for revert: Broken test disabled Original change's description: > Revert "[vm] Check prefix.loadLibrary is called and returns before prefix members are used." > > This reverts commit b0484ecbde5e09b0addf73bab016ac988800d888. > > Reason for revert: timeouts on Flutter integration tests > (https://github.com/dart-lang/sdk/issues/42350). > > Original change's description: > > [vm] Check prefix.loadLibrary is called and returns before prefix members are used. > > > > Restore checks against reloading a library with deferred prefixes. > > > > No loading is actually deferred. > > > > Bug: https://github.com/dart-lang/sdk/issues/26878 > > Bug: https://github.com/dart-lang/sdk/issues/41974 > > Change-Id: Iec2662de117453d596cca28dd9481a9751091ce9 > > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/149613 > > Commit-Queue: Ryan Macnak <rmacnak@google.com> > > Reviewed-by: Alexander Markov <alexmarkov@google.com> > > Reviewed-by: Siva Annamalai <asiva@google.com> > > TBR=rmacnak@google.com,alexmarkov@google.com,asiva@google.com > > # Not skipping CQ checks because original CL landed > 1 day ago. > > Bug: https://github.com/dart-lang/sdk/issues/26878, https://github.com/dart-lang/sdk/issues/41974 > Change-Id: I78709650e91d206b84a8ddd9171ef66d6cf1b008 > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/151169 > Reviewed-by: Alexander Markov <alexmarkov@google.com> > Commit-Queue: Alexander Markov <alexmarkov@google.com> TBR=rmacnak@google.com,alexmarkov@google.com,asiva@google.com # Not skipping CQ checks because this is a reland. Bug: https://github.com/dart-lang/sdk/issues/26878, https://github.com/dart-lang/sdk/issues/41974 Change-Id: Ife76bd51db65ca58e08655a9b8406c8ca483447f Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/151326 Reviewed-by: Ryan Macnak <rmacnak@google.com> Reviewed-by: Alexander Markov <alexmarkov@google.com> Commit-Queue: Ryan Macnak <rmacnak@google.com>
2020-06-16 21:11:40 +00:00
Object& object = Object::Handle();
LibraryPrefix& prefix = LibraryPrefix::Handle();
LibraryPrefix& original_prefix = LibraryPrefix::Handle();
String& name = String::Handle();
String& original_name = String::Handle();
Reland "[vm] Check prefix.loadLibrary is called and returns before prefix members are used." This reverts commit 9a87cf91744975e4c6d305d635697fa76ed2c0b7. Reason for revert: Broken test disabled Original change's description: > Revert "[vm] Check prefix.loadLibrary is called and returns before prefix members are used." > > This reverts commit b0484ecbde5e09b0addf73bab016ac988800d888. > > Reason for revert: timeouts on Flutter integration tests > (https://github.com/dart-lang/sdk/issues/42350). > > Original change's description: > > [vm] Check prefix.loadLibrary is called and returns before prefix members are used. > > > > Restore checks against reloading a library with deferred prefixes. > > > > No loading is actually deferred. > > > > Bug: https://github.com/dart-lang/sdk/issues/26878 > > Bug: https://github.com/dart-lang/sdk/issues/41974 > > Change-Id: Iec2662de117453d596cca28dd9481a9751091ce9 > > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/149613 > > Commit-Queue: Ryan Macnak <rmacnak@google.com> > > Reviewed-by: Alexander Markov <alexmarkov@google.com> > > Reviewed-by: Siva Annamalai <asiva@google.com> > > TBR=rmacnak@google.com,alexmarkov@google.com,asiva@google.com > > # Not skipping CQ checks because original CL landed > 1 day ago. > > Bug: https://github.com/dart-lang/sdk/issues/26878, https://github.com/dart-lang/sdk/issues/41974 > Change-Id: I78709650e91d206b84a8ddd9171ef66d6cf1b008 > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/151169 > Reviewed-by: Alexander Markov <alexmarkov@google.com> > Commit-Queue: Alexander Markov <alexmarkov@google.com> TBR=rmacnak@google.com,alexmarkov@google.com,asiva@google.com # Not skipping CQ checks because this is a reland. Bug: https://github.com/dart-lang/sdk/issues/26878, https://github.com/dart-lang/sdk/issues/41974 Change-Id: Ife76bd51db65ca58e08655a9b8406c8ca483447f Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/151326 Reviewed-by: Ryan Macnak <rmacnak@google.com> Reviewed-by: Alexander Markov <alexmarkov@google.com> Commit-Queue: Ryan Macnak <rmacnak@google.com>
2020-06-16 21:11:40 +00:00
DictionaryIterator it(replacement);
while (it.HasNext()) {
object = it.GetNext();
if (!object.IsLibraryPrefix()) continue;
prefix ^= object.ptr();
if (!prefix.is_deferred_load()) continue;
name = prefix.name();
DictionaryIterator original_it(*this);
while (original_it.HasNext()) {
object = original_it.GetNext();
if (!object.IsLibraryPrefix()) continue;
original_prefix ^= object.ptr();
if (!original_prefix.is_deferred_load()) continue;
original_name = original_prefix.name();
if (!name.Equals(original_name)) continue;
// The replacement of the old prefix with the new prefix
// in Isolate::loaded_prefixes_set_ implicitly carried
// the loaded state over to the new prefix.
context->AddBecomeMapping(original_prefix, prefix);
Reland "[vm] Check prefix.loadLibrary is called and returns before prefix members are used." This reverts commit 9a87cf91744975e4c6d305d635697fa76ed2c0b7. Reason for revert: Broken test disabled Original change's description: > Revert "[vm] Check prefix.loadLibrary is called and returns before prefix members are used." > > This reverts commit b0484ecbde5e09b0addf73bab016ac988800d888. > > Reason for revert: timeouts on Flutter integration tests > (https://github.com/dart-lang/sdk/issues/42350). > > Original change's description: > > [vm] Check prefix.loadLibrary is called and returns before prefix members are used. > > > > Restore checks against reloading a library with deferred prefixes. > > > > No loading is actually deferred. > > > > Bug: https://github.com/dart-lang/sdk/issues/26878 > > Bug: https://github.com/dart-lang/sdk/issues/41974 > > Change-Id: Iec2662de117453d596cca28dd9481a9751091ce9 > > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/149613 > > Commit-Queue: Ryan Macnak <rmacnak@google.com> > > Reviewed-by: Alexander Markov <alexmarkov@google.com> > > Reviewed-by: Siva Annamalai <asiva@google.com> > > TBR=rmacnak@google.com,alexmarkov@google.com,asiva@google.com > > # Not skipping CQ checks because original CL landed > 1 day ago. > > Bug: https://github.com/dart-lang/sdk/issues/26878, https://github.com/dart-lang/sdk/issues/41974 > Change-Id: I78709650e91d206b84a8ddd9171ef66d6cf1b008 > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/151169 > Reviewed-by: Alexander Markov <alexmarkov@google.com> > Commit-Queue: Alexander Markov <alexmarkov@google.com> TBR=rmacnak@google.com,alexmarkov@google.com,asiva@google.com # Not skipping CQ checks because this is a reland. Bug: https://github.com/dart-lang/sdk/issues/26878, https://github.com/dart-lang/sdk/issues/41974 Change-Id: Ife76bd51db65ca58e08655a9b8406c8ca483447f Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/151326 Reviewed-by: Ryan Macnak <rmacnak@google.com> Reviewed-by: Alexander Markov <alexmarkov@google.com> Commit-Queue: Ryan Macnak <rmacnak@google.com>
2020-06-16 21:11:40 +00:00
}
}
}
void CallSiteResetter::Reset(const ICData& ic) {
ICData::RebindRule rule = ic.rebind_rule();
if (rule == ICData::kInstance) {
const intptr_t num_args = ic.NumArgsTested();
const intptr_t len = ic.Length();
// We need at least one non-sentinel entry to be able to check for the
// Smi fast path case.
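// Hypothetical call site reaching this path:
//   a + b   // with Smi receivers: a 2-argument IC whose first entry is
//           // the (Smi, Smi) check against Smi's `+`.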
if (num_args == 2 && len >= 2) {
if (ic.IsImmutable()) {
return;
}
name_ = ic.target_name();
const Class& smi_class = Class::Handle(zone_, Smi::Class());
const Function& smi_op_target = Function::Handle(
zone_, Resolver::ResolveDynamicAnyArgs(zone_, smi_class, name_));
GrowableArray<intptr_t> class_ids(2);
Function& target = Function::Handle(zone_);
ic.GetCheckAt(0, &class_ids, &target);
if ((target.ptr() == smi_op_target.ptr()) && (class_ids[0] == kSmiCid) &&
(class_ids[1] == kSmiCid)) {
// The smi fast path case, preserve the initial entry but reset the
// count.
ic.ClearCountAt(0, *this);
ic.TruncateTo(/*num_checks=*/1, *this);
return;
}
// Fall back to the normal behavior with cached empty ICData arrays.
}
ic.Clear(*this);
ic.set_is_megamorphic(false);
return;
} else if (rule == ICData::kNoRebind || rule == ICData::kNSMDispatch) {
// TODO(30877) we should account for addition/removal of NSM.
// Don't rebind dispatchers.
return;
} else if (rule == ICData::kStatic || rule == ICData::kSuper) {
old_target_ = ic.GetTargetAt(0);
if (old_target_.IsNull()) {
FATAL("old_target is NULL.\n");
}
name_ = old_target_.name();
if (rule == ICData::kStatic) {
ASSERT(old_target_.is_static() ||
old_target_.kind() == UntaggedFunction::kConstructor);
// This can be incorrect if the call site was an unqualified invocation.
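// (Illustration: an unqualified call that previously resolved to a
// superclass member may, after reload, be shadowed by a new member in the
// caller's own class; re-resolving in the old owner would miss that.)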
new_cls_ = old_target_.Owner();
new_target_ = Resolver::ResolveFunction(zone_, new_cls_, name_);
if (new_target_.kind() != old_target_.kind()) {
new_target_ = Function::null();
}
} else {
// Super call.
caller_ = ic.Owner();
ASSERT(!caller_.is_static());
new_cls_ = caller_.Owner();
new_cls_ = new_cls_.SuperClass();
new_target_ = Resolver::ResolveDynamicAnyArgs(zone_, new_cls_, name_,
/*allow_add=*/true);
}
args_desc_array_ = ic.arguments_descriptor();
ArgumentsDescriptor args_desc(args_desc_array_);
if (new_target_.IsNull() ||
    !new_target_.AreValidArguments(args_desc, nullptr)) {
// TODO(rmacnak): Patch to a NSME stub.
VTIR_Print("Cannot rebind static call to %s from %s\n",
old_target_.ToCString(),
Object::Handle(zone_, ic.Owner()).ToCString());
return;
}
ic.ClearAndSetStaticTarget(new_target_, *this);
} else {
FATAL("Unexpected rebind rule.");
}
}
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
} // namespace dart