dart-sdk/runtime/vm/object_reload.cc

// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/object.h"
#include "vm/code_patcher.h"
#include "vm/hash_table.h"
#include "vm/isolate_reload.h"
#include "vm/log.h"
#include "vm/object_store.h"
#include "vm/resolver.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
namespace dart {
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
DECLARE_FLAG(bool, trace_reload);
DECLARE_FLAG(bool, trace_reload_verbose);
DECLARE_FLAG(bool, two_args_smi_icd);
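// Zeroes every edge counter of |function|. Slot 0 of the function's
// ic_data_array holds the edge-counter array; later slots hold ICData objects.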
void CallSiteResetter::ZeroEdgeCounters(const Function& function) {
ic_data_array_ = function.ic_data_array();
if (ic_data_array_.IsNull()) {
return;
}
ASSERT(ic_data_array_.Length() > 0);
edge_counters_ ^= ic_data_array_.At(0);
if (edge_counters_.IsNull()) {
return;
}
// Fill edge counters array with zeros.
for (intptr_t i = 0; i < edge_counters_.Length(); i++) {
edge_counters_.SetAt(i, Object::smi_zero());
}
}
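// Pre-allocate all handles in |zone| up front so the reset loops below can
// reuse them rather than allocating a new handle per iteration.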
CallSiteResetter::CallSiteResetter(Zone* zone)
: zone_(zone),
instrs_(Instructions::Handle(zone)),
pool_(ObjectPool::Handle(zone)),
object_(Object::Handle(zone)),
name_(String::Handle(zone)),
new_cls_(Class::Handle(zone)),
new_lib_(Library::Handle(zone)),
new_function_(Function::Handle(zone)),
new_field_(Field::Handle(zone)),
entries_(Array::Handle(zone)),
old_target_(Function::Handle(zone)),
new_target_(Function::Handle(zone)),
caller_(Function::Handle(zone)),
args_desc_array_(Array::Handle(zone)),
ic_data_array_(Array::Handle(zone)),
edge_counters_(Array::Handle(zone)),
descriptors_(PcDescriptors::Handle(zone)),
ic_data_(ICData::Handle(zone)) {}
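// A minimal usage sketch (the actual call sites live in the reload machinery,
// e.g. isolate_reload.cc):
//   CallSiteResetter resetter(Thread::Current()->zone());
//   resetter.ResetCaches(code);           // clear ICData/SubtypeTestCache
//   resetter.ResetSwitchableCalls(code);  // de-specialize instance calls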
void CallSiteResetter::ResetCaches(const Code& code) {
// Iterate over the Code's object pool and reset all ICDatas and
// SubtypeTestCaches.
#ifdef TARGET_ARCH_IA32
// IA32 does not have an object pool, but we can iterate over all
// embedded objects by using the variable length data section.
if (!code.is_alive()) {
return;
}
instrs_ = code.instructions();
ASSERT(!instrs_.IsNull());
uword base_address = instrs_.PayloadStart();
intptr_t offsets_length = code.pointer_offsets_length();
const int32_t* offsets = code.raw_ptr()->data();
for (intptr_t i = 0; i < offsets_length; i++) {
int32_t offset = offsets[i];
RawObject** object_ptr =
reinterpret_cast<RawObject**>(base_address + offset);
RawObject* raw_object = *object_ptr;
if (!raw_object->IsHeapObject()) {
continue;
}
object_ = raw_object;
if (object_.IsICData()) {
Reset(ICData::Cast(object_));
} else if (object_.IsSubtypeTestCache()) {
SubtypeTestCache::Cast(object_).Reset();
}
}
#else
pool_ = code.object_pool();
ASSERT(!pool_.IsNull());
ResetCaches(pool_);
#endif
}
static void FindICData(const Array& ic_data_array,
intptr_t deopt_id,
ICData* ic_data) {
// ic_data_array is sorted because of how it is constructed in
// Function::SaveICDataMap.
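// Slot 0 of ic_data_array holds the edge-counter array (see ZeroEdgeCounters),
// so the binary search over ICData entries starts at index 1.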
intptr_t lo = 1;
intptr_t hi = ic_data_array.Length() - 1;
while (lo <= hi) {
intptr_t mid = (hi - lo + 1) / 2 + lo;
ASSERT(mid >= lo);
ASSERT(mid <= hi);
*ic_data ^= ic_data_array.At(mid);
if (ic_data->deopt_id() == deopt_id) {
return;
} else if (ic_data->deopt_id() > deopt_id) {
hi = mid - 1;
} else {
lo = mid + 1;
}
}
FATAL1("Missing deopt id %" Pd "\n", deopt_id);
}
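// Patches every instance-call site in unoptimized |code| back to the generic
// one-argument IC stub, undoing monomorphic/megamorphic specialization (done
// on hot reload and when single-stepping begins).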
void CallSiteResetter::ResetSwitchableCalls(const Code& code) {
if (code.is_optimized()) {
return; // No switchable calls in optimized code.
}
object_ = code.owner();
if (!object_.IsFunction()) {
return; // No switchable calls in stub code.
}
const Function& function = Function::Cast(object_);
if (function.kind() == RawFunction::kIrregexpFunction) {
// Regex matchers do not support breakpoints or stepping, and they only call
// core library functions that cannot change due to reload. As a performance
// optimization, avoid this matching of ICData to PCs for these functions'
// large number of instance calls.
ASSERT(!function.is_debuggable());
return;
}
ic_data_array_ = function.ic_data_array();
if (ic_data_array_.IsNull()) {
// The megamorphic miss stub and some recognized functions don't populate
// their ic_data_array. Check that this only happens for functions without
// IC calls.
#if defined(DEBUG)
descriptors_ = code.pc_descriptors();
PcDescriptors::Iterator iter(descriptors_, RawPcDescriptors::kIcCall);
while (iter.MoveNext()) {
FATAL1("%s has IC calls but no ic_data_array\n", object_.ToCString());
}
#endif
return;
}
descriptors_ = code.pc_descriptors();
PcDescriptors::Iterator iter(descriptors_, RawPcDescriptors::kIcCall);
while (iter.MoveNext()) {
uword pc = code.PayloadStart() + iter.PcOffset();
CodePatcher::GetInstanceCallAt(pc, code, &object_);
// This check both avoids unnecessary patching to reduce log spam and
// prevents patching over breakpoint stubs.
if (!object_.IsICData()) {
FindICData(ic_data_array_, iter.DeoptId(), &ic_data_);
ASSERT(ic_data_.rebind_rule() == ICData::kInstance);
ASSERT(ic_data_.NumArgsTested() == 1);
const Code& stub =
ic_data_.is_tracking_exactness()
? StubCode::OneArgCheckInlineCacheWithExactnessCheck()
: StubCode::OneArgCheckInlineCache();
CodePatcher::PatchInstanceCallAt(pc, code, ic_data_, stub);
if (FLAG_trace_ic) {
OS::PrintErr("Instance call at %" Px
" resetting to polymorphic dispatch, %s\n",
pc, ic_data_.ToCString());
}
}
}
}
void CallSiteResetter::RebindStaticTargets(const Bytecode& bytecode) {
// Iterate over the Bytecode's object pool and rebind static call targets and
// field references to their replacement declarations.
pool_ = bytecode.object_pool();
ASSERT(!pool_.IsNull());
for (intptr_t i = 0; i < pool_.Length(); i++) {
ObjectPool::EntryType entry_type = pool_.TypeAt(i);
if (entry_type != ObjectPool::EntryType::kTaggedObject) {
continue;
}
object_ = pool_.ObjectAt(i);
if (object_.IsFunction()) {
const Function& old_function = Function::Cast(object_);
if (old_function.IsClosureFunction()) {
continue;
}
name_ = old_function.name();
new_cls_ = old_function.Owner();
if (new_cls_.IsTopLevel()) {
new_lib_ = new_cls_.library();
new_function_ = new_lib_.LookupLocalFunction(name_);
} else {
new_function_ = new_cls_.LookupFunction(name_);
}
if (!new_function_.IsNull() &&
(new_function_.is_static() == old_function.is_static()) &&
(new_function_.kind() == old_function.kind())) {
pool_.SetObjectAt(i, new_function_);
} else {
VTIR_Print("Cannot rebind function %s\n", old_function.ToCString());
}
} else if (object_.IsField()) {
const Field& old_field = Field::Cast(object_);
name_ = old_field.name();
new_cls_ = old_field.Owner();
if (new_cls_.IsTopLevel()) {
new_lib_ = new_cls_.library();
new_field_ = new_lib_.LookupLocalField(name_);
} else {
new_field_ = new_cls_.LookupField(name_);
}
if (!new_field_.IsNull() &&
(new_field_.is_static() == old_field.is_static())) {
pool_.SetObjectAt(i, new_field_);
} else {
VTIR_Print("Cannot rebind field %s\n", old_field.ToCString());
}
}
}
}
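// Resets all ICData and SubtypeTestCache entries referenced from |pool|.
// Shared by ResetCaches(const Code&) on architectures with an object pool.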
void CallSiteResetter::ResetCaches(const ObjectPool& pool) {
for (intptr_t i = 0; i < pool.Length(); i++) {
ObjectPool::EntryType entry_type = pool.TypeAt(i);
if (entry_type != ObjectPool::EntryType::kTaggedObject) {
continue;
}
object_ = pool.ObjectAt(i);
if (object_.IsICData()) {
Reset(ICData::Cast(object_));
} else if (object_.IsSubtypeTestCache()) {
SubtypeTestCache::Cast(object_).Reset();
}
}
}
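// Migrates static field values from |old_cls| into this replacement class.
// Matching non-const static fields reuse the old field-table slot so that old
// and new code observe and update the same value.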
void Class::CopyStaticFieldValues(IsolateReloadContext* reload_context,
const Class& old_cls) const {
// We only update values for non-enum classes.
const bool update_values = !is_enum_class();
const Array& old_field_list = Array::Handle(old_cls.fields());
Field& old_field = Field::Handle();
String& old_name = String::Handle();
const Array& field_list = Array::Handle(fields());
Field& field = Field::Handle();
String& name = String::Handle();
for (intptr_t i = 0; i < field_list.Length(); i++) {
field = Field::RawCast(field_list.At(i));
name = field.name();
// Find the corresponding old field, if it exists, and migrate
// over the field value.
for (intptr_t j = 0; j < old_field_list.Length(); j++) {
old_field = Field::RawCast(old_field_list.At(j));
old_name = old_field.name();
if (name.Equals(old_name)) {
if (field.is_static()) {
// We only copy values if requested and if the field is not a const
// field. We let const fields be updated with a reload.
if (update_values && !field.is_const()) {
// Make the new field point to the old field value so that both
// old and new code see and update the same value.
reload_context->isolate()->field_table()->Free(field.field_id());
field.set_field_id(old_field.field_id());
}
reload_context->AddStaticFieldMapping(old_field, field);
} else {
if (old_field.needs_load_guard()) {
ASSERT(!old_field.is_unboxing_candidate());
field.set_needs_load_guard(true);
field.set_is_unboxing_candidate(false);
}
}
}
}
}
}
void Class::CopyCanonicalConstants(const Class& old_cls) const {
if (is_enum_class()) {
// We do not copy an enum class's canonical constants because the old enum
// values are explicitly forwarded ('become') to the new enum values.
return;
}
#if defined(DEBUG)
{
// Class has no canonical constants allocated.
const Array& my_constants = Array::Handle(constants());
ASSERT(my_constants.Length() == 0);
}
#endif // defined(DEBUG).
// Copy old constants into new class.
const Array& old_constants = Array::Handle(old_cls.constants());
if (old_constants.IsNull() || old_constants.Length() == 0) {
return;
}
TIR_Print("Copied %" Pd " canonical constants for class `%s`\n",
old_constants.Length(), ToCString());
set_constants(old_constants);
}
void Class::CopyDeclarationType(const Class& old_cls) const {
const Type& old_declaration_type = Type::Handle(old_cls.declaration_type());
if (old_declaration_type.IsNull()) {
return;
}
set_declaration_type(old_declaration_type);
}
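// Hash-table traits for maps keyed by enum identifier (a String); used by
// Class::ReplaceEnum below to pair old and new enum instances by name.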
class EnumMapTraits {
public:
static bool ReportStats() { return false; }
static const char* Name() { return "EnumMapTraits"; }
static bool IsMatch(const Object& a, const Object& b) {
return a.raw() == b.raw();
}
static uword Hash(const Object& obj) {
ASSERT(obj.IsString());
return String::Cast(obj).Hash();
}
};
// Given an old enum class, add become mappings from old values to new values.
// Some notes about how we reload enums below:
//
// When an enum is reloaded the following three things can happen, possibly
// simultaneously.
//
// 1) A new enum value is added.
// This case is handled automatically.
// 2) Enum values are reordered.
// We pair old and new enums and the old enums 'become' the new ones so
// the ordering is always correct (i.e. enum indices match slots in values
// array)
// 3) An existing enum value is removed.
// Each enum class has a canonical 'deleted' enum sentinel instance.
// When an enum value is deleted, all references to it 'become' the 'deleted'
// sentinel value, whose index is -1.
//
void Class::ReplaceEnum(IsolateReloadContext* reload_context,
const Class& old_enum) const {
// We only do this for finalized enum classes.
ASSERT(is_enum_class());
ASSERT(old_enum.is_enum_class());
ASSERT(is_finalized());
ASSERT(old_enum.is_finalized());
Zone* zone = Thread::Current()->zone();
Array& enum_fields = Array::Handle(zone);
Field& field = Field::Handle(zone);
String& enum_ident = String::Handle();
Instance& old_enum_value = Instance::Handle(zone);
Instance& enum_value = Instance::Handle(zone);
// The E.values array.
Instance& old_enum_values = Instance::Handle(zone);
// The E.values array.
Instance& enum_values = Instance::Handle(zone);
// The E._deleted_enum_sentinel instance.
Instance& old_deleted_enum_sentinel = Instance::Handle(zone);
// The E._deleted_enum_sentinel instance.
Instance& deleted_enum_sentinel = Instance::Handle(zone);
Array& enum_map_storage =
Array::Handle(zone, HashTables::New<UnorderedHashMap<EnumMapTraits> >(4));
ASSERT(!enum_map_storage.IsNull());
TIR_Print("Replacing enum `%s`\n", String::Handle(Name()).ToCString());
{
UnorderedHashMap<EnumMapTraits> enum_map(enum_map_storage.raw());
// Build a map of all enum name -> old enum instance.
enum_fields = old_enum.fields();
for (intptr_t i = 0; i < enum_fields.Length(); i++) {
field = Field::RawCast(enum_fields.At(i));
enum_ident = field.name();
if (!field.is_static()) {
// Enum instances are only held in static fields.
continue;
}
if (enum_ident.Equals(Symbols::Values())) {
old_enum_values = field.StaticValue();
// Non-enum instance.
continue;
}
if (enum_ident.Equals(Symbols::_DeletedEnumSentinel())) {
old_deleted_enum_sentinel = field.StaticValue();
// Non-enum instance.
continue;
}
old_enum_value = field.StaticValue();
ASSERT(!old_enum_value.IsNull());
VTIR_Print("Element %s being added to mapping\n", enum_ident.ToCString());
bool update = enum_map.UpdateOrInsert(enum_ident, old_enum_value);
VTIR_Print("Element %s added to mapping\n", enum_ident.ToCString());
ASSERT(!update);
}
// The storage given to the map may have been reallocated, so remember the
// new address.
enum_map_storage = enum_map.Release().raw();
}
bool enums_deleted = false;
{
UnorderedHashMap<EnumMapTraits> enum_map(enum_map_storage.raw());
// Add a become mapping from the old instances to the new instances.
enum_fields = fields();
for (intptr_t i = 0; i < enum_fields.Length(); i++) {
field = Field::RawCast(enum_fields.At(i));
enum_ident = field.name();
if (!field.is_static()) {
// Enum instances are only held in static fields.
continue;
}
if (enum_ident.Equals(Symbols::Values())) {
enum_values = field.StaticValue();
// Non-enum instance.
continue;
}
if (enum_ident.Equals(Symbols::_DeletedEnumSentinel())) {
deleted_enum_sentinel = field.StaticValue();
// Non-enum instance.
continue;
}
enum_value = field.StaticValue();
ASSERT(!enum_value.IsNull());
old_enum_value ^= enum_map.GetOrNull(enum_ident);
if (old_enum_value.IsNull()) {
VTIR_Print("New element %s was not found in mapping\n",
enum_ident.ToCString());
} else {
VTIR_Print("Adding element `%s` to become mapping\n",
enum_ident.ToCString());
bool removed = enum_map.Remove(enum_ident);
ASSERT(removed);
reload_context->AddEnumBecomeMapping(old_enum_value, enum_value);
}
}
enums_deleted = enum_map.NumOccupied() > 0;
// The storage given to the map may have been reallocated, so remember the
// new address.
enum_map_storage = enum_map.Release().raw();
}
// Map the old E.values array to the new E.values array.
ASSERT(!old_enum_values.IsNull());
ASSERT(!enum_values.IsNull());
reload_context->AddEnumBecomeMapping(old_enum_values, enum_values);
// Map the old E._deleted_enum_sentinel to the new E._deleted_enum_sentinel.
ASSERT(!old_deleted_enum_sentinel.IsNull());
ASSERT(!deleted_enum_sentinel.IsNull());
reload_context->AddEnumBecomeMapping(old_deleted_enum_sentinel,
deleted_enum_sentinel);
if (enums_deleted) {
// Map all deleted enums to the deleted enum sentinel value.
// TODO(johnmccutchan): Add this to the reload 'notices' list.
VTIR_Print(
"The following enum values were deleted from %s and will become the "
"deleted enum sentinel:\n",
old_enum.ToCString());
UnorderedHashMap<EnumMapTraits> enum_map(enum_map_storage.raw());
UnorderedHashMap<EnumMapTraits>::Iterator it(&enum_map);
while (it.MoveNext()) {
const intptr_t entry = it.Current();
enum_ident = String::RawCast(enum_map.GetKey(entry));
ASSERT(!enum_ident.IsNull());
old_enum_value ^= enum_map.GetOrNull(enum_ident);
VTIR_Print("Element `%s` was deleted\n", enum_ident.ToCString());
reload_context->AddEnumBecomeMapping(old_enum_value,
deleted_enum_sentinel);
}
enum_map.Release();
}
}
void Class::PatchFieldsAndFunctions() const {
// Move all old functions and fields to a patch class so that they
// still refer to their original script.
const PatchClass& patch =
PatchClass::Handle(PatchClass::New(*this, Script::Handle(script())));
ASSERT(!patch.IsNull());
const Library& lib = Library::Handle(library());
if (!lib.is_declared_in_bytecode()) {
patch.set_library_kernel_data(ExternalTypedData::Handle(lib.kernel_data()));
patch.set_library_kernel_offset(lib.kernel_offset());
}
const Array& funcs = Array::Handle(functions());
Function& func = Function::Handle();
Object& owner = Object::Handle();
for (intptr_t i = 0; i < funcs.Length(); i++) {
func = Function::RawCast(funcs.At(i));
if ((func.token_pos() == TokenPosition::kMinSource) ||
func.IsClosureFunction()) {
// Eval functions do not need to have their script updated.
//
// Closure functions refer to the parent's script which we can
// rely on being updated for us, if necessary.
continue;
}
// If the source for this function is already patched, leave it alone.
owner = func.RawOwner();
ASSERT(!owner.IsNull());
if (!owner.IsPatchClass()) {
ASSERT(owner.raw() == this->raw());
func.set_owner(patch);
}
}
const Array& field_list = Array::Handle(fields());
Field& field = Field::Handle();
for (intptr_t i = 0; i < field_list.Length(); i++) {
field = Field::RawCast(field_list.At(i));
owner = field.RawOwner();
ASSERT(!owner.IsNull());
if (!owner.IsPatchClass()) {
ASSERT(owner.raw() == this->raw());
field.set_owner(patch);
}
field.ForceDynamicGuardedCidAndLength();
}
}
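// Forwards the implicit static ('tear-off') closures of old static functions
// to those of their replacements so existing closure references remain valid
// after reload.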
void Class::MigrateImplicitStaticClosures(IsolateReloadContext* irc,
const Class& new_cls) const {
const Array& funcs = Array::Handle(functions());
Function& old_func = Function::Handle();
String& selector = String::Handle();
Function& new_func = Function::Handle();
Instance& old_closure = Instance::Handle();
Instance& new_closure = Instance::Handle();
for (intptr_t i = 0; i < funcs.Length(); i++) {
old_func ^= funcs.At(i);
if (old_func.is_static() && old_func.HasImplicitClosureFunction()) {
selector = old_func.name();
new_func = new_cls.LookupFunction(selector);
if (!new_func.IsNull() && new_func.is_static()) {
old_func = old_func.ImplicitClosureFunction();
old_closure = old_func.ImplicitStaticClosure();
new_func = new_func.ImplicitClosureFunction();
new_closure = new_func.ImplicitStaticClosure();
if (old_closure.IsCanonical()) {
new_closure.SetCanonical();
}
irc->AddBecomeMapping(old_closure, new_closure);
}
}
}
}
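// Each ClassReasonForCancelling subclass below describes one condition under
// which a class cannot be hot-reloaded; Class::CheckReload reports them via
// context->group_reload_context()->AddReasonForCancelling().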
class EnumClassConflict : public ClassReasonForCancelling {
public:
EnumClassConflict(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
RawString* ToString() {
return String::NewFormatted(
from_.is_enum_class()
? "Enum class cannot be redefined to be a non-enum class: %s"
: "Class cannot be redefined to be a enum class: %s",
from_.ToCString());
}
};
class TypedefClassConflict : public ClassReasonForCancelling {
public:
TypedefClassConflict(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
RawString* ToString() {
return String::NewFormatted(
from_.IsTypedefClass()
? "Typedef class cannot be redefined to be a non-typedef class: %s"
: "Class cannot be redefined to be a typedef class: %s",
from_.ToCString());
}
};
class EnsureFinalizedError : public ClassReasonForCancelling {
public:
EnsureFinalizedError(Zone* zone,
const Class& from,
const Class& to,
const Error& error)
: ClassReasonForCancelling(zone, from, to), error_(error) {}
private:
const Error& error_;
RawError* ToError() { return error_.raw(); }
RawString* ToString() { return String::New(error_.ToErrorCString()); }
};
class NativeFieldsConflict : public ClassReasonForCancelling {
public:
NativeFieldsConflict(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
private:
RawString* ToString() {
return String::NewFormatted("Number of native fields changed in %s",
from_.ToCString());
}
};
class TypeParametersChanged : public ClassReasonForCancelling {
public:
TypeParametersChanged(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
RawString* ToString() {
return String::NewFormatted(
"Limitation: type parameters have changed for %s", from_.ToCString());
}
void AppendTo(JSONArray* array) {
JSONObject jsobj(array);
jsobj.AddProperty("type", "ReasonForCancellingReload");
jsobj.AddProperty("kind", "TypeParametersChanged");
jsobj.AddProperty("class", to_);
jsobj.AddProperty("message",
"Limitation: changing type parameters "
"does not work with hot reload.");
}
};
class PreFinalizedConflict : public ClassReasonForCancelling {
public:
PreFinalizedConflict(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
private:
RawString* ToString() {
return String::NewFormatted(
"Original class ('%s') is prefinalized and replacement class "
"('%s') is not ",
from_.ToCString(), to_.ToCString());
}
};
class InstanceSizeConflict : public ClassReasonForCancelling {
public:
InstanceSizeConflict(Zone* zone, const Class& from, const Class& to)
: ClassReasonForCancelling(zone, from, to) {}
private:
RawString* ToString() {
return String::NewFormatted("Instance size mismatch between '%s' (%" Pd
") and replacement "
"'%s' ( %" Pd ")",
Revert "[vm] Add support for real unboxed floating point fields in AOT" This reverts commit 9eb531bde45ae7b8417ff86cbb5f9ea496de50ae. Reason for revert: Bots are red. Some tests are failing. https://ci.chromium.org/p/dart/builders/ci.sandbox/vm-kernel-precomp-obfuscate-linux-release-x64/6039 https://ci.chromium.org/p/dart/builders/ci.sandbox/vm-kernel-precomp-android-release-arm_x64/957 Original change's description: > [vm] Add support for real unboxed floating point fields in AOT > > Non-nullable floating point fields (double, Float32x4, Float64x2) > are fully unboxed in their classes. > > A bitmap for each class was added to the shared class table in order to keep > track of the pointers of the classes. Since all classes in Flutter Gallery > have less than 64 fields, the bitmap is represented by a 64 bit integer and > fields whose offset is more than 64 words are not unboxed. > > The instance sizes and field offsets might change between target and host > in cross-compilation, since the number of words used to store unboxed fields > may differ. > > dart-aot Xeon > > SplayLatency -4.62% > SplayHarderLatency -4.17% > NavierStokes -2.20% > Tracer 8.72% > ParticleSystemPaint 2.90% > NBodySIMD 8.35% > NBody 25.59% > > With hack TFA to make doubles in Rect/Offset/Size classes in flutter non-nullable: > > flutter arm-v8: > > gallery total size: -1% > > matrix_utils_transform_rect_perspective -16.70% (less is better) > matrix_utils_transform_rect_affine -31.82% (less is better) > matrix_utils_transform_point_perspective -24.90% (less is better) > matrix_utils_transform_point_affine) -27.26% (less is better) > rrect_contains_bench -4.719% (less is better) > > Change-Id: I9ae09c9c3167d99f9efd071a92937aa51093fd1d > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/131824 > Commit-Queue: Victor Agnez Lima <victoragnez@google.com> > Reviewed-by: Martin Kustermann <kustermann@google.com> > Reviewed-by: Ryan Macnak <rmacnak@google.com> > Reviewed-by: Samir Jindel <sjindel@google.com> TBR=kustermann@google.com,rmacnak@google.com,sjindel@google.com,victoragnez@google.com Change-Id: Ic73858f6adb7f55c4129d4f46ff4731b378cb634 No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/134020 Reviewed-by: Zichang Guo <zichangguo@google.com> Commit-Queue: Zichang Guo <zichangguo@google.com>
2020-01-30 18:13:28 +00:00
from_.ToCString(), from_.instance_size(),
to_.ToCString(), to_.instance_size());
}
};
// This is executed before iterating over the instances.
void Class::CheckReload(const Class& replacement,
IsolateReloadContext* context) const {
ASSERT(IsolateReloadContext::IsSameClass(*this, replacement));
if (!is_declaration_loaded()) {
// The old class hasn't been used in any meaningful way, so the VM is okay
// with any change.
return;
}
// Ensure is_enum_class etc have been set.
replacement.EnsureDeclarationLoaded();
// Class cannot change enum property.
if (is_enum_class() != replacement.is_enum_class()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
EnumClassConflict(context->zone(), *this, replacement));
return;
}
// Class cannot change typedef property.
if (IsTypedefClass() != replacement.IsTypedefClass()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
TypedefClassConflict(context->zone(), *this, replacement));
return;
}
if (is_finalized()) {
// Ensure the replacement class is also finalized.
const Error& error =
Error::Handle(replacement.EnsureIsFinalized(Thread::Current()));
if (!error.IsNull()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
EnsureFinalizedError(context->zone(), *this, replacement, error));
return; // No reason to check other properties.
}
ASSERT(replacement.is_finalized());
TIR_Print("Finalized replacement class for %s\n", ToCString());
}
// Native field count cannot change.
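  // For example, a class extending NativeFieldWrapperClass1 cannot be
  // replaced by one extending NativeFieldWrapperClass2.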
if (num_native_fields() != replacement.num_native_fields()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
NativeFieldsConflict(context->zone(), *this, replacement));
return;
}
  // Sanity checks.
ASSERT(is_enum_class() == replacement.is_enum_class());
ASSERT(num_native_fields() == replacement.num_native_fields());
if (is_finalized()) {
if (!CanReloadFinalized(replacement, context)) return;
}
if (is_prefinalized()) {
if (!CanReloadPreFinalized(replacement, context)) return;
}
TIR_Print("Class `%s` can be reloaded (%" Pd " and %" Pd ")\n", ToCString(),
id(), replacement.id());
}
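// Returns true if existing instances must be morphed (reshaped to the new
// layout) when this class is replaced, i.e. whenever the field layout
// changes. For example, reloading `class A { var x; }` as
// `class A { var x, y; }` grows the instance and so requires morphing.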
bool Class::RequiresInstanceMorphing(const Class& replacement) const {
// Get the field maps for both classes. These field maps walk the class
// hierarchy.
const Array& fields =
Array::Handle(OffsetToFieldMap(true /* original classes */));
const Array& replacement_fields =
Array::Handle(replacement.OffsetToFieldMap());
// Check that the size of the instance is the same.
if (fields.Length() != replacement_fields.Length()) return true;
// Check that we have the same next field offset. This check is not
// redundant with the one above because the instance OffsetToFieldMap
// array length is based on the instance size (which may be aligned up).
Revert "[vm] Add support for real unboxed floating point fields in AOT" This reverts commit 9eb531bde45ae7b8417ff86cbb5f9ea496de50ae. Reason for revert: Bots are red. Some tests are failing. https://ci.chromium.org/p/dart/builders/ci.sandbox/vm-kernel-precomp-obfuscate-linux-release-x64/6039 https://ci.chromium.org/p/dart/builders/ci.sandbox/vm-kernel-precomp-android-release-arm_x64/957 Original change's description: > [vm] Add support for real unboxed floating point fields in AOT > > Non-nullable floating point fields (double, Float32x4, Float64x2) > are fully unboxed in their classes. > > A bitmap for each class was added to the shared class table in order to keep > track of the pointers of the classes. Since all classes in Flutter Gallery > have less than 64 fields, the bitmap is represented by a 64 bit integer and > fields whose offset is more than 64 words are not unboxed. > > The instance sizes and field offsets might change between target and host > in cross-compilation, since the number of words used to store unboxed fields > may differ. > > dart-aot Xeon > > SplayLatency -4.62% > SplayHarderLatency -4.17% > NavierStokes -2.20% > Tracer 8.72% > ParticleSystemPaint 2.90% > NBodySIMD 8.35% > NBody 25.59% > > With hack TFA to make doubles in Rect/Offset/Size classes in flutter non-nullable: > > flutter arm-v8: > > gallery total size: -1% > > matrix_utils_transform_rect_perspective -16.70% (less is better) > matrix_utils_transform_rect_affine -31.82% (less is better) > matrix_utils_transform_point_perspective -24.90% (less is better) > matrix_utils_transform_point_affine) -27.26% (less is better) > rrect_contains_bench -4.719% (less is better) > > Change-Id: I9ae09c9c3167d99f9efd071a92937aa51093fd1d > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/131824 > Commit-Queue: Victor Agnez Lima <victoragnez@google.com> > Reviewed-by: Martin Kustermann <kustermann@google.com> > Reviewed-by: Ryan Macnak <rmacnak@google.com> > Reviewed-by: Samir Jindel <sjindel@google.com> TBR=kustermann@google.com,rmacnak@google.com,sjindel@google.com,victoragnez@google.com Change-Id: Ic73858f6adb7f55c4129d4f46ff4731b378cb634 No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/134020 Reviewed-by: Zichang Guo <zichangguo@google.com> Commit-Queue: Zichang Guo <zichangguo@google.com>
2020-01-30 18:13:28 +00:00
if (next_field_offset() != replacement.next_field_offset()) return true;
// Verify that field names / offsets match across the entire hierarchy.
Field& field = Field::Handle();
String& field_name = String::Handle();
Field& replacement_field = Field::Handle();
String& replacement_field_name = String::Handle();
for (intptr_t i = 0; i < fields.Length(); i++) {
if (fields.At(i) == Field::null()) {
ASSERT(replacement_fields.At(i) == Field::null());
continue;
}
field = Field::RawCast(fields.At(i));
replacement_field = Field::RawCast(replacement_fields.At(i));
field_name = field.name();
replacement_field_name = replacement_field.name();
if (!field_name.Equals(replacement_field_name)) return true;
}
return false;
}
bool Class::CanReloadFinalized(const Class& replacement,
IsolateReloadContext* context) const {
  // Make sure the declaration type argument counts match for the two
  // classes, e.g. class A<int, B> {} cannot be replaced with class A<B> {}.
auto group_context = context->group_reload_context();
if (NumTypeArguments() != replacement.NumTypeArguments()) {
group_context->AddReasonForCancelling(
new (context->zone())
TypeParametersChanged(context->zone(), *this, replacement));
return false;
}
if (RequiresInstanceMorphing(replacement)) {
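    // The original and replacement class share the same class id during
    // reload, so the instance morpher is registered under that cid.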
ASSERT(id() == replacement.id());
const classid_t cid = id();
// We unconditionally create an instance morpher. As a side effect of
// building the morpher, we will mark all new fields as late.
auto instance_morpher = InstanceMorpher::CreateFromClassDescriptors(
context->zone(), context->isolate()->shared_class_table(), *this,
replacement);
group_context->EnsureHasInstanceMorpherFor(cid, instance_morpher);
}
return true;
}
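// Prefinalized classes (e.g. VM-internal classes whose instance size is
// known up front) are never morphed: the replacement must be prefinalized
// as well and must keep the exact same instance size.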
bool Class::CanReloadPreFinalized(const Class& replacement,
IsolateReloadContext* context) const {
  // The replacement class must also be prefinalized.
if (!replacement.is_prefinalized()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
PreFinalizedConflict(context->zone(), *this, replacement));
return false;
}
// Check the instance sizes are equal.
Revert "[vm] Add support for real unboxed floating point fields in AOT" This reverts commit 9eb531bde45ae7b8417ff86cbb5f9ea496de50ae. Reason for revert: Bots are red. Some tests are failing. https://ci.chromium.org/p/dart/builders/ci.sandbox/vm-kernel-precomp-obfuscate-linux-release-x64/6039 https://ci.chromium.org/p/dart/builders/ci.sandbox/vm-kernel-precomp-android-release-arm_x64/957 Original change's description: > [vm] Add support for real unboxed floating point fields in AOT > > Non-nullable floating point fields (double, Float32x4, Float64x2) > are fully unboxed in their classes. > > A bitmap for each class was added to the shared class table in order to keep > track of the pointers of the classes. Since all classes in Flutter Gallery > have less than 64 fields, the bitmap is represented by a 64 bit integer and > fields whose offset is more than 64 words are not unboxed. > > The instance sizes and field offsets might change between target and host > in cross-compilation, since the number of words used to store unboxed fields > may differ. > > dart-aot Xeon > > SplayLatency -4.62% > SplayHarderLatency -4.17% > NavierStokes -2.20% > Tracer 8.72% > ParticleSystemPaint 2.90% > NBodySIMD 8.35% > NBody 25.59% > > With hack TFA to make doubles in Rect/Offset/Size classes in flutter non-nullable: > > flutter arm-v8: > > gallery total size: -1% > > matrix_utils_transform_rect_perspective -16.70% (less is better) > matrix_utils_transform_rect_affine -31.82% (less is better) > matrix_utils_transform_point_perspective -24.90% (less is better) > matrix_utils_transform_point_affine) -27.26% (less is better) > rrect_contains_bench -4.719% (less is better) > > Change-Id: I9ae09c9c3167d99f9efd071a92937aa51093fd1d > Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/131824 > Commit-Queue: Victor Agnez Lima <victoragnez@google.com> > Reviewed-by: Martin Kustermann <kustermann@google.com> > Reviewed-by: Ryan Macnak <rmacnak@google.com> > Reviewed-by: Samir Jindel <sjindel@google.com> TBR=kustermann@google.com,rmacnak@google.com,sjindel@google.com,victoragnez@google.com Change-Id: Ic73858f6adb7f55c4129d4f46ff4731b378cb634 No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/134020 Reviewed-by: Zichang Guo <zichangguo@google.com> Commit-Queue: Zichang Guo <zichangguo@google.com>
2020-01-30 18:13:28 +00:00
if (instance_size() != replacement.instance_size()) {
context->group_reload_context()->AddReasonForCancelling(
new (context->zone())
InstanceSizeConflict(context->zone(), *this, replacement));
return false;
}
return true;
}
void Library::CheckReload(const Library& replacement,
IsolateReloadContext* context) const {
// Currently no library properties will prevent a reload.
}
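// Resets a call site's ICData according to its rebind rule:
//  - kInstance: drop the collected type feedback, except for the preserved
//    smi fast path entry handled below,
//  - kNoRebind / kNSMDispatch: leave dispatcher call sites untouched,
//  - kStatic / kSuper: re-resolve the target against the replacement
//    class hierarchy.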
void CallSiteResetter::Reset(const ICData& ic) {
ICData::RebindRule rule = ic.rebind_rule();
if (rule == ICData::kInstance) {
const intptr_t num_args = ic.NumArgsTested();
const bool tracking_exactness = ic.is_tracking_exactness();
const intptr_t len = ic.Length();
    // The smi fast path case below only applies when there is at least one
    // non-sentinel entry to check.
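    // A typical example is a binary-operator call site such as `x + y` that
    // has only ever seen Smi operands.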
if (num_args == 2 && len >= 2) {
if (ic.IsImmutable()) {
return;
}
name_ = ic.target_name();
const Class& smi_class = Class::Handle(zone_, Smi::Class());
const Function& smi_op_target = Function::Handle(
zone_, Resolver::ResolveDynamicAnyArgs(zone_, smi_class, name_));
GrowableArray<intptr_t> class_ids(2);
Function& target = Function::Handle(zone_);
ic.GetCheckAt(0, &class_ids, &target);
if ((target.raw() == smi_op_target.raw()) && (class_ids[0] == kSmiCid) &&
(class_ids[1] == kSmiCid)) {
        // This is the smi fast path case: preserve the initial entry but
        // reset its count.
ic.ClearCountAt(0);
ic.WriteSentinelAt(1);
entries_ = ic.entries();
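        // Keep only the smi entry and its trailing sentinel.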
entries_.Truncate(2 * ic.TestEntryLength());
return;
}
// Fall back to the normal behavior with cached empty ICData arrays.
}
entries_ = ICData::CachedEmptyICDataArray(num_args, tracking_exactness);
ic.set_entries(entries_);
ic.set_is_megamorphic(false);
return;
} else if (rule == ICData::kNoRebind || rule == ICData::kNSMDispatch) {
// TODO(30877) we should account for addition/removal of NSM.
// Don't rebind dispatchers.
return;
} else if (rule == ICData::kStatic || rule == ICData::kSuper) {
old_target_ = ic.GetTargetAt(0);
if (old_target_.IsNull()) {
FATAL("old_target is NULL.\n");
}
name_ = old_target_.name();
if (rule == ICData::kStatic) {
ASSERT(old_target_.is_static() ||
old_target_.kind() == RawFunction::kConstructor);
// This can be incorrect if the call site was an unqualified invocation.
new_cls_ = old_target_.Owner();
new_target_ = new_cls_.LookupFunction(name_);
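      // A target whose kind changed (e.g. a constructor replaced by a
      // static function of the same name) cannot be rebound in place.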
if (new_target_.kind() != old_target_.kind()) {
new_target_ = Function::null();
}
} else {
// Super call.
caller_ = ic.Owner();
ASSERT(!caller_.is_static());
new_cls_ = caller_.Owner();
new_cls_ = new_cls_.SuperClass();
new_target_ = Function::null();
while (!new_cls_.IsNull()) {
// TODO(rmacnak): Should use Resolver::ResolveDynamicAnyArgs to handle
// method-extractors and call-through-getters, but we're in a no
// safepoint scope here.
new_target_ = new_cls_.LookupDynamicFunction(name_);
if (!new_target_.IsNull()) {
break;
}
new_cls_ = new_cls_.SuperClass();
}
}
args_desc_array_ = ic.arguments_descriptor();
ArgumentsDescriptor args_desc(args_desc_array_);
if (new_target_.IsNull() ||
!new_target_.AreValidArguments(NNBDMode::kLegacyLib, args_desc, NULL)) {
// TODO(rmacnak): Patch to a NSME stub.
VTIR_Print("Cannot rebind static call to %s from %s\n",
old_target_.ToCString(),
Object::Handle(zone_, ic.Owner()).ToCString());
return;
}
ic.ClearAndSetStaticTarget(new_target_);
} else {
FATAL("Unexpected rebind rule.");
}
}
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
} // namespace dart