[vm/compiler] Add initial partial TTSes for implemented types.

Previously, no optimized TTSes were generated for implemented types, and
so they always fell back to the default TTS, which mostly depends on
calls into the runtime and on cached checks in SubtypeTestCaches. Now,
optimized TTSes are generated that check for certain compatible
implementing classes before falling back on the runtime/STC.

More specifically, the optimized TTSes for implemented types check for
the following cases:
1) The implemented type is instantiated and the checked class implements
   an instantiated subtype of the implemented type. The only check
   required is a class id match.
2) The instance type arguments of the checked class are compatible with
   the type arguments of the checked type.  That is, given the following
   declarations, where Base, Impl1, and Impl2 have the same number of
   parent type arguments:

```
class Impl1<K, V> implements Base<K, V>
class Impl2<V> implements Base<String, V>
```

   then the generated optimized TTS for Base<S, T>, where S and T are
   either type parameters or instantiated types, checks for instances of
   Base and Impl1, comparing the type arguments of the instance to S and
   T. The generated TTS does not currently check for Impl2, and thus
   when given an instance of Impl2, it falls back to the old runtime
   checking/SubtypeTestCache behavior, as sketched below.
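
A minimal sketch of the shape of such a generated check, written as
C++-style pseudocode rather than the emitted assembly (the structs,
helper names, and cid values below are illustrative assumptions, not
actual VM structures):

```
#include <cstdint>
#include <vector>

// Toy model of the fast path for `instance is Base<S, T>` with S and T
// instantiated. None of these types or names are real VM APIs.
struct TypeArg { intptr_t id; };   // stand-in for a canonical type
struct Instance {
  intptr_t cid;                    // class id of the instance
  std::vector<TypeArg> tav;        // instance type argument vector
};

constexpr intptr_t kBaseCid = 1, kImpl1Cid = 2, kImpl2Cid = 3;

// Stand-in for the fallback: runtime call / SubtypeTestCache lookup.
bool SlowPath(const Instance&) { return false; }

bool OptimizedTtsForBase(const Instance& instance, TypeArg s, TypeArg t) {
  // 1) cid check: only Base and Impl1 have compatible type arguments.
  if (instance.cid == kBaseCid || instance.cid == kImpl1Cid) {
    // 2) Compare the instance type arguments piecewise against S and T
    //    (canonical identity, matching the stub's equality check).
    if (instance.tav[0].id == s.id && instance.tav[1].id == t.id) {
      return true;  // fast path: subtype confirmed
    }
  }
  // kImpl2Cid, or a failed comparison: fall back to the runtime/STC.
  return SlowPath(instance);
}
```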

This compatibility restriction allows us to perform the same checks on
the loaded instance type arguments as is done for non-implemented types,
where the checked classes are subclasses and so naturally compatible in
this manner.

Note that two implementing classes whose instance type arguments are
compatible may store their instance type arguments at different field
offsets. Thus, we also split the classes being checked into groups that
share the same instance type arguments field offset, and load the
instance type arguments differently for each checked group.
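
A rough sketch of that per-group load, in the same pseudocode style as
above (the cids, offsets, group membership, and helper names here are
invented for illustration; the real stub emits cid-range checks and
field loads through the assembler):

```
#include <cstdint>

// Hypothetical cids and type-arguments field offsets.
constexpr intptr_t kBaseCid = 1, kImpl1Cid = 2, kImpl3Cid = 4;
constexpr intptr_t kOffsetA = 8;   // shared by Base and Impl1
constexpr intptr_t kOffsetB = 16;  // used only by Impl3

struct TypeArguments;  // opaque stand-in

// Stand-in for loading a (compressed) pointer field at a given offset.
const TypeArguments* LoadFieldAt(const uint8_t* instance, intptr_t offset) {
  return *reinterpret_cast<const TypeArguments* const*>(instance + offset);
}

// Each group performs its own cid check and TAV load; all groups then
// share the same per-type-argument comparison (as in the earlier sketch).
const TypeArguments* LoadInstanceTav(const uint8_t* instance, intptr_t cid) {
  if (cid == kBaseCid || cid == kImpl1Cid) {  // group A: offset kOffsetA
    return LoadFieldAt(instance, kOffsetA);
  }
  if (cid == kImpl3Cid) {                     // group B: offset kOffsetB
    return LoadFieldAt(instance, kOffsetB);
  }
  return nullptr;  // unchecked cid: fall back to the runtime/STC
}
```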

This CL also removes now-unused code in the HierarchyInfo class.

TEST=vm/cc/TTS_{Generic,}SubtypeRangeCheck

Cq-Include-Trybots: luci.dart.try:vm-kernel-linux-release-x64-try,vm-kernel-precomp-linux-release-x64-try,vm-kernel-precomp-nnbd-linux-release-x64-try,vm-kernel-nnbd-linux-release-x64-try,vm-kernel-linux-product-x64-try,vm-kernel-precomp-linux-product-x64-try,vm-kernel-linux-release-simarm-try,vm-kernel-linux-release-simarm64-try,vm-kernel-precomp-linux-release-simarm64-try,vm-kernel-precomp-linux-release-simarm-try,vm-kernel-precomp-linux-release-simarm_x64-try
Change-Id: I4c3aa23db2e75adbad9c15727b491669b2f3a189
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/209540
Reviewed-by: Martin Kustermann <kustermann@google.com>
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Commit-Queue: Tess Strickland <sstrickl@google.com>
Tess Strickland, 2021-08-16 13:02:34 +00:00, committed by commit-bot@chromium.org
parent 6e28f8bb40
commit 7c55965b76
19 changed files with 1166 additions and 452 deletions

View file

@@ -299,7 +299,8 @@ void AssemblerBase::Comment(const char* format, ...) {
}
bool AssemblerBase::EmittingComments() {
return FLAG_code_comments || FLAG_disassemble || FLAG_disassemble_optimized;
return FLAG_code_comments || FLAG_disassemble || FLAG_disassemble_optimized ||
FLAG_disassemble_stubs;
}
void AssemblerBase::Stop(const char* message) {

View file

@@ -2349,7 +2349,7 @@ void FlowGraphCompiler::GenerateCidRangesCheck(
// If there are no valid class ranges, the check will fail. If we are
// supposed to fall-through in the positive case, we'll explicitly jump to
// the [outside_range_lbl].
if (cid_ranges.length() == 1 && cid_ranges[0].IsIllegalRange()) {
if (cid_ranges.is_empty()) {
if (fall_through_if_inside) {
assembler->Jump(outside_range_lbl);
}

View file

@@ -54,31 +54,16 @@ DEFINE_FLAG(bool,
DECLARE_FLAG(bool, inline_alloc);
DECLARE_FLAG(bool, use_slow_path);
class SubclassFinder {
class SubtypeFinder {
public:
SubclassFinder(Zone* zone,
GrowableArray<intptr_t>* cids,
bool include_abstract)
SubtypeFinder(Zone* zone,
GrowableArray<intptr_t>* cids,
bool include_abstract)
: array_handles_(zone),
class_handles_(zone),
cids_(cids),
include_abstract_(include_abstract) {}
void ScanSubClasses(const Class& klass) {
if (include_abstract_ || !klass.is_abstract()) {
cids_->Add(klass.id());
}
ScopedHandle<GrowableObjectArray> array(&array_handles_);
ScopedHandle<Class> subclass(&class_handles_);
*array = klass.direct_subclasses();
if (!array->IsNull()) {
for (intptr_t i = 0; i < array->Length(); ++i) {
*subclass ^= array->At(i);
ScanSubClasses(*subclass);
}
}
}
void ScanImplementorClasses(const Class& klass) {
// An implementor of [klass] is
// * the [klass] itself.
@@ -134,36 +119,9 @@ const CidRangeVector& HierarchyInfo::SubtypeRangesForClass(
CidRangeVector& ranges = (*cid_ranges)[klass.id()];
if (ranges.length() == 0) {
if (!FLAG_precompiled_mode) {
BuildRangesForJIT(table, &ranges, klass, /*use_subtype_test=*/true,
include_abstract, exclude_null);
BuildRangesForJIT(table, &ranges, klass, include_abstract, exclude_null);
} else {
BuildRangesFor(table, &ranges, klass, /*use_subtype_test=*/true,
include_abstract, exclude_null);
}
}
return ranges;
}
const CidRangeVector& HierarchyInfo::SubclassRangesForClass(
const Class& klass) {
ClassTable* table = thread()->isolate_group()->class_table();
const intptr_t cid_count = table->NumCids();
if (cid_subclass_ranges_ == nullptr) {
cid_subclass_ranges_.reset(new CidRangeVector[cid_count]);
}
CidRangeVector& ranges = cid_subclass_ranges_[klass.id()];
if (ranges.length() == 0) {
if (!FLAG_precompiled_mode) {
BuildRangesForJIT(table, &ranges, klass,
/*use_subtype_test=*/true,
/*include_abstract=*/false,
/*exclude_null=*/false);
} else {
BuildRangesFor(table, &ranges, klass,
/*use_subtype_test=*/false,
/*include_abstract=*/false,
/*exclude_null=*/false);
BuildRangesFor(table, &ranges, klass, include_abstract, exclude_null);
}
}
return ranges;
@@ -175,18 +133,12 @@ const CidRangeVector& HierarchyInfo::SubclassRangesForClass(
void HierarchyInfo::BuildRangesFor(ClassTable* table,
CidRangeVector* ranges,
const Class& klass,
bool use_subtype_test,
bool include_abstract,
bool exclude_null) {
Zone* zone = thread()->zone();
ClassTable* class_table = thread()->isolate_group()->class_table();
// Only really used if `use_subtype_test == true`.
const Type& dst_type = Type::Handle(zone, Type::RawCast(klass.RareType()));
AbstractType& cls_type = AbstractType::Handle(zone);
Class& cls = Class::Handle(zone);
AbstractType& super_type = AbstractType::Handle(zone);
const intptr_t cid_count = table->NumCids();
// Iterate over all cids to find the ones to be included in the ranges.
@@ -210,24 +162,14 @@ void HierarchyInfo::BuildRangesFor(ClassTable* table,
if (!include_abstract && cls.is_abstract()) continue;
if (cls.IsTopLevel()) continue;
// We are either interested in [CidRange]es of subclasses or subtypes.
// We are interested in [CidRange]es of subtypes.
bool test_succeeded = false;
if (cid == kNullCid) {
ASSERT(exclude_null);
test_succeeded = false;
} else if (use_subtype_test) {
} else {
cls_type = cls.RareType();
test_succeeded = cls_type.IsSubtypeOf(dst_type, Heap::kNew);
} else {
while (!cls.IsObjectClass()) {
if (cls.ptr() == klass.ptr()) {
test_succeeded = true;
break;
}
super_type = cls.super_type();
const intptr_t type_class_id = super_type.type_class_id();
cls = class_table->At(type_class_id);
}
}
if (test_succeeded) {
@@ -245,41 +187,31 @@ void HierarchyInfo::BuildRangesFor(ClassTable* table,
}
}
// Construct last range (either close open one, or add invalid).
// Construct last range if there is a open one.
if (start != -1) {
ASSERT(start <= end);
CidRange range(start, end);
ranges->Add(range);
} else if (ranges->length() == 0) {
CidRange range;
ASSERT(range.IsIllegalRange());
ranges->Add(range);
}
}
void HierarchyInfo::BuildRangesForJIT(ClassTable* table,
CidRangeVector* ranges,
const Class& dst_klass,
bool use_subtype_test,
bool include_abstract,
bool exclude_null) {
if (dst_klass.InVMIsolateHeap()) {
BuildRangesFor(table, ranges, dst_klass, use_subtype_test, include_abstract,
exclude_null);
BuildRangesFor(table, ranges, dst_klass, include_abstract, exclude_null);
return;
}
Zone* zone = thread()->zone();
GrowableArray<intptr_t> cids;
SubclassFinder finder(zone, &cids, include_abstract);
SubtypeFinder finder(zone, &cids, include_abstract);
{
SafepointReadRwLocker ml(thread(),
thread()->isolate_group()->program_lock());
if (use_subtype_test) {
finder.ScanImplementorClasses(dst_klass);
} else {
finder.ScanSubClasses(dst_klass);
}
finder.ScanImplementorClasses(dst_klass);
}
// Sort all collected cids.
@@ -429,17 +361,6 @@ bool HierarchyInfo::CanUseGenericSubtypeRangeCheckFor(
ASSERT(type_class.NumTypeParameters() > 0 &&
type.arguments() != TypeArguments::null());
// If the type class is implemented the different implementations might have
// their type argument vector stored at different offsets and we can therefore
// not perform our optimized [CidRange]-based implementation.
//
// TODO(kustermann): If the class is implemented but all implementations
// store the instantator type argument vector at the same offset we can
// still do it!
if (type_class.is_implemented()) {
return false;
}
const TypeArguments& ta =
TypeArguments::Handle(zone, Type::Cast(type).arguments());
ASSERT(ta.Length() == num_type_arguments);
@@ -477,11 +398,10 @@ bool HierarchyInfo::InstanceOfHasClassRange(const AbstractType& type,
/*exclude_null=*/true);
if (ranges.length() == 1) {
const CidRangeValue& range = ranges[0];
if (!range.IsIllegalRange()) {
*lower_limit = range.cid_start;
*upper_limit = range.cid_end;
return true;
}
ASSERT(!range.IsIllegalRange());
*lower_limit = range.cid_start;
*upper_limit = range.cid_end;
return true;
}
}
return false;

View file

@@ -5,6 +5,7 @@
#ifndef RUNTIME_VM_COMPILER_BACKEND_IL_H_
#define RUNTIME_VM_COMPILER_BACKEND_IL_H_
#include "vm/hash_map.h"
#if defined(DART_PRECOMPILED_RUNTIME)
#error "AOT runtime should not use compiler sources (including header files)"
#endif // defined(DART_PRECOMPILED_RUNTIME)
@@ -247,17 +248,19 @@ class HierarchyInfo : public ThreadStackResource {
cid_subtype_ranges_nullable_(),
cid_subtype_ranges_abstract_nullable_(),
cid_subtype_ranges_nonnullable_(),
cid_subtype_ranges_abstract_nonnullable_(),
cid_subclass_ranges_() {
cid_subtype_ranges_abstract_nonnullable_() {
thread->set_hierarchy_info(this);
}
~HierarchyInfo() { thread()->set_hierarchy_info(NULL); }
// Returned from FindBestTAVOffset and SplitOnConsistentTypeArguments
// to denote a failure to find a compatible concrete, finalized class.
static const intptr_t kNoCompatibleTAVOffset = 0;
const CidRangeVector& SubtypeRangesForClass(const Class& klass,
bool include_abstract,
bool exclude_null);
const CidRangeVector& SubclassRangesForClass(const Class& klass);
bool InstanceOfHasClassRange(const AbstractType& type,
intptr_t* lower_limit,
@@ -284,13 +287,11 @@
private:
// Does not use any hierarchy information available in the system but computes
// it via O(n) class table traversal. The boolean parameters denote:
// use_subtype_test : if set, IsSubtypeOf() is used to compute inclusion
// include_abstract : if set, include abstract types (don't care otherwise)
// exclude_null : if set, exclude null types (don't care otherwise)
void BuildRangesFor(ClassTable* table,
CidRangeVector* ranges,
const Class& klass,
bool use_subtype_test,
bool include_abstract,
bool exclude_null);
@@ -299,7 +300,6 @@
void BuildRangesForJIT(ClassTable* table,
CidRangeVector* ranges,
const Class& klass,
bool use_subtype_test,
bool include_abstract,
bool exclude_null);
@@ -307,7 +307,6 @@
std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_abstract_nullable_;
std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_nonnullable_;
std::unique_ptr<CidRangeVector[]> cid_subtype_ranges_abstract_nonnullable_;
std::unique_ptr<CidRangeVector[]> cid_subclass_ranges_;
};
// An embedded container with N elements of type T. Used (with partial

View file

@@ -256,9 +256,7 @@ void FlowGraphTypePropagator::VisitCheckClass(CheckClassInstr* check) {
CompileType result = CompileType::None();
for (intptr_t i = 0, n = cids.length(); i < n; i++) {
CidRange* cid_range = cids.At(i);
if (cid_range->IsIllegalRange()) {
return;
}
ASSERT(!cid_range->IsIllegalRange());
for (intptr_t cid = cid_range->cid_start; cid <= cid_range->cid_end;
cid++) {
CompileType tp = CompileType::FromCid(cid);

View file

@@ -146,9 +146,6 @@ compiler_sources = [
"stub_code_compiler_arm64.cc",
"stub_code_compiler_ia32.cc",
"stub_code_compiler_x64.cc",
"type_testing_stubs_arm.cc",
"type_testing_stubs_arm64.cc",
"type_testing_stubs_x64.cc",
"write_barrier_elimination.cc",
"write_barrier_elimination.h",
]

View file

@@ -1,29 +0,0 @@
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h"
#if defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/type_testing_stubs.h"
#define __ assembler->
namespace dart {
void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
compiler::Assembler* assembler,
compiler::UnresolvedPcRelativeCalls* unresolved_calls,
const Code& slow_type_test_stub,
HierarchyInfo* hi,
const Type& type,
const Class& type_class) {
BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class);
__ Branch(compiler::Address(
THR, compiler::target::Thread::slow_type_test_entry_point_offset()));
}
} // namespace dart
#endif // defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)

View file

@@ -1,32 +0,0 @@
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h"
#if defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/type_testing_stubs.h"
#define __ assembler->
namespace dart {
void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
compiler::Assembler* assembler,
compiler::UnresolvedPcRelativeCalls* unresolved_calls,
const Code& slow_type_test_stub,
HierarchyInfo* hi,
const Type& type,
const Class& type_class) {
BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class);
__ ldr(
TMP,
compiler::Address(
THR, compiler::target::Thread::slow_type_test_entry_point_offset()));
__ br(TMP);
}
} // namespace dart
#endif // defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)

View file

@@ -1,29 +0,0 @@
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/globals.h"
#if defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/type_testing_stubs.h"
#define __ assembler->
namespace dart {
void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
compiler::Assembler* assembler,
compiler::UnresolvedPcRelativeCalls* unresolved_calls,
const Code& slow_type_test_stub,
HierarchyInfo* hi,
const Type& type,
const Class& type_class) {
BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class);
__ jmp(compiler::Address(
THR, compiler::target::Thread::slow_type_test_entry_point_offset()));
}
} // namespace dart
#endif // defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)

View file

@@ -14,8 +14,6 @@
namespace dart {
DECLARE_FLAG(bool, disassemble_stubs);
#if !defined(DART_PRECOMPILED_RUNTIME)
uword NativeCallbackTrampolines::TrampolineForId(int32_t callback_id) {
#if defined(DART_PRECOMPILER)

View file

@@ -35,6 +35,7 @@ constexpr bool kDartUseBackgroundCompilation = true;
P(disassemble, bool, false, "Disassemble dart code.") \
P(disassemble_optimized, bool, false, "Disassemble optimized code.") \
P(disassemble_relative, bool, false, "Use offsets instead of absolute PCs") \
P(disassemble_stubs, bool, false, "Disassemble generated stubs.") \
P(support_disassembler, bool, true, "Support the disassembler.")
#else
#define DISASSEMBLE_FLAGS(P, R, C, D) \
@@ -42,6 +43,7 @@ constexpr bool kDartUseBackgroundCompilation = true;
R(disassemble_optimized, false, bool, false, "Disassemble optimized code.") \
R(disassemble_relative, false, bool, false, \
"Use offsets instead of absolute PCs") \
R(disassemble_stubs, false, bool, false, "Disassemble generated stubs.") \
R(support_disassembler, false, bool, true, "Support the disassembler.")
#endif

View file

@@ -11397,48 +11397,93 @@ bool Field::UpdateGuardedCidAndLength(const Object& value) const {
return true;
}
// Given the type G<T0, ..., Tn> and class C<U0, ..., Un> find path to C at G.
// This path can be used to compute type arguments of C at G.
//
// Note: we are relying on the restriction that the same class can only occur
// once among the supertype.
static bool FindInstantiationOf(const Type& type,
bool Class::FindInstantiationOf(Zone* zone,
const Class& cls,
GrowableArray<const AbstractType*>* path,
bool consider_only_super_classes) {
if (type.type_class() == cls.ptr()) {
bool consider_only_super_classes) const {
ASSERT(cls.is_type_finalized());
if (cls.ptr() == ptr()) {
return true; // Found instantiation.
}
Class& cls2 = Class::Handle();
AbstractType& super_type = AbstractType::Handle();
super_type = cls.super_type();
if (!super_type.IsNull() && !super_type.IsObjectType()) {
cls2 = super_type.type_class();
path->Add(&super_type);
if (FindInstantiationOf(type, cls2, path, consider_only_super_classes)) {
Class& cls2 = Class::Handle(zone);
AbstractType& super = AbstractType::Handle(zone, super_type());
if (!super.IsNull() && !super.IsObjectType()) {
cls2 = super.type_class();
if (path != nullptr) {
path->Add(&super);
}
if (cls2.FindInstantiationOf(zone, cls, path,
consider_only_super_classes)) {
return true; // Found instantiation.
}
path->RemoveLast();
}
if (!consider_only_super_classes) {
Array& super_interfaces = Array::Handle(cls.interfaces());
for (intptr_t i = 0; i < super_interfaces.Length(); i++) {
super_type ^= super_interfaces.At(i);
cls2 = super_type.type_class();
path->Add(&super_type);
if (FindInstantiationOf(type, cls2, path,
/*consider_only_supertypes=*/false)) {
return true; // Found instantiation.
}
if (path != nullptr) {
path->RemoveLast();
}
}
if (!consider_only_super_classes) {
Array& super_interfaces = Array::Handle(zone, interfaces());
for (intptr_t i = 0; i < super_interfaces.Length(); i++) {
super ^= super_interfaces.At(i);
cls2 = super.type_class();
if (path != nullptr) {
path->Add(&super);
}
if (cls2.FindInstantiationOf(zone, cls, path)) {
return true; // Found instantiation.
}
if (path != nullptr) {
path->RemoveLast();
}
}
}
return false; // Not found.
}
bool Class::FindInstantiationOf(Zone* zone,
const Type& type,
GrowableArray<const AbstractType*>* path,
bool consider_only_super_classes) const {
return FindInstantiationOf(zone, Class::Handle(zone, type.type_class()), path,
consider_only_super_classes);
}
TypePtr Class::GetInstantiationOf(Zone* zone, const Class& cls) const {
if (ptr() == cls.ptr()) {
return DeclarationType();
}
if (FindInstantiationOf(zone, cls, /*consider_only_super_classes=*/true)) {
// Since [cls] is a superclass of [this], use [cls]'s declaration type.
return cls.DeclarationType();
}
const auto& decl_type = Type::Handle(zone, DeclarationType());
GrowableArray<const AbstractType*> path(zone, 0);
if (!FindInstantiationOf(zone, cls, &path)) {
return Type::null();
}
ASSERT(!path.is_empty());
auto& calculated_type = Type::Handle(zone, decl_type.ptr());
auto& calculated_type_args =
TypeArguments::Handle(zone, calculated_type.arguments());
for (auto* const type : path) {
calculated_type ^= type->ptr();
if (!calculated_type.IsInstantiated()) {
calculated_type ^= calculated_type.InstantiateFrom(
calculated_type_args, Object::null_type_arguments(), kAllFree,
Heap::kNew);
}
calculated_type_args = calculated_type.arguments();
}
ASSERT_EQUAL(calculated_type.type_class_id(), cls.id());
return calculated_type.ptr();
}
TypePtr Class::GetInstantiationOf(Zone* zone, const Type& type) const {
return GetInstantiationOf(zone, Class::Handle(zone, type.type_class()));
}
void Field::SetStaticValue(const Object& value) const {
auto thread = Thread::Current();
ASSERT(thread->IsMutatorThread());
@@ -11476,21 +11521,22 @@ StaticTypeExactnessState StaticTypeExactnessState::Compute(
ASSERT(value.ptr() != Object::sentinel().ptr());
ASSERT(value.ptr() != Object::transition_sentinel().ptr());
Zone* const zone = Thread::Current()->zone();
const TypeArguments& static_type_args =
TypeArguments::Handle(static_type.arguments());
TypeArguments::Handle(zone, static_type.arguments());
TypeArguments& args = TypeArguments::Handle();
TypeArguments& args = TypeArguments::Handle(zone);
ASSERT(static_type.IsFinalized());
const Class& cls = Class::Handle(value.clazz());
const Class& cls = Class::Handle(zone, value.clazz());
GrowableArray<const AbstractType*> path(10);
bool is_super_class = true;
if (!FindInstantiationOf(static_type, cls, &path,
/*consider_only_super_classes=*/true)) {
if (!cls.FindInstantiationOf(zone, static_type, &path,
/*consider_only_super_classes=*/true)) {
is_super_class = false;
bool found_super_interface = FindInstantiationOf(
static_type, cls, &path, /*consider_only_super_classes=*/false);
bool found_super_interface =
cls.FindInstantiationOf(zone, static_type, &path);
ASSERT(found_super_interface);
}
@@ -11522,7 +11568,7 @@ StaticTypeExactnessState StaticTypeExactnessState::Compute(
// To compute C<X0, ..., Xn> at G we walk the chain backwards and
// instantiate Si using type parameters of S{i-1} which gives us a type
// depending on type parameters of S{i-2}.
AbstractType& type = AbstractType::Handle(path.Last()->ptr());
AbstractType& type = AbstractType::Handle(zone, path.Last()->ptr());
for (intptr_t i = path.length() - 2; (i >= 0) && !type.IsInstantiated();
i--) {
args = path[i]->arguments();
@@ -11560,19 +11606,19 @@ StaticTypeExactnessState StaticTypeExactnessState::Compute(
const intptr_t num_type_params = cls.NumTypeParameters();
bool trivial_case =
(num_type_params ==
Class::Handle(static_type.type_class()).NumTypeParameters()) &&
Class::Handle(zone, static_type.type_class()).NumTypeParameters()) &&
(value.GetTypeArguments() == static_type.arguments());
if (!trivial_case && FLAG_trace_field_guards) {
THR_Print("Not a simple case: %" Pd " vs %" Pd
" type parameters, %s vs %s type arguments\n",
num_type_params,
Class::Handle(static_type.type_class()).NumTypeParameters(),
Class::Handle(zone, static_type.type_class()).NumTypeParameters(),
SafeTypeArgumentsToCString(
TypeArguments::Handle(value.GetTypeArguments())),
TypeArguments::Handle(zone, value.GetTypeArguments())),
SafeTypeArgumentsToCString(static_type_args));
}
AbstractType& type_arg = AbstractType::Handle();
AbstractType& type_arg = AbstractType::Handle(zone);
args = type.arguments();
for (intptr_t i = 0; (i < num_type_params) && trivial_case; i++) {
type_arg = args.TypeAt(i);

View file

@@ -1270,6 +1270,71 @@ class Class : public Object {
}
void set_interfaces(const Array& value) const;
// Returns whether a path from [this] to [cls] can be found, where the first
// element is a direct supertype of [this], each following element is a direct
// supertype of the previous element and the final element has [cls] as its
// type class. If [this] and [cls] are the same class, then the path is empty.
//
// If [path] is not nullptr, then the elements of the path are added to it.
// This path can then be used to compute type arguments of [cls] given type
// arguments for an instance of [this].
//
// Note: There may be multiple paths to [cls], but the result of applying each
// path must be equal to the other results.
bool FindInstantiationOf(Zone* zone,
const Class& cls,
GrowableArray<const AbstractType*>* path,
bool consider_only_super_classes = false) const;
bool FindInstantiationOf(Zone* zone,
const Class& cls,
bool consider_only_super_classes = false) const {
return FindInstantiationOf(zone, cls, /*path=*/nullptr,
consider_only_super_classes);
}
// Returns whether a path from [this] to [type] can be found, where the first
// element is a direct supertype of [this], each following element is a direct
// supertype of the previous element and the final element has the same type
// class as [type]. If [this] is the type class of [type], then the path is
// empty.
//
// If [path] is not nullptr, then the elements of the path are added to it.
// This path can then be used to compute type arguments of [type]'s type
// class given type arguments for an instance of [this].
//
// Note: There may be multiple paths to [type]'s type class, but the result of
// applying each path must be equal to the other results.
bool FindInstantiationOf(Zone* zone,
const Type& type,
GrowableArray<const AbstractType*>* path,
bool consider_only_super_classes = false) const;
bool FindInstantiationOf(Zone* zone,
const Type& type,
bool consider_only_super_classes = false) const {
return FindInstantiationOf(zone, type, /*path=*/nullptr,
consider_only_super_classes);
}
// If [this] is a subtype of a type with type class [cls], then this
// returns [cls]<X_0, ..., X_n>, where n is the number of type arguments for
// [cls] and where each type argument X_k is either instantiated or has free
// class type parameters corresponding to the type parameters of [this].
// Thus, given an instance of [this], the result can be instantiated
// with the instance type arguments to get the type of the instance.
//
// If [this] is not a subtype of a type with type class [cls], returns null.
TypePtr GetInstantiationOf(Zone* zone, const Class& cls) const;
// If [this] is a subtype of [type], then this returns [cls]<X_0, ..., X_n>,
// where [cls] is the type class of [type], n is the number of type arguments
// for [cls], and where each type argument X_k is either instantiated or has
// free class type parameters corresponding to the type parameters of [this].
// Thus, given an instance of [this], the result can be instantiated with the
// instance type arguments to get the type of the instance.
//
// If [this] is not a subtype of a type with type class [cls], returns null.
TypePtr GetInstantiationOf(Zone* zone, const Type& type) const;
#if !defined(PRODUCT) || !defined(DART_PRECOMPILED_RUNTIME)
// Returns the list of classes directly implementing this class.
GrowableObjectArrayPtr direct_implementors() const {

View file

@@ -5304,4 +5304,97 @@ TEST_CASE(TypeParameterTypeRef) {
EXPECT(!m.IsSubtypeOf(t, Heap::kNew));
}
TEST_CASE(Class_GetInstantiationOf) {
const char* kScript = R"(
class B<T> {}
class A1<X, Y> implements B<List<Y>> {}
class A2<X, Y> extends A1<Y, X> {}
)";
Dart_Handle api_lib = TestCase::LoadTestScript(kScript, nullptr);
EXPECT_VALID(api_lib);
TransitionNativeToVM transition(thread);
Zone* const zone = thread->zone();
const auto& root_lib =
Library::CheckedHandle(zone, Api::UnwrapHandle(api_lib));
EXPECT(!root_lib.IsNull());
const auto& class_b = Class::Handle(zone, GetClass(root_lib, "B"));
const auto& class_a1 = Class::Handle(zone, GetClass(root_lib, "A1"));
const auto& class_a2 = Class::Handle(zone, GetClass(root_lib, "A2"));
const auto& core_lib = Library::Handle(zone, Library::CoreLibrary());
const auto& class_list = Class::Handle(zone, GetClass(core_lib, "List"));
auto expect_type_equal = [](const AbstractType& expected,
const AbstractType& got) {
if (got.Equals(expected)) return;
TextBuffer buffer(128);
buffer.AddString("Expected type ");
expected.PrintName(Object::kScrubbedName, &buffer);
buffer.AddString(", got ");
got.PrintName(Object::kScrubbedName, &buffer);
dart::Expect(__FILE__, __LINE__).Fail("%s", buffer.buffer());
};
const auto& decl_type_b = Type::Handle(zone, class_b.DeclarationType());
const auto& decl_type_list = Type::Handle(zone, class_list.DeclarationType());
const auto& null_tav = Object::null_type_arguments();
// Test that A1.GetInstantiationOf(B) returns B<List<A1::Y>>.
{
const auto& decl_type_a1 = Type::Handle(zone, class_a1.DeclarationType());
const auto& decl_type_args_a1 =
TypeArguments::Handle(zone, decl_type_a1.arguments());
const auto& type_arg_a1_y =
TypeParameter::CheckedHandle(zone, decl_type_args_a1.TypeAt(1));
auto& tav_a1_y = TypeArguments::Handle(TypeArguments::New(1));
tav_a1_y.SetTypeAt(0, type_arg_a1_y);
tav_a1_y = tav_a1_y.Canonicalize(thread, nullptr);
auto& type_list_a1_y = Type::CheckedHandle(
zone, decl_type_list.InstantiateFrom(tav_a1_y, null_tav, kAllFree,
Heap::kNew));
type_list_a1_y ^= type_list_a1_y.Canonicalize(thread, nullptr);
auto& tav_list_a1_y = TypeArguments::Handle(TypeArguments::New(1));
tav_list_a1_y.SetTypeAt(0, type_list_a1_y);
tav_list_a1_y = tav_list_a1_y.Canonicalize(thread, nullptr);
auto& type_b_list_a1_y = Type::CheckedHandle(
zone, decl_type_b.InstantiateFrom(tav_list_a1_y, null_tav, kAllFree,
Heap::kNew));
type_b_list_a1_y ^= type_b_list_a1_y.Canonicalize(thread, nullptr);
const auto& inst_b_a1 =
Type::Handle(zone, class_a1.GetInstantiationOf(zone, class_b));
EXPECT(!inst_b_a1.IsNull());
expect_type_equal(type_b_list_a1_y, inst_b_a1);
}
// Test that A2.GetInstantiationOf(B) returns B<List<A2::X>>.
{
const auto& decl_type_a2 = Type::Handle(zone, class_a2.DeclarationType());
const auto& decl_type_args_a2 =
TypeArguments::Handle(zone, decl_type_a2.arguments());
const auto& type_arg_a2_x =
TypeParameter::CheckedHandle(zone, decl_type_args_a2.TypeAt(1));
auto& tav_a2_x = TypeArguments::Handle(TypeArguments::New(1));
tav_a2_x.SetTypeAt(0, type_arg_a2_x);
tav_a2_x = tav_a2_x.Canonicalize(thread, nullptr);
auto& type_list_a2_x = Type::CheckedHandle(
zone, decl_type_list.InstantiateFrom(tav_a2_x, null_tav, kAllFree,
Heap::kNew));
type_list_a2_x ^= type_list_a2_x.Canonicalize(thread, nullptr);
auto& tav_list_a2_x = TypeArguments::Handle(TypeArguments::New(1));
tav_list_a2_x.SetTypeAt(0, type_list_a2_x);
tav_list_a2_x = tav_list_a2_x.Canonicalize(thread, nullptr);
auto& type_b_list_a2_x = Type::CheckedHandle(
zone, decl_type_b.InstantiateFrom(tav_list_a2_x, null_tav, kAllFree,
Heap::kNew));
type_b_list_a2_x ^= type_b_list_a2_x.Canonicalize(thread, nullptr);
const auto& inst_b_a2 =
Type::Handle(zone, class_a2.GetInstantiationOf(zone, class_b));
EXPECT(!inst_b_a2.IsNull());
expect_type_equal(type_b_list_a2_x, inst_b_a2);
}
}
} // namespace dart

View file

@@ -22,7 +22,6 @@
namespace dart {
DEFINE_FLAG(bool, disassemble_stubs, false, "Disassemble generated stubs.");
DECLARE_FLAG(bool, precompiled_mode);
StubCode::StubCodeEntry StubCode::entries_[kNumStubEntries] = {

View file

@@ -22,8 +22,6 @@ class Code;
class Isolate;
class ObjectPointerVisitor;
DECLARE_FLAG(bool, disassemble_stubs);
// Is it permitted for the stubs above to refer to Object::null(), which is
// allocated in the VM isolate and shared across all isolates.
// However, in cases where a simple GC-safe placeholder is needed on the stack,

View file

@@ -5,6 +5,7 @@
#include <functional>
#include "vm/compiler/assembler/disassembler.h"
#include "vm/hash_map.h"
#include "vm/longjump.h"
#include "vm/object_store.h"
#include "vm/stub_code.h"
@@ -20,8 +21,6 @@
namespace dart {
DECLARE_FLAG(bool, disassemble_stubs);
TypeTestingStubNamer::TypeTestingStubNamer()
: lib_(Library::Handle()),
klass_(Class::Handle()),
@@ -308,6 +307,18 @@ CodePtr TypeTestingStubGenerator::BuildCodeForType(const Type& type) {
return code.ptr();
}
void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
compiler::Assembler* assembler,
compiler::UnresolvedPcRelativeCalls* unresolved_calls,
const Code& slow_type_test_stub,
HierarchyInfo* hi,
const Type& type,
const Class& type_class) {
BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class);
__ Jump(compiler::Address(
THR, compiler::target::Thread::slow_type_test_entry_point_offset()));
}
void TypeTestingStubGenerator::BuildOptimizedTypeTestStubFastCases(
compiler::Assembler* assembler,
HierarchyInfo* hi,
@@ -316,21 +327,24 @@ void TypeTestingStubGenerator::BuildOptimizedTypeTestStubFastCases(
// These are handled via the TopTypeTypeTestStub!
ASSERT(!type.IsTopTypeForSubtyping());
if (type.IsObjectType()) {
ASSERT(type.IsNonNullable() &&
IsolateGroup::Current()->use_strict_null_safety_checks());
compiler::Label is_null;
__ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
__ BranchIf(EQUAL, &is_null, compiler::Assembler::kNearJump);
__ Ret();
__ Bind(&is_null);
return; // No further checks needed.
}
// Fast case for 'int' and '_Smi' (which can appear in core libraries).
if (type.IsIntType() || type.IsSmiType()) {
compiler::Label non_smi_value;
__ BranchIfNotSmi(TypeTestABI::kInstanceReg, &non_smi_value);
__ BranchIfNotSmi(TypeTestABI::kInstanceReg, &non_smi_value,
compiler::Assembler::kNearJump);
__ Ret();
__ Bind(&non_smi_value);
} else if (type.IsObjectType()) {
ASSERT(type.IsNonNullable() &&
IsolateGroup::Current()->use_strict_null_safety_checks());
compiler::Label continue_checking;
__ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
__ BranchIf(EQUAL, &continue_checking);
__ Ret();
__ Bind(&continue_checking);
} else {
// TODO(kustermann): Make more fast cases, e.g. Type::Number()
// is implemented by Smi.
@@ -343,52 +357,226 @@ void TypeTestingStubGenerator::BuildOptimizedTypeTestStubFastCases(
/*include_abstract=*/false,
/*exclude_null=*/!Instance::NullIsAssignableTo(type));
const Type& smi_type = Type::Handle(Type::SmiType());
const bool smi_is_ok = smi_type.IsSubtypeOf(type, Heap::kNew);
BuildOptimizedSubtypeRangeCheck(assembler, ranges, smi_is_ok);
compiler::Label is_subtype, is_not_subtype;
const bool smi_is_ok =
Type::Handle(Type::SmiType()).IsSubtypeOf(type, Heap::kNew);
if (smi_is_ok) {
__ LoadClassIdMayBeSmi(TTSInternalRegs::kScratchReg,
TypeTestABI::kInstanceReg);
} else {
__ BranchIfSmi(TypeTestABI::kInstanceReg, &is_not_subtype);
__ LoadClassId(TTSInternalRegs::kScratchReg, TypeTestABI::kInstanceReg);
}
BuildOptimizedSubtypeRangeCheck(assembler, ranges,
TTSInternalRegs::kScratchReg, &is_subtype,
&is_not_subtype);
__ Bind(&is_subtype);
__ Ret();
__ Bind(&is_not_subtype);
} else {
ASSERT(hi->CanUseGenericSubtypeRangeCheckFor(type));
const intptr_t num_type_arguments = type_class.NumTypeArguments();
const TypeArguments& ta = TypeArguments::Handle(type.arguments());
ASSERT(ta.Length() == num_type_arguments);
BuildOptimizedSubclassRangeCheckWithTypeArguments(assembler, hi, type,
type_class, ta);
type_class);
}
if (Instance::NullIsAssignableTo(type)) {
// Fast case for 'null'.
compiler::Label non_null;
__ CompareObject(TypeTestABI::kInstanceReg, Object::null_object());
__ BranchIf(NOT_EQUAL, &non_null);
__ BranchIf(NOT_EQUAL, &non_null, compiler::Assembler::kNearJump);
__ Ret();
__ Bind(&non_null);
}
}
static void CommentCheckedClasses(compiler::Assembler* assembler,
const CidRangeVector& ranges) {
if (!assembler->EmittingComments()) return;
Thread* const thread = Thread::Current();
ClassTable* const class_table = thread->isolate_group()->class_table();
Zone* const zone = thread->zone();
if (ranges.is_empty()) {
__ Comment("No valid cids to check");
return;
}
if ((ranges.length() == 1) && ranges[0].IsSingleCid()) {
const auto& cls = Class::Handle(zone, class_table->At(ranges[0].cid_start));
__ Comment("Checking for cid %" Pd " (%s)", cls.id(),
cls.ScrubbedNameCString());
return;
}
__ Comment("Checking for concrete finalized classes:");
auto& cls = Class::Handle(zone);
for (const auto& range : ranges) {
ASSERT(!range.IsIllegalRange());
for (classid_t cid = range.cid_start; cid <= range.cid_end; cid++) {
// Invalid entries can be included to keep range count low.
if (!class_table->HasValidClassAt(cid)) continue;
cls = class_table->At(cid);
if (cls.is_abstract()) continue; // Only output concrete classes.
__ Comment(" * %" Pd32 " (%s)", cid, cls.ScrubbedNameCString());
}
}
}
// Represents the following needs for runtime checks to see if an instance of
// [cls] is a subtype of [type] that has type class [type_class]:
//
// * kCannotBeChecked: Instances of [cls] cannot be checked with any of the
// currently implemented runtime checks, so must fall back on the runtime.
//
// * kNotSubtype: A [cls] instance is guaranteed to not be a subtype of [type]
// regardless of any instance type arguments.
//
// * kCidCheckOnly: A [cls] instance is guaranteed to be a subtype of [type]
// regardless of any instance type arguments.
//
// * kNeedsFinalization: Checking that an instance of [cls] is a subtype of
// [type] requires instance type arguments, but [cls] is not finalized, and
// so the appropriate type arguments field offset cannot be determined.
//
// * kInstanceTypeArgumentsAreSubtypes: [cls] implements a fully uninstantiated
// type with type class [type_class] which can be directly instantiated with
// the instance type arguments. Thus, each type argument of [type] should be
// compared with the corresponding (index-wise) instance type argument.
enum class CheckType {
kCannotBeChecked,
kNotSubtype,
kCidCheckOnly,
kNeedsFinalization,
kInstanceTypeArgumentsAreSubtypes,
};
// Returns a CheckType describing how to check instances of [to_check] as
// subtypes of [type].
static CheckType SubtypeChecksForClass(Zone* zone,
const Type& type,
const Class& type_class,
const Class& to_check) {
ASSERT_EQUAL(type.type_class_id(), type_class.id());
ASSERT(type_class.is_type_finalized());
ASSERT(!to_check.is_abstract());
ASSERT(to_check.is_type_finalized());
ASSERT(AbstractType::Handle(zone, to_check.RareType())
.IsSubtypeOf(AbstractType::Handle(zone, type_class.RareType()),
Heap::kNew));
if (!type_class.IsGeneric()) {
// All instances of [to_check] are subtypes of [type].
return CheckType::kCidCheckOnly;
}
if (to_check.FindInstantiationOf(zone, type_class,
/*only_super_classes=*/true)) {
// No need to check for type argument consistency, as [to_check] is the same
// as or a subclass of [type_class].
return to_check.is_finalized()
? CheckType::kInstanceTypeArgumentsAreSubtypes
: CheckType::kCannotBeChecked;
}
auto& calculated_type =
AbstractType::Handle(zone, to_check.GetInstantiationOf(zone, type_class));
if (calculated_type.IsInstantiated()) {
if (type.IsInstantiated()) {
return calculated_type.IsSubtypeOf(type, Heap::kNew)
? CheckType::kCidCheckOnly
: CheckType::kNotSubtype;
}
// TODO(dartbug.com/46920): Requires walking both types, checking
// corresponding instantiated parts at compile time (assuming uninstantiated
// parts check successfully) and then creating appropriate runtime checks
// for uninstantiated parts of [type].
return CheckType::kCannotBeChecked;
}
if (!to_check.is_finalized()) {
return CheckType::kNeedsFinalization;
}
ASSERT(to_check.NumTypeArguments() > 0);
ASSERT(compiler::target::Class::TypeArgumentsFieldOffset(to_check) !=
compiler::target::Class::kNoTypeArguments);
// If the calculated type arguments are a prefix of the declaration type
// arguments, then we can just treat the instance type arguments as if they
// were used to instantiate the type class during checking.
const auto& decl_type_args = TypeArguments::Handle(
zone, Type::Handle(zone, to_check.DeclarationType()).arguments());
const auto& calculated_type_args =
TypeArguments::Handle(zone, calculated_type.arguments());
const bool type_args_consistent = calculated_type_args.IsSubvectorEquivalent(
decl_type_args, 0, type_class.NumTypeArguments(),
TypeEquality::kCanonical);
// TODO(dartbug.com/46920): Currently we require subtyping to be checkable
// by comparing the instance type arguments against the type arguments of
// [type] piecewise, but we could check other cases as well.
return type_args_consistent ? CheckType::kInstanceTypeArgumentsAreSubtypes
: CheckType::kCannotBeChecked;
}
static void CommentSkippedClasses(compiler::Assembler* assembler,
const Type& type,
const Class& type_class,
const CidRangeVector& ranges) {
if (!assembler->EmittingComments() || ranges.is_empty()) return;
if (ranges.is_empty()) return;
ASSERT(type_class.is_implemented());
__ Comment("Not checking the following concrete implementors of %s:",
type_class.ScrubbedNameCString());
Thread* const thread = Thread::Current();
auto* const class_table = thread->isolate_group()->class_table();
Zone* const zone = thread->zone();
auto& cls = Class::Handle(zone);
auto& calculated_type = Type::Handle(zone);
for (const auto& range : ranges) {
ASSERT(!range.IsIllegalRange());
for (classid_t cid = range.cid_start; cid <= range.cid_end; cid++) {
// Invalid entries can be included to keep range count low.
if (!class_table->HasValidClassAt(cid)) continue;
cls = class_table->At(cid);
if (cls.is_abstract()) continue; // Only output concrete classes.
ASSERT(cls.is_type_finalized());
TextBuffer buffer(128);
buffer.Printf(" * %" Pd32 "(%s): ", cid, cls.ScrubbedNameCString());
switch (SubtypeChecksForClass(zone, type, type_class, cls)) {
case CheckType::kCannotBeChecked:
calculated_type = cls.GetInstantiationOf(zone, type_class);
buffer.AddString("cannot check that ");
calculated_type.PrintName(Object::kScrubbedName, &buffer);
buffer.AddString(" is a subtype of ");
type.PrintName(Object::kScrubbedName, &buffer);
break;
case CheckType::kNotSubtype:
calculated_type = cls.GetInstantiationOf(zone, type_class);
calculated_type.PrintName(Object::kScrubbedName, &buffer);
buffer.AddString(" is not a subtype of ");
type.PrintName(Object::kScrubbedName, &buffer);
break;
case CheckType::kNeedsFinalization:
buffer.AddString("is not finalized");
break;
case CheckType::kInstanceTypeArgumentsAreSubtypes:
buffer.AddString("was not finalized during class splitting");
break;
default:
// Either the CheckType was kCidCheckOnly, which should never happen
// since it only requires type finalization, or a new CheckType has
// been added.
UNREACHABLE();
break;
}
__ Comment("%s", buffer.buffer());
}
}
}
// Builds a cid range check for the concrete subclasses and implementors of
// type. Assumes cid to check is already in TTSInternalRegs::kScratchReg. Falls
// through or jumps to check_succeeded if the range contains the cid, else
// jumps to check_failed.
void TypeTestingStubGenerator::BuildOptimizedSubtypeRangeCheck(
compiler::Assembler* assembler,
const CidRangeVector& ranges,
bool smi_is_ok) {
compiler::Label cid_range_failed, is_subtype;
if (smi_is_ok) {
__ LoadClassIdMayBeSmi(TTSInternalRegs::kScratchReg,
TypeTestABI::kInstanceReg);
} else {
__ BranchIfSmi(TypeTestABI::kInstanceReg, &cid_range_failed);
__ LoadClassId(TTSInternalRegs::kScratchReg, TypeTestABI::kInstanceReg);
}
Register class_id_reg,
compiler::Label* check_succeeded,
compiler::Label* check_failed) {
CommentCheckedClasses(assembler, ranges);
FlowGraphCompiler::GenerateCidRangesCheck(
assembler, TTSInternalRegs::kScratchReg, ranges, &is_subtype,
&cid_range_failed, true);
__ Bind(&is_subtype);
__ Ret();
__ Bind(&cid_range_failed);
assembler, class_id_reg, ranges, check_succeeded, check_failed, true);
}
void TypeTestingStubGenerator::
@@ -396,73 +584,343 @@ void TypeTestingStubGenerator::
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Type& type,
const Class& type_class,
const TypeArguments& ta) {
// a) First we make a quick sub*class* cid-range check.
const Class& type_class) {
ASSERT(hi->CanUseGenericSubtypeRangeCheckFor(type));
compiler::Label check_failed;
ASSERT(!type_class.is_implemented());
const CidRangeVector& ranges = hi->SubclassRangesForClass(type_class);
BuildOptimizedSubclassRangeCheck(assembler, ranges, &check_failed);
// fall through to continue
// a) First we perform subtype cid-range checks and load the instance type
// arguments based on which check succeeded.
__ LoadClassIdMayBeSmi(TTSInternalRegs::kScratchReg,
TypeTestABI::kInstanceReg);
compiler::Label load_succeeded;
if (BuildLoadInstanceTypeArguments(assembler, hi, type, type_class,
TTSInternalRegs::kScratchReg,
TTSInternalRegs::kInstanceTypeArgumentsReg,
&load_succeeded, &check_failed)) {
// Only build type argument checking if any checked cid ranges require it.
__ Bind(&load_succeeded);
// b) Then we'll load the values for the type parameters.
__ LoadCompressedFieldFromOffset(
TTSInternalRegs::kInstanceTypeArgumentsReg, TypeTestABI::kInstanceReg,
compiler::target::Class::TypeArgumentsFieldOffset(type_class));
// b) We check for "rare" types, where the instance type arguments are null.
//
// The kernel frontend should fill in any non-assigned type parameters on
// construction with dynamic/Object, so we should never get the null type
// argument vector in created instances.
//
// TODO(kustermann): We could consider not using "null" as type argument
// vector representing all-dynamic to avoid this extra check (which will be
// uncommon because most Dart code in 2.0 will be strongly typed)!
__ CompareObject(TTSInternalRegs::kInstanceTypeArgumentsReg,
Object::null_object());
const Type& rare_type = Type::Handle(Type::RawCast(type_class.RareType()));
if (rare_type.IsSubtypeOf(type, Heap::kNew)) {
compiler::Label process_done;
__ BranchIf(NOT_EQUAL, &process_done, compiler::Assembler::kNearJump);
__ Ret();
__ Bind(&process_done);
} else {
__ BranchIf(EQUAL, &check_failed);
}
// The kernel frontend should fill in any non-assigned type parameters on
// construction with dynamic/Object, so we should never get the null type
// argument vector in created instances.
//
// TODO(kustermann): We could consider not using "null" as type argument
// vector representing all-dynamic to avoid this extra check (which will be
// uncommon because most Dart code in 2.0 will be strongly typed)!
__ CompareObject(TTSInternalRegs::kInstanceTypeArgumentsReg,
Object::null_object());
const Type& rare_type = Type::Handle(Type::RawCast(type_class.RareType()));
if (rare_type.IsSubtypeOf(type, Heap::kNew)) {
compiler::Label process_done;
__ BranchIf(NOT_EQUAL, &process_done);
// c) Then we'll check each value of the type argument.
AbstractType& type_arg = AbstractType::Handle();
const TypeArguments& ta = TypeArguments::Handle(type.arguments());
const intptr_t num_type_parameters = type_class.NumTypeParameters();
const intptr_t num_type_arguments = type_class.NumTypeArguments();
ASSERT(ta.Length() >= num_type_arguments);
for (intptr_t i = 0; i < num_type_parameters; ++i) {
const intptr_t type_param_value_offset_i =
num_type_arguments - num_type_parameters + i;
type_arg = ta.TypeAt(type_param_value_offset_i);
ASSERT(type_arg.IsTypeParameter() ||
hi->CanUseSubtypeRangeCheckFor(type_arg));
BuildOptimizedTypeArgumentValueCheck(
assembler, hi, type_arg, type_param_value_offset_i, &check_failed);
}
__ Ret();
__ Bind(&process_done);
} else {
__ BranchIf(EQUAL, &check_failed);
}
// c) Then we'll check each value of the type argument.
AbstractType& type_arg = AbstractType::Handle();
const intptr_t num_type_parameters = type_class.NumTypeParameters();
const intptr_t num_type_arguments = type_class.NumTypeArguments();
for (intptr_t i = 0; i < num_type_parameters; ++i) {
const intptr_t type_param_value_offset_i =
num_type_arguments - num_type_parameters + i;
type_arg = ta.TypeAt(type_param_value_offset_i);
ASSERT(type_arg.IsTypeParameter() ||
hi->CanUseSubtypeRangeCheckFor(type_arg));
BuildOptimizedTypeArgumentValueCheck(
assembler, hi, type_arg, type_param_value_offset_i, &check_failed);
}
__ Ret();
// If anything fails.
__ Bind(&check_failed);
}
void TypeTestingStubGenerator::BuildOptimizedSubclassRangeCheck(
compiler::Assembler* assembler,
// Splits [ranges] into multiple ranges in [output], where the concrete,
// finalized classes in each range share the same type arguments field offset.
//
// The first range in [output] contains [type_class], if any do, and otherwise
// prioritizes ranges that include predefined cids before ranges that only
// contain user-defined classes.
//
// Any cids that do not have valid class table entries, correspond to abstract
// or unfinalized classes, or have no TAV field offset are treated as don't
// cares, in that the cid may appear in any of the CidRangeVectors as needed to
// reduce the number of ranges.
//
// Note that CidRangeVectors are MallocGrowableArrays, so the elements in
// output must be freed after use!
static void SplitByTypeArgumentsFieldOffset(
Thread* T,
const Class& type_class,
const CidRangeVector& ranges,
compiler::Label* check_failed) {
__ LoadClassIdMayBeSmi(TTSInternalRegs::kScratchReg,
TypeTestABI::kInstanceReg);
GrowableArray<CidRangeVector*>* output) {
ASSERT(output != nullptr);
ASSERT(!ranges.is_empty());
compiler::Label is_subtype;
FlowGraphCompiler::GenerateCidRangesCheck(
assembler, TTSInternalRegs::kScratchReg, ranges, &is_subtype,
check_failed, true);
__ Bind(&is_subtype);
Zone* const Z = T->zone();
ClassTable* const class_table = T->isolate_group()->class_table();
IntMap<CidRangeVector*> offset_map(Z);
IntMap<intptr_t> predefined_offsets(Z);
IntMap<intptr_t> user_defined_offsets(Z);
auto add_to_vector = [&](intptr_t tav_offset, const CidRange& range) {
if (range.cid_start == -1) return;
ASSERT(tav_offset != compiler::target::Class::kNoTypeArguments);
if (CidRangeVector* vector = offset_map.Lookup(tav_offset)) {
vector->Add(range);
} else {
vector = new CidRangeVector(1);
vector->Add(range);
offset_map.Insert(tav_offset, vector);
}
};
auto increment_count = [&](intptr_t cid, intptr_t tav_offset) {
if (cid <= kNumPredefinedCids) {
predefined_offsets.Update(
{tav_offset, predefined_offsets.Lookup(tav_offset) + 1});
} else if (auto* const kv = predefined_offsets.LookupPair(tav_offset)) {
predefined_offsets.Update({kv->key, kv->value + 1});
} else {
user_defined_offsets.Update(
{tav_offset, user_defined_offsets.Lookup(tav_offset) + 1});
}
};
// First populate offset_map.
auto& cls = Class::Handle(Z);
for (const auto& range : ranges) {
intptr_t last_offset = compiler::target::Class::kNoTypeArguments;
intptr_t cid_start = -1;
intptr_t cid_end = -1;
for (intptr_t cid = range.cid_start; cid <= range.cid_end; cid++) {
if (!class_table->HasValidClassAt(cid)) continue;
cls = class_table->At(cid);
if (cls.is_abstract()) continue;
// Only finalized concrete classes are present due to the conditions on
// returning kInstanceTypeArgumentsAreSubtypes in SubtypeChecksForClass.
ASSERT(cls.is_finalized());
const intptr_t tav_offset =
compiler::target::Class::TypeArgumentsFieldOffset(cls);
if (tav_offset == compiler::target::Class::kNoTypeArguments) continue;
if (tav_offset == last_offset && cid_start >= 0) {
cid_end = cid;
increment_count(cid, tav_offset);
continue;
}
add_to_vector(last_offset, {cid_start, cid_end});
last_offset = tav_offset;
cid_start = cid_end = cid;
increment_count(cid, tav_offset);
}
add_to_vector(last_offset, {cid_start, cid_end});
}
ASSERT(!offset_map.IsEmpty());
// Add the CidRangeVector for the type_class's offset, if it has one.
if (!type_class.is_abstract() && type_class.is_finalized()) {
const intptr_t type_class_offset =
compiler::target::Class::TypeArgumentsFieldOffset(type_class);
ASSERT(predefined_offsets.LookupPair(type_class_offset) != nullptr ||
user_defined_offsets.LookupPair(type_class_offset) != nullptr);
CidRangeVector* const vector = offset_map.Lookup(type_class_offset);
ASSERT(vector != nullptr);
output->Add(vector);
// Remove this CidRangeVector from consideration in the following loops.
predefined_offsets.Remove(type_class_offset);
user_defined_offsets.Remove(type_class_offset);
}
// Now add CidRangeVectors that include predefined cids.
// For now, we do this in an arbitrary order, but we could use the counts
// to prioritize offsets that are more shared if desired.
auto predefined_it = predefined_offsets.GetIterator();
while (auto* const kv = predefined_it.Next()) {
CidRangeVector* const vector = offset_map.Lookup(kv->key);
ASSERT(vector != nullptr);
output->Add(vector);
}
// Finally, add CidRangeVectors that only include user-defined cids.
// For now, we do this in an arbitrary order, but we could use the counts
// to prioritize offsets that are more shared if desired.
auto user_defined_it = user_defined_offsets.GetIterator();
while (auto* const kv = user_defined_it.Next()) {
CidRangeVector* const vector = offset_map.Lookup(kv->key);
ASSERT(vector != nullptr);
output->Add(vector);
}
ASSERT(output->length() > 0);
}
// Given [type], its type class [type_class], and a CidRangeVector [ranges],
// populates the output CidRangeVectors from cids in [ranges], based on what
// runtime checks are needed to determine whether the runtime type of
// an instance is a subtype of [type].
//
// Concrete, type finalized classes whose cids are added to [cid_check_only]
// implement a particular instantiation of [type_class] that is guaranteed to
// be a subtype of [type]. Thus, these instances do not require any checking
// of type arguments.
//
// Concrete, finalized classes whose cids are added to [type_argument_checks]
// implement a fully uninstantiated version of [type_class] that can be directly
// instantiated with the type arguments of the class's instance. Thus, each
// type argument of [type] should be checked against the corresponding
// instance type argument.
//
// Classes whose cids are in [not_checked]:
// * Instances of the class are guaranteed to not be a subtype of [type].
// * The class is not finalized.
// * The subtype relation cannot be checked with our current approach and
// thus the stub must fall back to the STC/VM runtime.
//
// Any cids that do not have valid class table entries or correspond to
// abstract classes are treated as don't cares, in that the cid may or may not
// appear as needed to reduce the number of ranges.
static void SplitOnTypeArgumentTests(HierarchyInfo* hi,
const Type& type,
const Class& type_class,
const CidRangeVector& ranges,
CidRangeVector* cid_check_only,
CidRangeVector* type_argument_checks,
CidRangeVector* not_checked) {
ASSERT(type_class.is_implemented()); // No need to split if not implemented.
ASSERT(cid_check_only->is_empty());
ASSERT(type_argument_checks->is_empty());
ASSERT(not_checked->is_empty());
ClassTable* const class_table = hi->thread()->isolate_group()->class_table();
Zone* const zone = hi->thread()->zone();
auto& to_check = Class::Handle(zone);
auto add_cid_range = [&](CheckType check, const CidRange& range) {
if (range.cid_start == -1) return;
switch (check) {
case CheckType::kCidCheckOnly:
cid_check_only->Add(range);
break;
case CheckType::kInstanceTypeArgumentsAreSubtypes:
type_argument_checks->Add(range);
break;
default:
not_checked->Add(range);
}
};
for (const auto& range : ranges) {
CheckType last_check = CheckType::kCannotBeChecked;
classid_t cid_start = -1, cid_end = -1;
for (classid_t cid = range.cid_start; cid <= range.cid_end; cid++) {
// Invalid entries can be included to keep range count low.
if (!class_table->HasValidClassAt(cid)) continue;
to_check = class_table->At(cid);
if (to_check.is_abstract()) continue;
const CheckType current_check =
SubtypeChecksForClass(zone, type, type_class, to_check);
ASSERT(current_check != CheckType::kInstanceTypeArgumentsAreSubtypes ||
to_check.is_finalized());
if (last_check == current_check && cid_start >= 0) {
cid_end = cid;
continue;
}
add_cid_range(last_check, {cid_start, cid_end});
last_check = current_check;
cid_start = cid_end = cid;
}
add_cid_range(last_check, {cid_start, cid_end});
}
}
bool TypeTestingStubGenerator::BuildLoadInstanceTypeArguments(
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Type& type,
const Class& type_class,
const Register class_id_reg,
const Register instance_type_args_reg,
compiler::Label* load_succeeded,
compiler::Label* load_failed) {
const CidRangeVector& ranges =
hi->SubtypeRangesForClass(type_class, /*include_abstract=*/false,
!Instance::NullIsAssignableTo(type));
if (!type_class.is_implemented()) {
ASSERT(type_class.is_finalized());
const intptr_t tav_offset =
compiler::target::Class::TypeArgumentsFieldOffset(type_class);
compiler::Label is_subtype;
BuildOptimizedSubtypeRangeCheck(assembler, ranges, class_id_reg,
&is_subtype, load_failed);
__ Bind(&is_subtype);
if (tav_offset != compiler::target::Class::kNoTypeArguments) {
// The class and its subclasses have trivially consistent type arguments.
__ LoadCompressedFieldFromOffset(instance_type_args_reg,
TypeTestABI::kInstanceReg, tav_offset);
return true;
} else {
// Not a generic type, so cid checks are sufficient.
__ Ret();
return false;
}
}
Thread* const T = hi->thread();
Zone* const Z = T->zone();
CidRangeVector cid_checks_only, type_argument_checks, not_checked;
SplitOnTypeArgumentTests(hi, type, type_class, ranges, &cid_checks_only,
&type_argument_checks, &not_checked);
if (!cid_checks_only.is_empty()) {
compiler::Label is_subtype, keep_looking;
compiler::Label* check_failed =
type_argument_checks.is_empty() ? load_failed : &keep_looking;
BuildOptimizedSubtypeRangeCheck(assembler, cid_checks_only, class_id_reg,
&is_subtype, check_failed);
__ Bind(&is_subtype);
__ Ret();
__ Bind(&keep_looking);
}
if (!type_argument_checks.is_empty()) {
GrowableArray<CidRangeVector*> vectors;
SplitByTypeArgumentsFieldOffset(T, type_class, type_argument_checks,
&vectors);
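// Each vector returned by SplitByTypeArgumentsFieldOffset groups cid
// ranges whose classes share the same instance type arguments field
// offset, so the offset is read once per group from the group's first
// class.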
ASSERT(vectors.length() > 0);
ClassTable* const class_table = T->isolate_group()->class_table();
auto& cls = Class::Handle(Z);
for (intptr_t i = 0; i < vectors.length(); i++) {
CidRangeVector* const vector = vectors[i];
ASSERT(!vector->is_empty());
const intptr_t first_cid = vector->At(0).cid_start;
ASSERT(class_table->HasValidClassAt(first_cid));
cls = class_table->At(first_cid);
ASSERT(cls.is_finalized());
const intptr_t tav_offset =
compiler::target::Class::TypeArgumentsFieldOffset(cls);
compiler::Label load_tav, keep_looking;
// For the last vector, just jump to load_failed if the check fails
// and avoid emitting a jump to load_succeeded.
compiler::Label* check_failed =
i < vectors.length() - 1 ? &keep_looking : load_failed;
BuildOptimizedSubtypeRangeCheck(assembler, *vector, class_id_reg,
&load_tav, check_failed);
__ Bind(&load_tav);
__ LoadCompressedFieldFromOffset(instance_type_args_reg,
TypeTestABI::kInstanceReg, tav_offset);
if (i < vectors.length() - 1) {
__ Jump(load_succeeded);
__ Bind(&keep_looking);
}
// Free the CidRangeVector allocated by SplitByTypeArgumentsFieldOffset.
delete vector;
}
}
if (!not_checked.is_empty()) {
CommentSkippedClasses(assembler, type, type_class, not_checked);
}
return !type_argument_checks.is_empty();
}
// Generate code to verify that instance's type argument is a subtype of
@@ -477,9 +935,13 @@ void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
return;
}
// If the upper bound is a type parameter and its value is "dynamic"
// we always succeed.
compiler::Label is_dynamic;
if (assembler->EmittingComments()) {
TextBuffer buffer(128);
buffer.Printf("Generating check for type argument %" Pd ": ",
type_param_value_offset_i);
type_arg.PrintName(Object::kScrubbedName, &buffer);
__ Comment("%s", buffer.buffer());
}
if (type_arg.IsTypeParameter()) {
const TypeParameter& type_param = TypeParameter::Cast(type_arg);
const Register kTypeArgumentsReg =
@@ -487,9 +949,12 @@ void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
? TypeTestABI::kInstantiatorTypeArgumentsReg
: TypeTestABI::kFunctionTypeArgumentsReg;
compiler::Label is_dynamic;
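// A null TAV canonically represents all-dynamic type arguments, in which
// case the type parameter's value is dynamic and the check trivially
// succeeds.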
__ CompareObject(kTypeArgumentsReg, Object::null_object());
__ BranchIf(EQUAL, &is_dynamic);
__ BranchIf(EQUAL, &is_dynamic, compiler::Assembler::kNearJump);
// TODO(dartbug.com/46920): Currently only canonical equality (identity)
// is checked.
__ LoadCompressedFieldFromOffset(
TTSInternalRegs::kScratchReg, kTypeArgumentsReg,
compiler::target::TypeArguments::type_at_offset(type_param.index()));
@@ -499,6 +964,7 @@ void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
compiler::target::TypeArguments::type_at_offset(
type_param_value_offset_i));
__ BranchIf(NOT_EQUAL, check_failed);
__ Bind(&is_dynamic);
} else {
const Class& type_class = Class::Handle(type_arg.type_class());
const bool null_is_assignable = Instance::NullIsAssignableTo(type_arg);
@@ -525,9 +991,9 @@ void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
// Never is a bottom type.
__ CompareImmediate(TTSInternalRegs::kScratchReg, kNeverCid);
__ BranchIf(EQUAL, &is_subtype);
FlowGraphCompiler::GenerateCidRangesCheck(
assembler, TTSInternalRegs::kScratchReg, ranges, &is_subtype,
check_failed, true);
BuildOptimizedSubtypeRangeCheck(assembler, ranges,
TTSInternalRegs::kScratchReg, &is_subtype,
check_failed);
__ Bind(&is_subtype);
// Weak NNBD mode uses LEGACY_SUBTYPE which ignores nullability.
@@ -548,8 +1014,6 @@ void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
__ BranchIf(EQUAL, check_failed);
}
}
__ Bind(&is_dynamic);
}
void RegisterTypeArgumentsUse(const Function& function,


@@ -73,27 +73,29 @@ class TypeTestingStubGenerator {
static void BuildOptimizedSubtypeRangeCheck(compiler::Assembler* assembler,
const CidRangeVector& ranges,
bool smi_is_ok);
Register class_id_reg,
compiler::Label* check_succeeded,
compiler::Label* check_failed);
static void BuildOptimizedSubclassRangeCheckWithTypeArguments(
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Type& type,
const Class& type_class,
const TypeArguments& type_arguments);
const Class& type_class);
static void BuildOptimizedSubclassRangeCheckWithTypeArguments(
// Falls through or jumps to load_succeeded if the load succeeds, otherwise
// jumps to load_failed. Returns from the stub for checked cid ranges that
// do not require checking the instance type arguments. Returns whether any
// cid ranges require type argument checking.
static bool BuildLoadInstanceTypeArguments(
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Type& type,
const Class& type_class,
const TypeArguments& type_arguments,
const Register class_id_reg,
const Register instance_type_args_reg);
static void BuildOptimizedSubclassRangeCheck(compiler::Assembler* assembler,
const CidRangeVector& ranges,
compiler::Label* check_failed);
const Register instance_type_args_reg,
compiler::Label* load_succeeded,
compiler::Label* load_failed);
static void BuildOptimizedTypeArgumentValueCheck(
compiler::Assembler* assembler,


@@ -33,23 +33,32 @@ const bool FLAG_trace_type_testing_stub_tests = false;
class TraceStubInvocationScope : public ValueObject {
public:
TraceStubInvocationScope() : old_flag_value_(FLAG_trace_type_checks) {
TraceStubInvocationScope()
: old_trace_type_checks_(FLAG_trace_type_checks),
old_disassemble_stubs_(FLAG_disassemble_stubs) {
if (FLAG_trace_type_testing_stub_tests) {
#if defined(DEBUG)
FLAG_trace_type_checks = true;
#endif
#if defined(FORCE_INCLUDE_DISASSEMBLER) || !defined(PRODUCT)
FLAG_disassemble_stubs = true;
#endif
}
}
~TraceStubInvocationScope() {
if (FLAG_trace_type_testing_stub_tests) {
#if defined(DEBUG)
FLAG_trace_type_checks = old_flag_value_;
FLAG_trace_type_checks = old_trace_type_checks_;
#endif
#if defined(FORCE_INCLUDE_DISASSEMBLER) || !defined(PRODUCT)
FLAG_disassemble_stubs = old_disassemble_stubs_;
#endif
}
}
private:
const bool old_flag_value_;
const bool old_trace_type_checks_;
const bool old_disassemble_stubs_;
};
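// The scope saves the affected flag values at construction and restores
// them at destruction, so flag changes cannot leak into later tests.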
#define __ assembler->
@@ -184,7 +193,7 @@ static void CanonicalizeTAV(TypeArguments* tav) {
*tav = tav->Canonicalize(Thread::Current(), nullptr);
}
struct TTSTestCase : public ValueObject {
struct TTSTestCase {
const Instance& instance;
const TypeArguments& instantiator_tav;
const TypeArguments& function_tav;
@@ -255,7 +264,7 @@ struct TTSTestCase : public ValueObject {
if (cls.NumTypeArguments() == 0) {
return true;
}
return instance.GetTypeArguments() != other.instance.GetTypeArguments();
return instance.GetTypeArguments() == other.instance.GetTypeArguments();
}
bool HasSTCEntry(const SubtypeTestCache& cache,
@@ -291,6 +300,9 @@ struct TTSTestCase : public ValueObject {
Object::null_type_arguments(),
Object::null_type_arguments(), out_index, out_result);
}
private:
DISALLOW_ALLOCATION();
};
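// A sketch of typical usage, based on the helpers used in the tests below:
// a test case is brace-initialized with an instance and the instantiator
// and function TAVs, and may be wrapped in Failure() (expected to throw)
// or FalseNegative() (expected to fall back to the STC/runtime):
//
//   RunTTSTest(type, {obj, tav_null, tav_null});
//   RunTTSTest(type, Failure({obj, tav_null, tav_null}));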
// Inherits should_specialize from original.
@@ -392,8 +404,10 @@ class TTSTestState : public ValueObject {
last_tested_type_.SetTypeTestingStub(specializing_stub);
InvokeStubHelper(test_case,
/*is_lazy_specialization=*/test_case.should_specialize);
if (test_case.should_fail) {
// We only respecialize on successful checks.
if (test_case.should_fail || test_case.instance.IsNull()) {
// We only specialize if we go to runtime and the runtime check
// succeeds. The lazy specialization stub for nullable types has a
// special fast case for null that skips the runtime.
EXPECT(new_tts_stub_.ptr() == specializing_stub.ptr());
} else if (test_case.should_specialize) {
// Specializing test cases should never result in a default TTS.
@@ -512,9 +526,11 @@ class TTSTestState : public ValueObject {
if (test_case.should_fail) {
EXPECT(last_result_.IsError());
EXPECT(last_result_.IsUnhandledException());
const auto& error =
Instance::Handle(UnhandledException::Cast(last_result_).exception());
EXPECT(strstr(error.ToCString(), "_TypeError"));
if (last_result_.IsUnhandledException()) {
const auto& error = Instance::Handle(
UnhandledException::Cast(last_result_).exception());
EXPECT(strstr(error.ToCString(), "_TypeError"));
}
} else {
EXPECT(new_tts_stub_.ptr() != StubCode::LazySpecializeTypeTest().ptr());
ReportModifiedRegisters(modified_abi_regs());
@@ -543,6 +559,39 @@ class TTSTestState : public ValueObject {
}
}
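// Checks that every (check, result) pair recorded in old_cache still
// appears with the same result in new_cache, failing the test otherwise.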
void ReportMissingOrChangedEntries(const SubtypeTestCache& old_cache,
const SubtypeTestCache& new_cache) {
auto& cid_or_sig = Object::Handle(zone());
auto& type = AbstractType::Handle(zone());
auto& instance_type_args = TypeArguments::Handle(zone());
auto& instantiator_type_args = TypeArguments::Handle(zone());
auto& function_type_args = TypeArguments::Handle(zone());
auto& instance_parent_type_args = TypeArguments::Handle(zone());
auto& instance_delayed_type_args = TypeArguments::Handle(zone());
auto& old_result = Bool::Handle(zone());
auto& new_result = Bool::Handle(zone());
SafepointMutexLocker ml(
thread_->isolate_group()->subtype_test_cache_mutex());
for (intptr_t i = 0; i < old_cache.NumberOfChecks(); i++) {
old_cache.GetCheck(i, &cid_or_sig, &type, &instance_type_args,
&instantiator_type_args, &function_type_args,
&instance_parent_type_args,
&instance_delayed_type_args, &old_result);
intptr_t new_index;
if (!new_cache.HasCheck(
cid_or_sig, type, instance_type_args, instantiator_type_args,
function_type_args, instance_parent_type_args,
instance_delayed_type_args, &new_index, &new_result)) {
dart::Expect(__FILE__, __LINE__)
.Fail("New STC is missing check in old STC");
}
if (old_result.value() != new_result.value()) {
dart::Expect(__FILE__, __LINE__)
.Fail("New STC has different result from old STC");
}
}
}
void ReportUnexpectedSTCChanges(const TTSTestCase& test_case,
bool is_lazy_specialization = false) {
ASSERT(!test_case.should_be_false_negative ||
@@ -552,47 +601,27 @@ class TTSTestState : public ValueObject {
!is_lazy_specialization && test_case.should_be_false_negative;
if (should_update_stc && !had_stc_entry) {
// We should have changed the STC to include the new entry.
EXPECT((previous_stc_.IsNull() && !last_stc_.IsNull()) ||
previous_stc_.cache() != last_stc_.cache());
// We only should have added one check.
EXPECT_EQ(previous_stc_.IsNull() ? 1 : previous_stc_.NumberOfChecks() + 1,
last_stc_.NumberOfChecks());
if (!previous_stc_.IsNull()) {
// Make sure all the checks in the previous STC are still there.
auto& cid_or_sig = Object::Handle(zone());
auto& type = AbstractType::Handle(zone());
auto& instance_type_args = TypeArguments::Handle(zone());
auto& instantiator_type_args = TypeArguments::Handle(zone());
auto& function_type_args = TypeArguments::Handle(zone());
auto& instance_parent_type_args = TypeArguments::Handle(zone());
auto& instance_delayed_type_args = TypeArguments::Handle(zone());
auto& old_result = Bool::Handle(zone());
auto& new_result = Bool::Handle(zone());
SafepointMutexLocker ml(
thread_->isolate_group()->subtype_test_cache_mutex());
for (intptr_t i = 0; i < previous_stc_.NumberOfChecks(); i++) {
previous_stc_.GetCheck(0, &cid_or_sig, &type, &instance_type_args,
&instantiator_type_args, &function_type_args,
&instance_parent_type_args,
&instance_delayed_type_args, &old_result);
intptr_t new_index;
if (!last_stc_.HasCheck(
cid_or_sig, type, instance_type_args, instantiator_type_args,
function_type_args, instance_parent_type_args,
instance_delayed_type_args, &new_index, &new_result)) {
dart::Expect(__FILE__, __LINE__)
.Fail("New STC is missing check in old STC");
}
if (old_result.value() != new_result.value()) {
dart::Expect(__FILE__, __LINE__)
.Fail("New STC has different result from old STC");
}
EXPECT(!last_stc_.IsNull());
if (!last_stc_.IsNull()) {
EXPECT(previous_stc_.IsNull() ||
previous_stc_.cache() != last_stc_.cache());
// We should have added exactly one check.
EXPECT_EQ(
previous_stc_.IsNull() ? 1 : previous_stc_.NumberOfChecks() + 1,
last_stc_.NumberOfChecks());
if (!previous_stc_.IsNull()) {
// Make sure all the checks in the previous STC are still there.
ReportMissingOrChangedEntries(previous_stc_, last_stc_);
}
}
} else {
// Whatever STC existed before, if any, should be unchanged.
EXPECT((previous_stc_.IsNull() && last_stc_.IsNull()) ||
previous_stc_.cache() == last_stc_.cache());
if (previous_stc_.IsNull()) {
EXPECT(last_stc_.IsNull());
} else {
EXPECT(!last_stc_.IsNull() &&
previous_stc_.cache() == last_stc_.cache());
}
}
// False negatives should always be an STC hit when not lazily
@@ -602,12 +631,18 @@ class TTSTestState : public ValueObject {
if ((!should_update_stc && has_stc_entry) ||
(should_update_stc && !has_stc_entry)) {
TextBuffer buffer(128);
buffer.Printf("%s entry for %s, got:\n",
should_update_stc ? "Expected" : "Did not expect",
type_.ToCString());
for (intptr_t i = 0; i < last_stc_.NumberOfChecks(); i++) {
last_stc_.WriteCurrentEntryToBuffer(zone(), &buffer, i);
buffer.Printf(
"%s entry for %s, got:",
test_case.should_be_false_negative ? "Expected" : "Did not expect",
type_.ToCString());
if (last_stc_.IsNull()) {
buffer.AddString(" null");
} else {
buffer.AddString("\n");
for (intptr_t i = 0; i < last_stc_.NumberOfChecks(); i++) {
last_stc_.WriteCurrentEntryToBuffer(zone(), &buffer, i);
buffer.AddString("\n");
}
}
dart::Expect(__FILE__, __LINE__).Fail("%s", buffer.buffer());
}
@@ -629,14 +664,34 @@ class TTSTestState : public ValueObject {
Object& last_result_;
};
// Tests three situations in turn with the same test case:
// Tests three situations in turn, first with an appropriate null object
// test case and then with the given test case:
// 1) Install the lazy specialization stub for JIT and test.
// 2) Test again without installing a stub, so using the stub resulting from 1.
// 3) Install an eagerly specialized stub, similar to AOT mode but keeping any
// STC created by the earlier steps, and test.
static void RunTTSTest(const AbstractType& dst_type,
const TTSTestCase& test_case) {
bool null_should_fail = !Instance::NullIsAssignableTo(
dst_type, test_case.instantiator_tav, test_case.function_tav);
const TTSTestCase null_test(
Instance::Handle(), test_case.instantiator_tav, test_case.function_tav,
test_case.should_specialize, null_should_fail,
// Null is never a false negative.
/*should_be_false_negative=*/false,
// Since null is never a false negative, it can't trigger
// respecialization.
/*should_respecialize=*/false);
TTSTestState state(Thread::Current(), dst_type);
// First check the null case. This should _never_ create an STC.
state.InvokeLazilySpecializedStub(null_test);
state.InvokeExistingStub(null_test);
state.InvokeEagerlySpecializedStub(null_test);
EXPECT(state.last_stc().IsNull());
// Now run the actual test case.
state.InvokeLazilySpecializedStub(test_case);
state.InvokeExistingStub(test_case);
state.InvokeEagerlySpecializedStub(test_case);
@@ -824,24 +879,15 @@ ISOLATE_UNIT_TEST_CASE(TTS_SubtypeRangeCheck) {
auto& type_dynamic_t =
AbstractType::Handle(Type::New(class_i, tav_dynamic_t));
FinalizeAndCanonicalize(&type_dynamic_t);
RunTTSTest(type_dynamic_t, FalseNegative({obj_i, tav_object, tav_null,
/*should_specialize=*/false}));
RunTTSTest(type_dynamic_t, Failure({obj_i2, tav_object, tav_null,
/*should_specialize=*/false}));
RunTTSTest(type_dynamic_t, Failure({obj_base_int, tav_object, tav_null,
/*should_specialize=*/false}));
RunTTSTest(type_dynamic_t, Failure({obj_a, tav_object, tav_null,
/*should_specialize=*/false}));
RunTTSTest(type_dynamic_t, Failure({obj_a1, tav_object, tav_null,
/*should_specialize=*/false}));
RunTTSTest(type_dynamic_t, FalseNegative({obj_a2, tav_object, tav_null,
/*should_specialize=*/false}));
RunTTSTest(type_dynamic_t, Failure({obj_b, tav_object, tav_null,
/*should_specialize=*/false}));
RunTTSTest(type_dynamic_t, Failure({obj_b1, tav_object, tav_null,
/*should_specialize=*/false}));
RunTTSTest(type_dynamic_t, FalseNegative({obj_b2, tav_object, tav_null,
/*should_specialize=*/false}));
RunTTSTest(type_dynamic_t, FalseNegative({obj_i, tav_object, tav_null}));
RunTTSTest(type_dynamic_t, Failure({obj_i2, tav_object, tav_null}));
RunTTSTest(type_dynamic_t, Failure({obj_base_int, tav_object, tav_null}));
RunTTSTest(type_dynamic_t, Failure({obj_a, tav_object, tav_null}));
RunTTSTest(type_dynamic_t, Failure({obj_a1, tav_object, tav_null}));
RunTTSTest(type_dynamic_t, FalseNegative({obj_a2, tav_object, tav_null}));
RunTTSTest(type_dynamic_t, Failure({obj_b, tav_object, tav_null}));
RunTTSTest(type_dynamic_t, Failure({obj_b1, tav_object, tav_null}));
RunTTSTest(type_dynamic_t, FalseNegative({obj_b2, tav_object, tav_null}));
// obj as Object (with null safety)
auto isolate_group = IsolateGroup::Current();
@@ -978,8 +1024,10 @@ ISOLATE_UNIT_TEST_CASE(TTS_GenericSubtypeRangeCheck) {
RunTTSTest(type_base_b, {obj_base_int, tav_null, tav_null});
RunTTSTest(type_base_b, Failure({obj_i2, tav_null, tav_null}));
// We do not generate TTS for uninstantiated types if we would need to use
// subtype range checks for the class of the interface type.
// We generate TTS for implemented classes and uninstantiated types, but
// any class that implements the type class without matching both the
// instance TAV field offset and the type argument indices is guaranteed
// to be a false negative.
//
// obj as I<dynamic, String> // I is generic & implemented.
// obj as Base<A2<T>> // A2<T> is not instantiated.
@@ -993,11 +1041,9 @@ ISOLATE_UNIT_TEST_CASE(TTS_GenericSubtypeRangeCheck) {
type_i_dynamic_string = type_i_dynamic_string.ToNullability(
Nullability::kNonNullable, Heap::kNew);
FinalizeAndCanonicalize(&type_i_dynamic_string);
RunTTSTest(
type_i_dynamic_string,
FalseNegative({obj_i, tav_null, tav_null, /*should_specialize=*/false}));
RunTTSTest(type_i_dynamic_string, Failure({obj_base_int, tav_null, tav_null,
/*should_specialize=*/false}));
RunTTSTest(type_i_dynamic_string, {obj_i, tav_null, tav_null});
RunTTSTest(type_i_dynamic_string,
Failure({obj_base_int, tav_null, tav_null}));
// <...> as Base<A2<T>>
const auto& tav_t = TypeArguments::Handle(TypeArguments::New(1));
@@ -1035,6 +1081,112 @@ ISOLATE_UNIT_TEST_CASE(TTS_GenericSubtypeRangeCheck) {
/*should_specialize=*/false}));
}
ISOLATE_UNIT_TEST_CASE(TTS_Generic_Implements_Instantiated_Interface) {
const char* kScript =
R"(
abstract class I<T> {}
class B<R> implements I<String> {}
createBInt() => B<int>();
)";
const auto& root_library = Library::Handle(LoadTestScript(kScript));
const auto& class_i = Class::Handle(GetClass(root_library, "I"));
const auto& obj_b_int = Object::Handle(Invoke(root_library, "createBInt"));
const auto& tav_null = Object::null_type_arguments();
auto& tav_string = TypeArguments::Handle(TypeArguments::New(1));
tav_string.SetTypeAt(0, Type::Handle(Type::StringType()));
CanonicalizeTAV(&tav_string);
auto& type_i_string = Type::Handle(Type::New(class_i, tav_string));
FinalizeAndCanonicalize(&type_i_string);
const auto& type_i_t = Type::Handle(class_i.DeclarationType());
RunTTSTest(type_i_string, {obj_b_int, tav_null, tav_null});
// Optimized TTSes don't currently handle the case where the implemented
// type is known, but the type being checked requires instantiation at
// runtime.
RunTTSTest(type_i_t, FalseNegative({obj_b_int, tav_string, tav_null}));
}
ISOLATE_UNIT_TEST_CASE(TTS_Future) {
const char* kScript =
R"(
import "dart:async";
createFutureInt() => (() async => 3)();
)";
const auto& root_library = Library::Handle(LoadTestScript(kScript));
const auto& class_future = Class::Handle(GetClass(root_library, "Future"));
const auto& obj_futureint =
Object::Handle(Invoke(root_library, "createFutureInt"));
const auto& type_nullable_object = Type::Handle(
IsolateGroup::Current()->object_store()->nullable_object_type());
const auto& type_int = Type::Handle(Type::IntType());
const auto& type_string = Type::Handle(Type::StringType());
const auto& type_num = Type::Handle(Type::Number());
const auto& tav_null = Object::null_type_arguments();
auto& tav_dynamic = TypeArguments::Handle(TypeArguments::New(1));
tav_dynamic.SetTypeAt(0, Object::dynamic_type());
CanonicalizeTAV(&tav_dynamic);
auto& tav_nullable_object = TypeArguments::Handle(TypeArguments::New(1));
tav_nullable_object.SetTypeAt(0, type_nullable_object);
CanonicalizeTAV(&tav_nullable_object);
auto& tav_int = TypeArguments::Handle(TypeArguments::New(1));
tav_int.SetTypeAt(0, type_int);
CanonicalizeTAV(&tav_int);
auto& tav_num = TypeArguments::Handle(TypeArguments::New(1));
tav_num.SetTypeAt(0, type_num);
CanonicalizeTAV(&tav_num);
auto& tav_string = TypeArguments::Handle(TypeArguments::New(1));
tav_string.SetTypeAt(0, type_string);
CanonicalizeTAV(&tav_string);
auto& type_future = Type::Handle(Type::New(class_future, tav_null));
FinalizeAndCanonicalize(&type_future);
auto& type_future_dynamic =
Type::Handle(Type::New(class_future, tav_dynamic));
FinalizeAndCanonicalize(&type_future_dynamic);
auto& type_future_nullable_object =
Type::Handle(Type::New(class_future, tav_nullable_object));
FinalizeAndCanonicalize(&type_future_nullable_object);
auto& type_future_int = Type::Handle(Type::New(class_future, tav_int));
FinalizeAndCanonicalize(&type_future_int);
auto& type_future_string = Type::Handle(Type::New(class_future, tav_string));
FinalizeAndCanonicalize(&type_future_string);
auto& type_future_num = Type::Handle(Type::New(class_future, tav_num));
FinalizeAndCanonicalize(&type_future_num);
auto& type_future_t = Type::Handle(class_future.DeclarationType());
// Some more tests of generic implemented classes, using Future. Here,
// obj is an object of type Future<int>.
//
// obj as Future : Null type args (caught by TTS)
// obj as Future<dynamic> : Canonicalized to same as previous case.
// obj as Future<Object?> : Type arg is top type (caught by TTS)
// obj as Future<int> : Type arg is the same type (caught by TTS)
// obj as Future<String> : Type arg is not a subtype (error via runtime)
// obj as Future<num> : Type arg is a supertype that can be matched
// with cid range (caught by TTS)
// obj as Future<X>, : Type arg is a type parameter instantiated with
// X = int : ... the same type (caught by TTS)
// X = String : ... an unrelated type (error via runtime)
// X = num : ... a supertype (caught by STC/runtime)
RunTTSTest(type_future, {obj_futureint, tav_null, tav_null});
RunTTSTest(type_future_dynamic, {obj_futureint, tav_null, tav_null});
RunTTSTest(type_future_nullable_object, {obj_futureint, tav_null, tav_null});
RunTTSTest(type_future_int, {obj_futureint, tav_null, tav_null});
RunTTSTest(type_future_string, Failure({obj_futureint, tav_null, tav_null}));
RunTTSTest(type_future_num, {obj_futureint, tav_null, tav_null});
RunTTSTest(type_future_t, {obj_futureint, tav_int, tav_null});
RunTTSTest(type_future_t, Failure({obj_futureint, tav_string, tav_null}));
RunTTSTest(type_future_t, FalseNegative({obj_futureint, tav_num, tav_null}));
}
ISOLATE_UNIT_TEST_CASE(TTS_Regress40964) {
const char* kScript =
R"(
@@ -1122,15 +1274,91 @@ ISOLATE_UNIT_TEST_CASE(TTS_TypeParameter) {
// Check that we generate correct TTS for _Smi type.
ISOLATE_UNIT_TEST_CASE(TTS_Smi) {
const auto& root_library = Library::Handle(Library::CoreLibrary());
const auto& smi_class = Class::Handle(GetClass(root_library, "_Smi"));
ClassFinalizer::FinalizeTypesInClass(smi_class);
const auto& type_smi = Type::Handle(Type::SmiType());
const auto& tav_null = Object::null_type_arguments();
const auto& dst_type = AbstractType::Handle(smi_class.RareType());
const auto& tav_null = TypeArguments::Handle(TypeArguments::null());
// Test on some easy-to-make instances.
RunTTSTest(type_smi, {Smi::Handle(Smi::New(0)), tav_null, tav_null});
RunTTSTest(type_smi, Failure({Integer::Handle(Integer::New(kMaxInt64)),
tav_null, tav_null}));
RunTTSTest(type_smi,
Failure({Double::Handle(Double::New(1.0)), tav_null, tav_null}));
RunTTSTest(type_smi, Failure({Symbols::Empty(), tav_null, tav_null}));
RunTTSTest(type_smi,
Failure({Array::Handle(Array::New(1)), tav_null, tav_null}));
}
THR_Print("\nTesting that instance of _Smi is a subtype of _Smi\n");
RunTTSTest(dst_type, {Smi::Handle(Smi::New(0)), tav_null, tav_null});
// Check that we generate correct TTS for int type.
ISOLATE_UNIT_TEST_CASE(TTS_Int) {
const auto& type_int = Type::Handle(Type::IntType());
const auto& tav_null = Object::null_type_arguments();
// Test on some easy-to-make instances.
RunTTSTest(type_int, {Smi::Handle(Smi::New(0)), tav_null, tav_null});
RunTTSTest(type_int,
{Integer::Handle(Integer::New(kMaxInt64)), tav_null, tav_null});
RunTTSTest(type_int,
Failure({Double::Handle(Double::New(1.0)), tav_null, tav_null}));
RunTTSTest(type_int, Failure({Symbols::Empty(), tav_null, tav_null}));
RunTTSTest(type_int,
Failure({Array::Handle(Array::New(1)), tav_null, tav_null}));
}
// Check that we generate correct TTS for num type.
ISOLATE_UNIT_TEST_CASE(TTS_Num) {
const auto& type_num = Type::Handle(Type::Number());
const auto& tav_null = Object::null_type_arguments();
// Test on some easy-to-make instances.
RunTTSTest(type_num, {Smi::Handle(Smi::New(0)), tav_null, tav_null});
RunTTSTest(type_num,
{Integer::Handle(Integer::New(kMaxInt64)), tav_null, tav_null});
RunTTSTest(type_num, {Double::Handle(Double::New(1.0)), tav_null, tav_null});
RunTTSTest(type_num, Failure({Symbols::Empty(), tav_null, tav_null}));
RunTTSTest(type_num,
Failure({Array::Handle(Array::New(1)), tav_null, tav_null}));
}
// Check that we generate correct TTS for Double type.
ISOLATE_UNIT_TEST_CASE(TTS_Double) {
const auto& type_double = Type::Handle(Type::Double());
const auto& tav_null = Object::null_type_arguments();
// Test on some easy-to-make instances.
RunTTSTest(type_double,
Failure({Smi::Handle(Smi::New(0)), tav_null, tav_null}));
RunTTSTest(type_double, Failure({Integer::Handle(Integer::New(kMaxInt64)),
tav_null, tav_null}));
RunTTSTest(type_double,
{Double::Handle(Double::New(1.0)), tav_null, tav_null});
RunTTSTest(type_double, Failure({Symbols::Empty(), tav_null, tav_null}));
RunTTSTest(type_double,
Failure({Array::Handle(Array::New(1)), tav_null, tav_null}));
}
// Check that we generate correct TTS for Object type.
ISOLATE_UNIT_TEST_CASE(TTS_Object) {
const auto& type_obj =
Type::Handle(IsolateGroup::Current()->object_store()->object_type());
const auto& tav_null = Object::null_type_arguments();
auto make_test_case = [&](const Instance& instance) -> TTSTestCase {
if (IsolateGroup::Current()->use_strict_null_safety_checks()) {
// The stub for non-nullable Object should specialize; it only fails on
// null, which is already checked within RunTTSTest.
return {instance, tav_null, tav_null};
} else {
// The default type testing stub for nullable Object is the top type
// stub, so it should neither specialize _nor_ return false negatives.
return {instance, tav_null, tav_null, /*should_specialize=*/false};
}
};
// Test on some easy-to-make instances.
RunTTSTest(type_obj, make_test_case(Smi::Handle(Smi::New(0))));
RunTTSTest(type_obj,
make_test_case(Integer::Handle(Integer::New(kMaxInt64))));
RunTTSTest(type_obj, make_test_case(Double::Handle(Double::New(1.0))));
RunTTSTest(type_obj, make_test_case(Symbols::Empty()));
RunTTSTest(type_obj, make_test_case(Array::Handle(Array::New(1))));
}
// Check that we generate correct TTS for type Function (the non-FunctionType
@@ -1152,7 +1380,6 @@ ISOLATE_UNIT_TEST_CASE(TTS_Function) {
const auto& obj_f = Object::Handle(Invoke(root_library, "createF"));
const auto& obj_g = Object::Handle(Invoke(root_library, "createG"));
const auto& obj_h = Object::Handle(Invoke(root_library, "createH"));
const auto& obj_null = Instance::Handle();
const auto& tav_null = TypeArguments::Handle(TypeArguments::null());
const auto& type_function = Type::Handle(Type::DartFunctionType());
@@ -1160,11 +1387,6 @@ ISOLATE_UNIT_TEST_CASE(TTS_Function) {
RunTTSTest(type_function, {obj_f, tav_null, tav_null});
RunTTSTest(type_function, {obj_g, tav_null, tav_null});
RunTTSTest(type_function, {obj_h, tav_null, tav_null});
if (!thread->isolate_group()->use_strict_null_safety_checks()) {
RunTTSTest(type_function, {obj_null, tav_null, tav_null});
} else {
RunTTSTest(type_function, Failure({obj_null, tav_null, tav_null}));
}
const auto& class_a = Class::Handle(GetClass(root_library, "A"));
const auto& obj_a_int = Object::Handle(Invoke(root_library, "createAInt"));