Revert "Revert "Revert "Recognize and optimize a.runtimeType == b.runtimeType pattern."""

This reverts commit e495e100dd.

Commit breaks bots that are using Android devices (SIMARM is unaffected).

R=rmacnak@google.com
BUG=

Review URL: https://codereview.chromium.org/2449013004 .
This commit is contained in:
Vyacheslav Egorov 2016-10-26 16:53:32 +02:00
parent 4e7a31262e
commit 16e4f2f1c2
27 changed files with 65 additions and 1109 deletions

View file

@ -6,6 +6,8 @@ class _Double implements double {
factory _Double.fromInteger(int value)
native "Double_doubleFromInteger";
Type get runtimeType => double;
// TODO: Make a shared static method for hashCode and _identityHashCode
// when semantics are corrected as described in:
// https://github.com/dart-lang/sdk/issues/2884

View file

@ -6,6 +6,8 @@ abstract class _IntegerImplementation {
// The Dart class _Bigint extending _IntegerImplementation requires a
// default constructor.
Type get runtimeType => int;
num operator +(num other) {
var result = other._addFromInteger(this);
if (result != null) return result;

View file

@ -116,54 +116,12 @@ DEFINE_NATIVE_ENTRY(Object_noSuchMethod, 6) {
DEFINE_NATIVE_ENTRY(Object_runtimeType, 1) {
const Instance& instance = Instance::CheckedHandle(arguments->NativeArgAt(0));
if (instance.IsString()) {
return Type::StringType();
} else if (instance.IsInteger()) {
return Type::IntType();
} else if (instance.IsDouble()) {
return Type::Double();
}
// Special handling for following types outside this native.
ASSERT(!instance.IsString() && !instance.IsInteger() && !instance.IsDouble());
return instance.GetType();
}
DEFINE_NATIVE_ENTRY(Object_haveSameRuntimeType, 2) {
const Instance& left = Instance::CheckedHandle(arguments->NativeArgAt(0));
const Instance& right = Instance::CheckedHandle(arguments->NativeArgAt(1));
const intptr_t left_cid = left.GetClassId();
const intptr_t right_cid = right.GetClassId();
if (left_cid != right_cid) {
if (RawObject::IsIntegerClassId(left_cid)) {
return Bool::Get(RawObject::IsIntegerClassId(right_cid)).raw();
} else if (RawObject::IsStringClassId(left_cid)) {
return Bool::Get(RawObject::IsStringClassId(right_cid)).raw();
} else {
return Bool::False().raw();
}
}
const Class& cls = Class::Handle(left.clazz());
if (cls.IsClosureClass()) {
// TODO(vegorov): provide faster implementation for closure classes.
const AbstractType& left_type = AbstractType::Handle(left.GetType());
const AbstractType& right_type = AbstractType::Handle(right.GetType());
return Bool::Get(left_type.raw() == right_type.raw()).raw();
}
if (!cls.IsGeneric()) {
return Bool::True().raw();
}
const TypeArguments& left_type_arguments =
TypeArguments::Handle(left.GetTypeArguments());
const TypeArguments& right_type_arguments =
TypeArguments::Handle(right.GetTypeArguments());
return Bool::Get(left_type_arguments.Equals(right_type_arguments)).raw();
}
DEFINE_NATIVE_ENTRY(Object_instanceOf, 4) {
const Instance& instance =
Instance::CheckedHandle(zone, arguments->NativeArgAt(0));

View file

@ -53,8 +53,6 @@
@patch Type get runtimeType native "Object_runtimeType";
static bool _haveSameRuntimeType(a, b) native "Object_haveSameRuntimeType";
// Call this function instead of inlining instanceof, thus collecting
// type feedback and reducing code size of unoptimized code.
bool _instanceOf(instantiator_type_arguments, type, bool negate)

View file

@ -83,6 +83,8 @@ abstract class _StringBase {
"_StringBase can't be instaniated");
}
Type get runtimeType => String;
int get hashCode native "String_getHashCode";
bool get _isOneByte {

View file

@ -72,12 +72,10 @@ static void GetUniqueDynamicTarget(Isolate* isolate,
}
AotOptimizer::AotOptimizer(Precompiler* precompiler,
FlowGraph* flow_graph,
AotOptimizer::AotOptimizer(FlowGraph* flow_graph,
bool use_speculative_inlining,
GrowableArray<intptr_t>* inlining_black_list)
: FlowGraphVisitor(flow_graph->reverse_postorder()),
precompiler_(precompiler),
flow_graph_(flow_graph),
use_speculative_inlining_(use_speculative_inlining),
inlining_black_list_(inlining_black_list),
@ -126,47 +124,6 @@ void AotOptimizer::PopulateWithICData() {
}
bool AotOptimizer::RecognizeRuntimeTypeGetter(InstanceCallInstr* call) {
if ((precompiler_ == NULL) || !precompiler_->get_runtime_type_is_unique()) {
return false;
}
if (call->function_name().raw() != Symbols::GetRuntimeType().raw()) {
return false;
}
// There is only a single function Object.get:runtimeType that can be invoked
// by this call. Convert dynamic invocation to a static one.
const Class& cls = Class::Handle(Z, I->object_store()->object_class());
const Array& args_desc_array = Array::Handle(Z,
ArgumentsDescriptor::New(call->ArgumentCount(),
call->argument_names()));
ArgumentsDescriptor args_desc(args_desc_array);
const Function& function = Function::Handle(Z,
Resolver::ResolveDynamicForReceiverClass(
cls,
call->function_name(),
args_desc));
ASSERT(!function.IsNull());
ZoneGrowableArray<PushArgumentInstr*>* args =
new (Z) ZoneGrowableArray<PushArgumentInstr*>(
call->ArgumentCount());
for (intptr_t i = 0; i < call->ArgumentCount(); i++) {
args->Add(call->PushArgumentAt(i));
}
StaticCallInstr* static_call = new (Z) StaticCallInstr(
call->token_pos(),
Function::ZoneHandle(Z, function.raw()),
call->argument_names(),
args,
call->deopt_id());
static_call->set_result_cid(kTypeCid);
call->ReplaceWith(static_call, current_iterator());
return true;
}
// Optimize instance calls using cid. This is called after optimizer
// converted instance calls to instructions. Any remaining
// instance calls are either megamorphic calls, cannot be optimized or
@ -672,58 +629,6 @@ bool AotOptimizer::TryStringLengthOneEquality(InstanceCallInstr* call,
static bool SmiFitsInDouble() { return kSmiBits < 53; }
static bool IsGetRuntimeType(Definition* defn) {
StaticCallInstr* call = defn->AsStaticCall();
return (call != NULL) &&
(call->function().recognized_kind() ==
MethodRecognizer::kObjectRuntimeType);
}
// Recognize a.runtimeType == b.runtimeType and fold it into
// Object._haveSameRuntimeType(a, b).
// Note: this optimization is not speculative.
bool AotOptimizer::TryReplaceWithHaveSameRuntimeType(InstanceCallInstr* call) {
const ICData& ic_data = *call->ic_data();
ASSERT(ic_data.NumArgsTested() == 2);
ASSERT(call->ArgumentCount() == 2);
Definition* left = call->ArgumentAt(0);
Definition* right = call->ArgumentAt(1);
if (IsGetRuntimeType(left) && left->input_use_list()->IsSingleUse() &&
IsGetRuntimeType(right) && right->input_use_list()->IsSingleUse()) {
const Class& cls = Class::Handle(Z, I->object_store()->object_class());
const Function& have_same_runtime_type = Function::ZoneHandle(Z,
cls.LookupStaticFunctionAllowPrivate(Symbols::HaveSameRuntimeType()));
ASSERT(!have_same_runtime_type.IsNull());
ZoneGrowableArray<PushArgumentInstr*>* args =
new (Z) ZoneGrowableArray<PushArgumentInstr*>(2);
PushArgumentInstr* arg = new (Z) PushArgumentInstr(
new (Z) Value(left->ArgumentAt(0)));
InsertBefore(call, arg, NULL, FlowGraph::kEffect);
args->Add(arg);
arg = new (Z) PushArgumentInstr(
new (Z) Value(right->ArgumentAt(0)));
InsertBefore(call, arg, NULL, FlowGraph::kEffect);
args->Add(arg);
StaticCallInstr* static_call = new (Z) StaticCallInstr(
call->token_pos(),
have_same_runtime_type,
Object::null_array(), // argument_names
args,
call->deopt_id());
static_call->set_result_cid(kBoolCid);
ReplaceCall(call, static_call);
return true;
}
return false;
}
bool AotOptimizer::TryReplaceWithEqualityOp(InstanceCallInstr* call,
Token::Kind op_kind) {
const ICData& ic_data = *call->ic_data();
@ -735,7 +640,11 @@ bool AotOptimizer::TryReplaceWithEqualityOp(InstanceCallInstr* call,
intptr_t cid = kIllegalCid;
if (HasOnlyTwoOf(ic_data, kOneByteStringCid)) {
return TryStringLengthOneEquality(call, op_kind);
if (TryStringLengthOneEquality(call, op_kind)) {
return true;
} else {
return false;
}
} else if (HasOnlyTwoOf(ic_data, kSmiCid)) {
InsertBefore(call,
new(Z) CheckSmiInstr(new(Z) Value(left),
@ -801,8 +710,6 @@ bool AotOptimizer::TryReplaceWithEqualityOp(InstanceCallInstr* call,
cid = kSmiCid;
} else {
// Shortcut for equality with null.
// TODO(vegorov): this optimization is not speculative and should
// be hoisted out of this function.
ConstantInstr* right_const = right->AsConstant();
ConstantInstr* left_const = left->AsConstant();
if ((right_const != NULL && right_const->value().IsNull()) ||
@ -1850,14 +1757,6 @@ void AotOptimizer::VisitInstanceCall(InstanceCallInstr* instr) {
return;
}
if (RecognizeRuntimeTypeGetter(instr)) {
return;
}
if ((op_kind == Token::kEQ) && TryReplaceWithHaveSameRuntimeType(instr)) {
return;
}
const ICData& unary_checks =
ICData::ZoneHandle(Z, instr->ic_data()->AsUnaryClassChecks());
if (IsAllowedForInlining(instr->deopt_id()) &&

View file

@ -13,16 +13,13 @@ namespace dart {
class CSEInstructionMap;
template <typename T> class GrowableArray;
class ParsedFunction;
class Precompiler;
class RawBool;
class AotOptimizer : public FlowGraphVisitor {
public:
AotOptimizer(Precompiler* precompiler,
FlowGraph* flow_graph,
AotOptimizer(FlowGraph* flow_graph,
bool use_speculative_inlining,
GrowableArray<intptr_t>* inlining_black_list);
virtual ~AotOptimizer() {}
FlowGraph* flow_graph() const { return flow_graph_; }
@ -104,9 +101,6 @@ class AotOptimizer : public FlowGraphVisitor {
void ReplaceCall(Definition* call, Definition* replacement);
bool RecognizeRuntimeTypeGetter(InstanceCallInstr* call);
bool TryReplaceWithHaveSameRuntimeType(InstanceCallInstr* call);
bool InstanceCallNeedsClassCheck(InstanceCallInstr* call,
RawFunction::Kind kind) const;
@ -137,7 +131,6 @@ class AotOptimizer : public FlowGraphVisitor {
bool IsAllowedForInlining(intptr_t deopt_id);
Precompiler* precompiler_;
FlowGraph* flow_graph_;
const bool use_speculative_inlining_;

View file

@ -20,7 +20,6 @@ namespace dart {
V(Object_toString, 1) \
V(Object_noSuchMethod, 6) \
V(Object_runtimeType, 1) \
V(Object_haveSameRuntimeType, 2) \
V(Object_instanceOf, 4) \
V(Object_simpleInstanceOf, 2) \
V(Object_instanceOfNum, 2) \

View file

@ -855,8 +855,7 @@ bool CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
&inline_id_to_token_pos,
&caller_inline_id,
use_speculative_inlining,
/*inlining_black_list=*/ NULL,
/*precompiler=*/ NULL);
NULL);
inliner.Inline();
// Use lists are maintained and validated by the inliner.
DEBUG_ASSERT(flow_graph->VerifyUseLists());
@ -1460,8 +1459,7 @@ RawError* Compiler::CompileFunction(Thread* thread,
const Function& function) {
#ifdef DART_PRECOMPILER
if (FLAG_precompiled_mode) {
return Precompiler::CompileFunction(
/* precompiler = */ NULL, thread, thread->zone(), function);
return Precompiler::CompileFunction(thread, thread->zone(), function);
}
#endif
Isolate* isolate = thread->isolate();

View file

@ -84,14 +84,6 @@ void FlowGraph::ReplaceCurrentInstruction(ForwardInstructionIterator* iterator,
THR_Print("Removing v%" Pd ".\n", current_defn->ssa_temp_index());
}
}
if (current->ArgumentCount() != 0) {
// This is a call instruction. Must remove original push arguments.
for (intptr_t i = 0; i < current->ArgumentCount(); ++i) {
PushArgumentInstr* push = current->PushArgumentAt(i);
push->ReplaceUsesWith(push->value()->definition());
push->RemoveFromGraph();
}
}
iterator->RemoveCurrentFromGraph();
}

View file

@ -848,8 +848,7 @@ class CallSiteInliner : public ValueObject {
// Deopt-ids overlap between caller and callee.
if (FLAG_precompiled_mode) {
#ifdef DART_PRECOMPILER
AotOptimizer optimizer(inliner_->precompiler_,
callee_graph,
AotOptimizer optimizer(callee_graph,
inliner_->use_speculative_inlining_,
inliner_->inlining_black_list_);
optimizer.PopulateWithICData();
@ -1900,16 +1899,14 @@ FlowGraphInliner::FlowGraphInliner(
GrowableArray<TokenPosition>* inline_id_to_token_pos,
GrowableArray<intptr_t>* caller_inline_id,
bool use_speculative_inlining,
GrowableArray<intptr_t>* inlining_black_list,
Precompiler* precompiler)
GrowableArray<intptr_t>* inlining_black_list)
: flow_graph_(flow_graph),
inline_id_to_function_(inline_id_to_function),
inline_id_to_token_pos_(inline_id_to_token_pos),
caller_inline_id_(caller_inline_id),
trace_inlining_(ShouldTraceInlining(flow_graph)),
use_speculative_inlining_(use_speculative_inlining),
inlining_black_list_(inlining_black_list),
precompiler_(precompiler) {
inlining_black_list_(inlining_black_list) {
ASSERT(!use_speculative_inlining || (inlining_black_list != NULL));
}
@ -3813,36 +3810,6 @@ bool FlowGraphInliner::TryInlineRecognizedMethod(FlowGraph* flow_graph,
return false;
}
case MethodRecognizer::kObjectRuntimeType: {
Type& type = Type::ZoneHandle(Z);
if (RawObject::IsStringClassId(receiver_cid)) {
type = Type::StringType();
} else if (receiver_cid == kDoubleCid) {
type = Type::Double();
} else if (RawObject::IsIntegerClassId(receiver_cid)) {
type = Type::IntType();
} else if (receiver_cid != kClosureCid) {
const Class& cls = Class::Handle(Z,
flow_graph->isolate()->class_table()->At(receiver_cid));
if (!cls.IsGeneric()) {
type = cls.CanonicalType();
}
}
if (!type.IsNull()) {
*entry = new(Z) TargetEntryInstr(flow_graph->allocate_block_id(),
call->GetBlock()->try_index());
(*entry)->InheritDeoptTarget(Z, call);
*last = new(Z) ConstantInstr(type);
flow_graph->AppendTo(*entry, *last,
call->deopt_id() != Thread::kNoDeoptId ?
call->env() : NULL,
FlowGraph::kValue);
return true;
}
return false;
}
case MethodRecognizer::kOneByteStringSetAt: {
// This is an internal method, no need to check argument types nor
// range.

View file

@ -19,7 +19,6 @@ class InstanceCallInstr;
class Instruction;
class StaticCallInstr;
class TargetEntryInstr;
class Precompiler;
class FlowGraphInliner : ValueObject {
public:
@ -28,8 +27,7 @@ class FlowGraphInliner : ValueObject {
GrowableArray<TokenPosition>* inline_id_to_token_pos,
GrowableArray<intptr_t>* caller_inline_id,
bool use_speculative_inlining,
GrowableArray<intptr_t>* inlining_black_list,
Precompiler* precompiler);
GrowableArray<intptr_t>* inlining_black_list);
// The flow graph is destructively updated upon inlining.
void Inline();
@ -79,7 +77,6 @@ class FlowGraphInliner : ValueObject {
const bool trace_inlining_;
const bool use_speculative_inlining_;
GrowableArray<intptr_t>* inlining_black_list_;
Precompiler* precompiler_;
DISALLOW_COPY_AND_ASSIGN(FlowGraphInliner);
};

View file

@ -3231,67 +3231,6 @@ void PolymorphicInstanceCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
#endif
RawType* PolymorphicInstanceCallInstr::ComputeRuntimeType(
const ICData& ic_data) {
bool is_string = true;
bool is_integer = true;
bool is_double = true;
const intptr_t num_checks = ic_data.NumberOfChecks();
for (intptr_t i = 0; i < num_checks; i++) {
const intptr_t cid = ic_data.GetReceiverClassIdAt(i);
is_string = is_string && RawObject::IsStringClassId(cid);
is_integer = is_integer && RawObject::IsIntegerClassId(cid);
is_double = is_double && (cid == kDoubleCid);
}
if (is_string) {
return Type::StringType();
} else if (is_integer) {
return Type::IntType();
} else if (is_double) {
return Type::Double();
}
return Type::null();
}
Definition* PolymorphicInstanceCallInstr::Canonicalize(FlowGraph* flow_graph) {
if (!HasSingleRecognizedTarget() || with_checks()) {
return this;
}
const Function& target = Function::Handle(ic_data().GetTargetAt(0));
if (target.recognized_kind() == MethodRecognizer::kObjectRuntimeType) {
const AbstractType& type =
AbstractType::Handle(ComputeRuntimeType(ic_data()));
if (!type.IsNull()) {
return flow_graph->GetConstant(type);
}
}
return this;
}
Definition* StaticCallInstr::Canonicalize(FlowGraph* flow_graph) {
if (!FLAG_precompiled_mode) {
return this;
}
if (function().recognized_kind() == MethodRecognizer::kObjectRuntimeType) {
if (input_use_list() == NULL) {
// This function has only environment uses. In precompiled mode it is
// fine to remove it - because we will never deoptimize.
return flow_graph->constant_dead();
}
}
return this;
}
LocationSummary* StaticCallInstr::MakeLocationSummary(Zone* zone,
bool optimizing) const {
return MakeCallSummary(zone);

View file

@ -2965,10 +2965,6 @@ class PolymorphicInstanceCallInstr : public TemplateDefinition<0, Throws> {
virtual EffectSet Effects() const { return EffectSet::All(); }
virtual Definition* Canonicalize(FlowGraph* graph);
static RawType* ComputeRuntimeType(const ICData& ic_data);
PRINT_OPERANDS_TO_SUPPORT
private:
@ -3328,7 +3324,6 @@ class StaticCallInstr : public TemplateDefinition<0, Throws> {
DECLARE_INSTRUCTION(StaticCall)
virtual CompileType ComputeType() const;
virtual Definition* Canonicalize(FlowGraph* flow_graph);
// Accessors forwarded to the AST node.
const Function& function() const { return function_; }

View file

@ -1579,103 +1579,16 @@ void Intrinsifier::ObjectEquals(Assembler* assembler) {
}
static void RangeCheck(Assembler* assembler,
Register val,
Register tmp,
intptr_t low,
intptr_t high,
Condition cc,
Label* target) {
__ AddImmediate(tmp, val, -low);
__ CompareImmediate(tmp, high - low);
__ b(target, cc);
}
const Condition kIfNotInRange = HI;
const Condition kIfInRange = LS;
static void JumpIfInteger(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfInRange, target);
}
static void JumpIfNotInteger(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target);
}
static void JumpIfString(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler,
cid,
tmp,
kOneByteStringCid,
kExternalTwoByteStringCid,
kIfInRange,
target);
}
static void JumpIfNotString(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler,
cid,
tmp,
kOneByteStringCid,
kExternalTwoByteStringCid,
kIfNotInRange,
target);
}
// Return type quickly for simple types (not parameterized and not signature).
void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
Label fall_through, use_canonical_type, not_double, not_integer;
Label fall_through;
__ ldr(R0, Address(SP, 0 * kWordSize));
__ LoadClassIdMayBeSmi(R1, R0);
__ CompareImmediate(R1, kClosureCid);
__ b(&fall_through, EQ); // Instance is a closure.
__ CompareImmediate(R1, kNumPredefinedCids);
__ b(&use_canonical_type, HI);
__ CompareImmediate(R1, kDoubleCid);
__ b(&not_double, NE);
__ LoadIsolate(R0);
__ LoadFromOffset(kWord, R0, R0, Isolate::object_store_offset());
__ LoadFromOffset(kWord, R0, R0, ObjectStore::double_type_offset());
__ Ret();
__ Bind(&not_double);
JumpIfNotInteger(assembler, R1, R0, &not_integer);
__ LoadIsolate(R0);
__ LoadFromOffset(kWord, R0, R0, Isolate::object_store_offset());
__ LoadFromOffset(kWord, R0, R0, ObjectStore::int_type_offset());
__ Ret();
__ Bind(&not_integer);
JumpIfNotString(assembler, R1, R0, &use_canonical_type);
__ LoadIsolate(R0);
__ LoadFromOffset(kWord, R0, R0, Isolate::object_store_offset());
__ LoadFromOffset(kWord, R0, R0, ObjectStore::string_type_offset());
__ Ret();
__ Bind(&use_canonical_type);
__ LoadClassById(R2, R1);
// R2: class of instance (R0).
__ ldrh(R3, FieldAddress(R2, Class::num_type_arguments_offset()));
__ CompareImmediate(R3, 0);
__ b(&fall_through, NE);
@ -1689,61 +1602,6 @@ void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
}
void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) {
Label fall_through, different_cids, equal, not_equal, not_integer;
__ ldr(R0, Address(SP, 0 * kWordSize));
__ LoadClassIdMayBeSmi(R1, R0);
// Check if left hand size is a closure. Closures are handled in the runtime.
__ CompareImmediate(R1, kClosureCid);
__ b(&fall_through, EQ);
__ ldr(R0, Address(SP, 1 * kWordSize));
__ LoadClassIdMayBeSmi(R2, R0);
// Check whether class ids match. If class ids don't match objects can still
// have the same runtime type (e.g. multiple string implementation classes
// map to a single String type).
__ cmp(R1, Operand(R2));
__ b(&different_cids, NE);
// Objects have the same class and neither is a closure.
// Check if there are no type arguments. In this case we can return true.
// Otherwise fall through into the runtime to handle comparison.
__ LoadClassById(R3, R1);
__ ldrh(R3, FieldAddress(R3, Class::num_type_arguments_offset()));
__ CompareImmediate(R3, 0);
__ b(&fall_through, NE);
__ Bind(&equal);
__ LoadObject(R0, Bool::True());
__ Ret();
// Class ids are different. Check if we are comparing runtime types of
// two strings (with different representations) or two integers.
__ Bind(&different_cids);
__ CompareImmediate(R1, kNumPredefinedCids);
__ b(&not_equal, HI);
// Check if both are integers.
JumpIfNotInteger(assembler, R1, R0, &not_integer);
JumpIfInteger(assembler, R2, R0, &equal);
__ b(&not_equal);
__ Bind(&not_integer);
// Check if both are strings.
JumpIfNotString(assembler, R1, R0, &not_equal);
JumpIfString(assembler, R2, R0, &equal);
// Neither strings nor integers and have different class ids.
__ Bind(&not_equal);
__ LoadObject(R0, Bool::False());
__ Ret();
__ Bind(&fall_through);
}
void Intrinsifier::String_getHashCode(Assembler* assembler) {
__ ldr(R0, Address(SP, 0 * kWordSize));
__ ldr(R0, FieldAddress(R0, String::hash_offset()));

View file

@ -1653,103 +1653,16 @@ void Intrinsifier::ObjectEquals(Assembler* assembler) {
}
static void RangeCheck(Assembler* assembler,
Register val,
Register tmp,
intptr_t low,
intptr_t high,
Condition cc,
Label* target) {
__ AddImmediate(tmp, val, -low);
__ CompareImmediate(tmp, high - low);
__ b(target, cc);
}
const Condition kIfNotInRange = HI;
const Condition kIfInRange = LS;
static void JumpIfInteger(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfInRange, target);
}
static void JumpIfNotInteger(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target);
}
static void JumpIfString(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler,
cid,
tmp,
kOneByteStringCid,
kExternalTwoByteStringCid,
kIfInRange,
target);
}
static void JumpIfNotString(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler,
cid,
tmp,
kOneByteStringCid,
kExternalTwoByteStringCid,
kIfNotInRange,
target);
}
// Return type quickly for simple types (not parameterized and not signature).
void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
Label fall_through, use_canonical_type, not_double, not_integer;
Label fall_through;
__ ldr(R0, Address(SP, 0 * kWordSize));
__ LoadClassIdMayBeSmi(R1, R0);
__ CompareImmediate(R1, kClosureCid);
__ b(&fall_through, EQ); // Instance is a closure.
__ CompareImmediate(R1, kNumPredefinedCids);
__ b(&use_canonical_type, HI);
__ CompareImmediate(R1, kDoubleCid);
__ b(&not_double, NE);
__ LoadIsolate(R0);
__ LoadFromOffset(R0, R0, Isolate::object_store_offset());
__ LoadFromOffset(R0, R0, ObjectStore::double_type_offset());
__ ret();
__ Bind(&not_double);
JumpIfNotInteger(assembler, R1, R0, &not_integer);
__ LoadIsolate(R0);
__ LoadFromOffset(R0, R0, Isolate::object_store_offset());
__ LoadFromOffset(R0, R0, ObjectStore::int_type_offset());
__ ret();
__ Bind(&not_integer);
JumpIfNotString(assembler, R1, R0, &use_canonical_type);
__ LoadIsolate(R0);
__ LoadFromOffset(R0, R0, Isolate::object_store_offset());
__ LoadFromOffset(R0, R0, ObjectStore::string_type_offset());
__ ret();
__ Bind(&use_canonical_type);
__ LoadClassById(R2, R1);
// R2: class of instance (R0).
__ ldr(R3, FieldAddress(R2, Class::num_type_arguments_offset()), kHalfword);
__ CompareImmediate(R3, 0);
__ b(&fall_through, NE);
@ -1763,61 +1676,6 @@ void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
}
void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) {
Label fall_through, different_cids, equal, not_equal, not_integer;
__ ldr(R0, Address(SP, 0 * kWordSize));
__ LoadClassIdMayBeSmi(R1, R0);
// Check if left hand size is a closure. Closures are handled in the runtime.
__ CompareImmediate(R1, kClosureCid);
__ b(&fall_through, EQ);
__ ldr(R0, Address(SP, 1 * kWordSize));
__ LoadClassIdMayBeSmi(R2, R0);
// Check whether class ids match. If class ids don't match objects can still
// have the same runtime type (e.g. multiple string implementation classes
// map to a single String type).
__ cmp(R1, Operand(R2));
__ b(&different_cids, NE);
// Objects have the same class and neither is a closure.
// Check if there are no type arguments. In this case we can return true.
// Otherwise fall through into the runtime to handle comparison.
__ LoadClassById(R3, R1);
__ ldr(R3, FieldAddress(R3, Class::num_type_arguments_offset()), kHalfword);
__ CompareImmediate(R3, 0);
__ b(&fall_through, NE);
__ Bind(&equal);
__ LoadObject(R0, Bool::True());
__ ret();
// Class ids are different. Check if we are comparing runtime types of
// two strings (with different representations) or two integers.
__ Bind(&different_cids);
__ CompareImmediate(R1, kNumPredefinedCids);
__ b(&not_equal, HI);
// Check if both are integers.
JumpIfNotInteger(assembler, R1, R0, &not_integer);
JumpIfInteger(assembler, R2, R0, &equal);
__ b(&not_equal);
__ Bind(&not_integer);
// Check if both are strings.
JumpIfNotString(assembler, R1, R0, &not_equal);
JumpIfString(assembler, R2, R0, &equal);
// Neither strings nor integers and have different class ids.
__ Bind(&not_equal);
__ LoadObject(R0, Bool::False());
__ ret();
__ Bind(&fall_through);
}
void Intrinsifier::String_getHashCode(Assembler* assembler) {
Label fall_through;
__ ldr(R0, Address(SP, 0 * kWordSize));

View file

@ -1709,105 +1709,16 @@ void Intrinsifier::ObjectEquals(Assembler* assembler) {
}
static void RangeCheck(Assembler* assembler,
Register reg,
intptr_t low,
intptr_t high,
Condition cc,
Label* target) {
__ subl(reg, Immediate(low));
__ cmpl(reg, Immediate(high - low));
__ j(cc, target);
}
const Condition kIfNotInRange = ABOVE;
const Condition kIfInRange = BELOW_EQUAL;
static void JumpIfInteger(Assembler* assembler,
Register cid,
Label* target) {
RangeCheck(assembler, cid, kSmiCid, kBigintCid, kIfInRange, target);
}
static void JumpIfNotInteger(Assembler* assembler,
Register cid,
Label* target) {
RangeCheck(assembler, cid, kSmiCid, kBigintCid, kIfNotInRange, target);
}
static void JumpIfString(Assembler* assembler,
Register cid,
Label* target) {
RangeCheck(assembler,
cid,
kOneByteStringCid,
kExternalTwoByteStringCid,
kIfInRange,
target);
}
static void JumpIfNotString(Assembler* assembler,
Register cid,
Label* target) {
RangeCheck(assembler,
cid,
kOneByteStringCid,
kExternalTwoByteStringCid,
kIfNotInRange,
target);
}
// Return type quickly for simple types (not parameterized and not signature).
void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
Label fall_through, use_canonical_type, not_double, not_integer;
Label fall_through;
__ movl(EAX, Address(ESP, + 1 * kWordSize));
__ LoadClassIdMayBeSmi(EDI, EAX);
__ cmpl(EDI, Immediate(kClosureCid));
__ j(EQUAL, &fall_through); // Instance is a closure.
__ cmpl(EDI, Immediate(kNumPredefinedCids));
__ j(ABOVE, &use_canonical_type);
// If object is a instance of _Double return double type.
__ cmpl(EDI, Immediate(kDoubleCid));
__ j(NOT_EQUAL, &not_double);
__ LoadIsolate(EAX);
__ movl(EAX, Address(EAX, Isolate::object_store_offset()));
__ movl(EAX, Address(EAX, ObjectStore::double_type_offset()));
__ ret();
__ Bind(&not_double);
// If object is an integer (smi, mint or bigint) return int type.
__ movl(EAX, EDI);
JumpIfNotInteger(assembler, EAX, &not_integer);
__ LoadIsolate(EAX);
__ movl(EAX, Address(EAX, Isolate::object_store_offset()));
__ movl(EAX, Address(EAX, ObjectStore::int_type_offset()));
__ ret();
__ Bind(&not_integer);
// If object is a string (one byte, two byte or external variants) return
// string type.
__ movl(EAX, EDI);
JumpIfNotString(assembler, EAX, &use_canonical_type);
__ LoadIsolate(EAX);
__ movl(EAX, Address(EAX, Isolate::object_store_offset()));
__ movl(EAX, Address(EAX, ObjectStore::string_type_offset()));
__ ret();
// Object is neither double, nor integer, nor string.
__ Bind(&use_canonical_type);
__ j(EQUAL, &fall_through, Assembler::kNearJump); // Instance is a closure.
__ LoadClassById(EBX, EDI);
// EBX: class of instance (EAX).
__ movzxw(EDI, FieldAddress(EBX, Class::num_type_arguments_offset()));
__ cmpl(EDI, Immediate(0));
__ j(NOT_EQUAL, &fall_through, Assembler::kNearJump);
@ -1820,70 +1731,6 @@ void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
}
void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) {
Label fall_through, different_cids, equal, not_equal, not_integer;
__ movl(EAX, Address(ESP, + 1 * kWordSize));
__ LoadClassIdMayBeSmi(EDI, EAX);
// Check if left hand size is a closure. Closures are handled in the runtime.
__ cmpl(EDI, Immediate(kClosureCid));
__ j(EQUAL, &fall_through);
__ movl(EAX, Address(ESP, + 2 * kWordSize));
__ LoadClassIdMayBeSmi(EBX, EAX);
// Check whether class ids match. If class ids don't match objects can still
// have the same runtime type (e.g. multiple string implementation classes
// map to a single String type).
__ cmpl(EDI, EBX);
__ j(NOT_EQUAL, &different_cids);
// Objects have the same class and neither is a closure.
// Check if there are no type arguments. In this case we can return true.
// Otherwise fall through into the runtime to handle comparison.
__ LoadClassById(EBX, EDI);
__ movzxw(EBX, FieldAddress(EBX, Class::num_type_arguments_offset()));
__ cmpl(EBX, Immediate(0));
__ j(NOT_EQUAL, &fall_through, Assembler::kNearJump);
__ Bind(&equal);
__ LoadObject(EAX, Bool::True());
__ ret();
// Class ids are different. Check if we are comparing runtime types of
// two strings (with different representations) or two integers.
__ Bind(&different_cids);
__ cmpl(EDI, Immediate(kNumPredefinedCids));
__ j(ABOVE_EQUAL, &not_equal);
__ movl(EAX, EDI);
JumpIfNotInteger(assembler, EAX, &not_integer);
// First object is an integer. Check if the second is an integer too.
// Otherwise types are unequal because only integers have the same runtime
// type as other integers.
JumpIfInteger(assembler, EBX, &equal);
__ jmp(&not_equal);
__ Bind(&not_integer);
// Check if the first object is a string. If it is not then
// objects don't have the same runtime type because they have
// different class ids and they are not strings or integers.
JumpIfNotString(assembler, EDI, &not_equal);
// First object is a string. Check if the second is a string too.
JumpIfString(assembler, EBX, &equal);
// Strings only have the same runtime type as other strings.
// Fall-through to the not equal case.
__ Bind(&not_equal);
__ LoadObject(EAX, Bool::False());
__ ret();
__ Bind(&fall_through);
}
void Intrinsifier::String_getHashCode(Assembler* assembler) {
Label fall_through;
__ movl(EAX, Address(ESP, + 1 * kWordSize)); // String object.

View file

@ -1689,109 +1689,15 @@ void Intrinsifier::ObjectEquals(Assembler* assembler) {
}
enum RangeCheckCondition {
kIfNotInRange, kIfInRange
};
static void RangeCheck(Assembler* assembler,
Register val,
Register tmp,
intptr_t low,
intptr_t high,
RangeCheckCondition cc,
Label* target) {
__ AddImmediate(tmp, val, -low);
if (cc == kIfInRange) {
__ BranchUnsignedLessEqual(tmp, Immediate(high - low), target);
} else {
ASSERT(cc == kIfNotInRange);
__ BranchUnsignedGreater(tmp, Immediate(high - low), target);
}
}
static void JumpIfInteger(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfInRange, target);
}
static void JumpIfNotInteger(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler, cid, tmp, kSmiCid, kBigintCid, kIfNotInRange, target);
}
static void JumpIfString(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler,
cid,
tmp,
kOneByteStringCid,
kExternalTwoByteStringCid,
kIfInRange,
target);
}
static void JumpIfNotString(Assembler* assembler,
Register cid,
Register tmp,
Label* target) {
RangeCheck(assembler,
cid,
tmp,
kOneByteStringCid,
kExternalTwoByteStringCid,
kIfNotInRange,
target);
}
// Return type quickly for simple types (not parameterized and not signature).
void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
Label fall_through, use_canonical_type, not_integer, not_double;
Label fall_through;
__ lw(T0, Address(SP, 0 * kWordSize));
__ LoadClassIdMayBeSmi(T1, T0);
// Closures are handled in the runtime.
__ BranchEqual(T1, Immediate(kClosureCid), &fall_through);
__ BranchUnsignedGreaterEqual(
T1, Immediate(kNumPredefinedCids), &use_canonical_type);
__ BranchNotEqual(T1, Immediate(kDoubleCid), &not_double);
// Object is a double.
__ LoadIsolate(T1);
__ LoadFromOffset(T1, T1, Isolate::object_store_offset());
__ LoadFromOffset(V0, T1, ObjectStore::double_type_offset());
__ Ret();
__ Bind(&not_double);
JumpIfNotInteger(assembler, T1, T2, &not_integer);
// Object is an integer.
__ LoadIsolate(T1);
__ LoadFromOffset(T1, T1, Isolate::object_store_offset());
__ LoadFromOffset(V0, T1, ObjectStore::int_type_offset());
__ Ret();
__ Bind(&not_integer);
JumpIfNotString(assembler, T1, T2, &use_canonical_type);
// Object is a string.
__ LoadIsolate(T1);
__ LoadFromOffset(T1, T1, Isolate::object_store_offset());
__ LoadFromOffset(V0, T1, ObjectStore::string_type_offset());
__ Ret();
__ Bind(&use_canonical_type);
__ LoadClassById(T2, T1);
// T2: class of instance (T0).
__ lhu(T1, FieldAddress(T2, Class::num_type_arguments_offset()));
__ BranchNotEqual(T1, Immediate(0), &fall_through);
@ -1803,59 +1709,6 @@ void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
}
// MIPS intrinsic for Object._haveSameRuntimeType: decides whether the two
// arguments (at SP[1] and SP[0]) share a runtime type without building the
// Type objects. Leaves a Bool in V0, or falls through to the runtime/native
// implementation for the cases it cannot decide here (closures, classes
// with type arguments).
void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) {
  Label fall_through, different_cids, equal, not_equal, not_integer;
  // Class id of the first argument (LoadClassIdMayBeSmi handles Smis too).
  __ lw(T0, Address(SP, 0 * kWordSize));
  __ LoadClassIdMayBeSmi(T1, T0);
  // Closures are handled in the runtime.
  __ BranchEqual(T1, Immediate(kClosureCid), &fall_through);
  // Class id of the second argument.
  __ lw(T0, Address(SP, 1 * kWordSize));
  __ LoadClassIdMayBeSmi(T2, T0);
  // Check whether class ids match. If class ids don't match objects can still
  // have the same runtime type (e.g. multiple string implementation classes
  // map to a single String type).
  __ BranchNotEqual(T1, T2, &different_cids);
  // Objects have the same class and neither is a closure.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(T2, T1);
  __ lhu(T1, FieldAddress(T2, Class::num_type_arguments_offset()));
  __ BranchNotEqual(T1, Immediate(0), &fall_through);
  __ Bind(&equal);
  __ LoadObject(V0, Bool::True());
  __ Ret();
  // Class ids are different. Check if we are comparing runtime types of
  // two strings (with different representations) or two integers.
  __ Bind(&different_cids);
  // Distinct non-predefined (user) classes never share a runtime type.
  __ BranchUnsignedGreaterEqual(
      T1, Immediate(kNumPredefinedCids), &not_equal);
  // Check if both are integers. T0 is free as scratch at this point.
  JumpIfNotInteger(assembler, T1, T0, &not_integer);
  JumpIfInteger(assembler, T2, T0, &equal);
  __ b(&not_equal);
  __ Bind(&not_integer);
  // Check if both are strings.
  JumpIfNotString(assembler, T1, T0, &not_equal);
  JumpIfString(assembler, T2, T0, &equal);
  // Neither strings nor integers and have different class ids.
  __ Bind(&not_equal);
  __ LoadObject(V0, Bool::False());
  __ Ret();
  __ Bind(&fall_through);
}
void Intrinsifier::String_getHashCode(Assembler* assembler) {
Label fall_through;
__ lw(T0, Address(SP, 0 * kWordSize));

View file

@ -1573,106 +1573,19 @@ void Intrinsifier::ObjectEquals(Assembler* assembler) {
}
// Emits a branch to |target| taken when the class id in |reg| satisfies
// |cc| with respect to the inclusive interval [low, high]. Biasing by
// |low| reduces the range test to one unsigned comparison against the
// interval length. NOTE: clobbers |reg| — callers copy the cid into a
// scratch register first.
static void RangeCheck(Assembler* assembler,
                       Register reg,
                       intptr_t low,
                       intptr_t high,
                       Condition cc,
                       Label* target) {
  const intptr_t span = high - low;
  __ subq(reg, Immediate(low));
  __ cmpq(reg, Immediate(span));
  __ j(cc, target);
}
// After RangeCheck's subq/cmpq, an unsigned ABOVE means the value lies
// outside [low, high] and BELOW_EQUAL means it lies inside.
const Condition kIfNotInRange = ABOVE;
const Condition kIfInRange = BELOW_EQUAL;
// Branches to |target| when the class id in |cid| is an integer class id.
// Assumes the integer cids (Smi, Mint, Bigint) are contiguous in
// [kSmiCid, kBigintCid] — per the predefined class id ordering.
// NOTE: clobbers |cid| (RangeCheck subtracts in place).
static void JumpIfInteger(Assembler* assembler,
                          Register cid,
                          Label* target) {
  RangeCheck(assembler, cid, kSmiCid, kBigintCid, kIfInRange, target);
}
// Branches to |target| when the class id in |cid| is NOT an integer class
// id (complement of JumpIfInteger). NOTE: clobbers |cid|.
static void JumpIfNotInteger(Assembler* assembler,
                             Register cid,
                             Label* target) {
  RangeCheck(assembler, cid, kSmiCid, kBigintCid, kIfNotInRange, target);
}
// Branches to |target| when |cid| is a string class id. Assumes all
// string representation cids (one-byte, two-byte, and the external
// variants) are contiguous in [kOneByteStringCid,
// kExternalTwoByteStringCid]. NOTE: clobbers |cid|.
static void JumpIfString(Assembler* assembler, Register cid, Label* target) {
  RangeCheck(assembler, cid, kOneByteStringCid, kExternalTwoByteStringCid,
             kIfInRange, target);
}
// Branches to |target| when |cid| is NOT a string class id (complement of
// JumpIfString). NOTE: clobbers |cid|.
static void JumpIfNotString(Assembler* assembler, Register cid, Label* target) {
  RangeCheck(assembler, cid, kOneByteStringCid, kExternalTwoByteStringCid,
             kIfNotInRange, target);
}
// Return type quickly for simple types (not parameterized and not signature).
void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
Label fall_through, use_canonical_type, not_integer, not_double;
Label fall_through;
__ movq(RAX, Address(RSP, + 1 * kWordSize));
__ LoadClassIdMayBeSmi(RCX, RAX);
// RCX: untagged cid of instance (RAX).
__ cmpq(RCX, Immediate(kClosureCid));
__ j(EQUAL, &fall_through); // Instance is a closure.
__ j(EQUAL, &fall_through, Assembler::kNearJump); // Instance is a closure.
__ cmpl(RCX, Immediate(kNumPredefinedCids));
__ j(ABOVE, &use_canonical_type);
// If object is a instance of _Double return double type.
__ cmpl(RCX, Immediate(kDoubleCid));
__ j(NOT_EQUAL, &not_double);
__ LoadIsolate(RAX);
__ movq(RAX, Address(RAX, Isolate::object_store_offset()));
__ movq(RAX, Address(RAX, ObjectStore::double_type_offset()));
__ ret();
__ Bind(&not_double);
// If object is an integer (smi, mint or bigint) return int type.
__ movl(RAX, RCX);
JumpIfNotInteger(assembler, RAX, &not_integer);
__ LoadIsolate(RAX);
__ movq(RAX, Address(RAX, Isolate::object_store_offset()));
__ movq(RAX, Address(RAX, ObjectStore::int_type_offset()));
__ ret();
__ Bind(&not_integer);
// If object is a string (one byte, two byte or external variants) return
// string type.
__ movq(RAX, RCX);
JumpIfNotString(assembler, RAX, &use_canonical_type);
__ LoadIsolate(RAX);
__ movq(RAX, Address(RAX, Isolate::object_store_offset()));
__ movq(RAX, Address(RAX, ObjectStore::string_type_offset()));
__ ret();
// Object is neither double, nor integer, nor string.
__ Bind(&use_canonical_type);
__ LoadClassById(RDI, RCX);
// RDI: class of instance (RAX).
__ movzxw(RCX, FieldAddress(RDI, Class::num_type_arguments_offset()));
__ cmpq(RCX, Immediate(0));
__ j(NOT_EQUAL, &fall_through, Assembler::kNearJump);
@ -1685,70 +1598,6 @@ void Intrinsifier::ObjectRuntimeType(Assembler* assembler) {
}
// x64 intrinsic for Object._haveSameRuntimeType: decides whether the two
// arguments (at RSP[2] and RSP[1]) share a runtime type without building
// the Type objects. Leaves a Bool in RAX, or falls through to the
// runtime/native implementation for the cases it cannot decide here
// (closures, classes with type arguments).
void Intrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler) {
  Label fall_through, different_cids, equal, not_equal, not_integer;
  // Class id of the first argument (LoadClassIdMayBeSmi handles Smis too).
  __ movq(RAX, Address(RSP, + 1 * kWordSize));
  __ LoadClassIdMayBeSmi(RCX, RAX);
  // Check if left hand side is a closure. Closures are handled in the runtime.
  __ cmpq(RCX, Immediate(kClosureCid));
  __ j(EQUAL, &fall_through);
  // Class id of the second argument.
  __ movq(RAX, Address(RSP, + 2 * kWordSize));
  __ LoadClassIdMayBeSmi(RDX, RAX);
  // Check whether class ids match. If class ids don't match objects can still
  // have the same runtime type (e.g. multiple string implementation classes
  // map to a single String type).
  __ cmpq(RCX, RDX);
  __ j(NOT_EQUAL, &different_cids);
  // Objects have the same class and neither is a closure.
  // Check if there are no type arguments. In this case we can return true.
  // Otherwise fall through into the runtime to handle comparison.
  __ LoadClassById(RDI, RCX);
  __ movzxw(RCX, FieldAddress(RDI, Class::num_type_arguments_offset()));
  __ cmpq(RCX, Immediate(0));
  __ j(NOT_EQUAL, &fall_through, Assembler::kNearJump);
  __ Bind(&equal);
  __ LoadObject(RAX, Bool::True());
  __ ret();
  // Class ids are different. Check if we are comparing runtime types of
  // two strings (with different representations) or two integers.
  __ Bind(&different_cids);
  // Distinct non-predefined (user) classes never share a runtime type.
  __ cmpq(RCX, Immediate(kNumPredefinedCids));
  __ j(ABOVE_EQUAL, &not_equal);
  // Copy the cid: the JumpIf* helpers clobber the register they test.
  __ movq(RAX, RCX);
  JumpIfNotInteger(assembler, RAX, &not_integer);
  // First object is an integer. Check if the second is an integer too.
  // Otherwise types are unequal because only integers have the same runtime
  // type as other integers.
  JumpIfInteger(assembler, RDX, &equal);
  __ jmp(&not_equal);
  __ Bind(&not_integer);
  // Check if the first object is a string. If it is not then
  // objects don't have the same runtime type because they have
  // different class ids and they are not strings or integers.
  JumpIfNotString(assembler, RCX, &not_equal);
  // First object is a string. Check if the second is a string too.
  JumpIfString(assembler, RDX, &equal);
  // Strings only have the same runtime type as other strings.
  // Fall-through to the not equal case.
  __ Bind(&not_equal);
  __ LoadObject(RAX, Bool::False());
  __ ret();
  __ Bind(&fall_through);
}
void Intrinsifier::String_getHashCode(Assembler* assembler) {
Label fall_through;
__ movq(RAX, Address(RSP, + 1 * kWordSize)); // String object.

View file

@ -224,9 +224,6 @@ class Isolate : public BaseIsolate {
ObjectStore* object_store() const { return object_store_; }
void set_object_store(ObjectStore* value) { object_store_ = value; }
static intptr_t object_store_offset() {
return OFFSET_OF(Isolate, object_store_);
}
ApiState* api_state() const { return api_state_; }
void set_api_state(ApiState* value) { api_state_ = value; }

View file

@ -1707,20 +1707,13 @@ void JitOptimizer::VisitInstanceCall(InstanceCallInstr* instr) {
// we don't have one target.
const Function& target =
Function::Handle(Z, unary_checks.GetTargetAt(0));
if (target.recognized_kind() == MethodRecognizer::kObjectRuntimeType) {
has_one_target =
PolymorphicInstanceCallInstr::ComputeRuntimeType(unary_checks) !=
Type::null();
} else {
const bool polymorphic_target =
MethodRecognizer::PolymorphicTarget(target);
has_one_target = !polymorphic_target;
}
const bool polymorphic_target = MethodRecognizer::PolymorphicTarget(target);
has_one_target = !polymorphic_target;
}
if (has_one_target) {
const Function& target = Function::Handle(Z, unary_checks.GetTargetAt(0));
const RawFunction::Kind function_kind = target.kind();
RawFunction::Kind function_kind =
Function::Handle(Z, unary_checks.GetTargetAt(0)).kind();
if (!flow_graph()->InstanceCallNeedsClassCheck(instr, function_kind)) {
PolymorphicInstanceCallInstr* call =
new(Z) PolymorphicInstanceCallInstr(instr, unary_checks,

View file

@ -167,7 +167,6 @@ namespace dart {
V(_RegExp, _ExecuteMatch, RegExp_ExecuteMatch, Dynamic, 0x6036d7fa) \
V(Object, ==, ObjectEquals, Bool, 0x11662ed8) \
V(Object, get:runtimeType, ObjectRuntimeType, Type, 0x00e7c26b) \
V(Object, _haveSameRuntimeType, ObjectHaveSameRuntimeType, Bool, 0x72aad7e2) \
V(_StringBase, get:hashCode, String_getHashCode, Smi, 0x78c2eb88) \
V(_StringBase, get:isEmpty, StringBaseIsEmpty, Bool, 0x74c21fca) \
V(_StringBase, _substringMatches, StringBaseSubstringMatches, Bool, \
@ -490,7 +489,6 @@ namespace dart {
V(_TypedList, _setFloat64, ByteArrayBaseSetFloat64, 0x4765edda) \
V(_TypedList, _setFloat32x4, ByteArrayBaseSetFloat32x4, 0x7cca4533) \
V(_TypedList, _setInt32x4, ByteArrayBaseSetInt32x4, 0x7631bdbc) \
V(Object, get:runtimeType, ObjectRuntimeType, 0x00e7c26b) \
// Forward declarations.
class Function;

View file

@ -72,12 +72,12 @@ DECLARE_FLAG(bool, trace_reload);
DECLARE_FLAG(bool, write_protect_code);
DECLARE_FLAG(bool, support_externalizable_strings);
static const char* const kGetterPrefix = "get:";
static const intptr_t kGetterPrefixLength = strlen(kGetterPrefix);
static const char* const kSetterPrefix = "set:";
static const intptr_t kSetterPrefixLength = strlen(kSetterPrefix);
// A cache of VM heap allocated preinitialized empty ic data entry arrays.
RawArray* ICData::cached_icdata_arrays_[kCachedICDataArrayCount];

View file

@ -81,9 +81,6 @@ class ObjectStore {
void set_int_type(const Type& value) {
int_type_ = value.raw();
}
static intptr_t int_type_offset() {
return OFFSET_OF(ObjectStore, int_type_);
}
RawClass* integer_implementation_class() const {
return integer_implementation_class_;
@ -96,16 +93,14 @@ class ObjectStore {
void set_smi_class(const Class& value) { smi_class_ = value.raw(); }
RawType* smi_type() const { return smi_type_; }
void set_smi_type(const Type& value) { smi_type_ = value.raw(); }
void set_smi_type(const Type& value) { smi_type_ = value.raw();
}
RawClass* double_class() const { return double_class_; }
void set_double_class(const Class& value) { double_class_ = value.raw(); }
RawType* double_type() const { return double_type_; }
void set_double_type(const Type& value) { double_type_ = value.raw(); }
static intptr_t double_type_offset() {
return OFFSET_OF(ObjectStore, double_type_);
}
RawClass* mint_class() const { return mint_class_; }
void set_mint_class(const Class& value) { mint_class_ = value.raw(); }
@ -120,9 +115,6 @@ class ObjectStore {
void set_string_type(const Type& value) {
string_type_ = value.raw();
}
static intptr_t string_type_offset() {
return OFFSET_OF(ObjectStore, string_type_);
}
RawClass* compiletime_error_class() const {
return compiletime_error_class_;

View file

@ -152,11 +152,9 @@ class DartPrecompilationPipeline : public DartCompilationPipeline {
class PrecompileParsedFunctionHelper : public ValueObject {
public:
PrecompileParsedFunctionHelper(Precompiler* precompiler,
ParsedFunction* parsed_function,
PrecompileParsedFunctionHelper(ParsedFunction* parsed_function,
bool optimized)
: precompiler_(precompiler),
parsed_function_(parsed_function),
: parsed_function_(parsed_function),
optimized_(optimized),
thread_(Thread::Current()) {
}
@ -173,7 +171,6 @@ class PrecompileParsedFunctionHelper : public ValueObject {
FlowGraphCompiler* graph_compiler,
FlowGraph* flow_graph);
Precompiler* precompiler_;
ParsedFunction* parsed_function_;
const bool optimized_;
Thread* const thread_;
@ -317,8 +314,7 @@ Precompiler::Precompiler(Thread* thread, bool reset_fields) :
types_to_retain_(),
consts_to_retain_(),
field_type_map_(),
error_(Error::Handle()),
get_runtime_type_is_unique_(false) {
error_(Error::Handle()) {
}
@ -487,8 +483,9 @@ void Precompiler::PrecompileStaticInitializers() {
void Precompiler::PrecompileConstructors() {
class ConstructorVisitor : public FunctionVisitor {
public:
explicit ConstructorVisitor(Precompiler* precompiler, Zone* zone)
: precompiler_(precompiler), zone_(zone) {
explicit ConstructorVisitor(Zone* zone, FieldTypeMap* map)
: zone_(zone), field_type_map_(map) {
ASSERT(map != NULL);
}
void Visit(const Function& function) {
if (!function.IsGenerativeConstructor()) return;
@ -500,20 +497,18 @@ void Precompiler::PrecompileConstructors() {
if (FLAG_trace_precompiler) {
THR_Print("Precompiling constructor %s\n", function.ToCString());
}
CompileFunction(precompiler_,
Thread::Current(),
CompileFunction(Thread::Current(),
zone_,
function,
precompiler_->field_type_map());
field_type_map_);
}
private:
Precompiler* precompiler_;
Zone* zone_;
FieldTypeMap* field_type_map_;
};
HANDLESCOPE(T);
ConstructorVisitor visitor(this, zone_);
ConstructorVisitor visitor(zone_, &field_type_map_);
VisitFunctions(&visitor);
FieldTypeMap::Iterator it(field_type_map_.GetIterator());
@ -801,7 +796,7 @@ void Precompiler::ProcessFunction(const Function& function) {
ASSERT(!function.is_abstract());
ASSERT(!function.IsRedirectingFactory());
error_ = CompileFunction(this, thread_, zone_, function);
error_ = CompileFunction(thread_, zone_, function);
if (!error_.IsNull()) {
Jump(error_);
}
@ -1139,8 +1134,7 @@ RawFunction* Precompiler::CompileStaticInitializer(const Field& field,
parsed_function->AllocateVariables();
DartPrecompilationPipeline pipeline(zone.GetZone());
PrecompileParsedFunctionHelper helper(/* precompiler = */ NULL,
parsed_function,
PrecompileParsedFunctionHelper helper(parsed_function,
/* optimized = */ true);
bool success = helper.Compile(&pipeline);
ASSERT(success);
@ -1241,8 +1235,7 @@ RawObject* Precompiler::ExecuteOnce(SequenceNode* fragment) {
// Non-optimized code generator.
DartPrecompilationPipeline pipeline(Thread::Current()->zone());
PrecompileParsedFunctionHelper helper(/* precompiler = */ NULL,
parsed_function,
PrecompileParsedFunctionHelper helper(parsed_function,
/* optimized = */ false);
helper.Compile(&pipeline);
Code::Handle(func.unoptimized_code()).set_var_descriptors(
@ -1518,10 +1511,6 @@ void Precompiler::CollectDynamicFunctionNames() {
}
}
farray ^= table.GetOrNull(Symbols::GetRuntimeType());
get_runtime_type_is_unique_ = !farray.IsNull() && (farray.Length() == 1);
if (FLAG_print_unique_targets) {
UniqueFunctionsSet::Iterator unique_iter(&functions_set);
while (unique_iter.MoveNext()) {
@ -2830,8 +2819,7 @@ bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
caller_inline_id.Add(-1);
CSTAT_TIMER_SCOPE(thread(), graphoptimizer_timer);
AotOptimizer optimizer(precompiler_,
flow_graph,
AotOptimizer optimizer(flow_graph,
use_speculative_inlining,
&inlining_black_list);
optimizer.PopulateWithICData();
@ -2873,8 +2861,7 @@ bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
&inline_id_to_token_pos,
&caller_inline_id,
use_speculative_inlining,
&inlining_black_list,
precompiler_);
&inlining_black_list);
inliner.Inline();
// Use lists are maintained and validated by the inliner.
DEBUG_ASSERT(flow_graph->VerifyUseLists());
@ -3253,8 +3240,7 @@ bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
}
static RawError* PrecompileFunctionHelper(Precompiler* precompiler,
CompilationPipeline* pipeline,
static RawError* PrecompileFunctionHelper(CompilationPipeline* pipeline,
const Function& function,
bool optimized) {
// Check that we optimize, except if the function is not optimizable.
@ -3296,8 +3282,7 @@ static RawError* PrecompileFunctionHelper(Precompiler* precompiler,
num_tokens_after - num_tokens_before);
}
PrecompileParsedFunctionHelper helper(
precompiler, parsed_function, optimized);
PrecompileParsedFunctionHelper helper(parsed_function, optimized);
const bool success = helper.Compile(pipeline);
if (!success) {
// Encountered error.
@ -3345,8 +3330,7 @@ static RawError* PrecompileFunctionHelper(Precompiler* precompiler,
}
RawError* Precompiler::CompileFunction(Precompiler* precompiler,
Thread* thread,
RawError* Precompiler::CompileFunction(Thread* thread,
Zone* zone,
const Function& function,
FieldTypeMap* field_type_map) {
@ -3356,7 +3340,7 @@ RawError* Precompiler::CompileFunction(Precompiler* precompiler,
ASSERT(FLAG_precompiled_mode);
const bool optimized = function.IsOptimizable(); // False for natives.
DartPrecompilationPipeline pipeline(zone, field_type_map);
return PrecompileFunctionHelper(precompiler, &pipeline, function, optimized);
return PrecompileFunctionHelper(&pipeline, function, optimized);
}
#endif // DART_PRECOMPILER

View file

@ -356,8 +356,7 @@ class Precompiler : public ValueObject {
Dart_QualifiedFunctionName embedder_entry_points[],
bool reset_fields);
static RawError* CompileFunction(Precompiler* precompiler,
Thread* thread,
static RawError* CompileFunction(Thread* thread,
Zone* zone,
const Function& function,
FieldTypeMap* field_type_map = NULL);
@ -368,15 +367,6 @@ class Precompiler : public ValueObject {
static RawFunction* CompileStaticInitializer(const Field& field,
bool compute_type);
// Returns true if get:runtimeType is not overloaded by any class.
bool get_runtime_type_is_unique() const {
return get_runtime_type_is_unique_;
}
FieldTypeMap* field_type_map() {
return &field_type_map_;
}
private:
Precompiler(Thread* thread, bool reset_fields);
@ -476,8 +466,6 @@ class Precompiler : public ValueObject {
InstanceSet consts_to_retain_;
FieldTypeMap field_type_map_;
Error& error_;
bool get_runtime_type_is_unique_;
};

View file

@ -403,8 +403,6 @@ class ObjectPointerVisitor;
V(_name, "_name") \
V(_classRangeCheck, "_classRangeCheck") \
V(_classRangeCheckNegative, "_classRangeCheckNegative") \
V(GetRuntimeType, "get:runtimeType") \
V(HaveSameRuntimeType, "_haveSameRuntimeType") \
// Contains a list of frequently used strings in a canonicalized form. This