Reapply r18377; it was reverted due to an unrelated bug that it surfaced.

Remove SminessPropagator and the old FlowGraphTypePropagator, along with all associated infrastructure and fields.

Replace multiple fields (result_cid_, propagated_cid_, propagated_type_, reaching_cid_) with a single field of type CompileType, which represents an element of the type-analysis lattice and incorporates information about a value's nullability, concrete class id, and abstract supertype. This ensures that the propagated cid and type are always in sync and complement each other.

Implement a new FlowGraphTypePropagator that propagates types over the CompileType lattice.
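For illustration, here is a toy model of the lattice element described above (a simplified sketch, not the VM class; the enum values and field names are stand-ins):

// Toy model of a CompileType lattice element: nullability, concrete class
// id and abstract supertype live in one object, so they cannot diverge.
enum Cid { kIllegalCid, kNullCid, kSmiCid, kDynamicCid };

struct CompileTypeSketch {
  bool is_nullable;        // May the value be null?
  Cid cid;                 // Concrete class id; kDynamicCid if unknown.
  const char* super_type;  // Stands in for an AbstractType handle.

  // Mirrors CompileType::ToCid() in this commit: a nullable concrete cid
  // collapses to kDynamicCid, since one cid cannot mean "C or null".
  Cid ToCid() const {
    if (cid == kNullCid || cid == kDynamicCid) return cid;
    return is_nullable ? kDynamicCid : cid;
  }
};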

R=fschneider@google.com

Review URL: https://codereview.chromium.org//12260008

git-svn-id: https://dart.googlecode.com/svn/branches/bleeding_edge/dart@18456 260f80e4-7a28-3924-810f-c04153c831b5
Author: vegorov@google.com
Date: 2013-02-13 17:16:35 +0000
Parent: d6e257abd7
Commit: baa0347253
17 changed files with 1189 additions and 1552 deletions


@@ -21,6 +21,7 @@
#include "vm/flow_graph_compiler.h"
#include "vm/flow_graph_inliner.h"
#include "vm/flow_graph_optimizer.h"
#include "vm/flow_graph_type_propagator.h"
#include "vm/il_printer.h"
#include "vm/longjump.h"
#include "vm/object.h"
@@ -53,7 +54,7 @@ DEFINE_FLAG(bool, verify_compiler, false,
DECLARE_FLAG(bool, print_flow_graph);
DECLARE_FLAG(bool, print_flow_graph_optimized);
DECLARE_FLAG(bool, trace_failed_optimization_attempts);
DECLARE_FLAG(bool, trace_type_propagation);
// Compile a function. Should be called only if the function has not been compiled.
// Arg0: function object.
@@ -190,15 +191,25 @@ static bool CompileParsedFunctionHelper(const ParsedFunction& parsed_function,
// Use lists are maintained and validated by the inliner.
}
if (FLAG_trace_type_propagation) {
OS::Print("Before type propagation:\n");
FlowGraphPrinter printer(*flow_graph);
printer.PrintBlocks();
}
// Propagate types and eliminate more type tests.
if (FLAG_propagate_types) {
FlowGraphTypePropagator propagator(flow_graph);
propagator.PropagateTypes();
propagator.Propagate();
}
if (FLAG_trace_type_propagation) {
OS::Print("After type propagation:\n");
FlowGraphPrinter printer(*flow_graph);
printer.PrintBlocks();
}
// Propagate sminess from CheckSmi to phis.
flow_graph->ComputeUseLists();
optimizer.PropagateSminess();
// Use propagated class-ids to optimize further.
optimizer.ApplyClassIds();


@@ -762,12 +762,7 @@ bool EffectGraphVisitor::CanSkipTypeCheck(intptr_t token_pos,
return false;
}
// Propagated types are not set yet.
// More checks will possibly be eliminated during type propagation.
bool is_null, is_instance;
const bool eliminated =
(value->CanComputeIsNull(&is_null) && is_null) ||
(value->CanComputeIsInstanceOf(dst_type, &is_instance) && is_instance);
const bool eliminated = value->Type()->IsAssignableTo(dst_type);
if (FLAG_trace_type_check_elimination) {
FlowGraphPrinter::PrintTypeCheck(owner()->parsed_function(),
token_pos,

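The IsAssignableTo predicate used here is not shown in this hunk; presumably it wraps the CanComputeIsInstanceOf query that this commit adds to CompileType (see flow_graph_type_propagator.cc below), roughly:

// Hypothetical sketch, not part of this diff: skipping a check is sound
// only when the instance-of question is decidable and the answer is
// positive; null is allowed because null is assignable to any type.
bool CompileType::IsAssignableTo(const AbstractType& type) {
  bool is_instance = false;
  return CanComputeIsInstanceOf(type, /* is_nullable = */ true, &is_instance)
      && is_instance;
}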

@@ -24,6 +24,7 @@ DEFINE_FLAG(bool, unbox_mints, true, "Optimize 64-bit integer arithmetic.");
DECLARE_FLAG(int, optimization_counter_threshold);
DECLARE_FLAG(bool, print_ast);
DECLARE_FLAG(bool, print_scopes);
DECLARE_FLAG(bool, eliminate_type_checks);
FlowGraphCompiler::~FlowGraphCompiler() {
@@ -561,6 +562,16 @@ void FlowGraphCompiler::GenerateAssertAssignable(intptr_t token_pos,
__ cmpl(EAX, raw_null);
__ j(EQUAL, &is_assignable);
if (!FLAG_eliminate_type_checks) {
// If type checks are not eliminated during graph building, then
// a transition sentinel can be seen here.
const Immediate& raw_transition_sentinel =
Immediate(reinterpret_cast<intptr_t>(
Object::transition_sentinel().raw()));
__ cmpl(EAX, raw_transition_sentinel);
__ j(EQUAL, &is_assignable);
}
// Generate throw new TypeError() if the type is malformed.
if (dst_type.IsMalformed()) {
const Error& error = Error::Handle(dst_type.malformed_error());


@@ -23,7 +23,7 @@ DEFINE_FLAG(bool, trap_on_deoptimization, false, "Trap on deoptimization.");
DECLARE_FLAG(int, optimization_counter_threshold);
DECLARE_FLAG(bool, print_ast);
DECLARE_FLAG(bool, print_scopes);
DECLARE_FLAG(bool, use_sse41);
DECLARE_FLAG(bool, eliminate_type_checks);
FlowGraphCompiler::~FlowGraphCompiler() {
@@ -561,6 +561,13 @@ void FlowGraphCompiler::GenerateAssertAssignable(intptr_t token_pos,
__ cmpq(RAX, raw_null);
__ j(EQUAL, &is_assignable);
if (!FLAG_eliminate_type_checks) {
// If type checks are not eliminated during graph building, then
// a transition sentinel can be seen here.
__ CompareObject(RAX, Object::transition_sentinel());
__ j(EQUAL, &is_assignable);
}
// Generate throw new TypeError() if the type is malformed.
if (dst_type.IsMalformed()) {
const Error& error = Error::Handle(dst_type.malformed_error());


@@ -84,7 +84,7 @@ bool FlowGraphOptimizer::TryCreateICData(InstanceCallInstr* call) {
GrowableArray<intptr_t> class_ids(call->ic_data()->num_args_tested());
ASSERT(call->ic_data()->num_args_tested() <= call->ArgumentCount());
for (intptr_t i = 0; i < call->ic_data()->num_args_tested(); i++) {
intptr_t cid = call->ArgumentAt(i)->value()->ResultCid();
intptr_t cid = call->ArgumentAt(i)->value()->Type()->ToCid();
class_ids.Add(cid);
}
// TODO(srdjan): Test for other class_ids > 1.
@@ -150,7 +150,7 @@ void FlowGraphOptimizer::SpecializePolymorphicInstanceCall(
return; // Already specialized.
}
const intptr_t receiver_cid = call->ArgumentAt(0)->value()->ResultCid();
const intptr_t receiver_cid = call->ArgumentAt(0)->value()->Type()->ToCid();
if (receiver_cid == kDynamicCid) {
return; // No information about the receiver was inferred.
}
@@ -231,7 +231,7 @@ void FlowGraphOptimizer::InsertConversion(Representation from,
Definition* converted = NULL;
if ((from == kTagged) && (to == kUnboxedMint)) {
ASSERT((deopt_target != NULL) ||
(use->definition()->GetPropagatedCid() == kDoubleCid));
(use->Type()->ToCid() == kDoubleCid));
const intptr_t deopt_id = (deopt_target != NULL) ?
deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId;
converted = new UnboxIntegerInstr(new Value(use->definition()), deopt_id);
@@ -251,7 +251,7 @@ void FlowGraphOptimizer::InsertConversion(Representation from,
const intptr_t deopt_id = (deopt_target != NULL) ?
deopt_target->DeoptimizationTarget() : Isolate::kNoDeoptId;
ASSERT((deopt_target != NULL) ||
(use->definition()->GetPropagatedCid() == kDoubleCid));
(use->Type()->ToCid() == kDoubleCid));
ConstantInstr* constant = use->definition()->AsConstant();
if ((constant != NULL) && constant->value().IsSmi()) {
const double dbl_val = Smi::Cast(constant->value()).AsDoubleValue();
@@ -313,7 +313,7 @@ void FlowGraphOptimizer::SelectRepresentations() {
for (intptr_t i = 0; i < join_entry->phis()->length(); ++i) {
PhiInstr* phi = (*join_entry->phis())[i];
if (phi == NULL) continue;
if (phi->GetPropagatedCid() == kDoubleCid) {
if (phi->Type()->ToCid() == kDoubleCid) {
phi->set_representation(kUnboxedDouble);
}
}
@@ -1965,192 +1965,13 @@ void FlowGraphOptimizer::VisitStrictCompare(StrictCompareInstr* instr) {
// If one of the inputs is not a boxable number (Mint, Double, Bigint), no
// need for number checks.
if (!MayBeBoxableNumber(instr->left()->ResultCid()) ||
!MayBeBoxableNumber(instr->right()->ResultCid())) {
if (!MayBeBoxableNumber(instr->left()->Type()->ToCid()) ||
!MayBeBoxableNumber(instr->right()->Type()->ToCid())) {
instr->set_needs_number_check(false);
}
}
// SminessPropagator ensures that CheckSmis are eliminated across phis.
class SminessPropagator : public ValueObject {
public:
explicit SminessPropagator(FlowGraph* flow_graph)
: flow_graph_(flow_graph),
known_smis_(new BitVector(flow_graph_->current_ssa_temp_index())),
rollback_checks_(10),
in_worklist_(NULL),
worklist_(0) { }
void Propagate();
private:
void PropagateSminessRecursive(BlockEntryInstr* block);
void AddToWorklist(PhiInstr* phi);
PhiInstr* RemoveLastFromWorklist();
void ProcessPhis();
FlowGraph* flow_graph_;
BitVector* known_smis_;
GrowableArray<intptr_t> rollback_checks_;
BitVector* in_worklist_;
GrowableArray<PhiInstr*> worklist_;
DISALLOW_COPY_AND_ASSIGN(SminessPropagator);
};
void SminessPropagator::AddToWorklist(PhiInstr* phi) {
if (in_worklist_ == NULL) {
in_worklist_ = new BitVector(flow_graph_->current_ssa_temp_index());
}
if (!in_worklist_->Contains(phi->ssa_temp_index())) {
in_worklist_->Add(phi->ssa_temp_index());
worklist_.Add(phi);
}
}
PhiInstr* SminessPropagator::RemoveLastFromWorklist() {
PhiInstr* phi = worklist_.RemoveLast();
ASSERT(in_worklist_->Contains(phi->ssa_temp_index()));
in_worklist_->Remove(phi->ssa_temp_index());
return phi;
}
static bool IsDefinitelySmiPhi(PhiInstr* phi) {
for (intptr_t i = 0; i < phi->InputCount(); i++) {
const intptr_t cid = phi->InputAt(i)->ResultCid();
if (cid != kSmiCid) {
return false;
}
}
return true;
}
static bool IsPossiblySmiPhi(PhiInstr* phi) {
for (intptr_t i = 0; i < phi->InputCount(); i++) {
const intptr_t cid = phi->InputAt(i)->ResultCid();
if ((cid != kSmiCid) && (cid != kDynamicCid)) {
return false;
}
}
return true;
}
void SminessPropagator::ProcessPhis() {
// First optimistically mark all possible smi-phis: phi is possibly a smi if
// its operands are either smis or phis in the worklist.
for (intptr_t i = 0; i < worklist_.length(); i++) {
PhiInstr* phi = worklist_[i];
ASSERT(phi->GetPropagatedCid() == kDynamicCid);
phi->SetPropagatedCid(kSmiCid);
// Append all phis that use this phi and can potentially be smi to the
// end of worklist.
for (Value* use = phi->input_use_list();
use != NULL;
use = use->next_use()) {
PhiInstr* phi_use = use->instruction()->AsPhi();
if ((phi_use != NULL) &&
(phi_use->GetPropagatedCid() == kDynamicCid) &&
IsPossiblySmiPhi(phi_use)) {
AddToWorklist(phi_use);
}
}
}
// Now unmark phis that are not definitely smi: that is have only
// smi operands.
while (!worklist_.is_empty()) {
PhiInstr* phi = RemoveLastFromWorklist();
if (!IsDefinitelySmiPhi(phi)) {
// Phi result is not a smi. Propagate this fact to phis that depend on it.
phi->SetPropagatedCid(kDynamicCid);
for (Value* use = phi->input_use_list();
use != NULL;
use = use->next_use()) {
PhiInstr* phi_use = use->instruction()->AsPhi();
if ((phi_use != NULL) && (phi_use->GetPropagatedCid() == kSmiCid)) {
AddToWorklist(phi_use);
}
}
}
}
}
void SminessPropagator::PropagateSminessRecursive(BlockEntryInstr* block) {
const intptr_t rollback_point = rollback_checks_.length();
for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
Instruction* instr = it.Current();
if (instr->IsCheckSmi()) {
const intptr_t value_ssa_index =
instr->InputAt(0)->definition()->ssa_temp_index();
if (!known_smis_->Contains(value_ssa_index)) {
known_smis_->Add(value_ssa_index);
rollback_checks_.Add(value_ssa_index);
}
} else if (instr->IsBranch()) {
for (intptr_t i = 0; i < instr->InputCount(); i++) {
Value* use = instr->InputAt(i);
if (known_smis_->Contains(use->definition()->ssa_temp_index())) {
use->set_reaching_cid(kSmiCid);
}
}
}
}
for (intptr_t i = 0; i < block->dominated_blocks().length(); ++i) {
PropagateSminessRecursive(block->dominated_blocks()[i]);
}
if (block->last_instruction()->SuccessorCount() == 1 &&
block->last_instruction()->SuccessorAt(0)->IsJoinEntry()) {
JoinEntryInstr* join =
block->last_instruction()->SuccessorAt(0)->AsJoinEntry();
intptr_t pred_index = join->IndexOfPredecessor(block);
ASSERT(pred_index >= 0);
if (join->phis() != NULL) {
for (intptr_t i = 0; i < join->phis()->length(); ++i) {
PhiInstr* phi = (*join->phis())[i];
if (phi == NULL) continue;
Value* use = phi->InputAt(pred_index);
const intptr_t value_ssa_index = use->definition()->ssa_temp_index();
if (known_smis_->Contains(value_ssa_index) &&
(phi->GetPropagatedCid() != kSmiCid)) {
use->set_reaching_cid(kSmiCid);
AddToWorklist(phi);
}
}
}
}
for (intptr_t i = rollback_point; i < rollback_checks_.length(); i++) {
known_smis_->Remove(rollback_checks_[i]);
}
rollback_checks_.TruncateTo(rollback_point);
}
void SminessPropagator::Propagate() {
PropagateSminessRecursive(flow_graph_->graph_entry());
ProcessPhis();
}
void FlowGraphOptimizer::PropagateSminess() {
SminessPropagator propagator(flow_graph_);
propagator.Propagate();
}
// Range analysis for smi values.
class RangeAnalysis : public ValueObject {
public:
@@ -2268,7 +2089,7 @@ void RangeAnalysis::CollectSmiValues() {
Instruction* current = instr_it.Current();
Definition* defn = current->AsDefinition();
if (defn != NULL) {
if ((defn->GetPropagatedCid() == kSmiCid) &&
if ((defn->Type()->ToCid() == kSmiCid) &&
(defn->ssa_temp_index() != -1)) {
smi_values_.Add(defn);
}
@@ -2281,7 +2102,7 @@ void RangeAnalysis::CollectSmiValues() {
if (join != NULL) {
for (PhiIterator phi_it(join); !phi_it.Done(); phi_it.Advance()) {
PhiInstr* current = phi_it.Current();
if (current->GetPropagatedCid() == kSmiCid) {
if (current->Type()->ToCid() == kSmiCid) {
smi_values_.Add(current);
}
}
@@ -2736,261 +2557,6 @@ void FlowGraphOptimizer::InferSmiRanges() {
}
void FlowGraphTypePropagator::VisitBlocks() {
ASSERT(current_iterator_ == NULL);
for (intptr_t i = 0; i < block_order_.length(); ++i) {
BlockEntryInstr* entry = block_order_[i];
entry->Accept(this);
ForwardInstructionIterator it(entry);
current_iterator_ = &it;
for (; !it.Done(); it.Advance()) {
Instruction* current = it.Current();
// No need to propagate the input types of the instruction, as long as
// PhiInstr's are handled as part of JoinEntryInstr.
// Visit the instruction and possibly eliminate type checks.
current->Accept(this);
// The instruction may have been removed from the graph.
Definition* defn = current->AsDefinition();
if ((defn != NULL) &&
!defn->IsPushArgument() &&
(defn->previous() != NULL)) {
// Cache the propagated computation type.
AbstractType& type = AbstractType::Handle(defn->CompileType());
still_changing_ = defn->SetPropagatedType(type) || still_changing_;
// Propagate class ids.
const intptr_t cid = defn->ResultCid();
still_changing_ = defn->SetPropagatedCid(cid) || still_changing_;
}
}
current_iterator_ = NULL;
}
}
void FlowGraphTypePropagator::VisitAssertAssignable(
AssertAssignableInstr* instr) {
bool is_null, is_instance;
if (FLAG_eliminate_type_checks &&
!instr->is_eliminated() &&
((instr->value()->CanComputeIsNull(&is_null) && is_null) ||
(instr->value()->CanComputeIsInstanceOf(instr->dst_type(), &is_instance)
&& is_instance))) {
// TODO(regis): Remove is_eliminated_ field and support.
instr->eliminate();
Value* use = instr->value();
ASSERT(use != NULL);
Definition* result = use->definition();
ASSERT(result != NULL);
// Replace uses and remove the current instruction via the iterator.
instr->ReplaceUsesWith(result);
ASSERT(current_iterator()->Current() == instr);
current_iterator()->RemoveCurrentFromGraph();
if (FLAG_trace_optimization) {
OS::Print("Replacing v%"Pd" with v%"Pd"\n",
instr->ssa_temp_index(),
result->ssa_temp_index());
}
if (FLAG_trace_type_check_elimination) {
FlowGraphPrinter::PrintTypeCheck(parsed_function(),
instr->token_pos(),
instr->value(),
instr->dst_type(),
instr->dst_name(),
instr->is_eliminated());
}
}
}
void FlowGraphTypePropagator::VisitAssertBoolean(AssertBooleanInstr* instr) {
bool is_null, is_bool;
if (FLAG_eliminate_type_checks &&
!instr->is_eliminated() &&
instr->value()->CanComputeIsNull(&is_null) &&
!is_null &&
instr->value()->CanComputeIsInstanceOf(Type::Handle(Type::BoolType()),
&is_bool) &&
is_bool) {
// TODO(regis): Remove is_eliminated_ field and support.
instr->eliminate();
Value* use = instr->value();
Definition* result = use->definition();
ASSERT(result != NULL);
// Replace uses and remove the current instruction via the iterator.
instr->ReplaceUsesWith(result);
ASSERT(current_iterator()->Current() == instr);
current_iterator()->RemoveCurrentFromGraph();
if (FLAG_trace_optimization) {
OS::Print("Replacing v%"Pd" with v%"Pd"\n",
instr->ssa_temp_index(),
result->ssa_temp_index());
}
if (FLAG_trace_type_check_elimination) {
FlowGraphPrinter::PrintTypeCheck(parsed_function(),
instr->token_pos(),
instr->value(),
Type::Handle(Type::BoolType()),
Symbols::BooleanExpression(),
instr->is_eliminated());
}
}
}
void FlowGraphTypePropagator::VisitInstanceOf(InstanceOfInstr* instr) {
bool is_null;
bool is_instance = false;
if (FLAG_eliminate_type_checks &&
instr->value()->CanComputeIsNull(&is_null) &&
(is_null ||
instr->value()->CanComputeIsInstanceOf(instr->type(), &is_instance))) {
bool val = instr->negate_result() ? !is_instance : is_instance;
Definition* result = new ConstantInstr(val ? Bool::True() : Bool::False());
result->set_ssa_temp_index(flow_graph_->alloc_ssa_temp_index());
result->InsertBefore(instr);
// Replace uses and remove the current instruction via the iterator.
instr->ReplaceUsesWith(result);
ASSERT(current_iterator()->Current() == instr);
current_iterator()->RemoveCurrentFromGraph();
if (FLAG_trace_optimization) {
OS::Print("Replacing v%"Pd" with v%"Pd"\n",
instr->ssa_temp_index(),
result->ssa_temp_index());
}
if (FLAG_trace_type_check_elimination) {
FlowGraphPrinter::PrintTypeCheck(parsed_function(),
instr->token_pos(),
instr->value(),
instr->type(),
Symbols::InstanceOf(),
/* eliminated = */ true);
}
}
}
void FlowGraphTypePropagator::VisitGraphEntry(GraphEntryInstr* graph_entry) {
// Visit incoming parameters.
for (intptr_t i = 0; i < graph_entry->initial_definitions()->length(); i++) {
ParameterInstr* param =
(*graph_entry->initial_definitions())[i]->AsParameter();
if (param != NULL) VisitParameter(param);
}
}
void FlowGraphTypePropagator::VisitJoinEntry(JoinEntryInstr* join_entry) {
if (join_entry->phis() != NULL) {
for (intptr_t i = 0; i < join_entry->phis()->length(); ++i) {
PhiInstr* phi = (*join_entry->phis())[i];
if (phi != NULL) {
VisitPhi(phi);
}
}
}
}
// TODO(srdjan): Investigate if the propagated cid should be more specific.
void FlowGraphTypePropagator::VisitPushArgument(PushArgumentInstr* push) {
if (!push->has_propagated_cid()) push->SetPropagatedCid(kDynamicCid);
}
void FlowGraphTypePropagator::VisitPhi(PhiInstr* phi) {
// We could set the propagated type of the phi to the least upper bound of its
// input propagated types. However, keeping all propagated types allows us to
// optimize method dispatch.
// TODO(regis): Support a set of propagated types. For now, we compute the
// least specific of the input propagated types.
AbstractType& type = AbstractType::Handle(phi->LeastSpecificInputType());
bool changed = phi->SetPropagatedType(type);
if (changed) {
still_changing_ = true;
}
// Merge class ids: if any two inputs have different class ids then result
// is kDynamicCid.
intptr_t merged_cid = kIllegalCid;
for (intptr_t i = 0; i < phi->InputCount(); i++) {
// Result cid of UseVal can be kIllegalCid if the referred definition
// has not been visited yet.
intptr_t cid = phi->InputAt(i)->ResultCid();
if (cid == kIllegalCid) {
still_changing_ = true;
continue;
}
if (merged_cid == kIllegalCid) {
// First time set.
merged_cid = cid;
} else if (merged_cid != cid) {
merged_cid = kDynamicCid;
}
}
if (merged_cid == kIllegalCid) {
merged_cid = kDynamicCid;
}
changed = phi->SetPropagatedCid(merged_cid);
if (changed) {
still_changing_ = true;
}
}
void FlowGraphTypePropagator::VisitParameter(ParameterInstr* param) {
// TODO(regis): Once we inline functions, the propagated type of the formal
// parameter will reflect the compile type of the passed-in argument.
// For now, we do not know anything about the argument type and therefore set
// it to the DynamicType, unless the argument is a compiler generated value,
// i.e. the receiver argument or the constructor phase argument.
AbstractType& param_type = AbstractType::Handle(Type::DynamicType());
param->SetPropagatedCid(kDynamicCid);
bool param_type_is_known = false;
if (param->index() == 0) {
const Function& function = parsed_function().function();
if ((function.IsDynamicFunction() || function.IsConstructor())) {
// Parameter is the receiver.
param_type_is_known = true;
}
} else if ((param->index() == 1) &&
parsed_function().function().IsConstructor()) {
// Parameter is the constructor phase.
param_type_is_known = true;
}
if (param_type_is_known) {
LocalScope* scope = parsed_function().node_sequence()->scope();
param_type = scope->VariableAt(param->index())->type().raw();
if (FLAG_use_cha) {
const intptr_t cid = Class::Handle(param_type.type_class()).id();
if (!CHA::HasSubclasses(cid)) {
// Receiver's class has no subclasses.
param->SetPropagatedCid(cid);
}
}
}
bool changed = param->SetPropagatedType(param_type);
if (changed) {
still_changing_ = true;
}
}
void FlowGraphTypePropagator::PropagateTypes() {
// TODO(regis): Is there a way to make this more efficient, e.g. by visiting
// only blocks depending on blocks that have changed and not the whole graph.
do {
still_changing_ = false;
VisitBlocks();
} while (still_changing_);
}
static BlockEntryInstr* FindPreHeader(BlockEntryInstr* header) {
for (intptr_t j = 0; j < header->PredecessorCount(); ++j) {
BlockEntryInstr* candidate = header->PredecessorAt(j);
@@ -3035,7 +2601,7 @@ void LICM::TryHoistCheckSmiThroughPhi(ForwardInstructionIterator* it,
return;
}
if (phi->GetPropagatedCid() == kSmiCid) {
if (phi->Type()->ToCid() == kSmiCid) {
current->UnuseAllInputs();
it->RemoveCurrentFromGraph();
return;
@@ -3047,8 +2613,9 @@ void LICM::TryHoistCheckSmiThroughPhi(ForwardInstructionIterator* it,
intptr_t non_smi_input = kNotFound;
for (intptr_t i = 0; i < phi->InputCount(); ++i) {
Value* input = phi->InputAt(i);
if (input->ResultCid() != kSmiCid) {
if ((non_smi_input != kNotFound) || (input->ResultCid() != kDynamicCid)) {
if (input->Type()->ToCid() != kSmiCid) {
if ((non_smi_input != kNotFound) ||
(input->Type()->ToCid() != kDynamicCid)) {
// There are multiple kDynamicCid inputs or there is an input that is
// known to be non-smi.
return;
@@ -3072,7 +2639,7 @@ void LICM::TryHoistCheckSmiThroughPhi(ForwardInstructionIterator* it,
current->value()->set_definition(non_smi_input_defn);
non_smi_input_defn->AddInputUse(current->value());
phi->SetPropagatedCid(kSmiCid);
phi->Type()->ReplaceWith(CompileType::FromCid(kSmiCid));
}
@@ -4118,10 +3685,10 @@ void ConstantPropagator::VisitStrictCompare(StrictCompareInstr* instr) {
if (IsNonConstant(left) || IsNonConstant(right)) {
// TODO(vegorov): incorporate nullability information into the lattice.
if ((left.IsNull() && (instr->right()->ResultCid() != kDynamicCid)) ||
(right.IsNull() && (instr->left()->ResultCid() != kDynamicCid))) {
bool result = left.IsNull() ? (instr->right()->ResultCid() == kNullCid)
: (instr->left()->ResultCid() == kNullCid);
if ((left.IsNull() && instr->right()->Type()->HasDecidableNullability()) ||
(right.IsNull() && instr->left()->Type()->HasDecidableNullability())) {
bool result = left.IsNull() ? instr->right()->Type()->IsNull()
: instr->left()->Type()->IsNull();
if (instr->kind() == Token::kNE_STRICT) result = !result;
SetValue(instr, result ? Bool::True() : Bool::False());
} else {

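To make the nullability test in VisitStrictCompare above concrete, here is a toy model (invented names, not VM code) of when a strict comparison against the null constant may be folded:

// Folding 'x === null' is sound exactly when x's nullability is decidable:
// either x cannot be null, or x is known to be exactly null.
struct TypeFacts {
  bool is_nullable;  // May the value be null?
  bool is_null;      // Is the value known to be exactly null?
  bool HasDecidableNullability() const { return !is_nullable || is_null; }
};

// Returns true and sets *result to the value of 'x === null' when folding
// applies; kNE_STRICT callers negate the result afterwards.
bool TryFoldStrictCompareWithNull(const TypeFacts& x, bool* result) {
  if (!x.HasDecidableNullability()) return false;
  *result = x.is_null;
  return true;
}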

@@ -121,39 +121,6 @@ class FlowGraphOptimizer : public FlowGraphVisitor {
class ParsedFunction;
class FlowGraphTypePropagator : public FlowGraphVisitor {
public:
explicit FlowGraphTypePropagator(FlowGraph* flow_graph)
: FlowGraphVisitor(flow_graph->reverse_postorder()),
parsed_function_(flow_graph->parsed_function()),
flow_graph_(flow_graph),
still_changing_(false) { }
virtual ~FlowGraphTypePropagator() { }
const ParsedFunction& parsed_function() const { return parsed_function_; }
void PropagateTypes();
private:
virtual void VisitBlocks();
virtual void VisitAssertAssignable(AssertAssignableInstr* instr);
virtual void VisitAssertBoolean(AssertBooleanInstr* instr);
virtual void VisitInstanceOf(InstanceOfInstr* instr);
virtual void VisitGraphEntry(GraphEntryInstr* graph_entry);
virtual void VisitJoinEntry(JoinEntryInstr* join_entry);
virtual void VisitPhi(PhiInstr* phi);
virtual void VisitParameter(ParameterInstr* param);
virtual void VisitPushArgument(PushArgumentInstr* bind);
const ParsedFunction& parsed_function_;
FlowGraph* flow_graph_;
bool still_changing_;
DISALLOW_COPY_AND_ASSIGN(FlowGraphTypePropagator);
};
// Loop invariant code motion.
class LICM : public AllStatic {
public:


@@ -0,0 +1,734 @@
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/flow_graph_type_propagator.h"
#include "vm/cha.h"
#include "vm/bit_vector.h"
namespace dart {
DEFINE_FLAG(bool, trace_type_propagation, false,
"Trace flow graph type propagation");
DECLARE_FLAG(bool, enable_type_checks);
DECLARE_FLAG(bool, use_cha);
FlowGraphTypePropagator::FlowGraphTypePropagator(FlowGraph* flow_graph)
: FlowGraphVisitor(flow_graph->reverse_postorder()),
flow_graph_(flow_graph),
types_(flow_graph->current_ssa_temp_index()),
in_worklist_(new BitVector(flow_graph->current_ssa_temp_index())) {
for (intptr_t i = 0; i < flow_graph->current_ssa_temp_index(); i++) {
types_.Add(NULL);
}
}
void FlowGraphTypePropagator::Propagate() {
// Walk the dominator tree and propagate reaching types to all Values.
// Collect all phis for a fixpoint iteration.
PropagateRecursive(flow_graph_->graph_entry());
#ifdef DEBUG
// Initially the worklist contains only phis.
for (intptr_t i = 0; i < worklist_.length(); i++) {
ASSERT(worklist_[i]->IsPhi());
ASSERT(worklist_[i]->Type()->IsNone());
}
#endif
// Iterate until a fixpoint is reached, updating the types of definitions.
while (!worklist_.is_empty()) {
Definition* def = RemoveLastFromWorklist();
if (FLAG_trace_type_propagation) {
OS::Print("recomputing type of v%"Pd": %s\n",
def->ssa_temp_index(),
def->Type()->ToCString());
}
if (def->RecomputeType()) {
if (FLAG_trace_type_propagation) {
OS::Print(" ... new type %s\n", def->Type()->ToCString());
}
for (Value::Iterator it(def->input_use_list());
!it.Done();
it.Advance()) {
Definition* use_defn = it.Current()->instruction()->AsDefinition();
if (use_defn != NULL) {
AddToWorklist(use_defn);
}
}
}
}
}
void FlowGraphTypePropagator::PropagateRecursive(BlockEntryInstr* block) {
const intptr_t rollback_point = rollback_.length();
block->Accept(this);
for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
Instruction* instr = it.Current();
for (intptr_t i = 0; i < instr->InputCount(); i++) {
VisitValue(instr->InputAt(i));
}
instr->Accept(this);
}
GotoInstr* goto_instr = block->last_instruction()->AsGoto();
if (goto_instr != NULL) {
JoinEntryInstr* join = goto_instr->successor();
intptr_t pred_index = join->IndexOfPredecessor(block);
ASSERT(pred_index >= 0);
for (PhiIterator it(join); !it.Done(); it.Advance()) {
VisitValue(it.Current()->InputAt(pred_index));
}
}
for (intptr_t i = 0; i < block->dominated_blocks().length(); ++i) {
PropagateRecursive(block->dominated_blocks()[i]);
}
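// Refinements recorded while visiting this dominator subtree (via
// SetTypeOf) are valid only inside it; undo them before returning to the
// parent block.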
for (intptr_t i = rollback_.length() - 1; i >= rollback_point; i--) {
types_[rollback_[i].index()] = rollback_[i].type();
}
rollback_.TruncateTo(rollback_point);
}
CompileType* FlowGraphTypePropagator::TypeOf(Definition* def) {
const intptr_t index = def->ssa_temp_index();
CompileType* type = types_[index];
if (type == NULL) {
type = types_[index] = def->Type();
ASSERT(type != NULL);
}
return type;
}
void FlowGraphTypePropagator::SetTypeOf(Definition* def, CompileType* type) {
const intptr_t index = def->ssa_temp_index();
rollback_.Add(RollbackEntry(index, types_[index]));
types_[index] = type;
}
void FlowGraphTypePropagator::SetCid(Definition* def, intptr_t cid) {
CompileType* current = TypeOf(def);
if (current->ToCid() == cid) return;
SetTypeOf(def, CompileType::FromCid(cid));
}
void FlowGraphTypePropagator::VisitValue(Value* value) {
CompileType* type = TypeOf(value->definition());
value->SetReachingType(type);
if (FLAG_trace_type_propagation) {
OS::Print("reaching type to v%"Pd" for v%"Pd" is %s\n",
value->instruction()->IsDefinition() ?
value->instruction()->AsDefinition()->ssa_temp_index() : -1,
value->definition()->ssa_temp_index(),
type->ToCString());
}
}
void FlowGraphTypePropagator::VisitJoinEntry(JoinEntryInstr* join) {
for (PhiIterator it(join); !it.Done(); it.Advance()) {
if (it.Current()->is_alive()) {
worklist_.Add(it.Current());
}
}
}
void FlowGraphTypePropagator::VisitCheckSmi(CheckSmiInstr* check) {
SetCid(check->value()->definition(), kSmiCid);
}
void FlowGraphTypePropagator::VisitCheckClass(CheckClassInstr* check) {
if ((check->unary_checks().NumberOfChecks() != 1) ||
check->AffectedBySideEffect()) {
// TODO(vegorov): If the check is affected by a side effect we can still
// propagate the type further, but not the cid.
return;
}
SetCid(check->value()->definition(),
check->unary_checks().GetReceiverClassIdAt(0));
}
void FlowGraphTypePropagator::AddToWorklist(Definition* defn) {
if (defn->ssa_temp_index() == -1) {
return;
}
const intptr_t index = defn->ssa_temp_index();
if (!in_worklist_->Contains(index)) {
worklist_.Add(defn);
in_worklist_->Add(index);
}
}
Definition* FlowGraphTypePropagator::RemoveLastFromWorklist() {
Definition* defn = worklist_.RemoveLast();
ASSERT(defn->ssa_temp_index() != -1);
in_worklist_->Remove(defn->ssa_temp_index());
return defn;
}
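// Union joins two lattice elements in place. For example, joining Null
// (nullable, kNullCid) with a non-nullable Smi ORs the nullabilities and,
// because ToNullableCid() is kNullCid, adopts the Smi cid and type; the
// result is a nullable Smi, whose ToCid() is kDynamicCid.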
void CompileType::Union(CompileType* other) {
if (other->IsNone()) {
return;
}
if (IsNone()) {
ReplaceWith(other);
return;
}
is_nullable_ = is_nullable_ || other->is_nullable_;
if (ToNullableCid() == kNullCid) {
cid_ = other->cid_;
type_ = other->type_;
return;
}
if (other->ToNullableCid() == kNullCid) {
return;
}
if (ToNullableCid() != other->ToNullableCid()) {
ASSERT(cid_ != kNullCid);
cid_ = kDynamicCid;
}
if (ToAbstractType()->IsMoreSpecificThan(*other->ToAbstractType(), NULL)) {
type_ = other->ToAbstractType();
} else if (other->ToAbstractType()->IsMoreSpecificThan(*ToAbstractType(), NULL)) {
// Nothing to do.
} else {
// Can't unify.
type_ = &Type::ZoneHandle(Type::DynamicType());
}
}
static bool IsNullableCid(intptr_t cid) {
ASSERT(cid != kIllegalCid);
return cid == kNullCid || cid == kDynamicCid;
}
CompileType* CompileType::New(intptr_t cid, const AbstractType& type) {
return new CompileType(IsNullableCid(cid), cid, &type);
}
CompileType* CompileType::FromAbstractType(const AbstractType& type,
bool is_nullable) {
return new CompileType(is_nullable, kIllegalCid, &type);
}
CompileType* CompileType::FromCid(intptr_t cid) {
return new CompileType(IsNullableCid(cid), cid, NULL);
}
CompileType* CompileType::Dynamic() {
return New(kDynamicCid, Type::ZoneHandle(Type::DynamicType()));
}
CompileType* CompileType::Null() {
return New(kNullCid, Type::ZoneHandle(Type::NullType()));
}
CompileType* CompileType::Bool() {
return New(kBoolCid, Type::ZoneHandle(Type::BoolType()));
}
CompileType* CompileType::Int() {
return FromAbstractType(Type::ZoneHandle(Type::IntType()), kNonNullable);
}
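// A nullable value with a concrete cid collapses to kDynamicCid here: a
// single class id cannot express "an instance of C or null".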
intptr_t CompileType::ToCid() {
if ((cid_ == kNullCid) || (cid_ == kDynamicCid)) {
return cid_;
}
return is_nullable_ ? static_cast<intptr_t>(kDynamicCid) : ToNullableCid();
}
intptr_t CompileType::ToNullableCid() {
if (cid_ == kIllegalCid) {
ASSERT(type_ != NULL);
if (type_->IsMalformed()) {
cid_ = kDynamicCid;
} else if (type_->IsVoidType()) {
cid_ = kNullCid;
} else if (FLAG_use_cha && type_->HasResolvedTypeClass()) {
const intptr_t cid = Class::Handle(type_->type_class()).id();
if (!CHA::HasSubclasses(cid)) {
cid_ = cid;
}
} else {
cid_ = kDynamicCid;
}
}
return cid_;
}
bool CompileType::HasDecidableNullability() {
return !is_nullable_ || IsNull();
}
bool CompileType::IsNull() {
return (ToCid() == kNullCid);
}
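// Lazily materializes an abstract type from the concrete cid. Generic
// classes are widened to dynamic, because type arguments are not tracked
// at this level.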
const AbstractType* CompileType::ToAbstractType() {
if (type_ == NULL) {
ASSERT(cid_ != kIllegalCid);
const Class& type_class =
Class::Handle(Isolate::Current()->class_table()->At(cid_));
if (type_class.HasTypeArguments()) {
type_ = &Type::ZoneHandle(Type::DynamicType());
return type_;
}
type_ = &Type::ZoneHandle(Type::NewNonParameterizedType(type_class));
}
return type_;
}
bool CompileType::CanComputeIsInstanceOf(const AbstractType& type,
bool is_nullable,
bool* is_instance) {
ASSERT(is_instance != NULL);
// We cannot give an answer if the given type is malformed.
if (type.IsMalformed()) {
return false;
}
if (type.IsDynamicType() || type.IsObjectType()) {
*is_instance = true;
return true;
}
if (IsNone()) {
return false;
}
// We should never test for an instance of null.
ASSERT(!type.IsNullType());
// Consider the compile type of the value.
const AbstractType& compile_type = *ToAbstractType();
if (compile_type.IsMalformed()) {
return false;
}
// If the compile type of the value is void, we are type checking the result
// of a void function, which was checked to be null at the return statement
// inside the function.
if (compile_type.IsVoidType()) {
ASSERT(FLAG_enable_type_checks);
*is_instance = true;
return true;
}
// The Null type is only a subtype of Object and of dynamic.
// Functions that do not explicitly return a value, implicitly return null,
// except generative constructors, which return the object being constructed.
// It is therefore acceptable for void functions to return null.
if (compile_type.IsNullType()) {
*is_instance = is_nullable ||
type.IsObjectType() || type.IsDynamicType() || type.IsVoidType();
return true;
}
// A non-null value is not an instance of void.
if (type.IsVoidType()) {
*is_instance = IsNull();
return HasDecidableNullability();
}
// If the value can be null then we can't eliminate the
// check unless null is allowed.
if (is_nullable_ && !is_nullable) {
return false;
}
Error& malformed_error = Error::Handle();
*is_instance = compile_type.IsMoreSpecificThan(type, &malformed_error);
return malformed_error.IsNull() && *is_instance;
}
bool CompileType::IsMoreSpecificThan(const AbstractType& other) {
return !IsNone() && ToAbstractType()->IsMoreSpecificThan(other, NULL);
}
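// A use's reaching type defaults to the type of its definition until the
// propagator records a sharper, dominator-scoped reaching type.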
CompileType* Value::Type() {
if (reaching_type_ == NULL) {
reaching_type_ = definition()->Type();
}
return reaching_type_;
}
CompileType* PhiInstr::ComputeInitialType() const {
// The type of a phi is unknown until type propagation is run
// for the first time.
return CompileType::None();
}
bool PhiInstr::RecomputeType() {
if (!is_alive()) {
return false;
}
CompileType* result = CompileType::None();
for (intptr_t i = 0; i < InputCount(); i++) {
if (FLAG_trace_type_propagation) {
OS::Print(" phi %"Pd" input %"Pd": v%"Pd" has reaching type %s\n",
ssa_temp_index(),
i,
InputAt(i)->definition()->ssa_temp_index(),
InputAt(i)->Type()->ToCString());
}
result->Union(InputAt(i)->Type());
}
if (result->IsNone()) {
ASSERT(Type()->IsNone());
return false;
}
if (Type()->IsNone() || !Type()->IsEqualTo(result)) {
Type()->ReplaceWith(result);
return true;
}
return false;
}
CompileType* ParameterInstr::ComputeInitialType() const {
// Note that returning the declared type of the formal parameter would be
// incorrect, because ParameterInstr is used as input to the type check
// verifying the run time type of the passed-in parameter and this check would
// always be wrongly eliminated.
return CompileType::Dynamic();
}
CompileType* PushArgumentInstr::ComputeInitialType() const {
return CompileType::Dynamic();
}
CompileType* ConstantInstr::ComputeInitialType() const {
if (value().IsNull()) {
return CompileType::Null();
}
if (value().IsInstance()) {
return CompileType::New(
Class::Handle(value().clazz()).id(),
AbstractType::ZoneHandle(Instance::Cast(value()).GetType()));
} else {
ASSERT(value().IsAbstractTypeArguments());
return CompileType::Dynamic();
}
}
CompileType* AssertAssignableInstr::ComputeInitialType() const {
CompileType* value_type = value()->Type();
if (value_type->IsMoreSpecificThan(dst_type())) {
return value_type;
}
return CompileType::FromAbstractType(dst_type());
}
bool AssertAssignableInstr::RecomputeType() {
CompileType* value_type = value()->Type();
if (value_type == Type()) {
return false;
}
if (value_type->IsMoreSpecificThan(dst_type()) &&
!Type()->IsEqualTo(value_type)) {
Type()->ReplaceWith(value_type);
return true;
}
return false;
}
CompileType* AssertBooleanInstr::ComputeInitialType() const {
return CompileType::Bool();
}
CompileType* ArgumentDefinitionTestInstr::ComputeInitialType() const {
return CompileType::Bool();
}
CompileType* BooleanNegateInstr::ComputeInitialType() const {
return CompileType::Bool();
}
CompileType* InstanceOfInstr::ComputeInitialType() const {
return CompileType::Bool();
}
CompileType* StrictCompareInstr::ComputeInitialType() const {
return CompileType::Bool();
}
CompileType* EqualityCompareInstr::ComputeInitialType() const {
return IsInlinedNumericComparison() ? CompileType::Bool()
: CompileType::Dynamic();
}
CompileType* RelationalOpInstr::ComputeInitialType() const {
return IsInlinedNumericComparison() ? CompileType::Bool()
: CompileType::Dynamic();
}
CompileType* CurrentContextInstr::ComputeInitialType() const {
return CompileType::FromCid(kContextCid);
}
CompileType* CloneContextInstr::ComputeInitialType() const {
return CompileType::FromCid(kContextCid);
}
CompileType* AllocateContextInstr::ComputeInitialType() const {
return CompileType::FromCid(kContextCid);
}
CompileType* StaticCallInstr::ComputeInitialType() const {
if (result_cid_ != kDynamicCid) {
return CompileType::FromCid(result_cid_);
}
if (FLAG_enable_type_checks) {
return CompileType::FromAbstractType(
AbstractType::ZoneHandle(function().result_type()));
}
return CompileType::Dynamic();
}
CompileType* LoadLocalInstr::ComputeInitialType() const {
if (FLAG_enable_type_checks) {
return CompileType::FromAbstractType(local().type());
}
return CompileType::Dynamic();
}
CompileType* StoreLocalInstr::ComputeInitialType() const {
// Returns stored value.
return value()->Type();
}
CompileType* StringFromCharCodeInstr::ComputeInitialType() const {
return CompileType::FromCid(cid_);
}
CompileType* StoreInstanceFieldInstr::ComputeInitialType() const {
return value()->Type();
}
CompileType* LoadStaticFieldInstr::ComputeInitialType() const {
if (FLAG_enable_type_checks) {
return CompileType::FromAbstractType(
AbstractType::ZoneHandle(field().type()));
}
return CompileType::Dynamic();
}
CompileType* StoreStaticFieldInstr::ComputeInitialType() const {
return value()->Type();
}
CompileType* CreateArrayInstr::ComputeInitialType() const {
return CompileType::FromAbstractType(type(), CompileType::kNonNullable);
}
CompileType* CreateClosureInstr::ComputeInitialType() const {
const Function& fun = function();
const Class& signature_class = Class::Handle(fun.signature_class());
return CompileType::FromAbstractType(
Type::ZoneHandle(signature_class.SignatureType()),
CompileType::kNonNullable);
}
CompileType* AllocateObjectInstr::ComputeInitialType() const {
// TODO(vegorov): Incorporate type arguments into the returned type.
return CompileType::FromCid(cid_);
}
CompileType* LoadFieldInstr::ComputeInitialType() const {
// Type may be null if the field is a VM field, e.g. context parent.
// Keep it as null for debug purposes and do not return dynamic in production
// mode, since misuse of the type would remain undetected.
if (type().IsNull()) {
return CompileType::Dynamic();
}
if (FLAG_enable_type_checks) {
return CompileType::FromAbstractType(type());
}
return CompileType::FromCid(result_cid_);
}
CompileType* StoreVMFieldInstr::ComputeInitialType() const {
return value()->Type();
}
CompileType* BinarySmiOpInstr::ComputeInitialType() const {
return CompileType::FromCid(kSmiCid);
}
CompileType* UnarySmiOpInstr::ComputeInitialType() const {
return CompileType::FromCid(kSmiCid);
}
CompileType* DoubleToSmiInstr::ComputeInitialType() const {
return CompileType::FromCid(kSmiCid);
}
CompileType* ConstraintInstr::ComputeInitialType() const {
return CompileType::FromCid(kSmiCid);
}
CompileType* BinaryMintOpInstr::ComputeInitialType() const {
return CompileType::Int();
}
CompileType* ShiftMintOpInstr::ComputeInitialType() const {
return CompileType::Int();
}
CompileType* UnaryMintOpInstr::ComputeInitialType() const {
return CompileType::Int();
}
CompileType* BoxIntegerInstr::ComputeInitialType() const {
return CompileType::Int();
}
CompileType* UnboxIntegerInstr::ComputeInitialType() const {
return CompileType::Int();
}
CompileType* DoubleToIntegerInstr::ComputeInitialType() const {
return CompileType::Int();
}
CompileType* BinaryDoubleOpInstr::ComputeInitialType() const {
return CompileType::FromCid(kDoubleCid);
}
CompileType* MathSqrtInstr::ComputeInitialType() const {
return CompileType::FromCid(kDoubleCid);
}
CompileType* UnboxDoubleInstr::ComputeInitialType() const {
return CompileType::FromCid(kDoubleCid);
}
CompileType* BoxDoubleInstr::ComputeInitialType() const {
return CompileType::FromCid(kDoubleCid);
}
CompileType* SmiToDoubleInstr::ComputeInitialType() const {
return CompileType::FromCid(kDoubleCid);
}
CompileType* DoubleToDoubleInstr::ComputeInitialType() const {
return CompileType::FromCid(kDoubleCid);
}
CompileType* InvokeMathCFunctionInstr::ComputeInitialType() const {
return CompileType::FromCid(kDoubleCid);
}
} // namespace dart


@@ -0,0 +1,77 @@
// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef VM_FLOW_GRAPH_TYPE_PROPAGATOR_H_
#define VM_FLOW_GRAPH_TYPE_PROPAGATOR_H_
#include "vm/flow_graph.h"
#include "vm/intermediate_language.h"
namespace dart {
class FlowGraphTypePropagator : public FlowGraphVisitor {
public:
explicit FlowGraphTypePropagator(FlowGraph* flow_graph);
void Propagate();
private:
void PropagateRecursive(BlockEntryInstr* block);
void VisitValue(Value* value);
virtual void VisitJoinEntry(JoinEntryInstr* instr);
virtual void VisitCheckSmi(CheckSmiInstr* instr);
virtual void VisitCheckClass(CheckClassInstr* instr);
// Current reaching type of the definition. Valid only during dominator tree
// traversal.
CompileType* TypeOf(Definition* def);
// Mark definition as having given compile type in all dominated instructions.
void SetTypeOf(Definition* def, CompileType* type);
// Mark definition as having given class id in all dominated instructions.
void SetCid(Definition* value, intptr_t cid);
void AddToWorklist(Definition* defn);
Definition* RemoveLastFromWorklist();
FlowGraph* flow_graph_;
// Mapping between SSA values and their current reaching types. Valid
// only during dominator tree traversal.
GrowableArray<CompileType*> types_;
// Worklist for fixpoint computation.
GrowableArray<Definition*> worklist_;
BitVector* in_worklist_;
// RollbackEntry is used to track and roll back changes made to the types_
// array during dominator tree traversal.
class RollbackEntry {
public:
// Default constructor needed for the container.
RollbackEntry()
: index_(), type_() {
}
RollbackEntry(intptr_t index, CompileType* type)
: index_(index), type_(type) {
}
intptr_t index() const { return index_; }
CompileType* type() const { return type_; }
private:
intptr_t index_;
CompileType* type_;
};
GrowableArray<RollbackEntry> rollback_;
};
} // namespace dart
#endif // VM_FLOW_GRAPH_TYPE_PROPAGATOR_H_
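Usage mirrors the call site added to compiler.cc in the first hunk of this commit; a minimal sketch:

// Run after graph construction and inlining; ApplyClassIds() can then use
// the propagated class ids (see the compiler.cc hunk above).
FlowGraphTypePropagator propagator(flow_graph);
propagator.Propagate();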


@@ -88,17 +88,12 @@ void FlowGraphPrinter::PrintTypeCheck(const ParsedFunction& parsed_function,
const AbstractType& dst_type,
const String& dst_name,
bool eliminated) {
const Script& script = Script::Handle(parsed_function.function().script());
const char* compile_type_name = "unknown";
if (value != NULL) {
const AbstractType& type = AbstractType::Handle(value->CompileType());
if (!type.IsNull()) {
compile_type_name = String::Handle(type.Name()).ToCString();
}
compile_type_name = value->Type()->ToCString();
}
Parser::PrintMessage(script, token_pos, "",
"%s type check: compile type '%s' is %s specific than "
"type '%s' of '%s'.",
OS::Print("%s type check: compile type %s is %s specific than "
"type '%s' of '%s'.\n",
eliminated ? "Eliminated" : "Generated",
compile_type_name,
eliminated ? "more" : "not more",
@@ -107,6 +102,31 @@ void FlowGraphPrinter::PrintTypeCheck(const ParsedFunction& parsed_function,
}
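// Renders a lattice element as T{nullability, class-or-?, type-or-?}.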
void CompileType::PrintTo(BufferFormatter* f) const {
f->Print("T{");
f->Print("%s, ", is_nullable_ ? "null" : "not-null");
if (cid_ != kIllegalCid) {
const Class& cls =
Class::Handle(Isolate::Current()->class_table()->At(cid_));
f->Print("%s, ", String::Handle(cls.Name()).ToCString());
} else {
f->Print("?, ");
}
f->Print("%s}", (type_ != NULL) ? String::Handle(type_->Name()).ToCString()
: "?");
}
const char* CompileType::ToCString() const {
char buffer[1024];
BufferFormatter f(buffer, sizeof(buffer));
PrintTo(&f);
return Isolate::Current()->current_zone()->MakeCopyOfString(buffer);
}
static void PrintICData(BufferFormatter* f, const ICData& ic_data) {
f->Print(" IC[%"Pd": ", ic_data.NumberOfChecks());
Function& target = Function::Handle();
@@ -134,20 +154,6 @@ static void PrintICData(BufferFormatter* f, const ICData& ic_data) {
}
static void PrintPropagatedType(BufferFormatter* f, const Definition& def) {
if (def.HasPropagatedType()) {
String& name = String::Handle();
name = AbstractType::Handle(def.PropagatedType()).Name();
f->Print(" {PT: %s}", name.ToCString());
}
if (def.has_propagated_cid()) {
const Class& cls = Class::Handle(
Isolate::Current()->class_table()->At(def.propagated_cid()));
f->Print(" {PCid: %s}", String::Handle(cls.Name()).ToCString());
}
}
static void PrintUse(BufferFormatter* f, const Definition& definition) {
if (definition.is_used()) {
if (definition.HasSSATemp()) {
@@ -182,11 +188,15 @@ void Definition::PrintTo(BufferFormatter* f) const {
f->Print("%s:%"Pd"(", DebugName(), GetDeoptId());
PrintOperandsTo(f);
f->Print(")");
PrintPropagatedType(f, *this);
if (range_ != NULL) {
f->Print(" ");
range_->PrintTo(f);
}
if (type_ != NULL) {
f->Print(" ");
type_->PrintTo(f);
}
}
@@ -200,6 +210,11 @@ void Definition::PrintOperandsTo(BufferFormatter* f) const {
void Value::PrintTo(BufferFormatter* f) const {
PrintUse(f, *definition());
if ((reaching_type_ != NULL) &&
(reaching_type_ != definition()->Type())) {
f->Print(" ");
reaching_type_->PrintTo(f);
}
}
@@ -276,10 +291,9 @@ const char* RangeBoundary::ToCString() const {
void AssertAssignableInstr::PrintOperandsTo(BufferFormatter* f) const {
value()->PrintTo(f);
f->Print(", %s, '%s'%s",
String::Handle(dst_type().Name()).ToCString(),
dst_name().ToCString(),
is_eliminated() ? " eliminated" : "");
f->Print(", %s, '%s'",
dst_type().ToCString(),
dst_name().ToCString());
f->Print(" instantiator(");
instantiator()->PrintTo(f);
f->Print(")");
@@ -291,7 +305,6 @@ void AssertAssignableInstr::PrintOperandsTo(BufferFormatter* f) const {
void AssertBooleanInstr::PrintOperandsTo(BufferFormatter* f) const {
value()->PrintTo(f);
f->Print("%s", is_eliminated() ? " eliminated" : "");
}
@@ -607,7 +620,6 @@ void PhiInstr::PrintTo(BufferFormatter* f) const {
if (i < inputs_.length() - 1) f->Print(", ");
}
f->Print(")");
PrintPropagatedType(f, *this);
if (is_alive()) {
f->Print(" alive");
} else {
@@ -617,6 +629,10 @@ void PhiInstr::PrintTo(BufferFormatter* f) const {
f->Print(" ");
range_->PrintTo(f);
}
if (type_ != NULL) {
f->Print(" ");
type_->PrintTo(f);
}
}


@@ -25,15 +25,15 @@ DEFINE_FLAG(bool, new_identity_spec, true,
DEFINE_FLAG(bool, propagate_ic_data, true,
"Propagate IC data from unoptimized to optimized IC calls.");
DECLARE_FLAG(bool, enable_type_checks);
DECLARE_FLAG(bool, eliminate_type_checks);
DECLARE_FLAG(int, max_polymorphic_checks);
DECLARE_FLAG(bool, trace_optimization);
Definition::Definition()
: range_(NULL),
type_(NULL),
temp_index_(-1),
ssa_temp_index_(-1),
propagated_type_(AbstractType::Handle()),
propagated_cid_(kIllegalCid),
input_use_list_(NULL),
env_use_list_(NULL),
use_kind_(kValue), // Phis and parameters rely on this default.
@@ -146,10 +146,7 @@ bool LoadFieldInstr::AttributesEqual(Instruction* other) const {
LoadFieldInstr* other_load = other->AsLoadField();
ASSERT(other_load != NULL);
ASSERT((offset_in_bytes() != other_load->offset_in_bytes()) ||
((immutable_ == other_load->immutable_) &&
((ResultCid() == other_load->ResultCid()) ||
(ResultCid() == kDynamicCid) ||
(other_load->ResultCid() == kDynamicCid))));
((immutable_ == other_load->immutable_)));
return offset_in_bytes() == other_load->offset_in_bytes();
}
@@ -407,178 +404,17 @@ void FlowGraphVisitor::VisitBlocks() {
}
// TODO(regis): Support a set of compile types for the given value.
bool Value::CanComputeIsNull(bool* is_null) const {
ASSERT(is_null != NULL);
// For now, we can only return a meaningful result if the value is constant.
if (!BindsToConstant()) {
bool Value::NeedsStoreBuffer() {
if (Type()->IsNull() ||
(Type()->ToNullableCid() == kSmiCid) ||
(Type()->ToNullableCid() == kBoolCid)) {
return false;
}
// Return true if the constant value is Object::null.
if (BindsToConstantNull()) {
*is_null = true;
return true;
}
// Consider the compile type of the value to check for sentinels, which are
// also treated as null.
const AbstractType& compile_type = AbstractType::Handle(CompileType());
ASSERT(!compile_type.IsMalformed());
ASSERT(!compile_type.IsVoidType());
// There are only three instances that can be of type Null:
// Object::null(), Object::sentinel(), and Object::transition_sentinel().
// The inline code and run time code performing the type check will only
// encounter the 2 sentinel values if type check elimination was disabled.
// Otherwise, the type check of a sentinel value will be eliminated here,
// because these sentinel values can only be encountered as constants, never
// as actual value of a heap object being type checked.
if (compile_type.IsNullType()) {
*is_null = true;
return true;
}
return false;
}
// TODO(regis): Support a set of compile types for the given value.
bool Value::CanComputeIsInstanceOf(const AbstractType& type,
bool* is_instance) const {
ASSERT(is_instance != NULL);
// We cannot give an answer if the given type is malformed.
if (type.IsMalformed()) {
return false;
}
// We should never test for an instance of null.
ASSERT(!type.IsNullType());
// Consider the compile type of the value.
const AbstractType& compile_type = AbstractType::Handle(CompileType());
if (compile_type.IsMalformed()) {
return false;
}
// If the compile type of the value is void, we are type checking the result
// of a void function, which was checked to be null at the return statement
// inside the function.
if (compile_type.IsVoidType()) {
ASSERT(FLAG_enable_type_checks);
*is_instance = true;
return true;
}
// The Null type is only a subtype of Object and of dynamic.
// Functions that do not explicitly return a value, implicitly return null,
// except generative constructors, which return the object being constructed.
// It is therefore acceptable for void functions to return null.
if (compile_type.IsNullType()) {
*is_instance =
type.IsObjectType() || type.IsDynamicType() || type.IsVoidType();
return true;
}
// Until we support a set of compile types, we can only give answers for
// constant values. Indeed, a variable of the proper compile time type may
// still hold null at run time and therefore fail the test.
if (!BindsToConstant()) {
return false;
}
// A non-null constant is not an instance of void.
if (type.IsVoidType()) {
*is_instance = false;
return true;
}
// Since the value is a constant, its type is instantiated.
ASSERT(compile_type.IsInstantiated());
// The run time type of the value is guaranteed to be a subtype of the
// compile time type of the value. However, establishing here that the
// compile time type is a subtype of the given type does not guarantee that
// the run time type will also be a subtype of the given type, because the
// subtype relation is not transitive when an uninstantiated type is
// involved.
Error& malformed_error = Error::Handle();
if (type.IsInstantiated()) {
// Perform the test on the compile-time type and provide the answer, unless
// the type test produced a malformed error (e.g. an upper bound error).
*is_instance = compile_type.IsSubtypeOf(type, &malformed_error);
} else {
// However, the 'more specific than' relation is transitive and used here.
// In other words, if the compile type of the value is more specific than
// the given type, the run time type of the value, which is guaranteed to be
// a subtype of the compile type, is also guaranteed to be a subtype of the
// given type.
*is_instance = compile_type.IsMoreSpecificThan(type, &malformed_error);
}
return malformed_error.IsNull();
}
bool Value::NeedsStoreBuffer() const {
const intptr_t cid = ResultCid();
if ((cid == kSmiCid) || (cid == kBoolCid) || (cid == kNullCid)) {
return false;
}
return !BindsToConstant();
}
RawAbstractType* PhiInstr::CompileType() const {
ASSERT(!HasPropagatedType());
// Since type propagation has not yet occured, we are reaching this phi via a
// back edge phi input. Return null as compile type so that this input is
// ignored in the first iteration of type propagation.
return AbstractType::null();
}
RawAbstractType* PhiInstr::LeastSpecificInputType() const {
AbstractType& least_specific_type = AbstractType::Handle();
AbstractType& input_type = AbstractType::Handle();
for (intptr_t i = 0; i < InputCount(); i++) {
input_type = InputAt(i)->CompileType();
if (input_type.IsNull()) {
// This input is on a back edge and we are in the first iteration of type
// propagation. Ignore it.
continue;
}
ASSERT(!input_type.IsNull());
if (least_specific_type.IsNull() ||
least_specific_type.IsMoreSpecificThan(input_type, NULL)) {
// Type input_type is less specific than the current least_specific_type.
least_specific_type = input_type.raw();
} else if (input_type.IsMoreSpecificThan(least_specific_type, NULL)) {
// Type least_specific_type is less specific than input_type. No change.
} else {
// The types are unrelated. No need to continue.
least_specific_type = Type::ObjectType();
break;
}
}
return least_specific_type.raw();
}
RawAbstractType* ParameterInstr::CompileType() const {
ASSERT(!HasPropagatedType());
// Note that returning the declared type of the formal parameter would be
// incorrect, because ParameterInstr is used as input to the type check
// verifying the run time type of the passed-in parameter and this check would
// always be wrongly eliminated.
return Type::DynamicType();
}
RawAbstractType* PushArgumentInstr::CompileType() const {
return AbstractType::null();
}
void JoinEntryInstr::AddPredecessor(BlockEntryInstr* predecessor) {
// Require the predecessors to be sorted by block_id to make managing
// their corresponding phi inputs simpler.
@@ -727,45 +563,6 @@ void Definition::ReplaceWith(Definition* other,
}
bool Definition::SetPropagatedCid(intptr_t cid) {
if (cid == kIllegalCid) {
return false;
}
if (propagated_cid_ == kIllegalCid) {
// First setting, nothing has changed.
propagated_cid_ = cid;
return false;
}
bool has_changed = (propagated_cid_ != cid);
propagated_cid_ = cid;
return has_changed;
}
intptr_t Definition::GetPropagatedCid() {
if (has_propagated_cid()) return propagated_cid();
intptr_t cid = ResultCid();
ASSERT(cid != kIllegalCid);
SetPropagatedCid(cid);
return cid;
}
intptr_t PhiInstr::GetPropagatedCid() {
return propagated_cid();
}
intptr_t ParameterInstr::GetPropagatedCid() {
return propagated_cid();
}
intptr_t AssertAssignableInstr::GetPropagatedCid() {
return propagated_cid();
}
// ==== Postorder graph traversal.
static bool IsMarked(BlockEntryInstr* block,
GrowableArray<BlockEntryInstr*>* preorder) {
@@ -1027,154 +824,6 @@ void Instruction::Goto(JoinEntryInstr* entry) {
}
RawAbstractType* Value::CompileType() const {
if (definition()->HasPropagatedType()) {
return definition()->PropagatedType();
}
// The compile type may be requested when building the flow graph, i.e. before
// type propagation has occurred. To avoid repeatedly computing the compile
// type of the definition, we store it as initial propagated type.
AbstractType& type = AbstractType::Handle(definition()->CompileType());
definition()->SetPropagatedType(type);
return type.raw();
}
intptr_t Value::ResultCid() const {
if (reaching_cid() == kIllegalCid) {
return definition()->GetPropagatedCid();
}
return reaching_cid();
}
RawAbstractType* ConstantInstr::CompileType() const {
if (value().IsNull()) {
return Type::NullType();
}
if (value().IsInstance()) {
return Instance::Cast(value()).GetType();
} else {
ASSERT(value().IsAbstractTypeArguments());
return AbstractType::null();
}
}
intptr_t ConstantInstr::ResultCid() const {
if (value().IsNull()) {
return kNullCid;
}
if (value().IsInstance()) {
return Class::Handle(value().clazz()).id();
} else {
ASSERT(value().IsAbstractTypeArguments());
return kDynamicCid;
}
}
RawAbstractType* AssertAssignableInstr::CompileType() const {
const AbstractType& value_compile_type =
AbstractType::Handle(value()->CompileType());
if (!value_compile_type.IsNull() &&
value_compile_type.IsMoreSpecificThan(dst_type(), NULL)) {
return value_compile_type.raw();
}
return dst_type().raw();
}
RawAbstractType* AssertBooleanInstr::CompileType() const {
return Type::BoolType();
}
RawAbstractType* ArgumentDefinitionTestInstr::CompileType() const {
return Type::BoolType();
}
RawAbstractType* CurrentContextInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* StoreContextInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* ClosureCallInstr::CompileType() const {
// Because of function subtyping rules, the declared return type of a closure
// call cannot be relied upon for compile type analysis. For example, a
// function returning dynamic can be assigned to a closure variable declared
// to return int and may actually return a double at run-time.
return Type::DynamicType();
}
RawAbstractType* InstanceCallInstr::CompileType() const {
// TODO(regis): Return a more specific type than dynamic for recognized
// combinations of receiver type and method name.
return Type::DynamicType();
}
RawAbstractType* PolymorphicInstanceCallInstr::CompileType() const {
return Type::DynamicType();
}
RawAbstractType* StaticCallInstr::CompileType() const {
if (FLAG_enable_type_checks) {
return function().result_type();
}
return Type::DynamicType();
}
RawAbstractType* LoadLocalInstr::CompileType() const {
if (FLAG_enable_type_checks) {
return local().type().raw();
}
return Type::DynamicType();
}
RawAbstractType* StoreLocalInstr::CompileType() const {
return value()->CompileType();
}
RawAbstractType* StrictCompareInstr::CompileType() const {
return Type::BoolType();
}
// Only known == targets return a Boolean.
RawAbstractType* EqualityCompareInstr::CompileType() const {
if ((receiver_class_id() == kSmiCid) ||
(receiver_class_id() == kDoubleCid) ||
(receiver_class_id() == kNumberCid)) {
return Type::BoolType();
}
return Type::DynamicType();
}
intptr_t EqualityCompareInstr::ResultCid() const {
if ((receiver_class_id() == kSmiCid) ||
(receiver_class_id() == kDoubleCid) ||
(receiver_class_id() == kNumberCid)) {
// Known/library equalities that are guaranteed to return Boolean.
return kBoolCid;
}
return kDynamicCid;
}
bool EqualityCompareInstr::IsPolymorphic() const {
return HasICData() &&
(ic_data()->NumberOfChecks() > 0) &&
@@ -1182,194 +831,6 @@ bool EqualityCompareInstr::IsPolymorphic() const {
}
RawAbstractType* RelationalOpInstr::CompileType() const {
if ((operands_class_id() == kSmiCid) ||
(operands_class_id() == kDoubleCid) ||
(operands_class_id() == kNumberCid)) {
// Known/library relational ops that are guaranteed to return Boolean.
return Type::BoolType();
}
return Type::DynamicType();
}
intptr_t RelationalOpInstr::ResultCid() const {
if ((operands_class_id() == kSmiCid) ||
(operands_class_id() == kDoubleCid) ||
(operands_class_id() == kNumberCid)) {
// Known/library relational ops that are guaranteed to return Boolean.
return kBoolCid;
}
return kDynamicCid;
}
RawAbstractType* NativeCallInstr::CompileType() const {
// The result type of the native function is identical to the result type of
// the enclosing native Dart function. However, we prefer to check the type
// of the value returned from the native call.
return Type::DynamicType();
}
RawAbstractType* StringFromCharCodeInstr::CompileType() const {
return Type::StringType();
}
RawAbstractType* LoadIndexedInstr::CompileType() const {
switch (class_id_) {
case kArrayCid:
case kImmutableArrayCid:
return Type::DynamicType();
case kFloat32ArrayCid:
case kFloat64ArrayCid:
return Type::Double();
case kInt8ArrayCid:
case kUint8ArrayCid:
case kUint8ClampedArrayCid:
case kExternalUint8ArrayCid:
case kExternalUint8ClampedArrayCid:
case kInt16ArrayCid:
case kUint16ArrayCid:
case kInt32ArrayCid:
case kUint32ArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
return Type::IntType();
default:
UNIMPLEMENTED();
return Type::IntType();
}
}
RawAbstractType* StoreIndexedInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* StoreInstanceFieldInstr::CompileType() const {
return value()->CompileType();
}
RawAbstractType* LoadStaticFieldInstr::CompileType() const {
if (FLAG_enable_type_checks) {
return field().type();
}
return Type::DynamicType();
}
RawAbstractType* StoreStaticFieldInstr::CompileType() const {
return value()->CompileType();
}
RawAbstractType* BooleanNegateInstr::CompileType() const {
return Type::BoolType();
}
RawAbstractType* InstanceOfInstr::CompileType() const {
return Type::BoolType();
}
RawAbstractType* CreateArrayInstr::CompileType() const {
return type().raw();
}
RawAbstractType* CreateClosureInstr::CompileType() const {
const Function& fun = function();
const Class& signature_class = Class::Handle(fun.signature_class());
return signature_class.SignatureType();
}
RawAbstractType* AllocateObjectInstr::CompileType() const {
// TODO(regis): Be more specific.
return Type::DynamicType();
}
RawAbstractType* AllocateObjectWithBoundsCheckInstr::CompileType() const {
// TODO(regis): Be more specific.
return Type::DynamicType();
}
RawAbstractType* LoadFieldInstr::CompileType() const {
// Type may be null if the field is a VM field, e.g. context parent.
// Keep it as null for debug purposes and do not return dynamic in production
// mode, since misuse of the type would remain undetected.
if (type().IsNull()) {
return AbstractType::null();
}
if (FLAG_enable_type_checks) {
return type().raw();
}
return Type::DynamicType();
}
RawAbstractType* StoreVMFieldInstr::CompileType() const {
return value()->CompileType();
}
RawAbstractType* InstantiateTypeArgumentsInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* ExtractConstructorTypeArgumentsInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* ExtractConstructorInstantiatorInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* AllocateContextInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* ChainContextInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* CloneContextInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* CatchEntryInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* CheckStackOverflowInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* BinarySmiOpInstr::CompileType() const {
return Type::SmiType();
}
intptr_t BinarySmiOpInstr::ResultCid() const {
return kSmiCid;
}
bool BinarySmiOpInstr::CanDeoptimize() const {
switch (op_kind()) {
case Token::kBIT_AND:
@@ -1398,47 +859,6 @@ bool BinarySmiOpInstr::RightIsPowerOfTwoConstant() const {
}
RawAbstractType* BinaryMintOpInstr::CompileType() const {
return Type::IntType();
}
intptr_t BinaryMintOpInstr::ResultCid() const {
return kDynamicCid;
}
RawAbstractType* ShiftMintOpInstr::CompileType() const {
return Type::IntType();
}
intptr_t ShiftMintOpInstr::ResultCid() const {
return kDynamicCid;
}
RawAbstractType* UnaryMintOpInstr::CompileType() const {
return Type::IntType();
}
intptr_t UnaryMintOpInstr::ResultCid() const {
return kDynamicCid;
}
RawAbstractType* BinaryDoubleOpInstr::CompileType() const {
return Type::Double();
}
intptr_t BinaryDoubleOpInstr::ResultCid() const {
// The output is not an instance, but it becomes a double when boxed.
return kDoubleCid;
}
static bool ToIntegerConstant(Value* value, intptr_t* result) {
if (!value->BindsToConstant()) {
if (value->definition()->IsUnboxDouble()) {
@@ -1599,96 +1019,6 @@ Definition* BinaryMintOpInstr::Canonicalize(FlowGraphOptimizer* optimizer) {
}
RawAbstractType* MathSqrtInstr::CompileType() const {
return Type::Double();
}
RawAbstractType* UnboxDoubleInstr::CompileType() const {
return Type::null();
}
intptr_t BoxDoubleInstr::ResultCid() const {
return kDoubleCid;
}
RawAbstractType* BoxDoubleInstr::CompileType() const {
return Type::Double();
}
intptr_t BoxIntegerInstr::ResultCid() const {
return kDynamicCid;
}
RawAbstractType* BoxIntegerInstr::CompileType() const {
return Type::IntType();
}
intptr_t UnboxIntegerInstr::ResultCid() const {
return kDynamicCid;
}
RawAbstractType* UnboxIntegerInstr::CompileType() const {
return Type::null();
}
RawAbstractType* UnarySmiOpInstr::CompileType() const {
return Type::SmiType();
}
RawAbstractType* SmiToDoubleInstr::CompileType() const {
return Type::Double();
}
RawAbstractType* DoubleToIntegerInstr::CompileType() const {
return Type::IntType();
}
RawAbstractType* DoubleToSmiInstr::CompileType() const {
return Type::SmiType();
}
RawAbstractType* DoubleToDoubleInstr::CompileType() const {
return Type::Double();
}
RawAbstractType* InvokeMathCFunctionInstr::CompileType() const {
return Type::Double();
}
RawAbstractType* CheckClassInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* CheckSmiInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* CheckArrayBoundInstr::CompileType() const {
return AbstractType::null();
}
RawAbstractType* CheckEitherNonSmiInstr::CompileType() const {
return AbstractType::null();
}
// Optimizations that eliminate or simplify individual instructions.
Instruction* Instruction::Canonicalize(FlowGraphOptimizer* optimizer) {
return this;
@@ -1752,7 +1082,7 @@ Definition* LoadFieldInstr::Canonicalize(FlowGraphOptimizer* optimizer) {
StaticCallInstr* call = value()->definition()->AsStaticCall();
if (call != NULL &&
call->is_known_constructor() &&
call->ResultCid() == kArrayCid) {
(call->Type()->ToCid() == kArrayCid)) {
return call->ArgumentAt(1)->value()->definition();
}
return this;
@@ -1760,33 +1090,18 @@ Definition* LoadFieldInstr::Canonicalize(FlowGraphOptimizer* optimizer) {
Definition* AssertBooleanInstr::Canonicalize(FlowGraphOptimizer* optimizer) {
const intptr_t value_cid = value()->ResultCid();
return (value_cid == kBoolCid) ? value()->definition() : this;
if (FLAG_eliminate_type_checks && (value()->Type()->ToCid() == kBoolCid)) {
return value()->definition();
}
return this;
}
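This is the recurring canonicalization shape in this change: a check folds to
its input once the propagated type already proves what the check would verify
at run time. A hedged standalone sketch of the pattern (Node and the class id
constants are stand-ins, not VM types):

#include <stdint.h>

// Stand-ins for the VM's class ids; values are illustrative only.
const intptr_t kUnknownClassId = -1;
const intptr_t kBoolClassId = 1;

// Minimal node: a statically known class id plus the checked input value.
struct Node {
  intptr_t cid;  // kUnknownClassId when no single class is known.
  Node* input;   // nullptr for leaf values.
};

// Folds a bool assertion to its input when the input is already known to be
// a bool, mirroring the AssertBooleanInstr::Canonicalize logic above.
Node* CanonicalizeBoolAssert(Node* assert_node) {
  Node* value = assert_node->input;
  if (value != nullptr && value->cid == kBoolClassId) {
    return value;  // The assertion is redundant; use the value directly.
  }
  return assert_node;  // Keep the assertion.
}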
Definition* AssertAssignableInstr::Canonicalize(FlowGraphOptimizer* optimizer) {
// (1) Replace the assert with its input if the input has a known compatible
// class-id. The class-ids handled here are those that are known to be
// results of IL instructions.
intptr_t cid = value()->ResultCid();
bool is_redundant = false;
if (dst_type().IsIntType()) {
is_redundant = (cid == kSmiCid) || (cid == kMintCid);
} else if (dst_type().IsDoubleType()) {
is_redundant = (cid == kDoubleCid);
} else if (dst_type().IsBoolType()) {
is_redundant = (cid == kBoolCid);
}
if (is_redundant) return value()->definition();
// (2) Replace the assert with its input if the input is the result of a
// compatible assert itself.
AssertAssignableInstr* check = value()->definition()->AsAssertAssignable();
if ((check != NULL) && check->dst_type().Equals(dst_type())) {
// TODO(fschneider): Propagate type-assertions across phi-nodes.
// TODO(fschneider): Eliminate more asserts with subtype relation.
return check;
if (FLAG_eliminate_type_checks &&
value()->Type()->IsAssignableTo(dst_type())) {
return value()->definition();
}
// (3) For uninstantiated target types: If the instantiator type arguments
@@ -1868,7 +1183,7 @@ Definition* StrictCompareInstr::Canonicalize(FlowGraphOptimizer* optimizer) {
// Handles e === true.
if ((kind() == Token::kEQ_STRICT) &&
(right_constant.raw() == Bool::True().raw()) &&
(left()->ResultCid() == kBoolCid)) {
(left()->Type()->ToCid() == kBoolCid)) {
// Return left subexpression as the replacement for this instruction.
return left_defn;
}
@@ -1877,7 +1192,10 @@ Definition* StrictCompareInstr::Canonicalize(FlowGraphOptimizer* optimizer) {
Instruction* CheckClassInstr::Canonicalize(FlowGraphOptimizer* optimizer) {
const intptr_t value_cid = value()->ResultCid();
// TODO(vegorov): Replace class checks with null checks when ToNullableCid
// matches.
const intptr_t value_cid = value()->Type()->ToCid();
if (value_cid == kDynamicCid) {
return this;
}
@@ -1896,14 +1214,14 @@ Instruction* CheckClassInstr::Canonicalize(FlowGraphOptimizer* optimizer) {
Instruction* CheckSmiInstr::Canonicalize(FlowGraphOptimizer* optimizer) {
return (value()->ResultCid() == kSmiCid) ? NULL : this;
return (value()->Type()->ToCid() == kSmiCid) ? NULL : this;
}
Instruction* CheckEitherNonSmiInstr::Canonicalize(
FlowGraphOptimizer* optimizer) {
if ((left()->ResultCid() == kDoubleCid) ||
(right()->ResultCid() == kDoubleCid)) {
if ((left()->Type()->ToCid() == kDoubleCid) ||
(right()->Type()->ToCid() == kDoubleCid)) {
return NULL; // Remove from the graph.
}
return this;
@@ -2122,13 +1440,11 @@ void StaticCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
void AssertAssignableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (!is_eliminated()) {
compiler->GenerateAssertAssignable(token_pos(),
deopt_id(),
dst_type(),
dst_name(),
locs());
}
compiler->GenerateAssertAssignable(token_pos(),
deopt_id(),
dst_type(),
dst_name(),
locs());
ASSERT(locs()->in(0).reg() == locs()->out().reg());
}
@@ -2400,7 +1716,7 @@ RangeBoundary RangeBoundary::Max(RangeBoundary a, RangeBoundary b) {
void Definition::InferRange() {
ASSERT(GetPropagatedCid() == kSmiCid); // Has meaning only for smis.
ASSERT(Type()->ToCid() == kSmiCid); // Has meaning only for smis.
if (range_ == NULL) {
range_ = Range::Unknown();
}
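Most rewrites above replace ResultCid()/propagated-cid queries with queries
on a CompileType, so a sketch of the lattice element may help: it carries a
nullability flag and a concrete class id (the real CompileType also tracks
an abstract super type, omitted here), ToCid() answers only when a single
non-null class is known, and merges at phis take a join. All names below are
illustrative assumptions, not the VM's API:

#include <stdint.h>

const intptr_t kNoSingleCid = -1;  // Stand-in for "class id unknown".

// Hedged sketch of a type-lattice element: nullability plus a concrete cid.
struct SketchCompileType {
  bool is_nullable;
  intptr_t cid;  // kNoSingleCid when no single concrete class is known.

  // A usable concrete cid requires both a known class and non-nullability,
  // because a nullable value may also be the null object at run time.
  intptr_t ToCid() const {
    return (is_nullable || cid == kNoSingleCid) ? kNoSingleCid : cid;
  }

  // Join of two elements, as needed where control flow merges at a phi:
  // nullable if either side is, single cid only if both sides agree.
  static SketchCompileType Join(const SketchCompileType& a,
                                const SketchCompileType& b) {
    SketchCompileType r;
    r.is_nullable = a.is_nullable || b.is_nullable;
    r.cid = (a.cid == b.cid) ? a.cid : kNoSingleCid;
    return r;
  }
};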

File diff suppressed because it is too large

View file

@@ -172,9 +172,9 @@ void StringFromCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
intptr_t LoadIndexedInstr::ResultCid() const {
CompileType* LoadIndexedInstr::ComputeInitialType() const {
UNIMPLEMENTED();
return kDynamicCid;
return NULL;
}

View file

@@ -212,9 +212,7 @@ void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register obj = locs()->in(0).reg();
Register result = locs()->out().reg();
if (!is_eliminated()) {
EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler);
}
EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler);
ASSERT(obj == result);
}
@@ -1097,14 +1095,16 @@ void StringFromCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
intptr_t LoadIndexedInstr::ResultCid() const {
CompileType* LoadIndexedInstr::ComputeInitialType() const {
switch (class_id_) {
case kArrayCid:
case kImmutableArrayCid:
return kDynamicCid;
return CompileType::Dynamic();
case kFloat32ArrayCid:
case kFloat64ArrayCid:
return kDoubleCid;
return CompileType::FromCid(kDoubleCid);
case kInt8ArrayCid:
case kUint8ArrayCid:
case kUint8ClampedArrayCid:
@@ -1114,16 +1114,19 @@ intptr_t LoadIndexedInstr::ResultCid() const {
case kUint16ArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
return kSmiCid;
return CompileType::FromCid(kSmiCid);
case kInt32ArrayCid:
case kUint32ArrayCid:
// The result can be a Smi or a Mint when boxed.
// The instruction can deoptimize if we optimistically assumed that the
// result fits into a Smi.
return CanDeoptimize() ? kSmiCid : kDynamicCid;
return CanDeoptimize() ? CompileType::FromCid(kSmiCid)
: CompileType::Int();
default:
UNIMPLEMENTED();
return kDynamicCid;
return CompileType::Dynamic();
}
}
@@ -2267,8 +2270,8 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary() const {
ASSERT((left()->ResultCid() != kDoubleCid) &&
(right()->ResultCid() != kDoubleCid));
ASSERT((left()->Type()->ToCid() != kDoubleCid) &&
(right()->Type()->ToCid() != kDoubleCid));
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 1;
LocationSummary* summary =
@@ -2365,7 +2368,7 @@ LocationSummary* UnboxDoubleInstr::MakeLocationSummary() const {
void UnboxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->ResultCid();
const intptr_t value_cid = value()->Type()->ToCid();
const Register value = locs()->in(0).reg();
const XmmRegister result = locs()->out().fpu_reg();
@@ -2753,8 +2756,6 @@ LocationSummary* CheckSmiInstr::MakeLocationSummary() const {
void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// TODO(srdjan): Check if we can remove this by reordering CSE and LICM.
if (value()->ResultCid() == kSmiCid) return;
Register value = locs()->in(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id(),
kDeoptCheckSmi);
@@ -2820,7 +2821,7 @@ LocationSummary* UnboxIntegerInstr::MakeLocationSummary() const {
void UnboxIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->ResultCid();
const intptr_t value_cid = value()->Type()->ToCid();
const Register value = locs()->in(0).reg();
const XmmRegister result = locs()->out().fpu_reg();

View file

@@ -172,9 +172,9 @@ void StringFromCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
intptr_t LoadIndexedInstr::ResultCid() const {
CompileType* LoadIndexedInstr::ComputeInitialType() const {
UNIMPLEMENTED();
return kDynamicCid;
return NULL;
}

View file

@@ -219,9 +219,7 @@ void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register obj = locs()->in(0).reg();
Register result = locs()->out().reg();
if (!is_eliminated()) {
EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler);
}
EmitAssertBoolean(obj, token_pos(), deopt_id(), locs(), compiler);
ASSERT(obj == result);
}
@@ -961,14 +959,16 @@ void StringFromCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
intptr_t LoadIndexedInstr::ResultCid() const {
CompileType* LoadIndexedInstr::ComputeInitialType() const {
switch (class_id_) {
case kArrayCid:
case kImmutableArrayCid:
return kDynamicCid;
return CompileType::Dynamic();
case kFloat32ArrayCid:
case kFloat64ArrayCid:
return kDoubleCid;
return CompileType::FromCid(kDoubleCid);
case kInt8ArrayCid:
case kUint8ArrayCid:
case kUint8ClampedArrayCid:
@@ -980,10 +980,11 @@ intptr_t LoadIndexedInstr::ResultCid() const {
case kTwoByteStringCid:
case kInt32ArrayCid:
case kUint32ArrayCid:
return kSmiCid;
return CompileType::FromCid(kSmiCid);
default:
UNIMPLEMENTED();
return kSmiCid;
return NULL;
}
}
@@ -2127,8 +2128,8 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* CheckEitherNonSmiInstr::MakeLocationSummary() const {
ASSERT((left()->ResultCid() != kDoubleCid) &&
(right()->ResultCid() != kDoubleCid));
ASSERT((left()->Type()->ToCid() != kDoubleCid) &&
(right()->Type()->ToCid() != kDoubleCid));
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 1;
LocationSummary* summary =
@@ -2225,7 +2226,7 @@ LocationSummary* UnboxDoubleInstr::MakeLocationSummary() const {
void UnboxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->ResultCid();
const intptr_t value_cid = value()->Type()->ToCid();
const Register value = locs()->in(0).reg();
const XmmRegister result = locs()->out().fpu_reg();
@@ -2627,8 +2628,6 @@ LocationSummary* CheckSmiInstr::MakeLocationSummary() const {
void CheckSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// TODO(srdjan): Check if we can remove this by reordering CSE and LICM.
if (value()->ResultCid() == kSmiCid) return;
Register value = locs()->in(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id(),
kDeoptCheckSmi);

View file

@@ -2185,12 +2185,13 @@ bool Class::TypeTest(
// Since we do not truncate the type argument vector of a subclass (see
// below), we only check a prefix of the proper length.
// Check for covariance.
if (other_type_arguments.IsNull() ||
other_type_arguments.IsRawInstantiatedRaw(len)) {
if (other_type_arguments.IsNull() || other_type_arguments.IsRaw(len)) {
return true;
}
if (type_arguments.IsNull() ||
type_arguments.IsRawInstantiatedRaw(len)) {
if (type_arguments.IsNull() || type_arguments.IsRaw(len)) {
// The other type can't be more specific than this one, because it would
// then need to have all dynamic type arguments, and that case is checked
// above.
return test_kind == kIsSubtypeOf;
}
return type_arguments.TypeTest(test_kind,
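The IsRaw(len) test used above holds when every one of the first len type
arguments is the dynamic type, i.e. when the vector imposes no constraints
over that prefix, which is what lets the covariance check succeed early. A
hedged sketch of the predicate, modeling the vector as plain flags rather
than the VM's TypeArguments API:

#include <stddef.h>
#include <vector>

// Sketch only: true when the first len type arguments are all dynamic and
// therefore impose no constraints on the type test.
static bool IsRawPrefix(const std::vector<bool>& arg_is_dynamic, size_t len) {
  for (size_t i = 0; i < len && i < arg_is_dynamic.size(); ++i) {
    if (!arg_is_dynamic[i]) return false;
  }
  return true;
}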

View file

@@ -155,6 +155,8 @@
'flow_graph_inliner.h',
'flow_graph_optimizer.cc',
'flow_graph_optimizer.h',
'flow_graph_type_propagator.cc',
'flow_graph_type_propagator.h',
'freelist.cc',
'freelist.h',
'freelist_test.cc',