[VM] Replace hand-written assembly prologues with IR

As part of the prologue changes, we get rid of the empty context as well.

Issue https://github.com/dart-lang/sdk/issues/31495

Change-Id: I707e23c631bcfbbad6c91c4963d0c10f7a0be625
Reviewed-on: https://dart-review.googlesource.com/25320
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
Author: Martin Kustermann
Date:   2017-12-13 16:04:34 +00:00
Parent: 74dcc87393
Commit: cf1de7d46c

54 changed files with 2231 additions and 2337 deletions


@@ -80,13 +80,15 @@ DEFINE_NATIVE_ENTRY(Closure_clone, 1) {
       TypeArguments::Handle(zone, receiver.function_type_arguments());
   const Function& function = Function::Handle(zone, receiver.function());
   const Context& context = Context::Handle(zone, receiver.context());
-  Context& cloned_context =
-      Context::Handle(zone, Context::New(context.num_variables()));
-  cloned_context.set_parent(Context::Handle(zone, context.parent()));
-  Object& instance = Object::Handle(zone);
-  for (int i = 0; i < context.num_variables(); i++) {
-    instance = context.At(i);
-    cloned_context.SetAt(i, instance);
+  Context& cloned_context = Context::Handle(zone);
+  if (!context.IsNull()) {
+    cloned_context = Context::New(context.num_variables());
+    cloned_context.set_parent(Context::Handle(zone, context.parent()));
+    Object& instance = Object::Handle(zone);
+    for (int i = 0; i < context.num_variables(); i++) {
+      instance = context.At(i);
+      cloned_context.SetAt(i, instance);
+    }
   }
   return Closure::New(instantiator_type_arguments, function_type_arguments,
                       function, cloned_context);


@@ -2843,12 +2843,18 @@ class Instance extends HeapObject implements M.Instance {
     // Coerce absence to false.
    valueAsStringIsTruncated = map['valueAsStringIsTruncated'] == true;
     closureFunction = map['closureFunction'];
-    closureContext = map['closureContext'];
     name = map['name'];
     length = map['length'];
     pattern = map['pattern'];
     typeClass = map['typeClass'];
+
+    final context = map['closureContext'];
+    if (context is Context) {
+      closureContext = context;
+    } else if (context != null) {
+      assert(context is Instance && context.isNull);
+    }
+
     if (mapIsRef) {
       return;
     }


@@ -55,11 +55,7 @@ var tests = [
     return field.load().then((_) {
       return field.staticValue.load().then((Instance block) {
         expect(block.isClosure, isTrue);
-        expect(block.closureContext.isContext, isTrue);
-        expect(block.closureContext.length, equals(0));
-        return block.closureContext.load().then((Context ctxt) {
-          expect(ctxt.parentContext, isNull);
-        });
+        expect(block.closureContext, isNull);
       });
     });
   }),
@@ -75,11 +71,7 @@ var tests = [
         expect(ctxt.variables.single.value.asValue.isString, isTrue);
         expect(ctxt.variables.single.value.asValue.valueAsString,
             equals('I could be copied into the block'));
-        expect(ctxt.parentContext.isContext, isTrue);
-        expect(ctxt.parentContext.length, equals(0));
-        return ctxt.parentContext.load().then((Context outerCtxt) {
-          expect(outerCtxt.parentContext, isNull);
-        });
+        expect(ctxt.parentContext, isNull);
       });
     });
   });
@@ -95,11 +87,7 @@ var tests = [
        expect(ctxt.variables.single.value.asValue.isInt, isTrue);
         expect(ctxt.variables.single.value.asValue.valueAsString,
             equals('43'));
-        expect(ctxt.parentContext.isContext, isTrue);
-        expect(ctxt.parentContext.length, equals(0));
-        return ctxt.parentContext.load().then((Context outerCtxt) {
-          expect(outerCtxt.parentContext, isNull);
-        });
+        expect(ctxt.parentContext, isNull);
      });
    });
  });
@@ -122,13 +110,7 @@ var tests = [
           expect(outerCtxt.variables.single.value.asValue.isInt, isTrue);
           expect(outerCtxt.variables.single.value.asValue.valueAsString,
               equals('421'));
-          expect(outerCtxt.parentContext.isContext, isTrue);
-          expect(outerCtxt.parentContext.length, equals(0));
-          return outerCtxt.parentContext
-              .load()
-              .then((Context outerCtxt2) {
-            expect(outerCtxt2.parentContext, isNull);
-          });
+          expect(outerCtxt.parentContext, isNull);
         });
       });
     });


@@ -59,6 +59,17 @@ class BaseGrowableArray : public B {
     return data_[index];
   }
 
+  void FillWith(const T& value, intptr_t start, intptr_t length) {
+    ASSERT(start >= 0);
+    ASSERT(length >= 0);
+    ASSERT(start <= length_);
+    Resize(start + length);
+    for (intptr_t i = 0; i < length; ++i) {
+      data_[start + i] = value;
+    }
+  }
+
   const T& At(intptr_t index) const { return operator[](index); }
 
   T& Last() const {

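The FillWith helper added above is what later lets the SSA renamer seed every stack-local slot with constant_null() in a single call. A minimal standalone sketch of the same grow-and-fill behavior, with std::vector standing in for the VM's growable array (this is an analogue for illustration, not the VM class):

#include <cassert>
#include <cstdint>
#include <vector>

// Standalone analogue of BaseGrowableArray::FillWith: extend the array to
// start + length elements and fill the tail range with `value`.
template <typename T>
void FillWith(std::vector<T>* data, const T& value, intptr_t start,
              intptr_t length) {
  assert(start >= 0 && length >= 0);
  assert(static_cast<size_t>(start) <= data->size());
  data->resize(start + length);
  for (intptr_t i = 0; i < length; ++i) {
    (*data)[start + i] = value;
  }
}

int main() {
  std::vector<int> env = {1, 2};                 // two parameter slots
  FillWith(&env, 0, /*start=*/2, /*length=*/3);  // seed three local slots
  assert(env.size() == 5 && env[2] == 0 && env[4] == 0);
  return 0;
}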

@@ -35,6 +35,7 @@ void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
 #define PARAMS_A_D uintptr_t ra, uintptr_t rd
 #define PARAMS_D uintptr_t rd
 #define PARAMS_A_B_C uintptr_t ra, uintptr_t rb, uintptr_t rc
+#define PARAMS_A_B_Y uintptr_t ra, uintptr_t rb, intptr_t ry
 #define PARAMS_A uintptr_t ra
 #define PARAMS_T intptr_t x
 #define PARAMS_A_X uintptr_t ra, intptr_t x
@@ -44,6 +45,7 @@ void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
 #define ENCODE_A_D , ra, rd
 #define ENCODE_D , 0, rd
 #define ENCODE_A_B_C , ra, rb, rc
+#define ENCODE_A_B_Y , ra, rb, ry
 #define ENCODE_A , ra, 0
 #define ENCODE_T , x
 #define ENCODE_A_X , ra, x
@@ -53,6 +55,7 @@ void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
 #define FENCODE_A_D Encode
 #define FENCODE_D Encode
 #define FENCODE_A_B_C Encode
+#define FENCODE_A_B_Y Encode
 #define FENCODE_A Encode
 #define FENCODE_T EncodeSigned
 #define FENCODE_A_X EncodeSigned


@@ -130,6 +130,7 @@ class Assembler : public ValueObject {
 #define PARAMS_A_D uintptr_t ra, uintptr_t rd
 #define PARAMS_D uintptr_t rd
 #define PARAMS_A_B_C uintptr_t ra, uintptr_t rb, uintptr_t rc
+#define PARAMS_A_B_Y uintptr_t ra, uintptr_t rb, intptr_t ry
 #define PARAMS_A uintptr_t ra
 #define PARAMS_X intptr_t x
 #define PARAMS_T intptr_t x
@@ -141,6 +142,7 @@ class Assembler : public ValueObject {
 #undef PARAMS_A_D
 #undef PARAMS_D
 #undef PARAMS_A_B_C
+#undef PARAMS_A_B_Y
 #undef PARAMS_A
 #undef PARAMS_X
 #undef PARAMS_T


@@ -178,6 +178,21 @@ static void FormatA_B_C(char* buf,
   Apply(&buf, &size, pc, op3, c, "");
 }
 
+static void FormatA_B_Y(char* buf,
+                        intptr_t size,
+                        uword pc,
+                        uint32_t op,
+                        Fmt op1,
+                        Fmt op2,
+                        Fmt op3) {
+  const int32_t a = (op >> 8) & 0xFF;
+  const int32_t b = (op >> 16) & 0xFF;
+  const int32_t y = static_cast<int8_t>((op >> 24) & 0xFF);
+  Apply(&buf, &size, pc, op1, a, ", ");
+  Apply(&buf, &size, pc, op2, b, ", ");
+  Apply(&buf, &size, pc, op3, y, "");
+}
+
 #define BYTECODE_FORMATTER(name, encoding, op1, op2, op3)                     \
   static void Format##name(char* buf, intptr_t size, uword pc, uint32_t op) { \
     Format##encoding(buf, size, pc, op, Fmt##op1, Fmt##op2, Fmt##op3);        \

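The new A_B_Y format packs two unsigned 8-bit register operands and one signed 8-bit operand into a 32-bit instruction word; the static_cast<int8_t> above is what sign-extends Y on decode. A round-trip sketch of that layout (EncodeABY is a hypothetical helper for illustration, not the VM's encoder):

#include <cassert>
#include <cstdint>

// Pack opcode | A | B | Y into one 32-bit DBC-style instruction word.
uint32_t EncodeABY(uint8_t opcode, uint8_t a, uint8_t b, int8_t y) {
  return static_cast<uint32_t>(opcode) | (static_cast<uint32_t>(a) << 8) |
         (static_cast<uint32_t>(b) << 16) |
         (static_cast<uint32_t>(static_cast<uint8_t>(y)) << 24);
}

int main() {
  const uint32_t op = EncodeABY(/*opcode=*/42, /*a=*/3, /*b=*/7, /*y=*/-5);
  const int32_t a = (op >> 8) & 0xFF;
  const int32_t b = (op >> 16) & 0xFF;
  // The int8_t cast sign-extends: byte 0xFB decodes back to -5, not 251.
  const int32_t y = static_cast<int8_t>((op >> 24) & 0xFF);
  assert(a == 3 && b == 7 && y == -5);
  return 0;
}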

@@ -229,6 +229,8 @@ void ConstantPropagator::VisitGuardFieldLength(GuardFieldLengthInstr* instr) {}
 
 void ConstantPropagator::VisitCheckSmi(CheckSmiInstr* instr) {}
 
+void ConstantPropagator::VisitTailCall(TailCallInstr* instr) {}
+
 void ConstantPropagator::VisitCheckNull(CheckNullInstr* instr) {}
 
 void ConstantPropagator::VisitGenericCheckBound(GenericCheckBoundInstr* instr) {
@@ -630,6 +632,15 @@ void ConstantPropagator::VisitLoadCodeUnits(LoadCodeUnitsInstr* instr) {
   SetValue(instr, non_constant_);
 }
 
+void ConstantPropagator::VisitLoadIndexedUnsafe(LoadIndexedUnsafeInstr* instr) {
+  SetValue(instr, non_constant_);
+}
+
+void ConstantPropagator::VisitStoreIndexedUnsafe(
+    StoreIndexedUnsafeInstr* instr) {
+  SetValue(instr, non_constant_);
+}
+
 void ConstantPropagator::VisitStoreIndexed(StoreIndexedInstr* instr) {
   SetValue(instr, instr->value()->definition()->constant_value());
 }

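Both new visitors pin their results to non_constant_: a load or store through a raw frame index, as the IR prologue now emits, can observe values the propagator cannot track, so such instructions go straight to the bottom of the lattice. A toy version of that lattice, under my simplifying assumption that values are collapsed to three levels (a real implementation also compares the constant payloads):

#include <cassert>

// Three-level constant-propagation lattice: unknown -> constant ->
// non-constant. Joining anything with non-constant stays non-constant,
// which is why the unsafe-indexed instructions are pinned there directly.
enum class Lattice { kUnknown, kConstant, kNonConstant };

Lattice Join(Lattice a, Lattice b) {
  if (a == Lattice::kUnknown) return b;
  if (b == Lattice::kUnknown) return a;
  return (a == b) ? a : Lattice::kNonConstant;
}

int main() {
  assert(Join(Lattice::kUnknown, Lattice::kConstant) == Lattice::kConstant);
  assert(Join(Lattice::kConstant, Lattice::kNonConstant) ==
         Lattice::kNonConstant);
  return 0;
}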

@@ -33,8 +33,9 @@ FlowGraph::FlowGraph(const ParsedFunction& parsed_function,
       current_ssa_temp_index_(0),
       max_block_id_(max_block_id),
       parsed_function_(parsed_function),
-      num_copied_params_(parsed_function.num_copied_params()),
-      num_non_copied_params_(parsed_function.num_non_copied_params()),
+      num_direct_parameters_(parsed_function.function().HasOptionalParameters()
+                                 ? 0
+                                 : parsed_function.function().NumParameters()),
       graph_entry_(graph_entry),
       preorder_(),
       postorder_(),
@@ -42,7 +43,6 @@ FlowGraph::FlowGraph(const ParsedFunction& parsed_function,
       optimized_block_order_(),
       constant_null_(NULL),
       constant_dead_(NULL),
-      constant_empty_context_(NULL),
       licm_allowed_(true),
       prologue_info_(prologue_info),
       loop_headers_(NULL),
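
The new initializer encodes one rule: as soon as a function has optional parameters, its arguments are only reachable through the arguments descriptor, so none of them count as direct (fixed FP-relative) parameters. A sketch of just that rule (hypothetical free function, for illustration):

#include <cassert>
#include <cstdint>

// Optional parameters force every argument through the arguments
// descriptor, so no parameter sits at a fixed slot above the frame pointer.
intptr_t NumDirectParameters(bool has_optional_parameters,
                             intptr_t num_parameters) {
  return has_optional_parameters ? 0 : num_parameters;
}

int main() {
  assert(NumDirectParameters(false, 3) == 3);  // f(a, b, c)
  assert(NumDirectParameters(true, 3) == 0);   // f(a, [b, c])
  return 0;
}
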
@@ -611,7 +611,7 @@ class VariableLivenessAnalysis : public LivenessAnalysis {
   explicit VariableLivenessAnalysis(FlowGraph* flow_graph)
       : LivenessAnalysis(flow_graph->variable_count(), flow_graph->postorder()),
         flow_graph_(flow_graph),
-        num_non_copied_params_(flow_graph->num_non_copied_params()),
+        num_direct_parameters_(flow_graph->num_direct_parameters()),
         assigned_vars_() {}
 
   // For every block (in preorder) compute and return set of variables that
@@ -656,7 +656,7 @@ class VariableLivenessAnalysis : public LivenessAnalysis {
       return false;
     }
     if (store->is_last()) {
-      const intptr_t index = store->local().BitIndexIn(num_non_copied_params_);
+      const intptr_t index = store->local().BitIndexIn(num_direct_parameters_);
       return GetLiveOutSet(block)->Contains(index);
     }
 
@@ -669,7 +669,7 @@ class VariableLivenessAnalysis : public LivenessAnalysis {
     if (load->local().Equals(*flow_graph_->CurrentContextVar())) {
       return false;
     }
-    const intptr_t index = load->local().BitIndexIn(num_non_copied_params_);
+    const intptr_t index = load->local().BitIndexIn(num_direct_parameters_);
     return load->is_last() && !GetLiveOutSet(block)->Contains(index);
   }
 
@@ -677,7 +677,7 @@ class VariableLivenessAnalysis : public LivenessAnalysis {
   virtual void ComputeInitialSets();
 
   const FlowGraph* flow_graph_;
-  const intptr_t num_non_copied_params_;
+  const intptr_t num_direct_parameters_;
   GrowableArray<BitVector*> assigned_vars_;
 };
 
@@ -709,7 +709,7 @@ void VariableLivenessAnalysis::ComputeInitialSets() {
       LoadLocalInstr* load = current->AsLoadLocal();
       if (load != NULL) {
-        const intptr_t index = load->local().BitIndexIn(num_non_copied_params_);
+        const intptr_t index = load->local().BitIndexIn(num_direct_parameters_);
         if (index >= live_in->length()) continue;  // Skip tmp_locals.
         live_in->Add(index);
         if (!last_loads->Contains(index)) {
@@ -722,7 +722,7 @@ void VariableLivenessAnalysis::ComputeInitialSets() {
       StoreLocalInstr* store = current->AsStoreLocal();
       if (store != NULL) {
         const intptr_t index =
-            store->local().BitIndexIn(num_non_copied_params_);
+            store->local().BitIndexIn(num_direct_parameters_);
         if (index >= live_in->length()) continue;  // Skip tmp_locals.
         if (kill->Contains(index)) {
           if (!live_in->Contains(index)) {
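
All of these call sites now pass num_direct_parameters_ to BitIndexIn, which maps a variable to its slot in the dense renaming environment: direct parameters first, stack locals after them. A simplified model of that layout (my simplification for illustration, not the VM's exact frame-slot arithmetic):

#include <cassert>
#include <cstdint>

// Environment layout assumed by the liveness analysis: direct parameters
// occupy bit indices [0, num_direct_parameters) and stack locals follow.
intptr_t ParameterBitIndex(intptr_t param_i) {
  return param_i;
}

intptr_t StackLocalBitIndex(intptr_t local_i, intptr_t num_direct_parameters) {
  return num_direct_parameters + local_i;
}

int main() {
  const intptr_t num_direct_parameters = 2;
  assert(ParameterBitIndex(0) == 0);
  assert(StackLocalBitIndex(0, num_direct_parameters) == 2);
  assert(StackLocalBitIndex(1, num_direct_parameters) == 3);
  return 0;
}
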
@@ -946,13 +946,9 @@ void FlowGraph::Rename(GrowableArray<PhiInstr*>* live_phis,
                        ZoneGrowableArray<Definition*>* inlining_parameters) {
   GraphEntryInstr* entry = graph_entry();
 
-  // Initial renaming environment.
-  GrowableArray<Definition*> env(variable_count());
-
   // Add global constants to the initial definitions.
   constant_null_ = GetConstant(Object::ZoneHandle());
   constant_dead_ = GetConstant(Symbols::OptimizedOut());
-  constant_empty_context_ = GetConstant(Object::empty_context());
 
   // Check if inlining_parameters include a type argument vector parameter.
   const intptr_t inlined_type_args_param =
@@ -961,66 +957,73 @@ void FlowGraph::Rename(GrowableArray<PhiInstr*>* live_phis,
           ? 1
           : 0;
 
-  // Add parameters to the initial definitions and renaming environment.
-  if (inlining_parameters != NULL) {
-    // Use known parameters.
-    ASSERT(inlined_type_args_param + parameter_count() ==
-           inlining_parameters->length());
-    for (intptr_t i = 0; i < parameter_count(); ++i) {
-      // If inlined_type_args_param == 1, then (*inlining_parameters)[0]
-      // is the passed-in type args. We do not add it to env[0] but to
-      // env[parameter_count()] below.
-      Definition* defn = (*inlining_parameters)[inlined_type_args_param + i];
-      AllocateSSAIndexes(defn);
-      AddToInitialDefinitions(defn);
-      env.Add(defn);
-    }
-  } else {
-    // Create new parameters. For functions compiled for OSR, the locals
-    // are unknown and so treated like parameters.
-    intptr_t count = IsCompiledForOsr() ? variable_count() : parameter_count();
-    for (intptr_t i = 0; i < count; ++i) {
-      ParameterInstr* param = new (zone()) ParameterInstr(i, entry);
-      param->set_ssa_temp_index(alloc_ssa_temp_index());  // New SSA temp.
-      AddToInitialDefinitions(param);
-      env.Add(param);
-    }
-  }
-
-  // Initialize all locals in the renaming environment. For OSR, the locals
-  // have already been handled as parameters.
-  if (!IsCompiledForOsr()) {
-    intptr_t i = parameter_count();
-    if (isolate()->reify_generic_functions() && function().IsGeneric()) {
-      // The first local is the slot holding the copied passed-in type args.
-      // TODO(regis): Do we need the SpecialParameterInstr if the type_args_var
-      // is not needed? Add an assert for now:
-      ASSERT(parsed_function().function_type_arguments() != NULL);
-      Definition* defn;
-      if (inlining_parameters == NULL) {
-        defn = new SpecialParameterInstr(SpecialParameterInstr::kTypeArgs,
-                                         Thread::kNoDeoptId);
-      } else {
-        defn = (*inlining_parameters)[0];
-      }
-      AllocateSSAIndexes(defn);
-      AddToInitialDefinitions(defn);
-      env.Add(defn);
-      ++i;
-    }
-    for (; i < variable_count(); ++i) {
-      if (i == CurrentContextEnvIndex()) {
-        if (function().IsClosureFunction()) {
-          SpecialParameterInstr* context = new SpecialParameterInstr(
-              SpecialParameterInstr::kContext, Thread::kNoDeoptId);
-          context->set_ssa_temp_index(alloc_ssa_temp_index());  // New SSA temp.
-          AddToInitialDefinitions(context);
-          env.Add(context);
-        } else {
-          env.Add(constant_empty_context());
-        }
-      } else {
-        env.Add(constant_null());
-      }
-    }
-  }
+  // Initial renaming environment.
+  GrowableArray<Definition*> env(variable_count());
+  {
+    const intptr_t parameter_count =
+        IsCompiledForOsr() ? variable_count() : num_direct_parameters_;
+    for (intptr_t i = 0; i < parameter_count; i++) {
+      ParameterInstr* param = new (zone()) ParameterInstr(i, entry);
+      param->set_ssa_temp_index(alloc_ssa_temp_index());
+      AddToInitialDefinitions(param);
+      env.Add(param);
+    }
+    ASSERT(env.length() == parameter_count);
+
+    // Fill in all local variables with `null` (for OSR the stack locals have
+    // already been handled above).
+    if (!IsCompiledForOsr()) {
+      ASSERT(env.length() == num_direct_parameters_);
+      env.FillWith(constant_null(), num_direct_parameters_, num_stack_locals());
+    }
+  }
+
+  // Override the entries in the renaming environment which are special (i.e.
+  // inlining arguments, type parameter, args descriptor, context, ...)
+  {
+    // Replace parameter slots with inlining definitions coming in.
+    if (inlining_parameters != NULL) {
+      for (intptr_t i = 0; i < function().NumParameters(); ++i) {
+        Definition* defn = (*inlining_parameters)[inlined_type_args_param + i];
+        AllocateSSAIndexes(defn);
+        AddToInitialDefinitions(defn);
+
+        intptr_t index = parsed_function_.RawParameterVariable(i)->BitIndexIn(
+            num_direct_parameters_);
+        env[index] = defn;
+      }
+    }
+
+    if (!IsCompiledForOsr()) {
+      const bool reify_generic_argument =
+          function().IsGeneric() && isolate()->reify_generic_functions();
+
+      // Replace the type arguments slot with a special parameter.
+      if (reify_generic_argument) {
+        ASSERT(parsed_function().function_type_arguments() != NULL);
+
+        Definition* defn;
+        if (inlining_parameters == NULL) {
+          // Note: If we are not inlining, then the prologue builder will
+          // take care of checking that we got the correct reified type
+          // arguments. This includes checking the argument descriptor in order
+          // to even find out if the parameter was passed or not.
+          defn = constant_dead();
+        } else {
+          defn = (*inlining_parameters)[0];
+        }
+        AllocateSSAIndexes(defn);
+        AddToInitialDefinitions(defn);
+        env[RawTypeArgumentEnvIndex()] = defn;
+      }
+
+      // Replace the argument descriptor slot with a special parameter.
+      if (parsed_function().has_arg_desc_var()) {
+        Definition* defn = new SpecialParameterInstr(
+            SpecialParameterInstr::kArgDescriptor, Thread::kNoDeoptId);
+        AllocateSSAIndexes(defn);
+        AddToInitialDefinitions(defn);
+        env[ArgumentDescriptorEnvIndex()] = defn;
+      }
+    }
+  }
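
The rewritten setup above has three phases: allocate a ParameterInstr per environment-visible parameter, null-fill the stack locals via FillWith, then patch the special slots (inlined parameter definitions, type arguments, argument descriptor). A toy model of those phases, with strings standing in for Definition* and slot positions that are assumptions for illustration:

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

int main() {
  const intptr_t num_direct_parameters = 2;
  const intptr_t num_stack_locals = 3;
  std::vector<std::string> env;
  // Phase 1: one ParameterInstr per direct parameter.
  for (intptr_t i = 0; i < num_direct_parameters; i++) {
    env.push_back("param" + std::to_string(i));
  }
  // Phase 2: seed every stack local with constant null (the FillWith call).
  env.resize(num_direct_parameters + num_stack_locals, "null");
  // Phase 3: override special slots, e.g. the argument-descriptor local
  // (its env index is assumed here; the VM computes it via BitIndexIn).
  const intptr_t arg_desc_env_index = num_direct_parameters + 1;
  env[arg_desc_env_index] = "arg_descriptor";
  assert(env.size() == 5 && env[2] == "null" && env[3] == "arg_descriptor");
  return 0;
}
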
@@ -1029,7 +1032,7 @@ void FlowGraph::Rename(GrowableArray<PhiInstr*>* live_phis,
     // Functions with try-catch have a fixed area of stack slots reserved
     // so that all local variables are stored at a known location when
     // on entry to the catch.
-    entry->set_fixed_slot_count(num_stack_locals() + num_copied_params());
+    entry->set_fixed_slot_count(num_stack_locals());
   }
   RenameRecursive(entry, &env, live_phis, variable_liveness);
 }
@@ -1037,7 +1040,7 @@ void FlowGraph::Rename(GrowableArray<PhiInstr*>* live_phis,
 void FlowGraph::AttachEnvironment(Instruction* instr,
                                   GrowableArray<Definition*>* env) {
   Environment* deopt_env =
-      Environment::From(zone(), *env, num_non_copied_params_, parsed_function_);
+      Environment::From(zone(), *env, num_direct_parameters_, parsed_function_);
   if (instr->IsClosureCall()) {
     deopt_env =
         deopt_env->DeepCopy(zone(), deopt_env->Length() - instr->InputCount());
@@ -1151,7 +1154,7 @@ void FlowGraph::RenameRecursive(BlockEntryInstr* block_entry,
         Definition* result = NULL;
         if (store != NULL) {
           // Update renaming environment.
-          intptr_t index = store->local().BitIndexIn(num_non_copied_params_);
+          intptr_t index = store->local().BitIndexIn(num_direct_parameters_);
           result = store->value()->definition();
 
           if (!FLAG_prune_dead_locals ||
@@ -1164,7 +1167,7 @@ void FlowGraph::RenameRecursive(BlockEntryInstr* block_entry,
           // The graph construction ensures we do not have an unused LoadLocal
           // computation.
           ASSERT(definition->HasTemp());
-          intptr_t index = load->local().BitIndexIn(num_non_copied_params_);
+          intptr_t index = load->local().BitIndexIn(num_direct_parameters_);
           result = (*env)[index];
 
           PhiInstr* phi = result->AsPhi();
@@ -1181,7 +1184,7 @@ void FlowGraph::RenameRecursive(BlockEntryInstr* block_entry,
           // Record captured parameters so that they can be skipped when
           // emitting sync code inside optimized try-blocks.
           if (load->local().is_captured_parameter()) {
-            intptr_t index = load->local().BitIndexIn(num_non_copied_params_);
+            intptr_t index = load->local().BitIndexIn(num_direct_parameters_);
             captured_parameters_->Add(index);
           }


@@ -77,12 +77,12 @@ struct PrologueInfo {
   // The first blockid used for prologue building. This information can be used
   // by the inliner for budget calculations: The prologue code falls away when
   // inlining, so we should not include it in the budget.
-  const intptr_t min_block_id;
+  intptr_t min_block_id;
 
   // The last blockid used for prologue building. This information can be used
   // by the inliner for budget calculations: The prologue code falls away when
   // inlining, so we should not include it in the budget.
-  const intptr_t max_block_id;
+  intptr_t max_block_id;
 
   PrologueInfo(intptr_t min, intptr_t max)
       : min_block_id(min), max_block_id(max) {}
@@ -103,17 +103,26 @@ class FlowGraph : public ZoneAllocated {
   // Function properties.
   const ParsedFunction& parsed_function() const { return parsed_function_; }
   const Function& function() const { return parsed_function_.function(); }
-  intptr_t parameter_count() const {
-    return num_copied_params_ + num_non_copied_params_;
-  }
+
+  // The number of directly accessible parameters (above the frame pointer).
+  // All other parameters can only be indirectly loaded via metadata found in
+  // the arguments descriptor.
+  intptr_t num_direct_parameters() const { return num_direct_parameters_; }
+
+  // The number of variables (or boxes) which code can load from / store to.
+  // The SSA renaming will insert phi's for them (and only them - i.e. there
+  // will be no phi insertion for [LocalVariable]s pointing to the expression
+  // stack!).
   intptr_t variable_count() const {
-    return parameter_count() + parsed_function_.num_stack_locals();
+    return num_direct_parameters_ + parsed_function_.num_stack_locals();
   }
+
+  // The number of variables (or boxes) inside the function's frame - meaning
+  // below the frame pointer. This does not include the expression stack.
   intptr_t num_stack_locals() const {
     return parsed_function_.num_stack_locals();
   }
-  intptr_t num_copied_params() const { return num_copied_params_; }
-  intptr_t num_non_copied_params() const { return num_non_copied_params_; }
 
   bool IsIrregexpFunction() const { return function().IsIrregexpFunction(); }
 
   LocalVariable* CurrentContextVar() const {
@@ -122,7 +131,16 @@ class FlowGraph : public ZoneAllocated {
   intptr_t CurrentContextEnvIndex() const {
     return parsed_function().current_context_var()->BitIndexIn(
-        num_non_copied_params_);
+        num_direct_parameters_);
+  }
+
+  intptr_t RawTypeArgumentEnvIndex() const {
+    return parsed_function().RawTypeArgumentsVariable()->BitIndexIn(
+        num_direct_parameters_);
+  }
+
+  intptr_t ArgumentDescriptorEnvIndex() const {
+    return parsed_function().arg_desc_var()->BitIndexIn(num_direct_parameters_);
   }
 
   // Flow graph orders.
@@ -181,10 +199,6 @@ class FlowGraph : public ZoneAllocated {
   ConstantInstr* constant_dead() const { return constant_dead_; }
 
-  ConstantInstr* constant_empty_context() const {
-    return constant_empty_context_;
-  }
-
   intptr_t alloc_ssa_temp_index() { return current_ssa_temp_index_++; }
 
   void AllocateSSAIndexes(Definition* def) {
@@ -410,8 +424,7 @@ class FlowGraph : public ZoneAllocated {
   // Flow graph fields.
   const ParsedFunction& parsed_function_;
-  const intptr_t num_copied_params_;
-  const intptr_t num_non_copied_params_;
+  intptr_t num_direct_parameters_;
   GraphEntryInstr* graph_entry_;
   GrowableArray<BlockEntryInstr*> preorder_;
   GrowableArray<BlockEntryInstr*> postorder_;
@@ -419,7 +432,6 @@ class FlowGraph : public ZoneAllocated {
   GrowableArray<BlockEntryInstr*> optimized_block_order_;
   ConstantInstr* constant_null_;
   ConstantInstr* constant_dead_;
-  ConstantInstr* constant_empty_context_;
 
   bool licm_allowed_;


@@ -212,6 +212,7 @@ void FlowGraphCompiler::InitCompiler() {
       }
     }
   }
+
   if (!is_optimizing()) {
     // Initialize edge counter array.
     const intptr_t num_counters = flow_graph_.preorder().length();
@@ -337,13 +338,14 @@ void FlowGraphCompiler::EmitCatchEntryState(Environment* env,
   catch_entry_state_maps_builder_->NewMapping(assembler()->CodeSize());
 
   // Parameters first.
   intptr_t i = 0;
-  const intptr_t num_non_copied_params = flow_graph().num_non_copied_params();
-  for (; i < num_non_copied_params; ++i) {
+
+  const intptr_t num_direct_parameters = flow_graph().num_direct_parameters();
+  for (; i < num_direct_parameters; ++i) {
     // Don't sync captured parameters. They are not in the environment.
     if (flow_graph().captured_parameters()->Contains(i)) continue;
     if ((*idefs)[i]->IsConstant()) continue;  // Common constants.
     Location src = env->LocationAt(i);
-    intptr_t dest_index = i - num_non_copied_params;
+    intptr_t dest_index = i - num_direct_parameters;
     if (!src.IsStackSlot()) {
       ASSERT(src.IsConstant());
       // Skip dead locations.
@@ -362,7 +364,7 @@ void FlowGraphCompiler::EmitCatchEntryState(Environment* env,
   }
 
   // Process locals. Skip exception_var and stacktrace_var.
-  intptr_t local_base = kFirstLocalSlotFromFp + num_non_copied_params;
+  intptr_t local_base = kFirstLocalSlotFromFp + num_direct_parameters;
   intptr_t ex_idx = local_base - catch_block->exception_var().index();
   intptr_t st_idx = local_base - catch_block->stacktrace_var().index();
   for (; i < flow_graph().variable_count(); ++i) {
@@ -372,7 +374,7 @@ void FlowGraphCompiler::EmitCatchEntryState(Environment* env,
     if ((*idefs)[i]->IsConstant()) continue;  // Common constants.
     Location src = env->LocationAt(i);
     if (src.IsInvalid()) continue;
-    intptr_t dest_index = i - num_non_copied_params;
+    intptr_t dest_index = i - num_direct_parameters;
     if (!src.IsStackSlot()) {
       ASSERT(src.IsConstant());
       // Skip dead locations.
@@ -542,8 +544,7 @@ intptr_t FlowGraphCompiler::StackSize() const {
   if (is_optimizing_) {
     return flow_graph_.graph_entry()->spill_slot_count();
   } else {
-    return parsed_function_.num_stack_locals() +
-           parsed_function_.num_copied_params();
+    return parsed_function_.num_stack_locals();
   }
 }
@@ -1269,20 +1270,24 @@ void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
     Register reg = kNoRegister;
     if (loc.IsRegister()) {
      reg = loc.reg();
-    } else if (loc.IsUnallocated() || loc.IsConstant()) {
-      ASSERT(loc.IsConstant() ||
-             ((loc.policy() == Location::kRequiresRegister) ||
-              (loc.policy() == Location::kWritableRegister) ||
-              (loc.policy() == Location::kAny)));
+    } else if (loc.IsUnallocated()) {
+      ASSERT((loc.policy() == Location::kRequiresRegister) ||
+             (loc.policy() == Location::kWritableRegister) ||
+             (loc.policy() == Location::kPrefersRegister) ||
+             (loc.policy() == Location::kAny));
      reg = AllocateFreeRegister(blocked_registers);
      locs->set_in(i, Location::RegisterLocation(reg));
     }
-    ASSERT(reg != kNoRegister);
+    ASSERT(reg != kNoRegister || loc.IsConstant());
 
     // Inputs are consumed from the simulated frame. In case of a call argument
     // we leave it until the call instruction.
     if (should_pop) {
-      assembler()->PopRegister(reg);
+      if (loc.IsConstant()) {
+        assembler()->Drop(1);
+      } else {
+        assembler()->PopRegister(reg);
+      }
     }
   }

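The AllocateRegistersLocally change teaches the local (unoptimized-code) allocator that a constant input never occupies a register: when such an input is consumed from the simulated stack it is dropped rather than popped into a register. A toy model of that rule, with plain ints standing in for stack values (not the VM assembler):

#include <cassert>
#include <vector>

enum class Loc { kRegister, kConstant };

// Consume one instruction input from the simulated stack: register inputs
// are popped into their register, constant inputs are simply dropped.
void ConsumeInput(std::vector<int>* stack, Loc loc, int* reg) {
  if (loc == Loc::kConstant) {
    stack->pop_back();      // Drop(1): the value is materialized elsewhere.
  } else {
    *reg = stack->back();   // PopRegister(reg)
    stack->pop_back();
  }
}

int main() {
  std::vector<int> stack = {10, 20};
  int r0 = 0;
  ConsumeInput(&stack, Loc::kConstant, &r0);  // drops 20, r0 untouched
  ConsumeInput(&stack, Loc::kRegister, &r0);  // pops 10 into r0
  assert(stack.empty() && r0 == 10);
  return 0;
}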

@@ -717,9 +717,6 @@ class FlowGraphCompiler : public ValueObject {
                                  Label* is_not_instance_lbl);
 
   void GenerateBoolToJump(Register bool_reg, Label* is_true, Label* is_false);
-
-  void CheckTypeArgsLen(bool expect_type_args, Label* wrong_num_arguments);
-  void CopyParameters(bool expect_type_args, bool check_arguments);
 #endif  // !defined(TARGET_ARCH_DBC)
 
   void GenerateInlinedGetter(intptr_t offset);


@@ -701,253 +701,6 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
   }
 }
// Input parameters:
// R4: arguments descriptor array.
void FlowGraphCompiler::CheckTypeArgsLen(bool expect_type_args,
Label* wrong_num_arguments) {
__ Comment("Check type args len");
const Function& function = parsed_function().function();
Label correct_type_args_len;
// Type args are always optional, so length can always be zero.
// If expect_type_args, a non-zero length must match the declaration length.
__ ldr(R6, FieldAddress(R4, ArgumentsDescriptor::type_args_len_offset()));
if (isolate()->strong()) {
__ AndImmediate(
R6, R6,
Smi::RawValue(ArgumentsDescriptor::TypeArgsLenField::mask_in_place()));
}
__ CompareImmediate(R6, Smi::RawValue(0));
if (expect_type_args) {
__ CompareImmediate(R6, Smi::RawValue(function.NumTypeParameters()), NE);
}
__ b(wrong_num_arguments, NE);
__ Bind(&correct_type_args_len);
}
// Input parameters:
// R4: arguments descriptor array.
void FlowGraphCompiler::CopyParameters(bool expect_type_args,
bool check_arguments) {
Label wrong_num_arguments;
if (check_arguments) {
CheckTypeArgsLen(expect_type_args, &wrong_num_arguments);
}
__ Comment("Copy parameters");
const Function& function = parsed_function().function();
LocalScope* scope = parsed_function().node_sequence()->scope();
const int num_fixed_params = function.num_fixed_parameters();
const int num_opt_pos_params = function.NumOptionalPositionalParameters();
const int num_opt_named_params = function.NumOptionalNamedParameters();
const int num_params =
num_fixed_params + num_opt_pos_params + num_opt_named_params;
ASSERT(function.NumParameters() == num_params);
ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp);
// Check that min_num_pos_args <= num_pos_args <= max_num_pos_args,
// where num_pos_args is the number of positional arguments passed in.
const int min_num_pos_args = num_fixed_params;
const int max_num_pos_args = num_fixed_params + num_opt_pos_params;
__ ldr(R6, FieldAddress(R4, ArgumentsDescriptor::positional_count_offset()));
if (isolate()->strong()) {
__ AndImmediate(
R6, R6,
Smi::RawValue(
ArgumentsDescriptor::PositionalCountField::mask_in_place()));
}
// Check that min_num_pos_args <= num_pos_args.
__ CompareImmediate(R6, Smi::RawValue(min_num_pos_args));
__ b(&wrong_num_arguments, LT);
// Check that num_pos_args <= max_num_pos_args.
__ CompareImmediate(R6, Smi::RawValue(max_num_pos_args));
__ b(&wrong_num_arguments, GT);
// Copy positional arguments.
// Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied
// to fp[kFirstLocalSlotFromFp - i].
__ ldr(NOTFP, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
// Since NOTFP and R6 are Smi, use LSL 1 instead of LSL 2.
// Let NOTFP point to the last passed positional argument, i.e. to
// fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)].
__ sub(NOTFP, NOTFP, Operand(R6));
__ add(NOTFP, FP, Operand(NOTFP, LSL, 1));
__ add(NOTFP, NOTFP, Operand((kParamEndSlotFromFp + 1) * kWordSize));
// Let R8 point to the last copied positional argument, i.e. to
// fp[kFirstLocalSlotFromFp - (num_pos_args - 1)].
__ AddImmediate(R8, FP, (kFirstLocalSlotFromFp + 1) * kWordSize);
__ sub(R8, R8, Operand(R6, LSL, 1)); // R6 is a Smi.
__ SmiUntag(R6);
Label loop, loop_condition;
__ b(&loop_condition);
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const Address argument_addr(NOTFP, R6, LSL, 2);
const Address copy_addr(R8, R6, LSL, 2);
__ Bind(&loop);
__ ldr(IP, argument_addr);
__ str(IP, copy_addr);
__ Bind(&loop_condition);
__ subs(R6, R6, Operand(1));
__ b(&loop, PL);
// Copy or initialize optional named arguments.
Label all_arguments_processed;
#ifdef DEBUG
const bool check_correct_named_args = true;
#else
const bool check_correct_named_args = check_arguments;
#endif
if (num_opt_named_params > 0) {
// Start by alphabetically sorting the names of the optional parameters.
LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
int* opt_param_position = new int[num_opt_named_params];
for (int pos = num_fixed_params; pos < num_params; pos++) {
LocalVariable* parameter = scope->VariableAt(pos);
const String& opt_param_name = parameter->name();
int i = pos - num_fixed_params;
while (--i >= 0) {
LocalVariable* param_i = opt_param[i];
const intptr_t result = opt_param_name.CompareTo(param_i->name());
ASSERT(result != 0);
if (result > 0) break;
opt_param[i + 1] = opt_param[i];
opt_param_position[i + 1] = opt_param_position[i];
}
opt_param[i + 1] = parameter;
opt_param_position[i + 1] = pos;
}
// Generate code handling each optional parameter in alphabetical order.
__ ldr(NOTFP, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
// Let NOTFP point to the first passed argument, i.e. to
// fp[kParamEndSlotFromFp + num_args - 0]; num_args (NOTFP) is Smi.
__ add(NOTFP, FP, Operand(NOTFP, LSL, 1));
__ AddImmediate(NOTFP, NOTFP, kParamEndSlotFromFp * kWordSize);
// Let R8 point to the entry of the first named argument.
__ add(R8, R4,
Operand(ArgumentsDescriptor::first_named_entry_offset() -
kHeapObjectTag));
for (int i = 0; i < num_opt_named_params; i++) {
Label load_default_value, assign_optional_parameter;
const int param_pos = opt_param_position[i];
// Check if this named parameter was passed in.
// Load R9 with the name of the argument.
__ ldr(R9, Address(R8, ArgumentsDescriptor::name_offset()));
ASSERT(opt_param[i]->name().IsSymbol());
__ CompareObject(R9, opt_param[i]->name());
__ b(&load_default_value, NE);
// Load R9 with passed-in argument at provided arg_pos, i.e. at
// fp[kParamEndSlotFromFp + num_args - arg_pos].
__ ldr(R9, Address(R8, ArgumentsDescriptor::position_offset()));
if (isolate()->strong()) {
__ AndImmediate(
R9, R9,
Smi::RawValue(
ArgumentsDescriptor::PositionalCountField::mask_in_place()));
}
// R9 is arg_pos as Smi.
// Point to next named entry.
__ add(R8, R8, Operand(ArgumentsDescriptor::named_entry_size()));
__ rsb(R9, R9, Operand(0));
Address argument_addr(NOTFP, R9, LSL, 1); // R9 is a negative Smi.
__ ldr(R9, argument_addr);
__ b(&assign_optional_parameter);
__ Bind(&load_default_value);
// Load R9 with default argument.
const Instance& value = parsed_function().DefaultParameterValueAt(
param_pos - num_fixed_params);
__ LoadObject(R9, value);
__ Bind(&assign_optional_parameter);
// Assign R9 to fp[kFirstLocalSlotFromFp - param_pos].
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
const Address param_addr(FP, computed_param_pos * kWordSize);
__ str(R9, param_addr);
}
delete[] opt_param;
delete[] opt_param_position;
if (check_correct_named_args) {
// Check that R8 now points to the null terminator in the arguments
// descriptor.
__ ldr(R9, Address(R8, 0));
__ CompareObject(R9, Object::null_object());
__ b(&all_arguments_processed, EQ);
}
} else {
ASSERT(num_opt_pos_params > 0);
__ ldr(R6,
FieldAddress(R4, ArgumentsDescriptor::positional_count_offset()));
__ SmiUntag(R6);
if (isolate()->strong()) {
__ AndImmediate(
R6, R6, ArgumentsDescriptor::PositionalCountField::mask_in_place());
}
for (int i = 0; i < num_opt_pos_params; i++) {
Label next_parameter;
// Handle this optional positional parameter only if k or fewer positional
// arguments have been passed, where k is param_pos, the position of this
// optional parameter in the formal parameter list.
const int param_pos = num_fixed_params + i;
__ CompareImmediate(R6, param_pos);
__ b(&next_parameter, GT);
// Load R9 with default argument.
const Object& value = parsed_function().DefaultParameterValueAt(i);
__ LoadObject(R9, value);
// Assign R9 to fp[kFirstLocalSlotFromFp - param_pos].
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
const Address param_addr(FP, computed_param_pos * kWordSize);
__ str(R9, param_addr);
__ Bind(&next_parameter);
}
if (check_correct_named_args) {
__ ldr(NOTFP, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
__ SmiUntag(NOTFP);
// Check that R6 equals NOTFP, i.e. no named arguments passed.
__ cmp(R6, Operand(NOTFP));
__ b(&all_arguments_processed, EQ);
}
}
__ Bind(&wrong_num_arguments);
if (check_arguments) {
__ LeaveDartFrame(kKeepCalleePP); // The arguments are still on the stack.
__ Branch(*StubCode::CallClosureNoSuchMethod_entry());
// The noSuchMethod call may return to the caller, but not here.
} else if (check_correct_named_args) {
__ Stop("Wrong arguments");
}
__ Bind(&all_arguments_processed);
// Nullify originally passed arguments only after they have been copied and
// checked, otherwise noSuchMethod would not see their original values.
// This step can be skipped in case we decide that formal parameters are
// implicitly final, since garbage collecting the unmodified value is not
// an issue anymore.
// R4 : arguments descriptor array.
__ ldr(R6, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
__ SmiUntag(R6);
__ add(NOTFP, FP, Operand((kParamEndSlotFromFp + 1) * kWordSize));
const Address original_argument_addr(NOTFP, R6, LSL, 2);
__ LoadObject(IP, Object::null_object());
Label null_args_loop, null_args_loop_condition;
__ b(&null_args_loop_condition);
__ Bind(&null_args_loop);
__ str(IP, original_argument_addr);
__ Bind(&null_args_loop_condition);
__ subs(R6, R6, Operand(1));
__ b(&null_args_loop, PL);
}
 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
   // LR: return address.
   // SP: receiver.
@@ -998,8 +751,7 @@ void FlowGraphCompiler::EmitFrameEntry() {
   }
   __ Comment("Enter frame");
   if (flow_graph().IsCompiledForOsr()) {
-    intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() -
-                           flow_graph().num_copied_params();
+    intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals();
     ASSERT(extra_slots >= 0);
     __ EnterOsrFrame(extra_slots * kWordSize);
   } else {
@@ -1017,9 +769,8 @@ void FlowGraphCompiler::EmitFrameEntry() {
 // R4: arguments descriptor array.
 void FlowGraphCompiler::CompileGraph() {
   InitCompiler();
+  const Function& function = parsed_function().function();
 #ifdef DART_PRECOMPILER
-  const Function& function = parsed_function().function();
   if (function.IsDynamicFunction()) {
     __ MonomorphicCheckedEntry();
   }
@@ -1033,117 +784,27 @@ void FlowGraphCompiler::CompileGraph() {
   EmitFrameEntry();
   ASSERT(assembler()->constant_pool_allowed());
 
-  const int num_fixed_params = function.num_fixed_parameters();
-  const int num_copied_params = parsed_function().num_copied_params();
-  const int num_locals = parsed_function().num_stack_locals();
-
-  // The prolog of OSR functions is never executed, hence greatly simplified.
-  const bool expect_type_args = isolate()->reify_generic_functions() &&
-                                function.IsGeneric() &&
-                                !flow_graph().IsCompiledForOsr();
-
-  const bool check_arguments =
-      (function.IsClosureFunction() || function.IsConvertedClosureFunction()) &&
-      !flow_graph().IsCompiledForOsr();
-
-  // We check the number of passed arguments when we have to copy them due to
-  // the presence of optional parameters.
-  // No such checking code is generated if only fixed parameters are declared,
-  // unless we are in debug mode or unless we are compiling a closure.
-  if (num_copied_params == 0) {
-    if (check_arguments) {
-      Label correct_num_arguments, wrong_num_arguments;
-      CheckTypeArgsLen(expect_type_args, &wrong_num_arguments);
-      __ Comment("Check argument count");
-      // Check that exactly num_fixed arguments are passed in.
-      __ ldr(R0, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
-      __ CompareImmediate(R0, Smi::RawValue(num_fixed_params));
-      __ b(&wrong_num_arguments, NE);
-      __ ldr(R1,
-             FieldAddress(R4, ArgumentsDescriptor::positional_count_offset()));
-      if (isolate()->strong()) {
-        __ AndImmediate(
-            R1, R1,
-            Smi::RawValue(
-                ArgumentsDescriptor::PositionalCountField::mask_in_place()));
-      }
-      __ cmp(R0, Operand(R1));
-      __ b(&correct_num_arguments, EQ);
-      __ Bind(&wrong_num_arguments);
-      ASSERT(assembler()->constant_pool_allowed());
-      __ LeaveDartFrame(kKeepCalleePP);  // Arguments are still on the stack.
-      __ Branch(*StubCode::CallClosureNoSuchMethod_entry());
-      // The noSuchMethod call may return to the caller, but not here.
-      __ Bind(&correct_num_arguments);
-    }
-  } else if (!flow_graph().IsCompiledForOsr()) {
-    CopyParameters(expect_type_args, check_arguments);
-  }
-
-  if (function.IsClosureFunction() && !flow_graph().IsCompiledForOsr()) {
-    // Load context from the closure object (first argument).
-    LocalScope* scope = parsed_function().node_sequence()->scope();
-    LocalVariable* closure_parameter = scope->VariableAt(0);
-    __ ldr(CTX, Address(FP, closure_parameter->index() * kWordSize));
-    __ ldr(CTX, FieldAddress(CTX, Closure::context_offset()));
-  }
-
-  // In unoptimized code, initialize (non-argument) stack allocated slots to
-  // null.
+  // In unoptimized code, initialize (non-argument) stack allocated slots.
   if (!is_optimizing()) {
-    ASSERT(num_locals > 0);  // There is always at least context_var.
+    const int num_locals = parsed_function().num_stack_locals();
+
+    intptr_t args_desc_index = -1;
+    if (parsed_function().has_arg_desc_var()) {
+      args_desc_index =
+          -(parsed_function().arg_desc_var()->index() - kFirstLocalSlotFromFp);
+    }
+
     __ Comment("Initialize spill slots");
-    const intptr_t slot_base = parsed_function().first_stack_local_index();
-    const intptr_t context_index =
-        parsed_function().current_context_var()->index();
-    if (num_locals > 1) {
+    if (num_locals > 1 || (num_locals == 1 && args_desc_index == -1)) {
       __ LoadObject(R0, Object::null_object());
     }
     for (intptr_t i = 0; i < num_locals; ++i) {
-      // Subtract index i (locals lie at lower addresses than FP).
-      if (((slot_base - i) == context_index)) {
-        if (function.IsClosureFunction()) {
-          __ StoreToOffset(kWord, CTX, FP, (slot_base - i) * kWordSize);
-        } else {
-          __ LoadObject(R1, Object::empty_context());
-          __ StoreToOffset(kWord, R1, FP, (slot_base - i) * kWordSize);
-        }
-      } else {
-        ASSERT(num_locals > 1);
-        __ StoreToOffset(kWord, R0, FP, (slot_base - i) * kWordSize);
-      }
+      Register value_reg = i == args_desc_index ? ARGS_DESC_REG : R0;
+      __ StoreToOffset(kWord, value_reg, FP,
+                       (kFirstLocalSlotFromFp - i) * kWordSize);
     }
   }
 
-  // Copy passed-in type argument vector if the function is generic.
-  if (expect_type_args) {
-    __ Comment("Copy passed-in type args");
-    Label store_type_args, ok;
-    __ ldr(R0, FieldAddress(R4, ArgumentsDescriptor::type_args_len_offset()));
-    __ CompareImmediate(R0, Smi::RawValue(0));
-    if (is_optimizing()) {
-      // Initialize type_args to null if none passed in.
-      __ LoadObject(R0, Object::null_object(), EQ);
-      __ b(&store_type_args, EQ);
-    } else {
-      __ b(&ok, EQ);  // Already initialized to null.
-    }
-    // Load the passed type args vector in R0 from
-    // fp[kParamEndSlotFromFp + num_args + 1]; num_args (R1) is Smi.
-    __ ldr(R1, FieldAddress(R4, ArgumentsDescriptor::count_offset()));
-    __ add(R1, FP, Operand(R1, LSL, 1));
-    __ ldr(R0, Address(R1, (kParamEndSlotFromFp + 1) * kWordSize));
-    // Store R0 into the stack slot reserved for the function type arguments.
-    // If the function type arguments variable is captured, a copy will happen
-    // after the context is allocated.
-    const intptr_t slot_base = parsed_function().first_stack_local_index();
-    ASSERT(parsed_function().function_type_arguments()->is_captured() ||
-           parsed_function().function_type_arguments()->index() == slot_base);
-    __ Bind(&store_type_args);
-    __ str(R0, Address(FP, slot_base * kWordSize));
-    __ Bind(&ok);
-  }
-
   EndCodeSourceRange(TokenPosition::kDartCodePrologue);
   VisitBlocks();

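The new unoptimized prologue no longer special-cases the context slot; it null-initializes every local except the argument-descriptor slot, which receives the incoming descriptor register. A toy model of the index computation and loop (the kFirstLocalSlotFromFp value is an assumption for illustration, not the ARM port's actual constant):

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const intptr_t kFirstLocalSlotFromFp = -2;  // assumed value
  const intptr_t num_locals = 3;
  // Suppose the arg-desc variable sits one slot below the first local.
  const intptr_t arg_desc_var_index = kFirstLocalSlotFromFp - 1;
  const intptr_t args_desc_index =
      -(arg_desc_var_index - kFirstLocalSlotFromFp);  // == 1
  // Slot i models the word at fp[(kFirstLocalSlotFromFp - i) * kWordSize].
  std::vector<const char*> slots(num_locals);
  for (intptr_t i = 0; i < num_locals; ++i) {
    slots[i] = (i == args_desc_index) ? "ARGS_DESC_REG" : "null";
  }
  assert(args_desc_index == 1 && slots[0][0] == 'n' && slots[1][0] == 'A');
  return 0;
}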

@@ -678,254 +678,6 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
   }
 }
// Input parameters:
// R4: arguments descriptor array.
void FlowGraphCompiler::CheckTypeArgsLen(bool expect_type_args,
Label* wrong_num_arguments) {
__ Comment("Check type args len");
const Function& function = parsed_function().function();
Label correct_type_args_len;
// Type args are always optional, so length can always be zero.
// If expect_type_args, a non-zero length must match the declaration length.
__ LoadFieldFromOffset(R8, R4, ArgumentsDescriptor::type_args_len_offset());
if (isolate()->strong()) {
__ AndImmediate(
R8, R8,
Smi::RawValue(ArgumentsDescriptor::TypeArgsLenField::mask_in_place()));
}
__ CompareImmediate(R8, Smi::RawValue(0));
if (expect_type_args) {
__ b(&correct_type_args_len, EQ);
__ CompareImmediate(R8, Smi::RawValue(function.NumTypeParameters()));
}
__ b(wrong_num_arguments, NE);
__ Bind(&correct_type_args_len);
}
// Input parameters:
// R4: arguments descriptor array.
void FlowGraphCompiler::CopyParameters(bool expect_type_args,
bool check_arguments) {
Label wrong_num_arguments;
if (check_arguments) {
CheckTypeArgsLen(expect_type_args, &wrong_num_arguments);
}
__ Comment("Copy parameters");
const Function& function = parsed_function().function();
LocalScope* scope = parsed_function().node_sequence()->scope();
const int num_fixed_params = function.num_fixed_parameters();
const int num_opt_pos_params = function.NumOptionalPositionalParameters();
const int num_opt_named_params = function.NumOptionalNamedParameters();
const int num_params =
num_fixed_params + num_opt_pos_params + num_opt_named_params;
ASSERT(function.NumParameters() == num_params);
ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp);
// Check that min_num_pos_args <= num_pos_args <= max_num_pos_args,
// where num_pos_args is the number of positional arguments passed in.
const int min_num_pos_args = num_fixed_params;
const int max_num_pos_args = num_fixed_params + num_opt_pos_params;
__ LoadFieldFromOffset(R8, R4,
ArgumentsDescriptor::positional_count_offset());
if (isolate()->strong()) {
__ AndImmediate(
R8, R8,
Smi::RawValue(
ArgumentsDescriptor::PositionalCountField::mask_in_place()));
}
// Check that min_num_pos_args <= num_pos_args.
__ CompareImmediate(R8, Smi::RawValue(min_num_pos_args));
__ b(&wrong_num_arguments, LT);
// Check that num_pos_args <= max_num_pos_args.
__ CompareImmediate(R8, Smi::RawValue(max_num_pos_args));
__ b(&wrong_num_arguments, GT);
// Copy positional arguments.
// Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied
// to fp[kFirstLocalSlotFromFp - i].
__ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset());
// Since R7 and R8 are Smi, use LSL 2 instead of LSL 3.
// Let R7 point to the last passed positional argument, i.e. to
// fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)].
__ sub(R7, R7, Operand(R8));
__ add(R7, FP, Operand(R7, LSL, 2));
__ add(R7, R7, Operand((kParamEndSlotFromFp + 1) * kWordSize));
// Let R6 point to the last copied positional argument, i.e. to
// fp[kFirstLocalSlotFromFp - (num_pos_args - 1)].
__ AddImmediate(R6, FP, (kFirstLocalSlotFromFp + 1) * kWordSize);
__ sub(R6, R6, Operand(R8, LSL, 2)); // R8 is a Smi.
__ SmiUntag(R8);
Label loop, loop_condition;
__ b(&loop_condition);
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const Address argument_addr(R7, R8, UXTX, Address::Scaled);
const Address copy_addr(R6, R8, UXTX, Address::Scaled);
__ Bind(&loop);
__ ldr(TMP, argument_addr);
__ str(TMP, copy_addr);
__ Bind(&loop_condition);
__ subs(R8, R8, Operand(1));
__ b(&loop, PL);
// Copy or initialize optional named arguments.
Label all_arguments_processed;
#ifdef DEBUG
const bool check_correct_named_args = true;
#else
const bool check_correct_named_args = check_arguments;
#endif
if (num_opt_named_params > 0) {
// Start by alphabetically sorting the names of the optional parameters.
LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
int* opt_param_position = new int[num_opt_named_params];
for (int pos = num_fixed_params; pos < num_params; pos++) {
LocalVariable* parameter = scope->VariableAt(pos);
const String& opt_param_name = parameter->name();
int i = pos - num_fixed_params;
while (--i >= 0) {
LocalVariable* param_i = opt_param[i];
const intptr_t result = opt_param_name.CompareTo(param_i->name());
ASSERT(result != 0);
if (result > 0) break;
opt_param[i + 1] = opt_param[i];
opt_param_position[i + 1] = opt_param_position[i];
}
opt_param[i + 1] = parameter;
opt_param_position[i + 1] = pos;
}
// Generate code handling each optional parameter in alphabetical order.
__ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset());
// Let R7 point to the first passed argument, i.e. to
// fp[kParamEndSlotFromFp + num_args - 0]; num_args (R7) is Smi.
__ add(R7, FP, Operand(R7, LSL, 2));
__ AddImmediate(R7, kParamEndSlotFromFp * kWordSize);
// Let R6 point to the entry of the first named argument.
__ add(R6, R4,
Operand(ArgumentsDescriptor::first_named_entry_offset() -
kHeapObjectTag));
for (int i = 0; i < num_opt_named_params; i++) {
Label load_default_value, assign_optional_parameter;
const int param_pos = opt_param_position[i];
// Check if this named parameter was passed in.
// Load R5 with the name of the argument.
__ LoadFromOffset(R5, R6, ArgumentsDescriptor::name_offset());
ASSERT(opt_param[i]->name().IsSymbol());
__ CompareObject(R5, opt_param[i]->name());
__ b(&load_default_value, NE);
// Load R5 with passed-in argument at provided arg_pos, i.e. at
// fp[kParamEndSlotFromFp + num_args - arg_pos].
__ LoadFromOffset(R5, R6, ArgumentsDescriptor::position_offset());
if (isolate()->strong()) {
__ AndImmediate(
R5, R5,
Smi::RawValue(
ArgumentsDescriptor::PositionalCountField::mask_in_place()));
}
// R5 is arg_pos as Smi.
// Point to next named entry.
__ add(R6, R6, Operand(ArgumentsDescriptor::named_entry_size()));
// Negate and untag R5 so we can use in scaled address mode.
__ subs(R5, ZR, Operand(R5, ASR, 1));
Address argument_addr(R7, R5, UXTX, Address::Scaled); // R5 is untagged.
__ ldr(R5, argument_addr);
__ b(&assign_optional_parameter);
__ Bind(&load_default_value);
// Load R5 with default argument.
const Instance& value = parsed_function().DefaultParameterValueAt(
param_pos - num_fixed_params);
__ LoadObject(R5, value);
__ Bind(&assign_optional_parameter);
// Assign R5 to fp[kFirstLocalSlotFromFp - param_pos].
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
__ StoreToOffset(R5, FP, computed_param_pos * kWordSize);
}
delete[] opt_param;
delete[] opt_param_position;
if (check_correct_named_args) {
// Check that R6 now points to the null terminator in the arguments
// descriptor.
__ ldr(R5, Address(R6));
__ CompareObject(R5, Object::null_object());
__ b(&all_arguments_processed, EQ);
}
} else {
ASSERT(num_opt_pos_params > 0);
__ LoadFieldFromOffset(R8, R4,
ArgumentsDescriptor::positional_count_offset());
__ SmiUntag(R8);
if (isolate()->strong()) {
__ AndImmediate(
R8, R8, ArgumentsDescriptor::PositionalCountField::mask_in_place());
}
for (int i = 0; i < num_opt_pos_params; i++) {
Label next_parameter;
// Handle this optional positional parameter only if k or fewer positional
// arguments have been passed, where k is param_pos, the position of this
// optional parameter in the formal parameter list.
const int param_pos = num_fixed_params + i;
__ CompareImmediate(R8, param_pos);
__ b(&next_parameter, GT);
// Load R5 with default argument.
const Object& value = parsed_function().DefaultParameterValueAt(i);
__ LoadObject(R5, value);
// Assign R5 to fp[kFirstLocalSlotFromFp - param_pos].
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
__ StoreToOffset(R5, FP, computed_param_pos * kWordSize);
__ Bind(&next_parameter);
}
if (check_correct_named_args) {
__ LoadFieldFromOffset(R7, R4, ArgumentsDescriptor::count_offset());
__ SmiUntag(R7);
// Check that R8 equals R7, i.e. no named arguments passed.
__ CompareRegisters(R8, R7);
__ b(&all_arguments_processed, EQ);
}
}
__ Bind(&wrong_num_arguments);
if (check_arguments) {
__ LeaveDartFrame(kKeepCalleePP); // The arguments are still on the stack.
__ BranchPatchable(*StubCode::CallClosureNoSuchMethod_entry());
// The noSuchMethod call may return to the caller, but not here.
} else if (check_correct_named_args) {
__ Stop("Wrong arguments");
}
__ Bind(&all_arguments_processed);
// Nullify originally passed arguments only after they have been copied and
// checked, otherwise noSuchMethod would not see their original values.
// This step can be skipped in case we decide that formal parameters are
// implicitly final, since garbage collecting the unmodified value is not
// an issue anymore.
// R4 : arguments descriptor array.
__ LoadFieldFromOffset(R8, R4, ArgumentsDescriptor::count_offset());
__ SmiUntag(R8);
__ add(R7, FP, Operand((kParamEndSlotFromFp + 1) * kWordSize));
const Address original_argument_addr(R7, R8, UXTX, Address::Scaled);
__ LoadObject(TMP, Object::null_object());
Label null_args_loop, null_args_loop_condition;
__ b(&null_args_loop_condition);
__ Bind(&null_args_loop);
__ str(TMP, original_argument_addr);
__ Bind(&null_args_loop_condition);
__ subs(R8, R8, Operand(1));
__ b(&null_args_loop, PL);
}
 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
   // LR: return address.
   // SP: receiver.

@@ -982,8 +734,7 @@ void FlowGraphCompiler::EmitFrameEntry() {
   }
   __ Comment("Enter frame");
   if (flow_graph().IsCompiledForOsr()) {
-    intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() -
-                           flow_graph().num_copied_params();
+    intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals();
     ASSERT(extra_slots >= 0);
     __ EnterOsrFrame(extra_slots * kWordSize, new_pp);
   } else {
@@ -1001,9 +752,8 @@ void FlowGraphCompiler::EmitFrameEntry() {
 // R4: arguments descriptor array.
 void FlowGraphCompiler::CompileGraph() {
   InitCompiler();
-  const Function& function = parsed_function().function();
 #ifdef DART_PRECOMPILER
+  const Function& function = parsed_function().function();
   if (function.IsDynamicFunction()) {
     __ MonomorphicCheckedEntry();
   }
@@ -1017,116 +767,26 @@ void FlowGraphCompiler::CompileGraph() {
   EmitFrameEntry();
   ASSERT(assembler()->constant_pool_allowed());

-  const int num_fixed_params = function.num_fixed_parameters();
-  const int num_copied_params = parsed_function().num_copied_params();
-  const int num_locals = parsed_function().num_stack_locals();
-
-  // The prolog of OSR functions is never executed, hence greatly simplified.
-  const bool expect_type_args = isolate()->reify_generic_functions() &&
-                                function.IsGeneric() &&
-                                !flow_graph().IsCompiledForOsr();
-  const bool check_arguments =
-      (function.IsClosureFunction() || function.IsConvertedClosureFunction()) &&
-      !flow_graph().IsCompiledForOsr();
-
-  // We check the number of passed arguments when we have to copy them due to
-  // the presence of optional parameters.
-  // No such checking code is generated if only fixed parameters are declared,
-  // unless we are in debug mode or unless we are compiling a closure.
-  if (num_copied_params == 0) {
-    if (check_arguments) {
-      Label correct_num_arguments, wrong_num_arguments;
-      CheckTypeArgsLen(expect_type_args, &wrong_num_arguments);
-      __ Comment("Check argument count");
-      // Check that exactly num_fixed arguments are passed in.
-      __ LoadFieldFromOffset(R0, R4, ArgumentsDescriptor::count_offset());
-      __ CompareImmediate(R0, Smi::RawValue(num_fixed_params));
-      __ b(&wrong_num_arguments, NE);
-      __ LoadFieldFromOffset(R1, R4,
-                             ArgumentsDescriptor::positional_count_offset());
-      if (isolate()->strong()) {
-        __ AndImmediate(
-            R1, R1,
-            Smi::RawValue(
-                ArgumentsDescriptor::PositionalCountField::mask_in_place()));
-      }
-      __ CompareRegisters(R0, R1);
-      __ b(&correct_num_arguments, EQ);
-      __ Bind(&wrong_num_arguments);
-      __ LeaveDartFrame(kKeepCalleePP);  // Arguments are still on the stack.
-      __ BranchPatchable(*StubCode::CallClosureNoSuchMethod_entry());
-      // The noSuchMethod call may return to the caller, but not here.
-      __ Bind(&correct_num_arguments);
-    }
-  } else if (!flow_graph().IsCompiledForOsr()) {
-    CopyParameters(expect_type_args, check_arguments);
-  }
-
-  if (function.IsClosureFunction() && !flow_graph().IsCompiledForOsr()) {
-    // Load context from the closure object (first argument).
-    LocalScope* scope = parsed_function().node_sequence()->scope();
-    LocalVariable* closure_parameter = scope->VariableAt(0);
-    __ ldr(CTX, Address(FP, closure_parameter->index() * kWordSize));
-    __ ldr(CTX, FieldAddress(CTX, Closure::context_offset()));
-  }
-
-  // In unoptimized code, initialize (non-argument) stack allocated slots to
-  // null.
+  // In unoptimized code, initialize (non-argument) stack allocated slots.
   if (!is_optimizing()) {
-    ASSERT(num_locals > 0);  // There is always at least context_var.
+    const int num_locals = parsed_function().num_stack_locals();
+    intptr_t args_desc_index = -1;
+    if (parsed_function().has_arg_desc_var()) {
+      args_desc_index =
+          -(parsed_function().arg_desc_var()->index() - kFirstLocalSlotFromFp);
+    }
     __ Comment("Initialize spill slots");
-    const intptr_t slot_base = parsed_function().first_stack_local_index();
-    const intptr_t context_index =
-        parsed_function().current_context_var()->index();
-    if (num_locals > 1) {
+    if (num_locals > 1 || (num_locals == 1 && args_desc_index == -1)) {
       __ LoadObject(R0, Object::null_object());
     }
     for (intptr_t i = 0; i < num_locals; ++i) {
-      // Subtract index i (locals lie at lower addresses than FP).
-      if (((slot_base - i) == context_index)) {
-        if (function.IsClosureFunction()) {
-          __ StoreToOffset(CTX, FP, (slot_base - i) * kWordSize);
-        } else {
-          __ LoadObject(R1, Object::empty_context());
-          __ StoreToOffset(R1, FP, (slot_base - i) * kWordSize);
-        }
-      } else {
-        ASSERT(num_locals > 1);
-        __ StoreToOffset(R0, FP, (slot_base - i) * kWordSize);
-      }
+      Register value_reg = i == args_desc_index ? ARGS_DESC_REG : R0;
+      __ StoreToOffset(value_reg, FP, (kFirstLocalSlotFromFp - i) * kWordSize);
     }
   }

-  // Copy passed-in type argument vector if the function is generic.
-  if (expect_type_args) {
-    __ Comment("Copy passed-in type args");
-    Label store_type_args, ok;
-    __ LoadFieldFromOffset(R0, R4, ArgumentsDescriptor::type_args_len_offset());
-    __ CompareImmediate(R0, Smi::RawValue(0));
-    if (is_optimizing()) {
-      // Initialize type_args to null if none passed in.
-      __ LoadObject(R0, Object::null_object());
-      __ b(&store_type_args, EQ);
-    } else {
-      __ b(&ok, EQ);  // Already initialized to null.
-    }
-    // Load the passed type args vector in R0 from
-    // fp[kParamEndSlotFromFp + num_args + 1]; num_args (R1) is Smi.
-    __ LoadFieldFromOffset(R1, R4, ArgumentsDescriptor::count_offset());
-    __ add(R1, FP, Operand(R1, LSL, 2));
-    __ LoadFromOffset(R0, R1, (kParamEndSlotFromFp + 1) * kWordSize);
-    // Store R0 into the stack slot reserved for the function type arguments.
-    // If the function type arguments variable is captured, a copy will happen
-    // after the context is allocated.
-    const intptr_t slot_base = parsed_function().first_stack_local_index();
-    ASSERT(parsed_function().function_type_arguments()->is_captured() ||
-           parsed_function().function_type_arguments()->index() == slot_base);
-    __ Bind(&store_type_args);
-    __ StoreToOffset(R0, FP, slot_base * kWordSize);
-    __ Bind(&ok);
-  }
-
   EndCodeSourceRange(TokenPosition::kDartCodePrologue);

   VisitBlocks();
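
The args_desc_index arithmetic above maps the argument descriptor variable's
FP-relative index back to a zero-based spill-slot number. A small worked check
(the index value is made up for illustration):

  const intptr_t var_index = kFirstLocalSlotFromFp - 2;  // hypothetical index
  const intptr_t args_desc_index = -(var_index - kFirstLocalSlotFromFp);
  ASSERT(args_desc_index == 2);  // loop iteration i == 2 stores ARGS_DESC_REG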


@@ -308,122 +308,29 @@ void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
 void FlowGraphCompiler::EmitFrameEntry() {
   const Function& function = parsed_function().function();
   const intptr_t num_fixed_params = function.num_fixed_parameters();
-  const int num_opt_pos_params = function.NumOptionalPositionalParameters();
-  const int num_opt_named_params = function.NumOptionalNamedParameters();
-  const int num_params =
-      num_fixed_params + num_opt_pos_params + num_opt_named_params;
-  const bool has_optional_params =
-      (num_opt_pos_params != 0) || (num_opt_named_params != 0);
   const int num_locals = parsed_function().num_stack_locals();
-  const intptr_t context_index =
-      -parsed_function().current_context_var()->index() - 1;

   if (CanOptimizeFunction() && function.IsOptimizable() &&
       (!is_optimizing() || may_reoptimize())) {
     __ HotCheck(!is_optimizing(), GetOptimizationThreshold());
   }

-  if (has_optional_params) {
-    __ EntryOptional(num_fixed_params, num_opt_pos_params,
-                     num_opt_named_params);
-  } else if (!is_optimizing()) {
-    __ Entry(num_fixed_params, num_locals, context_index);
-  } else {
+  if (is_optimizing()) {
     __ EntryOptimized(num_fixed_params,
                       flow_graph_.graph_entry()->spill_slot_count());
+  } else {
+    __ Entry(num_locals);
   }

-  if (num_opt_named_params != 0) {
-    LocalScope* scope = parsed_function().node_sequence()->scope();
-
-    // Start by alphabetically sorting the names of the optional parameters.
-    LocalVariable** opt_param =
-        zone()->Alloc<LocalVariable*>(num_opt_named_params);
-    int* opt_param_position = zone()->Alloc<int>(num_opt_named_params);
-    for (int pos = num_fixed_params; pos < num_params; pos++) {
-      LocalVariable* parameter = scope->VariableAt(pos);
-      const String& opt_param_name = parameter->name();
-      int i = pos - num_fixed_params;
-      while (--i >= 0) {
-        LocalVariable* param_i = opt_param[i];
-        const intptr_t result = opt_param_name.CompareTo(param_i->name());
-        ASSERT(result != 0);
-        if (result > 0) break;
-        opt_param[i + 1] = opt_param[i];
-        opt_param_position[i + 1] = opt_param_position[i];
-      }
-      opt_param[i + 1] = parameter;
-      opt_param_position[i + 1] = pos;
-    }
-    for (intptr_t i = 0; i < num_opt_named_params; i++) {
-      const int param_pos = opt_param_position[i];
-      const Instance& value = parsed_function().DefaultParameterValueAt(
-          param_pos - num_fixed_params);
-      __ LoadConstant(param_pos, opt_param[i]->name());
-      __ LoadConstant(param_pos, value);
-    }
-  } else if (num_opt_pos_params != 0) {
-    for (intptr_t i = 0; i < num_opt_pos_params; i++) {
-      const Object& value = parsed_function().DefaultParameterValueAt(i);
-      __ LoadConstant(num_fixed_params + i, value);
-    }
-  }
-
-  if (has_optional_params) {
-    if (!is_optimizing()) {
-      ASSERT(num_locals > 0);  // There is always at least context_var.
-      __ Frame(num_locals);    // Reserve space for locals.
-    } else if (flow_graph_.graph_entry()->spill_slot_count() >
-               flow_graph_.num_copied_params()) {
-      __ Frame(flow_graph_.graph_entry()->spill_slot_count() -
-               flow_graph_.num_copied_params());
-    }
-  }
-
-  const bool expect_type_arguments =
-      isolate()->reify_generic_functions() && function.IsGeneric();
-  if (function.IsClosureFunction()) {
-    // In optimized mode the register allocator expects CurrentContext in the
-    // flow_graph_.num_copied_params() register at function entry, unless that
-    // register is used for function type arguments, either as their
-    // permanent location or as their temporary location when captured.
-    // In that case, the next register holds CurrentContext.
-    // (see FlowGraphAllocator::ProcessInitialDefinition)
-    Register context_reg =
-        is_optimizing()
-            ? (expect_type_arguments ? flow_graph_.num_copied_params() + 1
-                                     : flow_graph_.num_copied_params())
-            : context_index;
-    LocalScope* scope = parsed_function().node_sequence()->scope();
-    LocalVariable* local = scope->VariableAt(0);  // Closure instance receiver.
-    Register closure_reg;
-    if (local->index() > 0) {
-      __ Move(context_reg, -local->index());
-      closure_reg = context_reg;
-    } else {
-      closure_reg = -local->index() - 1;
-    }
-    __ LoadField(context_reg, closure_reg,
-                 Closure::context_offset() / kWordSize);
-  } else if (has_optional_params && !is_optimizing()) {
-    __ LoadConstant(context_index, Object::empty_context());
-  }
-
-  if (isolate()->reify_generic_functions()) {
-    // Check for a passed type argument vector if the function is generic, or
-    // check that none is passed if not generic and not already checked during
-    // resolution.
-    const bool check_arguments =
-        (function.IsClosureFunction() || function.IsConvertedClosureFunction());
-    if ((expect_type_arguments || check_arguments) &&
-        !flow_graph().IsCompiledForOsr()) {
-      ASSERT(!expect_type_arguments ||
-             (-parsed_function().first_stack_local_index() - 1 ==
-              flow_graph_.num_copied_params()));
-      __ CheckFunctionTypeArgs(function.NumTypeParameters(),
-                               flow_graph_.num_copied_params());
+  if (!is_optimizing()) {
+    if (parsed_function().has_arg_desc_var()) {
+      // TODO(kustermann): If dbc simulator put the args_desc_ into the
+      // _special_regs, we could replace these 3 with the MoveSpecial bytecode.
+      const intptr_t args_desc_index =
+          -(parsed_function().arg_desc_var()->index() - kFirstLocalSlotFromFp);
+      __ LoadArgDescriptor();
+      __ StoreLocal(args_desc_index);
+      __ Drop(1);
     }
   }
 }
@@ -450,10 +357,13 @@ uint16_t FlowGraphCompiler::ToEmbeddableCid(intptr_t cid,
 }

 intptr_t FlowGraphCompiler::CatchEntryRegForVariable(const LocalVariable& var) {
+  const Function& function = parsed_function().function();
+  const intptr_t num_non_copied_params =
+      function.HasOptionalParameters() ? 0 : function.NumParameters();
   ASSERT(is_optimizing());
   ASSERT(var.index() <= 0);
-  return kNumberOfCpuRegisters -
-         (flow_graph().num_non_copied_params() - var.index());
+  return kNumberOfCpuRegisters - (num_non_copied_params - var.index());
 }

 #undef __
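
To make the new register assignment concrete, a worked example with made-up
numbers: for a function with num_non_copied_params == 3 and a variable at
index -2,

  kNumberOfCpuRegisters - (3 - (-2)) == kNumberOfCpuRegisters - 5

so the catch-entry move targets the fifth register from the top of the DBC
register file.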
@@ -470,6 +380,9 @@ void ParallelMoveResolver::EmitMove(int index) {
     __ Move(destination.reg(), -kParamEndSlotFromFp + source.stack_index());
   } else if (source.IsRegister() && destination.IsRegister()) {
     __ Move(destination.reg(), source.reg());
+  } else if (source.IsArgsDescRegister()) {
+    ASSERT(destination.IsRegister());
+    __ LoadArgDescriptorOpt(destination.reg());
   } else if (source.IsConstant() && destination.IsRegister()) {
     if (source.constant_instruction()->representation() == kUnboxedDouble) {
       const Register result = destination.reg();


@@ -713,258 +713,6 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
   }
 }
// Input parameters:
// EDX: arguments descriptor array.
void FlowGraphCompiler::CheckTypeArgsLen(bool expect_type_args,
Label* wrong_num_arguments) {
__ Comment("Check type args len");
const Function& function = parsed_function().function();
Label correct_type_args_len;
if (expect_type_args) {
// Type args are always optional, so length can always be zero.
// If expect_type_args, a non-zero length must match the declaration length.
__ movl(EAX,
FieldAddress(EDX, ArgumentsDescriptor::type_args_len_offset()));
if (isolate()->strong()) {
__ andl(EAX,
Immediate(Smi::RawValue(
ArgumentsDescriptor::TypeArgsLenField::mask_in_place())));
}
__ cmpl(EAX, Immediate(Smi::RawValue(0)));
__ j(EQUAL, &correct_type_args_len, Assembler::kNearJump);
__ cmpl(EAX, Immediate(Smi::RawValue(function.NumTypeParameters())));
} else {
__ cmpl(FieldAddress(EDX, ArgumentsDescriptor::type_args_len_offset()),
Immediate(Smi::RawValue(0)));
}
__ j(NOT_EQUAL, wrong_num_arguments);
__ Bind(&correct_type_args_len);
}
// Input parameters:
// EDX: arguments descriptor array.
void FlowGraphCompiler::CopyParameters(bool expect_type_args,
bool check_arguments) {
Label wrong_num_arguments;
if (check_arguments) {
CheckTypeArgsLen(expect_type_args, &wrong_num_arguments);
}
__ Comment("Copy parameters");
const Function& function = parsed_function().function();
LocalScope* scope = parsed_function().node_sequence()->scope();
const int num_fixed_params = function.num_fixed_parameters();
const int num_opt_pos_params = function.NumOptionalPositionalParameters();
const int num_opt_named_params = function.NumOptionalNamedParameters();
const int num_params =
num_fixed_params + num_opt_pos_params + num_opt_named_params;
ASSERT(function.NumParameters() == num_params);
ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp);
// Check that min_num_pos_args <= num_pos_args <= max_num_pos_args,
// where num_pos_args is the number of positional arguments passed in.
const int min_num_pos_args = num_fixed_params;
const int max_num_pos_args = num_fixed_params + num_opt_pos_params;
__ movl(ECX,
FieldAddress(EDX, ArgumentsDescriptor::positional_count_offset()));
if (isolate()->strong()) {
__ andl(ECX,
Immediate(Smi::RawValue(
ArgumentsDescriptor::PositionalCountField::mask_in_place())));
}
// Check that min_num_pos_args <= num_pos_args.
__ cmpl(ECX, Immediate(Smi::RawValue(min_num_pos_args)));
__ j(LESS, &wrong_num_arguments);
// Check that num_pos_args <= max_num_pos_args.
__ cmpl(ECX, Immediate(Smi::RawValue(max_num_pos_args)));
__ j(GREATER, &wrong_num_arguments);
// Copy positional arguments.
// Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied
// to fp[kFirstLocalSlotFromFp - i].
__ movl(EBX, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
// Since EBX and ECX are Smi, use TIMES_2 instead of TIMES_4.
// Let EBX point to the last passed positional argument, i.e. to
// fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)].
__ subl(EBX, ECX);
__ leal(EBX,
Address(EBP, EBX, TIMES_2, (kParamEndSlotFromFp + 1) * kWordSize));
// Let EDI point to the last copied positional argument, i.e. to
// fp[kFirstLocalSlotFromFp - (num_pos_args - 1)].
__ leal(EDI, Address(EBP, (kFirstLocalSlotFromFp + 1) * kWordSize));
__ subl(EDI, ECX); // ECX is a Smi, subtract twice for TIMES_4 scaling.
__ subl(EDI, ECX);
__ SmiUntag(ECX);
Label loop, loop_condition;
__ jmp(&loop_condition, Assembler::kNearJump);
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const Address argument_addr(EBX, ECX, TIMES_4, 0);
const Address copy_addr(EDI, ECX, TIMES_4, 0);
__ Bind(&loop);
__ movl(EAX, argument_addr);
__ movl(copy_addr, EAX);
__ Bind(&loop_condition);
__ decl(ECX);
__ j(POSITIVE, &loop, Assembler::kNearJump);
// Copy or initialize optional named arguments.
const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
Label all_arguments_processed;
#ifdef DEBUG
const bool check_correct_named_args = true;
#else
const bool check_correct_named_args = check_arguments;
#endif
if (num_opt_named_params > 0) {
// Start by alphabetically sorting the names of the optional parameters.
LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
int* opt_param_position = new int[num_opt_named_params];
for (int pos = num_fixed_params; pos < num_params; pos++) {
LocalVariable* parameter = scope->VariableAt(pos);
const String& opt_param_name = parameter->name();
int i = pos - num_fixed_params;
while (--i >= 0) {
LocalVariable* param_i = opt_param[i];
const intptr_t result = opt_param_name.CompareTo(param_i->name());
ASSERT(result != 0);
if (result > 0) break;
opt_param[i + 1] = opt_param[i];
opt_param_position[i + 1] = opt_param_position[i];
}
opt_param[i + 1] = parameter;
opt_param_position[i + 1] = pos;
}
// Generate code handling each optional parameter in alphabetical order.
__ movl(EBX, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
// Let EBX point to the first passed argument, i.e. to
// fp[kParamEndSlotFromFp + num_args - 0]; num_args (EBX) is Smi.
__ leal(EBX, Address(EBP, EBX, TIMES_2, kParamEndSlotFromFp * kWordSize));
// Let EDI point to the entry of the first named argument.
__ leal(EDI,
FieldAddress(EDX, ArgumentsDescriptor::first_named_entry_offset()));
for (int i = 0; i < num_opt_named_params; i++) {
Label load_default_value, assign_optional_parameter;
const int param_pos = opt_param_position[i];
// Check if this named parameter was passed in.
// Load EAX with the name of the argument.
__ movl(EAX, Address(EDI, ArgumentsDescriptor::name_offset()));
ASSERT(opt_param[i]->name().IsSymbol());
__ CompareObject(EAX, opt_param[i]->name());
__ j(NOT_EQUAL, &load_default_value, Assembler::kNearJump);
// Load EAX with passed-in argument at provided arg_pos, i.e. at
// fp[kParamEndSlotFromFp + num_args - arg_pos].
__ movl(EAX, Address(EDI, ArgumentsDescriptor::position_offset()));
if (isolate()->strong()) {
__ andl(
EAX,
Immediate(Smi::RawValue(
ArgumentsDescriptor::PositionalCountField::mask_in_place())));
}
// EAX is arg_pos as Smi.
// Point to next named entry.
__ addl(EDI, Immediate(ArgumentsDescriptor::named_entry_size()));
__ negl(EAX);
Address argument_addr(EBX, EAX, TIMES_2, 0); // EAX is a negative Smi.
__ movl(EAX, argument_addr);
__ jmp(&assign_optional_parameter, Assembler::kNearJump);
__ Bind(&load_default_value);
// Load EAX with default argument.
const Instance& value = parsed_function().DefaultParameterValueAt(
param_pos - num_fixed_params);
__ LoadObject(EAX, value);
__ Bind(&assign_optional_parameter);
// Assign EAX to fp[kFirstLocalSlotFromFp - param_pos].
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
const Address param_addr(EBP, computed_param_pos * kWordSize);
__ movl(param_addr, EAX);
}
delete[] opt_param;
delete[] opt_param_position;
if (check_correct_named_args) {
// Check that EDI now points to the null terminator in the arguments
// descriptor.
__ cmpl(Address(EDI, 0), raw_null);
__ j(EQUAL, &all_arguments_processed, Assembler::kNearJump);
}
} else {
ASSERT(num_opt_pos_params > 0);
__ movl(ECX,
FieldAddress(EDX, ArgumentsDescriptor::positional_count_offset()));
__ SmiUntag(ECX);
if (isolate()->strong()) {
__ andl(ECX,
Immediate(
ArgumentsDescriptor::PositionalCountField::mask_in_place()));
}
for (int i = 0; i < num_opt_pos_params; i++) {
Label next_parameter;
// Handle this optional positional parameter only if k or fewer positional
// arguments have been passed, where k is param_pos, the position of this
// optional parameter in the formal parameter list.
const int param_pos = num_fixed_params + i;
__ cmpl(ECX, Immediate(param_pos));
__ j(GREATER, &next_parameter, Assembler::kNearJump);
// Load EAX with default argument.
const Object& value = parsed_function().DefaultParameterValueAt(i);
__ LoadObject(EAX, value);
// Assign EAX to fp[kFirstLocalSlotFromFp - param_pos].
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
const Address param_addr(EBP, computed_param_pos * kWordSize);
__ movl(param_addr, EAX);
__ Bind(&next_parameter);
}
if (check_correct_named_args) {
__ movl(EBX, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
__ SmiUntag(EBX);
// Check that ECX equals EBX, i.e. no named arguments passed.
__ cmpl(ECX, EBX);
__ j(EQUAL, &all_arguments_processed, Assembler::kNearJump);
}
}
__ Bind(&wrong_num_arguments);
if (check_arguments) {
__ LeaveFrame(); // The arguments are still on the stack.
__ Jmp(*StubCode::CallClosureNoSuchMethod_entry());
// The noSuchMethod call may return to the caller, but not here.
} else if (check_correct_named_args) {
__ Stop("Wrong arguments");
}
__ Bind(&all_arguments_processed);
// Nullify originally passed arguments only after they have been copied and
// checked, otherwise noSuchMethod would not see their original values.
// This step can be skipped in case we decide that formal parameters are
// implicitly final, since garbage collecting the unmodified value is not
// an issue anymore.
// EDX : arguments descriptor array.
__ movl(ECX, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
__ SmiUntag(ECX);
Label null_args_loop, null_args_loop_condition;
__ jmp(&null_args_loop_condition, Assembler::kNearJump);
const Address original_argument_addr(EBP, ECX, TIMES_4,
(kParamEndSlotFromFp + 1) * kWordSize);
__ Bind(&null_args_loop);
__ movl(original_argument_addr, raw_null);
__ Bind(&null_args_loop_condition);
__ decl(ECX);
__ j(POSITIVE, &null_args_loop, Assembler::kNearJump);
}
 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
   // TOS: return address.
   // +1 : receiver.

@@ -1012,8 +760,7 @@ void FlowGraphCompiler::EmitFrameEntry() {
   }
   __ Comment("Enter frame");
   if (flow_graph().IsCompiledForOsr()) {
-    intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() -
-                           flow_graph().num_copied_params();
+    intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals();
     ASSERT(extra_slots >= 0);
     __ EnterOsrFrame(extra_slots * kWordSize);
   } else {
@@ -1032,139 +779,28 @@ void FlowGraphCompiler::CompileGraph() {
   EmitFrameEntry();

-  const Function& function = parsed_function().function();
-  const int num_fixed_params = function.num_fixed_parameters();
-  const int num_copied_params = parsed_function().num_copied_params();
-  const int num_locals = parsed_function().num_stack_locals();
-
-  // The prolog of OSR functions is never executed, hence greatly simplified.
-  const bool expect_type_args = isolate()->reify_generic_functions() &&
-                                function.IsGeneric() &&
-                                !flow_graph().IsCompiledForOsr();
-  const bool check_arguments =
-      (function.IsClosureFunction() || function.IsConvertedClosureFunction()) &&
-      !flow_graph().IsCompiledForOsr();
-
-  // We check the number of passed arguments when we have to copy them due to
-  // the presence of optional parameters.
-  // No such checking code is generated if only fixed parameters are declared,
-  // unless we are in debug mode or unless we are compiling a closure.
-  if (num_copied_params == 0) {
-    if (check_arguments) {
-      Label correct_num_arguments, wrong_num_arguments;
-      CheckTypeArgsLen(expect_type_args, &wrong_num_arguments);
-      __ Comment("Check argument count");
-      // Check that exactly num_fixed arguments are passed in.
-      __ movl(EAX, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
-      __ cmpl(EAX, Immediate(Smi::RawValue(num_fixed_params)));
-      __ j(NOT_EQUAL, &wrong_num_arguments, Assembler::kNearJump);
-      if (isolate()->strong()) {
-        __ movl(ECX, FieldAddress(
-                         EDX, ArgumentsDescriptor::positional_count_offset()));
-        __ andl(
-            ECX,
-            Immediate(Smi::RawValue(
-                ArgumentsDescriptor::PositionalCountField::mask_in_place())));
-        __ cmpl(EAX, ECX);
-      } else {
-        __ cmpl(EAX, FieldAddress(
-                         EDX, ArgumentsDescriptor::positional_count_offset()));
-      }
-      __ j(EQUAL, &correct_num_arguments, Assembler::kNearJump);
-      __ Bind(&wrong_num_arguments);
-      __ LeaveFrame();  // The arguments are still on the stack.
-      __ Jmp(*StubCode::CallClosureNoSuchMethod_entry());
-      // The noSuchMethod call may return to the caller, but not here.
-      __ Bind(&correct_num_arguments);
-    }
-  } else if (!flow_graph().IsCompiledForOsr()) {
-    CopyParameters(expect_type_args, check_arguments);
-  }
-
-  if (function.IsClosureFunction() && !flow_graph().IsCompiledForOsr()) {
-    // Load context from the closure object (first argument).
-    LocalScope* scope = parsed_function().node_sequence()->scope();
-    LocalVariable* closure_parameter = scope->VariableAt(0);
-    // TODO(fschneider): Don't load context for optimized functions that
-    // don't use it.
-    __ movl(CTX, Address(EBP, closure_parameter->index() * kWordSize));
-    __ movl(CTX, FieldAddress(CTX, Closure::context_offset()));
-#ifdef DEBUG
-    Label ok;
-    __ LoadClassId(EBX, CTX);
-    __ cmpl(EBX, Immediate(kContextCid));
-    __ j(EQUAL, &ok, Assembler::kNearJump);
-    __ Stop("Incorrect context at entry");
-    __ Bind(&ok);
-#endif
-  }
-
-  // In unoptimized code, initialize (non-argument) stack allocated slots to
-  // null.
+  // In unoptimized code, initialize (non-argument) stack allocated slots.
   if (!is_optimizing()) {
-    ASSERT(num_locals > 0);  // There is always at least context_var.
+    const int num_locals = parsed_function().num_stack_locals();
+    intptr_t args_desc_index = -1;
+    if (parsed_function().has_arg_desc_var()) {
+      args_desc_index =
+          -(parsed_function().arg_desc_var()->index() - kFirstLocalSlotFromFp);
+    }
     __ Comment("Initialize spill slots");
-    const intptr_t slot_base = parsed_function().first_stack_local_index();
-    const intptr_t context_index =
-        parsed_function().current_context_var()->index();
-    if (num_locals > 1) {
+    if (num_locals > 1 || args_desc_index != 0) {
       const Immediate& raw_null =
           Immediate(reinterpret_cast<intptr_t>(Object::null()));
       __ movl(EAX, raw_null);
     }
     for (intptr_t i = 0; i < num_locals; ++i) {
-      // Subtract index i (locals lie at lower addresses than EBP).
-      if (((slot_base - i) == context_index)) {
-        if (function.IsClosureFunction()) {
-          __ movl(Address(EBP, (slot_base - i) * kWordSize), CTX);
-        } else {
-          const Immediate& raw_empty_context = Immediate(
-              reinterpret_cast<intptr_t>(Object::empty_context().raw()));
-          __ movl(Address(EBP, (slot_base - i) * kWordSize), raw_empty_context);
-        }
-      } else {
-        ASSERT(num_locals > 1);
-        __ movl(Address(EBP, (slot_base - i) * kWordSize), EAX);
-      }
+      Register value_reg = i == args_desc_index ? ARGS_DESC_REG : EAX;
+      __ movl(Address(EBP, (kFirstLocalSlotFromFp - i) * kWordSize), value_reg);
     }
   }

-  // Copy passed-in type argument vector if the function is generic.
-  if (expect_type_args) {
-    __ Comment("Copy passed-in type args");
-    Label store_type_args, ok;
-    __ cmpl(FieldAddress(EDX, ArgumentsDescriptor::type_args_len_offset()),
-            Immediate(Smi::RawValue(0)));
-    if (is_optimizing()) {
-      // Initialize type_args to null if none passed in.
-      const Immediate& raw_null =
-          Immediate(reinterpret_cast<intptr_t>(Object::null()));
-      __ movl(EAX, raw_null);
-      __ j(EQUAL, &store_type_args, Assembler::kNearJump);
-    } else {
-      __ j(EQUAL, &ok, Assembler::kNearJump);  // Already initialized to null.
-    }
-    // Load the passed type args vector in EAX from
-    // fp[kParamEndSlotFromFp + num_args + 1]; num_args (EBX) is Smi.
-    __ movl(EBX, FieldAddress(EDX, ArgumentsDescriptor::count_offset()));
-    __ movl(EAX,
-            Address(EBP, EBX, TIMES_2, (kParamEndSlotFromFp + 1) * kWordSize));
-    // Store EAX into the stack slot reserved for the function type arguments.
-    // If the function type arguments variable is captured, a copy will happen
-    // after the context is allocated.
-    const intptr_t slot_base = parsed_function().first_stack_local_index();
-    ASSERT(parsed_function().function_type_arguments()->is_captured() ||
-           parsed_function().function_type_arguments()->index() == slot_base);
-    __ Bind(&store_type_args);
-    __ movl(Address(EBP, slot_base * kWordSize), EAX);
-    __ Bind(&ok);
-  }
-
   EndCodeSourceRange(TokenPosition::kDartCodePrologue);

   ASSERT(!block_order().is_empty());
   VisitBlocks();


@@ -704,264 +704,6 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
   }
 }
// Input parameters:
// R10: arguments descriptor array.
void FlowGraphCompiler::CheckTypeArgsLen(bool expect_type_args,
Label* wrong_num_arguments) {
__ Comment("Check type args len");
const Function& function = parsed_function().function();
Label correct_type_args_len;
if (expect_type_args) {
// Type args are always optional, so length can always be zero.
// If expect_type_args, a non-zero length must match the declaration length.
__ movq(RAX,
FieldAddress(R10, ArgumentsDescriptor::type_args_len_offset()));
if (isolate()->strong()) {
__ andq(RAX,
Immediate(Smi::RawValue(
ArgumentsDescriptor::TypeArgsLenField::mask_in_place())));
}
__ CompareImmediate(RAX, Immediate(Smi::RawValue(0)));
__ j(EQUAL, &correct_type_args_len, Assembler::kNearJump);
__ CompareImmediate(RAX,
Immediate(Smi::RawValue(function.NumTypeParameters())));
} else {
__ cmpq(FieldAddress(R10, ArgumentsDescriptor::type_args_len_offset()),
Immediate(Smi::RawValue(0)));
}
__ j(NOT_EQUAL, wrong_num_arguments);
__ Bind(&correct_type_args_len);
}
// Input parameters:
// R10: arguments descriptor array.
void FlowGraphCompiler::CopyParameters(bool expect_type_args,
bool check_arguments) {
Label wrong_num_arguments;
if (check_arguments) {
CheckTypeArgsLen(expect_type_args, &wrong_num_arguments);
}
__ Comment("Copy parameters");
const Function& function = parsed_function().function();
LocalScope* scope = parsed_function().node_sequence()->scope();
const int num_fixed_params = function.num_fixed_parameters();
const int num_opt_pos_params = function.NumOptionalPositionalParameters();
const int num_opt_named_params = function.NumOptionalNamedParameters();
const int num_params =
num_fixed_params + num_opt_pos_params + num_opt_named_params;
ASSERT(function.NumParameters() == num_params);
ASSERT(parsed_function().first_parameter_index() == kFirstLocalSlotFromFp);
// Check that min_num_pos_args <= num_pos_args <= max_num_pos_args,
// where num_pos_args is the number of positional arguments passed in.
const int min_num_pos_args = num_fixed_params;
const int max_num_pos_args = num_fixed_params + num_opt_pos_params;
__ movq(RCX,
FieldAddress(R10, ArgumentsDescriptor::positional_count_offset()));
if (isolate()->strong()) {
__ andq(RCX,
Immediate(Smi::RawValue(
ArgumentsDescriptor::PositionalCountField::mask_in_place())));
}
// Check that min_num_pos_args <= num_pos_args.
__ CompareImmediate(RCX, Immediate(Smi::RawValue(min_num_pos_args)));
__ j(LESS, &wrong_num_arguments);
// Check that num_pos_args <= max_num_pos_args.
__ CompareImmediate(RCX, Immediate(Smi::RawValue(max_num_pos_args)));
__ j(GREATER, &wrong_num_arguments);
// Copy positional arguments.
// Argument i passed at fp[kParamEndSlotFromFp + num_args - i] is copied
// to fp[kFirstLocalSlotFromFp - i].
__ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
// Since RBX and RCX are Smi, use TIMES_4 instead of TIMES_8.
// Let RBX point to the last passed positional argument, i.e. to
// fp[kParamEndSlotFromFp + num_args - (num_pos_args - 1)].
__ subq(RBX, RCX);
__ leaq(RBX,
Address(RBP, RBX, TIMES_4, (kParamEndSlotFromFp + 1) * kWordSize));
// Let RDI point to the last copied positional argument, i.e. to
// fp[kFirstLocalSlotFromFp - (num_pos_args - 1)].
__ SmiUntag(RCX);
__ movq(RAX, RCX);
__ negq(RAX);
// -num_pos_args is in RAX.
__ leaq(RDI,
Address(RBP, RAX, TIMES_8, (kFirstLocalSlotFromFp + 1) * kWordSize));
Label loop, loop_condition;
__ jmp(&loop_condition, Assembler::kNearJump);
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const Address argument_addr(RBX, RCX, TIMES_8, 0);
const Address copy_addr(RDI, RCX, TIMES_8, 0);
__ Bind(&loop);
__ movq(RAX, argument_addr);
__ movq(copy_addr, RAX);
__ Bind(&loop_condition);
__ decq(RCX);
__ j(POSITIVE, &loop, Assembler::kNearJump);
// Copy or initialize optional named arguments.
Label all_arguments_processed;
#ifdef DEBUG
const bool check_correct_named_args = true;
#else
const bool check_correct_named_args = check_arguments;
#endif
if (num_opt_named_params > 0) {
// Start by alphabetically sorting the names of the optional parameters.
LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
int* opt_param_position = new int[num_opt_named_params];
for (int pos = num_fixed_params; pos < num_params; pos++) {
LocalVariable* parameter = scope->VariableAt(pos);
const String& opt_param_name = parameter->name();
int i = pos - num_fixed_params;
while (--i >= 0) {
LocalVariable* param_i = opt_param[i];
const intptr_t result = opt_param_name.CompareTo(param_i->name());
ASSERT(result != 0);
if (result > 0) break;
opt_param[i + 1] = opt_param[i];
opt_param_position[i + 1] = opt_param_position[i];
}
opt_param[i + 1] = parameter;
opt_param_position[i + 1] = pos;
}
// Generate code handling each optional parameter in alphabetical order.
__ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
// Let RBX point to the first passed argument, i.e. to
// fp[kParamEndSlotFromFp + num_args]; num_args (RBX) is Smi.
__ leaq(RBX, Address(RBP, RBX, TIMES_4, kParamEndSlotFromFp * kWordSize));
// Let RDI point to the entry of the first named argument.
__ leaq(RDI,
FieldAddress(R10, ArgumentsDescriptor::first_named_entry_offset()));
for (int i = 0; i < num_opt_named_params; i++) {
Label load_default_value, assign_optional_parameter;
const int param_pos = opt_param_position[i];
// Check if this named parameter was passed in.
// Load RAX with the name of the argument.
__ movq(RAX, Address(RDI, ArgumentsDescriptor::name_offset()));
ASSERT(opt_param[i]->name().IsSymbol());
__ CompareObject(RAX, opt_param[i]->name());
__ j(NOT_EQUAL, &load_default_value, Assembler::kNearJump);
// Load RAX with passed-in argument at provided arg_pos, i.e. at
// fp[kParamEndSlotFromFp + num_args - arg_pos].
__ movq(RAX, Address(RDI, ArgumentsDescriptor::position_offset()));
if (isolate()->strong()) {
__ andq(
RAX,
Immediate(Smi::RawValue(
ArgumentsDescriptor::PositionalCountField::mask_in_place())));
}
// RAX is arg_pos as Smi.
// Point to next named entry.
__ AddImmediate(RDI, Immediate(ArgumentsDescriptor::named_entry_size()));
__ negq(RAX);
Address argument_addr(RBX, RAX, TIMES_4, 0); // RAX is a negative Smi.
__ movq(RAX, argument_addr);
__ jmp(&assign_optional_parameter, Assembler::kNearJump);
__ Bind(&load_default_value);
// Load RAX with default argument.
const Instance& value = parsed_function().DefaultParameterValueAt(
param_pos - num_fixed_params);
__ LoadObject(RAX, value);
__ Bind(&assign_optional_parameter);
// Assign RAX to fp[kFirstLocalSlotFromFp - param_pos].
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
const Address param_addr(RBP, computed_param_pos * kWordSize);
__ movq(param_addr, RAX);
}
delete[] opt_param;
delete[] opt_param_position;
if (check_correct_named_args) {
// Check that RDI now points to the null terminator in the arguments
// descriptor.
__ LoadObject(TMP, Object::null_object());
__ cmpq(Address(RDI, 0), TMP);
__ j(EQUAL, &all_arguments_processed, Assembler::kNearJump);
}
} else {
ASSERT(num_opt_pos_params > 0);
__ movq(RCX,
FieldAddress(R10, ArgumentsDescriptor::positional_count_offset()));
__ SmiUntag(RCX);
if (isolate()->strong()) {
__ andq(RCX,
Immediate(
ArgumentsDescriptor::PositionalCountField::mask_in_place()));
}
for (int i = 0; i < num_opt_pos_params; i++) {
Label next_parameter;
// Handle this optional positional parameter only if k or fewer positional
// arguments have been passed, where k is param_pos, the position of this
// optional parameter in the formal parameter list.
const int param_pos = num_fixed_params + i;
__ CompareImmediate(RCX, Immediate(param_pos));
__ j(GREATER, &next_parameter, Assembler::kNearJump);
// Load RAX with default argument.
const Object& value = parsed_function().DefaultParameterValueAt(i);
__ LoadObject(RAX, value);
// Assign RAX to fp[kFirstLocalSlotFromFp - param_pos].
// We do not use the final allocation index of the variable here, i.e.
// scope->VariableAt(i)->index(), because captured variables still need
// to be copied to the context that is not yet allocated.
const intptr_t computed_param_pos = kFirstLocalSlotFromFp - param_pos;
const Address param_addr(RBP, computed_param_pos * kWordSize);
__ movq(param_addr, RAX);
__ Bind(&next_parameter);
}
if (check_correct_named_args) {
__ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
__ SmiUntag(RBX);
// Check that RCX equals RBX, i.e. no named arguments passed.
__ cmpq(RCX, RBX);
__ j(EQUAL, &all_arguments_processed, Assembler::kNearJump);
}
}
__ Bind(&wrong_num_arguments);
if (check_arguments) {
__ LeaveDartFrame(kKeepCalleePP); // The arguments are still on the stack.
__ Jmp(*StubCode::CallClosureNoSuchMethod_entry());
// The noSuchMethod call may return to the caller, but not here.
} else if (check_correct_named_args) {
__ Stop("Wrong arguments");
}
__ Bind(&all_arguments_processed);
// Nullify originally passed arguments only after they have been copied and
// checked, otherwise noSuchMethod would not see their original values.
// This step can be skipped in case we decide that formal parameters are
// implicitly final, since garbage collecting the unmodified value is not
// an issue anymore.
// R10 : arguments descriptor array.
__ movq(RCX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
__ SmiUntag(RCX);
__ LoadObject(R12, Object::null_object());
Label null_args_loop, null_args_loop_condition;
__ jmp(&null_args_loop_condition, Assembler::kNearJump);
const Address original_argument_addr(RBP, RCX, TIMES_8,
(kParamEndSlotFromFp + 1) * kWordSize);
__ Bind(&null_args_loop);
__ movq(original_argument_addr, R12);
__ Bind(&null_args_loop_condition);
__ decq(RCX);
__ j(POSITIVE, &null_args_loop, Assembler::kNearJump);
}
 void FlowGraphCompiler::GenerateInlinedGetter(intptr_t offset) {
   // TOS: return address.
   // +1 : receiver.

@@ -989,8 +731,7 @@ void FlowGraphCompiler::GenerateInlinedSetter(intptr_t offset) {
 // needs to be updated to match.
 void FlowGraphCompiler::EmitFrameEntry() {
   if (flow_graph().IsCompiledForOsr()) {
-    intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals() -
-                           flow_graph().num_copied_params();
+    intptr_t extra_slots = StackSize() - flow_graph().num_stack_locals();
     ASSERT(extra_slots >= 0);
     __ EnterOsrFrame(extra_slots * kWordSize);
   } else {
@@ -1023,9 +764,8 @@ void FlowGraphCompiler::EmitFrameEntry() {
 void FlowGraphCompiler::CompileGraph() {
   InitCompiler();
-  const Function& function = parsed_function().function();
 #ifdef DART_PRECOMPILER
+  const Function& function = parsed_function().function();
   if (function.IsDynamicFunction()) {
     __ MonomorphicCheckedEntry();
   }
@@ -1039,130 +779,26 @@ void FlowGraphCompiler::CompileGraph() {
   EmitFrameEntry();
   ASSERT(assembler()->constant_pool_allowed());

-  const int num_fixed_params = function.num_fixed_parameters();
-  const int num_copied_params = parsed_function().num_copied_params();
-  const int num_locals = parsed_function().num_stack_locals();
-
-  // The prolog of OSR functions is never executed, hence greatly simplified.
-  const bool expect_type_args = isolate()->reify_generic_functions() &&
-                                function.IsGeneric() &&
-                                !flow_graph().IsCompiledForOsr();
-  const bool check_arguments =
-      (function.IsClosureFunction() || function.IsConvertedClosureFunction()) &&
-      !flow_graph().IsCompiledForOsr();
-
-  // We check the number of passed arguments when we have to copy them due to
-  // the presence of optional parameters.
-  // No such checking code is generated if only fixed parameters are declared,
-  // unless we are in debug mode or unless we are compiling a closure.
-  if (num_copied_params == 0) {
-    if (check_arguments) {
-      Label correct_num_arguments, wrong_num_arguments;
-      CheckTypeArgsLen(expect_type_args, &wrong_num_arguments);
-      __ Comment("Check argument count");
-      // Check that exactly num_fixed arguments are passed in.
-      __ movq(RAX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
-      __ CompareImmediate(RAX, Immediate(Smi::RawValue(num_fixed_params)));
-      __ j(NOT_EQUAL, &wrong_num_arguments, Assembler::kNearJump);
-      if (isolate()->strong()) {
-        __ movq(RCX, FieldAddress(
-                         R10, ArgumentsDescriptor::positional_count_offset()));
-        __ andq(
-            RCX,
-            Immediate(Smi::RawValue(
-                ArgumentsDescriptor::PositionalCountField::mask_in_place())));
-        __ cmpq(RAX, RCX);
-      } else {
-        __ cmpq(RAX, FieldAddress(
-                         R10, ArgumentsDescriptor::positional_count_offset()));
-      }
-      __ j(EQUAL, &correct_num_arguments, Assembler::kNearJump);
-      __ Bind(&wrong_num_arguments);
-      __ LeaveDartFrame(kKeepCalleePP);  // Leave arguments on the stack.
-      __ Jmp(*StubCode::CallClosureNoSuchMethod_entry());
-      // The noSuchMethod call may return to the caller, but not here.
-      __ Bind(&correct_num_arguments);
-    }
-  } else if (!flow_graph().IsCompiledForOsr()) {
-    CopyParameters(expect_type_args, check_arguments);
-  }
-
-  if (function.IsClosureFunction() && !flow_graph().IsCompiledForOsr()) {
-    // Load context from the closure object (first argument).
-    LocalScope* scope = parsed_function().node_sequence()->scope();
-    LocalVariable* closure_parameter = scope->VariableAt(0);
-    __ movq(CTX, Address(RBP, closure_parameter->index() * kWordSize));
-    __ movq(CTX, FieldAddress(CTX, Closure::context_offset()));
-#ifdef DEBUG
-    Label ok;
-    __ LoadClassId(RAX, CTX);
-    __ cmpq(RAX, Immediate(kContextCid));
-    __ j(EQUAL, &ok, Assembler::kNearJump);
-    __ Stop("Incorrect context at entry");
-    __ Bind(&ok);
-#endif
-  }
-
-  // In unoptimized code, initialize (non-argument) stack allocated slots to
-  // null.
+  // In unoptimized code, initialize (non-argument) stack allocated slots.
   if (!is_optimizing()) {
-    ASSERT(num_locals > 0);  // There is always at least context_var.
+    const int num_locals = parsed_function().num_stack_locals();
+    intptr_t args_desc_index = -1;
+    if (parsed_function().has_arg_desc_var()) {
+      args_desc_index =
+          -(parsed_function().arg_desc_var()->index() - kFirstLocalSlotFromFp);
+    }
     __ Comment("Initialize spill slots");
-    const intptr_t slot_base = parsed_function().first_stack_local_index();
-    const intptr_t context_index =
-        parsed_function().current_context_var()->index();
-    if (num_locals > 1) {
+    if (num_locals > 1 || (num_locals == 1 && args_desc_index == -1)) {
       __ LoadObject(RAX, Object::null_object());
     }
     for (intptr_t i = 0; i < num_locals; ++i) {
-      // Subtract index i (locals lie at lower addresses than RBP).
-      if (((slot_base - i) == context_index)) {
-        if (function.IsClosureFunction()) {
-          __ movq(Address(RBP, (slot_base - i) * kWordSize), CTX);
-        } else {
-          __ StoreObject(Address(RBP, (slot_base - i) * kWordSize),
-                         Object::empty_context());
-        }
-      } else {
-        ASSERT(num_locals > 1);
-        __ movq(Address(RBP, (slot_base - i) * kWordSize), RAX);
-      }
+      Register value_reg = i == args_desc_index ? ARGS_DESC_REG : RAX;
+      __ movq(Address(RBP, (kFirstLocalSlotFromFp - i) * kWordSize), value_reg);
     }
   }

-  // Copy passed-in type argument vector if the function is generic.
-  if (expect_type_args) {
-    __ Comment("Copy passed-in type args");
-    Label store_type_args, ok;
-    __ cmpq(FieldAddress(R10, ArgumentsDescriptor::type_args_len_offset()),
-            Immediate(Smi::RawValue(0)));
-    if (is_optimizing()) {
-      // Initialize type_args to null if none passed in.
-      __ LoadObject(RAX, Object::null_object());
-      __ j(EQUAL, &store_type_args, Assembler::kNearJump);
-    } else {
-      __ j(EQUAL, &ok, Assembler::kNearJump);  // Already initialized to null.
-    }
-    // Load the passed type args vector in RAX from
-    // fp[kParamEndSlotFromFp + num_args + 1]; num_args (RBX) is Smi.
-    __ movq(RBX, FieldAddress(R10, ArgumentsDescriptor::count_offset()));
-    __ movq(RAX,
-            Address(RBP, RBX, TIMES_4, (kParamEndSlotFromFp + 1) * kWordSize));
-    // Store RAX into the stack slot reserved for the function type arguments.
-    // If the function type arguments variable is captured, a copy will happen
-    // after the context is allocated.
-    const intptr_t slot_base = parsed_function().first_stack_local_index();
-    ASSERT(parsed_function().function_type_arguments()->is_captured() ||
-           parsed_function().function_type_arguments()->index() == slot_base);
-    __ Bind(&store_type_args);
-    __ movq(Address(RBP, slot_base * kWordSize), RAX);
-    __ Bind(&ok);
-  }
-
   EndCodeSourceRange(TokenPosition::kDartCodePrologue);

   ASSERT(!block_order().is_empty());
   VisitBlocks();


@@ -372,6 +372,9 @@ class EmbeddedArray<T, 0> {
   M(Phi) \
   M(Redefinition) \
   M(Parameter) \
+  M(LoadIndexedUnsafe) \
+  M(StoreIndexedUnsafe) \
+  M(TailCall) \
   M(ParallelMove) \
   M(PushArgument) \
   M(Return) \
@@ -2031,6 +2034,148 @@ class ParameterInstr : public Definition {
   DISALLOW_COPY_AND_ASSIGN(ParameterInstr);
 };
// Stores a tagged pointer to a slot accessible from a fixed register. It has
// the form:
//
//     base_reg[index + #constant] = value
//
//   Input 0: A tagged Smi [index]
//   Input 1: A tagged pointer [value]
//   offset:  A signed constant offset which fits into 8 bits
//
// Currently this instruction pins the base register to be FP.
//
// This low-level instruction is non-inlinable since it makes assumptions about
// the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
class StoreIndexedUnsafeInstr : public TemplateDefinition<2, NoThrow> {
public:
StoreIndexedUnsafeInstr(Value* index, Value* value, intptr_t offset)
: offset_(offset) {
SetInputAt(kIndexPos, index);
SetInputAt(kValuePos, value);
}
enum { kIndexPos = 0, kValuePos = 1 };
DECLARE_INSTRUCTION(StoreIndexedUnsafe)
virtual Representation RequiredInputRepresentation(intptr_t index) const {
ASSERT(index == kIndexPos || index == kValuePos);
return kTagged;
}
virtual bool ComputeCanDeoptimize() const { return false; }
virtual bool HasUnknownSideEffects() const { return false; }
virtual bool AttributesEqual(Instruction* other) const {
return other->AsStoreIndexedUnsafe()->offset() == offset();
}
Value* index() const { return inputs_[kIndexPos]; }
Value* value() const { return inputs_[kValuePos]; }
Register base_reg() const { return FPREG; }
intptr_t offset() const { return offset_; }
PRINT_OPERANDS_TO_SUPPORT
private:
const intptr_t offset_;
DISALLOW_COPY_AND_ASSIGN(StoreIndexedUnsafeInstr);
};
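
To make the addressing concrete: with kSmiTag == 0 and kSmiTagSize == 1, the
tagged index already carries one factor of two, so the backends only add one
more shift on 32-bit targets. A minimal sketch of the effective address (a
hypothetical helper, not part of this change; assumes a 32-bit target where
kWordSize == 4):

  uword EffectiveAddress(uword fp, intptr_t tagged_index, intptr_t offset) {
    const intptr_t value = tagged_index >> kSmiTagSize;  // untag the Smi
    return fp + value * kWordSize + offset;              // word-indexed slot
  }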
// Loads a tagged pointer from a slot accessible from a fixed register. It has
// the form:
//
//     base_reg[index + #constant]
//
//   Input 0: A tagged Smi [index]
//   offset:  A signed constant offset which fits into 8 bits
//
// Currently this instruction pins the base register to be FP.
//
// This low-level instruction is non-inlinable since it makes assumptions about
// the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
class LoadIndexedUnsafeInstr : public TemplateDefinition<1, NoThrow> {
public:
LoadIndexedUnsafeInstr(Value* index, intptr_t offset) : offset_(offset) {
SetInputAt(0, index);
}
DECLARE_INSTRUCTION(LoadIndexedUnsafe)
virtual Representation RequiredInputRepresentation(intptr_t index) const {
ASSERT(index == 0);
return kTagged;
}
virtual Representation representation() const { return kTagged; }
virtual bool ComputeCanDeoptimize() const { return false; }
virtual bool HasUnknownSideEffects() const { return false; }
virtual bool AttributesEqual(Instruction* other) const {
return other->AsLoadIndexedUnsafe()->offset() == offset();
}
Value* index() const { return InputAt(0); }
Register base_reg() const { return FPREG; }
intptr_t offset() const { return offset_; }
PRINT_OPERANDS_TO_SUPPORT
private:
const intptr_t offset_;
DISALLOW_COPY_AND_ASSIGN(LoadIndexedUnsafeInstr);
};
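
One intended use, inferred from the prologue rework in this commit (treat the
exact call shape as an assumption): copying an incoming argument by indexing
off FP with the tagged argument count, much as the deleted hand-written
prologues did with raw loads.

  // Hypothetical prologue-builder fragment; `zone` and `arg_count_smi` (a
  // Definition producing the tagged argument count) are assumed to exist.
  LoadIndexedUnsafeInstr* load = new (zone) LoadIndexedUnsafeInstr(
      new (zone) Value(arg_count_smi),
      (kParamEndSlotFromFp + 1) * kWordSize);  // illustrative offset choice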
// Unwinds the current frame and tail calls a target.
//
// The return address saved by the original caller of this frame will be in its
// usual location (stack or LR). The arguments descriptor supplied by the
// original caller will be put into ARGS_DESC_REG.
//
// This low-level instruction is non-inlinable since it makes assumptions about
// the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
class TailCallInstr : public Instruction {
public:
TailCallInstr(const Code& code, Value* arg_desc)
: code_(code), arg_desc_(NULL) {
SetInputAt(0, arg_desc);
}
DECLARE_INSTRUCTION(TailCall)
const Code& code() const { return code_; }
virtual intptr_t InputCount() const { return 1; }
virtual Value* InputAt(intptr_t i) const {
ASSERT(i == 0);
return arg_desc_;
}
virtual void RawSetInputAt(intptr_t i, Value* value) {
ASSERT(i == 0);
arg_desc_ = value;
}
// Two tailcalls can be canonicalized into one instruction if both have the
// same destination.
virtual bool AllowsCSE() const { return true; }
virtual bool AttributesEqual(Instruction* other) const {
return &other->AsTailCall()->code() == &code();
}
// Since no code after this instruction will be executed, there will be no
// side-effects for the following code.
virtual bool HasUnknownSideEffects() const { return false; }
virtual bool MayThrow() const { return true; }
virtual bool ComputeCanDeoptimize() const { return false; }
PRINT_OPERANDS_TO_SUPPORT
private:
const Code& code_;
Value* arg_desc_;
};
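
A plausible use in the new IR prologues, inferred from the hand-written
sequences deleted above (which branched to the CallClosureNoSuchMethod stub on
argument mismatch); the names below are assumptions:

  // Unwind the frame and tail-call the noSuchMethod stub, forwarding the
  // caller's arguments descriptor.
  TailCallInstr* nsm_call = new (zone) TailCallInstr(
      nsm_stub_code,                     // const Code&, assumed to be at hand
      new (zone) Value(args_desc_def));  // e.g. a kArgDescriptor parameter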
 class PushArgumentInstr : public TemplateDefinition<1, NoThrow> {
  public:
   explicit PushArgumentInstr(Value* value) { SetInputAt(0, value); }

@@ -2745,11 +2890,12 @@ class AssertBooleanInstr : public TemplateDefinition<1, Throws, Pure> {
   DISALLOW_COPY_AND_ASSIGN(AssertBooleanInstr);
 };

-// Denotes a special parameter, currently either the context of a closure
-// or the type arguments of a generic function.
+// Denotes a special parameter, currently either the context of a closure,
+// the type arguments of a generic function or an arguments descriptor.
 class SpecialParameterInstr : public TemplateDefinition<0, NoThrow> {
  public:
-  enum SpecialParameterKind { kContext, kTypeArgs };
+  enum SpecialParameterKind { kContext, kTypeArgs, kArgDescriptor };

   SpecialParameterInstr(SpecialParameterKind kind, intptr_t deopt_id)
       : TemplateDefinition(deopt_id), kind_(kind) {}
@@ -2765,6 +2911,23 @@ class SpecialParameterInstr : public TemplateDefinition<0, NoThrow> {
   }
   SpecialParameterKind kind() const { return kind_; }

-  const char* ToCString() const;
+  PRINT_OPERANDS_TO_SUPPORT
static const char* KindToCString(SpecialParameterKind kind) {
switch (kind) {
case kContext:
return "kContext";
case kTypeArgs:
return "kTypeArgs";
case kArgDescriptor:
return "kArgDescriptor";
}
UNREACHABLE();
return NULL;
}
 private:
  const SpecialParameterKind kind_;
  DISALLOW_COPY_AND_ASSIGN(SpecialParameterInstr);

@@ -37,6 +37,42 @@ LocationSummary* Instruction::MakeCallSummary(Zone* zone) {
  return result;
}
DEFINE_BACKEND(LoadIndexedUnsafe, (Register out, Register index)) {
ASSERT(instr->RequiredInputRepresentation(0) == kTagged); // It is a Smi.
__ add(out, instr->base_reg(), Operand(index, LSL, 1));
__ ldr(out, Address(out, instr->offset()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
}
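The `Operand(index, LSL, 1)` above folds the Smi tag into the address computation. A quick check of the arithmetic, assuming a 32-bit target where kWordSize == 4 and kSmiTagSize == 1:

// index register = logical_index << 1           (tagged Smi, tag bit is 0)
// out = base + (index << 1) + offset
//     = base + (logical_index << 2) + offset
//     = base + logical_index * kWordSize + offset
The arm64 and x64 variants below shift by one more (LSL 2 / TIMES_4) because their word size is 8 bytes; ia32 uses TIMES_2 for its 4-byte slots.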
DEFINE_BACKEND(StoreIndexedUnsafe,
(NoLocation, Register index, Register value)) {
ASSERT(instr->RequiredInputRepresentation(
StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
__ add(TMP, instr->base_reg(), Operand(index, LSL, 1));
__ str(value, Address(TMP, instr->offset()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
}
DEFINE_BACKEND(TailCall,
(NoLocation,
Fixed<Register, ARGS_DESC_REG>,
Temp<Register> temp)) {
__ LoadObject(CODE_REG, instr->code());
__ LeaveDartFrame(); // The arguments are still on the stack.
__ ldr(temp, FieldAddress(CODE_REG, Code::entry_point_offset()));
__ bx(temp);
// Even though the TailCallInstr will be the last instruction in a basic
// block, the flow graph compiler will emit native code for other blocks after
// the one containing this instruction and needs to be able to use the pool.
// (The `LeaveDartFrame` above disables usages of the pool.)
__ set_constant_pool_allowed(true);
}
LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone,
                                                        bool opt) const {
  const intptr_t kNumInputs = 1;

@@ -36,6 +36,42 @@ LocationSummary* Instruction::MakeCallSummary(Zone* zone) {
  return result;
}
DEFINE_BACKEND(LoadIndexedUnsafe, (Register out, Register index)) {
ASSERT(instr->RequiredInputRepresentation(0) == kTagged); // It is a Smi.
__ add(out, instr->base_reg(), Operand(index, LSL, 2));
__ ldr(out, Address(out, instr->offset()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
}
DEFINE_BACKEND(StoreIndexedUnsafe,
(NoLocation, Register index, Register value)) {
ASSERT(instr->RequiredInputRepresentation(
StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
__ add(TMP, instr->base_reg(), Operand(index, LSL, 2));
__ str(value, Address(TMP, instr->offset()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
}
DEFINE_BACKEND(TailCall,
(NoLocation,
Fixed<Register, ARGS_DESC_REG>,
Temp<Register> temp)) {
__ LoadObject(CODE_REG, instr->code());
__ LeaveDartFrame(); // The arguments are still on the stack.
__ ldr(temp, FieldAddress(CODE_REG, Code::entry_point_offset()));
__ br(temp);
// Even though the TailCallInstr will be the last instruction in a basic
// block, the flow graph compiler will emit native code for other blocks after
// the one containing this instruction and needs to be able to use the pool.
// (The `LeaveDartFrame` above disables usages of the pool.)
__ set_constant_pool_allowed(true);
}
LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone,
                                                        bool opt) const {
  const intptr_t kNumInputs = 1;

@@ -253,6 +253,52 @@ EMIT_NATIVE_CODE(PolymorphicInstanceCall,
  __ PopLocal(locs()->out(0).reg());
}
EMIT_NATIVE_CODE(LoadIndexedUnsafe, 1, Location::RegisterLocation(0)) {
ASSERT(base_reg() == FPREG);
ASSERT(Utils::IsInt(8, offset_));
ASSERT(offset_ % kWordSize == 0);
const intptr_t slot_offset = offset_ / kWordSize;
ASSERT(-128 <= slot_offset && slot_offset < 128);
if (compiler->is_optimizing()) {
const Register index = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
__ LoadFpRelativeSlotOpt(result, index, slot_offset);
} else {
__ LoadFpRelativeSlot(slot_offset);
}
}
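A worked example of the ranges asserted above, assuming a 32-bit host where kWordSize == 4:

// offset_ = -8   ->  slot_offset = -8 / 4 = -2   (fits in a signed byte)
// offset_ = 200  ->  already fails the first assert, since IsInt(8, 200)
//                    requires -128 <= offset_ <= 127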
EMIT_NATIVE_CODE(StoreIndexedUnsafe, 2, Location::RegisterLocation(0)) {
ASSERT(base_reg() == FPREG);
ASSERT(Utils::IsInt(8, offset_));
if (compiler->is_optimizing()) {
const Register index = locs()->in(kIndexPos).reg();
const Register value = locs()->in(kValuePos).reg();
__ StoreFpRelativeSlotOpt(value, index, offset_ / kWordSize);
} else {
__ StoreFpRelativeSlot(offset_ / kWordSize);
}
}
EMIT_NATIVE_CODE(TailCall,
1,
Location::NoLocation(),
LocationSummary::kNoCall,
1) {
if (compiler->is_optimizing()) {
const Register arg_desc = locs()->in(0).reg();
const Register temp = locs()->temp(0).reg();
__ LoadConstant(temp, code());
__ TailCallOpt(arg_desc, temp);
} else {
__ PushConstant(code());
__ TailCall();
}
}
EMIT_NATIVE_CODE(Stop, 0) {
  __ Stop(message());
}
@@ -751,110 +797,123 @@ EMIT_NATIVE_CODE(LoadIndexed,
                 Location::RequiresRegister(),
                 LocationSummary::kNoCall,
                 1) {
  if (compiler->is_optimizing()) {
    ASSERT(compiler->is_optimizing());
    const Register array = locs()->in(0).reg();
    const Register index = locs()->in(1).reg();
    const Register temp = locs()->temp(0).reg();
    const Register result = locs()->out(0).reg();
    switch (class_id()) {
      case kArrayCid:
      case kImmutableArrayCid:
        __ LoadIndexed(result, array, index);
        break;
      case kTypedDataUint8ArrayCid:
      case kTypedDataUint8ClampedArrayCid:
      case kExternalOneByteStringCid:
      case kExternalTypedDataUint8ArrayCid:
      case kExternalTypedDataUint8ClampedArrayCid:
        ASSERT(index_scale() == 1);
        if (IsExternal()) {
          __ LoadIndexedExternalUint8(result, array, index);
        } else {
          __ LoadIndexedUint8(result, array, index);
        }
        break;
      case kTypedDataInt8ArrayCid:
        ASSERT(index_scale() == 1);
        if (IsExternal()) {
          __ LoadIndexedExternalInt8(result, array, index);
        } else {
          __ LoadIndexedInt8(result, array, index);
        }
        break;
      case kOneByteStringCid:
        ASSERT(index_scale() == 1);
        __ LoadIndexedOneByteString(result, array, index);
        break;
      case kTwoByteStringCid:
        if (index_scale() != 2) {
          // TODO(zra): Fix-up index.
          Unsupported(compiler);
          UNREACHABLE();
        }
        if (IsExternal()) {
          Unsupported(compiler);
          UNREACHABLE();
        }
        __ LoadIndexedTwoByteString(result, array, index);
        break;
      case kTypedDataInt32ArrayCid:
        ASSERT(representation() == kUnboxedInt32);
        if (IsExternal()) {
          Unsupported(compiler);
          UNREACHABLE();
        }
        if (index_scale() == 1) {
          __ LoadIndexedInt32(result, array, index);
        } else {
          __ ShlImm(temp, index, Utils::ShiftForPowerOfTwo(index_scale()));
          __ LoadIndexedInt32(result, array, temp);
        }
        break;
      case kTypedDataUint32ArrayCid:
        ASSERT(representation() == kUnboxedUint32);
        if (IsExternal()) {
          Unsupported(compiler);
          UNREACHABLE();
        }
        if (index_scale() == 1) {
          __ LoadIndexedUint32(result, array, index);
        } else {
          __ ShlImm(temp, index, Utils::ShiftForPowerOfTwo(index_scale()));
          __ LoadIndexedUint32(result, array, temp);
        }
        break;
      case kTypedDataFloat32ArrayCid:
        if (IsExternal()) {
          Unsupported(compiler);
          UNREACHABLE();
        }
        if (index_scale() == 1) {
          __ LoadIndexedFloat32(result, array, index);
        } else if (index_scale() == 4) {
          __ LoadIndexed4Float32(result, array, index);
        } else {
          __ ShlImm(temp, index, Utils::ShiftForPowerOfTwo(index_scale()));
          __ LoadIndexedFloat32(result, array, temp);
        }
        break;
      case kTypedDataFloat64ArrayCid:
        if (IsExternal()) {
          Unsupported(compiler);
          UNREACHABLE();
        }
        if (index_scale() == 1) {
          __ LoadIndexedFloat64(result, array, index);
        } else if (index_scale() == 8) {
          __ LoadIndexed8Float64(result, array, index);
        } else {
          __ ShlImm(temp, index, Utils::ShiftForPowerOfTwo(index_scale()));
          __ LoadIndexedFloat64(result, array, temp);
        }
        break;
      default:
        Unsupported(compiler);
        UNREACHABLE();
        break;
    }
  } else {
    switch (class_id()) {
      case kArrayCid:
      case kImmutableArrayCid:
        __ LoadIndexedTOS();
        break;
      default:
        Unsupported(compiler);
        UNREACHABLE();
        break;
    }
  }
}
@@ -1481,59 +1540,75 @@ EMIT_NATIVE_CODE(CheckClass, 1) {
}
EMIT_NATIVE_CODE(BinarySmiOp, 2, Location::RequiresRegister()) {
  if (compiler->is_optimizing()) {
    const Register left = locs()->in(0).reg();
    const Register right = locs()->in(1).reg();
    const Register out = locs()->out(0).reg();
    const bool can_deopt = CanDeoptimize();
    bool needs_nop = false;
    switch (op_kind()) {
      case Token::kADD:
        __ Add(out, left, right);
        needs_nop = true;
        break;
      case Token::kSUB:
        __ Sub(out, left, right);
        needs_nop = true;
        break;
      case Token::kMUL:
        __ Mul(out, left, right);
        needs_nop = true;
        break;
      case Token::kTRUNCDIV:
        ASSERT(can_deopt);
        __ Div(out, left, right);
        break;
      case Token::kBIT_AND:
        ASSERT(!can_deopt);
        __ BitAnd(out, left, right);
        break;
      case Token::kBIT_OR:
        ASSERT(!can_deopt);
        __ BitOr(out, left, right);
        break;
      case Token::kBIT_XOR:
        ASSERT(!can_deopt);
        __ BitXor(out, left, right);
        break;
      case Token::kMOD:
        __ Mod(out, left, right);
        needs_nop = true;
        break;
      case Token::kSHR:
        __ Shr(out, left, right);
        needs_nop = true;
        break;
      case Token::kSHL:
        __ Shl(out, left, right);
        needs_nop = true;
        break;
      default:
        UNREACHABLE();
    }
    if (can_deopt) {
      compiler->EmitDeopt(deopt_id(), ICData::kDeoptBinarySmiOp);
    } else if (needs_nop) {
      __ Nop(0);
    }
  } else {
    switch (op_kind()) {
      case Token::kADD:
        __ SmiAddTOS();
        break;
      case Token::kSUB:
        __ SmiSubTOS();
        break;
      case Token::kMUL:
        __ SmiMulTOS();
        break;
      default:
        UNIMPLEMENTED();
    }
  }
}
@@ -1851,8 +1926,6 @@ static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
                                     LocationSummary* locs,
                                     Token::Kind kind,
                                     BranchLabels labels) {
  Token::Kind comparison = kind;
  Condition condition = NEXT_IS_TRUE;
  if (labels.fall_through != labels.false_label) {
@@ -1864,8 +1937,36 @@ static Condition EmitSmiComparisonOp(FlowGraphCompiler* compiler,
    condition = NEXT_IS_FALSE;
    comparison = FlipCondition(kind);
  }
  if (compiler->is_optimizing()) {
    const Register left = locs->in(0).reg();
    const Register right = locs->in(1).reg();
    __ Emit(Bytecode::Encode(OpcodeForSmiCondition(comparison), left, right));
    return condition;
  } else {
    switch (kind) {
      case Token::kEQ:
        __ IfEqStrictTOS();
        break;
      case Token::kNE:
        __ IfNeStrictTOS();
        break;
      case Token::kLT:
        __ IfSmiLtTOS();
        break;
      case Token::kLTE:
        __ IfSmiLeTOS();
        break;
      case Token::kGT:
        __ IfSmiGtTOS();
        break;
      case Token::kGTE:
        __ IfSmiGeTOS();
        break;
      default:
        UNIMPLEMENTED();
    }
    return condition;
  }
}
static Condition EmitDoubleComparisonOp(FlowGraphCompiler* compiler,

@@ -37,6 +37,34 @@ LocationSummary* Instruction::MakeCallSummary(Zone* zone) {
  return result;
}
DEFINE_BACKEND(LoadIndexedUnsafe, (Register out, Register index)) {
ASSERT(instr->RequiredInputRepresentation(0) == kTagged); // It is a Smi.
__ movl(out, Address(instr->base_reg(), index, TIMES_2, instr->offset()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
}
DEFINE_BACKEND(StoreIndexedUnsafe,
(NoLocation, Register index, Register value)) {
ASSERT(instr->RequiredInputRepresentation(
StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
__ movl(Address(instr->base_reg(), index, TIMES_2, instr->offset()), value);
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
}
DEFINE_BACKEND(TailCall,
(NoLocation,
Fixed<Register, ARGS_DESC_REG>,
Temp<Register> temp)) {
__ LoadObject(CODE_REG, instr->code());
__ LeaveFrame(); // The arguments are still on the stack.
__ movl(temp, FieldAddress(CODE_REG, Code::entry_point_offset()));
__ jmp(temp);
}
LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone,
                                                        bool opt) const {
  const intptr_t kNumInputs = 1;

@@ -957,6 +957,17 @@ void ParameterInstr::PrintOperandsTo(BufferFormatter* f) const {
  f->Print("%" Pd, index());
}
void SpecialParameterInstr::PrintOperandsTo(BufferFormatter* f) const {
f->Print("%s", KindToCString(kind()));
}
const char* SpecialParameterInstr::ToCString() const {
char buffer[1024];
BufferFormatter bf(buffer, 1024);
PrintTo(&bf);
return Thread::Current()->zone()->MakeCopyOfString(buffer);
}
void CheckStackOverflowInstr::PrintOperandsTo(BufferFormatter* f) const {
  if (in_loop()) f->Print("depth %" Pd, loop_depth());
}
@@ -994,6 +1005,33 @@ void CatchBlockEntryInstr::PrintTo(BufferFormatter* f) const {
  }
}
void LoadIndexedUnsafeInstr::PrintOperandsTo(BufferFormatter* f) const {
f->Print("%s[", Assembler::RegisterName(base_reg()));
index()->PrintTo(f);
f->Print(" + %" Pd "]", offset());
}
void StoreIndexedUnsafeInstr::PrintOperandsTo(BufferFormatter* f) const {
f->Print("%s[", Assembler::RegisterName(base_reg()));
index()->PrintTo(f);
f->Print(" + %" Pd "], ", offset());
value()->PrintTo(f);
}
void TailCallInstr::PrintOperandsTo(BufferFormatter* f) const {
const char* name = "<unknown code>";
if (code_.IsStubCode()) {
name = StubCode::NameOfStub(code_.UncheckedEntryPoint());
} else {
const Object& owner = Object::Handle(code_.owner());
if (owner.IsFunction()) {
name = Function::Handle(Function::RawCast(owner.raw()))
.ToFullyQualifiedCString();
}
}
f->Print("%s", name);
}
void PushArgumentInstr::PrintOperandsTo(BufferFormatter* f) const {
  value()->PrintTo(f);
}

@@ -35,6 +35,36 @@ LocationSummary* Instruction::MakeCallSummary(Zone* zone) {
  return result;
}
DEFINE_BACKEND(LoadIndexedUnsafe, (Register out, Register index)) {
ASSERT(instr->RequiredInputRepresentation(0) == kTagged); // It is a Smi.
__ movq(out, Address(instr->base_reg(), index, TIMES_4, instr->offset()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
}
DEFINE_BACKEND(StoreIndexedUnsafe,
(NoLocation, Register index, Register value)) {
ASSERT(instr->RequiredInputRepresentation(
StoreIndexedUnsafeInstr::kIndexPos) == kTagged); // It is a Smi.
__ movq(Address(instr->base_reg(), index, TIMES_4, instr->offset()), value);
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
}
DEFINE_BACKEND(TailCall, (NoLocation, Fixed<Register, ARGS_DESC_REG>)) {
__ LoadObject(CODE_REG, instr->code());
__ LeaveDartFrame(); // The arguments are still on the stack.
__ jmp(FieldAddress(CODE_REG, Code::entry_point_offset()));
// Even though the TailCallInstr will be the last instruction in a basic
// block, the flow graph compiler will emit native code for other blocks after
// the one containing this instruction and needs to be able to use the pool.
// (The `LeaveDartFrame` above disables usages of the pool.)
__ set_constant_pool_allowed(true);
}
LocationSummary* PushArgumentInstr::MakeLocationSummary(Zone* zone,
                                                        bool opt) const {
  const intptr_t kNumInputs = 1;

@@ -141,6 +141,33 @@ struct NamedArgument {
  NamedArgument(String* name, Value* value) : name(name), value(value) {}
};
// Ensures we only inline callee graphs which are safe. There are certain
// instructions which cannot be inlined and we ensure here that we don't do
// that.
class CalleeGraphValidator : public AllStatic {
public:
static void Validate(FlowGraph* callee_graph) {
#ifdef DEBUG
for (BlockIterator block_it = callee_graph->reverse_postorder_iterator();
!block_it.Done(); block_it.Advance()) {
BlockEntryInstr* entry = block_it.Current();
for (ForwardInstructionIterator it(entry); !it.Done(); it.Advance()) {
Instruction* current = it.Current();
if (current->IsBranch()) {
current = current->AsBranch()->comparison();
}
// The following instructions are not safe to inline, since they make
// assumptions about the frame layout.
ASSERT(!current->IsTailCall());
ASSERT(!current->IsLoadIndexedUnsafe());
ASSERT(!current->IsStoreIndexedUnsafe());
}
}
#endif // DEBUG
}
};
// Helper to collect information about a callee graph when considering it for
// inlining.
class GraphInfoCollector : public ValueObject {
@@ -431,11 +458,13 @@ class CallSites : public ValueObject {
struct InlinedCallData {
  InlinedCallData(Definition* call,
                  const Array& arguments_descriptor,
                  intptr_t first_arg_index,  // 1 if type args are passed.
                  GrowableArray<Value*>* arguments,
                  const Function& caller,
                  intptr_t caller_inlining_id)
      : call(call),
        arguments_descriptor(arguments_descriptor),
        first_arg_index(first_arg_index),
        arguments(arguments),
        callee_graph(NULL),
@@ -445,6 +474,7 @@ struct InlinedCallData {
        caller_inlining_id(caller_inlining_id) {}

  Definition* call;
  const Array& arguments_descriptor;
  const intptr_t first_arg_index;
  GrowableArray<Value*>* arguments;
  FlowGraph* callee_graph;
@@ -571,29 +601,42 @@ static void ReplaceParameterStubs(Zone* zone,
    }
    SpecialParameterInstr* param = (*defns)[i]->AsSpecialParameter();
    if ((param != NULL) && param->HasUses()) {
      switch (param->kind()) {
        case SpecialParameterInstr::kContext: {
          ASSERT(!is_polymorphic);
          // We do not support polymorphic inlining of closure calls.
          ASSERT(call_data->call->IsClosureCall());
          LoadFieldInstr* context_load = new (zone) LoadFieldInstr(
              new Value((*arguments)[first_arg_index]->definition()),
              Closure::context_offset(),
              AbstractType::ZoneHandle(zone, AbstractType::null()),
              call_data->call->token_pos());
          context_load->set_is_immutable(true);
          context_load->set_ssa_temp_index(
              caller_graph->alloc_ssa_temp_index());
          context_load->InsertBefore(callee_entry->next());
          param->ReplaceUsesWith(context_load);
          break;
        }
        case SpecialParameterInstr::kTypeArgs: {
          Definition* type_args;
          if (first_arg_index > 0) {
            type_args = (*arguments)[0]->definition();
          } else {
            type_args = caller_graph->constant_null();
          }
          param->ReplaceUsesWith(type_args);
          break;
        }
        case SpecialParameterInstr::kArgDescriptor: {
          param->ReplaceUsesWith(
              caller_graph->GetConstant(call_data->arguments_descriptor));
          break;
        }
        default: {
          UNREACHABLE();
          break;
        }
      }
    }
  }
}
@@ -918,6 +961,8 @@ class CallSiteInliner : public ValueObject {
        {
          CSTAT_TIMER_SCOPE(thread(), graphinliner_build_timer);
          callee_graph = builder.BuildGraph();
          CalleeGraphValidator::Validate(callee_graph);
        }
      } else {
        FlowGraphBuilder builder(*parsed_function, *ic_data_array,
@@ -927,6 +972,8 @@ class CallSiteInliner : public ValueObject {
        {
          CSTAT_TIMER_SCOPE(thread(), graphinliner_build_timer);
          callee_graph = builder.BuildGraph();
          CalleeGraphValidator::Validate(callee_graph);
        }
      }
#ifdef DART_PRECOMPILER
@@ -989,8 +1036,6 @@ class CallSiteInliner : public ValueObject {
      // match.
      ASSERT(arguments->length() ==
             first_actual_param_index + function.NumParameters());

      // Update try-index of the callee graph.
      BlockEntryInstr* call_block = call_data->call->GetBlock();
@@ -1332,7 +1377,8 @@ class CallSiteInliner : public ValueObject {
        arguments.Add(call->PushArgumentAt(i)->value());
      }
      InlinedCallData call_data(
          call, Array::ZoneHandle(Z, call->GetArgumentsDescriptor()),
          call->FirstArgIndex(), &arguments, call_info[call_idx].caller(),
          call_info[call_idx].caller_graph->inlining_id());
      if (TryInlining(call->function(), call->argument_names(), &call_data)) {
        InlineCall(&call_data);
@@ -1380,8 +1426,11 @@ class CallSiteInliner : public ValueObject {
      for (int i = 0; i < call->ArgumentCount(); ++i) {
        arguments.Add(call->PushArgumentAt(i)->value());
      }
      const Array& arguments_descriptor =
          Array::ZoneHandle(Z, call->GetArgumentsDescriptor());
      InlinedCallData call_data(
          call, arguments_descriptor, call->FirstArgIndex(), &arguments,
          call_info[call_idx].caller(),
          call_info[call_idx].caller_graph->inlining_id());
      if (TryInlining(target, call->argument_names(), &call_data)) {
        InlineCall(&call_data);
@@ -1637,8 +1686,11 @@ bool PolymorphicInliner::TryInliningPoly(const TargetInfo& target_info) {
  for (int i = 0; i < call_->ArgumentCount(); ++i) {
    arguments.Add(call_->PushArgumentAt(i)->value());
  }
  const Array& arguments_descriptor =
      Array::ZoneHandle(Z, call_->instance_call()->GetArgumentsDescriptor());
  InlinedCallData call_data(call_, arguments_descriptor,
                            call_->instance_call()->FirstArgIndex(),
                            &arguments, caller_function_, caller_inlining_id_);
  Function& target = Function::ZoneHandle(zone(), target_info.target->raw());
  if (!owner_->TryInlining(target, call_->instance_call()->argument_names(),
                           &call_data)) {

@@ -103,7 +103,10 @@ FlowGraphAllocator::FlowGraphAllocator(const FlowGraph& flow_graph,
  // TODO(fschneider): Handle saving and restoring these registers when
  // generating intrinsic code.
  if (intrinsic_mode) {
#if !defined(TARGET_ARCH_DBC)
    blocked_cpu_registers_[ARGS_DESC_REG] = true;
#endif

#if !defined(TARGET_ARCH_IA32)
    // Need to preserve CODE_REG to be able to store the PC marker
    // and load the pool pointer.
@@ -674,17 +677,12 @@ void FlowGraphAllocator::ProcessInitialDefinition(Definition* defn,
  intptr_t range_end = range->End();
  if (defn->IsParameter()) {
    ParameterInstr* param = defn->AsParameter();
    intptr_t slot_index = param->index();
    ASSERT(slot_index >= 0);
    ASSERT((param->base_reg() == FPREG) || (param->base_reg() == SPREG));
    if (param->base_reg() == FPREG) {
      // Slot index for the rightmost fixed parameter is -1.
      slot_index -= flow_graph_.num_direct_parameters();
    }

#if defined(TARGET_ARCH_DBC)
range->set_assigned_location( range->set_assigned_location(
Location::StackSlot(slot_index, param->base_reg())); Location::StackSlot(slot_index, param->base_reg()));
range->set_spill_slot(Location::StackSlot(slot_index, param->base_reg())); range->set_spill_slot(Location::StackSlot(slot_index, param->base_reg()));
} else if (defn->IsSpecialParameter()) { } else if (defn->IsSpecialParameter()) {
SpecialParameterInstr* param = defn->AsSpecialParameter(); SpecialParameterInstr* param = defn->AsSpecialParameter();
ASSERT(param->kind() == SpecialParameterInstr::kArgDescriptor);
Location loc; Location loc;
#if defined(TARGET_ARCH_DBC) #if defined(TARGET_ARCH_DBC)
intptr_t slot_index = flow_graph_.num_copied_params(); loc = Location::ArgumentsDescriptorLocation();
if ((param->kind() == SpecialParameterInstr::kContext) && range->set_assigned_location(loc);
flow_graph_.isolate()->reify_generic_functions() &&
flow_graph_.function().IsGeneric()) {
// The first slot is used for function type arguments, either as their
// permanent location or as their temporary location when captured.
// So use the next one for the context.
// (see FlowGraphCompiler::EmitFrameEntry)
slot_index++;
}
loc = Location::RegisterLocation(slot_index);
#else #else
if (param->kind() == SpecialParameterInstr::kContext) { loc = Location::RegisterLocation(ARGS_DESC_REG);
loc = Location::RegisterLocation(CTX); range->set_assigned_location(loc);
} else {
ASSERT(param->kind() == SpecialParameterInstr::kTypeArgs);
loc = Location::StackSlot(flow_graph_.num_copied_params(), FPREG);
range->set_assigned_location(loc);
range->set_spill_slot(loc);
}
#endif // defined(TARGET_ARCH_DBC) #endif // defined(TARGET_ARCH_DBC)
if (loc.IsRegister()) { if (loc.IsRegister()) {
AssignSafepoints(defn, range); AssignSafepoints(defn, range);
range->set_assigned_location(loc);
if (range->End() > kNormalEntryPos) { if (range->End() > kNormalEntryPos) {
LiveRange* tail = range->SplitAt(kNormalEntryPos); LiveRange* tail = range->SplitAt(kNormalEntryPos);
CompleteRange(tail, Location::kRegister); CompleteRange(tail, Location::kRegister);
@@ -3002,9 +2983,8 @@ void FlowGraphAllocator::AllocateRegisters() {
  // introducing a separate field. It has roughly the same meaning:
  // number of used registers determines how big of a frame to reserve for
  // this function on DBC stack.
  entry->set_spill_slot_count((last_used_cpu_register + 1) +
                              (last_used_fpu_register + 1));
#endif

  if (FLAG_print_ssa_liveranges) {

@@ -179,6 +179,10 @@ const char* Location::Name() const {
        return "0";
      }
      UNREACHABLE();
#if TARGET_ARCH_DBC
    case kArgsDescRegister:
      return "ArgDesc";
#endif
    default:
      if (IsConstant()) {
        return "C";

@@ -51,8 +51,11 @@ class Location : public ValueObject {
 private:
  enum {
    // Number of bits required to encode Kind value.
    kKindBitsPos = 0,
    kKindBitsSize = 5,
    kPayloadBitsPos = kKindBitsPos + kKindBitsSize,
    kPayloadBitsSize = kBitsPerWord - kPayloadBitsPos,
  };

  static const uword kInvalidLocation = 0;
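A minimal standalone sketch of this kind/payload packing (plain C++ mirroring the constants above; not VM code):

#include <cassert>
#include <cstdint>

constexpr uint32_t kKindBitsPos = 0;
constexpr uint32_t kKindBitsSize = 5;
constexpr uint32_t kPayloadBitsPos = kKindBitsPos + kKindBitsSize;

uint32_t Encode(uint32_t kind, uint32_t payload) {
  assert(kind < (1u << kKindBitsSize));  // kind must fit in the 5-bit field
  return (payload << kPayloadBitsPos) | (kind << kKindBitsPos);
}
uint32_t DecodeKind(uint32_t value) {
  return (value >> kKindBitsPos) & ((1u << kKindBitsSize) - 1);
}
uint32_t DecodePayload(uint32_t value) { return value >> kPayloadBitsPos; }

Widening the kind field from 4 to 5 bits leaves headroom above kArgsDescRegister (15) below, which sits at the very top of the old 4-bit range.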
@@ -94,6 +97,12 @@ class Location : public ValueObject {
    // FpuRegister location represents a fixed fpu register. Payload contains
    // its code.
    kFpuRegister = 12,

#ifdef TARGET_ARCH_DBC
    // We use this to signify a special `Location` where the arguments
    // descriptor can be found on DBC.
    kArgsDescRegister = 15,
#endif
  };

  Location() : value_(kInvalidLocation) {
@@ -120,6 +129,11 @@ class Location : public ValueObject {
    COMPILE_ASSERT((kFpuRegister & kLocationTagMask) != kConstantTag);
    COMPILE_ASSERT((kFpuRegister & kLocationTagMask) != kPairLocationTag);
#ifdef TARGET_ARCH_DBC
    COMPILE_ASSERT((kArgsDescRegister & kLocationTagMask) != kConstantTag);
    COMPILE_ASSERT((kArgsDescRegister & kLocationTagMask) != kPairLocationTag);
#endif

    // Verify tags and tagmask.
    COMPILE_ASSERT((kConstantTag & kLocationTagMask) == kConstantTag);
@@ -233,6 +247,14 @@ class Location : public ValueObject {
  bool IsFpuRegister() const { return kind() == kFpuRegister; }

#ifdef TARGET_ARCH_DBC
  static Location ArgumentsDescriptorLocation() {
    return Location(kArgsDescRegister, 0);
  }

  bool IsArgsDescRegister() const { return kind() == kArgsDescRegister; }
#endif

  FpuRegister fpu_reg() const {
    ASSERT(IsFpuRegister());
    return static_cast<FpuRegister>(payload());
@@ -356,9 +378,10 @@ class Location : public ValueObject {
  uword payload() const { return PayloadField::decode(value_); }

  class KindField : public BitField<uword, Kind, kKindBitsPos, kKindBitsSize> {
  };
  class PayloadField
      : public BitField<uword, uword, kPayloadBitsPos, kPayloadBitsSize> {};

  // Layout for kUnallocated locations payload.
  typedef BitField<uword, Policy, 0, 3> PolicyField;
@@ -369,7 +392,7 @@ class Location : public ValueObject {
#else
  static const intptr_t kBitsForBaseReg = 5;
#endif
  static const intptr_t kBitsForStackIndex = kPayloadBitsSize - kBitsForBaseReg;
  class StackSlotBaseField
      : public BitField<uword, Register, 0, kBitsForBaseReg> {};
  class StackIndexField

@@ -78,6 +78,10 @@ struct LocationTrait;
// register and the instruction will produce output in the same register.
struct SameAsFirstInput {};
// Marker type used to signal that output has NoLocation register
// constraint.
struct NoLocation {};
// Marker type used to signal that this input, output or temp needs to
// be in a fixed register `reg` of type `R` (either Register or FpuRegister).
template <typename R, R reg>
@@ -211,6 +215,15 @@ struct LocationTrait<SameAsFirstInput> {
  static Location ToConstraint() { return Location::SameAsFirstInput(); }
};
template <>
struct LocationTrait<NoLocation> {
static const bool kIsTemp = false; // This is not a temporary.
static NoLocation Unwrap(const Location& loc) { return NoLocation(); }
static Location ToConstraint() { return Location::NoLocation(); }
};
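These traits are what let the DEFINE_BACKEND declarations earlier in this CL spell out register constraints directly in the parameter list; NoLocation expands to Location::NoLocation() for the output. A hypothetical declaration (the instruction name is a placeholder):

// Hypothetical backend: no output location, one input pinned to
// ARGS_DESC_REG, and one plain register temp.
DEFINE_BACKEND(MyFrameOp,
               (NoLocation,
                Fixed<Register, ARGS_DESC_REG>,
                Temp<Register> temp)) {
  // ... emit native code using the pinned input and `temp` ...
}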
// Auxiliary types and macro helpers to construct lists of types.
// TODO(vegorov) rewrite this using variadic templates when we enable C++11

@@ -3152,7 +3152,7 @@ void TryCatchAnalyzer::Optimize(FlowGraph* flow_graph) {
  // at the catch-entry with this constant.
  const GrowableArray<CatchBlockEntryInstr*>& catch_entries =
      flow_graph->graph_entry()->catch_entries();

  for (intptr_t catch_idx = 0; catch_idx < catch_entries.length();
       ++catch_idx) {
    CatchBlockEntryInstr* catch_entry = catch_entries[catch_idx];
@@ -3169,11 +3169,13 @@ void TryCatchAnalyzer::Optimize(FlowGraph* flow_graph) {
    // exception_var and stacktrace_var are never constant. In asynchronous or
    // generator functions they may be context-allocated in which case they are
    // not tracked in the environment anyway.
    const intptr_t parameter_count = flow_graph->num_direct_parameters();
    if (!catch_entry->exception_var().is_captured()) {
      cdefs[catch_entry->exception_var().BitIndexIn(parameter_count)] = NULL;
    }
    if (!catch_entry->stacktrace_var().is_captured()) {
      cdefs[catch_entry->stacktrace_var().BitIndexIn(parameter_count)] = NULL;
    }

    for (BlockIterator block_it = flow_graph->reverse_postorder_iterator();

@@ -1021,6 +1021,8 @@ CompileType SpecialParameterInstr::ComputeType() const {
      return CompileType::FromCid(kContextCid);
    case kTypeArgs:
      return CompileType::FromCid(kTypeArgumentsCid);
    case kArgDescriptor:
      return CompileType::FromCid(kImmutableArrayCid);
  }
  UNREACHABLE();
  return CompileType::Dynamic();

@@ -75,6 +75,8 @@ compiler_sources = [
  "frontend/kernel_binary_flowgraph.h",
  "frontend/kernel_to_il.cc",
  "frontend/kernel_to_il.h",
  "frontend/prologue_builder.cc",
  "frontend/prologue_builder.h",
  "intrinsifier.cc",
  "intrinsifier.h",
  "intrinsifier_arm.cc",

@@ -14,6 +14,8 @@
#include "vm/compiler/backend/flow_graph_compiler.h"
#include "vm/compiler/backend/il.h"
#include "vm/compiler/backend/il_printer.h"
#include "vm/compiler/frontend/kernel_to_il.h"
#include "vm/compiler/frontend/prologue_builder.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/exceptions.h"
#include "vm/flags.h"
@@ -302,12 +304,6 @@ FlowGraphBuilder::FlowGraphBuilder(
    : parsed_function_(parsed_function),
      ic_data_array_(ic_data_array),
      context_level_array_(context_level_array),
      num_stack_locals_(parsed_function.num_stack_locals()),
      exit_collector_(exit_collector),
      last_used_block_id_(0),  // 0 is used for the graph entry.
@@ -2109,8 +2105,7 @@ void EffectGraphVisitor::VisitAwaitMarkerNode(AwaitMarkerNode* node) {

intptr_t EffectGraphVisitor::GetCurrentTempLocalIndex() const {
  return kFirstLocalSlotFromFp - owner()->num_stack_locals() -
         owner()->args_pushed() - owner()->temp_count() + 1;
}

LocalVariable* EffectGraphVisitor::EnterTempLocalScope(Value* value) {
@@ -3326,7 +3321,7 @@ void EffectGraphVisitor::VisitNativeBodyNode(NativeBodyNode* node) {
  ZoneGrowableArray<PushArgumentInstr*>& args =
      *new (Z) ZoneGrowableArray<PushArgumentInstr*>(function.NumParameters());
  for (intptr_t i = 0; i < function.NumParameters(); ++i) {
    LocalVariable* parameter = pf.RawParameterVariable(i);
    Value* value = Bind(new (Z) LoadLocalInstr(*parameter, node->token_pos()));
    args.Add(PushArgument(value));
  }
@@ -3736,37 +3731,26 @@ void EffectGraphVisitor::VisitSequenceNode(SequenceNode* node) {
  if (is_top_level_sequence) {
    ASSERT(scope->context_level() == 1);
    const int num_params = function.NumParameters();
    for (int pos = 0; pos < num_params; pos++) {
      const LocalVariable& parameter = *scope->VariableAt(pos);
      ASSERT(parameter.owner() == scope);
      if (parameter.is_captured()) {
        LocalVariable& raw_parameter =
            *owner_->parsed_function().RawParameterVariable(pos);
        ASSERT((function.HasOptionalParameters() &&
                raw_parameter.owner() == scope) ||
               !(function.HasOptionalParameters() &&
                 raw_parameter.owner() == NULL));
        ASSERT(!raw_parameter.is_captured());

        // Copy parameter from local frame to current context.
        Value* load = Bind(BuildLoadLocal(raw_parameter, node->token_pos()));
        Do(BuildStoreLocal(parameter, load, ST(node->token_pos())));
        // Write NULL to the source location to detect buggy accesses and
        // allow GC of passed value if it gets overwritten by a new value in
        // the function.
        Value* null_constant = Bind(
            new (Z) ConstantInstr(Object::ZoneHandle(Z, Object::null())));
        Do(BuildStoreLocal(raw_parameter, null_constant,
                           ST(node->token_pos())));
      }
    }
@@ -3784,23 +3768,9 @@ void EffectGraphVisitor::VisitSequenceNode(SequenceNode* node) {
    LocalVariable* parent_type_args_var =
        parsed_function.parent_type_arguments();
    if (type_args_var->is_captured() || (parent_type_args_var != NULL)) {
      LocalVariable* raw_type_args =
          parsed_function.RawTypeArgumentsVariable();
      Value* type_args_val =
          Bind(BuildLoadLocal(*raw_type_args, node->token_pos()));
      if (parent_type_args_var != NULL) {
        ASSERT(parent_type_args_var->owner() != scope);
        // Call the runtime to concatenate both vectors.
@@ -3828,21 +3798,6 @@ void EffectGraphVisitor::VisitSequenceNode(SequenceNode* node) {
                               ICData::kStatic));
      }
      Do(BuildStoreLocal(*type_args_var, type_args_val, ST(node->token_pos())));
    }
  }
@@ -4412,11 +4367,22 @@ FlowGraph* FlowGraphBuilder::BuildGraph() {
  }
  TargetEntryInstr* normal_entry = new (Z) TargetEntryInstr(
      AllocateBlockId(), CatchClauseNode::kInvalidTryIndex, GetNextDeoptId());

  // Generate optional positional/named argument copying!
  const bool compiling_for_osr = osr_id_ != Compiler::kNoOSRDeoptId;

  kernel::PrologueBuilder prologue_builder(
      &parsed_function_, last_used_block_id_, compiling_for_osr, IsInlining());
  PrologueInfo prologue_info(-1, -1);
  BlockEntryInstr* instruction_cursor =
      prologue_builder.BuildPrologue(normal_entry, &prologue_info);
  last_used_block_id_ = prologue_builder.last_used_block_id();

  graph_entry_ =
      new (Z) GraphEntryInstr(parsed_function(), normal_entry, osr_id_);
  EffectGraphVisitor for_effect(this);
  parsed_function().node_sequence()->Visit(&for_effect);
  AppendFragment(instruction_cursor, for_effect);
  // Check that the graph is properly terminated.
  ASSERT(!for_effect.is_open());
@@ -4428,7 +4394,6 @@ FlowGraph* FlowGraphBuilder::BuildGraph() {
    graph_entry_->RelinkToOsrEntry(Z, last_used_block_id_);
  }

  FlowGraph* graph = new (Z) FlowGraph(parsed_function(), graph_entry_,
                                       last_used_block_id_, prologue_info);
  graph->set_await_token_positions(await_token_positions_);

@@ -140,8 +140,6 @@ class FlowGraphBuilder : public ValueObject {
  GraphEntryInstr* graph_entry() const { return graph_entry_; }

  intptr_t num_stack_locals() const { return num_stack_locals_; }

  bool IsInlining() const { return (exit_collector_ != NULL); }
@@ -187,10 +185,7 @@ class FlowGraphBuilder : public ValueObject {
  friend class Intrinsifier;

  intptr_t parameter_count() const {
    return parsed_function_.function().NumParameters();
  }

  const ParsedFunction& parsed_function_;
@@ -198,9 +193,7 @@ class FlowGraphBuilder : public ValueObject {
  // Contains (deopt_id, context_level) pairs.
  ZoneGrowableArray<intptr_t>* context_level_array_;

  const intptr_t num_stack_locals_;
  InlineExitCollector* const exit_collector_;
  intptr_t last_used_block_id_;

@@ -777,6 +777,11 @@ ScopeBuildingResult* StreamingScopeBuilder::BuildScopes() {
    parsed_function_->set_function_type_arguments(type_args_var);
  }

  if (parsed_function_->has_arg_desc_var()) {
    needs_expr_temp_ = true;
    scope_->AddVariable(parsed_function_->arg_desc_var());
  }

  LocalVariable* context_var = parsed_function_->current_context_var();
  context_var->set_is_forced_stack();
  scope_->AddVariable(context_var);
@@ -3838,14 +3843,20 @@ Fragment StreamingFlowGraphBuilder::BuildInitializers(

FlowGraph* StreamingFlowGraphBuilder::BuildGraphOfImplicitClosureFunction(
    const Function& function) {
  // The prologue builder needs the default parameter values.
  SetupDefaultParameterValues();

  const Function& target = Function::ZoneHandle(Z, function.parent_function());

  TargetEntryInstr* normal_entry = flow_graph_builder_->BuildTargetEntry();
  PrologueInfo prologue_info(-1, -1);
  BlockEntryInstr* instruction_cursor =
      flow_graph_builder_->BuildPrologue(normal_entry, &prologue_info);

  flow_graph_builder_->graph_entry_ = new (Z) GraphEntryInstr(
      *parsed_function(), normal_entry, Compiler::kNoOSRDeoptId);

  Fragment body(instruction_cursor);
  body += flow_graph_builder_->CheckStackOverflowInPrologue();

  intptr_t type_args_len = 0;
@@ -3905,32 +3916,24 @@ FlowGraph* StreamingFlowGraphBuilder::BuildGraphOfImplicitClosureFunction(
  // Return the result.
  body += Return(function_node_helper.end_position_);

  return new (Z)
      FlowGraph(*parsed_function(), flow_graph_builder_->graph_entry_,
                flow_graph_builder_->last_used_block_id_, prologue_info);
}
LocalVariable* StreamingFlowGraphBuilder::LookupParameterDirect(
intptr_t kernel_offset,
intptr_t parameter_index) {
LocalVariable* var = LookupVariable(kernel_offset);
LocalVariable* parameter =
new (Z) LocalVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
Symbols::TempParam(), var->type());
parameter->set_index(parameter_index);
if (var->is_captured()) parameter->set_is_captured_parameter(true);
return parameter;
}
FlowGraph* StreamingFlowGraphBuilder::BuildGraphOfFunction(bool constructor) {
  // The prologue builder needs the default parameter values.
  SetupDefaultParameterValues();

  const Function& dart_function = parsed_function()->function();
  TargetEntryInstr* normal_entry = flow_graph_builder_->BuildTargetEntry();
  PrologueInfo prologue_info(-1, -1);
  BlockEntryInstr* instruction_cursor =
      flow_graph_builder_->BuildPrologue(normal_entry, &prologue_info);

  flow_graph_builder_->graph_entry_ = new (Z) GraphEntryInstr(
      *parsed_function(), normal_entry, flow_graph_builder_->osr_id_);

  Fragment body;
  if (dart_function.IsConvertedClosureFunction()) {
@@ -3998,29 +4001,29 @@ FlowGraph* StreamingFlowGraphBuilder::BuildGraphOfFunction(bool constructor) {
    intptr_t parameter_count = dart_function.NumParameters();
    intptr_t parameter_index = parsed_function()->first_parameter_index();

    const ParsedFunction& pf = *flow_graph_builder_->parsed_function_;
    const Function& function = pf.function();

    for (intptr_t i = 0; i < parameter_count; ++i, --parameter_index) {
      LocalVariable* variable = scope->VariableAt(i);
      if (variable->is_captured()) {
        LocalVariable& raw_parameter = *pf.RawParameterVariable(i);
        ASSERT((function.HasOptionalParameters() &&
                raw_parameter.owner() == scope) ||
               (!function.HasOptionalParameters() &&
                raw_parameter.owner() == NULL));
        ASSERT(!raw_parameter.is_captured());

        // Copy the parameter from the stack to the context. Overwrite it
        // with a null constant on the stack so the original value is
        // eligible for garbage collection.
        body += LoadLocal(context);
        body += LoadLocal(&raw_parameter);
        body += flow_graph_builder_->StoreInstanceField(
            TokenPosition::kNoSource,
            Context::variable_offset(variable->index()));
        body += NullConstant();
        body += StoreLocal(TokenPosition::kNoSource, &raw_parameter);
        body += Drop();
      }
    }
@ -4287,7 +4290,7 @@ FlowGraph* StreamingFlowGraphBuilder::BuildGraphOfFunction(bool constructor) {
flow_graph_builder_->context_depth_ = current_context_depth; flow_graph_builder_->context_depth_ = current_context_depth;
} }
normal_entry->LinkTo(body.entry); instruction_cursor->LinkTo(body.entry);
GraphEntryInstr* graph_entry = flow_graph_builder_->graph_entry_; GraphEntryInstr* graph_entry = flow_graph_builder_->graph_entry_;
// When compiling for OSR, use a depth first search to find the OSR // When compiling for OSR, use a depth first search to find the OSR
@ -4298,7 +4301,6 @@ FlowGraph* StreamingFlowGraphBuilder::BuildGraphOfFunction(bool constructor) {
graph_entry->RelinkToOsrEntry(Z, graph_entry->RelinkToOsrEntry(Z,
flow_graph_builder_->last_used_block_id_ + 1); flow_graph_builder_->last_used_block_id_ + 1);
} }
PrologueInfo prologue_info(-1, -1);
return new (Z) return new (Z)
FlowGraph(*parsed_function(), graph_entry, FlowGraph(*parsed_function(), graph_entry,
flow_graph_builder_->last_used_block_id_, prologue_info); flow_graph_builder_->last_used_block_id_, prologue_info);


@@ -1033,8 +1033,6 @@ class StreamingFlowGraphBuilder {
   const TypeArguments& PeekArgumentsInstantiatedType(const Class& klass);
   intptr_t PeekArgumentsCount();
-  LocalVariable* LookupParameterDirect(intptr_t kernel_offset,
-                                       intptr_t parameter_index);
   LocalVariable* LookupVariable(intptr_t kernel_offset);
   LocalVariable* MakeTemporary();
   RawFunction* LookupMethodByMember(NameIndex target,


@@ -7,7 +7,9 @@
 #include "vm/compiler/frontend/kernel_to_il.h"
 #include "vm/compiler/backend/il.h"
+#include "vm/compiler/backend/il_printer.h"
 #include "vm/compiler/frontend/kernel_binary_flowgraph.h"
+#include "vm/compiler/frontend/prologue_builder.h"
 #include "vm/compiler/jit/compiler.h"
 #include "vm/kernel_loader.h"
 #include "vm/longjump.h"
@@ -1272,6 +1274,12 @@ Fragment BaseFlowGraphBuilder::ThrowException(TokenPosition position) {
   return instructions;
 }
+
+Fragment BaseFlowGraphBuilder::TailCall(const Code& code) {
+  Fragment instructions;
+  Value* arg_desc = Pop();
+  return Fragment(new (Z) TailCallInstr(code, arg_desc));
+}
 Fragment FlowGraphBuilder::RethrowException(TokenPosition position,
                                             int catch_try_index) {
   Fragment instructions;
@@ -1755,7 +1763,7 @@ LocalVariable* BaseFlowGraphBuilder::MakeTemporary() {
                     symbol_name, Object::dynamic_type());
   // Set the index relative to the base of the expression stack including
   // outgoing arguments.
-  variable->set_index(parsed_function_->first_stack_local_index() -
+  variable->set_index(kFirstLocalSlotFromFp -
                       parsed_function_->num_stack_locals() -
                       pending_argument_count_ - index);
@@ -2008,8 +2016,7 @@ Fragment FlowGraphBuilder::NativeFunctionBody(intptr_t first_positional_offset,
     default: {
       String& name = String::ZoneHandle(Z, function.native_name());
       for (intptr_t i = 0; i < function.NumParameters(); ++i) {
-        body += LoadLocal(
-            parsed_function_->node_sequence()->scope()->VariableAt(i));
+        body += LoadLocal(parsed_function_->RawParameterVariable(i));
         body += PushArgument();
       }
       body += NativeCall(&name, &function);
@@ -2217,6 +2224,20 @@ Fragment FlowGraphBuilder::AssertSubtype(TokenPosition position,
   return instructions;
 }
+
+BlockEntryInstr* FlowGraphBuilder::BuildPrologue(TargetEntryInstr* normal_entry,
+                                                 PrologueInfo* prologue_info) {
+  const bool compiling_for_osr = IsCompiledForOsr();
+
+  kernel::PrologueBuilder prologue_builder(
+      parsed_function_, last_used_block_id_, compiling_for_osr, IsInlining());
+  BlockEntryInstr* instruction_cursor =
+      prologue_builder.BuildPrologue(normal_entry, prologue_info);
+
+  last_used_block_id_ = prologue_builder.last_used_block_id();
+
+  return instruction_cursor;
+}
 FlowGraph* FlowGraphBuilder::BuildGraphOfMethodExtractor(
     const Function& method) {
   // A method extractor is the implicit getter for a method.
@@ -2243,6 +2264,9 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfNoSuchMethodDispatcher(
   // the arguments descriptor at a call site.
   TargetEntryInstr* normal_entry = BuildTargetEntry();
+  PrologueInfo prologue_info(-1, -1);
+  BlockEntryInstr* instruction_cursor =
+      BuildPrologue(normal_entry, &prologue_info);
   graph_entry_ = new (Z)
       GraphEntryInstr(*parsed_function_, normal_entry, Compiler::kNoOSRDeoptId);
@@ -2260,7 +2284,7 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfNoSuchMethodDispatcher(
   }
   parsed_function_->set_default_parameter_values(default_values);
-  Fragment body(normal_entry);
+  Fragment body(instruction_cursor);
   body += CheckStackOverflowInPrologue();
   // The receiver is the first argument to noSuchMethod, and it is the first
@@ -2347,7 +2371,6 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfNoSuchMethodDispatcher(
                            /* argument_count = */ 2, ICData::kNSMDispatch);
   body += Return(TokenPosition::kNoSource);
-  PrologueInfo prologue_info(-1, -1);
   return new (Z) FlowGraph(*parsed_function_, graph_entry_, last_used_block_id_,
                            prologue_info);
 }
@@ -2390,10 +2413,13 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfInvokeFieldDispatcher(
   parsed_function_->set_default_parameter_values(default_values);
   TargetEntryInstr* normal_entry = BuildTargetEntry();
+  PrologueInfo prologue_info(-1, -1);
+  BlockEntryInstr* instruction_cursor =
+      BuildPrologue(normal_entry, &prologue_info);
   graph_entry_ = new (Z)
       GraphEntryInstr(*parsed_function_, normal_entry, Compiler::kNoOSRDeoptId);
-  Fragment body(normal_entry);
+  Fragment body(instruction_cursor);
   body += CheckStackOverflowInPrologue();
   LocalScope* scope = parsed_function_->node_sequence()->scope();
@@ -2448,7 +2474,6 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfInvokeFieldDispatcher(
   body += Return(TokenPosition::kNoSource);
-  PrologueInfo prologue_info(-1, -1);
   return new (Z) FlowGraph(*parsed_function_, graph_entry_, last_used_block_id_,
                            prologue_info);
 }
@@ -2482,6 +2507,43 @@ ArgumentArray BaseFlowGraphBuilder::GetArguments(int count) {
   return arguments;
 }
+
+Fragment BaseFlowGraphBuilder::SmiRelationalOp(Token::Kind kind) {
+  Value* right = Pop();
+  Value* left = Pop();
+  RelationalOpInstr* instr = new (Z) RelationalOpInstr(
+      TokenPosition::kNoSource, kind, left, right, kSmiCid, GetNextDeoptId());
+  Push(instr);
+  return Fragment(instr);
+}
+
+Fragment BaseFlowGraphBuilder::SmiBinaryOp(Token::Kind kind,
+                                           bool is_truncating) {
+  Value* right = Pop();
+  Value* left = Pop();
+  BinarySmiOpInstr* instr =
+      new (Z) BinarySmiOpInstr(kind, left, right, GetNextDeoptId());
+  if (is_truncating) {
+    instr->mark_truncating();
+  }
+  Push(instr);
+  return Fragment(instr);
+}
+
+Fragment BaseFlowGraphBuilder::LoadFpRelativeSlot(intptr_t offset) {
+  LoadIndexedUnsafeInstr* instr = new (Z) LoadIndexedUnsafeInstr(Pop(), offset);
+  Push(instr);
+  return Fragment(instr);
+}
+
+Fragment BaseFlowGraphBuilder::StoreFpRelativeSlot(intptr_t offset) {
+  Value* value = Pop();
+  Value* index = Pop();
+  StoreIndexedUnsafeInstr* instr =
+      new (Z) StoreIndexedUnsafeInstr(index, value, offset);
+  Push(instr);
+  return Fragment(instr);
+}
 RawObject* EvaluateMetadata(const Field& metadata_field) {
   LongJumpScope jump;
   if (setjmp(*jump.Set()) == 0) {
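
[Editor's note] Throughout kernel_to_il.cc above, straight-line IR is stitched together with `body += ...`. If the Fragment idiom is unfamiliar, here is a minimal standalone model of it; Instruction and the linking strategy are invented stand-ins, not the VM's classes.

#include <cassert>
#include <string>

struct Instruction {
  explicit Instruction(const char* n) : name(n) {}
  std::string name;
  Instruction* next = nullptr;
};

struct Fragment {
  Instruction* entry = nullptr;    // first instruction of the fragment
  Instruction* current = nullptr;  // instruction new code is appended to

  Fragment() = default;
  explicit Fragment(Instruction* instr) : entry(instr), current(instr) {}

  Fragment& operator+=(Instruction* instr) {
    if (entry == nullptr) {
      entry = current = instr;
    } else {
      current->next = instr;  // link onto the end of the straight-line code
      current = instr;
    }
    return *this;
  }
};

int main() {
  Fragment body;
  body += new Instruction("CheckStackOverflow");
  body += new Instruction("LoadLocal");
  body += new Instruction("Return");
  assert(body.entry->name == "CheckStackOverflow");
  assert(body.current->name == "Return");
  // Leaking is fine for a demo; the real VM zone-allocates instructions.
  return 0;
}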


@@ -568,7 +568,10 @@ class BaseFlowGraphBuilder {
   Fragment IntConstant(int64_t value);
   Fragment Constant(const Object& value);
   Fragment NullConstant();
+  Fragment SmiRelationalOp(Token::Kind kind);
+  Fragment SmiBinaryOp(Token::Kind op, bool is_truncating = false);
   Fragment LoadFpRelativeSlot(intptr_t offset);
+  Fragment StoreFpRelativeSlot(intptr_t offset);
   Fragment BranchIfTrue(TargetEntryInstr** then_entry,
                         TargetEntryInstr** otherwise_entry,
                         bool negate = false);
@@ -620,6 +623,7 @@ class BaseFlowGraphBuilder {
   friend class TryCatchBlock;
   friend class StreamingFlowGraphBuilder;
   friend class FlowGraphBuilder;
+  friend class PrologueBuilder;
 };
 class FlowGraphBuilder : public BaseFlowGraphBuilder {
@@ -638,8 +642,7 @@ class FlowGraphBuilder : public BaseFlowGraphBuilder {
  private:
   BlockEntryInstr* BuildPrologue(TargetEntryInstr* normal_entry,
-                                 intptr_t* min_prologue_block_id,
-                                 intptr_t* max_prologue_block_id);
+                                 PrologueInfo* prologue_info);
   FlowGraph* BuildGraphOfMethodExtractor(const Function& method);
   FlowGraph* BuildGraphOfNoSuchMethodDispatcher(const Function& function);
View file

@ -0,0 +1,483 @@
// Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/compiler/frontend/prologue_builder.h"
#include "vm/compiler/backend/il.h"
#include "vm/compiler/backend/il_printer.h"
#include "vm/compiler/frontend/kernel_binary_flowgraph.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/kernel_loader.h"
#include "vm/longjump.h"
#include "vm/object_store.h"
#include "vm/report.h"
#include "vm/resolver.h"
#include "vm/stack_frame.h"
#if !defined(DART_PRECOMPILED_RUNTIME)
namespace dart {
namespace kernel {
#define Z (zone_)
BlockEntryInstr* PrologueBuilder::BuildPrologue(BlockEntryInstr* entry,
PrologueInfo* prologue_info) {
Isolate* isolate = Isolate::Current();
const bool strong = isolate->strong();
// We always have to build the graph, but we only link it sometimes.
const bool link = !is_inlining_ && !compiling_for_osr_;
const intptr_t previous_block_id = last_used_block_id_;
const bool load_optional_arguments = function_.HasOptionalParameters();
const bool expect_type_args =
function_.IsGeneric() && isolate->reify_generic_functions();
const bool check_arguments =
(function_.IsClosureFunction() || function_.IsConvertedClosureFunction());
Fragment prologue = Fragment(entry);
JoinEntryInstr* nsm = NULL;
if (load_optional_arguments || check_arguments || expect_type_args) {
nsm = BuildThrowNoSuchMethod();
}
if (check_arguments) {
Fragment f = BuildTypeArgumentsLengthCheck(strong, nsm, expect_type_args);
if (link) prologue += f;
}
if (load_optional_arguments) {
Fragment f = BuildOptionalParameterHandling(strong, nsm);
if (link) prologue += f;
} else if (check_arguments) {
Fragment f = BuildFixedParameterLengthChecks(strong, nsm);
if (link) prologue += f;
}
if (function_.IsClosureFunction()) {
Fragment f = BuildClosureContextHandling();
if (!compiling_for_osr_) prologue += f;
}
if (expect_type_args) {
Fragment f = BuildTypeArgumentsHandling(strong);
if (link) prologue += f;
}
const bool is_empty_prologue = prologue.entry == prologue.current;
// Always do this to preserve deoptid numbering.
JoinEntryInstr* normal_code = BuildJoinEntry();
prologue += Goto(normal_code);
if (is_empty_prologue) {
*prologue_info = PrologueInfo(-1, -1);
return entry;
} else {
*prologue_info =
PrologueInfo(previous_block_id, normal_code->block_id() - 1);
return normal_code;
}
}
JoinEntryInstr* PrologueBuilder::BuildThrowNoSuchMethod() {
JoinEntryInstr* nsm = BuildJoinEntry();
Fragment failing(nsm);
const Code& nsm_handler =
Code::ZoneHandle(StubCode::CallClosureNoSuchMethod_entry()->code());
failing += LoadArgDescriptor();
failing += TailCall(nsm_handler);
return nsm;
}
Fragment PrologueBuilder::BuildTypeArgumentsLengthCheck(bool strong,
JoinEntryInstr* nsm,
bool expect_type_args) {
Fragment check_type_args;
JoinEntryInstr* done = BuildJoinEntry();
// Type args are always optional, so length can always be zero.
// If expect_type_args, a non-zero length must match the declaration length.
TargetEntryInstr *then, *fail;
check_type_args += LoadArgDescriptor();
check_type_args += LoadField(ArgumentsDescriptor::type_args_len_offset());
if (strong) {
check_type_args +=
IntConstant(ArgumentsDescriptor::TypeArgsLenField::mask());
check_type_args += SmiBinaryOp(Token::kBIT_AND, /* truncate= */ true);
}
if (expect_type_args) {
JoinEntryInstr* join2 = BuildJoinEntry();
LocalVariable* len = MakeTemporary();
TargetEntryInstr* otherwise;
check_type_args += LoadLocal(len);
check_type_args += IntConstant(0);
check_type_args += BranchIfEqual(&then, &otherwise);
TargetEntryInstr* then2;
Fragment check_len(otherwise);
check_len += LoadLocal(len);
check_len += IntConstant(function_.NumTypeParameters());
check_len += BranchIfEqual(&then2, &fail);
Fragment(then) + Goto(join2);
Fragment(then2) + Goto(join2);
Fragment(join2) + Drop() + Goto(done);
Fragment(fail) + Goto(nsm);
} else {
check_type_args += IntConstant(0);
check_type_args += BranchIfEqual(&then, &fail);
Fragment(then) + Goto(done);
Fragment(fail) + Goto(nsm);
}
return Fragment(check_type_args.entry, done);
}
Fragment PrologueBuilder::BuildOptionalParameterHandling(bool strong,
JoinEntryInstr* nsm) {
Fragment copy_args_prologue;
const int num_fixed_params = function_.num_fixed_parameters();
const int num_opt_pos_params = function_.NumOptionalPositionalParameters();
const int num_opt_named_params = function_.NumOptionalNamedParameters();
const int num_params =
num_fixed_params + num_opt_pos_params + num_opt_named_params;
ASSERT(function_.NumParameters() == num_params);
// Check that min_num_pos_args <= num_pos_args <= max_num_pos_args,
// where num_pos_args is the number of positional arguments passed in.
const int min_num_pos_args = num_fixed_params;
const int max_num_pos_args = num_fixed_params + num_opt_pos_params;
copy_args_prologue += LoadArgDescriptor();
copy_args_prologue +=
LoadField(ArgumentsDescriptor::positional_count_offset());
if (strong) {
copy_args_prologue +=
IntConstant(ArgumentsDescriptor::PositionalCountField::mask());
copy_args_prologue += SmiBinaryOp(Token::kBIT_AND, /* truncate= */ true);
}
LocalVariable* positional_count_var = MakeTemporary();
copy_args_prologue += LoadArgDescriptor();
copy_args_prologue += LoadField(ArgumentsDescriptor::count_offset());
LocalVariable* count_var = MakeTemporary();
// Ensure the caller provided at least [min_num_pos_args] arguments.
copy_args_prologue += IntConstant(min_num_pos_args);
copy_args_prologue += LoadLocal(positional_count_var);
copy_args_prologue += SmiRelationalOp(Token::kLTE);
TargetEntryInstr *success1, *fail1;
copy_args_prologue += BranchIfTrue(&success1, &fail1);
copy_args_prologue = Fragment(copy_args_prologue.entry, success1);
// Ensure the caller provided at most [max_num_pos_args] arguments.
copy_args_prologue += LoadLocal(positional_count_var);
copy_args_prologue += IntConstant(max_num_pos_args);
copy_args_prologue += SmiRelationalOp(Token::kLTE);
TargetEntryInstr *success2, *fail2;
copy_args_prologue += BranchIfTrue(&success2, &fail2);
copy_args_prologue = Fragment(copy_args_prologue.entry, success2);
// Link up the argument check failing code.
Fragment(fail1) + Goto(nsm);
Fragment(fail2) + Goto(nsm);
copy_args_prologue += LoadLocal(count_var);
copy_args_prologue += IntConstant(min_num_pos_args);
copy_args_prologue += SmiBinaryOp(Token::kSUB, /* truncate= */ true);
LocalVariable* optional_count_var = MakeTemporary();
// Copy mandatory parameters down.
intptr_t param = 0;
for (; param < num_fixed_params; ++param) {
copy_args_prologue += LoadLocal(optional_count_var);
copy_args_prologue += LoadFpRelativeSlot(
kWordSize * (kParamEndSlotFromFp + num_fixed_params - param));
copy_args_prologue +=
StoreLocalRaw(TokenPosition::kNoSource, ParameterVariable(param));
copy_args_prologue += Drop();
}
// Copy optional parameters down.
if (num_opt_pos_params > 0) {
JoinEntryInstr* next_missing = NULL;
for (intptr_t opt_param = 1; param < num_params; ++param, ++opt_param) {
TargetEntryInstr *supplied, *missing;
copy_args_prologue += IntConstant(opt_param);
copy_args_prologue += LoadLocal(optional_count_var);
copy_args_prologue += SmiRelationalOp(Token::kLTE);
copy_args_prologue += BranchIfTrue(&supplied, &missing);
Fragment good(supplied);
good += LoadLocal(optional_count_var);
good += LoadFpRelativeSlot(
kWordSize * (kParamEndSlotFromFp + num_fixed_params - param));
good += StoreLocalRaw(TokenPosition::kNoSource, ParameterVariable(param));
good += Drop();
Fragment not_good(missing);
if (next_missing != NULL) {
not_good += Goto(next_missing);
not_good.current = next_missing;
}
next_missing = BuildJoinEntry();
not_good += Constant(DefaultParameterValueAt(opt_param - 1));
not_good +=
StoreLocalRaw(TokenPosition::kNoSource, ParameterVariable(param));
not_good += Drop();
not_good += Goto(next_missing);
copy_args_prologue.current = good.current;
}
copy_args_prologue += Goto(next_missing /* join good/not_good flows */);
copy_args_prologue.current = next_missing;
    // If there are more arguments from the caller that we haven't processed,
    // go to NSM.
TargetEntryInstr *done, *unknown_named_arg_passed;
copy_args_prologue += LoadLocal(positional_count_var);
copy_args_prologue += LoadLocal(count_var);
copy_args_prologue += BranchIfEqual(&done, &unknown_named_arg_passed);
copy_args_prologue.current = done;
{
Fragment f(unknown_named_arg_passed);
f += Goto(nsm);
}
} else {
ASSERT(num_opt_named_params > 0);
const intptr_t first_name_offset =
ArgumentsDescriptor::first_named_entry_offset() - Array::data_offset();
// Start by alphabetically sorting the names of the optional parameters.
LocalVariable** opt_param = new LocalVariable*[num_opt_named_params];
int* opt_param_position = new int[num_opt_named_params];
SortOptionalNamedParametersInto(opt_param, opt_param_position,
num_fixed_params, num_params);
LocalVariable* optional_count_vars_processed =
parsed_function_->expression_temp_var();
copy_args_prologue += IntConstant(0);
copy_args_prologue +=
StoreLocalRaw(TokenPosition::kNoSource, optional_count_vars_processed);
copy_args_prologue += Drop();
for (intptr_t i = 0; param < num_params; ++param, ++i) {
JoinEntryInstr* join = BuildJoinEntry();
copy_args_prologue +=
IntConstant(ArgumentsDescriptor::named_entry_size() / kWordSize);
copy_args_prologue += LoadLocal(optional_count_vars_processed);
copy_args_prologue += SmiBinaryOp(Token::kMUL, /* truncate= */ true);
LocalVariable* tuple_diff = MakeTemporary();
// name = arg_desc[names_offset + arg_desc_name_index + nameOffset]
copy_args_prologue += LoadArgDescriptor();
copy_args_prologue += IntConstant(
(first_name_offset + ArgumentsDescriptor::name_offset()) / kWordSize);
copy_args_prologue += LoadLocal(tuple_diff);
copy_args_prologue += SmiBinaryOp(Token::kADD, /* truncate= */ true);
copy_args_prologue += LoadIndexed(/* index_scale = */ kWordSize);
// first name in sorted list of all names
ASSERT(opt_param[i]->name().IsSymbol());
copy_args_prologue += Constant(opt_param[i]->name());
// Compare the two names: Note that the ArgumentDescriptor array always
// terminates with a "null" name (i.e. kNullCid), which will prevent us
// from running out-of-bounds.
TargetEntryInstr *supplied, *missing;
copy_args_prologue += BranchIfStrictEqual(&supplied, &missing);
      // Load the position from the arg descriptor (to see which parameter the
      // name belongs to) and move kEntrySize forward in the
      // ArgumentsDescriptor names array.
Fragment good(supplied);
{
// fp[kParamEndSlotFromFp + (count_var - pos)]
good += LoadLocal(count_var);
{
// pos = arg_desc[names_offset + arg_desc_name_index + positionOffset]
good += LoadArgDescriptor();
good += IntConstant(
(first_name_offset + ArgumentsDescriptor::position_offset()) /
kWordSize);
good += LoadLocal(tuple_diff);
good += SmiBinaryOp(Token::kADD, /* truncate= */ true);
good += LoadIndexed(/* index_scale = */ kWordSize);
}
good += SmiBinaryOp(Token::kSUB, /* truncate= */ true);
good += LoadFpRelativeSlot(kWordSize * kParamEndSlotFromFp);
// Copy down.
good += StoreLocalRaw(TokenPosition::kNoSource,
ParameterVariable(opt_param_position[i]));
good += Drop();
// Increase processed optional variable count.
good += LoadLocal(optional_count_vars_processed);
good += IntConstant(1);
good += SmiBinaryOp(Token::kADD, /* truncate= */ true);
good += StoreLocalRaw(TokenPosition::kNoSource,
optional_count_vars_processed);
good += Drop();
good += Goto(join);
}
// We had no match, let's just load the default constant.
Fragment not_good(missing);
{
not_good += Constant(
DefaultParameterValueAt(opt_param_position[i] - num_fixed_params));
// Copy down with default value.
not_good += StoreLocalRaw(TokenPosition::kNoSource,
ParameterVariable(opt_param_position[i]));
not_good += Drop();
not_good += Goto(join);
}
copy_args_prologue.current = join;
copy_args_prologue += Drop(); // tuple_diff
}
delete[] opt_param;
delete[] opt_param_position;
    // If there are more arguments from the caller that we haven't processed,
    // go to NSM.
TargetEntryInstr *done, *unknown_named_arg_passed;
copy_args_prologue += LoadLocal(optional_count_var);
copy_args_prologue += LoadLocal(optional_count_vars_processed);
copy_args_prologue += BranchIfEqual(&done, &unknown_named_arg_passed);
copy_args_prologue.current = done;
{
Fragment f(unknown_named_arg_passed);
f += Goto(nsm);
}
}
copy_args_prologue += Drop(); // optional_count_var
copy_args_prologue += Drop(); // count_var
copy_args_prologue += Drop(); // positional_count_var
return copy_args_prologue;
}
Fragment PrologueBuilder::BuildFixedParameterLengthChecks(bool strong,
JoinEntryInstr* nsm) {
Fragment check_args;
JoinEntryInstr* done = BuildJoinEntry();
check_args += LoadArgDescriptor();
check_args += LoadField(ArgumentsDescriptor::count_offset());
LocalVariable* count = MakeTemporary();
TargetEntryInstr *then, *fail;
check_args += LoadLocal(count);
check_args += IntConstant(function_.num_fixed_parameters());
check_args += BranchIfEqual(&then, &fail);
TargetEntryInstr *then2, *fail2;
Fragment check_len(then);
check_len += LoadArgDescriptor();
check_len += LoadField(ArgumentsDescriptor::positional_count_offset());
if (strong) {
check_len += IntConstant(ArgumentsDescriptor::PositionalCountField::mask());
check_len += SmiBinaryOp(Token::kBIT_AND, /* truncate= */ true);
}
check_len += BranchIfEqual(&then2, &fail2);
Fragment(fail) + Goto(nsm);
Fragment(fail2) + Goto(nsm);
Fragment(then2) + Goto(done);
return Fragment(check_args.entry, done);
}
Fragment PrologueBuilder::BuildClosureContextHandling() {
LocalScope* scope = parsed_function_->node_sequence()->scope();
LocalVariable* closure_parameter = scope->VariableAt(0);
LocalVariable* context = parsed_function_->current_context_var();
// Load closure.context & store it into the context variable.
  // (both the load and the store happen on the copied-down places).
Fragment populate_context;
populate_context += LoadLocal(closure_parameter);
populate_context += LoadField(Closure::context_offset());
populate_context += StoreLocal(TokenPosition::kNoSource, context);
populate_context += Drop();
return populate_context;
}
Fragment PrologueBuilder::BuildTypeArgumentsHandling(bool strong) {
Fragment populate_args_desc;
LocalVariable* type_args_var = parsed_function_->RawTypeArgumentsVariable();
TargetEntryInstr *passed, *not_passed;
populate_args_desc += LoadArgDescriptor();
populate_args_desc += LoadField(ArgumentsDescriptor::type_args_len_offset());
if (strong) {
populate_args_desc +=
IntConstant(ArgumentsDescriptor::TypeArgsLenField::mask());
populate_args_desc += SmiBinaryOp(Token::kBIT_AND, /* truncate= */ true);
}
populate_args_desc += IntConstant(0);
populate_args_desc += BranchIfEqual(&not_passed, &passed);
JoinEntryInstr* join = BuildJoinEntry();
Fragment store_type_args(passed);
store_type_args += LoadArgDescriptor();
store_type_args += LoadField(ArgumentsDescriptor::count_offset());
store_type_args += LoadFpRelativeSlot(kWordSize * (1 + kParamEndSlotFromFp));
store_type_args += StoreLocal(TokenPosition::kNoSource, type_args_var);
store_type_args += Drop();
store_type_args += Goto(join);
Fragment store_null(not_passed);
store_null += NullConstant();
store_null += StoreLocal(TokenPosition::kNoSource, type_args_var);
store_null += Drop();
store_null += Goto(join);
populate_args_desc = Fragment(populate_args_desc.entry, join);
return populate_args_desc;
}
void PrologueBuilder::SortOptionalNamedParametersInto(LocalVariable** opt_param,
int* opt_param_position,
int num_fixed_params,
int num_params) {
LocalScope* scope = parsed_function_->node_sequence()->scope();
for (int pos = num_fixed_params; pos < num_params; pos++) {
LocalVariable* parameter = scope->VariableAt(pos);
const String& opt_param_name = parameter->name();
int i = pos - num_fixed_params;
while (--i >= 0) {
LocalVariable* param_i = opt_param[i];
const intptr_t result = opt_param_name.CompareTo(param_i->name());
ASSERT(result != 0);
if (result > 0) break;
opt_param[i + 1] = opt_param[i];
opt_param_position[i + 1] = opt_param_position[i];
}
opt_param[i + 1] = parameter;
opt_param_position[i + 1] = pos;
}
}
} // namespace kernel
} // namespace dart
#endif // !defined(DART_PRECOMPILED_RUNTIME)
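
[Editor's note] The comment before SortOptionalNamedParametersInto points out that named arguments are sorted alphabetically so the prologue can match them with a single linear walk against the (also sorted) names in the arguments descriptor. A standalone model of that walk, with a mock descriptor in place of the real ArgumentsDescriptor (every type here is invented for illustration):

#include <cassert>
#include <map>
#include <string>
#include <vector>

struct NamedEntry {
  std::string name;  // sorted ascending, like the real descriptor entries
  int value;         // stands in for "position of the argument on the stack"
};

std::map<std::string, int> MatchNamed(
    const std::vector<std::string>& declared_sorted,
    const std::vector<NamedEntry>& passed_sorted,
    const std::map<std::string, int>& defaults,
    bool* no_such_method) {
  std::map<std::string, int> bound;
  size_t processed = 0;  // plays the role of optional_count_vars_processed
  for (const std::string& name : declared_sorted) {
    if (processed < passed_sorted.size() &&
        passed_sorted[processed].name == name) {
      bound[name] = passed_sorted[processed].value;  // supplied by the caller
      processed++;
    } else {
      bound[name] = defaults.at(name);  // fall back to the default value
    }
  }
  // Any unprocessed entry is a named argument we do not declare -> NSM.
  *no_such_method = processed != passed_sorted.size();
  return bound;
}

int main() {
  bool nsm = false;
  auto bound = MatchNamed({"alpha", "beta"}, {{"beta", 42}},
                          {{"alpha", 1}, {"beta", 2}}, &nsm);
  assert(!nsm && bound["alpha"] == 1 && bound["beta"] == 42);

  MatchNamed({"alpha"}, {{"gamma", 7}}, {{"alpha", 1}}, &nsm);
  assert(nsm);  // caller passed a name we do not declare
  return 0;
}

Because both sides are sorted, the matching stays O(declared + passed) instead of quadratic, which is the same reason the hand-written assembly prologues used this layout.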


@@ -0,0 +1,99 @@
// Copyright (c) 2017, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_COMPILER_FRONTEND_PROLOGUE_BUILDER_H_
#define RUNTIME_VM_COMPILER_FRONTEND_PROLOGUE_BUILDER_H_
#if !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/compiler/frontend/kernel_to_il.h"
namespace dart {
namespace kernel {
// Responsible for building IR code for prologues of functions.
//
// This code handles initialization of local variables which the
// prologue needs to set up, including initialization of the:
//
// * current context variable, from the passed closure object
// * function_type_arguments variable, from the stack above fp
// * raw parameter variables, from the stack above fp
//
// if needed.
//
// Furthermore it performs all necessary checks that could lead to a
// no-such-method bailout, including checks that:
//
// * the number of passed positional arguments is correct
// * the names of passed named arguments are correct
// * the number of function type arguments is correct
//
// if needed.
//
// Most of these things are done by interpreting the caller-supplied arguments
// descriptor.
class PrologueBuilder : public BaseFlowGraphBuilder {
public:
PrologueBuilder(const ParsedFunction* parsed_function,
intptr_t last_used_id,
bool compiling_for_osr,
bool is_inlining)
: BaseFlowGraphBuilder(parsed_function, last_used_id),
compiling_for_osr_(compiling_for_osr),
is_inlining_(is_inlining) {}
BlockEntryInstr* BuildPrologue(BlockEntryInstr* entry,
PrologueInfo* prologue_info);
intptr_t last_used_block_id() const { return last_used_block_id_; }
private:
JoinEntryInstr* BuildThrowNoSuchMethod();
Fragment BuildTypeArgumentsLengthCheck(bool strong,
JoinEntryInstr* nsm,
bool expect_type_args);
Fragment BuildOptionalParameterHandling(bool strong, JoinEntryInstr* nsm);
Fragment BuildFixedParameterLengthChecks(bool strong, JoinEntryInstr* nsm);
Fragment BuildClosureContextHandling();
Fragment BuildTypeArgumentsHandling(bool strong);
LocalVariable* ParameterVariable(intptr_t index) {
return parsed_function_->RawParameterVariable(index);
}
Fragment LoadArgDescriptor() {
ASSERT(parsed_function_->has_arg_desc_var());
return LoadLocal(parsed_function_->arg_desc_var());
}
const Instance& DefaultParameterValueAt(intptr_t i) {
if (parsed_function_->default_parameter_values() != NULL) {
return parsed_function_->DefaultParameterValueAt(i);
}
ASSERT(parsed_function_->function().kind() ==
RawFunction::kNoSuchMethodDispatcher);
return Instance::null_instance();
}
void SortOptionalNamedParametersInto(LocalVariable** opt_param,
int* opt_param_position,
int num_fixed_params,
int num_params);
bool compiling_for_osr_;
bool is_inlining_;
};
} // namespace kernel
} // namespace dart
#endif // !defined(DART_PRECOMPILED_RUNTIME)
#endif // RUNTIME_VM_COMPILER_FRONTEND_PROLOGUE_BUILDER_H_


@@ -166,6 +166,8 @@ bool Intrinsifier::GraphIntrinsify(const ParsedFunction& parsed_function,
                                    FlowGraphCompiler* compiler) {
 #if !defined(TARGET_ARCH_DBC)
   ASSERT(!parsed_function.function().HasOptionalParameters());
+  PrologueInfo prologue_info(-1, -1);
+
   ZoneGrowableArray<const ICData*>* ic_data_array =
       new ZoneGrowableArray<const ICData*>();
   FlowGraphBuilder builder(parsed_function, *ic_data_array,
@@ -178,7 +180,6 @@ bool Intrinsifier::GraphIntrinsify(const ParsedFunction& parsed_function,
                            Thread::Current()->GetNextDeoptId());
   GraphEntryInstr* graph_entry = new GraphEntryInstr(
       parsed_function, normal_entry, Compiler::kNoOSRDeoptId);
-  PrologueInfo prologue_info(-1, -1);
   FlowGraph* graph =
       new FlowGraph(parsed_function, graph_entry, block_id, prologue_info);
   const Function& function = parsed_function.function();


@@ -73,6 +73,10 @@ namespace dart {
 // +--------+--------+--------+--------+
 //
 // +--------+--------+--------+--------+
+// | opcode | A | B | Y | A_B_Y: 2 unsigned 8-bit operands
+// +--------+--------+--------+--------+        1 signed 8-bit operand
+//
+// +--------+--------+--------+--------+
 // | opcode | T | T: signed 24-bit operand
 // +--------+--------+--------+--------+
 //
@@ -210,6 +214,11 @@ namespace dart {
 // the immediately following instruction is skipped. These instructions
 // expect their operands to be Smis, but don't check that they are.
 //
+// - Smi<op>TOS
+//
+// Performs SP[0] <op> SP[-1], pops operands and pushes result on the stack.
+// Assumes SP[0] and SP[-1] are both smis and the result is a Smi.
+//
 // - ShlImm rA, rB, rC
 //
 // FP[rA] <- FP[rB] << rC. Shifts the Smi in FP[rB] left by rC. rC is
@@ -338,6 +347,12 @@ namespace dart {
 // Skips the next instruction unless FP[rA] <Cond> FP[rD]. Assumes that
 // FP[rA] and FP[rD] are Smis or unboxed doubles as indicated by <Cond>.
 //
+// - IfSmi<Cond>TOS
+//
+// Cond is Lt, Le, Ge, Gt.
+// Skips the next instruction unless SP[-1] <Cond> SP[-0].
+// It is expected both SP[-1] and SP[-0] are Smis.
+//
 // - CreateArrayTOS
 //
 // Allocate array of length SP[0] with type arguments SP[-1].
@@ -395,6 +410,55 @@ namespace dart {
 //
 // Similar to StoreIndexedUint8 but FP[rA] is an external typed data array.
 //
+// - NoSuchMethod
+//
+// Performs noSuchMethod handling code.
+//
+// - TailCall
+//
+// Unwinds the current frame, populates the arguments descriptor register
+// with SP[-1] and tail calls the code in SP[-0].
+//
+// - TailCallOpt rA, rD
+//
+// Unwinds the current frame, populates the arguments descriptor register
+// with rA and tail calls the code in rD.
+//
+// - LoadArgDescriptor
+//
+// Loads the caller-provided argument descriptor and pushes it onto the
+// stack.
+//
+// - LoadArgDescriptorOpt rA
+//
+// Loads the caller-provided argument descriptor into [rA].
+//
+// - LoadFpRelativeSlot rD
+//
+// Loads from FP using the negative index of SP[-0]+rD.
+// It is assumed that SP[-0] is a Smi.
+//
+// - LoadFpRelativeSlotOpt rA, rB, rY
+//
+// Loads from FP using the negative index of FP[rB]+rY and stores the result
+// into rA.
+// It is assumed that rY is a Smi.
+//
+// - StoreFpRelativeSlot rD
+//
+// Stores SP[-0] by indexing into FP using the negative index of SP[-1]+rD.
+// It is assumed that SP[-1] is a Smi.
+//
+// - StoreFpRelativeSlotOpt rA, rB, rY
+//
+// Stores rA by indexing into FP using the negative index of FP[rB]+rY.
+// It is assumed that rY is a Smi.
+//
+// - LoadIndexedTOS
+//
+// Loads from array SP[-1] at index SP[-0].
+// It is assumed that SP[-0] is a Smi.
+//
 // - LoadIndexed rA, rB, rC
 //
 // Loads from array FP[rB] at index FP[rC] into FP[rA]. No typechecking is
@@ -460,42 +524,15 @@ namespace dart {
 // Throw (Rethrow if A != 0) exception. Exception object and stack object
 // are taken from TOS.
 //
-// - Entry A, B, rC
+// - Entry rD
 //
-// Function prologue for the function with no optional or named arguments:
-//   A - expected number of positional arguments;
-//   B - number of local slots to reserve;
-//   rC - specifies context register to initialize with empty context.
+// Function prologue for the function
+//   rD - number of local slots to reserve;
 //
-// - EntryOptional A, B, C
+// - EntryOptimized rD
 //
-// Function prologue for the function with optional or named arguments:
-//   A - expected number of positional arguments;
-//   B - number of optional arguments;
-//   C - number of named arguments;
-//
-// Only one of B and C can be not 0.
-//
-// If B is not 0 then EntryOptional bytecode is followed by B LoadConstant
-// bytecodes specifying default values for optional arguments.
-//
-// If C is not 0 then EntryOptional is followed by 2 * B LoadConstant
-// bytecodes.
-// Bytecode at 2 * i specifies name of the i-th named argument and at
-// 2 * i + 1 default value. rA part of the LoadConstant bytecode specifies
-// the location of the parameter on the stack. Here named arguments are
-// sorted alphabetically to enable linear matching similar to how function
-// prologues are implemented on other architectures.
-//
-// Note: Unlike the Entry bytecode, EntryOptional does not set up the frame
-// for local variables; this is done by a separate bytecode Frame.
-//
-// - EntryOptimized A, D
-//
-// Function prologue for optimized functions with no optional or named
-// arguments.
-//   A - expected number of positional arguments;
-//   D - number of local slots to reserve for registers;
+// Function prologue for optimized functions.
+//   rD - number of local slots to reserve for registers;
 //
 // Note: reserved slots are not initialized because optimized code
 // has stack maps attached to call sites.
@@ -730,6 +767,9 @@ namespace dart {
   V(EqualTOS, 0, ___, ___, ___) \
   V(LessThanTOS, 0, ___, ___, ___) \
   V(GreaterThanTOS, 0, ___, ___, ___) \
+  V(SmiAddTOS, 0, ___, ___, ___) \
+  V(SmiSubTOS, 0, ___, ___, ___) \
+  V(SmiMulTOS, 0, ___, ___, ___) \
   V(Add, A_B_C, reg, reg, reg) \
   V(Sub, A_B_C, reg, reg, reg) \
   V(Mul, A_B_C, reg, reg, reg) \
@@ -779,6 +819,10 @@ namespace dart {
   V(IfEqStrictTOS, 0, ___, ___, ___) \
   V(IfNeStrictNumTOS, 0, ___, ___, ___) \
   V(IfEqStrictNumTOS, 0, ___, ___, ___) \
+  V(IfSmiLtTOS, 0, ___, ___, ___) \
+  V(IfSmiLeTOS, 0, ___, ___, ___) \
+  V(IfSmiGeTOS, 0, ___, ___, ___) \
+  V(IfSmiGtTOS, 0, ___, ___, ___) \
   V(IfNeStrict, A_D, reg, reg, ___) \
   V(IfEqStrict, A_D, reg, reg, ___) \
   V(IfLe, A_D, reg, reg, ___) \
@@ -815,6 +859,16 @@ namespace dart {
   V(StoreIndexed4Float32, A_B_C, reg, reg, reg) \
   V(StoreIndexedFloat64, A_B_C, reg, reg, reg) \
   V(StoreIndexed8Float64, A_B_C, reg, reg, reg) \
+  V(NoSuchMethod, 0, ___, ___, ___) \
+  V(TailCall, 0, ___, ___, ___) \
+  V(TailCallOpt, A_D, reg, reg, ___) \
+  V(LoadArgDescriptor, 0, ___, ___, ___) \
+  V(LoadArgDescriptorOpt, A, reg, ___, ___) \
+  V(LoadFpRelativeSlot, X, reg, ___, ___) \
+  V(LoadFpRelativeSlotOpt, A_B_Y, reg, reg, reg) \
+  V(StoreFpRelativeSlot, X, reg, ___, ___) \
+  V(StoreFpRelativeSlotOpt, A_B_Y, reg, reg, reg) \
+  V(LoadIndexedTOS, 0, ___, ___, ___) \
   V(LoadIndexed, A_B_C, reg, reg, reg) \
   V(LoadIndexedUint8, A_B_C, reg, reg, reg) \
   V(LoadIndexedInt8, A_B_C, reg, reg, reg) \
@@ -838,8 +892,7 @@ namespace dart {
   V(BooleanNegateTOS, 0, ___, ___, ___) \
   V(BooleanNegate, A_D, reg, reg, ___) \
   V(Throw, A, num, ___, ___) \
-  V(Entry, A_B_C, num, num, num) \
-  V(EntryOptional, A_B_C, num, num, num) \
+  V(Entry, D, num, ___, ___) \
   V(EntryOptimized, A_D, num, num, ___) \
   V(Frame, D, num, ___, ___) \
   V(SetFrame, A, num, ___, num) \
@@ -882,6 +935,15 @@ class Bytecode {
 #undef DECLARE_BYTECODE
 };
+
+  static const char* NameOf(Instr instr) {
+    const char* names[] = {
+#define NAME(name, encoding, op1, op2, op3) #name,
+        BYTECODES_LIST(NAME)
+#undef NAME
+    };
+    return names[DecodeOpcode(instr)];
+  }
+
   static const intptr_t kOpShift = 0;
   static const intptr_t kAShift = 8;
   static const intptr_t kAMask = 0xFF;
@@ -891,6 +953,8 @@ class Bytecode {
   static const intptr_t kCMask = 0xFF;
   static const intptr_t kDShift = 16;
   static const intptr_t kDMask = 0xFFFF;
+  static const intptr_t kYShift = 24;
+  static const intptr_t kYMask = 0xFF;
   static Instr Encode(Opcode op, uintptr_t a, uintptr_t b, uintptr_t c) {
     ASSERT((a & kAMask) == a);
@@ -930,6 +994,10 @@ class Bytecode {
     return static_cast<Opcode>(bc & 0xFF);
   }
+
+  DART_FORCE_INLINE static bool IsTrap(Instr instr) {
+    return DecodeOpcode(instr) == Bytecode::kTrap;
+  }
+
   DART_FORCE_INLINE static bool IsCallOpcode(Instr instr) {
     switch (DecodeOpcode(instr)) {
       case Bytecode::kStaticCall:
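
[Editor's note] For the new A_B_Y format, the listing above pins the opcode at bit 0 and A at bit 8, with kYShift = 24 and an 8-bit mask. Below is a hedged encode/decode sketch; it assumes B occupies bits 16..23 as the layout diagram implies (kBShift is elided between the hunks shown), the helper names are this example's own, and Y is re-signed on decode since it is the one signed 8-bit operand.

#include <cassert>
#include <cstdint>

using Instr = uint32_t;

constexpr int kOpShift = 0;   // from the listing above
constexpr int kAShift = 8;    // from the listing above
constexpr int kBShift = 16;   // assumed from the layout diagram
constexpr int kYShift = 24;   // from the listing above

Instr EncodeABY(uint8_t opcode, uint8_t a, uint8_t b, int8_t y) {
  return (static_cast<Instr>(opcode) << kOpShift) |
         (static_cast<Instr>(a) << kAShift) |
         (static_cast<Instr>(b) << kBShift) |
         (static_cast<Instr>(static_cast<uint8_t>(y)) << kYShift);
}

uint8_t DecodeOpcode(Instr bc) { return static_cast<uint8_t>(bc & 0xFF); }
uint8_t DecodeA(Instr bc) { return static_cast<uint8_t>((bc >> kAShift) & 0xFF); }
uint8_t DecodeB(Instr bc) { return static_cast<uint8_t>((bc >> kBShift) & 0xFF); }
int8_t DecodeY(Instr bc) {
  // Mask to 8 bits, then reinterpret as signed so e.g. 0xFE reads back as -2.
  return static_cast<int8_t>((bc >> kYShift) & 0xFF);
}

int main() {
  // E.g. a LoadFpRelativeSlotOpt rA, rB, rY with a negative Y operand;
  // the opcode value 0x42 is a placeholder, not the real assignment.
  Instr instr = EncodeABY(/*opcode=*/0x42, /*a=*/3, /*b=*/7, /*y=*/-2);
  assert(DecodeOpcode(instr) == 0x42);
  assert(DecodeA(instr) == 3);
  assert(DecodeB(instr) == 7);
  assert(DecodeY(instr) == -2);
  return 0;
}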


@@ -2544,9 +2544,9 @@ ISOLATE_UNIT_TEST_CASE(ContextScope) {
   int next_frame_index = parent_scope->AllocateVariables(
       first_parameter_index, num_parameters, first_frame_index, NULL,
       &found_captured_vars);
-  // Variables a and c are captured, therefore are not allocated in frame.
-  // Variable var_ta, although captured, still requires a slot in frame.
-  EXPECT_EQ(-1, next_frame_index - first_frame_index);  // Indices in frame < 0.
+  // Variables a, c and var_ta are captured, therefore are not allocated in
+  // frame.
+  EXPECT_EQ(0, next_frame_index - first_frame_index);  // Indices in frame < 0.
   const intptr_t parent_scope_context_level = 1;
   EXPECT_EQ(parent_scope_context_level, parent_scope->context_level());
   EXPECT(found_captured_vars);


@@ -13,6 +13,7 @@
 #include "vm/bootstrap.h"
 #include "vm/class_finalizer.h"
 #include "vm/compiler/aot/precompiler.h"
+#include "vm/compiler/backend/il_printer.h"
 #include "vm/compiler/frontend/kernel_binary_flowgraph.h"
 #include "vm/compiler/jit/compiler.h"
 #include "vm/compiler_stats.h"
@@ -167,6 +168,52 @@ static RawTypeArguments* NewTypeArguments(
   return a.raw();
 }
+
+ParsedFunction::ParsedFunction(Thread* thread, const Function& function)
+    : thread_(thread),
+      function_(function),
+      code_(Code::Handle(zone(), function.unoptimized_code())),
+      node_sequence_(NULL),
+      regexp_compile_data_(NULL),
+      instantiator_(NULL),
+      function_type_arguments_(NULL),
+      parent_type_arguments_(NULL),
+      current_context_var_(NULL),
+      arg_desc_var_(NULL),
+      expression_temp_var_(NULL),
+      finally_return_temp_var_(NULL),
+      deferred_prefixes_(new ZoneGrowableArray<const LibraryPrefix*>()),
+      guarded_fields_(new ZoneGrowableArray<const Field*>()),
+      default_parameter_values_(NULL),
+      raw_type_arguments_var_(NULL),
+      first_parameter_index_(0),
+      num_stack_locals_(0),
+      have_seen_await_expr_(false),
+      kernel_scopes_(NULL) {
+  ASSERT(function.IsZoneHandle());
+  // Every function has a local variable for the current context.
+  LocalVariable* temp = new (zone())
+      LocalVariable(function.token_pos(), function.token_pos(),
+                    Symbols::CurrentContextVar(), Object::dynamic_type());
+  current_context_var_ = temp;
+
+  const bool reify_generic_argument =
+      function.IsGeneric() && Isolate::Current()->reify_generic_functions();
+  const bool load_optional_arguments = function.HasOptionalParameters();
+  const bool check_arguments =
+      (function_.IsClosureFunction() || function_.IsConvertedClosureFunction());
+  const bool need_argument_descriptor =
+      load_optional_arguments || check_arguments || reify_generic_argument;
+
+  if (need_argument_descriptor) {
+    arg_desc_var_ = new (zone())
+        LocalVariable(TokenPosition::kNoSource, TokenPosition::kNoSource,
+                      Symbols::ArgDescVar(), Object::dynamic_type());
+  }
+}
 void ParsedFunction::AddToGuardedFields(const Field* field) const {
   if ((field->guarded_cid() == kDynamicCid) ||
       (field->guarded_cid() == kIllegalCid)) {
@@ -274,32 +321,97 @@ void ParsedFunction::AllocateVariables() {
   const intptr_t num_fixed_params = function().num_fixed_parameters();
   const intptr_t num_opt_params = function().NumOptionalParameters();
   const intptr_t num_params = num_fixed_params + num_opt_params;
-  // Compute start indices to parameters and locals, and the number of
-  // parameters to copy.
-  if (num_opt_params == 0) {
-    // Parameter i will be at fp[kParamEndSlotFromFp + num_params - i] and
-    // local variable j will be at fp[kFirstLocalSlotFromFp - j].
-    first_parameter_index_ = kParamEndSlotFromFp + num_params;
-    first_stack_local_index_ = kFirstLocalSlotFromFp;
-    num_copied_params_ = 0;
-  } else {
-    // Parameter i will be at fp[kFirstLocalSlotFromFp - i] and local variable
-    // j will be at fp[kFirstLocalSlotFromFp - num_params - j].
-    first_parameter_index_ = kFirstLocalSlotFromFp;
-    first_stack_local_index_ = first_parameter_index_ - num_params;
-    num_copied_params_ = num_params;
-  }
+
+  // Before we start allocating frame indices to variables, we'll set up the
+  // parameters array, which can be used to access the raw parameters (i.e. not
+  // the potentially captured variables which live in the context).
+  for (intptr_t param = 0; param < function().NumParameters(); ++param) {
+    LocalVariable* raw_parameter = scope->VariableAt(param);
+    if (raw_parameter->is_captured()) {
+      String& tmp = String::ZoneHandle(Z);
+      tmp = Symbols::FromConcat(T, Symbols::OriginalParam(),
+                                raw_parameter->name());
+      RELEASE_ASSERT(scope->LocalLookupVariable(tmp) == NULL);
+      raw_parameter = new LocalVariable(raw_parameter->declaration_token_pos(),
+                                        raw_parameter->token_pos(), tmp,
+                                        raw_parameter->type());
+      if (function().HasOptionalParameters()) {
+        bool ok = scope->AddVariable(raw_parameter);
+        ASSERT(ok);
+        // Currently our optimizer cannot prove liveness of variables properly
+        // when a function has try/catch. It therefore makes the conservative
+        // estimate that all [LocalVariable]s in the frame are live and spills
+        // them before call sites (in some shape or form).
+        //
+        // Since we are guaranteed to not need that, we tell the try/catch
+        // spilling mechanism not to care about this variable.
+        raw_parameter->set_is_captured_parameter(true);
+      } else {
+        raw_parameter->set_index(kParamEndSlotFromFp +
+                                 function().NumParameters() - param);
+      }
+    }
+    raw_parameters_.Add(raw_parameter);
+  }
+  if (function_type_arguments_ != NULL) {
+    LocalVariable* raw_type_args_parameter = function_type_arguments_;
+    if (function_type_arguments_->is_captured()) {
+      String& tmp = String::ZoneHandle(Z);
+      tmp = Symbols::FromConcat(T, Symbols::OriginalParam(),
+                                function_type_arguments_->name());
+      ASSERT(scope->LocalLookupVariable(tmp) == NULL);
+      raw_type_args_parameter =
+          new LocalVariable(raw_type_args_parameter->declaration_token_pos(),
+                            raw_type_args_parameter->token_pos(), tmp,
+                            raw_type_args_parameter->type());
+      bool ok = scope->AddVariable(raw_type_args_parameter);
+      ASSERT(ok);
+    }
+    raw_type_arguments_var_ = raw_type_args_parameter;
+  }
+
+  // The copy parameters implementation will still write to local variables
+  // which we assign indices as with the old CopyParams implementation.
+  intptr_t parameter_frame_index_start;
+  intptr_t remaining_local_variables_start;
+  {
+    // Compute start indices to parameters and locals, and the number of
+    // parameters to copy.
+    if (num_opt_params == 0) {
+      // Parameter i will be at fp[kParamEndSlotFromFp + num_params - i] and
+      // local variable j will be at fp[kFirstLocalSlotFromFp - j].
+      parameter_frame_index_start = first_parameter_index_ =
+          kParamEndSlotFromFp + num_params;
+      remaining_local_variables_start = kFirstLocalSlotFromFp;
+    } else {
+      // Parameter i will be at fp[kFirstLocalSlotFromFp - i] and local
+      // variable j will be at fp[kFirstLocalSlotFromFp - num_params - j].
+      parameter_frame_index_start = first_parameter_index_ =
+          kFirstLocalSlotFromFp;
+      remaining_local_variables_start = first_parameter_index_ - num_params;
    }
+  }
+
+  if (function_type_arguments_ != NULL && num_opt_params > 0) {
+    remaining_local_variables_start--;
+  }
   // Allocate parameters and local variables, either in the local frame or
   // in the context(s).
   bool found_captured_variables = false;
   int next_free_frame_index = scope->AllocateVariables(
-      first_parameter_index_, num_params, first_stack_local_index_, NULL,
-      &found_captured_variables);
+      parameter_frame_index_start, num_params,
+      parameter_frame_index_start > 0 ? kFirstLocalSlotFromFp
+                                      : kFirstLocalSlotFromFp - num_params,
+      NULL, &found_captured_variables);
   // Frame indices are relative to the frame pointer and are decreasing.
-  ASSERT(next_free_frame_index <= first_stack_local_index_);
-  num_stack_locals_ = first_stack_local_index_ - next_free_frame_index;
+  num_stack_locals_ = -(next_free_frame_index - kFirstLocalSlotFromFp);
 }
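
[Editor's note] To make the index arithmetic above concrete, here is a small standalone program that evaluates the two layouts for a function with two parameters and two locals. The slot constants are assumptions for illustration only; the real kParamEndSlotFromFp and kFirstLocalSlotFromFp live in vm/stack_frame.h and vary by architecture.

#include <cstdio>

constexpr int kParamEndSlotFromFp = 1;     // assumed value, see note above
constexpr int kFirstLocalSlotFromFp = -1;  // assumed value, see note above

int main() {
  const int num_params = 2;
  const int num_locals = 2;

  // No optional parameters: parameters stay in the caller's pushed slots
  // above fp, locals grow downwards starting at kFirstLocalSlotFromFp.
  for (int i = 0; i < num_params; i++)
    printf("fixed-args fn: param %d at fp[%d]\n", i,
           kParamEndSlotFromFp + num_params - i);
  for (int j = 0; j < num_locals; j++)
    printf("fixed-args fn: local %d at fp[%d]\n", j,
           kFirstLocalSlotFromFp - j);

  // With optional parameters the prologue copies the parameters down below
  // fp, and locals are allocated after the copied parameters.
  for (int i = 0; i < num_params; i++)
    printf("optional-args fn: param %d at fp[%d]\n", i,
           kFirstLocalSlotFromFp - i);
  for (int j = 0; j < num_locals; j++)
    printf("optional-args fn: local %d at fp[%d]\n", j,
           kFirstLocalSlotFromFp - num_params - j);
  return 0;
}

Under these assumed constants, parameters land at fp[3] and fp[2] in the fixed-argument case, while the optional-argument case copies them to fp[-1] and fp[-2] and pushes the locals down to fp[-3] and fp[-4], which is exactly the distinction the prologue builder's LoadFpRelativeSlot/StoreLocalRaw sequences implement.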
 struct CatchParamDesc {
@@ -324,8 +436,6 @@ void ParsedFunction::AllocateIrregexpVariables(intptr_t num_stack_locals) {
   // Parameter i will be at fp[kParamEndSlotFromFp + num_params - i] and
   // local variable j will be at fp[kFirstLocalSlotFromFp - j].
   first_parameter_index_ = kParamEndSlotFromFp + num_params;
-  first_stack_local_index_ = kFirstLocalSlotFromFp;
-  num_copied_params_ = 0;
   // Frame indices are relative to the frame pointer and are decreasing.
   num_stack_locals_ = num_stack_locals;
@@ -1063,6 +1173,7 @@ void Parser::ParseFunction(ParsedFunction* parsed_function) {
   }
 #endif  // !PRODUCT
   SequenceNode* node_sequence = NULL;
+
   switch (func.kind()) {
     case RawFunction::kImplicitClosureFunction:
       node_sequence = parser.ParseImplicitClosure(func);
@@ -1454,6 +1565,11 @@ SequenceNode* Parser::ParseImplicitClosure(const Function& func) {
   OpenFunctionBlock(func);
+
+  if (parsed_function_->has_arg_desc_var() && FunctionLevel() == 0) {
+    EnsureExpressionTemp();
+    current_block_->scope->AddVariable(parsed_function_->arg_desc_var());
+  }
   const Function& parent = Function::Handle(func.parent_function());
   intptr_t type_args_len = 0;  // Length of type args vector passed to parent.
   LocalVariable* type_args_var = NULL;
@@ -1674,6 +1790,11 @@ void Parser::BuildDispatcherScope(const Function& func,
     ASSERT(FunctionLevel() == 0);
     parsed_function_->set_function_type_arguments(type_args_var);
   }
+
+  if (parsed_function_->has_arg_desc_var() && FunctionLevel() == 0) {
+    EnsureExpressionTemp();
+    current_block_->scope->AddVariable(parsed_function_->arg_desc_var());
+  }
 }
 SequenceNode* Parser::ParseNoSuchMethodDispatcher(const Function& func) {
@@ -3249,6 +3370,12 @@ SequenceNode* Parser::ParseConstructor(const Function& func) {
   }
   OpenFunctionBlock(func);
+
+  if (parsed_function_->has_arg_desc_var() && FunctionLevel() == 0) {
+    EnsureExpressionTemp();
+    current_block_->scope->AddVariable(parsed_function_->arg_desc_var());
+  }
+
   ParamList params;
   ASSERT(CurrentToken() == Token::kLPAREN);
@@ -3421,6 +3548,11 @@ SequenceNode* Parser::ParseFunc(const Function& func, bool check_semicolon) {
   ASSERT(!func.IsGenerativeConstructor());
   OpenFunctionBlock(func);  // Build local scope for function.
+
+  if (parsed_function_->has_arg_desc_var() && FunctionLevel() == 0) {
+    EnsureExpressionTemp();
+    current_block_->scope->AddVariable(parsed_function_->arg_desc_var());
+  }
   if (Isolate::Current()->reify_generic_functions()) {
     // Lookup function type arguments variable in parent function scope, if any.
     if (func.HasGenericParent()) {


@@ -89,35 +89,7 @@ typedef UnorderedHashMap<ConstMapKeyEqualsTraits> ConstantsMap;
 // The class ParsedFunction holds the result of parsing a function.
 class ParsedFunction : public ZoneAllocated {
  public:
-  ParsedFunction(Thread* thread, const Function& function)
-      : thread_(thread),
-        function_(function),
-        code_(Code::Handle(zone(), function.unoptimized_code())),
-        node_sequence_(NULL),
-        regexp_compile_data_(NULL),
-        instantiator_(NULL),
-        function_type_arguments_(NULL),
-        parent_type_arguments_(NULL),
-        current_context_var_(NULL),
-        expression_temp_var_(NULL),
-        finally_return_temp_var_(NULL),
-        deferred_prefixes_(new ZoneGrowableArray<const LibraryPrefix*>()),
-        guarded_fields_(new ZoneGrowableArray<const Field*>()),
-        default_parameter_values_(NULL),
-        first_parameter_index_(0),
-        first_stack_local_index_(0),
-        num_copied_params_(0),
-        num_stack_locals_(0),
-        have_seen_await_expr_(false),
-        kernel_scopes_(NULL) {
-    ASSERT(function.IsZoneHandle());
-    // Every function has a local variable for the current context.
-    LocalVariable* temp = new (zone())
-        LocalVariable(function.token_pos(), function.token_pos(),
-                      Symbols::CurrentContextVar(), Object::dynamic_type());
-    ASSERT(temp != NULL);
-    current_context_var_ = temp;
-  }
+  ParsedFunction(Thread* thread, const Function& function);
   const Function& function() const { return function_; }
   const Code& code() const { return code_; }
@@ -172,6 +144,9 @@ class ParsedFunction : public ZoneAllocated {
   LocalVariable* current_context_var() const { return current_context_var_; }
+  bool has_arg_desc_var() const { return arg_desc_var_ != NULL; }
+  LocalVariable* arg_desc_var() const { return arg_desc_var_; }
   LocalVariable* expression_temp_var() const {
     ASSERT(has_expression_temp_var());
     return expression_temp_var_;
@@ -208,12 +183,7 @@ class ParsedFunction : public ZoneAllocated {
   }
   int first_parameter_index() const { return first_parameter_index_; }
-  int first_stack_local_index() const { return first_stack_local_index_; }
-  int num_copied_params() const { return num_copied_params_; }
   int num_stack_locals() const { return num_stack_locals_; }
-  int num_non_copied_params() const {
-    return (num_copied_params_ == 0) ? function().num_fixed_parameters() : 0;
-  }
   void AllocateVariables();
   void AllocateIrregexpVariables(intptr_t num_stack_locals);
@@ -233,6 +203,14 @@ class ParsedFunction : public ZoneAllocated {
   kernel::ScopeBuildingResult* EnsureKernelScopes();
+
+  LocalVariable* RawTypeArgumentsVariable() const {
+    return raw_type_arguments_var_;
+  }
+
+  LocalVariable* RawParameterVariable(intptr_t i) const {
+    return raw_parameters_[i];
+  }
+
  private:
   Thread* thread_;
   const Function& function_;
@@ -243,15 +221,17 @@ class ParsedFunction : public ZoneAllocated {
   LocalVariable* function_type_arguments_;
   LocalVariable* parent_type_arguments_;
   LocalVariable* current_context_var_;
+  LocalVariable* arg_desc_var_;
   LocalVariable* expression_temp_var_;
   LocalVariable* finally_return_temp_var_;
   ZoneGrowableArray<const LibraryPrefix*>* deferred_prefixes_;
   ZoneGrowableArray<const Field*>* guarded_fields_;
   ZoneGrowableArray<const Instance*>* default_parameter_values_;
+  LocalVariable* raw_type_arguments_var_;
+  ZoneGrowableArray<LocalVariable*> raw_parameters_;
   int first_parameter_index_;
-  int first_stack_local_index_;
-  int num_copied_params_;
   int num_stack_locals_;
   bool have_seen_await_expr_;
@@ -261,21 +261,21 @@ TEST_CASE(Parser_AllocateVariables_CapturedVar) {
   EXPECT_STREQ(
       // function f uses one ctx var at (0); doesn't save ctx.
       "main.f\n"
-      " 0 ContextLevel level=0 begin=0 end=12\n"
+      " 0 ContextLevel level=0 begin=0 end=56\n"
      " 1 ContextVar level=0 begin=14 end=28 name=value\n"
       " 2 StackVar scope=1 begin=16 end=28 name=param\n"
       " 3 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       // Closure call saves current context.
       "_Closure.call\n"
-      " 0 ContextLevel level=0 begin=0 end=8\n"
+      " 0 ContextLevel level=0 begin=0 end=12\n"
       " 1 StackVar scope=1 begin=-1 end=0 name=this\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       // function main uses one ctx var at (1); saves caller ctx.
       "main\n"
-      " 0 ContextLevel level=0 begin=0 end=6\n"
-      " 1 ContextLevel level=1 begin=8 end=16\n"
+      " 0 ContextLevel level=0 begin=0 end=10\n"
+      " 1 ContextLevel level=1 begin=12 end=20\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       " 3 ContextVar level=1 begin=10 end=38 name=value\n"
       " 4 StackVar scope=2 begin=12 end=38 name=f\n",
@@ -302,28 +302,28 @@ TEST_CASE(Parser_AllocateVariables_NestedCapturedVar) {
       // Innermost function uses captured variable 'value' from middle
      // function.
       "a.b.c\n"
-      " 0 ContextLevel level=0 begin=0 end=10\n"
+      " 0 ContextLevel level=0 begin=0 end=54\n"
       " 1 ContextVar level=0 begin=20 end=30 name=value\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       // Closure call saves current context.
       "_Closure.call\n"
-      " 0 ContextLevel level=0 begin=0 end=8\n"
+      " 0 ContextLevel level=0 begin=0 end=12\n"
       " 1 StackVar scope=1 begin=-1 end=0 name=this\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       // Middle function saves the entry context. Notice that this
       // happens here and not in the outermost function. We always
       // save the entry context at the last possible moment.
       "a.b\n"
-      " 0 ContextLevel level=0 begin=0 end=6\n"
-      " 1 ContextLevel level=1 begin=8 end=16\n"
+      " 0 ContextLevel level=0 begin=0 end=50\n"
+      " 1 ContextLevel level=1 begin=52 end=60\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       " 3 ContextVar level=1 begin=16 end=38 name=value\n"
       " 4 StackVar scope=2 begin=18 end=38 name=c\n"
       // Closure call saves current context.
       "_Closure.call\n"
-      " 0 ContextLevel level=0 begin=0 end=8\n"
+      " 0 ContextLevel level=0 begin=0 end=12\n"
       " 1 StackVar scope=1 begin=-1 end=0 name=this\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
@@ -331,7 +331,7 @@ TEST_CASE(Parser_AllocateVariables_NestedCapturedVar) {
       // don't save the entry context if the function has no captured
       // variables.
       "a\n"
-      " 0 ContextLevel level=0 begin=0 end=14\n"
+      " 0 ContextLevel level=0 begin=0 end=18\n"
       " 1 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       " 2 StackVar scope=2 begin=6 end=46 name=b\n",
       vars);
@@ -361,14 +361,14 @@ TEST_CASE(Parser_AllocateVariables_TwoChains) {
   EXPECT_STREQ(
       // bb captures only value2 from aa. No others.
       "a.b.aa.bb\n"
-      " 0 ContextLevel level=0 begin=0 end=10\n"
+      " 0 ContextLevel level=0 begin=0 end=54\n"
       " 1 ContextVar level=0 begin=35 end=46 name=value2\n"
       " 2 CurrentCtx scope=0 begin=0 end=0"
       " name=:current_context_var\n"
       // Closure call saves current context.
       "_Closure.call\n"
-      " 0 ContextLevel level=0 begin=0 end=8\n"
+      " 0 ContextLevel level=0 begin=0 end=12\n"
       " 1 StackVar scope=1 begin=-1 end=0 name=this\n"
       " 2 CurrentCtx scope=0 begin=0 end=0"
       " name=:current_context_var\n"
@@ -377,22 +377,22 @@ TEST_CASE(Parser_AllocateVariables_TwoChains) {
       // of chaining from b. This keeps us from holding onto closures
       // that we would never access.
       "a.b.aa\n"
-      " 0 ContextLevel level=0 begin=0 end=6\n"
-      " 1 ContextLevel level=1 begin=8 end=16\n"
+      " 0 ContextLevel level=0 begin=0 end=50\n"
+      " 1 ContextLevel level=1 begin=52 end=60\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       " 3 ContextVar level=1 begin=30 end=55 name=value2\n"
       " 4 StackVar scope=2 begin=32 end=55 name=bb\n"
       // Closure call saves current context.
       "_Closure.call\n"
-      " 0 ContextLevel level=0 begin=0 end=8\n"
+      " 0 ContextLevel level=0 begin=0 end=12\n"
       " 1 StackVar scope=1 begin=-1 end=0 name=this\n"
       " 2 CurrentCtx scope=0 begin=0 end=0"
       " name=:current_context_var\n"
       // b captures value1 from a.
       "a.b\n"
-      " 0 ContextLevel level=0 begin=0 end=16\n"
+      " 0 ContextLevel level=0 begin=0 end=60\n"
       " 1 ContextVar level=0 begin=14 end=65 name=value1\n"
       " 2 CurrentCtx scope=0 begin=0 end=0"
       " name=:current_context_var\n"
@@ -400,15 +400,15 @@ TEST_CASE(Parser_AllocateVariables_TwoChains) {
       // Closure call saves current context.
       "_Closure.call\n"
-      " 0 ContextLevel level=0 begin=0 end=8\n"
+      " 0 ContextLevel level=0 begin=0 end=12\n"
       " 1 StackVar scope=1 begin=-1 end=0 name=this\n"
       " 2 CurrentCtx scope=0 begin=0 end=0"
       " name=:current_context_var\n"
       // a shares value1, saves entry ctx.
       "a\n"
-      " 0 ContextLevel level=0 begin=0 end=6\n"
-      " 1 ContextLevel level=1 begin=8 end=16\n"
+      " 0 ContextLevel level=0 begin=0 end=10\n"
+      " 1 ContextLevel level=1 begin=12 end=20\n"
       " 2 CurrentCtx scope=0 begin=0 end=0"
       " name=:current_context_var\n"
       " 3 ContextVar level=1 begin=10 end=73 name=value1\n"
@@ -450,24 +450,24 @@ TEST_CASE(Parser_AllocateVariables_Issue7681) {
       // This frame saves the entry context instead of chaining. Good.
       "doIt.<anonymous closure>\n"
       " 0 ContextLevel level=0 begin=0 end=0\n"
-      " 1 ContextLevel level=1 begin=4 end=12\n"
+      " 1 ContextLevel level=1 begin=48 end=56\n"
       " 2 ContextVar level=1 begin=44 end=67 name=y\n"
       " 3 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       // Closure call saves current context.
       "_Closure.call\n"
-      " 0 ContextLevel level=0 begin=0 end=8\n"
+      " 0 ContextLevel level=0 begin=0 end=12\n"
       " 1 StackVar scope=1 begin=-1 end=0 name=this\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       "X.onX\n"
-      " 0 ContextLevel level=0 begin=0 end=10\n"
+      " 0 ContextLevel level=0 begin=0 end=14\n"
       " 1 StackVar scope=1 begin=-1 end=0 name=this\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       // No context is saved here since no vars are captured.
       "doIt\n"
-      " 0 ContextLevel level=0 begin=0 end=18\n"
+      " 0 ContextLevel level=0 begin=0 end=22\n"
       " 1 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       " 2 StackVar scope=2 begin=36 end=83 name=x\n",
       vars);
@@ -496,22 +496,22 @@ TEST_CASE(Parser_AllocateVariables_CaptureLoopVar) {
   EXPECT_STREQ(
       // inner function captures variable value. That's fine.
       "outer.inner\n"
-      " 0 ContextLevel level=0 begin=0 end=10\n"
+      " 0 ContextLevel level=0 begin=0 end=54\n"
       " 1 ContextVar level=0 begin=34 end=44 name=value\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       // Closure call saves current context.
       "_Closure.call\n"
-      " 0 ContextLevel level=0 begin=0 end=8\n"
+      " 0 ContextLevel level=0 begin=0 end=12\n"
       " 1 StackVar scope=1 begin=-1 end=0 name=this\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       // The outer function saves the entry context, even though the
       // captured variable is in a loop. Good.
       "outer\n"
-      " 0 ContextLevel level=0 begin=0 end=8\n"
-      " 1 ContextLevel level=1 begin=10 end=18\n"
-      " 2 ContextLevel level=0 begin=20 end=34\n"
+      " 0 ContextLevel level=0 begin=0 end=12\n"
+      " 1 ContextLevel level=1 begin=14 end=22\n"
+      " 2 ContextLevel level=0 begin=24 end=38\n"
       " 3 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       " 4 StackVar scope=3 begin=12 end=53 name=i\n"
       " 5 ContextVar level=1 begin=29 end=53 name=value\n"
@@ -542,20 +542,20 @@ TEST_CASE(Parser_AllocateVariables_MiddleChain) {
   char* vars = CaptureVarsAtLine(lib, "a", 10);
   EXPECT_STREQ(
       "a.b.c\n"
-      " 0 ContextLevel level=0 begin=0 end=12\n"
+      " 0 ContextLevel level=0 begin=0 end=56\n"
       " 1 ContextVar level=0 begin=52 end=65 name=x\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       "_Closure.call\n"
-      " 0 ContextLevel level=0 begin=0 end=8\n"
+      " 0 ContextLevel level=0 begin=0 end=12\n"
       " 1 StackVar scope=1 begin=-1 end=0 name=this\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       // Doesn't save the entry context. Chains to parent instead.
       "a.b\n"
-      " 0 ContextLevel level=0 begin=0 end=6\n"
-      " 1 ContextLevel level=1 begin=8 end=32\n"
-      " 2 ContextLevel level=0 begin=34 end=40\n"
+      " 0 ContextLevel level=0 begin=0 end=50\n"
+      " 1 ContextLevel level=1 begin=52 end=76\n"
+      " 2 ContextLevel level=0 begin=78 end=84\n"
       " 3 ContextVar level=0 begin=12 end=74 name=x\n"
       " 4 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       " 5 StackVar scope=2 begin=49 end=74 name=c\n"
@@ -563,13 +563,13 @@ TEST_CASE(Parser_AllocateVariables_MiddleChain) {
       " 7 StackVar scope=4 begin=34 end=49 name=d\n"
       "_Closure.call\n"
-      " 0 ContextLevel level=0 begin=0 end=8\n"
+      " 0 ContextLevel level=0 begin=0 end=12\n"
       " 1 StackVar scope=1 begin=-1 end=0 name=this\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       "a\n"
-      " 0 ContextLevel level=0 begin=0 end=6\n"
-      " 1 ContextLevel level=1 begin=8 end=16\n"
+      " 0 ContextLevel level=0 begin=0 end=10\n"
+      " 1 ContextLevel level=1 begin=12 end=20\n"
       " 2 CurrentCtx scope=0 begin=0 end=0 name=:current_context_var\n"
       " 3 ContextVar level=1 begin=9 end=82 name=x\n"
       " 4 StackVar scope=2 begin=11 end=82 name=b\n",
@@ -1603,6 +1603,7 @@ DEFINE_RUNTIME_ENTRY(InvokeClosureNoSuchMethod, 3) {
   // name of the closurized function so that exception contains more
   // relevant information.
   const Function& function = Function::Handle(receiver.function());
+  ASSERT(!function.IsNull());
   const String& original_function_name =
       String::Handle(function.QualifiedUserVisibleName());
   const Object& result = Object::Handle(DartEntry::InvokeNoSuchMethod(
@@ -222,17 +222,7 @@ int LocalScope::AllocateVariables(int first_parameter_index,
     if (variable->is_captured()) {
       AllocateContextVariable(variable, &context_owner);
       *found_captured_variables = true;
-      if (variable->name().raw() ==
-          Symbols::FunctionTypeArgumentsVar().raw()) {
-        ASSERT(pos == num_parameters);
-        // A captured type args variable has a slot allocated in the frame and
-        // one in the context, where it gets copied to.
-        frame_index--;
-      }
     } else {
-      ASSERT((variable->name().raw() !=
-              Symbols::FunctionTypeArgumentsVar().raw()) ||
-             (pos == num_parameters));
       variable->set_index(frame_index--);
     }
   }
@@ -447,6 +437,7 @@ LocalVariable* LocalScope::LookupVariable(const String& name, bool test_only) {
 void LocalScope::CaptureVariable(LocalVariable* variable) {
   ASSERT(variable != NULL);
+  // The variable must exist in an enclosing scope, not necessarily in this one.
   variable->set_is_captured();
   const int variable_function_level = variable->owner()->function_level();
@@ -651,7 +642,8 @@ void LocalScope::CaptureLocalVariables(LocalScope* top_scope) {
     if (variable->is_forced_stack() ||
         (variable->name().raw() == Symbols::StackTraceVar().raw()) ||
         (variable->name().raw() == Symbols::ExceptionVar().raw()) ||
-        (variable->name().raw() == Symbols::SavedTryContextVar().raw())) {
+        (variable->name().raw() == Symbols::SavedTryContextVar().raw()) ||
+        (variable->name().raw() == Symbols::ArgDescVar().raw())) {
      // Don't capture those variables because the VM expects them to be on
      // the stack.
       continue;
@@ -700,8 +692,8 @@ bool LocalVariable::Equals(const LocalVariable& other) const {
 int LocalVariable::BitIndexIn(intptr_t fixed_parameter_count) const {
   ASSERT(!is_captured());
   // Parameters have positive indexes with the lowest index being
-  // kParamEndSlotFromFp + 1. Locals and copied parameters have negative
-  // indexes with the lowest (closest to 0) index being kFirstLocalSlotFromFp.
+  // kParamEndSlotFromFp + 1. Locals have negative indexes with the lowest
+  // (closest to 0) index being kFirstLocalSlotFromFp.
   if (index() > 0) {
     // Shift non-negative indexes so that the lowest one is 0.
     return fixed_parameter_count - (index() - kParamEndSlotFromFp);
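Note on the arithmetic in BitIndexIn: parameters sit at positive frame indexes, with the first parameter at the largest index, so the subtraction above folds them into dense bit positions counting from 0. A standalone check of the parameter branch, with an assumed stand-in value for kParamEndSlotFromFp (the real constant comes from the VM's frame-layout headers):

#include <cassert>

// Assumed layout constant for this sketch, not the VM's actual value:
// the slot just past the last parameter, relative to FP.
static const int kParamEndSlotFromFp = 1;

// Mirrors the parameter branch of LocalVariable::BitIndexIn above:
// fold positive frame indexes into dense bit positions starting at 0.
int BitIndexOfParam(int frame_index, int fixed_parameter_count) {
  assert(frame_index > 0);
  return fixed_parameter_count - (frame_index - kParamEndSlotFromFp);
}

int main() {
  // Three parameters occupy frame indexes 4, 3, 2 (the first parameter is
  // farthest from FP). They map to bit indexes 0, 1, 2 respectively.
  assert(BitIndexOfParam(4, 3) == 0);
  assert(BitIndexOfParam(3, 3) == 1);
  assert(BitIndexOfParam(2, 3) == 2);
  return 0;
}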
@@ -104,7 +104,7 @@ class LocalVariable : public ZoneAllocated {
   // allocated to the frame.
   // var_count is the total number of stack-allocated variables including
   // all parameters.
-  int BitIndexIn(intptr_t var_count) const;
+  int BitIndexIn(intptr_t fixed_parameter_count) const;
  private:
   static const int kUninitializedIndex = INT_MIN;
@@ -500,7 +500,8 @@ DART_FORCE_INLINE static uint32_t* SavedCallerPC(RawObject** FP) {
 DART_FORCE_INLINE static RawFunction* FrameFunction(RawObject** FP) {
   RawFunction* function = static_cast<RawFunction*>(FP[kFunctionSlotFromFp]);
-  ASSERT(SimulatorHelpers::GetClassId(function) == kFunctionCid);
+  ASSERT(SimulatorHelpers::GetClassId(function) == kFunctionCid ||
+         SimulatorHelpers::GetClassId(function) == kNullCid);
   return function;
 }
@@ -932,6 +933,24 @@ DART_FORCE_INLINE void Simulator::InstanceCall2(Thread* thread,
   Invoke(thread, call_base, top, pc, FP, SP);
 }
+DART_FORCE_INLINE void Simulator::PrepareForTailCall(
+    RawCode* code,
+    RawImmutableArray* args_desc,
+    RawObject** FP,
+    RawObject*** SP,
+    uint32_t** pc) {
+  // Drop all stack locals.
+  *SP = FP - 1;
+  // Replace the callee with the new [code].
+  FP[kFunctionSlotFromFp] = Object::null();
+  FP[kPcMarkerSlotFromFp] = code;
+  *pc = reinterpret_cast<uint32_t*>(code->ptr()->entry_point_);
+  pc_ = reinterpret_cast<uword>(pc);  // For the profiler.
+  pp_ = code->ptr()->object_pool_;
+  argdesc_ = args_desc;
+}
 // Note: functions below are marked DART_NOINLINE to recover performance on
 // ARM where inlining these functions into the interpreter loop seemed to cause
 // some code quality issues.
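PrepareForTailCall reuses the caller's frame: the stack locals are dropped, the function slot is cleared, and execution resumes at the target code's entry point without pushing a new frame. A minimal standalone sketch of that frame-reuse shape (the struct and indices are illustrative, not the DBC layout):

#include <cassert>

struct Code { int entry_point; };

struct Frame {
  int fp;  // index of the first local in the value stack
  int sp;  // index of the top of stack
  int pc;
};

// Mirrors the shape of PrepareForTailCall above: the current frame is
// reused, its locals are dropped, and execution resumes in the new code.
void TailCallInto(const Code& code, Frame* frame) {
  frame->sp = frame->fp - 1;     // drop all stack locals
  frame->pc = code.entry_point;  // continue at the new code's entry point
}

int main() {
  Frame f = {10, 25, 7};
  Code target = {0};
  TailCallInto(target, &f);
  assert(f.sp == 9);  // locals gone, frame header intact
  assert(f.pc == 0);  // about to execute the target's prologue
  return 0;
}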
@@ -1045,6 +1064,15 @@ static DART_NOINLINE bool InvokeNativeAutoScopeWrapper(Thread* thread,
   rB = ((op >> Bytecode::kBShift) & Bytecode::kBMask); \
   rC = ((op >> Bytecode::kCShift) & Bytecode::kCMask);
+#define DECLARE_A_B_Y \
+  uint16_t rB;        \
+  int8_t rY;          \
+  USE(rB);            \
+  USE(rY)
+#define DECODE_A_B_Y                                   \
+  rB = ((op >> Bytecode::kBShift) & Bytecode::kBMask); \
+  rY = ((op >> Bytecode::kYShift) & Bytecode::kYMask);
 #define DECLARE_0
 #define DECODE_0
@@ -1308,7 +1336,6 @@ RawObject* Simulator::Call(const Code& code,
   RawBool* true_value = Bool::True().raw();
   RawBool* false_value = Bool::False().raw();
   RawObject* null_value = Object::null();
-  RawObject* empty_context = Object::empty_context().raw();
 #if defined(DEBUG)
   Function& function_h = Function::Handle();
@@ -1319,170 +1346,28 @@ RawObject* Simulator::Call(const Code& code,
   // Bytecode handlers (see constants_dbc.h for bytecode descriptions).
   {
-    BYTECODE(Entry, A_B_C);
-    const uint8_t num_fixed_params = rA;
-    const uint16_t num_locals = rB;
-    const uint16_t context_reg = rC;
-    // Decode arguments descriptor.
-    const intptr_t pos_count = SimulatorHelpers::ArgDescPosCount(argdesc_);
-    // Check that we got the right number of positional parameters.
-    if (pos_count != num_fixed_params) {
-      // Mismatch can only occur if current function is a closure.
-      goto ClosureNoSuchMethod;
-    }
-    // Initialize locals with null and set current context variable to
-    // empty context.
-    {
-      RawObject** L = FP;
-      for (intptr_t i = 0; i < num_locals; i++) {
-        L[i] = null_value;
-      }
-      L[context_reg] = empty_context;
-      SP = FP + num_locals - 1;
-    }
+    BYTECODE(Entry, A_D);
+    const uint16_t num_locals = rD;
+    // Initialize locals with null & set SP.
+    for (intptr_t i = 0; i < num_locals; i++) {
+      FP[i] = null_value;
+    }
+    SP = FP + num_locals - 1;
     DISPATCH();
   }
   {
     BYTECODE(EntryOptimized, A_D);
-    const uint8_t num_fixed_params = rA;
     const uint16_t num_registers = rD;
-    // Decode arguments descriptor.
-    const intptr_t pos_count = SimulatorHelpers::ArgDescPosCount(argdesc_);
-    // Check that we got the right number of positional parameters.
-    if (pos_count != num_fixed_params) {
-      // Mismatch can only occur if current function is a closure.
-      goto ClosureNoSuchMethod;
-    }
     // Reserve space for registers used by the optimized code.
     SP = FP + num_registers - 1;
     DISPATCH();
   }
-  {
-    BYTECODE(EntryOptional, A_B_C);
-    const uint16_t num_fixed_params = rA;
-    const uint16_t num_opt_pos_params = rB;
-    const uint16_t num_opt_named_params = rC;
-    const intptr_t min_num_pos_args = num_fixed_params;
-    const intptr_t max_num_pos_args = num_fixed_params + num_opt_pos_params;
-    // Decode arguments descriptor.
-    const intptr_t arg_count = SimulatorHelpers::ArgDescArgCount(argdesc_);
-    const intptr_t pos_count = SimulatorHelpers::ArgDescPosCount(argdesc_);
-    const intptr_t named_count = (arg_count - pos_count);
-    // Check that got the right number of positional parameters.
-    if ((min_num_pos_args > pos_count) || (pos_count > max_num_pos_args)) {
-      goto ClosureNoSuchMethod;
-    }
-    // Copy all passed position arguments.
-    RawObject** first_arg = FrameArguments(FP, arg_count);
-    memmove(FP, first_arg, pos_count * kWordSize);
-    if (num_opt_named_params != 0) {
-      // This is a function with named parameters.
-      // Walk the list of named parameters and their
-      // default values encoded as pairs of LoadConstant instructions that
-      // follows the entry point and find matching values via arguments
-      // descriptor.
-      RawObject** argdesc_data = argdesc_->ptr()->data();
-      intptr_t i = named_count - 1;           // argument position
-      intptr_t j = num_opt_named_params - 1;  // parameter position
-      while ((j >= 0) && (i >= 0)) {
-        // Fetch formal parameter information: name, default value, target slot.
-        const uint32_t load_name = pc[2 * j];
-        const uint32_t load_value = pc[2 * j + 1];
-        ASSERT(Bytecode::DecodeOpcode(load_name) == Bytecode::kLoadConstant);
-        ASSERT(Bytecode::DecodeOpcode(load_value) == Bytecode::kLoadConstant);
-        const uint8_t reg = Bytecode::DecodeA(load_name);
-        ASSERT(reg == Bytecode::DecodeA(load_value));
-        RawString* name = static_cast<RawString*>(
-            LOAD_CONSTANT(Bytecode::DecodeD(load_name)));
-        if (name == argdesc_data[ArgumentsDescriptor::name_index(i)]) {
-          // Parameter was passed. Fetch passed value.
-          const intptr_t arg_index = Smi::Value(static_cast<RawSmi*>(
-              argdesc_data[ArgumentsDescriptor::position_index(i)]));
-          FP[reg] = first_arg[arg_index];
-          i--;  // Consume passed argument.
-        } else {
-          // Parameter was not passed. Fetch default value.
-          FP[reg] = LOAD_CONSTANT(Bytecode::DecodeD(load_value));
-        }
-        j--;  // Next formal parameter.
-      }
-      // If we have unprocessed formal parameters then initialize them all
-      // using default values.
-      while (j >= 0) {
-        const uint32_t load_name = pc[2 * j];
-        const uint32_t load_value = pc[2 * j + 1];
-        ASSERT(Bytecode::DecodeOpcode(load_name) == Bytecode::kLoadConstant);
-        ASSERT(Bytecode::DecodeOpcode(load_value) == Bytecode::kLoadConstant);
-        const uint8_t reg = Bytecode::DecodeA(load_name);
-        ASSERT(reg == Bytecode::DecodeA(load_value));
-        FP[reg] = LOAD_CONSTANT(Bytecode::DecodeD(load_value));
-        j--;
-      }
-      // If we have unprocessed passed arguments that means we have mismatch
-      // between formal parameters and concrete arguments. This can only
-      // occur if the current function is a closure.
-      if (i != -1) {
-        goto ClosureNoSuchMethod;
-      }
-      // Skip LoadConstant-s encoding information about named parameters.
-      pc += num_opt_named_params * 2;
-      // SP points past copied arguments.
-      SP = FP + num_fixed_params + num_opt_named_params - 1;
-    } else {
-      ASSERT(num_opt_pos_params != 0);
-      if (named_count != 0) {
-        // Function can't have both named and optional positional parameters.
-        // This kind of mismatch can only occur if the current function
-        // is a closure.
-        goto ClosureNoSuchMethod;
-      }
-      // Process the list of default values encoded as a sequence of
-      // LoadConstant instructions after EntryOpt bytecode.
-      // Execute only those that correspond to parameters that were not passed.
-      for (intptr_t i = pos_count - num_fixed_params; i < num_opt_pos_params;
-           i++) {
-        const uint32_t load_value = pc[i];
-        ASSERT(Bytecode::DecodeOpcode(load_value) == Bytecode::kLoadConstant);
-#if defined(DEBUG)
-        const uint8_t reg = Bytecode::DecodeA(load_value);
-        ASSERT((num_fixed_params + i) == reg);
-#endif
-        FP[num_fixed_params + i] = LOAD_CONSTANT(Bytecode::DecodeD(load_value));
-      }
-      // Skip LoadConstant-s encoding default values for optional positional
-      // parameters.
-      pc += num_opt_pos_params;
-      // SP points past the last copied parameter.
-      SP = FP + max_num_pos_args - 1;
-    }
-    DISPATCH();
-  }
   {
     BYTECODE(Frame, A_D);
     // Initialize locals with null and increment SP.
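With argument checking and context setup moved into the IR-generated prologues, the interpreter's Entry bytecode above reduces to nulling the local slots and bumping SP. A runnable sketch of just that frame setup (plain intptr_t slots stand in for the VM's tagged values):

#include <cassert>
#include <cstdint>

using Slot = intptr_t;
static const Slot kNull = 0;  // stand-in for the VM's null_value

// Returns the new SP after the Entry prologue for `num_locals` locals.
Slot* EnterFrame(Slot* fp, int num_locals) {
  for (int i = 0; i < num_locals; i++) {
    fp[i] = kNull;  // initialize every local with null
  }
  return fp + num_locals - 1;  // SP points at the last local slot
}

int main() {
  Slot frame[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  Slot* sp = EnterFrame(frame, 4);
  assert(sp == frame + 3);
  assert(frame[0] == kNull && frame[3] == kNull);
  return 0;
}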
@@ -2082,6 +1967,30 @@ RawObject* Simulator::Call(const Code& code,
     SMI_FASTPATH_TOS(RawObject*, SMI_GT);
     DISPATCH();
   }
+  {
+    BYTECODE(SmiAddTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    SP--;
+    SP[0] = Smi::New(Smi::Value(left) + Smi::Value(right));
+    DISPATCH();
+  }
+  {
+    BYTECODE(SmiSubTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    SP--;
+    SP[0] = Smi::New(Smi::Value(left) - Smi::Value(right));
+    DISPATCH();
+  }
+  {
+    BYTECODE(SmiMulTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    SP--;
+    SP[0] = Smi::New(Smi::Value(left) * Smi::Value(right));
+    DISPATCH();
+  }
   {
     BYTECODE(Add, A_B_C);
     SMI_OP_CHECK(intptr_t, SignedAddWithOverflow);
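The Smi*TOS fast paths assume both operands are already Smis and skip the overflow checks that the generic Add/Sub/Mul handlers perform. On the Dart VM a Smi keeps its payload above a zero tag bit, so Value/New amount to an untag/tag shift pair; a standalone sketch of the SmiAddTOS shape under that one-bit-tag assumption:

#include <cassert>
#include <cstdint>

// One-bit Smi tagging (assumed): payload in the upper bits, tag bit 0.
static intptr_t SmiNew(intptr_t value) {
  return static_cast<intptr_t>(static_cast<uintptr_t>(value) << 1);
}
static intptr_t SmiValue(intptr_t raw) {
  return raw >> 1;  // arithmetic shift untags and preserves the sign
}

int main() {
  // The SmiAddTOS shape: pop two operands, push the retagged sum.
  intptr_t stack[2] = {SmiNew(20), SmiNew(22)};
  intptr_t* SP = &stack[1];
  intptr_t left = SP[-1];
  intptr_t right = SP[-0];
  SP--;
  SP[0] = SmiNew(SmiValue(left) + SmiValue(right));
  assert(SmiValue(stack[0]) == 42);
  return 0;
}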
@@ -3448,6 +3357,50 @@ RawObject* Simulator::Call(const Code& code,
     DISPATCH();
   }
+  {
+    BYTECODE(IfSmiLtTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    if (!(Smi::Value(left) < Smi::Value(right))) {
+      pc++;
+    }
+    SP -= 2;
+    DISPATCH();
+  }
+  {
+    BYTECODE(IfSmiLeTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    if (!(Smi::Value(left) <= Smi::Value(right))) {
+      pc++;
+    }
+    SP -= 2;
+    DISPATCH();
+  }
+  {
+    BYTECODE(IfSmiGeTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    if (!(Smi::Value(left) >= Smi::Value(right))) {
+      pc++;
+    }
+    SP -= 2;
+    DISPATCH();
+  }
+  {
+    BYTECODE(IfSmiGtTOS, 0);
+    RawSmi* left = Smi::RawCast(SP[-1]);
+    RawSmi* right = Smi::RawCast(SP[-0]);
+    if (!(Smi::Value(left) > Smi::Value(right))) {
+      pc++;
+    }
+    SP -= 2;
+    DISPATCH();
+  }
   {
     BYTECODE(IfEqStrict, A_D);
     RawObject* lhs = FP[rA];
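Each IfSmi*TOS handler pops both operands and, when the comparison fails, skips the following instruction (typically an unconditional jump), which is how compare-and-branch pairs fuse without extra stack traffic. A toy dispatch loop demonstrating that skip-next convention (the opcodes here are invented for the example):

#include <cassert>
#include <vector>

enum Op { kPush, kIfLtTOS, kJump, kHalt };
struct Insn { Op op; int arg; };

// Runs until kHalt; returns the pc value observed at the halt.
int Run(const std::vector<Insn>& code) {
  int stack[16];
  int sp = -1;  // empty stack
  int pc = 0;
  while (true) {
    Insn insn = code[pc++];
    switch (insn.op) {
      case kPush:
        stack[++sp] = insn.arg;
        break;
      case kIfLtTOS: {
        int left = stack[sp - 1], right = stack[sp];
        sp -= 2;                     // pop both operands
        if (!(left < right)) pc++;   // condition failed: skip next insn
        break;
      }
      case kJump:
        pc = insn.arg;
        break;
      case kHalt:
        return pc;
    }
  }
}

int main() {
  // 1 < 2 holds, so the jump right after the compare is taken.
  std::vector<Insn> taken = {{kPush, 1}, {kPush, 2}, {kIfLtTOS, 0},
                             {kJump, 5}, {kHalt, 0}, {kHalt, 0}};
  assert(Run(taken) == 6);  // halted at code[5]
  // 2 < 1 fails, so the jump is skipped and we halt at code[4].
  std::vector<Insn> skipped = {{kPush, 2}, {kPush, 1}, {kIfLtTOS, 0},
                               {kJump, 5}, {kHalt, 0}, {kHalt, 0}};
  assert(Run(skipped) == 5);
  return 0;
}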
@@ -3747,6 +3700,89 @@ RawObject* Simulator::Call(const Code& code,
     DISPATCH();
   }
+  {
+    BYTECODE(TailCall, 0);
+    RawCode* code = RAW_CAST(Code, SP[-0]);
+    RawImmutableArray* args_desc = RAW_CAST(ImmutableArray, SP[-1]);
+    PrepareForTailCall(code, args_desc, FP, &SP, &pc);
+    DISPATCH();
+  }
+  {
+    BYTECODE(TailCallOpt, A_D);
+    RawImmutableArray* args_desc = RAW_CAST(ImmutableArray, FP[rA]);
+    RawCode* code = RAW_CAST(Code, FP[rD]);
+    PrepareForTailCall(code, args_desc, FP, &SP, &pc);
+    DISPATCH();
+  }
+  {
+    BYTECODE(LoadArgDescriptor, 0);
+    SP++;
+    SP[0] = argdesc_;
+    DISPATCH();
+  }
+  {
+    BYTECODE(LoadArgDescriptorOpt, A);
+    FP[rA] = argdesc_;
+    DISPATCH();
+  }
+  {
+    BYTECODE(NoSuchMethod, 0);
+    goto ClosureNoSuchMethod;
+  }
+  {
+    BYTECODE(LoadFpRelativeSlot, A_X);
+    RawSmi* index = RAW_CAST(Smi, SP[-0]);
+    const int16_t offset = rD;
+    SP[-0] = FP[-(Smi::Value(index) + offset)];
+    DISPATCH();
+  }
+  {
+    BYTECODE(LoadFpRelativeSlotOpt, A_B_Y);
+    RawSmi* index = RAW_CAST(Smi, FP[rB]);
+    const int8_t offset = rY;
+    FP[rA] = FP[-(Smi::Value(index) + offset)];
+    DISPATCH();
+  }
+  {
+    BYTECODE(StoreFpRelativeSlot, A_X);
+    RawSmi* index = RAW_CAST(Smi, SP[-1]);
+    const int16_t offset = rD;
+    FP[-(Smi::Value(index) + offset) - 0] = SP[-0];
+    SP--;
+    DISPATCH();
+  }
+  {
+    BYTECODE(StoreFpRelativeSlotOpt, A_B_Y);
+    RawSmi* index = RAW_CAST(Smi, FP[rB]);
+    const int8_t offset = rY;
+    FP[-(Smi::Value(index) + offset) - 0] = FP[rA];
+    DISPATCH();
+  }
+  {
+    BYTECODE(LoadIndexedTOS, 0);
+    // Currently this instruction is only emitted if it's safe to do.
+    ASSERT(!SP[0]->IsHeapObject());
+    ASSERT(SP[-1]->IsArray() || SP[-1]->IsImmutableArray());
+    const intptr_t index_scale = rA;
+    RawSmi* index = RAW_CAST(Smi, SP[-0]);
+    RawArray* array = Array::RawCast(SP[-1]);
+    ASSERT(SimulatorHelpers::CheckIndex(index, array->ptr()->length_));
+    SP[-1] = array->ptr()->data()[Smi::Value(index) << index_scale];
+    SP--;
+    DISPATCH();
+  }
   {
     BYTECODE(LoadIndexed, A_B_C);
     RawObject* obj = FP[rB];
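The FpRelativeSlot bytecodes index arguments below the frame pointer: callers push arguments before the frame header, so an argument's (Smi) index plus a fixed offset counts down from FP, which is what `FP[-(Smi::Value(index) + offset)]` expresses. A standalone check of that addressing, with an assumed header size rather than the DBC constants:

#include <cassert>
#include <cstdint>

int main() {
  // Caller pushes arguments, then a 2-slot frame header; FP points above it.
  intptr_t memory[8] = {};
  intptr_t* base = memory;
  base[0] = 111;  // argument 2 (pushed first, farthest from the frame)
  base[1] = 222;  // argument 1
  base[2] = 333;  // argument 0 (last argument, closest to the frame)
  // Two header slots occupy base[3] and base[4].
  intptr_t* FP = base + 5;

  const intptr_t offset = 3;  // header size + 1, assumed for this layout
  // FP[-(index + offset)] walks from the last argument upward.
  assert(FP[-(0 + offset)] == 333);
  assert(FP[-(1 + offset)] == 222);
  assert(FP[-(2 + offset)] == 111);
  return 0;
}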
@@ -3862,7 +3898,7 @@ RawObject* Simulator::Call(const Code& code,
 ClosureNoSuchMethod:
 #if defined(DEBUG)
   function_h ^= FrameFunction(FP);
-  ASSERT(function_h.IsClosureFunction());
+  ASSERT(function_h.IsNull() || function_h.IsClosureFunction());
 #endif
   // Restore caller context as we are going to throw NoSuchMethod.
@@ -21,6 +21,7 @@ class Thread;
 class Code;
 class Array;
 class RawICData;
+class RawImmutableArray;
 class RawArray;
 class RawObjectPool;
 class RawFunction;
@@ -164,6 +165,12 @@ class Simulator {
               RawObject*** SP,
               bool optimized);
+  void PrepareForTailCall(RawCode* code,
+                          RawImmutableArray* args_desc,
+                          RawObject** FP,
+                          RawObject*** SP,
+                          uint32_t** pc);
 #if !defined(PRODUCT)
   // Returns true if tracing of executed instructions is enabled.
   bool IsTracingExecution() const;
@@ -75,6 +75,7 @@ class SnapshotWriter;
 #define VM_STUB_CODE_LIST(V) \
   V(LazyCompile) \
   V(OptimizeFunction) \
+  V(CallClosureNoSuchMethod) \
   V(RunExceptionHandler) \
   V(DeoptForRewind) \
   V(FixCallersTarget) \
__ Compile(); __ Compile();
} }
void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) {
__ NoSuchMethod();
}
// Not executed, but used as a stack marker when calling // Not executed, but used as a stack marker when calling
// DRT_OptimizeInvokedFunction. // DRT_OptimizeInvokedFunction.
void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) { void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
@@ -92,6 +92,7 @@ class ObjectPointerVisitor;
   V(InterpolateSingle, "_interpolateSingle") \
   V(Iterator, "iterator") \
   V(NoSuchMethod, "noSuchMethod") \
+  V(ArgDescVar, ":arg_desc") \
   V(CurrentContextVar, ":current_context_var") \
   V(SavedTryContextVar, ":saved_try_context_var") \
   V(ExceptionParameter, ":exception") \
@@ -397,6 +398,7 @@ class ObjectPointerVisitor;
   V(OptimizedOut, "<optimized out>") \
   V(NotInitialized, "<not initialized>") \
   V(NotNamed, "<not named>") \
+  V(OriginalParam, ":original:") \
   V(TempParam, ":temp_param") \
   V(_UserTag, "_UserTag") \
   V(Default, "Default") \