Initial split of precompilation code from compiler.cc

This removes precompiler code from the regular JIT-mode VM and allows us to add
more precompilation-specific optimizations without affecting the JIT compiler.

Remove JIT-related code from the precompiler pipeline:
no type feedback, no OSR, no deoptimization, no block reordering, no background compilation.

With precompilation, use the precompiler pipeline for static initializers and const expressions as well.
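
As a rough sketch of the new split (simplified from the diff below; details and error handling elided), the JIT entry points now hand off to the precompiler whenever FLAG_precompilation is set:

// Sketch only -- not the exact code from this patch.
RawError* Compiler::CompileFunction(Thread* thread, const Function& function) {
#ifdef DART_PRECOMPILER
  if (FLAG_precompilation) {
    // Precompiler pipeline: no type feedback, no OSR, no deopt,
    // no block reordering, no background compilation.
    return Precompiler::CompileFunction(thread, function);
  }
#endif
  CompilationPipeline* pipeline =
      CompilationPipeline::New(thread->zone(), function);
  // The JIT always starts with unoptimized code.
  return CompileFunctionHelper(pipeline, function,
                               /* optimized = */ false, kNoOSRDeoptId);
}
// EvaluateStaticInitializer and ExecuteOnce get the same guard and forward to
// Precompiler::EvaluateStaticInitializer / Precompiler::ExecuteOnce.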

BUG=
R=rmacnak@google.com

Review URL: https://codereview.chromium.org/1663163003 .
Florian Schneider 2016-02-09 11:29:13 +01:00
parent 1a4ba676ad
commit 117c7b0d3e
10 changed files with 1189 additions and 351 deletions

View file

@ -16,6 +16,7 @@
#include "vm/dart_entry.h"
#include "vm/debugger.h"
#include "vm/deopt_instructions.h"
#include "vm/disassembler.h"
#include "vm/exceptions.h"
#include "vm/flags.h"
#include "vm/flow_graph.h"
@ -31,10 +32,10 @@
#include "vm/object_store.h"
#include "vm/os.h"
#include "vm/parser.h"
#include "vm/precompiler.h"
#include "vm/redundancy_elimination.h"
#include "vm/regexp_parser.h"
#include "vm/regexp_assembler.h"
#include "vm/scanner.h"
#include "vm/symbols.h"
#include "vm/tags.h"
#include "vm/thread_registry.h"
@ -66,105 +67,76 @@ DEFINE_FLAG(bool, trace_bailout, false, "Print bailout from ssa compiler.");
DEFINE_FLAG(bool, use_inlining, true, "Enable call-site inlining");
DEFINE_FLAG(bool, verify_compiler, false,
"Enable compiler verification assertions");
DEFINE_FLAG(int, max_speculative_inlining_attempts, 1,
"Max number of attempts with speculative inlining (precompilation only)");
DECLARE_FLAG(bool, background_compilation);
DECLARE_FLAG(bool, huge_method_cutoff_in_code_size);
DECLARE_FLAG(bool, load_deferred_eagerly);
DECLARE_FLAG(bool, trace_failed_optimization_attempts);
DECLARE_FLAG(bool, trace_inlining_intervals);
DECLARE_FLAG(bool, trace_irregexp);
DECLARE_FLAG(bool, precompilation);
#ifndef DART_PRECOMPILED_RUNTIME
// TODO(zerny): Factor out unoptimizing/optimizing pipelines and remove
// separate helper functions & `optimizing` args.
class CompilationPipeline : public ZoneAllocated {
public:
static CompilationPipeline* New(Zone* zone, const Function& function);
virtual void ParseFunction(ParsedFunction* parsed_function) = 0;
virtual FlowGraph* BuildFlowGraph(
Zone* zone,
ParsedFunction* parsed_function,
const ZoneGrowableArray<const ICData*>& ic_data_array,
intptr_t osr_id) = 0;
virtual void FinalizeCompilation() = 0;
virtual ~CompilationPipeline() { }
};
void DartCompilationPipeline::ParseFunction(ParsedFunction* parsed_function) {
Parser::ParseFunction(parsed_function);
parsed_function->AllocateVariables();
}
class DartCompilationPipeline : public CompilationPipeline {
public:
virtual void ParseFunction(ParsedFunction* parsed_function) {
Parser::ParseFunction(parsed_function);
parsed_function->AllocateVariables();
}
FlowGraph* DartCompilationPipeline::BuildFlowGraph(
Zone* zone,
ParsedFunction* parsed_function,
const ZoneGrowableArray<const ICData*>& ic_data_array,
intptr_t osr_id) {
// Build the flow graph.
FlowGraphBuilder builder(*parsed_function,
ic_data_array,
NULL, // NULL = not inlining.
osr_id);
virtual FlowGraph* BuildFlowGraph(
Zone* zone,
ParsedFunction* parsed_function,
const ZoneGrowableArray<const ICData*>& ic_data_array,
intptr_t osr_id) {
// Build the flow graph.
FlowGraphBuilder builder(*parsed_function,
ic_data_array,
NULL, // NULL = not inlining.
osr_id);
return builder.BuildGraph();
}
virtual void FinalizeCompilation() { }
};
return builder.BuildGraph();
}
class IrregexpCompilationPipeline : public CompilationPipeline {
public:
IrregexpCompilationPipeline() : backtrack_goto_(NULL) { }
void DartCompilationPipeline::FinalizeCompilation() { }
virtual void ParseFunction(ParsedFunction* parsed_function) {
RegExpParser::ParseFunction(parsed_function);
// Variables are allocated after compilation.
}
virtual FlowGraph* BuildFlowGraph(
Zone* zone,
ParsedFunction* parsed_function,
const ZoneGrowableArray<const ICData*>& ic_data_array,
intptr_t osr_id) {
// Compile to the dart IR.
RegExpEngine::CompilationResult result =
RegExpEngine::CompileIR(parsed_function->regexp_compile_data(),
parsed_function,
ic_data_array);
backtrack_goto_ = result.backtrack_goto;
void IrregexpCompilationPipeline::ParseFunction(
ParsedFunction* parsed_function) {
RegExpParser::ParseFunction(parsed_function);
// Variables are allocated after compilation.
}
// Allocate variables now that we know the number of locals.
parsed_function->AllocateIrregexpVariables(result.num_stack_locals);
FlowGraph* IrregexpCompilationPipeline::BuildFlowGraph(
Zone* zone,
ParsedFunction* parsed_function,
const ZoneGrowableArray<const ICData*>& ic_data_array,
intptr_t osr_id) {
// Compile to the dart IR.
RegExpEngine::CompilationResult result =
RegExpEngine::CompileIR(parsed_function->regexp_compile_data(),
parsed_function,
ic_data_array);
backtrack_goto_ = result.backtrack_goto;
// Build the flow graph.
FlowGraphBuilder builder(*parsed_function,
ic_data_array,
NULL, // NULL = not inlining.
osr_id);
// Allocate variables now that we know the number of locals.
parsed_function->AllocateIrregexpVariables(result.num_stack_locals);
return new(zone) FlowGraph(*parsed_function,
result.graph_entry,
result.num_blocks);
}
// Build the flow graph.
FlowGraphBuilder builder(*parsed_function,
ic_data_array,
NULL, // NULL = not inlining.
osr_id);
virtual void FinalizeCompilation() {
backtrack_goto_->ComputeOffsetTable();
}
private:
IndirectGotoInstr* backtrack_goto_;
};
return new(zone) FlowGraph(*parsed_function,
result.graph_entry,
result.num_blocks);
}
void IrregexpCompilationPipeline::FinalizeCompilation() {
backtrack_goto_->ComputeOffsetTable();
}
CompilationPipeline* CompilationPipeline::New(Zone* zone,
const Function& function) {
@ -445,6 +417,7 @@ void CompileParsedFunctionHelper::FinalizeCompilation(
Assembler* assembler,
FlowGraphCompiler* graph_compiler,
FlowGraph* flow_graph) {
ASSERT(!FLAG_precompilation);
const Function& function = parsed_function()->function();
Zone* const zone = thread()->zone();
@ -543,35 +516,31 @@ void CompileParsedFunctionHelper::FinalizeCompilation(
}
// Register code with the classes it depends on because of CHA and
// fields it depends on because of store guards, unless we cannot
// deopt.
if (!FLAG_precompilation) {
// Deoptimize field dependent code first, before registering
// this yet uninstalled code as dependent on a field.
// TODO(srdjan): Debugging dart2js crashes;
// FlowGraphOptimizer::VisitStoreInstanceField populates
// deoptimize_dependent_code() list, currently disabled.
for (intptr_t i = 0;
i < flow_graph->deoptimize_dependent_code().length();
i++) {
const Field* field = flow_graph->deoptimize_dependent_code()[i];
field->DeoptimizeDependentCode();
}
for (intptr_t i = 0;
i < thread()->cha()->leaf_classes().length();
++i) {
thread()->cha()->leaf_classes()[i]->RegisterCHACode(code);
}
for (intptr_t i = 0;
i < flow_graph->guarded_fields()->length();
i++) {
const Field* field = (*flow_graph->guarded_fields())[i];
field->RegisterDependentCode(code);
}
// fields it depends on because of store guards.
// Deoptimize field dependent code first, before registering
// this yet uninstalled code as dependent on a field.
// TODO(srdjan): Debugging dart2js crashes;
// FlowGraphOptimizer::VisitStoreInstanceField populates
// deoptimize_dependent_code() list, currently disabled.
for (intptr_t i = 0;
i < flow_graph->deoptimize_dependent_code().length();
i++) {
const Field* field = flow_graph->deoptimize_dependent_code()[i];
field->DeoptimizeDependentCode();
}
for (intptr_t i = 0;
i < thread()->cha()->leaf_classes().length();
++i) {
thread()->cha()->leaf_classes()[i]->RegisterCHACode(code);
}
for (intptr_t i = 0;
i < flow_graph->guarded_fields()->length();
i++) {
const Field* field = (*flow_graph->guarded_fields())[i];
field->RegisterDependentCode(code);
}
} else { // not optimized.
if (!FLAG_precompilation &&
(function.ic_data_array() == Array::null())) {
if (function.ic_data_array() == Array::null()) {
function.SaveICDataMap(
graph_compiler->deopt_id_to_ic_data(),
Array::Handle(zone, graph_compiler->edge_counters_array()));
@ -594,6 +563,7 @@ void CompileParsedFunctionHelper::FinalizeCompilation(
// If optimized_result_code is not NULL then it is caller's responsibility
// to install code.
bool CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
ASSERT(!FLAG_precompilation);
const Function& function = parsed_function()->function();
if (optimized() && !function.IsOptimizable()) {
return false;
@ -613,9 +583,7 @@ bool CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
bool done = false;
// volatile because the variable may be clobbered by a longjmp.
volatile bool use_far_branches = false;
volatile bool use_speculative_inlining =
FLAG_max_speculative_inlining_attempts > 0;
GrowableArray<intptr_t> inlining_black_list;
const bool use_speculative_inlining = false;
while (!done) {
const intptr_t prev_deopt_id = thread()->deopt_id();
@ -724,16 +692,7 @@ bool CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
FlowGraphOptimizer optimizer(flow_graph,
use_speculative_inlining,
&inlining_black_list);
if (FLAG_precompilation) {
optimizer.PopulateWithICData();
optimizer.ApplyClassIds();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
FlowGraphTypePropagator::Propagate(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
NULL);
optimizer.ApplyICData();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
@ -762,7 +721,7 @@ bool CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
&inline_id_to_function,
&caller_inline_id,
use_speculative_inlining,
&inlining_black_list);
NULL);
inliner.Inline();
// Use lists are maintained and validated by the inliner.
DEBUG_ASSERT(flow_graph->VerifyUseLists());
@ -1085,25 +1044,8 @@ bool CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
ASSERT(!use_far_branches);
use_far_branches = true;
} else if (error.raw() == Object::speculative_inlining_error().raw()) {
// The return value of setjmp is the deopt id of the check instruction
// that caused the bailout.
done = false;
#if defined(DEBUG)
ASSERT(FLAG_precompilation);
ASSERT(use_speculative_inlining);
for (intptr_t i = 0; i < inlining_black_list.length(); ++i) {
ASSERT(inlining_black_list[i] != val);
}
#endif
inlining_black_list.Add(val);
const intptr_t max_attempts = FLAG_max_speculative_inlining_attempts;
if (inlining_black_list.length() >= max_attempts) {
use_speculative_inlining = false;
if (FLAG_trace_compiler || FLAG_trace_optimizing_compiler) {
THR_Print("Disabled speculative inlining after %" Pd " attempts.\n",
inlining_black_list.length());
}
}
// Can only happen with precompilation.
UNREACHABLE();
} else {
// If the error isn't due to an out of range branch offset, we don't
// try again (done = true), and indicate that we did not finish
@ -1128,147 +1070,6 @@ bool CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
}
static void DisassembleCode(const Function& function, bool optimized) {
const char* function_fullname = function.ToFullyQualifiedCString();
THR_Print("Code for %sfunction '%s' {\n",
optimized ? "optimized " : "",
function_fullname);
const Code& code = Code::Handle(function.CurrentCode());
code.Disassemble();
THR_Print("}\n");
THR_Print("Pointer offsets for function: {\n");
// Pointer offsets are stored in descending order.
Object& obj = Object::Handle();
for (intptr_t i = code.pointer_offsets_length() - 1; i >= 0; i--) {
const uword addr = code.GetPointerOffsetAt(i) + code.EntryPoint();
obj = *reinterpret_cast<RawObject**>(addr);
THR_Print(" %d : %#" Px " '%s'\n",
code.GetPointerOffsetAt(i), addr, obj.ToCString());
}
THR_Print("}\n");
THR_Print("PC Descriptors for function '%s' {\n", function_fullname);
PcDescriptors::PrintHeaderString();
const PcDescriptors& descriptors =
PcDescriptors::Handle(code.pc_descriptors());
THR_Print("%s}\n", descriptors.ToCString());
uword start = Instructions::Handle(code.instructions()).EntryPoint();
const Array& deopt_table = Array::Handle(code.deopt_info_array());
intptr_t deopt_table_length = DeoptTable::GetLength(deopt_table);
if (deopt_table_length > 0) {
THR_Print("DeoptInfo: {\n");
Smi& offset = Smi::Handle();
TypedData& info = TypedData::Handle();
Smi& reason_and_flags = Smi::Handle();
for (intptr_t i = 0; i < deopt_table_length; ++i) {
DeoptTable::GetEntry(deopt_table, i, &offset, &info, &reason_and_flags);
const intptr_t reason =
DeoptTable::ReasonField::decode(reason_and_flags.Value());
ASSERT((0 <= reason) && (reason < ICData::kDeoptNumReasons));
THR_Print("%4" Pd ": 0x%" Px " %s (%s)\n",
i,
start + offset.Value(),
DeoptInfo::ToCString(deopt_table, info),
DeoptReasonToCString(
static_cast<ICData::DeoptReasonId>(reason)));
}
THR_Print("}\n");
}
const ObjectPool& object_pool = ObjectPool::Handle(code.GetObjectPool());
object_pool.DebugPrint();
THR_Print("Stackmaps for function '%s' {\n", function_fullname);
if (code.stackmaps() != Array::null()) {
const Array& stackmap_table = Array::Handle(code.stackmaps());
Stackmap& map = Stackmap::Handle();
for (intptr_t i = 0; i < stackmap_table.Length(); ++i) {
map ^= stackmap_table.At(i);
THR_Print("%s\n", map.ToCString());
}
}
THR_Print("}\n");
THR_Print("Variable Descriptors for function '%s' {\n",
function_fullname);
const LocalVarDescriptors& var_descriptors =
LocalVarDescriptors::Handle(code.GetLocalVarDescriptors());
intptr_t var_desc_length =
var_descriptors.IsNull() ? 0 : var_descriptors.Length();
String& var_name = String::Handle();
for (intptr_t i = 0; i < var_desc_length; i++) {
var_name = var_descriptors.GetName(i);
RawLocalVarDescriptors::VarInfo var_info;
var_descriptors.GetInfo(i, &var_info);
const int8_t kind = var_info.kind();
if (kind == RawLocalVarDescriptors::kSavedCurrentContext) {
THR_Print(" saved current CTX reg offset %d\n", var_info.index());
} else {
if (kind == RawLocalVarDescriptors::kContextLevel) {
THR_Print(" context level %d scope %d", var_info.index(),
var_info.scope_id);
} else if (kind == RawLocalVarDescriptors::kStackVar) {
THR_Print(" stack var '%s' offset %d",
var_name.ToCString(), var_info.index());
} else {
ASSERT(kind == RawLocalVarDescriptors::kContextVar);
THR_Print(" context var '%s' level %d offset %d",
var_name.ToCString(), var_info.scope_id, var_info.index());
}
THR_Print(" (valid %s-%s)\n", var_info.begin_pos.ToCString(),
var_info.end_pos.ToCString());
}
}
THR_Print("}\n");
THR_Print("Exception Handlers for function '%s' {\n", function_fullname);
const ExceptionHandlers& handlers =
ExceptionHandlers::Handle(code.exception_handlers());
THR_Print("%s}\n", handlers.ToCString());
{
THR_Print("Static call target functions {\n");
const Array& table = Array::Handle(code.static_calls_target_table());
Smi& offset = Smi::Handle();
Function& function = Function::Handle();
Code& code = Code::Handle();
for (intptr_t i = 0; i < table.Length();
i += Code::kSCallTableEntryLength) {
offset ^= table.At(i + Code::kSCallTableOffsetEntry);
function ^= table.At(i + Code::kSCallTableFunctionEntry);
code ^= table.At(i + Code::kSCallTableCodeEntry);
if (function.IsNull()) {
Class& cls = Class::Handle();
cls ^= code.owner();
if (cls.IsNull()) {
const String& code_name = String::Handle(code.Name());
THR_Print(" 0x%" Px ": %s, %p\n",
start + offset.Value(),
code_name.ToCString(),
code.raw());
} else {
THR_Print(" 0x%" Px ": allocation stub for %s, %p\n",
start + offset.Value(),
cls.ToCString(),
code.raw());
}
} else {
THR_Print(" 0x%" Px ": %s, %p\n",
start + offset.Value(),
function.ToFullyQualifiedCString(),
code.raw());
}
}
THR_Print("}\n");
}
if (optimized && FLAG_trace_inlining_intervals) {
code.DumpInlinedIntervals();
}
}
#if defined(DEBUG)
// Verifies that the inliner is always in the list of inlined functions.
// If this fails run with --trace-inlining-intervals to get more information.
@ -1294,11 +1095,7 @@ static RawError* CompileFunctionHelper(CompilationPipeline* pipeline,
const Function& function,
bool optimized,
intptr_t osr_id) {
// Check that we optimize if 'FLAG_precompilation' is set to true,
// except if the function is marked as not optimizable.
ASSERT(!function.IsOptimizable() ||
!FLAG_precompilation || optimized);
ASSERT(!FLAG_precompilation || !function.HasCode());
ASSERT(!FLAG_precompilation);
LongJumpScope jump;
if (setjmp(*jump.Set()) == 0) {
Thread* const thread = Thread::Current();
@ -1340,7 +1137,7 @@ static RawError* CompileFunctionHelper(CompilationPipeline* pipeline,
CompileParsedFunctionHelper helper(parsed_function, optimized, osr_id);
const bool success = helper.Compile(pipeline);
if (!success) {
if (optimized && !FLAG_precompilation) {
if (optimized) {
// Optimizer bailed out. Disable optimizations and never try again.
if (trace_compiler) {
THR_Print("--> disabling optimizations for '%s'\n",
@ -1378,13 +1175,13 @@ static RawError* CompileFunctionHelper(CompilationPipeline* pipeline,
}
if (FLAG_disassemble && FlowGraphPrinter::ShouldPrint(function)) {
DisassembleCode(function, optimized);
Disassembler::DisassembleCode(function, optimized);
} else if (FLAG_disassemble_optimized &&
optimized &&
FlowGraphPrinter::ShouldPrint(function)) {
// TODO(fschneider): Print unoptimized code along with the optimized code.
THR_Print("*** BEGIN CODE\n");
DisassembleCode(function, true);
Disassembler::DisassembleCode(function, true);
THR_Print("*** END CODE\n");
}
#if defined(DEBUG)
@ -1400,7 +1197,7 @@ static RawError* CompileFunctionHelper(CompilationPipeline* pipeline,
thread->clear_sticky_error();
// Unoptimized compilation or precompilation may encounter compile-time
// errors, but regular optimized compilation should not.
ASSERT(!optimized || FLAG_precompilation);
ASSERT(!optimized);
// Do not attempt to optimize functions that can cause errors.
function.set_is_optimizable(false);
return error.raw();
@ -1412,6 +1209,11 @@ static RawError* CompileFunctionHelper(CompilationPipeline* pipeline,
RawError* Compiler::CompileFunction(Thread* thread,
const Function& function) {
#ifdef DART_PRECOMPILER
if (FLAG_precompilation) {
return Precompiler::CompileFunction(thread, function);
}
#endif
Isolate* isolate = thread->isolate();
VMTagScope tagScope(thread, VMTag::kCompileUnoptimizedTagId);
TIMELINE_FUNCTION_COMPILATION_DURATION(thread, "Function", function);
@ -1426,12 +1228,9 @@ RawError* Compiler::CompileFunction(Thread* thread,
CompilationPipeline* pipeline =
CompilationPipeline::New(thread->zone(), function);
const bool optimized =
FLAG_precompilation && function.IsOptimizable();
return CompileFunctionHelper(pipeline,
function,
optimized,
/* optimized = */ false,
kNoOSRDeoptId);
}
@ -1499,7 +1298,7 @@ RawError* Compiler::CompileParsedFunction(
CompileParsedFunctionHelper helper(parsed_function, false, kNoOSRDeoptId);
helper.Compile(&pipeline);
if (FLAG_disassemble) {
DisassembleCode(parsed_function->function(), false);
Disassembler::DisassembleCode(parsed_function->function(), false);
}
return Error::null();
} else {
@ -1566,58 +1365,35 @@ RawError* Compiler::CompileAllFunctions(const Class& cls) {
}
void Compiler::CompileStaticInitializer(const Field& field) {
ASSERT(field.is_static());
if (field.HasPrecompiledInitializer()) {
// TODO(rmacnak): Investigate why this happens for _enum_names.
OS::Print("Warning: Ignoring repeated request for initializer for %s\n",
field.ToCString());
return;
}
Thread* thread = Thread::Current();
StackZone zone(thread);
ParsedFunction* parsed_function = Parser::ParseStaticFieldInitializer(field);
parsed_function->AllocateVariables();
// Non-optimized code generator.
DartCompilationPipeline pipeline;
CompileParsedFunctionHelper helper(parsed_function, false, kNoOSRDeoptId);
helper.Compile(&pipeline);
const Function& initializer = parsed_function->function();
field.SetPrecompiledInitializer(initializer);
}
RawObject* Compiler::EvaluateStaticInitializer(const Field& field) {
#ifdef DART_PRECOMPILER
if (FLAG_precompilation) {
return Precompiler::EvaluateStaticInitializer(field);
}
#endif
ASSERT(field.is_static());
// The VM sets the field's value to transition_sentinel prior to
// evaluating the initializer value.
ASSERT(field.StaticValue() == Object::transition_sentinel().raw());
LongJumpScope jump;
if (setjmp(*jump.Set()) == 0) {
// Under precompilation, the initializer may have already been compiled, in
// which case use it. Under lazy compilation or early in precompilation, the
// initializer has not yet been created, so create it now, but don't bother
// remembering it because it won't be used again.
Function& initializer = Function::Handle();
if (!field.HasPrecompiledInitializer()) {
Thread* const thread = Thread::Current();
StackZone zone(thread);
ParsedFunction* parsed_function =
Parser::ParseStaticFieldInitializer(field);
// Under lazy compilation initializer has not yet been created, so create
// it now, but don't bother remembering it because it won't be used again.
ASSERT(!field.HasPrecompiledInitializer());
Thread* const thread = Thread::Current();
StackZone zone(thread);
ParsedFunction* parsed_function =
Parser::ParseStaticFieldInitializer(field);
parsed_function->AllocateVariables();
// Non-optimized code generator.
DartCompilationPipeline pipeline;
CompileParsedFunctionHelper helper(parsed_function, false, kNoOSRDeoptId);
helper.Compile(&pipeline);
initializer = parsed_function->function().raw();
Code::Handle(initializer.unoptimized_code()).set_var_descriptors(
Object::empty_var_descriptors());
} else {
initializer ^= field.PrecompiledInitializer();
}
parsed_function->AllocateVariables();
// Non-optimized code generator.
DartCompilationPipeline pipeline;
CompileParsedFunctionHelper helper(parsed_function, false, kNoOSRDeoptId);
helper.Compile(&pipeline);
const Function& initializer =
Function::Handle(parsed_function->function().raw());
Code::Handle(initializer.unoptimized_code()).set_var_descriptors(
Object::empty_var_descriptors());
// Invoke the function to evaluate the expression.
return DartEntry::InvokeFunction(initializer, Object::empty_array());
} else {
@ -1634,6 +1410,11 @@ RawObject* Compiler::EvaluateStaticInitializer(const Field& field) {
RawObject* Compiler::ExecuteOnce(SequenceNode* fragment) {
#ifdef DART_PRECOMPILER
if (FLAG_precompilation) {
return Precompiler::ExecuteOnce(fragment);
}
#endif
LongJumpScope jump;
if (setjmp(*jump.Set()) == 0) {
Thread* const thread = Thread::Current();
@ -1946,6 +1727,13 @@ void BackgroundCompiler::EnsureInit(Thread* thread) {
#else // DART_PRECOMPILED_RUNTIME
CompilationPipeline* CompilationPipeline::New(Zone* zone,
const Function& function) {
UNREACHABLE();
return NULL;
}
DEFINE_RUNTIME_ENTRY(CompileFunction, 1) {
const Function& function = Function::CheckedHandle(arguments.ArgAt(0));
FATAL3("Precompilation missed function %s (%" Pd ", %s)\n",
@ -2013,11 +1801,6 @@ RawError* Compiler::CompileAllFunctions(const Class& cls) {
}
void Compiler::CompileStaticInitializer(const Field& field) {
UNREACHABLE();
}
RawObject* Compiler::EvaluateStaticInitializer(const Field& field) {
ASSERT(field.HasPrecompiledInitializer());
const Function& initializer =
@ -2026,7 +1809,6 @@ RawObject* Compiler::EvaluateStaticInitializer(const Field& field) {
}
RawObject* Compiler::ExecuteOnce(SequenceNode* fragment) {
UNREACHABLE();
return Object::null();

View file

@ -17,7 +17,9 @@ class BackgroundCompilationQueue;
class Class;
class Code;
class CompilationWorkQueue;
class FlowGraph;
class Function;
class IndirectGotoInstr;
class Library;
class ParsedFunction;
class QueueElement;
@ -26,6 +28,54 @@ class Script;
class SequenceNode;
class CompilationPipeline : public ZoneAllocated {
public:
static CompilationPipeline* New(Zone* zone, const Function& function);
virtual void ParseFunction(ParsedFunction* parsed_function) = 0;
virtual FlowGraph* BuildFlowGraph(
Zone* zone,
ParsedFunction* parsed_function,
const ZoneGrowableArray<const ICData*>& ic_data_array,
intptr_t osr_id) = 0;
virtual void FinalizeCompilation() = 0;
virtual ~CompilationPipeline() { }
};
class DartCompilationPipeline : public CompilationPipeline {
public:
virtual void ParseFunction(ParsedFunction* parsed_function);
virtual FlowGraph* BuildFlowGraph(
Zone* zone,
ParsedFunction* parsed_function,
const ZoneGrowableArray<const ICData*>& ic_data_array,
intptr_t osr_id);
virtual void FinalizeCompilation();
};
class IrregexpCompilationPipeline : public CompilationPipeline {
public:
IrregexpCompilationPipeline() : backtrack_goto_(NULL) { }
virtual void ParseFunction(ParsedFunction* parsed_function);
virtual FlowGraph* BuildFlowGraph(
Zone* zone,
ParsedFunction* parsed_function,
const ZoneGrowableArray<const ICData*>& ic_data_array,
intptr_t osr_id);
virtual void FinalizeCompilation();
private:
IndirectGotoInstr* backtrack_goto_;
};
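
As a minimal usage sketch (assuming the compile helpers shown elsewhere in this patch), a pipeline is driven in three phases:

// Sketch only: how CompileParsedFunctionHelper / PrecompileParsedFunctionHelper
// drive a pipeline.
CompilationPipeline* pipeline = CompilationPipeline::New(zone, function);
pipeline->ParseFunction(parsed_function);             // AST or regexp parse
FlowGraph* graph = pipeline->BuildFlowGraph(zone, parsed_function,
                                            ic_data_array, osr_id);
// ... optimize 'graph' and emit code via FlowGraphCompiler ...
pipeline->FinalizeCompilation();  // e.g. irregexp computes its offset table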
class Compiler : public AllStatic {
public:
static const intptr_t kNoOSRDeoptId = Thread::kNoDeoptId;
@ -82,7 +132,6 @@ class Compiler : public AllStatic {
// The return value is either a RawInstance on success or a RawError
// on compilation failure.
static RawObject* EvaluateStaticInitializer(const Field& field);
static void CompileStaticInitializer(const Field& field);
// Generates local var descriptors and sets it in 'code'. Do not call if the
// local var descriptor already exists.
@ -93,8 +142,6 @@ class Compiler : public AllStatic {
// Returns Error::null() if there is no compilation error.
static RawError* CompileAllFunctions(const Class& cls);
static bool always_optimize();
// Notify the compiler that background (optimized) compilation has failed
// because the mutator thread changed the state (e.g., deoptimization,
// deferred loading). The background compilation may retry to compile

View file

@ -6034,6 +6034,8 @@ DART_EXPORT Dart_Handle Dart_TimelineAsyncEnd(const char* label,
}
// The precompiler is included in dart_no_snapshot and dart_noopt, and
// excluded from dart and dart_precompiled_runtime.
#if !defined(DART_PRECOMPILER)
DART_EXPORT Dart_Handle Dart_Precompile(

View file

@ -5,6 +5,7 @@
#include "vm/disassembler.h"
#include "vm/assembler.h"
#include "vm/deopt_instructions.h"
#include "vm/globals.h"
#include "vm/il_printer.h"
#include "vm/instructions.h"
@ -16,6 +17,8 @@
namespace dart {
DECLARE_FLAG(bool, trace_inlining_intervals);
void DisassembleToStdout::ConsumeInstruction(const Code& code,
char* hex_buffer,
intptr_t hex_size,
@ -182,4 +185,146 @@ void Disassembler::Disassemble(uword start,
}
}
void Disassembler::DisassembleCode(const Function& function, bool optimized) {
const char* function_fullname = function.ToFullyQualifiedCString();
THR_Print("Code for %sfunction '%s' {\n",
optimized ? "optimized " : "",
function_fullname);
const Code& code = Code::Handle(function.CurrentCode());
code.Disassemble();
THR_Print("}\n");
THR_Print("Pointer offsets for function: {\n");
// Pointer offsets are stored in descending order.
Object& obj = Object::Handle();
for (intptr_t i = code.pointer_offsets_length() - 1; i >= 0; i--) {
const uword addr = code.GetPointerOffsetAt(i) + code.EntryPoint();
obj = *reinterpret_cast<RawObject**>(addr);
THR_Print(" %d : %#" Px " '%s'\n",
code.GetPointerOffsetAt(i), addr, obj.ToCString());
}
THR_Print("}\n");
THR_Print("PC Descriptors for function '%s' {\n", function_fullname);
PcDescriptors::PrintHeaderString();
const PcDescriptors& descriptors =
PcDescriptors::Handle(code.pc_descriptors());
THR_Print("%s}\n", descriptors.ToCString());
uword start = Instructions::Handle(code.instructions()).EntryPoint();
const Array& deopt_table = Array::Handle(code.deopt_info_array());
intptr_t deopt_table_length = DeoptTable::GetLength(deopt_table);
if (deopt_table_length > 0) {
THR_Print("DeoptInfo: {\n");
Smi& offset = Smi::Handle();
TypedData& info = TypedData::Handle();
Smi& reason_and_flags = Smi::Handle();
for (intptr_t i = 0; i < deopt_table_length; ++i) {
DeoptTable::GetEntry(deopt_table, i, &offset, &info, &reason_and_flags);
const intptr_t reason =
DeoptTable::ReasonField::decode(reason_and_flags.Value());
ASSERT((0 <= reason) && (reason < ICData::kDeoptNumReasons));
THR_Print("%4" Pd ": 0x%" Px " %s (%s)\n",
i,
start + offset.Value(),
DeoptInfo::ToCString(deopt_table, info),
DeoptReasonToCString(
static_cast<ICData::DeoptReasonId>(reason)));
}
THR_Print("}\n");
}
const ObjectPool& object_pool = ObjectPool::Handle(code.GetObjectPool());
object_pool.DebugPrint();
THR_Print("Stackmaps for function '%s' {\n", function_fullname);
if (code.stackmaps() != Array::null()) {
const Array& stackmap_table = Array::Handle(code.stackmaps());
Stackmap& map = Stackmap::Handle();
for (intptr_t i = 0; i < stackmap_table.Length(); ++i) {
map ^= stackmap_table.At(i);
THR_Print("%s\n", map.ToCString());
}
}
THR_Print("}\n");
THR_Print("Variable Descriptors for function '%s' {\n",
function_fullname);
const LocalVarDescriptors& var_descriptors =
LocalVarDescriptors::Handle(code.GetLocalVarDescriptors());
intptr_t var_desc_length =
var_descriptors.IsNull() ? 0 : var_descriptors.Length();
String& var_name = String::Handle();
for (intptr_t i = 0; i < var_desc_length; i++) {
var_name = var_descriptors.GetName(i);
RawLocalVarDescriptors::VarInfo var_info;
var_descriptors.GetInfo(i, &var_info);
const int8_t kind = var_info.kind();
if (kind == RawLocalVarDescriptors::kSavedCurrentContext) {
THR_Print(" saved current CTX reg offset %d\n", var_info.index());
} else {
if (kind == RawLocalVarDescriptors::kContextLevel) {
THR_Print(" context level %d scope %d", var_info.index(),
var_info.scope_id);
} else if (kind == RawLocalVarDescriptors::kStackVar) {
THR_Print(" stack var '%s' offset %d",
var_name.ToCString(), var_info.index());
} else {
ASSERT(kind == RawLocalVarDescriptors::kContextVar);
THR_Print(" context var '%s' level %d offset %d",
var_name.ToCString(), var_info.scope_id, var_info.index());
}
THR_Print(" (valid %s-%s)\n", var_info.begin_pos.ToCString(),
var_info.end_pos.ToCString());
}
}
THR_Print("}\n");
THR_Print("Exception Handlers for function '%s' {\n", function_fullname);
const ExceptionHandlers& handlers =
ExceptionHandlers::Handle(code.exception_handlers());
THR_Print("%s}\n", handlers.ToCString());
{
THR_Print("Static call target functions {\n");
const Array& table = Array::Handle(code.static_calls_target_table());
Smi& offset = Smi::Handle();
Function& function = Function::Handle();
Code& code = Code::Handle();
for (intptr_t i = 0; i < table.Length();
i += Code::kSCallTableEntryLength) {
offset ^= table.At(i + Code::kSCallTableOffsetEntry);
function ^= table.At(i + Code::kSCallTableFunctionEntry);
code ^= table.At(i + Code::kSCallTableCodeEntry);
if (function.IsNull()) {
Class& cls = Class::Handle();
cls ^= code.owner();
if (cls.IsNull()) {
const String& code_name = String::Handle(code.Name());
THR_Print(" 0x%" Px ": %s, %p\n",
start + offset.Value(),
code_name.ToCString(),
code.raw());
} else {
THR_Print(" 0x%" Px ": allocation stub for %s, %p\n",
start + offset.Value(),
cls.ToCString(),
code.raw());
}
} else {
THR_Print(" 0x%" Px ": %s, %p\n",
start + offset.Value(),
function.ToFullyQualifiedCString(),
code.raw());
}
}
THR_Print("}\n");
}
if (optimized && FLAG_trace_inlining_intervals) {
code.DumpInlinedIntervals();
}
}
} // namespace dart

View file

@ -136,6 +136,8 @@ class Disassembler : public AllStatic {
static bool CanFindOldObject(uword addr);
static void DisassembleCode(const Function& function, bool optimized);
private:
static const int kHexadecimalBufferSize = 32;
static const int kUserReadableBufferSize = 256;

View file

@ -933,7 +933,7 @@ Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id,
return &intrinsic_slow_path_label_;
}
// No deoptimization allowed when 'always_optimize' is set.
// No deoptimization allowed when 'FLAG_precompilation' is set.
if (FLAG_precompilation) {
if (FLAG_trace_compiler) {
THR_Print(
@ -981,7 +981,7 @@ void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) {
RawArray* FlowGraphCompiler::CreateDeoptInfo(Assembler* assembler) {
// No deopt information if we 'always_optimize' (no deoptimization allowed).
// No deopt information if we precompile (no deoptimization allowed).
if (FLAG_precompilation) {
return Array::empty_array().raw();
}

View file

@ -250,6 +250,7 @@ bool FlowGraphOptimizer::TryCreateICData(InstanceCallInstr* call) {
return true;
}
#ifdef DART_PRECOMPILER
if (FLAG_precompilation &&
(isolate()->object_store()->unique_dynamic_targets() != Array::null())) {
// Check if the target is unique.
@ -270,6 +271,7 @@ bool FlowGraphOptimizer::TryCreateICData(InstanceCallInstr* call) {
return true;
}
}
#endif
// Check if getter or setter in function's class and class is currently leaf.
if (FLAG_guess_icdata_cid &&

View file

@ -29,6 +29,7 @@
#include "vm/intrinsifier.h"
#include "vm/object_store.h"
#include "vm/parser.h"
#include "vm/precompiler.h"
#include "vm/profiler.h"
#include "vm/report.h"
#include "vm/reusable_handles.h"
@ -4938,7 +4939,7 @@ bool Function::HasBreakpoint() const {
void Function::InstallOptimizedCode(const Code& code, bool is_osr) const {
DEBUG_ASSERT(IsMutatorOrAtSafepoint());
// We may not have previous code if 'always_optimize' is set.
// We may not have previous code if FLAG_precompilation is set.
if (!is_osr && HasCode()) {
Code::Handle(CurrentCode()).DisableDartCode();
}
@ -7342,7 +7343,8 @@ void Field::EvaluateInitializer() const {
ASSERT(is_static());
if (StaticValue() == Object::sentinel().raw()) {
SetStaticValue(Object::transition_sentinel());
Object& value = Object::Handle(Compiler::EvaluateStaticInitializer(*this));
const Object& value =
Object::Handle(Compiler::EvaluateStaticInitializer(*this));
if (value.IsError()) {
SetStaticValue(Object::null_instance());
Exceptions::PropagateError(Error::Cast(value));

View file

@ -4,17 +4,41 @@
#include "vm/precompiler.h"
#include "vm/assembler.h"
#include "vm/ast_printer.h"
#include "vm/branch_optimizer.h"
#include "vm/cha.h"
#include "vm/code_generator.h"
#include "vm/code_patcher.h"
#include "vm/compiler.h"
#include "vm/constant_propagator.h"
#include "vm/dart_entry.h"
#include "vm/disassembler.h"
#include "vm/exceptions.h"
#include "vm/flags.h"
#include "vm/flow_graph.h"
#include "vm/flow_graph_allocator.h"
#include "vm/flow_graph_builder.h"
#include "vm/flow_graph_compiler.h"
#include "vm/flow_graph_inliner.h"
#include "vm/flow_graph_optimizer.h"
#include "vm/flow_graph_type_propagator.h"
#include "vm/hash_table.h"
#include "vm/il_printer.h"
#include "vm/isolate.h"
#include "vm/log.h"
#include "vm/longjump.h"
#include "vm/object.h"
#include "vm/object_store.h"
#include "vm/os.h"
#include "vm/parser.h"
#include "vm/redundancy_elimination.h"
#include "vm/regexp_assembler.h"
#include "vm/regexp_parser.h"
#include "vm/resolver.h"
#include "vm/symbols.h"
#include "vm/tags.h"
#include "vm/timer.h"
namespace dart {
@ -29,6 +53,59 @@ DEFINE_FLAG(bool, collect_dynamic_function_names, false,
" identify unique targets");
DEFINE_FLAG(bool, print_unique_targets, false, "Print unique dynamic targets");
DEFINE_FLAG(bool, trace_precompiler, false, "Trace precompiler.");
DEFINE_FLAG(int, max_speculative_inlining_attempts, 1,
"Max number of attempts with speculative inlining (precompilation only)");
DECLARE_FLAG(bool, allocation_sinking);
DECLARE_FLAG(bool, common_subexpression_elimination);
DECLARE_FLAG(bool, constant_propagation);
DECLARE_FLAG(bool, disassemble);
DECLARE_FLAG(bool, disassemble_optimized);
DECLARE_FLAG(bool, loop_invariant_code_motion);
DECLARE_FLAG(bool, print_flow_graph);
DECLARE_FLAG(bool, print_flow_graph_optimized);
DECLARE_FLAG(bool, range_analysis);
DECLARE_FLAG(bool, trace_compiler);
DECLARE_FLAG(bool, trace_optimizing_compiler);
DECLARE_FLAG(bool, trace_bailout);
DECLARE_FLAG(bool, use_inlining);
DECLARE_FLAG(bool, verify_compiler);
DECLARE_FLAG(bool, precompilation);
DECLARE_FLAG(bool, huge_method_cutoff_in_code_size);
DECLARE_FLAG(bool, load_deferred_eagerly);
DECLARE_FLAG(bool, trace_failed_optimization_attempts);
DECLARE_FLAG(bool, trace_inlining_intervals);
DECLARE_FLAG(bool, trace_irregexp);
#ifdef DART_PRECOMPILER
class PrecompileParsedFunctionHelper : public ValueObject {
public:
PrecompileParsedFunctionHelper(ParsedFunction* parsed_function,
bool optimized)
: parsed_function_(parsed_function),
optimized_(optimized),
thread_(Thread::Current()) {
}
bool Compile(CompilationPipeline* pipeline);
private:
ParsedFunction* parsed_function() const { return parsed_function_; }
bool optimized() const { return optimized_; }
Thread* thread() const { return thread_; }
Isolate* isolate() const { return thread_->isolate(); }
void FinalizeCompilation(Assembler* assembler,
FlowGraphCompiler* graph_compiler,
FlowGraph* flow_graph);
ParsedFunction* parsed_function_;
const bool optimized_;
Thread* const thread_;
DISALLOW_COPY_AND_ASSIGN(PrecompileParsedFunctionHelper);
};
static void Jump(const Error& error) {
@ -407,7 +484,7 @@ void Precompiler::ProcessFunction(const Function& function) {
ASSERT(!function.is_abstract());
ASSERT(!function.IsRedirectingFactory());
error_ = Compiler::CompileFunction(thread_, function);
error_ = CompileFunction(thread_, function);
if (!error_.IsNull()) {
Jump(error_);
}
@ -591,7 +668,11 @@ void Precompiler::AddField(const Field& field) {
}
ASSERT(!Dart::IsRunningPrecompiledCode());
field.SetStaticValue(Instance::Handle(field.SavedInitialStaticValue()));
Compiler::CompileStaticInitializer(field);
const Function& initializer =
Function::Handle(CompileStaticInitializer(field));
if (!initializer.IsNull()) {
field.SetPrecompiledInitializer(initializer);
}
}
const Function& function =
@ -602,6 +683,127 @@ void Precompiler::AddField(const Field& field) {
}
RawFunction* Precompiler::CompileStaticInitializer(const Field& field) {
ASSERT(field.is_static());
if (field.HasPrecompiledInitializer()) {
// TODO(rmacnak): Investigate why this happens for _enum_names.
OS::Print("Warning: Ignoring repeated request for initializer for %s\n",
field.ToCString());
return Function::null();
}
Thread* thread = Thread::Current();
StackZone zone(thread);
ParsedFunction* parsed_function = Parser::ParseStaticFieldInitializer(field);
parsed_function->AllocateVariables();
// Non-optimized code generator.
DartCompilationPipeline pipeline;
PrecompileParsedFunctionHelper helper(parsed_function,
/* optimized = */ false);
helper.Compile(&pipeline);
return parsed_function->function().raw();
}
RawObject* Precompiler::EvaluateStaticInitializer(const Field& field) {
ASSERT(field.is_static());
// The VM sets the field's value to transition_sentinel prior to
// evaluating the initializer value.
ASSERT(field.StaticValue() == Object::transition_sentinel().raw());
LongJumpScope jump;
if (setjmp(*jump.Set()) == 0) {
// Under precompilation, the initializer may have already been compiled, in
// which case use it. Under lazy compilation or early in precompilation, the
// initializer has not yet been created, so create it now, but don't bother
// remembering it because it won't be used again.
Function& initializer = Function::Handle();
if (!field.HasPrecompiledInitializer()) {
initializer = CompileStaticInitializer(field);
Code::Handle(initializer.unoptimized_code()).set_var_descriptors(
Object::empty_var_descriptors());
} else {
initializer ^= field.PrecompiledInitializer();
}
// Invoke the function to evaluate the expression.
return DartEntry::InvokeFunction(initializer, Object::empty_array());
} else {
Thread* const thread = Thread::Current();
StackZone zone(thread);
const Error& error =
Error::Handle(thread->zone(), thread->sticky_error());
thread->clear_sticky_error();
return error.raw();
}
UNREACHABLE();
return Object::null();
}
RawObject* Precompiler::ExecuteOnce(SequenceNode* fragment) {
LongJumpScope jump;
if (setjmp(*jump.Set()) == 0) {
Thread* const thread = Thread::Current();
if (FLAG_trace_compiler) {
THR_Print("compiling expression: ");
AstPrinter::PrintNode(fragment);
}
// Create a dummy function object for the code generator.
// The function needs to be associated with a named Class: the interface
// Function fits the bill.
const char* kEvalConst = "eval_const";
const Function& func = Function::ZoneHandle(Function::New(
String::Handle(Symbols::New(kEvalConst)),
RawFunction::kRegularFunction,
true, // static function
false, // not const function
false, // not abstract
false, // not external
false, // not native
Class::Handle(Type::Handle(Type::Function()).type_class()),
fragment->token_pos()));
func.set_result_type(Object::dynamic_type());
func.set_num_fixed_parameters(0);
func.SetNumOptionalParameters(0, true);
// Manually generated AST, do not recompile.
func.SetIsOptimizable(false);
func.set_is_debuggable(false);
// We compile the function here, even though InvokeFunction() below
// would compile func automatically. We are checking fewer invariants
// here.
ParsedFunction* parsed_function = new ParsedFunction(thread, func);
parsed_function->SetNodeSequence(fragment);
fragment->scope()->AddVariable(parsed_function->EnsureExpressionTemp());
fragment->scope()->AddVariable(
parsed_function->current_context_var());
parsed_function->AllocateVariables();
// Non-optimized code generator.
DartCompilationPipeline pipeline;
PrecompileParsedFunctionHelper helper(parsed_function,
/* optimized = */ false);
helper.Compile(&pipeline);
Code::Handle(func.unoptimized_code()).set_var_descriptors(
Object::empty_var_descriptors());
const Object& result = PassiveObject::Handle(
DartEntry::InvokeFunction(func, Object::empty_array()));
return result.raw();
} else {
Thread* const thread = Thread::Current();
const Object& result =
PassiveObject::Handle(thread->sticky_error());
thread->clear_sticky_error();
return result.raw();
}
UNREACHABLE();
return Object::null();
}
void Precompiler::AddFunction(const Function& function) {
if (enqueued_functions_.Lookup(&function) != NULL) return;
@ -1349,4 +1551,649 @@ void Precompiler::ResetPrecompilerState() {
}
}
void PrecompileParsedFunctionHelper::FinalizeCompilation(
Assembler* assembler,
FlowGraphCompiler* graph_compiler,
FlowGraph* flow_graph) {
const Function& function = parsed_function()->function();
Zone* const zone = thread()->zone();
CSTAT_TIMER_SCOPE(thread(), codefinalizer_timer);
// CreateDeoptInfo uses the object pool and needs to be done before
// FinalizeCode.
const Array& deopt_info_array =
Array::Handle(zone, graph_compiler->CreateDeoptInfo(assembler));
INC_STAT(thread(), total_code_size,
deopt_info_array.Length() * sizeof(uword));
// Allocates instruction object. Since this occurs only at safepoint,
// there can be no concurrent access to the instruction page.
const Code& code = Code::Handle(
Code::FinalizeCode(function, assembler, optimized()));
code.set_is_optimized(optimized());
code.set_owner(function);
if (!function.IsOptimizable()) {
// A function with huge unoptimized code can become non-optimizable
// after generating unoptimized code.
function.set_usage_counter(INT_MIN);
}
const Array& intervals = graph_compiler->inlined_code_intervals();
INC_STAT(thread(), total_code_size,
intervals.Length() * sizeof(uword));
code.SetInlinedIntervals(intervals);
const Array& inlined_id_array =
Array::Handle(zone, graph_compiler->InliningIdToFunction());
INC_STAT(thread(), total_code_size,
inlined_id_array.Length() * sizeof(uword));
code.SetInlinedIdToFunction(inlined_id_array);
const Array& caller_inlining_id_map_array =
Array::Handle(zone, graph_compiler->CallerInliningIdMap());
INC_STAT(thread(), total_code_size,
caller_inlining_id_map_array.Length() * sizeof(uword));
code.SetInlinedCallerIdMap(caller_inlining_id_map_array);
graph_compiler->FinalizePcDescriptors(code);
code.set_deopt_info_array(deopt_info_array);
graph_compiler->FinalizeStackmaps(code);
graph_compiler->FinalizeVarDescriptors(code);
graph_compiler->FinalizeExceptionHandlers(code);
graph_compiler->FinalizeStaticCallTargetsTable(code);
if (optimized()) {
// Installs code while at safepoint.
ASSERT(thread()->IsMutatorThread());
function.InstallOptimizedCode(code, /* is_osr = */ false);
} else { // not optimized.
function.set_unoptimized_code(code);
function.AttachCode(code);
}
ASSERT(!parsed_function()->HasDeferredPrefixes());
ASSERT(FLAG_load_deferred_eagerly);
}
// Return false if bailed out.
// If optimized_result_code is not NULL then it is caller's responsibility
// to install code.
bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
ASSERT(FLAG_precompilation);
const Function& function = parsed_function()->function();
if (optimized() && !function.IsOptimizable()) {
return false;
}
bool is_compiled = false;
Zone* const zone = thread()->zone();
TimelineStream* compiler_timeline = isolate()->GetCompilerStream();
CSTAT_TIMER_SCOPE(thread(), codegen_timer);
HANDLESCOPE(thread());
// We may reattempt compilation if the function needs to be assembled using
// far branches on ARM and MIPS. In the else branch of the setjmp call,
// done is set to false, and use_far_branches is set to true if there is a
// longjmp from the ARM or MIPS assemblers. In all other paths through this
// while loop, done is set to true. use_far_branches is always false on ia32
// and x64.
bool done = false;
// volatile because the variable may be clobbered by a longjmp.
volatile bool use_far_branches = false;
volatile bool use_speculative_inlining =
FLAG_max_speculative_inlining_attempts > 0;
GrowableArray<intptr_t> inlining_black_list;
while (!done) {
const intptr_t prev_deopt_id = thread()->deopt_id();
thread()->set_deopt_id(0);
LongJumpScope jump;
const intptr_t val = setjmp(*jump.Set());
if (val == 0) {
FlowGraph* flow_graph = NULL;
// Class hierarchy analysis is registered with the isolate in the
// constructor and unregisters itself upon destruction.
CHA cha(thread());
// TimerScope needs an isolate to be properly terminated in case of a
// LongJump.
{
CSTAT_TIMER_SCOPE(thread(), graphbuilder_timer);
ZoneGrowableArray<const ICData*>* ic_data_array =
new(zone) ZoneGrowableArray<const ICData*>();
TimelineDurationScope tds(thread(),
compiler_timeline,
"BuildFlowGraph");
flow_graph = pipeline->BuildFlowGraph(zone,
parsed_function(),
*ic_data_array,
Compiler::kNoOSRDeoptId);
}
const bool print_flow_graph =
(FLAG_print_flow_graph ||
(optimized() && FLAG_print_flow_graph_optimized)) &&
FlowGraphPrinter::ShouldPrint(function);
if (print_flow_graph) {
FlowGraphPrinter::PrintGraph("Before Optimizations", flow_graph);
}
if (optimized()) {
TimelineDurationScope tds(thread(),
compiler_timeline,
"ComputeSSA");
CSTAT_TIMER_SCOPE(thread(), ssa_timer);
// Transform to SSA (virtual register 0 and no inlining arguments).
flow_graph->ComputeSSA(0, NULL);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
if (print_flow_graph) {
FlowGraphPrinter::PrintGraph("After SSA", flow_graph);
}
}
// Maps inline_id_to_function[inline_id] -> function. Top scope
// function has inline_id 0. The map is populated by the inliner.
GrowableArray<const Function*> inline_id_to_function;
// For a given inlining-id(index) specifies the caller's inlining-id.
GrowableArray<intptr_t> caller_inline_id;
// Collect all instance fields that are loaded in the graph and
// have non-generic type feedback attached to them that can
// potentially affect optimizations.
if (optimized()) {
TimelineDurationScope tds(thread(),
compiler_timeline,
"OptimizationPasses");
inline_id_to_function.Add(&function);
// Top scope function has no caller (-1).
caller_inline_id.Add(-1);
CSTAT_TIMER_SCOPE(thread(), graphoptimizer_timer);
FlowGraphOptimizer optimizer(flow_graph,
use_speculative_inlining,
&inlining_black_list);
optimizer.PopulateWithICData();
optimizer.ApplyClassIds();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
FlowGraphTypePropagator::Propagate(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
optimizer.ApplyICData();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
// Optimize (a << b) & c patterns, merge operations.
// Run early in order to have more opportunity to optimize left shifts.
optimizer.TryOptimizePatterns();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
FlowGraphInliner::SetInliningId(flow_graph, 0);
// Inlining (mutates the flow graph)
if (FLAG_use_inlining) {
TimelineDurationScope tds2(thread(),
compiler_timeline,
"Inlining");
CSTAT_TIMER_SCOPE(thread(), graphinliner_timer);
// Propagate types to create more inlining opportunities.
FlowGraphTypePropagator::Propagate(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
// Use propagated class-ids to create more inlining opportunities.
optimizer.ApplyClassIds();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
FlowGraphInliner inliner(flow_graph,
&inline_id_to_function,
&caller_inline_id,
use_speculative_inlining,
&inlining_black_list);
inliner.Inline();
// Use lists are maintained and validated by the inliner.
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
// Propagate types and eliminate more type tests.
FlowGraphTypePropagator::Propagate(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
{
TimelineDurationScope tds2(thread(),
compiler_timeline,
"ApplyClassIds");
// Use propagated class-ids to optimize further.
optimizer.ApplyClassIds();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
// Propagate types for potentially newly added instructions by
// ApplyClassIds(). Must occur before canonicalization.
FlowGraphTypePropagator::Propagate(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
// Do optimizations that depend on the propagated type information.
if (optimizer.Canonicalize()) {
// Invoke Canonicalize twice in order to fully canonicalize patterns
// like "if (a & const == 0) { }".
optimizer.Canonicalize();
}
DEBUG_ASSERT(flow_graph->VerifyUseLists());
{
TimelineDurationScope tds2(thread(),
compiler_timeline,
"BranchSimplifier");
BranchSimplifier::Simplify(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
IfConverter::Simplify(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
if (FLAG_constant_propagation) {
TimelineDurationScope tds2(thread(),
compiler_timeline,
"ConstantPropagation");
ConstantPropagator::Optimize(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
// A canonicalization pass to remove e.g. smi checks on smi constants.
optimizer.Canonicalize();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
// Canonicalization introduced more opportunities for constant
// propagation.
ConstantPropagator::Optimize(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
// Optimistically convert loop phis that have a single non-smi input
// coming from the loop pre-header into smi-phis.
if (FLAG_loop_invariant_code_motion) {
LICM licm(flow_graph);
licm.OptimisticallySpecializeSmiPhis();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
// Propagate types and eliminate even more type tests.
// Recompute types after constant propagation to infer more precise
// types for uses that were previously reached by now eliminated phis.
FlowGraphTypePropagator::Propagate(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
{
TimelineDurationScope tds2(thread(),
compiler_timeline,
"SelectRepresentations");
// Where beneficial convert Smi operations into Int32 operations.
// Only meaningful for 32-bit platforms right now.
optimizer.WidenSmiToInt32();
// Unbox doubles. Performed after constant propagation to minimize
// interference from phis merging double values and tagged
// values coming from dead paths.
optimizer.SelectRepresentations();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
{
TimelineDurationScope tds2(thread(),
compiler_timeline,
"CommonSubexpressionElinination");
if (FLAG_common_subexpression_elimination ||
FLAG_loop_invariant_code_motion) {
flow_graph->ComputeBlockEffects();
}
if (FLAG_common_subexpression_elimination) {
if (DominatorBasedCSE::Optimize(flow_graph)) {
DEBUG_ASSERT(flow_graph->VerifyUseLists());
optimizer.Canonicalize();
// Do another round of CSE to take secondary effects into account:
// e.g. when eliminating dependent loads (a.x[0] + a.x[0])
// TODO(fschneider): Change to a one-pass optimization pass.
if (DominatorBasedCSE::Optimize(flow_graph)) {
optimizer.Canonicalize();
}
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
}
// Run loop-invariant code motion right after load elimination since
// it depends on the numbering of loads from the previous
// load-elimination.
if (FLAG_loop_invariant_code_motion) {
LICM licm(flow_graph);
licm.Optimize();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
flow_graph->RemoveRedefinitions();
}
// Optimize (a << b) & c patterns, merge operations.
// Run after CSE in order to have more opportunity to merge
// instructions that have same inputs.
optimizer.TryOptimizePatterns();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
{
TimelineDurationScope tds2(thread(),
compiler_timeline,
"DeadStoreElimination");
DeadStoreElimination::Optimize(flow_graph);
}
if (FLAG_range_analysis) {
TimelineDurationScope tds2(thread(),
compiler_timeline,
"RangeAnalysis");
// Propagate types after store-load-forwarding. Some phis may have
// become smi phis that can be processed by range analysis.
FlowGraphTypePropagator::Propagate(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
// We have to perform range analysis after LICM because it
// optimistically moves CheckSmi through phis into loop preheaders
// making some phis smi.
optimizer.InferIntRanges();
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
if (FLAG_constant_propagation) {
TimelineDurationScope tds2(thread(),
compiler_timeline,
"ConstantPropagator::OptimizeBranches");
// Constant propagation can use information from range analysis to
// find unreachable branch targets and eliminate branches that have
// the same true- and false-target.
ConstantPropagator::OptimizeBranches(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
// Recompute types after code movement was done to ensure correct
// reaching types for hoisted values.
FlowGraphTypePropagator::Propagate(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
{
TimelineDurationScope tds2(thread(),
compiler_timeline,
"TryCatchAnalyzer::Optimize");
// Optimize try-blocks.
TryCatchAnalyzer::Optimize(flow_graph);
}
// Detach environments from the instructions that can't deoptimize.
// Do it before we attempt to perform allocation sinking to minimize
// amount of materializations it has to perform.
optimizer.EliminateEnvironments();
{
TimelineDurationScope tds2(thread(),
compiler_timeline,
"EliminateDeadPhis");
DeadCodeElimination::EliminateDeadPhis(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
}
if (optimizer.Canonicalize()) {
optimizer.Canonicalize();
}
// Attempt to sink allocations of temporary non-escaping objects to
// the deoptimization path.
AllocationSinking* sinking = NULL;
if (FLAG_allocation_sinking &&
(flow_graph->graph_entry()->SuccessorCount() == 1)) {
TimelineDurationScope tds2(thread(),
compiler_timeline,
"AllocationSinking::Optimize");
// TODO(fschneider): Support allocation sinking with try-catch.
sinking = new AllocationSinking(flow_graph);
sinking->Optimize();
}
DEBUG_ASSERT(flow_graph->VerifyUseLists());
DeadCodeElimination::EliminateDeadPhis(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
FlowGraphTypePropagator::Propagate(flow_graph);
DEBUG_ASSERT(flow_graph->VerifyUseLists());
{
TimelineDurationScope tds2(thread(),
compiler_timeline,
"SelectRepresentations");
// Ensure that all phis inserted by optimization passes have
// consistent representations.
optimizer.SelectRepresentations();
}
if (optimizer.Canonicalize()) {
// To fully remove redundant boxing (e.g. BoxDouble used only in
// environments and UnboxDouble instructions) instruction we
// first need to replace all their uses and then fold them away.
// For now we just repeat Canonicalize twice to do that.
// TODO(vegorov): implement a separate representation folding pass.
optimizer.Canonicalize();
}
DEBUG_ASSERT(flow_graph->VerifyUseLists());
if (sinking != NULL) {
TimelineDurationScope tds2(
thread(),
compiler_timeline,
"AllocationSinking::DetachMaterializations");
// Remove all MaterializeObject instructions inserted by allocation
// sinking from the flow graph and let them float on the side
// referenced only from environments. Register allocator will consider
// them as part of a deoptimization environment.
sinking->DetachMaterializations();
}
// Compute and store graph information (call and instruction counts)
// to be later used by the inliner.
FlowGraphInliner::CollectGraphInfo(flow_graph, true);
{
TimelineDurationScope tds2(thread(),
compiler_timeline,
"AllocateRegisters");
// Perform register allocation on the SSA graph.
FlowGraphAllocator allocator(*flow_graph);
allocator.AllocateRegisters();
}
if (print_flow_graph) {
FlowGraphPrinter::PrintGraph("After Optimizations", flow_graph);
}
}
ASSERT(inline_id_to_function.length() == caller_inline_id.length());
Assembler assembler(use_far_branches);
FlowGraphCompiler graph_compiler(&assembler, flow_graph,
*parsed_function(), optimized(),
inline_id_to_function,
caller_inline_id);
{
CSTAT_TIMER_SCOPE(thread(), graphcompiler_timer);
TimelineDurationScope tds(thread(),
compiler_timeline,
"CompileGraph");
graph_compiler.CompileGraph();
pipeline->FinalizeCompilation();
}
{
TimelineDurationScope tds(thread(),
compiler_timeline,
"FinalizeCompilation");
ASSERT(thread()->IsMutatorThread());
FinalizeCompilation(&assembler, &graph_compiler, flow_graph);
}
// Mark that this isolate now has compiled code.
isolate()->set_has_compiled_code(true);
// Exit the loop and the function with the correct result value.
is_compiled = true;
done = true;
} else {
// We bailed out or we encountered an error.
const Error& error = Error::Handle(thread()->sticky_error());
if (error.raw() == Object::branch_offset_error().raw()) {
// Compilation failed due to an out of range branch offset in the
// assembler. We try again (done = false) with far branches enabled.
done = false;
ASSERT(!use_far_branches);
use_far_branches = true;
} else if (error.raw() == Object::speculative_inlining_error().raw()) {
// The return value of setjmp is the deopt id of the check instruction
// that caused the bailout.
done = false;
#if defined(DEBUG)
ASSERT(use_speculative_inlining);
for (intptr_t i = 0; i < inlining_black_list.length(); ++i) {
ASSERT(inlining_black_list[i] != val);
}
#endif
inlining_black_list.Add(val);
const intptr_t max_attempts = FLAG_max_speculative_inlining_attempts;
if (inlining_black_list.length() >= max_attempts) {
use_speculative_inlining = false;
if (FLAG_trace_compiler || FLAG_trace_optimizing_compiler) {
THR_Print("Disabled speculative inlining after %" Pd " attempts.\n",
inlining_black_list.length());
}
}
} else {
// If the error isn't due to an out of range branch offset, we don't
// try again (done = true), and indicate that we did not finish
// compiling (is_compiled = false).
if (FLAG_trace_bailout) {
THR_Print("%s\n", error.ToErrorCString());
}
done = true;
}
// Clear the error if it was not a real error, but just a bailout.
if (error.IsLanguageError() &&
(LanguageError::Cast(error).kind() == Report::kBailout)) {
thread()->clear_sticky_error();
}
is_compiled = false;
}
// Reset global isolate state.
thread()->set_deopt_id(prev_deopt_id);
}
return is_compiled;
}
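
To summarize the retry protocol above (a simplified sketch, not part of the patch; 'failing_deopt_id' stands for the setjmp value 'val'): each failed speculative check blacklists its deopt id and triggers another compile, until FLAG_max_speculative_inlining_attempts is exhausted.

// Simplified sketch of the speculative-inlining retry loop.
GrowableArray<intptr_t> inlining_black_list;     // deopt ids of failed checks
bool use_speculative_inlining =
    FLAG_max_speculative_inlining_attempts > 0;
bool done = false;
while (!done) {
  // ... compile with use_speculative_inlining and inlining_black_list ...
  if (error.raw() == Object::speculative_inlining_error().raw()) {
    inlining_black_list.Add(failing_deopt_id);   // skip this check next time
    if (inlining_black_list.length() >=
        FLAG_max_speculative_inlining_attempts) {
      use_speculative_inlining = false;          // give up on speculation
    }
    done = false;                                // retry compilation
  } else {
    done = true;                                 // success or hard error
  }
}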
static RawError* PrecompileFunctionHelper(CompilationPipeline* pipeline,
const Function& function,
bool optimized) {
// Check that we optimize, except if the function is not optimizable.
ASSERT(FLAG_precompilation);
ASSERT(!function.IsOptimizable() || optimized);
ASSERT(!function.HasCode());
LongJumpScope jump;
if (setjmp(*jump.Set()) == 0) {
Thread* const thread = Thread::Current();
StackZone stack_zone(thread);
Zone* const zone = stack_zone.GetZone();
const bool trace_compiler =
FLAG_trace_compiler ||
(FLAG_trace_optimizing_compiler && optimized);
Timer per_compile_timer(trace_compiler, "Compilation time");
per_compile_timer.Start();
ParsedFunction* parsed_function = new(zone) ParsedFunction(
thread, Function::ZoneHandle(zone, function.raw()));
if (trace_compiler) {
THR_Print(
"Precompiling %sfunction: '%s' @ token %" Pd ", size %" Pd "\n",
(optimized ? "optimized " : ""),
function.ToFullyQualifiedCString(),
function.token_pos().Pos(),
(function.end_token_pos().Pos() - function.token_pos().Pos()));
}
INC_STAT(thread, num_functions_compiled, 1);
if (optimized) {
INC_STAT(thread, num_functions_optimized, 1);
}
{
HANDLESCOPE(thread);
const int64_t num_tokens_before = STAT_VALUE(thread, num_tokens_consumed);
pipeline->ParseFunction(parsed_function);
const int64_t num_tokens_after = STAT_VALUE(thread, num_tokens_consumed);
INC_STAT(thread,
num_func_tokens_compiled,
num_tokens_after - num_tokens_before);
}
PrecompileParsedFunctionHelper helper(parsed_function, optimized);
const bool success = helper.Compile(pipeline);
if (!success) {
// Encountered error.
Error& error = Error::Handle();
// We got an error during compilation.
error = thread->sticky_error();
thread->clear_sticky_error();
ASSERT(error.IsLanguageError() &&
LanguageError::Cast(error).kind() != Report::kBailout);
return error.raw();
}
per_compile_timer.Stop();
if (trace_compiler && success) {
THR_Print("--> '%s' entry: %#" Px " size: %" Pd " time: %" Pd64 " us\n",
function.ToFullyQualifiedCString(),
Code::Handle(function.CurrentCode()).EntryPoint(),
Code::Handle(function.CurrentCode()).Size(),
per_compile_timer.TotalElapsedTime());
}
if (FLAG_disassemble && FlowGraphPrinter::ShouldPrint(function)) {
Disassembler::DisassembleCode(function, optimized);
} else if (FLAG_disassemble_optimized &&
optimized &&
FlowGraphPrinter::ShouldPrint(function)) {
// TODO(fschneider): Print unoptimized code along with the optimized code.
THR_Print("*** BEGIN CODE\n");
Disassembler::DisassembleCode(function, true);
THR_Print("*** END CODE\n");
}
return Error::null();
} else {
Thread* const thread = Thread::Current();
StackZone stack_zone(thread);
Error& error = Error::Handle();
// We got an error during compilation.
error = thread->sticky_error();
thread->clear_sticky_error();
// Precompilation may encounter compile-time errors.
// Do not attempt to optimize functions that can cause errors.
function.set_is_optimizable(false);
return error.raw();
}
UNREACHABLE();
return Error::null();
}
RawError* Precompiler::CompileFunction(Thread* thread,
const Function& function) {
VMTagScope tagScope(thread, VMTag::kCompileUnoptimizedTagId);
TIMELINE_FUNCTION_COMPILATION_DURATION(thread, "Function", function);
CompilationPipeline* pipeline =
CompilationPipeline::New(thread->zone(), function);
ASSERT(FLAG_precompilation);
const bool optimized = function.IsOptimizable(); // False for natives.
return PrecompileFunctionHelper(pipeline, function, optimized);
}
#endif // DART_PRECOMPILER
} // namespace dart

View file

@ -18,6 +18,7 @@ class Field;
class Function;
class GrowableObjectArray;
class RawError;
class SequenceNode;
class String;
class SymbolKeyValueTrait {
@ -181,9 +182,17 @@ class Precompiler : public ValueObject {
const String& fname,
Object* function);
static RawError* CompileFunction(Thread* thread, const Function& function);
static RawObject* EvaluateStaticInitializer(const Field& field);
static RawObject* ExecuteOnce(SequenceNode* fragment);
private:
Precompiler(Thread* thread, bool reset_fields);
static RawFunction* CompileStaticInitializer(const Field& field);
void DoCompileAll(Dart_QualifiedFunctionName embedder_entry_points[]);
void ClearAllCode();
void AddRoots(Dart_QualifiedFunctionName embedder_entry_points[]);