[VM] Bare instructions - Part 4: Add --use-bare-instructions flag to AOT compiler & runtime

This is the final CL which adds a new --use-bare-instructions flag to
the VM.

If this flag is set during AOT compilation, we will:

  * Build one global object pool (abbr: GOP) which all code objects
    share. This GOP will be stored in the object store. The PP register
    is populated in the enter dart stub and restored when returning
    from native calls.

  * Get rid of the CODE_REG/PP slots in Dart frames. Instead, the
    compiled code uses the global object pool, which is always in PP.

  * Start emitting pc-relative calls for calls between two Dart
    functions or when invoking a stub.
    Limitation: We only emit pc-relative calls between two code objects
    in the same isolate (because the image writer writes instruction
    objects for the vm-isolate and main-isolate separately).

  * Do compile-time relocation of those static calls after the
    precompiler has finished its work, but before writing the snapshot.
    This patches all instruction objects containing pc-relative calls
    to encode the right .text distance (see the relocation sketch after
    this list).

  * Emit a sorted list of code objects in ObjectStore::reverse_code_table,
    which the AOT runtime uses to map a PC back to its Code object
    (where all metadata, e.g. stack maps, catch entry moves, and pc
    descriptors, is available); see the lookup sketch after this list.
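
To make the relocation step concrete, here is a minimal C++ sketch of
patching one pc-relative call once the final .text layout is known. It
is illustrative only: the struct and function names (CallSite,
PatchCallDistance) are made up for this example, and the real relocator
encodes the distance inside an architecture-specific call instruction
rather than storing a raw 32-bit value.

  #include <stdint.h>
  #include <string.h>

  // One recorded call site, expressed in final .text offsets.
  struct CallSite {
    intptr_t caller_text_offset;   // Where the caller's payload starts.
    intptr_t call_offset;          // Offset of the call inside the caller.
    intptr_t callee_text_offset;   // Where the callee's payload starts.
    intptr_t callee_entry_offset;  // Checked or unchecked entry offset.
  };

  void PatchCallDistance(uint8_t* text, const CallSite& site) {
    const intptr_t call_pc = site.caller_text_offset + site.call_offset;
    const intptr_t target =
        site.callee_text_offset + site.callee_entry_offset;
    // The distance is encoded relative to the call instruction itself.
    const int32_t distance = static_cast<int32_t>(target - call_pc);
    memcpy(text + call_pc, &distance, sizeof(distance));  // Simplified.
  }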
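
Similarly, a minimal sketch of the PC -> Code lookup the runtime can do
with a table sorted by payload address. The CodeEntry struct and
LookupCodeByPc helper are hypothetical; the VM's actual lookup goes
through its reverse-PC lookup cache, and this only shows the
binary-search idea.

  #include <stdint.h>
  #include <vector>

  // Illustrative stand-in for a code object's instruction range.
  struct CodeEntry {
    uintptr_t payload_start;
    uintptr_t payload_size;
    const void* code;  // Stand-in for the Code object holding the metadata.
  };

  // The table is sorted by payload_start; find the entry covering pc.
  const void* LookupCodeByPc(const std::vector<CodeEntry>& table,
                             uintptr_t pc) {
    intptr_t lo = 0;
    intptr_t hi = static_cast<intptr_t>(table.size()) - 1;
    while (lo <= hi) {
      const intptr_t mid = lo + (hi - lo) / 2;
      const CodeEntry& entry = table[mid];
      if (pc < entry.payload_start) {
        hi = mid - 1;
      } else if (pc >= entry.payload_start + entry.payload_size) {
        lo = mid + 1;
      } else {
        return entry.code;  // pc lies inside this code object's instructions.
      }
    }
    return nullptr;  // No code object covers this pc.
  }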

Issue https://github.com/dart-lang/sdk/issues/33274

Change-Id: I6c5dd2b1571e3a889b27e804a24c2986c71e03b6
Reviewed-on: https://dart-review.googlesource.com/c/85769
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
Martin Kustermann 2018-12-14 16:03:04 +00:00 committed by commit-bot@chromium.org
parent 9ec260fd8e
commit f205292227
50 changed files with 882 additions and 229 deletions

View file

@ -562,6 +562,7 @@ typedef struct {
bool use_osr;
bool obfuscate;
Dart_QualifiedFunctionName* entry_points;
bool use_bare_instructions;
bool load_vmservice_library;
bool unsafe_trust_strong_mode_types;
} Dart_IsolateFlags;

View file

@ -2046,7 +2046,7 @@ void ClassFinalizer::RehashTypes() {
object_store->set_canonical_type_arguments(typeargs_table.Release());
}
void ClassFinalizer::ClearAllCode() {
void ClassFinalizer::ClearAllCode(bool including_nonchanging_cids) {
class ClearCodeFunctionVisitor : public FunctionVisitor {
void Visit(const Function& function) {
function.ClearCode();
@ -2057,14 +2057,34 @@ void ClassFinalizer::ClearAllCode() {
ProgramVisitor::VisitFunctions(&function_visitor);
class ClearCodeClassVisitor : public ClassVisitor {
public:
explicit ClearCodeClassVisitor(bool force) : force_(force) {}
void Visit(const Class& cls) {
if (cls.id() >= kNumPredefinedCids) {
if (force_ || cls.id() >= kNumPredefinedCids) {
cls.DisableAllocationStub();
}
}
private:
bool force_;
};
ClearCodeClassVisitor class_visitor;
ClearCodeClassVisitor class_visitor(including_nonchanging_cids);
ProgramVisitor::VisitClasses(&class_visitor);
// Apart from normal function code and allocation stubs we have two global
// code objects to clear.
if (including_nonchanging_cids) {
auto thread = Thread::Current();
auto object_store = thread->isolate()->object_store();
auto& null_code = Code::Handle(thread->zone());
object_store->set_build_method_extractor_code(null_code);
auto& miss_function = Function::Handle(
thread->zone(), object_store->megamorphic_miss_function());
miss_function.ClearCode();
object_store->SetMegamorphicMissHandler(null_code, miss_function);
}
}
} // namespace dart

View file

@ -43,7 +43,7 @@ class ClassFinalizer : public AllStatic {
static void SortClasses();
static void RemapClassIds(intptr_t* old_to_new_cid);
static void RehashTypes();
static void ClearAllCode();
static void ClearAllCode(bool including_nonchanging_cids = false);
// Return whether processing pending classes (ObjectStore::pending_classes_)
// failed. The function returns true if the processing was successful.

View file

@ -1446,13 +1446,41 @@ class CodeDeserializationCluster : public DeserializationCluster {
~CodeDeserializationCluster() {}
void ReadAlloc(Deserializer* d) {
const bool is_vm_object = d->isolate() == Dart::vm_isolate();
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
for (intptr_t i = 0; i < count; i++) {
d->AssignRef(AllocateUninitialized(old_space, Code::InstanceSize(0)));
// Build an array of code objects representing the order in which the
// [Code]'s instructions will be located in memory.
const bool build_code_order =
FLAG_precompiled_mode && FLAG_use_bare_instructions;
RawArray* code_order = nullptr;
const intptr_t code_order_length = d->code_order_length();
if (build_code_order) {
code_order = static_cast<RawArray*>(
AllocateUninitialized(old_space, Array::InstanceSize(count)));
Deserializer::InitializeHeader(code_order, kArrayCid,
Array::InstanceSize(count), is_vm_object,
/*is_canonical=*/false);
code_order->ptr()->type_arguments_ = TypeArguments::null();
code_order->ptr()->length_ = Smi::New(code_order_length);
}
for (intptr_t i = 0; i < count; i++) {
auto code = AllocateUninitialized(old_space, Code::InstanceSize(0));
d->AssignRef(code);
if (code_order != nullptr && i < code_order_length) {
code_order->ptr()->data()[i] = code;
}
}
if (code_order != nullptr) {
const auto& code_order_table = Array::Handle(code_order);
d->isolate()->object_store()->set_code_order_table(code_order_table);
}
stop_index_ = d->next_index();
}

View file

@ -24,6 +24,7 @@
#include "vm/compiler/compiler_pass.h"
#include "vm/compiler/compiler_state.h"
#include "vm/compiler/frontend/flow_graph_builder.h"
#include "vm/compiler/frontend/kernel_to_il.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/dart_entry.h"
#include "vm/exceptions.h"
@ -58,6 +59,7 @@ namespace dart {
#define Z (zone())
DEFINE_FLAG(bool, print_unique_targets, false, "Print unique dynamic targets");
DEFINE_FLAG(bool, print_gop, false, "Print global object pool");
DEFINE_FLAG(bool, trace_precompiler, false, "Trace precompiler.");
DEFINE_FLAG(
int,
@ -84,6 +86,8 @@ DECLARE_FLAG(int, inlining_constant_arguments_max_size_threshold);
DECLARE_FLAG(int, inlining_constant_arguments_min_size_threshold);
DECLARE_FLAG(bool, print_instruction_stats);
Precompiler* Precompiler::singleton_ = nullptr;
#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_DBC) && \
!defined(TARGET_ARCH_IA32)
@ -173,7 +177,15 @@ Precompiler::Precompiler(Thread* thread)
types_to_retain_(),
consts_to_retain_(),
error_(Error::Handle()),
get_runtime_type_is_unique_(false) {}
get_runtime_type_is_unique_(false) {
ASSERT(Precompiler::singleton_ == NULL);
Precompiler::singleton_ = this;
}
Precompiler::~Precompiler() {
ASSERT(Precompiler::singleton_ == this);
Precompiler::singleton_ = NULL;
}
void Precompiler::DoCompileAll() {
ASSERT(I->compilation_allowed());
@ -182,8 +194,16 @@ void Precompiler::DoCompileAll() {
StackZone stack_zone(T);
zone_ = stack_zone.GetZone();
if (FLAG_use_bare_instructions) {
// Since we keep the object pool until the end of AOT compilation, it
// will hang on to its entries until the very end. Therefore we have
// to use handles which survive that long, so we use [zone_] here.
global_object_pool_wrapper_.InitializeWithZone(zone_);
}
{
HANDLESCOPE(T);
// Make sure class hierarchy is stable before compilation so that CHA
// can be used. Also ensures lookup of entry points won't miss functions
// because their class hasn't been finalized yet.
@ -202,10 +222,35 @@ void Precompiler::DoCompileAll() {
// Precompile constructors to compute information such as
// optimized instruction count (used in inlining heuristics).
ClassFinalizer::ClearAllCode();
ClassFinalizer::ClearAllCode(
/*including_nonchanging_cids=*/FLAG_use_bare_instructions);
PrecompileConstructors();
ClassFinalizer::ClearAllCode();
ClassFinalizer::ClearAllCode(
/*including_nonchanging_cids=*/FLAG_use_bare_instructions);
// All stubs have already been generated, all of them share the same pool.
// We use that pool to initialize our global object pool, to guarantee
// stubs as well as code compiled from here on will have the same pool.
if (FLAG_use_bare_instructions) {
// We use any stub here to get its object pool (all stubs share the
// same object pool in bare instructions mode).
const Code& code = StubCode::InterpretCall();
const ObjectPool& stub_pool = ObjectPool::Handle(code.object_pool());
global_object_pool_wrapper()->Reset();
global_object_pool_wrapper()->InitializeFrom(stub_pool);
// We have two global code objects we need to re-generate with the new
// global object pool, namely the
// - megamorphic miss handler code and the
// - build method extractor code
MegamorphicCacheTable::ReInitMissHandlerCode(
isolate_, global_object_pool_wrapper());
I->object_store()->set_build_method_extractor_code(
Code::Handle(StubCode::GetBuildMethodExtractorStub(
global_object_pool_wrapper())));
}
CollectDynamicFunctionNames();
@ -221,6 +266,21 @@ void Precompiler::DoCompileAll() {
// [Type]-specialized stubs.
AttachOptimizedTypeTestingStub();
if (FLAG_use_bare_instructions) {
// Now we generate the actual object pool instance and attach it to the
// object store. The AOT runtime will use it from there in the enter
// dart code stub.
const auto& pool =
ObjectPool::Handle(global_object_pool_wrapper()->MakeObjectPool());
I->object_store()->set_global_object_pool(pool);
global_object_pool_wrapper()->Reset();
if (FLAG_print_gop) {
THR_Print("Global object pool:\n");
pool.DebugPrint();
}
}
I->set_compilation_allowed(false);
TraceForRetainedFunctions();
@ -433,6 +493,10 @@ void Precompiler::CollectCallbackFields() {
}
void Precompiler::ProcessFunction(const Function& function) {
const intptr_t gop_offset =
FLAG_use_bare_instructions ? global_object_pool_wrapper()->CurrentLength()
: 0;
if (!function.HasCode()) {
function_count_++;
@ -464,10 +528,10 @@ void Precompiler::ProcessFunction(const Function& function) {
}
ASSERT(function.HasCode());
AddCalleesOf(function);
AddCalleesOf(function, gop_offset);
}
void Precompiler::AddCalleesOf(const Function& function) {
void Precompiler::AddCalleesOf(const Function& function, intptr_t gop_offset) {
ASSERT(function.HasCode());
const Code& code = Code::Handle(Z, function.CurrentCode());
@ -494,12 +558,24 @@ void Precompiler::AddCalleesOf(const Function& function) {
FATAL("Callee scanning unimplemented for IA32");
#endif
const ObjectPool& pool = ObjectPool::Handle(Z, code.GetObjectPool());
String& selector = String::Handle(Z);
for (intptr_t i = 0; i < pool.Length(); i++) {
if (pool.TypeAt(i) == ObjectPool::kTaggedObject) {
entry = pool.ObjectAt(i);
AddCalleesOfHelper(entry, &selector, &cls);
if (FLAG_use_bare_instructions) {
for (intptr_t i = gop_offset;
i < global_object_pool_wrapper()->CurrentLength(); i++) {
const auto& wrapper_entry = global_object_pool_wrapper()->EntryAt(i);
if (wrapper_entry.type() == ObjectPool::kTaggedObject) {
const auto& entry = *wrapper_entry.obj_;
AddCalleesOfHelper(entry, &selector, &cls);
}
}
} else {
const auto& pool = ObjectPool::Handle(Z, code.object_pool());
auto& entry = Object::Handle(Z);
for (intptr_t i = 0; i < pool.Length(); i++) {
if (pool.TypeAt(i) == ObjectPool::kTaggedObject) {
entry = pool.ObjectAt(i);
AddCalleesOfHelper(entry, &selector, &cls);
}
}
}
@ -672,7 +748,7 @@ void Precompiler::AddTypeArguments(const TypeArguments& args) {
}
}
void Precompiler::AddConstObject(const Instance& instance) {
void Precompiler::AddConstObject(const class Instance& instance) {
// Types and type arguments require special handling.
if (instance.IsAbstractType()) {
AddType(AbstractType::Cast(instance));
@ -770,12 +846,16 @@ void Precompiler::AddField(const Field& field) {
if (FLAG_trace_precompiler) {
THR_Print("Precompiling initializer for %s\n", field.ToCString());
}
const intptr_t gop_offset =
FLAG_use_bare_instructions
? global_object_pool_wrapper()->CurrentLength()
: 0;
ASSERT(Dart::vm_snapshot_kind() != Snapshot::kFullAOT);
const Function& initializer =
Function::Handle(Z, CompileStaticInitializer(field));
ASSERT(!initializer.IsNull());
field.SetPrecompiledInitializer(initializer);
AddCalleesOf(initializer);
AddCalleesOf(initializer, gop_offset);
}
}
}
@ -792,7 +872,7 @@ RawFunction* Precompiler::CompileStaticInitializer(const Field& field) {
kernel::ParseStaticFieldInitializer(zone, field);
DartCompilationPipeline pipeline;
PrecompileParsedFunctionHelper helper(/* precompiler = */ NULL,
PrecompileParsedFunctionHelper helper(Precompiler::Instance(),
parsed_function,
/* optimized = */ true);
if (!helper.Compile(&pipeline)) {
@ -1527,12 +1607,12 @@ void Precompiler::DropScriptData() {
}
void Precompiler::TraceTypesFromRetainedClasses() {
Library& lib = Library::Handle(Z);
Class& cls = Class::Handle(Z);
Array& members = Array::Handle(Z);
Array& constants = Array::Handle(Z);
GrowableObjectArray& retained_constants = GrowableObjectArray::Handle(Z);
Instance& constant = Instance::Handle(Z);
auto& lib = Library::Handle(Z);
auto& cls = Class::Handle(Z);
auto& members = Array::Handle(Z);
auto& constants = Array::Handle(Z);
auto& retained_constants = GrowableObjectArray::Handle(Z);
auto& constant = Instance::Handle(Z);
for (intptr_t i = 0; i < libraries_.Length(); i++) {
lib ^= libraries_.At(i);
@ -1837,33 +1917,41 @@ void Precompiler::BindStaticCalls() {
code_ = function.CurrentCode();
table_ = code_.static_calls_target_table();
StaticCallsTable static_calls(table_);
bool only_call_via_code = true;
for (auto& view : static_calls) {
kind_and_offset_ = view.Get<Code::kSCallTableKindAndOffset>();
auto kind = Code::KindField::decode(kind_and_offset_.Value());
ASSERT(kind == Code::kCallViaCode);
auto pc_offset = Code::OffsetField::decode(kind_and_offset_.Value());
target_ = view.Get<Code::kSCallTableFunctionTarget>();
if (target_.IsNull()) {
target_ = view.Get<Code::kSCallTableCodeTarget>();
ASSERT(!Code::Cast(target_).IsFunctionCode());
// Allocation stub or AllocateContext or AllocateArray or ...
if (kind == Code::kCallViaCode) {
target_ = view.Get<Code::kSCallTableFunctionTarget>();
if (target_.IsNull()) {
target_ = view.Get<Code::kSCallTableCodeTarget>();
ASSERT(!Code::Cast(target_).IsFunctionCode());
// Allocation stub or AllocateContext or AllocateArray or ...
} else {
// Static calls initially call the CallStaticFunction stub because
// their target might not be compiled yet. After tree shaking, all
// static call targets are compiled.
// Cf. runtime entry PatchStaticCall called from CallStaticFunction
// stub.
auto& fun = Function::Cast(target_);
ASSERT(fun.HasCode());
target_code_ ^= fun.CurrentCode();
uword pc = pc_offset + code_.PayloadStart();
CodePatcher::PatchStaticCallAt(pc, code_, target_code_);
}
} else {
// Static calls initially call the CallStaticFunction stub because
// their target might not be compiled yet. After tree shaking, all
// static call targets are compiled.
// Cf. runtime entry PatchStaticCall called from CallStaticFunction
// stub.
const auto& fun = Function::Cast(target_);
ASSERT(fun.HasCode());
target_code_ ^= fun.CurrentCode();
uword pc = pc_offset + code_.PayloadStart();
CodePatcher::PatchStaticCallAt(pc, code_, target_code_);
ASSERT(kind == Code::kPcRelativeCall ||
kind == Code::kPcRelativeTailCall);
only_call_via_code = false;
}
}
// We won't patch static calls anymore, so drop the static call table to
// save space.
code_.set_static_calls_target_table(Object::empty_array());
if (only_call_via_code) {
code_.set_static_calls_target_table(Object::empty_array());
}
}
private:
@ -1889,6 +1977,7 @@ void Precompiler::BindStaticCalls() {
}
void Precompiler::SwitchICCalls() {
ASSERT(!I->compilation_allowed());
#if !defined(TARGET_ARCH_DBC)
// Now that all functions have been compiled, we can switch to an instance
// call sequence that loads the Code object and entry point directly from
@ -1978,17 +2067,23 @@ void Precompiler::SwitchICCalls() {
};
ICCallSwitcher switcher(Z);
SwitchICCallsVisitor visitor(&switcher, Z);
auto& gop = ObjectPool::Handle(I->object_store()->global_object_pool());
ASSERT(gop.IsNull() != FLAG_use_bare_instructions);
if (FLAG_use_bare_instructions) {
switcher.SwitchPool(gop);
} else {
SwitchICCallsVisitor visitor(&switcher, Z);
// We need both iterations to ensure we visit all the functions that might end
// up in the snapshot. The ProgramVisitor will miss closures from duplicated
// finally clauses, and not all functions are compiled through the
// tree-shaker's queue
ProgramVisitor::VisitFunctions(&visitor);
FunctionSet::Iterator it(enqueued_functions_.GetIterator());
for (const Function** current = it.Next(); current != NULL;
current = it.Next()) {
visitor.Visit(**current);
// We need both iterations to ensure we visit all the functions that might
// end up in the snapshot. The ProgramVisitor will miss closures from
// duplicated finally clauses, and not all functions are compiled through
// the tree-shaker's queue
ProgramVisitor::VisitFunctions(&visitor);
FunctionSet::Iterator it(enqueued_functions_.GetIterator());
for (const Function** current = it.Next(); current != NULL;
current = it.Next()) {
visitor.Visit(**current);
}
}
#endif
}
@ -2115,9 +2210,12 @@ void PrecompileParsedFunctionHelper::FinalizeCompilation(
Array::Handle(zone, graph_compiler->CreateDeoptInfo(assembler));
// Allocates instruction object. Since this occurs only at safepoint,
// there can be no concurrent access to the instruction page.
const Code& code = Code::Handle(Code::FinalizeCode(
function, graph_compiler, assembler, Code::PoolAttachment::kAttachPool,
optimized(), stats));
const auto pool_attachment = FLAG_use_bare_instructions
? Code::PoolAttachment::kNotAttachPool
: Code::PoolAttachment::kAttachPool;
const Code& code =
Code::Handle(Code::FinalizeCode(function, graph_compiler, assembler,
pool_attachment, optimized(), stats));
code.set_is_optimized(optimized());
code.set_owner(function);
if (!function.IsOptimizable()) {
@ -2244,8 +2342,14 @@ bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
ASSERT(pass_state.inline_id_to_function.length() ==
pass_state.caller_inline_id.length());
ASSERT(!FLAG_use_bare_instructions || precompiler_ != nullptr);
ObjectPoolWrapper object_pool;
Assembler assembler(&object_pool, use_far_branches);
ObjectPoolWrapper* active_object_pool_wrapper =
FLAG_use_bare_instructions
? precompiler_->global_object_pool_wrapper()
: &object_pool;
Assembler assembler(active_object_pool_wrapper, use_far_branches);
CodeStatistics* function_stats = NULL;
if (FLAG_print_instruction_stats) {

View file

@ -6,6 +6,7 @@
#define RUNTIME_VM_COMPILER_AOT_PRECOMPILER_H_
#include "vm/allocation.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/hash_map.h"
#include "vm/hash_table.h"
#include "vm/object.h"
@ -246,8 +247,18 @@ class Precompiler : public ValueObject {
return get_runtime_type_is_unique_;
}
ObjectPoolWrapper* global_object_pool_wrapper() {
ASSERT(FLAG_use_bare_instructions);
return &global_object_pool_wrapper_;
}
static Precompiler* Instance() { return singleton_; }
private:
static Precompiler* singleton_;
explicit Precompiler(Thread* thread);
~Precompiler();
void DoCompileAll();
void AddRoots();
@ -258,11 +269,11 @@ class Precompiler : public ValueObject {
void AddTypesOf(const Class& cls);
void AddTypesOf(const Function& function);
void AddTypeArguments(const TypeArguments& args);
void AddCalleesOf(const Function& function);
void AddCalleesOf(const Function& function, intptr_t gop_offset);
void AddCalleesOfHelper(const Object& entry,
String* temp_selector,
Class* temp_cls);
void AddConstObject(const Instance& instance);
void AddConstObject(const class Instance& instance);
void AddClosureCall(const Array& arguments_descriptor);
void AddField(const Field& field);
void AddFunction(const Function& function);
@ -320,6 +331,7 @@ class Precompiler : public ValueObject {
intptr_t dropped_type_count_;
intptr_t dropped_library_count_;
ObjectPoolWrapper global_object_pool_wrapper_;
GrowableObjectArray& libraries_;
const GrowableObjectArray& pending_functions_;
SymbolSet sent_selectors_;

View file

@ -250,6 +250,46 @@ intptr_t ObjIndexPair::Hashcode(Key key) {
// Unlikely.
return key.obj_->GetClassId();
}
void ObjectPoolWrapper::Reset() {
// Null out the handles we've accumulated.
for (intptr_t i = 0; i < object_pool_.length(); ++i) {
if (object_pool_[i].type() == ObjectPool::kTaggedObject) {
*const_cast<Object*>(object_pool_[i].obj_) = Object::null();
*const_cast<Object*>(object_pool_[i].equivalence_) = Object::null();
}
}
object_pool_.Clear();
object_pool_index_table_.Clear();
}
void ObjectPoolWrapper::InitializeFrom(const ObjectPool& other) {
ASSERT(object_pool_.length() == 0);
for (intptr_t i = 0; i < other.Length(); i++) {
auto type = other.TypeAt(i);
auto patchable = other.PatchableAt(i);
switch (type) {
case ObjectPool::kTaggedObject: {
ObjectPoolWrapperEntry entry(&Object::ZoneHandle(other.ObjectAt(i)),
patchable);
AddObject(entry);
break;
}
case ObjectPool::kImmediate:
case ObjectPool::kNativeFunction:
case ObjectPool::kNativeFunctionWrapper: {
ObjectPoolWrapperEntry entry(other.RawValueAt(i), type, patchable);
AddObject(entry);
break;
}
default:
UNREACHABLE();
}
}
ASSERT(CurrentLength() == other.Length());
}
intptr_t ObjectPoolWrapper::AddObject(const Object& obj,
ObjectPool::Patchability patchable) {
@ -267,6 +307,19 @@ intptr_t ObjectPoolWrapper::AddObject(ObjectPoolWrapperEntry entry) {
(entry.obj_->IsNotTemporaryScopedHandle() &&
(entry.equivalence_ == NULL ||
entry.equivalence_->IsNotTemporaryScopedHandle())));
if (entry.type() == ObjectPool::kTaggedObject) {
// If the owner of the object pool wrapper specified a specific zone to
// use, we'll do so.
if (zone_ != NULL) {
entry.obj_ = &Object::ZoneHandle(zone_, entry.obj_->raw());
if (entry.equivalence_ != NULL) {
entry.equivalence_ =
&Object::ZoneHandle(zone_, entry.equivalence_->raw());
}
}
}
object_pool_.Add(entry);
if (entry.patchable() == ObjectPool::kNotPatchable) {
// The object isn't patchable. Record the index for fast lookup.

View file

@ -370,10 +370,40 @@ class ObjIndexPair {
class ObjectPoolWrapper : public ValueObject {
public:
ObjectPoolWrapper() : zone_(nullptr) {}
~ObjectPoolWrapper() {
if (zone_ != nullptr) {
Reset();
zone_ = nullptr;
}
}
// Clears all existing entries in this object pool builder.
//
// Note: Any code which has been compiled via this builder might use offsets
// into the pool which are not correct anymore.
void Reset();
// Initializes this object pool builder from [other].
//
// All entries from [other] will be populated, including their
// kind/patchability bits.
void InitializeFrom(const ObjectPool& other);
// Initialize this object pool builder with a [zone].
//
// Any objects added later on will be referenced using handles from [zone].
void InitializeWithZone(Zone* zone) {
ASSERT(object_pool_.length() == 0);
ASSERT(zone_ == nullptr && zone != nullptr);
zone_ = zone;
}
intptr_t AddObject(
const Object& obj,
ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
intptr_t AddImmediate(uword imm);
intptr_t FindObject(
const Object& obj,
ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
@ -386,6 +416,9 @@ class ObjectPoolWrapper : public ValueObject {
RawObjectPool* MakeObjectPool();
intptr_t CurrentLength() { return object_pool_.length(); }
ObjectPoolWrapperEntry& EntryAt(intptr_t i) { return object_pool_[i]; }
private:
intptr_t AddObject(ObjectPoolWrapperEntry entry);
intptr_t FindObject(ObjectPoolWrapperEntry entry);
@ -395,6 +428,11 @@ class ObjectPoolWrapper : public ValueObject {
// Hashmap for fast lookup in object pool.
DirectChainedHashMap<ObjIndexPair> object_pool_index_table_;
// The zone used for allocating the handles we keep in the map and array (or
// NULL, in which case allocations happen using the zone active at the point
// of insertion).
Zone* zone_;
};
enum RestorePP { kRestoreCallerPP, kKeepCalleePP };

View file

@ -24,6 +24,7 @@ namespace dart {
DECLARE_FLAG(bool, check_code_pointer);
DECLARE_FLAG(bool, inline_alloc);
DECLARE_FLAG(bool, precompiled_mode);
uint32_t Address::encoding3() const {
if (kind_ == Immediate) {
@ -3162,10 +3163,16 @@ void Assembler::EnterDartFrame(intptr_t frame_size) {
COMPILE_ASSERT(PP < CODE_REG);
COMPILE_ASSERT(CODE_REG < FP);
COMPILE_ASSERT(FP < LR);
EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0);
// Setup pool pointer for this dart function.
LoadPoolPointer();
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
EnterFrame((1 << PP) | (1 << CODE_REG) | (1 << FP) | (1 << LR), 0);
// Setup pool pointer for this dart function.
LoadPoolPointer();
} else {
EnterFrame((1 << FP) | (1 << LR), 0);
}
set_constant_pool_allowed(true);
// Reserve space for locals.
AddImmediate(SP, -frame_size);
@ -3186,8 +3193,10 @@ void Assembler::EnterOsrFrame(intptr_t extra_size) {
}
void Assembler::LeaveDartFrame() {
ldr(PP,
Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
ldr(PP,
Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
}
set_constant_pool_allowed(false);
// This will implicitly drop saved PP, PC marker due to restoring SP from FP
@ -3196,8 +3205,10 @@ void Assembler::LeaveDartFrame() {
}
void Assembler::LeaveDartFrameAndReturn() {
ldr(PP,
Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
ldr(PP,
Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
}
set_constant_pool_allowed(false);
// This will implicitly drop saved PP, PC marker due to restoring SP from FP

View file

@ -18,6 +18,7 @@ namespace dart {
DECLARE_FLAG(bool, check_code_pointer);
DECLARE_FLAG(bool, inline_alloc);
DECLARE_FLAG(bool, precompiled_mode);
DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches");
@ -1250,15 +1251,18 @@ void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
ASSERT(!constant_pool_allowed());
// Setup the frame.
EnterFrame(0);
TagAndPushPPAndPcMarker(); // Save PP and PC marker.
// Load the pool pointer.
if (new_pp == kNoRegister) {
LoadPoolPointer();
} else {
mov(PP, new_pp);
set_constant_pool_allowed(true);
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
TagAndPushPPAndPcMarker(); // Save PP and PC marker.
// Load the pool pointer.
if (new_pp == kNoRegister) {
LoadPoolPointer();
} else {
mov(PP, new_pp);
}
}
set_constant_pool_allowed(true);
// Reserve space.
if (frame_size > 0) {
@ -1283,13 +1287,15 @@ void Assembler::EnterOsrFrame(intptr_t extra_size, Register new_pp) {
}
void Assembler::LeaveDartFrame(RestorePP restore_pp) {
if (restore_pp == kRestoreCallerPP) {
set_constant_pool_allowed(false);
// Restore and untag PP.
LoadFromOffset(PP, FP,
compiler_frame_layout.saved_caller_pp_from_fp * kWordSize);
sub(PP, PP, Operand(kHeapObjectTag));
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
if (restore_pp == kRestoreCallerPP) {
// Restore and untag PP.
LoadFromOffset(PP, FP,
compiler_frame_layout.saved_caller_pp_from_fp * kWordSize);
sub(PP, PP, Operand(kHeapObjectTag));
}
}
set_constant_pool_allowed(false);
LeaveFrame();
}
@ -1325,7 +1331,8 @@ void Assembler::LeaveCallRuntimeFrame() {
const intptr_t kPushedRegistersSize =
kDartVolatileCpuRegCount * kWordSize +
kDartVolatileFpuRegCount * kWordSize +
2 * kWordSize; // PP and pc marker from EnterStubFrame.
(compiler_frame_layout.dart_fixed_frame_size - 2) *
kWordSize; // From EnterStubFrame (excluding PC / FP)
AddImmediate(SP, FP, -kPushedRegistersSize);
for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) {
const Register reg = static_cast<Register>(i);

View file

@ -21,6 +21,7 @@ namespace dart {
DECLARE_FLAG(bool, check_code_pointer);
DECLARE_FLAG(bool, inline_alloc);
DECLARE_FLAG(bool, precompiled_mode);
Assembler::Assembler(ObjectPoolWrapper* object_pool_wrapper,
bool use_far_branches)
@ -1531,7 +1532,9 @@ void Assembler::LeaveCallRuntimeFrame() {
const intptr_t kPushedRegistersSize =
kPushedCpuRegistersCount * kWordSize +
kPushedXmmRegistersCount * kFpuRegisterSize +
2 * kWordSize; // PP, pc marker from EnterStubFrame
(compiler_frame_layout.dart_fixed_frame_size - 2) *
kWordSize; // From EnterStubFrame (excluding PC / FP)
leaq(RSP, Address(RBP, -kPushedRegistersSize));
// TODO(vegorov): avoid saving FpuTMP, it is used only as scratch.
@ -1568,12 +1571,14 @@ void Assembler::LoadPoolPointer(Register pp) {
void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
ASSERT(!constant_pool_allowed());
EnterFrame(0);
pushq(CODE_REG);
pushq(PP);
if (new_pp == kNoRegister) {
LoadPoolPointer(PP);
} else {
movq(PP, new_pp);
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
pushq(CODE_REG);
pushq(PP);
if (new_pp == kNoRegister) {
LoadPoolPointer(PP);
} else {
movq(PP, new_pp);
}
}
set_constant_pool_allowed(true);
if (frame_size != 0) {
@ -1583,11 +1588,13 @@ void Assembler::EnterDartFrame(intptr_t frame_size, Register new_pp) {
void Assembler::LeaveDartFrame(RestorePP restore_pp) {
// Restore caller's PP register that was pushed in EnterDartFrame.
if (restore_pp == kRestoreCallerPP) {
movq(PP, Address(RBP, (compiler_frame_layout.saved_caller_pp_from_fp *
kWordSize)));
set_constant_pool_allowed(false);
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
if (restore_pp == kRestoreCallerPP) {
movq(PP, Address(RBP, (compiler_frame_layout.saved_caller_pp_from_fp *
kWordSize)));
}
}
set_constant_pool_allowed(false);
LeaveFrame();
}

View file

@ -329,19 +329,23 @@ void Disassembler::DisassembleCodeHelper(const char* function_fullname,
THR_Print("Static call target functions {\n");
const auto& table = Array::Handle(zone, code.static_calls_target_table());
auto& cls = Class::Handle(zone);
auto& kind_and_offset = Smi::Handle(zone);
auto& kind_type_and_offset = Smi::Handle(zone);
auto& function = Function::Handle(zone);
auto& code = Code::Handle(zone);
if (!table.IsNull()) {
StaticCallsTable static_calls(table);
for (auto& call : static_calls) {
kind_and_offset = call.Get<Code::kSCallTableKindAndOffset>();
kind_type_and_offset = call.Get<Code::kSCallTableKindAndOffset>();
function = call.Get<Code::kSCallTableFunctionTarget>();
code = call.Get<Code::kSCallTableCodeTarget>();
auto kind = Code::KindField::decode(kind_and_offset.Value());
auto offset = Code::OffsetField::decode(kind_and_offset.Value());
auto kind = Code::KindField::decode(kind_type_and_offset.Value());
auto offset = Code::OffsetField::decode(kind_type_and_offset.Value());
auto entry_point =
Code::EntryPointField::decode(kind_type_and_offset.Value());
const char* s_entry_point =
entry_point == Code::kUncheckedEntry ? " <unchecked-entry>" : "";
const char* skind = nullptr;
switch (kind) {
case Code::kPcRelativeCall:
@ -359,15 +363,17 @@ void Disassembler::DisassembleCodeHelper(const char* function_fullname,
if (function.IsNull()) {
cls ^= code.owner();
if (cls.IsNull()) {
THR_Print(" 0x%" Px ": %s, %p (%s)\n", start + offset,
code.QualifiedName(), code.raw(), skind);
THR_Print(" 0x%" Px ": %s, %p (%s)%s\n", start + offset,
code.QualifiedName(), code.raw(), skind, s_entry_point);
} else {
THR_Print(" 0x%" Px ": allocation stub for %s, %p (%s)\n",
start + offset, cls.ToCString(), code.raw(), skind);
THR_Print(" 0x%" Px ": allocation stub for %s, %p (%s)%s\n",
start + offset, cls.ToCString(), code.raw(), skind,
s_entry_point);
}
} else {
THR_Print(" 0x%" Px ": %s, %p (%s)\n", start + offset,
function.ToFullyQualifiedCString(), code.raw(), skind);
THR_Print(" 0x%" Px ": %s, %p (%s)%s\n", start + offset,
function.ToFullyQualifiedCString(), code.raw(), skind,
s_entry_point);
}
}
}
@ -389,6 +395,11 @@ void Disassembler::DisassembleCode(const Function& function,
DisassembleCodeHelper(function_fullname, code, optimized);
}
#else // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
void Disassembler::DisassembleCode(const Function& function,
const Code& code,
bool optimized) {}
#endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
} // namespace dart

View file

@ -693,29 +693,40 @@ void FlowGraphCompiler::AddNullCheck(intptr_t pc_offset,
null_check_name_idx);
}
void FlowGraphCompiler::AddPcRelativeCallTarget(const Function& function) {
void FlowGraphCompiler::AddPcRelativeCallTarget(const Function& function,
Code::EntryKind entry_kind) {
ASSERT(function.IsZoneHandle());
static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
Code::kPcRelativeCall, assembler()->CodeSize(), &function, NULL));
const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
? Code::kUncheckedEntry
: Code::kDefaultEntry;
static_calls_target_table_.Add(
new (zone()) StaticCallsStruct(Code::kPcRelativeCall, entry_point,
assembler()->CodeSize(), &function, NULL));
}
void FlowGraphCompiler::AddPcRelativeCallStubTarget(const Code& stub_code) {
ASSERT(stub_code.IsZoneHandle() || stub_code.IsReadOnlyHandle());
ASSERT(!stub_code.IsNull());
static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
Code::kPcRelativeCall, assembler()->CodeSize(), NULL, &stub_code));
Code::kPcRelativeCall, Code::kDefaultEntry, assembler()->CodeSize(), NULL,
&stub_code));
}
void FlowGraphCompiler::AddStaticCallTarget(const Function& func) {
void FlowGraphCompiler::AddStaticCallTarget(const Function& func,
Code::EntryKind entry_kind) {
ASSERT(func.IsZoneHandle());
const auto entry_point = entry_kind == Code::EntryKind::kUnchecked
? Code::kUncheckedEntry
: Code::kDefaultEntry;
static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
Code::kCallViaCode, assembler()->CodeSize(), &func, NULL));
Code::kCallViaCode, entry_point, assembler()->CodeSize(), &func, NULL));
}
void FlowGraphCompiler::AddStubCallTarget(const Code& code) {
ASSERT(code.IsZoneHandle() || code.IsReadOnlyHandle());
static_calls_target_table_.Add(new (zone()) StaticCallsStruct(
Code::kCallViaCode, assembler()->CodeSize(), NULL, &code));
static_calls_target_table_.Add(
new (zone()) StaticCallsStruct(Code::kCallViaCode, Code::kDefaultEntry,
assembler()->CodeSize(), NULL, &code));
}
CompilerDeoptInfo* FlowGraphCompiler::AddDeoptIndexAtCall(intptr_t deopt_id) {
@ -1075,13 +1086,15 @@ void FlowGraphCompiler::FinalizeStaticCallTargetsTable(const Code& code) {
Array::Handle(zone(), Array::New(array_length, Heap::kOld));
StaticCallsTable entries(targets);
auto& kind_and_offset = Smi::Handle(zone());
auto& kind_type_and_offset = Smi::Handle(zone());
for (intptr_t i = 0; i < calls.length(); i++) {
auto entry = calls[i];
kind_and_offset = Smi::New(Code::KindField::encode(entry->call_kind) |
Code::OffsetField::encode(entry->offset));
kind_type_and_offset =
Smi::New(Code::KindField::encode(entry->call_kind) |
Code::EntryPointField::encode(entry->entry_point) |
Code::OffsetField::encode(entry->offset));
auto view = entries[i];
view.Set<Code::kSCallTableKindAndOffset>(kind_and_offset);
view.Set<Code::kSCallTableKindAndOffset>(kind_type_and_offset);
const Object* target = nullptr;
if (entry->function != nullptr) {
view.Set<Code::kSCallTableFunctionTarget>(*calls[i]->function);

View file

@ -773,9 +773,11 @@ class FlowGraphCompiler : public ValueObject {
void EmitFrameEntry();
void AddPcRelativeCallTarget(const Function& function);
void AddPcRelativeCallTarget(const Function& function,
Code::EntryKind entry_kind);
void AddPcRelativeCallStubTarget(const Code& stub_code);
void AddStaticCallTarget(const Function& function);
void AddStaticCallTarget(const Function& function,
Code::EntryKind entry_kind);
void GenerateDeferredCode();
@ -927,14 +929,17 @@ class FlowGraphCompiler : public ValueObject {
class StaticCallsStruct : public ZoneAllocated {
public:
Code::CallKind call_kind;
Code::CallEntryPoint entry_point;
const intptr_t offset;
const Function* function; // Can be NULL.
const Code* code; // Can be NULL.
StaticCallsStruct(Code::CallKind call_kind,
Code::CallEntryPoint entry_point,
intptr_t offset_arg,
const Function* function_arg,
const Code* code_arg)
: call_kind(call_kind),
entry_point(entry_point),
offset(offset_arg),
function(function_arg),
code(code_arg) {

View file

@ -787,19 +787,23 @@ void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
extracted_method, ObjectPool::Patchability::kNotPatchable);
// We use a custom pool register to preserve caller PP.
const Register kPoolReg = R0;
Register kPoolReg = R0;
// R1 = extracted function
// R4 = offset of type argument vector (or 0 if class is not generic)
__ LoadFieldFromOffset(kWord, kPoolReg, CODE_REG, Code::object_pool_offset());
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
kPoolReg = PP;
} else {
__ LoadFieldFromOffset(kWord, kPoolReg, CODE_REG,
Code::object_pool_offset());
}
__ LoadImmediate(R4, type_arguments_field_offset);
__ LoadFieldFromOffset(kWord, R1, kPoolReg,
ObjectPool::element_offset(function_index));
__ LoadFieldFromOffset(kWord, CODE_REG, kPoolReg,
ObjectPool::element_offset(stub_index));
__ LoadFieldFromOffset(kWord, R3, CODE_REG,
Code::entry_point_offset(Code::EntryKind::kUnchecked));
__ bx(R3);
__ Branch(FieldAddress(
CODE_REG, Code::entry_point_offset(Code::EntryKind::kUnchecked)));
}
void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
@ -833,9 +837,11 @@ void FlowGraphCompiler::EmitFrameEntry() {
(!is_optimizing() || may_reoptimize())) {
__ Comment("Invocation Count Check");
const Register function_reg = R8;
// The pool pointer is not setup before entering the Dart frame.
// Temporarily setup pool pointer for this dart function.
__ LoadPoolPointer(new_pp);
if (!FLAG_precompiled_mode || !FLAG_use_bare_instructions) {
// The pool pointer is not setup before entering the Dart frame.
// Temporarily setup pool pointer for this dart function.
__ LoadPoolPointer(new_pp);
}
// Load function object from object pool.
__ LoadFunctionFromCalleePool(function_reg, function, new_pp);
@ -918,9 +924,16 @@ void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
LocationSummary* locs) {
__ BranchLink(stub);
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
AddStubCallTarget(stub);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions && !stub.InVMHeap()) {
AddPcRelativeCallStubTarget(stub);
__ GenerateUnRelocatedPcRelativeCall();
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
} else {
ASSERT(!stub.IsNull());
__ BranchLink(stub);
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
AddStubCallTarget(stub);
}
}
void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
@ -947,14 +960,21 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
// Call sites to the same target can share object pool entries. These
// call sites are never patched for breakpoints: the function is deoptimized
// and the unoptimized code with IC calls for static calls is patched instead.
ASSERT(is_optimizing());
const auto& stub = StubCode::CallStaticFunction();
__ BranchLinkWithEquivalence(stub, target, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
AddStaticCallTarget(target);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
AddPcRelativeCallTarget(target, entry_kind);
__ GenerateUnRelocatedPcRelativeCall();
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
} else {
ASSERT(is_optimizing());
// Call sites to the same target can share object pool entries. These
// call sites are never patched for breakpoints: the function is deoptimized
// and the unoptimized code with IC calls for static calls is patched
// instead.
const auto& stub = StubCode::CallStaticFunction();
__ BranchLinkWithEquivalence(stub, target, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
AddStaticCallTarget(target, entry_kind);
}
}
void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,

View file

@ -766,17 +766,23 @@ void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
extracted_method, ObjectPool::Patchability::kNotPatchable);
// We use a custom pool register to preserve caller PP.
const Register kPoolReg = R0;
Register kPoolReg = R0;
// R1 = extracted function
// R4 = offset of type argument vector (or 0 if class is not generic)
__ ldr(kPoolReg, FieldAddress(CODE_REG, Code::object_pool_offset()));
__ LoadFieldFromOffset(kPoolReg, CODE_REG, Code::object_pool_offset());
intptr_t pp_offset = 0;
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
// PP is not tagged on arm64.
kPoolReg = PP;
pp_offset = kHeapObjectTag;
} else {
__ LoadFieldFromOffset(kPoolReg, CODE_REG, Code::object_pool_offset());
}
__ LoadImmediate(R4, type_arguments_field_offset);
__ LoadFieldFromOffset(R1, kPoolReg,
ObjectPool::element_offset(function_index));
__ LoadFieldFromOffset(
R1, kPoolReg, ObjectPool::element_offset(function_index) + pp_offset);
__ LoadFieldFromOffset(CODE_REG, kPoolReg,
ObjectPool::element_offset(stub_index));
ObjectPool::element_offset(stub_index) + pp_offset);
__ LoadFieldFromOffset(R0, CODE_REG,
Code::entry_point_offset(Code::EntryKind::kUnchecked));
__ br(R0);
@ -813,9 +819,11 @@ void FlowGraphCompiler::EmitFrameEntry() {
__ Comment("Invocation Count Check");
const Register function_reg = R6;
new_pp = R13;
// The pool pointer is not setup before entering the Dart frame.
// Temporarily setup pool pointer for this dart function.
__ LoadPoolPointer(new_pp);
if (!FLAG_precompiled_mode || !FLAG_use_bare_instructions) {
// The pool pointer is not setup before entering the Dart frame.
// Temporarily setup pool pointer for this dart function.
__ LoadPoolPointer(new_pp);
}
// Load function object using the callee's pool pointer.
__ LoadFunctionFromCalleePool(function_reg, function, new_pp);
@ -912,9 +920,16 @@ void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
LocationSummary* locs) {
__ BranchLink(stub);
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
AddStubCallTarget(stub);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions && !stub.InVMHeap()) {
AddPcRelativeCallStubTarget(stub);
__ GenerateUnRelocatedPcRelativeCall();
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
} else {
ASSERT(!stub.IsNull());
__ BranchLink(stub);
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
AddStubCallTarget(stub);
}
}
void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
@ -943,14 +958,21 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
const Function& target,
Code::EntryKind entry_kind) {
// TODO(sjindel/entrypoints): Support multiple entrypoints on ARM64.
// Call sites to the same target can share object pool entries. These
// call sites are never patched for breakpoints: the function is deoptimized
// and the unoptimized code with IC calls for static calls is patched instead.
ASSERT(is_optimizing());
const Code& stub = StubCode::CallStaticFunction();
__ BranchLinkWithEquivalence(stub, target);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
AddStaticCallTarget(target);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
AddPcRelativeCallTarget(target, entry_kind);
__ GenerateUnRelocatedPcRelativeCall();
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
} else {
// Call sites to the same target can share object pool entries. These
// call sites are never patched for breakpoints: the function is deoptimized
// and the unoptimized code with IC calls for static calls is patched
// instead.
ASSERT(is_optimizing());
const auto& stub = StubCode::CallStaticFunction();
__ BranchLinkWithEquivalence(stub, target);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
AddStaticCallTarget(target, entry_kind);
}
}
void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,

View file

@ -860,7 +860,7 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
const auto& stub = StubCode::CallStaticFunction();
__ Call(stub, true /* movable_target */);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
AddStaticCallTarget(target);
AddStaticCallTarget(target, entry_kind);
}
void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,

View file

@ -777,6 +777,7 @@ void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
const Code& build_method_extractor = Code::ZoneHandle(
isolate()->object_store()->build_method_extractor_code());
ASSERT(!build_method_extractor.IsNull());
const intptr_t stub_index = __ object_pool_wrapper().AddObject(
build_method_extractor, ObjectPool::Patchability::kNotPatchable);
@ -784,11 +785,15 @@ void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
extracted_method, ObjectPool::Patchability::kNotPatchable);
// We use a custom pool register to preserve caller PP.
const Register kPoolReg = RAX;
Register kPoolReg = RAX;
// RBX = extracted function
// RDX = offset of type argument vector (or 0 if class is not generic)
__ movq(kPoolReg, FieldAddress(CODE_REG, Code::object_pool_offset()));
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
kPoolReg = PP;
} else {
__ movq(kPoolReg, FieldAddress(CODE_REG, Code::object_pool_offset()));
}
__ movq(RDX, Immediate(type_arguments_field_offset));
__ movq(RBX,
FieldAddress(kPoolReg, ObjectPool::element_offset(function_index)));
@ -830,7 +835,9 @@ void FlowGraphCompiler::EmitFrameEntry() {
__ EnterOsrFrame(extra_slots * kWordSize);
} else {
const Register new_pp = R13;
__ LoadPoolPointer(new_pp);
if (!FLAG_precompiled_mode || !FLAG_use_bare_instructions) {
__ LoadPoolPointer(new_pp);
}
const Function& function = parsed_function().function();
if (CanOptimizeFunction() && function.IsOptimizable() &&
@ -909,9 +916,16 @@ void FlowGraphCompiler::GenerateCall(TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
LocationSummary* locs) {
__ Call(stub);
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
AddStubCallTarget(stub);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions && !stub.InVMHeap()) {
AddPcRelativeCallStubTarget(stub);
__ GenerateUnRelocatedPcRelativeCall();
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
} else {
ASSERT(!stub.IsNull());
__ Call(stub);
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
AddStubCallTarget(stub);
}
}
void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
@ -938,14 +952,21 @@ void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
// Call sites to the same target can share object pool entries. These
// call sites are never patched for breakpoints: the function is deoptimized
// and the unoptimized code with IC calls for static calls is patched instead.
ASSERT(is_optimizing());
const auto& stub_entry = StubCode::CallStaticFunction();
__ CallWithEquivalence(stub_entry, target, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
AddStaticCallTarget(target);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
AddPcRelativeCallTarget(target, entry_kind);
__ GenerateUnRelocatedPcRelativeCall();
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
} else {
// Call sites to the same target can share object pool entries. These
// call sites are never patched for breakpoints: the function is deoptimized
// and the unoptimized code with IC calls for static calls is patched
// instead.
const auto& stub_entry = StubCode::CallStaticFunction();
__ CallWithEquivalence(stub_entry, target, entry_kind);
EmitCallsiteMetadata(token_pos, deopt_id, kind, locs);
AddStaticCallTarget(target, entry_kind);
}
}
void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,

View file

@ -111,7 +111,8 @@ void TranslationHelper::SetMetadataMappings(
}
void TranslationHelper::SetConstants(const Array& constants) {
ASSERT(constants_.IsNull());
ASSERT(constants_.IsNull() ||
(constants.IsNull() || constants.Length() == 0));
constants_ = constants.raw();
}

View file

@ -52,13 +52,15 @@ void CodeRelocator::Relocate(bool is_vm_isolate) {
GrowableArray<RawCode*> callers;
// The offset from the instruction at which the call happens.
GrowableArray<intptr_t> call_offsets;
// The type of entry point we call in the destination.
GrowableArray<Code::CallEntryPoint> call_entry_points;
// The offset in the .text segment where the call happens.
GrowableArray<intptr_t> text_offsets;
// The target of the forward call.
GrowableArray<RawCode*> callees;
auto& targets = Array::Handle(zone);
auto& kind_and_offset = Smi::Handle(zone);
auto& kind_type_and_offset = Smi::Handle(zone);
auto& target = Object::Handle(zone);
auto& destination = Code::Handle(zone);
auto& instructions = Instructions::Handle(zone);
@ -86,9 +88,11 @@ void CodeRelocator::Relocate(bool is_vm_isolate) {
if (!targets.IsNull()) {
StaticCallsTable calls(targets);
for (auto call : calls) {
kind_and_offset = call.Get<Code::kSCallTableKindAndOffset>();
auto kind = Code::KindField::decode(kind_and_offset.Value());
auto offset = Code::OffsetField::decode(kind_and_offset.Value());
kind_type_and_offset = call.Get<Code::kSCallTableKindAndOffset>();
auto kind = Code::KindField::decode(kind_type_and_offset.Value());
auto offset = Code::OffsetField::decode(kind_type_and_offset.Value());
auto entry_point =
Code::EntryPointField::decode(kind_type_and_offset.Value());
if (kind == Code::kCallViaCode) {
continue;
@ -113,6 +117,7 @@ void CodeRelocator::Relocate(bool is_vm_isolate) {
callees.Add(destination.raw());
text_offsets.Add(start_of_call);
call_offsets.Add(offset);
call_entry_points.Add(entry_point);
}
}
}
@ -125,12 +130,16 @@ void CodeRelocator::Relocate(bool is_vm_isolate) {
callee = callees[i];
const intptr_t text_offset = text_offsets[i];
const intptr_t call_offset = call_offsets[i];
const bool use_unchecked_entry =
call_entry_points[i] == Code::kUncheckedEntry;
caller_instruction = caller.instructions();
destination_instruction = callee.instructions();
const intptr_t unchecked_offset = destination_instruction.HeaderSize() +
(destination_instruction.EntryPoint() -
destination_instruction.PayloadStart());
const uword entry_point = use_unchecked_entry ? callee.UncheckedEntryPoint()
: callee.EntryPoint();
const intptr_t unchecked_offset =
destination_instruction.HeaderSize() +
(entry_point - destination_instruction.PayloadStart());
auto map_entry = instructions_map.Lookup(destination_instruction.raw());
auto& dst = (*commands_)[map_entry->inst_nr];

View file

@ -27,6 +27,7 @@
#include "vm/object_store.h"
#include "vm/port.h"
#include "vm/profiler.h"
#include "vm/reverse_pc_lookup_cache.h"
#include "vm/service_isolate.h"
#include "vm/simulator.h"
#include "vm/snapshot.h"
@ -248,6 +249,9 @@ char* Dart::Init(const uint8_t* vm_isolate_snapshot,
// Must copy before leaving the zone.
return strdup(error.ToErrorCString());
}
ReversePcLookupCache::BuildAndAttachToIsolate(vm_isolate_);
Object::FinishInit(vm_isolate_);
#if !defined(PRODUCT)
if (tds.enabled()) {
@ -604,6 +608,9 @@ RawError* Dart::InitializeIsolate(const uint8_t* snapshot_data,
if (!error.IsNull()) {
return error.raw();
}
ReversePcLookupCache::BuildAndAttachToIsolate(I);
#if !defined(PRODUCT)
if (tds.enabled()) {
tds.SetNumArguments(2);
@ -630,17 +637,17 @@ RawError* Dart::InitializeIsolate(const uint8_t* snapshot_data,
#if defined(DART_PRECOMPILED_RUNTIME)
// AOT: The megamorphic miss function and code come from the snapshot.
ASSERT(I->object_store()->megamorphic_miss_code() != Code::null());
ASSERT(I->object_store()->build_method_extractor_code() != Code::null());
#else
// JIT: The megamorphic miss function and code come from the snapshot in JIT
// app snapshot, otherwise create them.
if (I->object_store()->megamorphic_miss_code() == Code::null()) {
MegamorphicCacheTable::InitMissHandler(I);
}
#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
if (I != Dart::vm_isolate()) {
I->object_store()->set_build_method_extractor_code(
Code::Handle(StubCode::GetBuildMethodExtractorStub()));
Code::Handle(StubCode::GetBuildMethodExtractorStub(nullptr)));
}
#endif
#endif // defined(DART_PRECOMPILED_RUNTIME)
@ -716,6 +723,10 @@ const char* Dart::FeaturesString(Isolate* isolate,
ADD_FLAG(asserts, enable_asserts, FLAG_enable_asserts);
// sync-async affects deopt_ids.
buffer.AddString(FLAG_sync_async ? " sync_async" : " no-sync_async");
if (kind == Snapshot::kFullAOT) {
ADD_FLAG(use_bare_instructions, use_bare_instructions,
FLAG_use_bare_instructions);
}
if (kind == Snapshot::kFullJIT) {
ADD_FLAG(use_field_guards, use_field_guards, FLAG_use_field_guards);
ADD_FLAG(use_osr, use_osr, FLAG_use_osr);

View file

@ -21,6 +21,7 @@
namespace dart {
DECLARE_FLAG(bool, enable_interpreter);
DECLARE_FLAG(bool, precompiled_mode);
// A cache of VM heap allocated arguments descriptors.
RawArray* ArgumentsDescriptor::cached_args_descriptors_[kCachedDescriptorCount];
@ -116,11 +117,18 @@ RawObject* DartEntry::InvokeFunction(const Function& function,
// We use a kernel2kernel constant evaluator in Dart 2.0 AOT compilation
// and never start the VM service isolate. So we should never end up invoking
// any dart code in the Dart 2.0 AOT compiler.
#if !defined(DART_PRECOMPILED_RUNTIME)
if (FLAG_precompiled_mode) {
#if !defined(DART_PRECOMPILED_RUNTIME)
UNREACHABLE();
}
#else
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
Thread* thread = Thread::Current();
thread->set_global_object_pool(
thread->isolate()->object_store()->global_object_pool());
ASSERT(thread->global_object_pool() != Object::null());
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
}
ASSERT(!function.IsNull());

View file

@ -248,7 +248,7 @@ class ExceptionHandlerFinder : public StackResource {
switch (move.source_kind()) {
case CatchEntryMove::SourceKind::kConstant:
if (pool == nullptr) {
pool = &ObjectPool::Handle(code_->object_pool());
pool = &ObjectPool::Handle(code_->GetObjectPool());
}
value = pool->ObjectAt(move.src_slot());
break;

View file

@ -157,6 +157,7 @@ constexpr bool kDartPrecompiledRuntime = false;
C(stress_async_stacks, false, false, bool, false, \
"Stress test async stack traces") \
P(sync_async, bool, true, "Start `async` functions synchronously.") \
P(use_bare_instructions, bool, false, "Enable bare instructions mode.") \
R(support_disassembler, false, bool, true, "Support the disassembler.") \
R(support_il_printer, false, bool, true, "Support the IL printer.") \
C(support_reload, false, false, bool, true, "Support isolate reload.") \

View file

@ -27,6 +27,8 @@ class BaseDirectChainedHashMap : public B {
BaseDirectChainedHashMap(const BaseDirectChainedHashMap& other);
intptr_t Length() const { return count_; }
virtual ~BaseDirectChainedHashMap() {
allocator_->template Free<HashMapListElement>(array_, array_size_);
allocator_->template Free<HashMapListElement>(lists_, lists_size_);

View file

@ -138,6 +138,8 @@ typedef FixedCache<intptr_t, CatchEntryMovesRefPtr, 16> CatchEntryMovesCache;
//
#define ISOLATE_FLAG_LIST(V) \
V(NONPRODUCT, asserts, EnableAsserts, enable_asserts, FLAG_enable_asserts) \
V(PRODUCT, use_bare_instructions, Bare, use_bare_instructions, \
FLAG_use_bare_instructions) \
V(NONPRODUCT, use_field_guards, UseFieldGuards, use_field_guards, \
FLAG_use_field_guards) \
V(NONPRODUCT, use_osr, UseOsr, use_osr, FLAG_use_osr) \
@ -882,6 +884,7 @@ class Isolate : public BaseIsolate {
V(EnableAsserts) \
V(ErrorOnBadType) \
V(ErrorOnBadOverride) \
V(Bare) \
V(UseFieldGuards) \
V(UseOsr) \
V(Obfuscate) \

View file

@ -86,6 +86,21 @@ void MegamorphicCacheTable::InitMissHandler(Isolate* isolate) {
Function::null());
isolate->object_store()->SetMegamorphicMissHandler(code, function);
}
void MegamorphicCacheTable::ReInitMissHandlerCode(Isolate* isolate,
ObjectPoolWrapper* wrapper) {
ASSERT(FLAG_precompiled_mode && FLAG_use_bare_instructions);
const Code& code = Code::Handle(StubCode::Generate(
"_stub_MegamorphicMiss", wrapper, StubCode::GenerateMegamorphicMissStub));
code.set_exception_handlers(Object::empty_exception_handlers());
auto object_store = isolate->object_store();
auto& function = Function::Handle(object_store->megamorphic_miss_function());
function.AttachCode(code);
object_store->SetMegamorphicMissHandler(code, function);
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
void MegamorphicCacheTable::PrintSizes(Isolate* isolate) {

View file

@ -13,6 +13,7 @@ class Array;
class Function;
class Isolate;
class ObjectPointerVisitor;
class ObjectPoolWrapper;
class RawArray;
class RawFunction;
class RawCode;
@ -25,6 +26,15 @@ class MegamorphicCacheTable : public AllStatic {
static RawFunction* miss_handler(Isolate* isolate);
NOT_IN_PRECOMPILED(static void InitMissHandler(Isolate* isolate));
// Re-initializes the megamorphic miss handler function in the object store.
//
// Normally we initialize the megamorphic miss handler during isolate startup.
// Though if we AOT compile with bare instructions support, we need to
// re-generate the handler to ensure it uses the common object pool.
NOT_IN_PRECOMPILED(
static void ReInitMissHandlerCode(Isolate* isolate,
ObjectPoolWrapper* wrapper));
static RawMegamorphicCache* Lookup(Isolate* isolate,
const String& name,
const Array& descriptor);
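The comment above describes when the handler has to be rebuilt. A minimal sketch of the intended call site, assuming it runs inside the precompiler where an `isolate` pointer and the `global_object_pool_wrapper()` accessor (introduced elsewhere in this change) are in scope:
  // Hypothetical AOT-compiler step, for illustration only: regenerate the
  // megamorphic miss handler so its constants land in the shared global pool.
  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
    MegamorphicCacheTable::ReInitMissHandlerCode(
        isolate, global_object_pool_wrapper());
  }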

View file

@ -17,16 +17,16 @@ namespace dart {
// --- Message sending/receiving from native code ---
class IsolateSaver {
class IsolateLeaveScope {
public:
explicit IsolateSaver(Isolate* current_isolate)
explicit IsolateLeaveScope(Isolate* current_isolate)
: saved_isolate_(current_isolate) {
if (current_isolate != NULL) {
ASSERT(current_isolate == Isolate::Current());
Dart_ExitIsolate();
}
}
~IsolateSaver() {
~IsolateLeaveScope() {
if (saved_isolate_ != NULL) {
Dart_Isolate I = reinterpret_cast<Dart_Isolate>(saved_isolate_);
Dart_EnterIsolate(I);
@ -36,7 +36,7 @@ class IsolateSaver {
private:
Isolate* saved_isolate_;
DISALLOW_COPY_AND_ASSIGN(IsolateSaver);
DISALLOW_COPY_AND_ASSIGN(IsolateLeaveScope);
};
static bool PostCObjectHelper(Dart_Port port_id, Dart_CObject* message) {
@ -79,7 +79,7 @@ DART_EXPORT Dart_Port Dart_NewNativePort(const char* name,
return ILLEGAL_PORT;
}
// Start the native port without a current isolate.
IsolateSaver saver(Isolate::Current());
IsolateLeaveScope saver(Isolate::Current());
NativeMessageHandler* nmh = new NativeMessageHandler(name, handler);
Dart_Port port_id = PortMap::CreatePort(nmh);
@ -90,7 +90,7 @@ DART_EXPORT Dart_Port Dart_NewNativePort(const char* name,
DART_EXPORT bool Dart_CloseNativePort(Dart_Port native_port_id) {
// Close the native port without a current isolate.
IsolateSaver saver(Isolate::Current());
IsolateLeaveScope saver(Isolate::Current());
// TODO(turnidge): Check that the port is native before trying to close.
return PortMap::ClosePort(native_port_id);
@ -105,7 +105,7 @@ DART_EXPORT bool Dart_InvokeVMServiceMethod(uint8_t* request_json,
char** error) {
Isolate* isolate = Isolate::Current();
ASSERT(isolate == nullptr || !isolate->is_service_isolate());
IsolateSaver saver(isolate);
IsolateLeaveScope saver(isolate);
// We only allow one isolate reload at a time. If this turns out to be on the
// critical path, we can change it to have a global datastructure which is

View file

@ -83,6 +83,7 @@ DECLARE_FLAG(bool, trace_deoptimization);
DECLARE_FLAG(bool, trace_deoptimization_verbose);
DECLARE_FLAG(bool, trace_reload);
DECLARE_FLAG(bool, write_protect_code);
DECLARE_FLAG(bool, precompiled_mode);
static const char* const kGetterPrefix = "get:";
static const intptr_t kGetterPrefixLength = strlen(kGetterPrefix);
@ -14194,6 +14195,15 @@ void Code::set_static_calls_target_table(const Array& value) const {
#endif // DEBUG
}
RawObjectPool* Code::GetObjectPool() const {
#if defined(DART_PRECOMPILED_RUNTIME)
if (FLAG_use_bare_instructions) {
return Isolate::Current()->object_store()->global_object_pool();
}
#endif
return object_pool();
}
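GetObjectPool() is the single switch point between per-Code pools and the shared global pool. A short, purely illustrative caller (`zone`, `code`, and `index` are assumed to be in scope) showing why runtime code resolves pool-referenced constants through this accessor rather than through object_pool() directly:
  // Illustrative only: read a constant referenced by this Code object. In bare
  // instructions mode this transparently consults the global object pool.
  const ObjectPool& pool = ObjectPool::Handle(zone, code.GetObjectPool());
  const Object& constant = Object::Handle(zone, pool.ObjectAt(index));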
bool Code::HasBreakpoint() const {
#if defined(PRODUCT)
return false;

View file

@ -4279,7 +4279,11 @@ class Instructions : public Object {
uword PayloadStart() const { return PayloadStart(raw()); }
uword MonomorphicEntryPoint() const { return MonomorphicEntryPoint(raw()); }
uword MonomorphicUncheckedEntryPoint() const {
return MonomorphicUncheckedEntryPoint(raw());
}
uword EntryPoint() const { return EntryPoint(raw()); }
uword UncheckedEntryPoint() const { return UncheckedEntryPoint(raw()); }
static uword PayloadStart(const RawInstructions* instr) {
return reinterpret_cast<uword>(instr->ptr()) + HeaderSize();
}
@ -4855,8 +4859,7 @@ class Code : public Object {
return Instructions::MonomorphicUncheckedEntryPoint(instructions());
}
intptr_t Size() const { return Instructions::Size(instructions()); }
RawObjectPool* GetObjectPool() const { return object_pool(); }
RawObjectPool* GetObjectPool() const;
bool ContainsInstructionAt(uword addr) const {
return ContainsInstructionAt(raw(), addr);
}
@ -4918,13 +4921,17 @@ class Code : public Object {
RawStackMap* GetStackMap(uint32_t pc_offset,
Array* stackmaps,
StackMap* map) const;
enum CallKind {
kPcRelativeCall = 1,
kPcRelativeTailCall = 2,
kCallViaCode = 3,
};
enum CallEntryPoint {
kDefaultEntry,
kUncheckedEntry,
};
enum SCallTableEntry {
kSCallTableKindAndOffset = 0,
kSCallTableCodeTarget = 1,
@ -4938,7 +4945,10 @@ class Code : public Object {
};
class KindField : public BitField<intptr_t, CallKind, 0, 2> {};
class OffsetField : public BitField<intptr_t, intptr_t, 2, 28> {};
class EntryPointField
: public BitField<intptr_t, CallEntryPoint, KindField::kNextBit, 1> {};
class OffsetField
: public BitField<intptr_t, intptr_t, EntryPointField::kNextBit, 27> {};
void set_static_calls_target_table(const Array& value) const;
RawArray* static_calls_target_table() const {
@ -5248,6 +5258,8 @@ class Code : public Object {
FINAL_HEAP_OBJECT_IMPLEMENTATION(Code, Object);
friend class Class;
friend class SnapshotWriter;
friend class StubCode; // for set_object_pool
friend class Precompiler; // for set_object_pool
friend class FunctionSerializationCluster;
friend class CodeSerializationCluster;
friend class StubCode; // for set_object_pool
@ -7807,6 +7819,7 @@ class Array : public Instance {
static intptr_t LengthOf(const RawArray* array) {
return Smi::Value(array->ptr()->length_);
}
static intptr_t length_offset() { return OFFSET_OF(RawArray, length_); }
static intptr_t data_offset() {
return OFFSET_OF_RETURNED_VALUE(RawArray, data);

View file

@ -120,13 +120,14 @@ class ObjectPointerVisitor;
RW(Function, async_star_move_next_helper) \
RW(Function, complete_on_async_return) \
RW(Class, async_star_stream_controller) \
RW(ObjectPool, global_object_pool) \
RW(Array, library_load_error_table) \
RW(Array, unique_dynamic_targets) \
RW(GrowableObjectArray, megamorphic_cache_table) \
RW(Code, build_method_extractor_code) \
RW(Array, code_order_table) \
R_(Code, megamorphic_miss_code) \
R_(Function, megamorphic_miss_function) \
RW(Array, code_order_table) \
RW(Array, obfuscation_map) \
RW(GrowableObjectArray, type_testing_stubs) \
RW(GrowableObjectArray, changed_in_last_reload) \

View file

@ -129,8 +129,7 @@ void ProgramVisitor::ShareMegamorphicBuckets() {
zone, Array::New(MegamorphicCache::kEntryLength * capacity, Heap::kOld));
const Function& handler =
Function::Handle(zone, MegamorphicCacheTable::miss_handler(isolate));
MegamorphicCache::SetEntry(buckets, 0, MegamorphicCache::smi_illegal_cid(),
handler);
MegamorphicCache::SetEntry(buckets, 0, Object::smi_illegal_cid(), handler);
for (intptr_t i = 0; i < table.Length(); i++) {
cache ^= table.At(i);
@ -636,6 +635,12 @@ void ProgramVisitor::DedupLists() {
ProgramVisitor::VisitFunctions(&visitor);
}
// Traits for comparing two [Instructions] objects for equality, which is
// implemented as bit-wise equality.
//
// This considers two instruction objects to be equal even if they have
// different static call targets. Since the static call targets are called via
// the object pool, this is ok.
class InstructionsKeyValueTrait {
public:
// Typedefs needed for the DirectChainedHashMap template.
@ -656,6 +661,52 @@ class InstructionsKeyValueTrait {
typedef DirectChainedHashMap<InstructionsKeyValueTrait> InstructionsSet;
// Traits for comparing two [Code] objects for equality.
//
// It considers two [Code] objects to be equal if
//
// * their [RawInstruction]s are bit-wise equal
// * their [RawPcDescriptor]s are the same
// * their [RawStackMaps]s are the same
// * their static call targets are the same
#if defined(DART_PRECOMPILER)
class CodeKeyValueTrait {
public:
// Typedefs needed for the DirectChainedHashMap template.
typedef const Code* Key;
typedef const Code* Value;
typedef const Code* Pair;
static Key KeyOf(Pair kv) { return kv; }
static Value ValueOf(Pair kv) { return kv; }
static inline intptr_t Hashcode(Key key) { return key->Size(); }
static inline bool IsKeyEqual(Pair pair, Key key) {
if (pair->raw() == key->raw()) return true;
// Notice we assume that these entries have already been de-duped, so we
// can use pointer equality.
if (pair->static_calls_target_table() != key->static_calls_target_table()) {
return false;
}
if (pair->pc_descriptors() != key->pc_descriptors()) {
return false;
}
if (pair->stackmaps() != key->stackmaps()) {
return false;
}
if (pair->catch_entry_moves_maps() != key->catch_entry_moves_maps()) {
return false;
}
return Instructions::Equals(pair->instructions(), key->instructions());
}
};
typedef DirectChainedHashMap<CodeKeyValueTrait> CodeSet;
#endif // defined(DART_PRECOMPILER)
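A compact sketch of how such a trait is consumed; it mirrors the DedupOneInstructions logic further down and assumes `zone`, a `code` handle, and an `instructions` handle are already set up:
  // First Code object with a given "shape" becomes canonical; later matches
  // reuse the canonical instructions.
  CodeSet canonical_set;
  const Code* canonical = canonical_set.LookupValue(&code);
  if (canonical == NULL) {
    canonical_set.Insert(&Code::ZoneHandle(zone, code.raw()));
  } else {
    instructions = canonical->instructions();
  }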
void ProgramVisitor::DedupInstructions() {
class DedupInstructionsVisitor : public FunctionVisitor,
public ObjectVisitor {
@ -712,6 +763,59 @@ void ProgramVisitor::DedupInstructions() {
ProgramVisitor::VisitFunctions(&visitor);
}
void ProgramVisitor::DedupInstructionsWithSameMetadata() {
#if defined(DART_PRECOMPILER)
class DedupInstructionsWithSameMetadataVisitor : public FunctionVisitor,
public ObjectVisitor {
public:
explicit DedupInstructionsWithSameMetadataVisitor(Zone* zone)
: zone_(zone),
canonical_set_(),
code_(Code::Handle(zone)),
owner_(Object::Handle(zone)),
instructions_(Instructions::Handle(zone)) {}
void VisitObject(RawObject* obj) {
if (obj->IsCode()) {
canonical_set_.Insert(&Code::ZoneHandle(zone_, Code::RawCast(obj)));
}
}
void Visit(const Function& function) {
if (!function.HasCode()) {
return;
}
code_ = function.CurrentCode();
instructions_ = DedupOneInstructions(code_);
code_.SetActiveInstructions(instructions_);
code_.set_instructions(instructions_);
function.SetInstructions(code_); // Update cached entry point.
}
RawInstructions* DedupOneInstructions(const Code& code) {
const Code* canonical = canonical_set_.LookupValue(&code);
if (canonical == NULL) {
canonical_set_.Insert(&Code::ZoneHandle(zone_, code.raw()));
return code.instructions();
} else {
owner_ = code.owner();
return canonical->instructions();
}
}
private:
Zone* zone_;
CodeSet canonical_set_;
Code& code_;
Object& owner_;
Instructions& instructions_;
};
DedupInstructionsWithSameMetadataVisitor visitor(Thread::Current()->zone());
ProgramVisitor::VisitFunctions(&visitor);
#endif // defined(DART_PRECOMPILER)
}
void ProgramVisitor::Dedup() {
Thread* thread = Thread::Current();
StackZone stack_zone(thread);
@ -731,7 +835,11 @@ void ProgramVisitor::Dedup() {
#if defined(PRODUCT)
// Reduces binary size but obfuscates profiler results.
DedupInstructions();
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
DedupInstructionsWithSameMetadata();
} else {
DedupInstructions();
}
#endif
}

View file

@ -40,6 +40,7 @@ class ProgramVisitor : public AllStatic {
static void DedupCodeSourceMaps();
static void DedupLists();
static void DedupInstructions();
static void DedupInstructionsWithSameMetadata();
};
} // namespace dart

View file

@ -2254,6 +2254,7 @@ class RawArray : public RawInstance {
friend class LinkedHashMapSerializationCluster;
friend class LinkedHashMapDeserializationCluster;
friend class CodeDeserializationCluster;
friend class Deserializer;
friend class RawCode;
friend class RawImmutableArray;

View file

@ -183,7 +183,7 @@ DEFINE_RUNTIME_ENTRY(NullError, 0) {
const intptr_t name_index = reader.GetNullCheckNameIndexAt(pc_offset);
RELEASE_ASSERT(name_index >= 0);
const ObjectPool& pool = ObjectPool::Handle(zone, code.object_pool());
const ObjectPool& pool = ObjectPool::Handle(zone, code.GetObjectPool());
const String& member_name =
String::CheckedHandle(zone, pool.ObjectAt(name_index));
@ -787,7 +787,7 @@ DEFINE_RUNTIME_ENTRY(TypeCheck, 7) {
const Code& caller_code =
Code::Handle(zone, caller_frame->LookupDartCode());
const ObjectPool& pool =
ObjectPool::Handle(zone, caller_code.object_pool());
ObjectPool::Handle(zone, caller_code.GetObjectPool());
TypeTestingStubCallPattern tts_pattern(caller_frame->pc());
const intptr_t stc_pool_idx = tts_pattern.GetSubtypeTestCachePoolIndex();
const intptr_t dst_name_idx = stc_pool_idx + 1;
@ -824,7 +824,7 @@ DEFINE_RUNTIME_ENTRY(TypeCheck, 7) {
const Code& caller_code =
Code::Handle(zone, caller_frame->LookupDartCode());
const ObjectPool& pool =
ObjectPool::Handle(zone, caller_code.object_pool());
ObjectPool::Handle(zone, caller_code.GetObjectPool());
TypeTestingStubCallPattern tts_pattern(caller_frame->pc());
const intptr_t stc_pool_idx = tts_pattern.GetSubtypeTestCachePoolIndex();

View file

@ -3745,8 +3745,11 @@ void Simulator::JumpToFrame(uword pc, uword sp, uword fp, Thread* thread) {
// Restore pool pointer.
int32_t code =
*reinterpret_cast<int32_t*>(fp + kPcMarkerSlotFromFp * kWordSize);
int32_t pp = *reinterpret_cast<int32_t*>(code + Code::object_pool_offset() -
kHeapObjectTag);
int32_t pp = (FLAG_precompiled_mode && FLAG_use_bare_instructions)
? reinterpret_cast<int32_t>(thread->global_object_pool())
: *reinterpret_cast<int32_t*>(
(code + Code::object_pool_offset() - kHeapObjectTag));
set_register(CODE_REG, code);
set_register(PP, pp);
buf->Longjmp();

View file

@ -3565,8 +3565,10 @@ void Simulator::JumpToFrame(uword pc, uword sp, uword fp, Thread* thread) {
// Restore pool pointer.
int64_t code =
*reinterpret_cast<int64_t*>(fp + kPcMarkerSlotFromFp * kWordSize);
int64_t pp = *reinterpret_cast<int64_t*>(code + Code::object_pool_offset() -
kHeapObjectTag);
int64_t pp = (FLAG_precompiled_mode && FLAG_use_bare_instructions)
? reinterpret_cast<int64_t>(thread->global_object_pool())
: *reinterpret_cast<int64_t*>(
code + Code::object_pool_offset() - kHeapObjectTag);
pp -= kHeapObjectTag; // In the PP register, the pool pointer is untagged.
set_register(NULL, CODE_REG, code);
set_register(NULL, PP, pp);
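The tag handling here is the same convention the ARM64 stubs later in this change rely on: the PP register holds the pool pointer with the heap object tag stripped. A purely illustrative sketch of the arithmetic (`slot_offset` stands in for whatever pool-layout offset the caller computes; it is not a specific API):
  uword tagged_pool = reinterpret_cast<uword>(thread->global_object_pool());
  uword untagged_pp = tagged_pool - kHeapObjectTag;  // value kept in PP
  uword slot_addr = untagged_pp + slot_offset;       // plain offset addressing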

View file

@ -43,6 +43,18 @@ const FrameLayout default_frame_layout = {
/*.saved_caller_pp_from_fp = */ kSavedCallerPpSlotFromFp,
/*.code_from_fp = */ kPcMarkerSlotFromFp,
};
const FrameLayout bare_instructions_frame_layout = {
/*.first_object_from_fp =*/kFirstObjectSlotFromFp, // No saved PP slot.
/*.last_fixed_object_from_fp = */ kLastFixedObjectSlotFromFp +
2, // No saved CODE, PP slots
/*.param_end_from_fp = */ kParamEndSlotFromFp,
/*.first_local_from_fp =*/kFirstLocalSlotFromFp +
2, // No saved CODE, PP slots.
/*.dart_fixed_frame_size =*/kDartFrameFixedSize -
2, // No saved CODE, PP slots.
/*.saved_caller_pp_from_fp = */ 0, // No saved PP slot.
/*.code_from_fp = */ 0, // No saved CODE
};
FrameLayout compiler_frame_layout = invalid_frame_layout;
FrameLayout runtime_frame_layout = invalid_frame_layout;
@ -62,8 +74,19 @@ int FrameLayout::FrameSlotForVariableIndex(int variable_index) const {
}
void FrameLayout::Init() {
// By default we use frames with CODE_REG/PP in the frame.
compiler_frame_layout = default_frame_layout;
runtime_frame_layout = default_frame_layout;
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
compiler_frame_layout = bare_instructions_frame_layout;
}
#if defined(DART_PRECOMPILED_RUNTIME)
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
compiler_frame_layout = invalid_frame_layout;
runtime_frame_layout = bare_instructions_frame_layout;
}
#endif
}
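The bare layout above simply drops the two fixed slots that used to hold CODE and PP. A worked restatement of that arithmetic, using only the constants named in this file (the concrete values are per-architecture and not asserted here):
  int default_fixed_size = kDartFrameFixedSize;   // includes CODE and PP slots
  int bare_fixed_size = kDartFrameFixedSize - 2;  // those two slots are gone
  // Every FP-relative index below the fixed area therefore shifts by two,
  // which is exactly what the "+ 2" adjustments in the layout above encode.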
Isolate* StackFrame::IsolateOfBareInstructionsFrame() const {
@ -118,8 +141,8 @@ bool StackFrame::IsStubFrame() const {
return false;
}
if (IsBareInstructionsStubFrame()) {
return true;
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
return IsBareInstructionsStubFrame();
}
ASSERT(!(IsEntryFrame() || IsExitFrame()));

View file

@ -42,7 +42,6 @@ static const int kSavedCallerPcSlotFromFp = 1;
static const int kParamEndSlotFromFp = 1; // One slot past last parameter.
static const int kCallerSpSlotFromFp = 2;
static const int kSavedAboveReturnAddress = 3; // Saved above return address.
// Entry and exit frame layout.
static const int kExitLinkSlotFromEntryFp = -22;

View file

@ -43,7 +43,6 @@ static const int kSavedCallerPcSlotFromFp = 1;
static const int kParamEndSlotFromFp = 1; // One slot past last parameter.
static const int kCallerSpSlotFromFp = 2;
static const int kSavedAboveReturnAddress = 3; // Saved above return address.
// Entry and exit frame layout.
#if defined(_WIN64)

View file

@ -7,6 +7,7 @@
#include "platform/assert.h"
#include "platform/globals.h"
#include "vm/clustered_snapshot.h"
#include "vm/compiler/aot/precompiler.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/assembler/disassembler.h"
#include "vm/flags.h"
@ -20,6 +21,7 @@
namespace dart {
DEFINE_FLAG(bool, disassemble_stubs, false, "Disassemble generated stubs.");
DECLARE_FLAG(bool, precompiled_mode);
DECLARE_FLAG(bool, enable_interpreter);
@ -162,13 +164,24 @@ RawCode* StubCode::GetAllocationStubForClass(const Class& cls) {
#if !defined(DART_PRECOMPILED_RUNTIME)
if (stub.IsNull()) {
ObjectPoolWrapper object_pool_wrapper;
Assembler assembler(&object_pool_wrapper);
Precompiler* precompiler = Precompiler::Instance();
ObjectPoolWrapper* wrapper =
FLAG_use_bare_instructions && precompiler != NULL
? precompiler->global_object_pool_wrapper()
: &object_pool_wrapper;
const auto pool_attachment =
FLAG_precompiled_mode && FLAG_use_bare_instructions
? Code::PoolAttachment::kNotAttachPool
: Code::PoolAttachment::kAttachPool;
Assembler assembler(wrapper);
const char* name = cls.ToCString();
StubCode::GenerateAllocationStubForClass(&assembler, cls);
if (thread->IsMutatorThread()) {
stub ^= Code::FinalizeCode(name, nullptr, &assembler,
Code::PoolAttachment::kAttachPool,
stub ^= Code::FinalizeCode(name, nullptr, &assembler, pool_attachment,
/*optimized=*/false);
// Check if background compilation thread has not already added the stub.
if (cls.allocation_stub() == Code::null()) {
@ -193,8 +206,7 @@ RawCode* StubCode::GetAllocationStubForClass(const Class& cls) {
// Do not Garbage collect during this stage and instead allow the
// heap to grow.
NoHeapGrowthControlScope no_growth_control;
stub ^= Code::FinalizeCode(name, nullptr, &assembler,
Code::PoolAttachment::kAttachPool,
stub ^= Code::FinalizeCode(name, nullptr, &assembler, pool_attachment,
false /* optimized */);
stub.set_owner(cls);
cls.set_allocation_stub(stub);
@ -226,16 +238,23 @@ RawCode* StubCode::GetAllocationStubForClass(const Class& cls) {
}
#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
RawCode* StubCode::GetBuildMethodExtractorStub() {
RawCode* StubCode::GetBuildMethodExtractorStub(ObjectPoolWrapper* pool) {
#if !defined(DART_PRECOMPILED_RUNTIME)
ObjectPoolWrapper object_pool_wrapper;
Assembler assembler(&object_pool_wrapper);
Assembler assembler(pool != nullptr ? pool : &object_pool_wrapper);
StubCode::GenerateBuildMethodExtractorStub(&assembler);
const char* name = "BuildMethodExtractor";
const Code& stub = Code::Handle(Code::FinalizeCode(
name, nullptr, &assembler, Code::PoolAttachment::kAttachPool,
name, nullptr, &assembler, Code::PoolAttachment::kNotAttachPool,
/*optimized=*/false));
if (pool == nullptr) {
const ObjectPool& object_pool =
ObjectPool::Handle(object_pool_wrapper.MakeObjectPool());
stub.set_object_pool(object_pool.raw());
}
#ifndef PRODUCT
if (FLAG_support_disassembler && FLAG_disassemble_stubs) {
LogBlock lb;

View file

@ -14,6 +14,7 @@ namespace dart {
class Code;
class Isolate;
class ObjectPointerVisitor;
class ObjectPoolWrapper;
class RawCode;
class SnapshotReader;
class SnapshotWriter;
@ -151,7 +152,7 @@ class StubCode : public AllStatic {
static RawCode* GetAllocationStubForClass(const Class& cls);
#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
static RawCode* GetBuildMethodExtractorStub();
static RawCode* GetBuildMethodExtractorStub(ObjectPoolWrapper* pool);
static void GenerateBuildMethodExtractorStub(Assembler* assembler);
#endif

View file

@ -29,6 +29,7 @@ DEFINE_FLAG(bool,
use_slow_path,
false,
"Set to true for debugging & verifying the slow paths.");
DECLARE_FLAG(bool, precompiled_mode);
// Input parameters:
// LR : return address.
@ -1018,7 +1019,11 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ Bind(&done_push_arguments);
// Call the Dart code entrypoint.
__ LoadImmediate(PP, 0); // GC safe value into PP.
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
} else {
__ LoadImmediate(PP, 0); // GC safe value into PP.
}
__ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle));
__ ldr(R0, FieldAddress(CODE_REG, Code::entry_point_offset()));
__ blx(R0); // R4 is the arguments descriptor array.
@ -2466,7 +2471,12 @@ void StubCode::GenerateJumpToFrameStub(Assembler* assembler) {
__ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
// Restore the pool pointer.
__ RestoreCodePointer();
__ LoadPoolPointer();
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
__ set_constant_pool_allowed(true);
} else {
__ LoadPoolPointer();
}
__ bx(LR); // Jump to continuation point.
}

View file

@ -28,6 +28,7 @@ DEFINE_FLAG(bool,
false,
"Set to true for debugging & verifying the slow paths.");
DECLARE_FLAG(bool, enable_interpreter);
DECLARE_FLAG(bool, precompiled_mode);
// Input parameters:
// LR : return address.
@ -1094,10 +1095,15 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ b(&push_arguments, LT);
__ Bind(&done_push_arguments);
// We now load the pool pointer(PP) with a GC safe value as we are about to
// invoke dart code. We don't need a real object pool here.
// Smi zero does not work because ARM64 assumes PP to be untagged.
__ LoadObject(PP, Object::null_object());
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
__ sub(PP, PP, Operand(kHeapObjectTag)); // Pool in PP is untagged!
} else {
// We now load the pool pointer (PP) with a GC-safe value as we are about to
// invoke dart code. We don't need a real object pool here.
// Smi zero does not work because ARM64 assumes PP to be untagged.
__ LoadObject(PP, Object::null_object());
}
// Call the Dart code entrypoint.
__ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle));
@ -2734,7 +2740,12 @@ void StubCode::GenerateJumpToFrameStub(Assembler* assembler) {
__ StoreToOffset(ZR, THR, Thread::top_exit_frame_info_offset());
// Restore the pool pointer.
__ RestoreCodePointer();
__ LoadPoolPointer();
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ ldr(PP, Address(THR, Thread::global_object_pool_offset()));
__ sub(PP, PP, Operand(kHeapObjectTag)); // Pool in PP is untagged!
} else {
__ LoadPoolPointer();
}
__ ret(); // Jump to continuation point.
}

View file

@ -33,6 +33,7 @@ DEFINE_FLAG(bool,
false,
"Set to true for debugging & verifying the slow paths.");
DECLARE_FLAG(bool, enable_interpreter);
DECLARE_FLAG(bool, precompiled_mode);
// Input parameters:
// RSP : points to return address.
@ -1025,7 +1026,11 @@ void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ Bind(&done_push_arguments);
// Call the Dart code entrypoint.
__ xorq(PP, PP); // GC-safe value into PP.
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ movq(PP, Address(THR, Thread::global_object_pool_offset()));
} else {
__ xorq(PP, PP); // GC-safe value into PP.
}
__ movq(CODE_REG,
Address(kTargetCodeReg, VMHandles::kOffsetOfRawPtrInHandle));
__ movq(kTargetCodeReg, FieldAddress(CODE_REG, Code::entry_point_offset()));
@ -2737,7 +2742,11 @@ void StubCode::GenerateJumpToFrameStub(Assembler* assembler) {
__ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
// Restore the pool pointer.
__ RestoreCodePointer();
__ LoadPoolPointer(PP);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ movq(PP, Address(THR, Thread::global_object_pool_offset()));
} else {
__ LoadPoolPointer(PP);
}
__ jmp(CallingConventions::kArg1Reg); // Jump to program counter.
}

View file

@ -48,6 +48,7 @@ class RawObject;
class RawCode;
class RawError;
class RawGrowableObjectArray;
class RawObjectPool;
class RawStackTrace;
class RawString;
class RuntimeEntry;
@ -121,7 +122,8 @@ class Zone;
#define CACHED_NON_VM_STUB_LIST(V) \
V(RawObject*, object_null_, Object::null(), NULL) \
V(RawBool*, bool_true_, Object::bool_true().raw(), NULL) \
V(RawBool*, bool_false_, Object::bool_false().raw(), NULL)
V(RawBool*, bool_false_, Object::bool_false().raw(), NULL) \
V(RawObjectPool*, global_object_pool_, ObjectPool::null(), NULL)
// List of VM-global objects/addresses cached in each Thread object.
// Important: constant false must immediately follow constant true.
@ -552,6 +554,11 @@ class Thread : public BaseThread {
LEAF_RUNTIME_ENTRY_LIST(DEFINE_OFFSET_METHOD)
#undef DEFINE_OFFSET_METHOD
RawObjectPool* global_object_pool() const { return global_object_pool_; }
void set_global_object_pool(RawObjectPool* raw_value) {
global_object_pool_ = raw_value;
}
static bool CanLoadFromThread(const Object& object);
static intptr_t OffsetFromThread(const Object& object);
static bool ObjectAtOffset(intptr_t offset, Object* object);
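A minimal sketch, assuming it runs when a thread enters an isolate in the bare-instructions AOT runtime (mirroring the fragment at the top of this change), of how the cached field gets populated so generated code can reload PP with a single THR-relative load:
  if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
    thread->set_global_object_pool(
        thread->isolate()->object_store()->global_object_pool());
  }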

View file

@ -345,9 +345,11 @@ RawInstructions* TypeTestingStubGenerator::BuildCodeForType(const Type& type) {
BuildOptimizedTypeTestStub(&assembler, hi, type, type_class);
const char* name = namer_.StubNameForType(type);
const auto pool_attachment = FLAG_use_bare_instructions
? Code::PoolAttachment::kNotAttachPool
: Code::PoolAttachment::kAttachPool;
const Code& code = Code::Handle(Code::FinalizeCode(
name, nullptr, &assembler, Code::PoolAttachment::kAttachPool,
false /* optimized */));
name, nullptr, &assembler, pool_attachment, false /* optimized */));
#ifndef PRODUCT
if (FLAG_support_disassembler && FLAG_disassemble_stubs) {
LogBlock lb;

View file

@ -10,6 +10,8 @@
namespace dart {
class ObjectPoolWrapper;
class TypeTestingStubNamer {
public:
TypeTestingStubNamer();

View file

@ -79,7 +79,6 @@ io/test_extension_test: RuntimeError # Platform.script points to dill file.
no_lazy_dispatchers_test: SkipByDesign # KBC interpreter doesn't support --no_lazy_dispatchers
[ $compiler == dartkp && $mode == debug && $runtime == dart_precompiled && $strong ]
io/compile_all_test: Crash # Issue 32373
io/raw_socket_test: Crash
io/skipping_dart2js_compilations_test: Crash
io/socket_exception_test: Pass, Crash
@ -97,7 +96,7 @@ io/namespace_test: RuntimeError
[ $compiler == dartkp && $runtime == dart_precompiled && $strong ]
dwarf_stack_trace_test: RuntimeError
io/compile_all_test: RuntimeError # Issue 32338
io/compile_all_test: Skip # We do not support --compile-all for precompilation
io/file_fuzz_test: RuntimeError, Pass
io/http_client_connect_test: Skip # Flaky.
io/http_close_test: Crash