[vm] Decouple assemblers from runtime.

This is the next step towards preventing the compiler from directly peeking
into the runtime and instead making it interact with the runtime through a
well-defined surface. The goal of the refactoring is to locate all places
where the compiler accesses runtime information and partition those accesses
into two categories:

- creating objects in the host runtime (e.g. allocating strings, numbers, etc.)
during compilation;
- accessing properties of the target runtime (e.g. offsets of fields) to
embed them into the generated code (see the toy sketch below).
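
To make the split concrete, here is a toy sketch (hypothetical types and
names, not VM code; only the host/target distinction mirrors the real
refactoring):

  #include <cstdint>
  #include <string>

  namespace host { using String = std::string; }    // objects live in the host runtime
  namespace target { constexpr int kWordSize = 4; }  // layout of the target runtime

  // Category 1: create an object in the host runtime during compilation,
  // e.g. a symbol that ends up in the snapshot.
  host::String MakeSymbol(const char* name) {
    return host::String(name);
  }

  // Category 2: read a target runtime property (here a word size) and embed
  // it into the generated code as a plain integer offset.
  int32_t FieldByteOffset(int field_index) {
    return field_index * target::kWordSize;
  }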

This change introduces the dart::compiler and dart::compiler::target namespaces.

All code in the compiler will gradually be moved into the dart::compiler namespace.
One of the motivations for this change is to be able to prevent access to
globally defined host constants like kWordSize by shadowing them in the
dart::compiler namespace.

The nested namespace dart::compiler::target hosts all information about the
target runtime that the compiler can access, e.g. compiler::target::kWordSize
defines the word size of the target, which will eventually be allowed to
differ from the host kWordSize (defined by dart::kWordSize).
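
For illustration, a minimal self-contained sketch of the shadowing idea (not
the actual VM headers; poisoning via an incomplete type is just one possible
way to implement it):

  namespace dart {

  constexpr int kWordSize = sizeof(void*);  // host word size

  namespace compiler {

  // Shadow the host constant so that an unqualified kWordSize inside
  // dart::compiler no longer compiles as a value, forcing an explicit choice
  // between dart::kWordSize (host) and compiler::target::kWordSize (target).
  class kWordSize;

  namespace target {
  // Word size of the target architecture; eventually allowed to differ from
  // the host, e.g. 4 when a 64-bit host targets 32-bit ARM.
  constexpr int kWordSize = 4;
  }  // namespace target

  // Offsets baked into generated code are computed in target words.
  inline int FrameSlotToByteOffset(int slot) {
    return slot * target::kWordSize;
  }

  }  // namespace compiler
  }  // namespace dart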

The API for compiler-to-runtime interaction is placed in compiler_api.h.
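
To sketch the rough shape of that surface (declarations only, with
hypothetical signatures; the helper names are ones that appear in the
assembler changes below):

  #include <cstdint>

  namespace dart {
  class Object;
  class String;
  class Zone;

  namespace compiler {

  // Services of the host runtime used while compiling.
  bool IsNotTemporaryScopedHandle(const Object& obj);
  bool IsInOldSpace(const Object& obj);
  bool IsOriginalObject(const Object& obj);
  Object& NewZoneHandle(Zone* zone, const Object& obj);
  void SetToNull(Object* obj);
  intptr_t ObjectHash(const Object& obj);
  const String& AllocateString(const char* content);

  namespace target {
  // Properties of the target runtime that get embedded into generated code.
  bool CanLoadFromThread(const Object& obj, intptr_t* offset = nullptr);
  bool IsSmi(const Object& obj);
  intptr_t ToRawSmi(const Object& obj);
  }  // namespace target

  }  // namespace compiler
  }  // namespace dart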

Note that we still permit the runtime to access compiler internals directly;
this is not going to be decoupled as part of this work.

Issue https://github.com/dart-lang/sdk/issues/31709

Change-Id: If4396d295879391becfa6c38d4802bbff81f5b20
Reviewed-on: https://dart-review.googlesource.com/c/90242
Commit-Queue: Vyacheslav Egorov <vegorov@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
Vyacheslav Egorov 2019-01-25 16:45:13 +00:00 committed by commit-bot@chromium.org
parent a5030ed92f
commit f496e538f4
122 changed files with 3139 additions and 2220 deletions


@ -7,13 +7,13 @@
#include "platform/allocation.h"
#include "platform/assert.h"
#include "vm/base_isolate.h"
#include "vm/globals.h"
namespace dart {
// Forward declarations.
class ThreadState;
class Zone;
// Stack resources subclass from this base class. The VM will ensure that the
// destructors of these objects are called before the stack is unwound past the


@ -5,6 +5,7 @@
#ifndef RUNTIME_VM_BITFIELD_H_
#define RUNTIME_VM_BITFIELD_H_
#include "platform/assert.h"
#include "platform/globals.h"
namespace dart {
@ -16,6 +17,9 @@ static const uword kUwordOne = 1U;
template <typename S, typename T, int position, int size>
class BitField {
public:
static_assert((sizeof(S) * kBitsPerByte) >= (position + size),
"BitField does not fit into the type.");
static const intptr_t kNextBit = position + size;
// Tells whether the provided value fits into the bit field.
@ -39,7 +43,6 @@ class BitField {
// Returns an S with the bit field value encoded.
static S encode(T value) {
COMPILE_ASSERT((sizeof(S) * kBitsPerByte) >= (position + size));
ASSERT(is_valid(value));
return static_cast<S>(value) << position;
}


@ -6,15 +6,11 @@
#define RUNTIME_VM_BITMAP_H_
#include "vm/allocation.h"
#include "vm/isolate.h"
#include "vm/thread_state.h"
#include "vm/zone.h"
namespace dart {
// Forward declarations.
class RawStackMap;
class StackMap;
// BitmapBuilder is used to build a bitmap. The implementation is optimized
// for a dense set of small bit maps without a fixed upper bound (e.g: a
// pointer map description of a stack).
@ -23,7 +19,8 @@ class BitmapBuilder : public ZoneAllocated {
BitmapBuilder()
: length_(0),
data_size_in_bytes_(kInitialSizeInBytes),
data_(Thread::Current()->zone()->Alloc<uint8_t>(kInitialSizeInBytes)) {
data_(ThreadState::Current()->zone()->Alloc<uint8_t>(
kInitialSizeInBytes)) {
memset(data_, 0, kInitialSizeInBytes);
}

runtime/vm/class_id.h

@ -0,0 +1,184 @@
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_CLASS_ID_H_
#define RUNTIME_VM_CLASS_ID_H_
// This header defines the list of VM implementation classes and their ids.
//
// Note: we assume that all builds of Dart VM use exactly the same class ids
// for these classes.
namespace dart {
#define CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY(V) \
V(Class) \
V(PatchClass) \
V(Function) \
V(ClosureData) \
V(SignatureData) \
V(RedirectionData) \
V(Field) \
V(Script) \
V(Library) \
V(Namespace) \
V(KernelProgramInfo) \
V(Code) \
V(Bytecode) \
V(Instructions) \
V(ObjectPool) \
V(PcDescriptors) \
V(CodeSourceMap) \
V(StackMap) \
V(LocalVarDescriptors) \
V(ExceptionHandlers) \
V(Context) \
V(ContextScope) \
V(SingleTargetCache) \
V(UnlinkedCall) \
V(ICData) \
V(MegamorphicCache) \
V(SubtypeTestCache) \
V(Error) \
V(ApiError) \
V(LanguageError) \
V(UnhandledException) \
V(UnwindError) \
V(Instance) \
V(LibraryPrefix) \
V(TypeArguments) \
V(AbstractType) \
V(Type) \
V(TypeRef) \
V(TypeParameter) \
V(Closure) \
V(Number) \
V(Integer) \
V(Smi) \
V(Mint) \
V(Double) \
V(Bool) \
V(GrowableObjectArray) \
V(Float32x4) \
V(Int32x4) \
V(Float64x2) \
V(TypedData) \
V(ExternalTypedData) \
V(Capability) \
V(ReceivePort) \
V(SendPort) \
V(StackTrace) \
V(RegExp) \
V(WeakProperty) \
V(MirrorReference) \
V(LinkedHashMap) \
V(UserTag)
#define CLASS_LIST_ARRAYS(V) \
V(Array) \
V(ImmutableArray)
#define CLASS_LIST_STRINGS(V) \
V(String) \
V(OneByteString) \
V(TwoByteString) \
V(ExternalOneByteString) \
V(ExternalTwoByteString)
#define CLASS_LIST_TYPED_DATA(V) \
V(Int8Array) \
V(Uint8Array) \
V(Uint8ClampedArray) \
V(Int16Array) \
V(Uint16Array) \
V(Int32Array) \
V(Uint32Array) \
V(Int64Array) \
V(Uint64Array) \
V(Float32Array) \
V(Float64Array) \
V(Float32x4Array) \
V(Int32x4Array) \
V(Float64x2Array)
#define DART_CLASS_LIST_TYPED_DATA(V) \
V(Int8) \
V(Uint8) \
V(Uint8Clamped) \
V(Int16) \
V(Uint16) \
V(Int32) \
V(Uint32) \
V(Int64) \
V(Uint64) \
V(Float32) \
V(Float64) \
V(Float32x4) \
V(Int32x4) \
V(Float64x2)
#define CLASS_LIST_FOR_HANDLES(V) \
CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY(V) \
V(Array) \
V(String)
#define CLASS_LIST_NO_OBJECT(V) \
CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY(V) \
CLASS_LIST_ARRAYS(V) \
CLASS_LIST_STRINGS(V)
#define CLASS_LIST(V) \
V(Object) \
CLASS_LIST_NO_OBJECT(V)
enum ClassId {
// Illegal class id.
kIllegalCid = 0,
// A sentinel used by the vm service's heap snapshots to represent references
// from the stack.
kStackCid = 1,
// The following entries describe classes for pseudo-objects in the heap
// that should never be reachable from live objects. Free list elements
// maintain the free list for old space, and forwarding corpses are used to
// implement one-way become.
kFreeListElement,
kForwardingCorpse,
// List of Ids for predefined classes.
#define DEFINE_OBJECT_KIND(clazz) k##clazz##Cid,
CLASS_LIST(DEFINE_OBJECT_KIND)
#undef DEFINE_OBJECT_KIND
// clang-format off
#define DEFINE_OBJECT_KIND(clazz) kTypedData##clazz##Cid,
CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
#undef DEFINE_OBJECT_KIND
#define DEFINE_OBJECT_KIND(clazz) kTypedData##clazz##ViewCid,
CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
#undef DEFINE_OBJECT_KIND
kByteDataViewCid,
#define DEFINE_OBJECT_KIND(clazz) kExternalTypedData##clazz##Cid,
CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
#undef DEFINE_OBJECT_KIND
kByteBufferCid,
// clang-format on
// The following entries do not describe a predefined class, but instead
// are class indexes for pre-allocated instances (Null, dynamic and Void).
kNullCid,
kDynamicCid,
kVoidCid,
kNumPredefinedCids,
};
} // namespace dart
#endif // RUNTIME_VM_CLASS_ID_H_


@ -1633,8 +1633,8 @@ class ObjectPoolSerializationCluster : public SerializationCluster {
uint8_t* entry_bits = pool->ptr()->entry_bits();
for (intptr_t i = 0; i < length; i++) {
auto entry_type = ObjectPool::TypeBits::decode(entry_bits[i]);
if ((entry_type == ObjectPool::kTaggedObject) ||
(entry_type == ObjectPool::kNativeEntryData)) {
if ((entry_type == ObjectPool::EntryType::kTaggedObject) ||
(entry_type == ObjectPool::EntryType::kNativeEntryData)) {
s->Push(pool->ptr()->data()[i].raw_obj_);
}
}
@ -1665,7 +1665,7 @@ class ObjectPoolSerializationCluster : public SerializationCluster {
s->Write<uint8_t>(entry_bits[j]);
RawObjectPool::Entry& entry = pool->ptr()->data()[j];
switch (ObjectPool::TypeBits::decode(entry_bits[j])) {
case ObjectPool::kTaggedObject: {
case ObjectPool::EntryType::kTaggedObject: {
#if !defined(TARGET_ARCH_DBC)
if ((entry.raw_obj_ == StubCode::CallNoScopeNative().raw()) ||
(entry.raw_obj_ == StubCode::CallAutoScopeNative().raw())) {
@ -1679,11 +1679,11 @@ class ObjectPoolSerializationCluster : public SerializationCluster {
s->WriteElementRef(entry.raw_obj_, j);
break;
}
case ObjectPool::kImmediate: {
case ObjectPool::EntryType::kImmediate: {
s->Write<intptr_t>(entry.raw_value_);
break;
}
case ObjectPool::kNativeEntryData: {
case ObjectPool::EntryType::kNativeEntryData: {
RawObject* raw = entry.raw_obj_;
RawTypedData* raw_data = reinterpret_cast<RawTypedData*>(raw);
// kNativeEntryData object pool entries are for linking natives for
@ -1699,8 +1699,8 @@ class ObjectPoolSerializationCluster : public SerializationCluster {
s->WriteElementRef(raw, j);
break;
}
case ObjectPool::kNativeFunction:
case ObjectPool::kNativeFunctionWrapper: {
case ObjectPool::EntryType::kNativeFunction:
case ObjectPool::EntryType::kNativeFunctionWrapper: {
// Write nothing. Will initialize with the lazy link entry.
break;
}
@ -1746,21 +1746,21 @@ class ObjectPoolDeserializationCluster : public DeserializationCluster {
pool->ptr()->entry_bits()[j] = entry_bits;
RawObjectPool::Entry& entry = pool->ptr()->data()[j];
switch (ObjectPool::TypeBits::decode(entry_bits)) {
case ObjectPool::kNativeEntryData:
case ObjectPool::kTaggedObject:
case ObjectPool::EntryType::kNativeEntryData:
case ObjectPool::EntryType::kTaggedObject:
entry.raw_obj_ = d->ReadRef();
break;
case ObjectPool::kImmediate:
case ObjectPool::EntryType::kImmediate:
entry.raw_value_ = d->Read<intptr_t>();
break;
case ObjectPool::kNativeFunction: {
case ObjectPool::EntryType::kNativeFunction: {
// Read nothing. Initialize with the lazy link entry.
uword new_entry = NativeEntry::LinkNativeCallEntry();
entry.raw_value_ = static_cast<intptr_t>(new_entry);
break;
}
#if defined(TARGET_ARCH_DBC)
case ObjectPool::kNativeFunctionWrapper: {
case ObjectPool::EntryType::kNativeFunctionWrapper: {
// Read nothing. Initialize with the lazy link entry.
uword new_entry = NativeEntry::BootstrapNativeCallWrapperEntry();
entry.raw_value_ = static_cast<intptr_t>(new_entry);
@ -5715,12 +5715,13 @@ RawApiError* FullSnapshotReader::ReadIsolateSnapshot() {
auto& entry = Object::Handle(zone);
auto& smi = Smi::Handle(zone);
for (intptr_t i = 0; i < pool.Length(); i++) {
if (pool.TypeAt(i) == ObjectPool::kTaggedObject) {
if (pool.TypeAt(i) == ObjectPool::EntryType::kTaggedObject) {
entry = pool.ObjectAt(i);
if (entry.raw() == StubCode::UnlinkedCall().raw()) {
smi = Smi::FromAlignedAddress(
StubCode::UnlinkedCall().MonomorphicEntryPoint());
pool.SetTypeAt(i, ObjectPool::kImmediate, ObjectPool::kPatchable);
pool.SetTypeAt(i, ObjectPool::EntryType::kImmediate,
ObjectPool::Patchability::kPatchable);
pool.SetObjectAt(i, smi);
}
}


@ -0,0 +1,44 @@
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_CODE_ENTRY_KIND_H_
#define RUNTIME_VM_CODE_ENTRY_KIND_H_
namespace dart {
// Compiled functions might have several different entry points, which either
// perform additional checking on entry into the function or skip some of the
// checks normally performed on the entry.
//
// Which checks are performed and skipped depend on the function and VM mode.
enum class CodeEntryKind {
// Normal entry into the function.
//
// Usually such entries perform type checks for all parameters which are not
// guaranteed to be type checked on the callee side. This can happen if
// parameter type depends on the type parameter of an enclosing class.
kNormal,
// Unchecked entry into the function.
//
// These entries usually skip most of the type checks that normal entries
// perform and are used when optimizing compiler can prove that those
// checks are not needed at a specific call site.
kUnchecked,
// Monomorphic entry into the function.
//
// In AOT mode we might patch call-site to directly invoke target function,
// which would then validate that it is invoked with the expected type of
// the receiver. This validation is handled by monomorphic entry, which then
// falls through to the normal entry.
kMonomorphic,
// Similar to monomorphic entry but with a fallthrough into unchecked entry.
kMonomorphicUnchecked,
};
} // namespace dart
#endif // RUNTIME_VM_CODE_ENTRY_KIND_H_


@ -12,17 +12,12 @@
namespace dart {
// Forward declaration.
class Array;
class Code;
class ExternalLabel;
class Function;
class ICData;
class RawArray;
class RawCode;
class RawFunction;
class RawICData;
class RawObject;
class String;
// Stack-allocated class to create a scope where the specified region
// [address, address + size] has write access enabled. This is used


@ -378,11 +378,13 @@ class BareSwitchableCall : public SwitchableCallBase {
} else {
FATAL1("Failed to decode at %" Px, pc);
}
ASSERT(object_pool_.TypeAt(target_index_) == ObjectPool::kImmediate);
ASSERT(object_pool_.TypeAt(target_index_) ==
ObjectPool::EntryType::kImmediate);
}
void SetTarget(const Code& target) const {
ASSERT(object_pool_.TypeAt(target_index()) == ObjectPool::kImmediate);
ASSERT(object_pool_.TypeAt(target_index()) ==
ObjectPool::EntryType::kImmediate);
object_pool_.SetRawValueAt(target_index(), target.MonomorphicEntryPoint());
}


@ -198,7 +198,7 @@ void Precompiler::DoCompileAll() {
// Since we keep the object pool until the end of AOT compilation, it
// will hang on to its entries until the very end. Therefore we have
// to use handles which survive that long, so we use [zone_] here.
global_object_pool_wrapper_.InitializeWithZone(zone_);
global_object_pool_builder_.InitializeWithZone(zone_);
}
{
@ -238,50 +238,50 @@ void Precompiler::DoCompileAll() {
const Code& code = StubCode::InterpretCall();
const ObjectPool& stub_pool = ObjectPool::Handle(code.object_pool());
global_object_pool_wrapper()->Reset();
global_object_pool_wrapper()->InitializeFrom(stub_pool);
global_object_pool_builder()->Reset();
stub_pool.CopyInto(global_object_pool_builder());
// We have two global code objects we need to re-generate with the new
// global object pool, namely the
// - megamorphic miss handler code and the
// - build method extractor code
MegamorphicCacheTable::ReInitMissHandlerCode(
isolate_, global_object_pool_wrapper());
isolate_, global_object_pool_builder());
auto& stub_code = Code::Handle();
stub_code =
StubCode::GetBuildMethodExtractorStub(global_object_pool_wrapper());
StubCode::GetBuildMethodExtractorStub(global_object_pool_builder());
I->object_store()->set_build_method_extractor_code(stub_code);
stub_code =
StubCode::BuildIsolateSpecificNullErrorSharedWithFPURegsStub(
global_object_pool_wrapper());
global_object_pool_builder());
I->object_store()->set_null_error_stub_with_fpu_regs_stub(stub_code);
stub_code =
StubCode::BuildIsolateSpecificNullErrorSharedWithoutFPURegsStub(
global_object_pool_wrapper());
global_object_pool_builder());
I->object_store()->set_null_error_stub_without_fpu_regs_stub(stub_code);
stub_code =
StubCode::BuildIsolateSpecificStackOverflowSharedWithFPURegsStub(
global_object_pool_wrapper());
global_object_pool_builder());
I->object_store()->set_stack_overflow_stub_with_fpu_regs_stub(
stub_code);
stub_code =
StubCode::BuildIsolateSpecificStackOverflowSharedWithoutFPURegsStub(
global_object_pool_wrapper());
global_object_pool_builder());
I->object_store()->set_stack_overflow_stub_without_fpu_regs_stub(
stub_code);
stub_code = StubCode::BuildIsolateSpecificWriteBarrierWrappersStub(
global_object_pool_wrapper());
global_object_pool_builder());
I->object_store()->set_write_barrier_wrappers_stub(stub_code);
stub_code = StubCode::BuildIsolateSpecificArrayWriteBarrierStub(
global_object_pool_wrapper());
global_object_pool_builder());
I->object_store()->set_array_write_barrier_stub(stub_code);
}
@ -303,10 +303,10 @@ void Precompiler::DoCompileAll() {
// Now we generate the actual object pool instance and attach it to the
// object store. The AOT runtime will use it from there in the enter
// dart code stub.
const auto& pool =
ObjectPool::Handle(global_object_pool_wrapper()->MakeObjectPool());
const auto& pool = ObjectPool::Handle(
ObjectPool::NewFromBuilder(*global_object_pool_builder()));
I->object_store()->set_global_object_pool(pool);
global_object_pool_wrapper()->Reset();
global_object_pool_builder()->Reset();
if (FLAG_print_gop) {
THR_Print("Global object pool:\n");
@ -527,7 +527,7 @@ void Precompiler::CollectCallbackFields() {
void Precompiler::ProcessFunction(const Function& function) {
const intptr_t gop_offset =
FLAG_use_bare_instructions ? global_object_pool_wrapper()->CurrentLength()
FLAG_use_bare_instructions ? global_object_pool_builder()->CurrentLength()
: 0;
if (!function.HasCode()) {
@ -594,9 +594,10 @@ void Precompiler::AddCalleesOf(const Function& function, intptr_t gop_offset) {
String& selector = String::Handle(Z);
if (FLAG_use_bare_instructions) {
for (intptr_t i = gop_offset;
i < global_object_pool_wrapper()->CurrentLength(); i++) {
const auto& wrapper_entry = global_object_pool_wrapper()->EntryAt(i);
if (wrapper_entry.type() == ObjectPool::kTaggedObject) {
i < global_object_pool_builder()->CurrentLength(); i++) {
const auto& wrapper_entry = global_object_pool_builder()->EntryAt(i);
if (wrapper_entry.type() ==
compiler::ObjectPoolBuilderEntry::kTaggedObject) {
const auto& entry = *wrapper_entry.obj_;
AddCalleesOfHelper(entry, &selector, &cls);
}
@ -605,7 +606,7 @@ void Precompiler::AddCalleesOf(const Function& function, intptr_t gop_offset) {
const auto& pool = ObjectPool::Handle(Z, code.object_pool());
auto& entry = Object::Handle(Z);
for (intptr_t i = 0; i < pool.Length(); i++) {
if (pool.TypeAt(i) == ObjectPool::kTaggedObject) {
if (pool.TypeAt(i) == ObjectPool::EntryType::kTaggedObject) {
entry = pool.ObjectAt(i);
AddCalleesOfHelper(entry, &selector, &cls);
}
@ -881,7 +882,7 @@ void Precompiler::AddField(const Field& field) {
}
const intptr_t gop_offset =
FLAG_use_bare_instructions
? global_object_pool_wrapper()->CurrentLength()
? global_object_pool_builder()->CurrentLength()
: 0;
ASSERT(Dart::vm_snapshot_kind() != Snapshot::kFullAOT);
const Function& initializer =
@ -2032,7 +2033,9 @@ void Precompiler::SwitchICCalls() {
void SwitchPool(const ObjectPool& pool) {
for (intptr_t i = 0; i < pool.Length(); i++) {
if (pool.TypeAt(i) != ObjectPool::kTaggedObject) continue;
if (pool.TypeAt(i) != ObjectPool::EntryType::kTaggedObject) {
continue;
}
entry_ = pool.ObjectAt(i);
if (entry_.IsICData()) {
// The only IC calls generated by precompilation are for switchable
@ -2372,12 +2375,12 @@ bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
ASSERT(!FLAG_use_bare_instructions || precompiler_ != nullptr);
ObjectPoolWrapper object_pool;
ObjectPoolWrapper* active_object_pool_wrapper =
ObjectPoolBuilder object_pool;
ObjectPoolBuilder* active_object_pool_builder =
FLAG_use_bare_instructions
? precompiler_->global_object_pool_wrapper()
? precompiler_->global_object_pool_builder()
: &object_pool;
Assembler assembler(active_object_pool_wrapper, use_far_branches);
Assembler assembler(active_object_pool_builder, use_far_branches);
CodeStatistics* function_stats = NULL;
if (FLAG_print_instruction_stats) {


@ -247,9 +247,9 @@ class Precompiler : public ValueObject {
return get_runtime_type_is_unique_;
}
ObjectPoolWrapper* global_object_pool_wrapper() {
compiler::ObjectPoolBuilder* global_object_pool_builder() {
ASSERT(FLAG_use_bare_instructions);
return &global_object_pool_wrapper_;
return &global_object_pool_builder_;
}
static Precompiler* Instance() { return singleton_; }
@ -331,7 +331,7 @@ class Precompiler : public ValueObject {
intptr_t dropped_type_count_;
intptr_t dropped_library_count_;
ObjectPoolWrapper global_object_pool_wrapper_;
compiler::ObjectPoolBuilder global_object_pool_builder_;
GrowableObjectArray& libraries_;
const GrowableObjectArray& pending_functions_;
SymbolSet sent_selectors_;


@ -28,6 +28,8 @@ DEFINE_FLAG(bool,
DEFINE_FLAG(bool, use_far_branches, false, "Enable far branches for ARM.");
#endif
namespace compiler {
static uword NewContents(intptr_t capacity) {
Zone* zone = Thread::Current()->zone();
uword result = zone->AllocUnsafe(capacity);
@ -163,14 +165,16 @@ intptr_t AssemblerBuffer::CountPointerOffsets() const {
return count;
}
#if defined(TARGET_ARCH_IA32)
void AssemblerBuffer::EmitObject(const Object& object) {
// Since we are going to store the handle as part of the fixup information
// the handle needs to be a zone handle.
ASSERT(object.IsNotTemporaryScopedHandle());
ASSERT(object.IsOld());
ASSERT(IsNotTemporaryScopedHandle(object));
ASSERT(IsInOldSpace(object));
EmitFixup(new PatchCodeWithHandle(pointer_offsets_, object));
cursor_ += kWordSize; // Reserve space for pointer.
cursor_ += target::kWordSize; // Reserve space for pointer.
}
#endif
// Shared macros are implemented here.
void AssemblerBase::Unimplemented(const char* message) {
@ -207,8 +211,7 @@ void AssemblerBase::Comment(const char* format, ...) {
va_end(args);
comments_.Add(
new CodeComment(buffer_.GetPosition(),
String::ZoneHandle(String::New(buffer, Heap::kOld))));
new CodeComment(buffer_.GetPosition(), AllocateString(buffer)));
}
}
@ -216,46 +219,27 @@ bool AssemblerBase::EmittingComments() {
return FLAG_code_comments || FLAG_disassemble || FLAG_disassemble_optimized;
}
const Code::Comments& AssemblerBase::GetCodeComments() const {
Code::Comments& comments = Code::Comments::New(comments_.length());
for (intptr_t i = 0; i < comments_.length(); i++) {
comments.SetPCOffsetAt(i, comments_[i]->pc_offset());
comments.SetCommentAt(i, comments_[i]->comment());
}
return comments;
#if !defined(TARGET_ARCH_DBC)
void Assembler::Stop(const char* message) {
Comment("Stop: %s", message);
Breakpoint();
}
#endif
intptr_t ObjIndexPair::Hashcode(Key key) {
if (key.type() != ObjectPool::kTaggedObject) {
if (key.type() != ObjectPoolBuilderEntry::kTaggedObject) {
return key.raw_value_;
}
if (key.obj_->IsNull()) {
return 2011;
}
if (key.obj_->IsString() || key.obj_->IsNumber()) {
return Instance::Cast(*key.obj_).CanonicalizeHash();
}
if (key.obj_->IsCode()) {
// Instructions don't move during compaction.
return Code::Cast(*key.obj_).PayloadStart();
}
if (key.obj_->IsFunction()) {
return Function::Cast(*key.obj_).Hash();
}
if (key.obj_->IsField()) {
return String::HashRawSymbol(Field::Cast(*key.obj_).name());
}
// Unlikely.
return key.obj_->GetClassId();
return ObjectHash(*key.obj_);
}
void ObjectPoolWrapper::Reset() {
void ObjectPoolBuilder::Reset() {
// Null out the handles we've accumulated.
for (intptr_t i = 0; i < object_pool_.length(); ++i) {
if (object_pool_[i].type() == ObjectPool::kTaggedObject) {
*const_cast<Object*>(object_pool_[i].obj_) = Object::null();
*const_cast<Object*>(object_pool_[i].equivalence_) = Object::null();
if (object_pool_[i].type() == ObjectPoolBuilderEntry::kTaggedObject) {
SetToNull(const_cast<Object*>(object_pool_[i].obj_));
SetToNull(const_cast<Object*>(object_pool_[i].equivalence_));
}
}
@ -263,65 +247,38 @@ void ObjectPoolWrapper::Reset() {
object_pool_index_table_.Clear();
}
void ObjectPoolWrapper::InitializeFrom(const ObjectPool& other) {
ASSERT(object_pool_.length() == 0);
for (intptr_t i = 0; i < other.Length(); i++) {
auto type = other.TypeAt(i);
auto patchable = other.PatchableAt(i);
switch (type) {
case ObjectPool::kTaggedObject: {
ObjectPoolWrapperEntry entry(&Object::ZoneHandle(other.ObjectAt(i)),
patchable);
AddObject(entry);
break;
}
case ObjectPool::kImmediate:
case ObjectPool::kNativeFunction:
case ObjectPool::kNativeFunctionWrapper: {
ObjectPoolWrapperEntry entry(other.RawValueAt(i), type, patchable);
AddObject(entry);
break;
}
default:
UNREACHABLE();
}
}
ASSERT(CurrentLength() == other.Length());
intptr_t ObjectPoolBuilder::AddObject(
const Object& obj,
ObjectPoolBuilderEntry::Patchability patchable) {
ASSERT(IsNotTemporaryScopedHandle(obj));
return AddObject(ObjectPoolBuilderEntry(&obj, patchable));
}
intptr_t ObjectPoolWrapper::AddObject(const Object& obj,
ObjectPool::Patchability patchable) {
ASSERT(obj.IsNotTemporaryScopedHandle());
return AddObject(ObjectPoolWrapperEntry(&obj, patchable));
intptr_t ObjectPoolBuilder::AddImmediate(uword imm) {
return AddObject(
ObjectPoolBuilderEntry(imm, ObjectPoolBuilderEntry::kImmediate,
ObjectPoolBuilderEntry::kNotPatchable));
}
intptr_t ObjectPoolWrapper::AddImmediate(uword imm) {
return AddObject(ObjectPoolWrapperEntry(imm, ObjectPool::kImmediate,
ObjectPool::kNotPatchable));
}
intptr_t ObjectPoolWrapper::AddObject(ObjectPoolWrapperEntry entry) {
ASSERT((entry.type() != ObjectPool::kTaggedObject) ||
(entry.obj_->IsNotTemporaryScopedHandle() &&
intptr_t ObjectPoolBuilder::AddObject(ObjectPoolBuilderEntry entry) {
ASSERT((entry.type() != ObjectPoolBuilderEntry::kTaggedObject) ||
(IsNotTemporaryScopedHandle(*entry.obj_) &&
(entry.equivalence_ == NULL ||
entry.equivalence_->IsNotTemporaryScopedHandle())));
IsNotTemporaryScopedHandle(*entry.equivalence_))));
if (entry.type() == ObjectPool::kTaggedObject) {
if (entry.type() == ObjectPoolBuilderEntry::kTaggedObject) {
// If the owner of the object pool wrapper specified a specific zone we
// should use we'll do so.
if (zone_ != NULL) {
entry.obj_ = &Object::ZoneHandle(zone_, entry.obj_->raw());
entry.obj_ = &NewZoneHandle(zone_, *entry.obj_);
if (entry.equivalence_ != NULL) {
entry.equivalence_ =
&Object::ZoneHandle(zone_, entry.equivalence_->raw());
entry.equivalence_ = &NewZoneHandle(zone_, *entry.equivalence_);
}
}
}
object_pool_.Add(entry);
if (entry.patchable() == ObjectPool::kNotPatchable) {
if (entry.patchable() == ObjectPoolBuilderEntry::kNotPatchable) {
// The object isn't patchable. Record the index for fast lookup.
object_pool_index_table_.Insert(
ObjIndexPair(entry, object_pool_.length() - 1));
@ -329,10 +286,10 @@ intptr_t ObjectPoolWrapper::AddObject(ObjectPoolWrapperEntry entry) {
return object_pool_.length() - 1;
}
intptr_t ObjectPoolWrapper::FindObject(ObjectPoolWrapperEntry entry) {
intptr_t ObjectPoolBuilder::FindObject(ObjectPoolBuilderEntry entry) {
// If the object is not patchable, check if we've already got it in the
// object pool.
if (entry.patchable() == ObjectPool::kNotPatchable) {
if (entry.patchable() == ObjectPoolBuilderEntry::kNotPatchable) {
intptr_t idx = object_pool_index_table_.LookupValue(entry);
if (idx != ObjIndexPair::kNoIndex) {
return idx;
@ -341,54 +298,40 @@ intptr_t ObjectPoolWrapper::FindObject(ObjectPoolWrapperEntry entry) {
return AddObject(entry);
}
intptr_t ObjectPoolWrapper::FindObject(const Object& obj,
ObjectPool::Patchability patchable) {
return FindObject(ObjectPoolWrapperEntry(&obj, patchable));
intptr_t ObjectPoolBuilder::FindObject(
const Object& obj,
ObjectPoolBuilderEntry::Patchability patchable) {
return FindObject(ObjectPoolBuilderEntry(&obj, patchable));
}
intptr_t ObjectPoolWrapper::FindObject(const Object& obj,
intptr_t ObjectPoolBuilder::FindObject(const Object& obj,
const Object& equivalence) {
return FindObject(ObjectPoolBuilderEntry(
&obj, &equivalence, ObjectPoolBuilderEntry::kNotPatchable));
}
intptr_t ObjectPoolBuilder::FindImmediate(uword imm) {
return FindObject(
ObjectPoolWrapperEntry(&obj, &equivalence, ObjectPool::kNotPatchable));
ObjectPoolBuilderEntry(imm, ObjectPoolBuilderEntry::kImmediate,
ObjectPoolBuilderEntry::kNotPatchable));
}
intptr_t ObjectPoolWrapper::FindImmediate(uword imm) {
return FindObject(ObjectPoolWrapperEntry(imm, ObjectPool::kImmediate,
ObjectPool::kNotPatchable));
}
intptr_t ObjectPoolWrapper::FindNativeFunction(
intptr_t ObjectPoolBuilder::FindNativeFunction(
const ExternalLabel* label,
ObjectPool::Patchability patchable) {
return FindObject(ObjectPoolWrapperEntry(
label->address(), ObjectPool::kNativeFunction, patchable));
ObjectPoolBuilderEntry::Patchability patchable) {
return FindObject(ObjectPoolBuilderEntry(
label->address(), ObjectPoolBuilderEntry::kNativeFunction, patchable));
}
intptr_t ObjectPoolWrapper::FindNativeFunctionWrapper(
intptr_t ObjectPoolBuilder::FindNativeFunctionWrapper(
const ExternalLabel* label,
ObjectPool::Patchability patchable) {
return FindObject(ObjectPoolWrapperEntry(
label->address(), ObjectPool::kNativeFunctionWrapper, patchable));
ObjectPoolBuilderEntry::Patchability patchable) {
return FindObject(ObjectPoolBuilderEntry(
label->address(), ObjectPoolBuilderEntry::kNativeFunctionWrapper,
patchable));
}
RawObjectPool* ObjectPoolWrapper::MakeObjectPool() {
intptr_t len = object_pool_.length();
if (len == 0) {
return Object::empty_object_pool().raw();
}
const ObjectPool& result = ObjectPool::Handle(ObjectPool::New(len));
for (intptr_t i = 0; i < len; ++i) {
auto type = object_pool_[i].type();
auto patchable = object_pool_[i].patchable();
result.SetTypeAt(i, type, patchable);
if (type == ObjectPool::kTaggedObject) {
result.SetObjectAt(i, *object_pool_[i].obj_);
} else {
result.SetRawValueAt(i, object_pool_[i].raw_value_);
}
}
return result.raw();
}
} // namespace compiler
} // namespace dart


@ -7,11 +7,11 @@
#include "platform/assert.h"
#include "vm/allocation.h"
#include "vm/compiler/assembler/object_pool_builder.h"
#include "vm/compiler/runtime_api.h"
#include "vm/globals.h"
#include "vm/growable_array.h"
#include "vm/hash_map.h"
#include "vm/object.h"
#include "vm/thread.h"
namespace dart {
@ -19,11 +19,14 @@ namespace dart {
DECLARE_FLAG(bool, use_far_branches);
#endif
class MemoryRegion;
namespace compiler {
// Forward declarations.
class Assembler;
class AssemblerFixup;
class AssemblerBuffer;
class MemoryRegion;
class Label : public ZoneAllocated {
public:
@ -45,12 +48,12 @@ class Label : public ZoneAllocated {
// for unused labels.
intptr_t Position() const {
ASSERT(!IsUnused());
return IsBound() ? -position_ - kWordSize : position_ - kWordSize;
return IsBound() ? -position_ - kBias : position_ - kBias;
}
intptr_t LinkPosition() const {
ASSERT(IsLinked());
return position_ - kWordSize;
return position_ - kBias;
}
intptr_t NearPosition() {
@ -69,6 +72,12 @@ class Label : public ZoneAllocated {
#else
static const int kMaxUnresolvedBranches = 1; // Unused on non-Intel.
#endif
// Zero position_ means unused (neither bound nor linked to).
// Thus we offset actual positions by the given bias to prevent zero
// positions from occurring.
// Note: we use target::kWordSize as a bias because on ARM
// there are assertions that check that distance is aligned.
static constexpr int kBias = 4;
intptr_t position_;
intptr_t unresolved_;
@ -79,13 +88,13 @@ class Label : public ZoneAllocated {
void BindTo(intptr_t position) {
ASSERT(!IsBound());
ASSERT(!HasNear());
position_ = -position - kWordSize;
position_ = -position - kBias;
ASSERT(IsBound());
}
void LinkTo(intptr_t position) {
ASSERT(!IsBound());
position_ = position + kWordSize;
position_ = position + kBias;
ASSERT(IsLinked());
}
@ -185,8 +194,10 @@ class AssemblerBuffer : public ValueObject {
return *pointer_offsets_;
}
#if defined(TARGET_ARCH_IA32)
// Emit an object pointer directly in the code.
void EmitObject(const Object& object);
#endif
// Emit a fixup at the current location.
void EmitFixup(AssemblerFixup* fixup) {
@ -285,173 +296,23 @@ class AssemblerBuffer : public ValueObject {
friend class AssemblerFixup;
};
struct ObjectPoolWrapperEntry {
ObjectPoolWrapperEntry() : raw_value_(), entry_bits_(0), equivalence_() {}
ObjectPoolWrapperEntry(const Object* obj, ObjectPool::Patchability patchable)
: obj_(obj),
entry_bits_(ObjectPool::TypeBits::encode(ObjectPool::kTaggedObject) |
ObjectPool::PatchableBit::encode(patchable)),
equivalence_(obj) {}
ObjectPoolWrapperEntry(const Object* obj,
const Object* eqv,
ObjectPool::Patchability patchable)
: obj_(obj),
entry_bits_(ObjectPool::TypeBits::encode(ObjectPool::kTaggedObject) |
ObjectPool::PatchableBit::encode(patchable)),
equivalence_(eqv) {}
ObjectPoolWrapperEntry(uword value,
ObjectPool::EntryType info,
ObjectPool::Patchability patchable)
: raw_value_(value),
entry_bits_(ObjectPool::TypeBits::encode(info) |
ObjectPool::PatchableBit::encode(patchable)),
equivalence_() {}
ObjectPool::EntryType type() const {
return ObjectPool::TypeBits::decode(entry_bits_);
}
ObjectPool::Patchability patchable() const {
return ObjectPool::PatchableBit::decode(entry_bits_);
}
union {
const Object* obj_;
uword raw_value_;
};
uint8_t entry_bits_;
const Object* equivalence_;
};
// Pair type parameter for DirectChainedHashMap used for the constant pool.
class ObjIndexPair {
public:
// Typedefs needed for the DirectChainedHashMap template.
typedef ObjectPoolWrapperEntry Key;
typedef intptr_t Value;
typedef ObjIndexPair Pair;
static const intptr_t kNoIndex = -1;
ObjIndexPair()
: key_(static_cast<uword>(NULL),
ObjectPool::kTaggedObject,
ObjectPool::kPatchable),
value_(kNoIndex) {}
ObjIndexPair(Key key, Value value) : value_(value) {
key_.entry_bits_ = key.entry_bits_;
if (key.type() == ObjectPool::kTaggedObject) {
key_.obj_ = key.obj_;
key_.equivalence_ = key.equivalence_;
} else {
key_.raw_value_ = key.raw_value_;
}
}
static Key KeyOf(Pair kv) { return kv.key_; }
static Value ValueOf(Pair kv) { return kv.value_; }
static intptr_t Hashcode(Key key);
static inline bool IsKeyEqual(Pair kv, Key key) {
if (kv.key_.entry_bits_ != key.entry_bits_) return false;
if (kv.key_.type() == ObjectPool::kTaggedObject) {
return (kv.key_.obj_->raw() == key.obj_->raw()) &&
(kv.key_.equivalence_->raw() == key.equivalence_->raw());
}
return kv.key_.raw_value_ == key.raw_value_;
}
private:
Key key_;
Value value_;
};
class ObjectPoolWrapper : public ValueObject {
public:
ObjectPoolWrapper() : zone_(nullptr) {}
~ObjectPoolWrapper() {
if (zone_ != nullptr) {
Reset();
zone_ = nullptr;
}
}
// Clears all existing entries in this object pool builder.
//
// Note: Any code which has been compiled via this builder might use offsets
// into the pool which are not correct anymore.
void Reset();
// Initializes this object pool builder from [other].
//
// All entries from [other] will be populated, including their
// kind/patchability bits.
void InitializeFrom(const ObjectPool& other);
// Initialize this object pool builder with a [zone].
//
// Any objects added later on will be referenced using handles from [zone].
void InitializeWithZone(Zone* zone) {
ASSERT(object_pool_.length() == 0);
ASSERT(zone_ == nullptr && zone != nullptr);
zone_ = zone;
}
intptr_t AddObject(
const Object& obj,
ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
intptr_t AddImmediate(uword imm);
intptr_t FindObject(
const Object& obj,
ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
intptr_t FindObject(const Object& obj, const Object& equivalence);
intptr_t FindImmediate(uword imm);
intptr_t FindNativeFunction(const ExternalLabel* label,
ObjectPool::Patchability patchable);
intptr_t FindNativeFunctionWrapper(const ExternalLabel* label,
ObjectPool::Patchability patchable);
RawObjectPool* MakeObjectPool();
intptr_t CurrentLength() { return object_pool_.length(); }
ObjectPoolWrapperEntry& EntryAt(intptr_t i) { return object_pool_[i]; }
private:
intptr_t AddObject(ObjectPoolWrapperEntry entry);
intptr_t FindObject(ObjectPoolWrapperEntry entry);
// Objects and jump targets.
GrowableArray<ObjectPoolWrapperEntry> object_pool_;
// Hashmap for fast lookup in object pool.
DirectChainedHashMap<ObjIndexPair> object_pool_index_table_;
// The zone used for allocating the handles we keep in the map and array (or
// NULL, in which case allocations happen using the zone active at the point
// of insertion).
Zone* zone_;
};
enum RestorePP { kRestoreCallerPP, kKeepCalleePP };
class AssemblerBase : public StackResource {
public:
explicit AssemblerBase(ObjectPoolWrapper* object_pool_wrapper)
: StackResource(Thread::Current()),
explicit AssemblerBase(ObjectPoolBuilder* object_pool_builder)
: StackResource(ThreadState::Current()),
prologue_offset_(-1),
has_single_entry_point_(true),
object_pool_wrapper_(object_pool_wrapper) {}
object_pool_builder_(object_pool_builder) {}
virtual ~AssemblerBase() {}
intptr_t CodeSize() const { return buffer_.Size(); }
uword CodeAddress(intptr_t offset) { return buffer_.Address(offset); }
ObjectPoolWrapper& object_pool_wrapper() { return *object_pool_wrapper_; }
bool HasObjectPoolBuilder() const { return object_pool_builder_ != nullptr; }
ObjectPoolBuilder& object_pool_builder() { return *object_pool_builder_; }
intptr_t prologue_offset() const { return prologue_offset_; }
bool has_single_entry_point() const { return has_single_entry_point_; }
@ -459,8 +320,6 @@ class AssemblerBase : public StackResource {
void Comment(const char* format, ...) PRINTF_ATTRIBUTE(2, 3);
static bool EmittingComments();
const Code::Comments& GetCodeComments() const;
void Unimplemented(const char* message);
void Untested(const char* message);
void Unreachable(const char* message);
@ -478,19 +337,6 @@ class AssemblerBase : public StackResource {
return buffer_.pointer_offsets();
}
RawObjectPool* MakeObjectPool() {
if (object_pool_wrapper_ != nullptr) {
return object_pool_wrapper_->MakeObjectPool();
}
return ObjectPool::null();
}
protected:
AssemblerBuffer buffer_; // Contains position independent code.
int32_t prologue_offset_;
bool has_single_entry_point_;
private:
class CodeComment : public ZoneAllocated {
public:
CodeComment(intptr_t pc_offset, const String& comment)
@ -506,10 +352,20 @@ class AssemblerBase : public StackResource {
DISALLOW_COPY_AND_ASSIGN(CodeComment);
};
const GrowableArray<CodeComment*>& comments() const { return comments_; }
protected:
AssemblerBuffer buffer_; // Contains position independent code.
int32_t prologue_offset_;
bool has_single_entry_point_;
private:
GrowableArray<CodeComment*> comments_;
ObjectPoolWrapper* object_pool_wrapper_;
ObjectPoolBuilder* object_pool_builder_;
};
} // namespace compiler
} // namespace dart
#if defined(TARGET_ARCH_IA32)
@ -526,4 +382,11 @@ class AssemblerBase : public StackResource {
#error Unknown architecture.
#endif
namespace dart {
using compiler::Assembler;
using compiler::ExternalLabel;
using compiler::Label;
using compiler::ObjectPoolBuilder;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_


@ -5,15 +5,13 @@
#include "vm/globals.h" // NOLINT
#if defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
#define SHOULD_NOT_INCLUDE_RUNTIME
#include "vm/class_id.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/cpu.h"
#include "vm/instructions.h"
#include "vm/longjump.h"
#include "vm/runtime_entry.h"
#include "vm/simulator.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
// An extra check since we are assuming the existence of /proc/cpuinfo below.
#if !defined(USING_SIMULATOR) && !defined(__linux__) && !defined(ANDROID) && \
@ -27,9 +25,26 @@ DECLARE_FLAG(bool, check_code_pointer);
DECLARE_FLAG(bool, inline_alloc);
DECLARE_FLAG(bool, precompiled_mode);
Assembler::Assembler(ObjectPoolWrapper* object_pool_wrapper,
namespace compiler {
#ifndef PRODUCT
using target::ClassHeapStats;
#endif
using target::ClassTable;
using target::Double;
using target::Float32x4;
using target::Float64x2;
using target::Heap;
using target::Instance;
using target::Instructions;
using target::Isolate;
using target::ObjectPool;
using target::RawObject;
using target::Thread;
Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
bool use_far_branches)
: AssemblerBase(object_pool_wrapper),
: AssemblerBase(object_pool_builder),
use_far_branches_(use_far_branches),
constant_pool_allowed_(false) {
generate_invoke_write_barrier_wrapper_ = [&](Condition cond, Register reg) {
@ -37,7 +52,7 @@ Assembler::Assembler(ObjectPoolWrapper* object_pool_wrapper,
cond);
blx(LR, cond);
};
invoke_array_write_barrier_ = [&](Condition cond) {
generate_invoke_array_write_barrier_ = [&](Condition cond) {
ldr(LR, Address(THR, Thread::array_write_barrier_entry_point_offset()),
cond);
blx(LR, cond);
@ -469,7 +484,7 @@ void Assembler::ldrd(Register rd,
ASSERT(rd2 == rd + 1);
if (TargetCPUFeatures::arm_version() == ARMv5TE) {
ldr(rd, Address(rn, offset), cond);
ldr(rd2, Address(rn, offset + kWordSize), cond);
ldr(rd2, Address(rn, offset + target::kWordSize), cond);
} else {
EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, Address(rn, offset));
}
@ -484,7 +499,7 @@ void Assembler::strd(Register rd,
ASSERT(rd2 == rd + 1);
if (TargetCPUFeatures::arm_version() == ARMv5TE) {
str(rd, Address(rn, offset), cond);
str(rd2, Address(rn, offset + kWordSize), cond);
str(rd2, Address(rn, offset + target::kWordSize), cond);
} else {
EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, Address(rn, offset));
}
@ -1391,12 +1406,12 @@ void Assembler::MarkExceptionHandler(Label* label) {
void Assembler::Drop(intptr_t stack_elements) {
ASSERT(stack_elements >= 0);
if (stack_elements > 0) {
AddImmediate(SP, stack_elements * kWordSize);
AddImmediate(SP, stack_elements * target::kWordSize);
}
}
intptr_t Assembler::FindImmediate(int32_t imm) {
return object_pool_wrapper().FindImmediate(imm);
return object_pool_builder().FindImmediate(imm);
}
// Uses a code sequence that can easily be decoded.
@ -1442,7 +1457,7 @@ void Assembler::CheckCodePointer() {
Instructions::HeaderSize() - kHeapObjectTag;
mov(R0, Operand(PC));
AddImmediate(R0, -offset);
ldr(IP, FieldAddress(CODE_REG, Code::saved_instructions_offset()));
ldr(IP, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
cmp(R0, Operand(IP));
b(&instructions_ok, EQ);
bkpt(1);
@ -1453,14 +1468,15 @@ void Assembler::CheckCodePointer() {
}
void Assembler::RestoreCodePointer() {
ldr(CODE_REG, Address(FP, compiler_frame_layout.code_from_fp * kWordSize));
ldr(CODE_REG,
Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
CheckCodePointer();
}
void Assembler::LoadPoolPointer(Register reg) {
// Load new pool pointer.
CheckCodePointer();
ldr(reg, FieldAddress(CODE_REG, Code::object_pool_offset()));
ldr(reg, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
set_constant_pool_allowed(reg == PP);
}
@ -1469,15 +1485,14 @@ void Assembler::LoadIsolate(Register rd) {
}
bool Assembler::CanLoadFromObjectPool(const Object& object) const {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
ASSERT(!Thread::CanLoadFromThread(object));
ASSERT(IsOriginalObject(object));
ASSERT(!target::CanLoadFromThread(object));
if (!constant_pool_allowed()) {
return false;
}
ASSERT(object.IsNotTemporaryScopedHandle());
ASSERT(object.IsOld());
ASSERT(IsNotTemporaryScopedHandle(object));
ASSERT(IsInOldSpace(object));
return true;
}
@ -1486,21 +1501,21 @@ void Assembler::LoadObjectHelper(Register rd,
Condition cond,
bool is_unique,
Register pp) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
if (Thread::CanLoadFromThread(object)) {
ASSERT(IsOriginalObject(object));
intptr_t offset = 0;
if (target::CanLoadFromThread(object, &offset)) {
// Load common VM constants from the thread. This works also in places where
// no constant pool is set up (e.g. intrinsic code).
ldr(rd, Address(THR, Thread::OffsetFromThread(object)), cond);
} else if (object.IsSmi()) {
ldr(rd, Address(THR, offset), cond);
} else if (target::IsSmi(object)) {
// Relocation doesn't apply to Smis.
LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw()), cond);
LoadImmediate(rd, target::ToRawSmi(object), cond);
} else if (CanLoadFromObjectPool(object)) {
// Make sure that class CallPattern is able to decode this load from the
// object pool.
const int32_t offset = ObjectPool::element_offset(
is_unique ? object_pool_wrapper().AddObject(object)
: object_pool_wrapper().FindObject(object));
is_unique ? object_pool_builder().AddObject(object)
: object_pool_builder().FindObject(object));
LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond);
} else {
UNREACHABLE();
@ -1520,33 +1535,31 @@ void Assembler::LoadUniqueObject(Register rd,
void Assembler::LoadFunctionFromCalleePool(Register dst,
const Function& function,
Register new_pp) {
const int32_t offset =
ObjectPool::element_offset(object_pool_wrapper().FindObject(function));
const int32_t offset = ObjectPool::element_offset(
object_pool_builder().FindObject(ToObject(function)));
LoadWordFromPoolOffset(dst, offset - kHeapObjectTag, new_pp, AL);
}
void Assembler::LoadNativeEntry(Register rd,
const ExternalLabel* label,
ObjectPool::Patchability patchable,
ObjectPoolBuilderEntry::Patchability patchable,
Condition cond) {
const int32_t offset = ObjectPool::element_offset(
object_pool_wrapper().FindNativeFunction(label, patchable));
object_pool_builder().FindNativeFunction(label, patchable));
LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond);
}
void Assembler::PushObject(const Object& object) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
ASSERT(IsOriginalObject(object));
LoadObject(IP, object);
Push(IP);
}
void Assembler::CompareObject(Register rn, const Object& object) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
ASSERT(IsOriginalObject(object));
ASSERT(rn != IP);
if (object.IsSmi()) {
CompareImmediate(rn, reinterpret_cast<int32_t>(object.raw()));
if (target::IsSmi(object)) {
CompareImmediate(rn, target::ToRawSmi(object));
} else {
LoadObject(IP, object);
cmp(rn, Operand(IP));
@ -1559,12 +1572,15 @@ void Assembler::StoreIntoObjectFilter(Register object,
Label* label,
CanBeSmi value_can_be_smi,
BarrierFilterMode how_to_jump) {
COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
(kOldObjectAlignmentOffset == 0));
COMPILE_ASSERT((target::ObjectAlignment::kNewObjectAlignmentOffset ==
target::kWordSize) &&
(target::ObjectAlignment::kOldObjectAlignmentOffset == 0));
// For the value we are only interested in the new/old bit and the tag bit.
// And the new bit with the tag bit. The resulting bit will be 0 for a Smi.
if (value_can_be_smi == kValueCanBeSmi) {
and_(IP, value, Operand(value, LSL, kObjectAlignmentLog2 - 1));
and_(
IP, value,
Operand(value, LSL, target::ObjectAlignment::kObjectAlignmentLog2 - 1));
// And the result with the negated space bit of the object.
bic(IP, IP, Operand(object));
} else {
@ -1576,7 +1592,7 @@ void Assembler::StoreIntoObjectFilter(Register object,
#endif
bic(IP, value, Operand(object));
}
tst(IP, Operand(kNewObjectAlignmentOffset));
tst(IP, Operand(target::ObjectAlignment::kNewObjectAlignmentOffset));
if (how_to_jump != kNoJump) {
b(label, how_to_jump == kJumpToNoUpdate ? EQ : NE);
}
@ -1627,8 +1643,8 @@ void Assembler::StoreIntoObject(Register object,
BranchIfSmi(value, &done);
}
if (!lr_reserved) Push(LR);
ldrb(TMP, FieldAddress(object, Object::tags_offset()));
ldrb(LR, FieldAddress(value, Object::tags_offset()));
ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
and_(TMP, LR, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
ldr(LR, Address(THR, Thread::write_barrier_mask_offset()));
tst(TMP, Operand(LR));
@ -1691,8 +1707,8 @@ void Assembler::StoreIntoArray(Register object,
BranchIfSmi(value, &done);
}
if (!lr_reserved) Push(LR);
ldrb(TMP, FieldAddress(object, Object::tags_offset()));
ldrb(LR, FieldAddress(value, Object::tags_offset()));
ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
and_(TMP, LR, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
ldr(LR, Address(THR, Thread::write_barrier_mask_offset()));
tst(TMP, Operand(LR));
@ -1704,7 +1720,7 @@ void Assembler::StoreIntoArray(Register object,
// allocator.
UNIMPLEMENTED();
}
invoke_array_write_barrier_(NE);
generate_invoke_array_write_barrier_(NE);
if (!lr_reserved) Pop(LR);
Bind(&done);
}
@ -1740,10 +1756,8 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
void Assembler::StoreIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value) {
ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
ASSERT(value.IsSmi() || value.InVMHeap() ||
(value.IsOld() && value.IsNotTemporaryScopedHandle()));
ASSERT(IsOriginalObject(value));
ASSERT(IsNotTemporaryScopedHandle(value));
// No store buffer update.
LoadObject(IP, value);
str(IP, dest);
@ -1767,8 +1781,7 @@ void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
int32_t offset,
const Object& value) {
ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
ASSERT(IsOriginalObject(value));
int32_t ignored = 0;
if (Address::CanHoldStoreOffset(kWord, offset - kHeapObjectTag, &ignored)) {
StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
@ -1789,11 +1802,11 @@ void Assembler::InitializeFieldsNoBarrier(Register object,
ASSERT(value_odd == value_even + 1);
Label init_loop;
Bind(&init_loop);
AddImmediate(begin, 2 * kWordSize);
AddImmediate(begin, 2 * target::kWordSize);
cmp(begin, Operand(end));
strd(value_even, value_odd, begin, -2 * kWordSize, LS);
strd(value_even, value_odd, begin, -2 * target::kWordSize, LS);
b(&init_loop, CC);
str(value_even, Address(begin, -2 * kWordSize), HI);
str(value_even, Address(begin, -2 * target::kWordSize), HI);
#if defined(DEBUG)
Label done;
StoreIntoObjectFilter(object, value_even, &done, kValueCanBeSmi,
@ -1814,13 +1827,13 @@ void Assembler::InitializeFieldsNoBarrierUnrolled(Register object,
Register value_odd) {
ASSERT(value_odd == value_even + 1);
intptr_t current_offset = begin_offset;
while (current_offset + kWordSize < end_offset) {
while (current_offset + target::kWordSize < end_offset) {
strd(value_even, value_odd, base, current_offset);
current_offset += 2 * kWordSize;
current_offset += 2 * target::kWordSize;
}
while (current_offset < end_offset) {
str(value_even, Address(base, current_offset));
current_offset += kWordSize;
current_offset += target::kWordSize;
}
#if defined(DEBUG)
Label done;
@ -1849,7 +1862,7 @@ void Assembler::LoadClassId(Register result, Register object, Condition cond) {
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
ldrh(result, FieldAddress(object, class_id_offset), cond);
}
@ -1859,7 +1872,7 @@ void Assembler::LoadClassById(Register result, Register class_id) {
const intptr_t offset =
Isolate::class_table_offset() + ClassTable::table_offset();
LoadFromOffset(kWord, result, result, offset);
ldr(result, Address(result, class_id, LSL, kSizeOfClassPairLog2));
ldr(result, Address(result, class_id, LSL, ClassTable::kSizeOfClassPairLog2));
}
void Assembler::CompareClassId(Register object,
@ -1883,7 +1896,7 @@ void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
void Assembler::BailoutIfInvalidBranchOffset(int32_t offset) {
if (!CanEncodeBranchDistance(offset)) {
ASSERT(!use_far_branches());
Thread::Current()->long_jump_base()->Jump(1, Object::branch_offset_error());
BailoutWithBranchOffsetError();
}
}
@ -2317,11 +2330,11 @@ bool Address::CanHoldImmediateOffset(bool is_load,
}
void Assembler::Push(Register rd, Condition cond) {
str(rd, Address(SP, -kWordSize, Address::PreIndex), cond);
str(rd, Address(SP, -target::kWordSize, Address::PreIndex), cond);
}
void Assembler::Pop(Register rd, Condition cond) {
ldr(rd, Address(SP, kWordSize, Address::PostIndex), cond);
ldr(rd, Address(SP, target::kWordSize, Address::PostIndex), cond);
}
void Assembler::PushList(RegList regs, Condition cond) {
@ -2538,13 +2551,13 @@ void Assembler::Vdivqs(QRegister qd, QRegister qn, QRegister qm) {
}
void Assembler::Branch(const Code& target,
ObjectPool::Patchability patchable,
ObjectPoolBuilderEntry::Patchability patchable,
Register pp,
Condition cond) {
const int32_t offset = ObjectPool::element_offset(
object_pool_wrapper().FindObject(target, patchable));
object_pool_builder().FindObject(ToObject(target), patchable));
LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, pp, cond);
Branch(FieldAddress(CODE_REG, Code::entry_point_offset()), cond);
Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()), cond);
}
void Assembler::Branch(const Address& address, Condition cond) {
@ -2552,22 +2565,22 @@ void Assembler::Branch(const Address& address, Condition cond) {
}
void Assembler::BranchLink(const Code& target,
ObjectPool::Patchability patchable,
Code::EntryKind entry_kind) {
ObjectPoolBuilderEntry::Patchability patchable,
CodeEntryKind entry_kind) {
// Make sure that class CallPattern is able to patch the label referred
// to by this code sequence.
// For added code robustness, use 'blx lr' in a patchable sequence and
// use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
const int32_t offset = ObjectPool::element_offset(
object_pool_wrapper().FindObject(target, patchable));
object_pool_builder().FindObject(ToObject(target), patchable));
LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL);
ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset(entry_kind)));
ldr(LR, FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
blx(LR); // Use blx instruction so that the return branch prediction works.
}
void Assembler::BranchLinkPatchable(const Code& target,
Code::EntryKind entry_kind) {
BranchLink(target, ObjectPool::kPatchable, entry_kind);
CodeEntryKind entry_kind) {
BranchLink(target, ObjectPoolBuilderEntry::kPatchable, entry_kind);
}
void Assembler::BranchLinkToRuntime() {
@ -2586,15 +2599,15 @@ void Assembler::CallNullErrorShared(bool save_fpu_registers) {
void Assembler::BranchLinkWithEquivalence(const Code& target,
const Object& equivalence,
Code::EntryKind entry_kind) {
CodeEntryKind entry_kind) {
// Make sure that class CallPattern is able to patch the label referred
// to by this code sequence.
// For added code robustness, use 'blx lr' in a patchable sequence and
// use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
const int32_t offset = ObjectPool::element_offset(
object_pool_wrapper().FindObject(target, equivalence));
object_pool_builder().FindObject(ToObject(target), equivalence));
LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL);
ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset(entry_kind)));
ldr(LR, FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
blx(LR); // Use blx instruction so that the return branch prediction works.
}
@ -2838,10 +2851,10 @@ void Assembler::CopyDoubleField(Register dst,
} else {
LoadFromOffset(kWord, tmp1, src, Double::value_offset() - kHeapObjectTag);
LoadFromOffset(kWord, tmp2, src,
Double::value_offset() + kWordSize - kHeapObjectTag);
Double::value_offset() + target::kWordSize - kHeapObjectTag);
StoreToOffset(kWord, tmp1, dst, Double::value_offset() - kHeapObjectTag);
StoreToOffset(kWord, tmp2, dst,
Double::value_offset() + kWordSize - kHeapObjectTag);
Double::value_offset() + target::kWordSize - kHeapObjectTag);
}
}
@ -2858,25 +2871,29 @@ void Assembler::CopyFloat32x4Field(Register dst,
} else {
LoadFromOffset(
kWord, tmp1, src,
(Float32x4::value_offset() + 0 * kWordSize) - kHeapObjectTag);
(Float32x4::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(
kWord, tmp2, src,
(Float32x4::value_offset() + 1 * kWordSize) - kHeapObjectTag);
StoreToOffset(kWord, tmp1, dst,
(Float32x4::value_offset() + 0 * kWordSize) - kHeapObjectTag);
StoreToOffset(kWord, tmp2, dst,
(Float32x4::value_offset() + 1 * kWordSize) - kHeapObjectTag);
(Float32x4::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp1, dst,
(Float32x4::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp2, dst,
(Float32x4::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(
kWord, tmp1, src,
(Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag);
(Float32x4::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(
kWord, tmp2, src,
(Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag);
StoreToOffset(kWord, tmp1, dst,
(Float32x4::value_offset() + 2 * kWordSize) - kHeapObjectTag);
StoreToOffset(kWord, tmp2, dst,
(Float32x4::value_offset() + 3 * kWordSize) - kHeapObjectTag);
(Float32x4::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp1, dst,
(Float32x4::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp2, dst,
(Float32x4::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
}
}
@ -2893,25 +2910,29 @@ void Assembler::CopyFloat64x2Field(Register dst,
} else {
LoadFromOffset(
kWord, tmp1, src,
(Float64x2::value_offset() + 0 * kWordSize) - kHeapObjectTag);
(Float64x2::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(
kWord, tmp2, src,
(Float64x2::value_offset() + 1 * kWordSize) - kHeapObjectTag);
StoreToOffset(kWord, tmp1, dst,
(Float64x2::value_offset() + 0 * kWordSize) - kHeapObjectTag);
StoreToOffset(kWord, tmp2, dst,
(Float64x2::value_offset() + 1 * kWordSize) - kHeapObjectTag);
(Float64x2::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp1, dst,
(Float64x2::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp2, dst,
(Float64x2::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(
kWord, tmp1, src,
(Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag);
(Float64x2::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(
kWord, tmp2, src,
(Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag);
StoreToOffset(kWord, tmp1, dst,
(Float64x2::value_offset() + 2 * kWordSize) - kHeapObjectTag);
StoreToOffset(kWord, tmp2, dst,
(Float64x2::value_offset() + 3 * kWordSize) - kHeapObjectTag);
(Float64x2::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp1, dst,
(Float64x2::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp2, dst,
(Float64x2::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
}
}
@ -3145,7 +3166,7 @@ void Assembler::LeaveCallRuntimeFrame() {
// kVolatileCpuRegCount +1 for PP, -1 because even though LR is volatile,
// it is pushed ahead of FP.
const intptr_t kPushedRegistersSize =
kDartVolatileCpuRegCount * kWordSize + kPushedFpuRegisterSize;
kDartVolatileCpuRegCount * target::kWordSize + kPushedFpuRegisterSize;
AddImmediate(SP, FP, -kPushedRegistersSize);
// Restore all volatile FPU registers.
@ -3208,8 +3229,8 @@ void Assembler::EnterOsrFrame(intptr_t extra_size) {
void Assembler::LeaveDartFrame() {
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
ldr(PP,
Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
target::kWordSize));
}
set_constant_pool_allowed(false);
@ -3220,8 +3241,8 @@ void Assembler::LeaveDartFrame() {
void Assembler::LeaveDartFrameAndReturn() {
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
ldr(PP,
Address(FP, compiler_frame_layout.saved_caller_pp_from_fp * kWordSize));
ldr(PP, Address(FP, target::frame_layout.saved_caller_pp_from_fp *
target::kWordSize));
}
set_constant_pool_allowed(false);
@ -3285,15 +3306,12 @@ void Assembler::LoadAllocationStatsAddress(Register dest, intptr_t cid) {
}
void Assembler::IncrementAllocationStats(Register stats_addr_reg,
intptr_t cid,
Heap::Space space) {
intptr_t cid) {
ASSERT(stats_addr_reg != kNoRegister);
ASSERT(stats_addr_reg != TMP);
ASSERT(cid > 0);
const uword count_field_offset =
(space == Heap::kNew)
? ClassHeapStats::allocated_since_gc_new_space_offset()
: ClassHeapStats::allocated_since_gc_old_space_offset();
ClassHeapStats::allocated_since_gc_new_space_offset();
const Address& count_address = Address(stats_addr_reg, count_field_offset);
ldr(TMP, count_address);
AddImmediate(TMP, 1);
@ -3301,18 +3319,13 @@ void Assembler::IncrementAllocationStats(Register stats_addr_reg,
}
void Assembler::IncrementAllocationStatsWithSize(Register stats_addr_reg,
Register size_reg,
Heap::Space space) {
Register size_reg) {
ASSERT(stats_addr_reg != kNoRegister);
ASSERT(stats_addr_reg != TMP);
const uword count_field_offset =
(space == Heap::kNew)
? ClassHeapStats::allocated_since_gc_new_space_offset()
: ClassHeapStats::allocated_since_gc_old_space_offset();
ClassHeapStats::allocated_since_gc_new_space_offset();
const uword size_field_offset =
(space == Heap::kNew)
? ClassHeapStats::allocated_size_since_gc_new_space_offset()
: ClassHeapStats::allocated_size_since_gc_old_space_offset();
ClassHeapStats::allocated_size_since_gc_new_space_offset();
const Address& count_address = Address(stats_addr_reg, count_field_offset);
const Address& size_address = Address(stats_addr_reg, size_field_offset);
ldr(TMP, count_address);
@ -3329,13 +3342,13 @@ void Assembler::TryAllocate(const Class& cls,
Register instance_reg,
Register temp_reg) {
ASSERT(failure != NULL);
const intptr_t instance_size = cls.instance_size();
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
const classid_t cid = target::Class::GetId(cls);
ASSERT(instance_reg != temp_reg);
ASSERT(temp_reg != IP);
ASSERT(instance_size != 0);
NOT_IN_PRODUCT(LoadAllocationStatsAddress(temp_reg, cls.id()));
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
NOT_IN_PRODUCT(LoadAllocationStatsAddress(temp_reg, cid));
ldr(instance_reg, Address(THR, Thread::top_offset()));
// TODO(koda): Protect against unsigned overflow here.
AddImmediateSetFlags(instance_reg, instance_reg, instance_size);
@ -3358,15 +3371,12 @@ void Assembler::TryAllocate(const Class& cls,
ASSERT(instance_size >= kHeapObjectTag);
AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
uint32_t tags = 0;
tags = RawObject::SizeTag::update(instance_size, tags);
ASSERT(cls.id() != kIllegalCid);
tags = RawObject::ClassIdTag::update(cls.id(), tags);
tags = RawObject::NewBit::update(true, tags);
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, instance_size);
LoadImmediate(IP, tags);
str(IP, FieldAddress(instance_reg, Object::tags_offset()));
str(IP, FieldAddress(instance_reg, target::Object::tags_offset()));
NOT_IN_PRODUCT(IncrementAllocationStats(temp_reg, cls.id(), space));
NOT_IN_PRODUCT(IncrementAllocationStats(temp_reg, cid));
} else {
b(failure);
}
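
A self-contained model (not VM code) of what target::MakeTagWordForNewSpaceObject computes in place of the old SizeTag/ClassIdTag/NewBit update chain above. The bit positions below are assumptions for the sketch, except the class-id position, which a LoadClassId hunk later in this diff asserts to be 16.

#include <cstdint>

namespace sketch {

// Assumed layout, for illustration only; the real field positions live in
// RawObject and compiler::target.
constexpr uint32_t kNewBitPos = 3;
constexpr uint32_t kSizeTagPos = 8;
constexpr uint32_t kSizeTagMask = 0xff;
constexpr uint32_t kClassIdTagPos = 16;
constexpr uint32_t kObjectAlignmentLog2 = 4;  // assumed 16-byte alignment

// Packs the class id, the instance size (in allocation units) and the
// "allocated in new space" bit into a single tag word.
constexpr uint32_t MakeTagWordForNewSpaceObject(uint32_t cid,
                                                uint32_t instance_size) {
  return (1u << kNewBitPos) |
         (((instance_size >> kObjectAlignmentLog2) & kSizeTagMask)
          << kSizeTagPos) |
         (cid << kClassIdTagPos);
}

}  // namespace sketch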
@ -3381,7 +3391,6 @@ void Assembler::TryAllocateArray(intptr_t cid,
Register temp2) {
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
NOT_IN_PRODUCT(LoadAllocationStatsAddress(temp1, cid));
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
// Potential new object start.
ldr(instance, Address(THR, Thread::top_offset()));
AddImmediateSetFlags(end_address, instance, instance_size);
@ -3406,15 +3415,14 @@ void Assembler::TryAllocateArray(intptr_t cid,
// Initialize the tags.
// instance: new object start as a tagged pointer.
uint32_t tags = 0;
tags = RawObject::ClassIdTag::update(cid, tags);
tags = RawObject::SizeTag::update(instance_size, tags);
tags = RawObject::NewBit::update(true, tags);
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, instance_size);
LoadImmediate(temp2, tags);
str(temp2, FieldAddress(instance, Array::tags_offset())); // Store tags.
str(temp2,
FieldAddress(instance, target::Object::tags_offset())); // Store tags.
LoadImmediate(temp2, instance_size);
NOT_IN_PRODUCT(IncrementAllocationStatsWithSize(temp1, temp2, space));
NOT_IN_PRODUCT(IncrementAllocationStatsWithSize(temp1, temp2));
} else {
b(failure);
}
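
With the Heap::Space argument dropped above, the inline allocation path only ever bumps the new-space counters. A stand-alone restatement of the update the emitted code performs; the struct is a made-up stand-in for the real ClassHeapStats layout.

#include <cstdint>

struct ClassHeapStatsSketch {
  intptr_t allocated_since_gc_new_space;
  intptr_t allocated_size_since_gc_new_space;
};

inline void IncrementAllocationStatsWithSizeSketch(ClassHeapStatsSketch* stats,
                                                   intptr_t size) {
  // The generated code does exactly this, as load/add/store pairs against
  // offsets baked into the instruction stream.
  stats->allocated_since_gc_new_space += 1;
  stats->allocated_size_since_gc_new_space += size;
}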
@ -3430,18 +3438,6 @@ void Assembler::GenerateUnRelocatedPcRelativeCall(Condition cond,
pattern.set_distance(offset_into_target);
}
void Assembler::Stop(const char* message) {
if (FLAG_print_stop_message) {
PushList((1 << R0) | (1 << IP) | (1 << LR)); // Preserve R0, IP, LR.
LoadImmediate(R0, reinterpret_cast<int32_t>(message));
// PrintStopMessage() preserves all registers.
ExternalLabel label(StubCode::PrintStopMessage().EntryPoint());
BranchLink(&label);
PopList((1 << R0) | (1 << IP) | (1 << LR)); // Restore R0, IP, LR.
}
bkpt(Instr::kStopMessageCode);
}
Address Assembler::ElementAddressForIntIndex(bool is_load,
bool is_external,
intptr_t cid,
@ -3610,6 +3606,7 @@ const char* Assembler::FpuRegisterName(FpuRegister reg) {
return fpu_reg_names[reg];
}
} // namespace compiler
} // namespace dart
#endif // defined(TARGET_ARCH_ARM) && !defined(DART_PRECOMPILED_RUNTIME)
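
Illustrative note, not part of the diff: the LeaveDartFrame hunks above read the saved caller PP by scaling a slot index from target::frame_layout by target::kWordSize. A stand-alone sketch of that arithmetic; the slot index and word size below are assumed example values, not the VM's real constants.

#include <cstdint>

// Frame slots are indexed in words relative to FP; byte offsets come from
// scaling with the *target* word size, which may differ from the host's.
struct FrameLayoutSketch {
  int saved_caller_pp_from_fp;  // assumed example slot index, in words
};

constexpr int kTargetWordSizeSketch = 4;  // assumed 32-bit ARM target

constexpr int SavedCallerPpByteOffset(const FrameLayoutSketch& layout) {
  return layout.saved_caller_pp_from_fp * kTargetWordSizeSketch;
}
// Example: an assumed slot index of -2 yields ldr(PP, Address(FP, -8)).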


@ -13,17 +13,49 @@
#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/code_entry_kind.h"
#include "vm/compiler/runtime_api.h"
#include "vm/constants_arm.h"
#include "vm/cpu.h"
#include "vm/hash_map.h"
#include "vm/object.h"
#include "vm/simulator.h"
namespace dart {
// Forward declarations.
class RuntimeEntry;
class FlowGraphCompiler;
class RegisterSet;
class RuntimeEntry;
// TODO(vegorov) these enumerations are temporarily moved out of compiler
// namespace to make refactoring easier.
enum OperandSize {
kByte,
kUnsignedByte,
kHalfword,
kUnsignedHalfword,
kWord,
kUnsignedWord,
kWordPair,
kSWord,
kDWord,
kRegList,
};
// Load/store multiple addressing mode.
enum BlockAddressMode {
// bit encoding P U W
DA = (0 | 0 | 0) << 21, // decrement after
IA = (0 | 4 | 0) << 21, // increment after
DB = (8 | 0 | 0) << 21, // decrement before
IB = (8 | 4 | 0) << 21, // increment before
DA_W = (0 | 0 | 1) << 21, // decrement after with writeback to base
IA_W = (0 | 4 | 1) << 21, // increment after with writeback to base
DB_W = (8 | 0 | 1) << 21, // decrement before with writeback to base
IB_W = (8 | 4 | 1) << 21 // increment before with writeback to base
};
namespace compiler {
// Instruction encoding bits.
enum {
@ -182,32 +214,6 @@ class Operand : public ValueObject {
friend class Address;
};
enum OperandSize {
kByte,
kUnsignedByte,
kHalfword,
kUnsignedHalfword,
kWord,
kUnsignedWord,
kWordPair,
kSWord,
kDWord,
kRegList,
};
// Load/store multiple addressing mode.
enum BlockAddressMode {
// bit encoding P U W
DA = (0 | 0 | 0) << 21, // decrement after
IA = (0 | 4 | 0) << 21, // increment after
DB = (8 | 0 | 0) << 21, // decrement before
IB = (8 | 4 | 0) << 21, // increment before
DA_W = (0 | 0 | 1) << 21, // decrement after with writeback to base
IA_W = (0 | 4 | 1) << 21, // increment after with writeback to base
DB_W = (8 | 0 | 1) << 21, // decrement before with writeback to base
IB_W = (8 | 4 | 1) << 21 // increment before with writeback to base
};
class Address : public ValueObject {
public:
enum OffsetKind {
@ -339,7 +345,7 @@ class FieldAddress : public Address {
class Assembler : public AssemblerBase {
public:
explicit Assembler(ObjectPoolWrapper* object_pool_wrapper,
explicit Assembler(ObjectPoolBuilder* object_pool_builder,
bool use_far_branches = false);
~Assembler() {}
@ -654,31 +660,31 @@ class Assembler : public AssemblerBase {
void blx(Register rm, Condition cond = AL);
void Branch(const Code& code,
ObjectPool::Patchability patchable = ObjectPool::kNotPatchable,
ObjectPoolBuilderEntry::Patchability patchable =
ObjectPoolBuilderEntry::kNotPatchable,
Register pp = PP,
Condition cond = AL);
void Branch(const Address& address, Condition cond = AL);
void BranchLink(
const Code& code,
ObjectPool::Patchability patchable = ObjectPool::kNotPatchable,
Code::EntryKind entry_kind = Code::EntryKind::kNormal);
void BranchLink(const Code& code,
ObjectPoolBuilderEntry::Patchability patchable =
ObjectPoolBuilderEntry::kNotPatchable,
CodeEntryKind entry_kind = CodeEntryKind::kNormal);
void BranchLinkToRuntime();
void CallNullErrorShared(bool save_fpu_registers);
// Branch and link to an entry address. Call sequence can be patched.
void BranchLinkPatchable(
const Code& code,
Code::EntryKind entry_kind = Code::EntryKind::kNormal);
void BranchLinkPatchable(const Code& code,
CodeEntryKind entry_kind = CodeEntryKind::kNormal);
// Emit a call that shares its object pool entries with other calls
// that have the same equivalence marker.
void BranchLinkWithEquivalence(
const Code& code,
const Object& equivalence,
Code::EntryKind entry_kind = Code::EntryKind::kNormal);
CodeEntryKind entry_kind = CodeEntryKind::kNormal);
// Branch and link to [base + offset]. Call sequence is never patched.
void BranchLinkOffset(Register base, int32_t offset);
@ -754,7 +760,7 @@ class Assembler : public AssemblerBase {
Register new_pp);
void LoadNativeEntry(Register dst,
const ExternalLabel* label,
ObjectPool::Patchability patchable,
ObjectPoolBuilderEntry::Patchability patchable,
Condition cond = AL);
void PushObject(const Object& object);
void CompareObject(Register rn, const Object& object);
@ -1028,12 +1034,9 @@ class Assembler : public AssemblerBase {
// allocation stats. These are separate assembler macros so we can
// avoid a dependent load too nearby the load of the table address.
void LoadAllocationStatsAddress(Register dest, intptr_t cid);
void IncrementAllocationStats(Register stats_addr,
intptr_t cid,
Heap::Space space);
void IncrementAllocationStats(Register stats_addr, intptr_t cid);
void IncrementAllocationStatsWithSize(Register stats_addr_reg,
Register size_reg,
Heap::Space space);
Register size_reg);
Address ElementAddressForIntIndex(bool is_load,
bool is_external,
@ -1117,7 +1120,7 @@ class Assembler : public AssemblerBase {
// On some other platforms, we draw a distinction between safe and unsafe
// smis.
static bool IsSafe(const Object& object) { return true; }
static bool IsSafeSmi(const Object& object) { return object.IsSmi(); }
static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
bool constant_pool_allowed() const { return constant_pool_allowed_; }
void set_constant_pool_allowed(bool b) { constant_pool_allowed_ = b; }
@ -1276,15 +1279,23 @@ class Assembler : public AssemblerBase {
CanBeSmi can_be_smi,
BarrierFilterMode barrier_filter_mode);
friend class FlowGraphCompiler;
friend class dart::FlowGraphCompiler;
std::function<void(Condition, Register)>
generate_invoke_write_barrier_wrapper_;
std::function<void(Condition)> invoke_array_write_barrier_;
std::function<void(Condition)> generate_invoke_array_write_barrier_;
DISALLOW_ALLOCATION();
DISALLOW_COPY_AND_ASSIGN(Assembler);
};
} // namespace compiler
// TODO(vegorov) temporary export commonly used classes into dart namespace
// to ease migration.
using compiler::Address;
using compiler::FieldAddress;
using compiler::Operand;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM_H_
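
The using-declarations above keep unmigrated runtime code compiling: both the qualified and unqualified spellings name the same type. A tiny illustration (hypothetical function, not from the tree):

namespace dart {

// Hypothetical function showing the effect of the re-export above.
void UseBothSpellings(compiler::Assembler* assembler) {
  compiler::Address qualified(SP, 0);  // full name in the new namespace
  Address exported(SP, 0);   // same type via `using compiler::Address;`
  assembler->ldr(R0, qualified);
  assembler->ldr(R1, exported);
}

}  // namespace dart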


@ -5,15 +5,13 @@
#include "vm/globals.h" // NOLINT
#if defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
#define SHOULD_NOT_INCLUDE_RUNTIME
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/cpu.h"
#include "vm/instructions.h"
#include "vm/longjump.h"
#include "vm/runtime_entry.h"
#include "vm/simulator.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
namespace dart {
@ -23,16 +21,33 @@ DECLARE_FLAG(bool, precompiled_mode);
DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches");
Assembler::Assembler(ObjectPoolWrapper* object_pool_wrapper,
namespace compiler {
#ifndef PRODUCT
using target::ClassHeapStats;
#endif
using target::ClassTable;
using target::Double;
using target::Float32x4;
using target::Float64x2;
using target::Heap;
using target::Instance;
using target::Instructions;
using target::Isolate;
using target::ObjectPool;
using target::RawObject;
using target::Thread;
Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
bool use_far_branches)
: AssemblerBase(object_pool_wrapper),
: AssemblerBase(object_pool_builder),
use_far_branches_(use_far_branches),
constant_pool_allowed_(false) {
generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
ldr(LR, Address(THR, Thread::write_barrier_wrappers_thread_offset(reg)));
blr(LR);
};
invoke_array_write_barrier_ = [&]() {
generate_invoke_array_write_barrier_ = [&]() {
ldr(LR, Address(THR, Thread::array_write_barrier_entry_point_offset()));
blr(LR);
};
@ -227,13 +242,6 @@ void Assembler::Bind(Label* label) {
label->BindTo(bound_pc);
}
void Assembler::Stop(const char* message) {
if (FLAG_print_stop_message) {
UNIMPLEMENTED();
}
brk(Instr::kStopMessageCode);
}
static int CountLeadingZeros(uint64_t value, int width) {
ASSERT((width == 32) || (width == 64));
if (value == 0) {
@ -363,7 +371,7 @@ bool Operand::IsImmLogical(uint64_t value, uint8_t width, Operand* imm_op) {
void Assembler::LoadPoolPointer(Register pp) {
CheckCodePointer();
ldr(pp, FieldAddress(CODE_REG, Code::object_pool_offset()));
ldr(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
// When in the PP register, the pool pointer is untagged. When we
// push it on the stack with TagAndPushPP it is tagged again. PopAndUntagPP
@ -456,35 +464,34 @@ void Assembler::LoadDoubleWordFromPoolOffset(Register lower,
}
intptr_t Assembler::FindImmediate(int64_t imm) {
return object_pool_wrapper().FindImmediate(imm);
return object_pool_builder().FindImmediate(imm);
}
bool Assembler::CanLoadFromObjectPool(const Object& object) const {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
ASSERT(!Thread::CanLoadFromThread(object));
ASSERT(IsOriginalObject(object));
ASSERT(!target::CanLoadFromThread(object));
if (!constant_pool_allowed()) {
return false;
}
// TODO(zra, kmillikin): Also load other large immediates from the object
// pool
if (object.IsSmi()) {
ASSERT(Smi::IsValid(Smi::Value(reinterpret_cast<RawSmi*>(object.raw()))));
if (target::IsSmi(object)) {
// If the raw smi does not fit into a 32-bit signed int, then we'll keep
// the raw value in the object pool.
return !Utils::IsInt(32, reinterpret_cast<int64_t>(object.raw()));
return !Utils::IsInt(32, target::ToRawSmi(object));
}
ASSERT(object.IsNotTemporaryScopedHandle());
ASSERT(object.IsOld());
ASSERT(IsNotTemporaryScopedHandle(object));
ASSERT(IsInOldSpace(object));
return true;
}
void Assembler::LoadNativeEntry(Register dst,
const ExternalLabel* label,
ObjectPool::Patchability patchable) {
void Assembler::LoadNativeEntry(
Register dst,
const ExternalLabel* label,
ObjectPoolBuilderEntry::Patchability patchable) {
const int32_t offset = ObjectPool::element_offset(
object_pool_wrapper().FindNativeFunction(label, patchable));
object_pool_builder().FindNativeFunction(label, patchable));
LoadWordFromPoolOffset(dst, offset);
}
@ -495,18 +502,18 @@ void Assembler::LoadIsolate(Register dst) {
void Assembler::LoadObjectHelper(Register dst,
const Object& object,
bool is_unique) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
if (Thread::CanLoadFromThread(object)) {
ldr(dst, Address(THR, Thread::OffsetFromThread(object)));
ASSERT(IsOriginalObject(object));
word offset = 0;
if (target::CanLoadFromThread(object, &offset)) {
ldr(dst, Address(THR, offset));
} else if (CanLoadFromObjectPool(object)) {
const int32_t offset = ObjectPool::element_offset(
is_unique ? object_pool_wrapper().AddObject(object)
: object_pool_wrapper().FindObject(object));
is_unique ? object_pool_builder().AddObject(object)
: object_pool_builder().FindObject(object));
LoadWordFromPoolOffset(dst, offset);
} else {
ASSERT(object.IsSmi());
LoadImmediate(dst, reinterpret_cast<int64_t>(object.raw()));
ASSERT(target::IsSmi(object));
LoadImmediate(dst, target::ToRawSmi(object));
}
}
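
Illustrative condensation of LoadObjectHelper above (hypothetical helper name, identical logic): the three ways a compile-time constant reaches a register once the compiler goes through the target:: surface instead of peeking into the runtime.

// Hypothetical member name; each branch mirrors the hunk above.
void Assembler::LoadObjectSketch(Register dst, const Object& object) {
  word offset = 0;
  if (target::CanLoadFromThread(object, &offset)) {
    // Well-known objects are pinned at fixed offsets on the Thread.
    ldr(dst, Address(THR, offset));
  } else if (CanLoadFromObjectPool(object)) {
    // Anything else that must stay a heap object goes through the pool.
    LoadWordFromPoolOffset(
        dst,
        ObjectPool::element_offset(object_pool_builder().FindObject(object)));
  } else {
    // Smis can be rematerialized as raw immediates.
    ASSERT(target::IsSmi(object));
    LoadImmediate(dst, target::ToRawSmi(object));
  }
}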
@ -515,8 +522,8 @@ void Assembler::LoadFunctionFromCalleePool(Register dst,
Register new_pp) {
ASSERT(!constant_pool_allowed());
ASSERT(new_pp != PP);
const int32_t offset =
ObjectPool::element_offset(object_pool_wrapper().FindObject(function));
const int32_t offset = ObjectPool::element_offset(
object_pool_builder().FindObject(ToObject(function)));
ASSERT(Address::CanHoldOffset(offset));
ldr(dst, Address(new_pp, offset));
}
@ -530,17 +537,17 @@ void Assembler::LoadUniqueObject(Register dst, const Object& object) {
}
void Assembler::CompareObject(Register reg, const Object& object) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
if (Thread::CanLoadFromThread(object)) {
ldr(TMP, Address(THR, Thread::OffsetFromThread(object)));
ASSERT(IsOriginalObject(object));
word offset = 0;
if (target::CanLoadFromThread(object, &offset)) {
ldr(TMP, Address(THR, offset));
CompareRegisters(reg, TMP);
} else if (CanLoadFromObjectPool(object)) {
LoadObject(TMP, object);
CompareRegisters(reg, TMP);
} else {
ASSERT(object.IsSmi());
CompareImmediate(reg, reinterpret_cast<int64_t>(object.raw()));
ASSERT(target::IsSmi(object));
CompareImmediate(reg, target::ToRawSmi(object));
}
}
@ -639,24 +646,24 @@ void Assembler::LoadDImmediate(VRegister vd, double immd) {
void Assembler::Branch(const Code& target,
Register pp,
ObjectPool::Patchability patchable) {
ObjectPoolBuilderEntry::Patchability patchable) {
const int32_t offset = ObjectPool::element_offset(
object_pool_wrapper().FindObject(target, patchable));
object_pool_builder().FindObject(ToObject(target), patchable));
LoadWordFromPoolOffset(CODE_REG, offset, pp);
ldr(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
ldr(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
br(TMP);
}
void Assembler::BranchPatchable(const Code& code) {
Branch(code, PP, ObjectPool::kPatchable);
Branch(code, PP, ObjectPoolBuilderEntry::kPatchable);
}
void Assembler::BranchLink(const Code& target,
ObjectPool::Patchability patchable) {
ObjectPoolBuilderEntry::Patchability patchable) {
const int32_t offset = ObjectPool::element_offset(
object_pool_wrapper().FindObject(target, patchable));
object_pool_builder().FindObject(ToObject(target), patchable));
LoadWordFromPoolOffset(CODE_REG, offset);
ldr(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
ldr(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
blr(TMP);
}
@ -668,9 +675,9 @@ void Assembler::BranchLinkToRuntime() {
void Assembler::BranchLinkWithEquivalence(const Code& target,
const Object& equivalence) {
const int32_t offset = ObjectPool::element_offset(
object_pool_wrapper().FindObject(target, equivalence));
object_pool_builder().FindObject(ToObject(target), equivalence));
LoadWordFromPoolOffset(CODE_REG, offset);
ldr(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
ldr(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
blr(TMP);
}
@ -927,8 +934,9 @@ void Assembler::StoreIntoObjectFilter(Register object,
Label* label,
CanBeSmi value_can_be_smi,
BarrierFilterMode how_to_jump) {
COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
(kOldObjectAlignmentOffset == 0));
COMPILE_ASSERT((target::ObjectAlignment::kNewObjectAlignmentOffset ==
target::kWordSize) &&
(target::ObjectAlignment::kOldObjectAlignmentOffset == 0));
// Write-barrier triggers if the value is in the new space (has bit set) and
// the object is in the old space (has bit cleared).
@ -945,14 +953,15 @@ void Assembler::StoreIntoObjectFilter(Register object,
} else {
// For the value we are only interested in the new/old bit and the tag bit.
// And the new bit with the tag bit. The resulting bit will be 0 for a Smi.
and_(TMP, value, Operand(value, LSL, kNewObjectBitPosition));
and_(TMP, value,
Operand(value, LSL, target::ObjectAlignment::kNewObjectBitPosition));
// And the result with the negated space bit of the object.
bic(TMP, TMP, Operand(object));
}
if (how_to_jump == kJumpToNoUpdate) {
tbz(label, TMP, kNewObjectBitPosition);
tbz(label, TMP, target::ObjectAlignment::kNewObjectBitPosition);
} else {
tbnz(label, TMP, kNewObjectBitPosition);
tbnz(label, TMP, target::ObjectAlignment::kNewObjectBitPosition);
}
}
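
A stand-alone model (not VM code) of the pointer test StoreIntoObjectFilter builds above. It relies on the alignment facts asserted in the hunk: new-space objects sit one word past the old-space alignment, and tagged heap pointers carry bit 0. The exact bit position below is an assumption for the sketch.

#include <cstdint>

// Assumed: with 8-byte words and 16-byte object alignment, the "allocated in
// new space" address bit is bit 3; tagged heap pointers have bit 0 set.
constexpr uint64_t kNewObjectBitPositionSketch = 3;

inline bool NeedsWriteBarrierSketch(uint64_t object_ptr, uint64_t value_ptr) {
  // and_(TMP, value, Operand(value, LSL, kNewObjectBitPosition)):
  // bit 3 of tmp is set only if value is both tagged (not a Smi) and new.
  const uint64_t tmp = value_ptr & (value_ptr << kNewObjectBitPositionSketch);
  // bic(TMP, TMP, Operand(object)): clear it again unless object is old.
  return ((tmp & ~object_ptr) >> kNewObjectBitPositionSketch) & 1;
}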
@ -997,8 +1006,8 @@ void Assembler::StoreIntoObject(Register object,
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done);
}
ldr(TMP, FieldAddress(object, Object::tags_offset()), kUnsignedByte);
ldr(TMP2, FieldAddress(value, Object::tags_offset()), kUnsignedByte);
ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
and_(TMP, TMP2, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
tst(TMP, Operand(BARRIER_MASK));
b(&done, ZERO);
@ -1058,8 +1067,8 @@ void Assembler::StoreIntoArray(Register object,
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done);
}
ldr(TMP, FieldAddress(object, Object::tags_offset()), kUnsignedByte);
ldr(TMP2, FieldAddress(value, Object::tags_offset()), kUnsignedByte);
ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
and_(TMP, TMP2, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
tst(TMP, Operand(BARRIER_MASK));
b(&done, ZERO);
@ -1072,7 +1081,7 @@ void Assembler::StoreIntoArray(Register object,
// allocator.
UNIMPLEMENTED();
}
invoke_array_write_barrier_();
generate_invoke_array_write_barrier_();
if (!lr_reserved) Pop(LR);
Bind(&done);
}
@ -1104,10 +1113,8 @@ void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
void Assembler::StoreIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value) {
ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
ASSERT(value.IsSmi() || value.InVMHeap() ||
(value.IsOld() && value.IsNotTemporaryScopedHandle()));
ASSERT(IsOriginalObject(value));
ASSERT(IsNotTemporaryScopedHandle(value));
// No store buffer update.
LoadObject(TMP2, value);
str(TMP2, dest);
@ -1128,7 +1135,7 @@ void Assembler::LoadClassId(Register result, Register object) {
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
LoadFromOffset(result, object, class_id_offset - kHeapObjectTag,
kUnsignedHalfword);
}
@ -1139,7 +1146,7 @@ void Assembler::LoadClassById(Register result, Register class_id) {
const intptr_t offset =
Isolate::class_table_offset() + ClassTable::table_offset();
LoadFromOffset(result, result, offset);
ASSERT(kSizeOfClassPairLog2 == 4);
ASSERT(ClassTable::kSizeOfClassPairLog2 == 4);
add(class_id, class_id, Operand(class_id));
ldr(result, Address(result, class_id, UXTX, Address::Scaled));
}
@ -1180,7 +1187,8 @@ void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
}
void Assembler::RestoreCodePointer() {
ldr(CODE_REG, Address(FP, compiler_frame_layout.code_from_fp * kWordSize));
ldr(CODE_REG, Address(FP, compiler::target::frame_layout.code_from_fp *
target::kWordSize));
CheckCodePointer();
}
@ -1200,7 +1208,7 @@ void Assembler::CheckCodePointer() {
const intptr_t entry_offset =
CodeSize() + Instructions::HeaderSize() - kHeapObjectTag;
adr(R0, Immediate(-entry_offset));
ldr(TMP, FieldAddress(CODE_REG, Code::saved_instructions_offset()));
ldr(TMP, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
cmp(R0, Operand(TMP));
b(&instructions_ok, EQ);
brk(1);
@ -1299,7 +1307,8 @@ void Assembler::LeaveDartFrame(RestorePP restore_pp) {
if (restore_pp == kRestoreCallerPP) {
// Restore and untag PP.
LoadFromOffset(PP, FP,
compiler_frame_layout.saved_caller_pp_from_fp * kWordSize);
compiler::target::frame_layout.saved_caller_pp_from_fp *
target::kWordSize);
sub(PP, PP, Operand(kHeapObjectTag));
}
}
@ -1337,10 +1346,10 @@ void Assembler::LeaveCallRuntimeFrame() {
// and ensure proper alignment of the stack frame.
// We need to restore it before restoring registers.
const intptr_t kPushedRegistersSize =
kDartVolatileCpuRegCount * kWordSize +
kDartVolatileFpuRegCount * kWordSize +
(compiler_frame_layout.dart_fixed_frame_size - 2) *
kWordSize; // From EnterStubFrame (excluding PC / FP)
kDartVolatileCpuRegCount * target::kWordSize +
kDartVolatileFpuRegCount * target::kWordSize +
(compiler::target::frame_layout.dart_fixed_frame_size - 2) *
target::kWordSize; // From EnterStubFrame (excluding PC / FP)
AddImmediate(SP, FP, -kPushedRegistersSize);
for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) {
const Register reg = static_cast<Register>(i);
@ -1415,10 +1424,9 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
b(trace, NE);
}
void Assembler::UpdateAllocationStats(intptr_t cid, Heap::Space space) {
void Assembler::UpdateAllocationStats(intptr_t cid) {
ASSERT(cid > 0);
intptr_t counter_offset =
ClassTable::CounterOffsetFor(cid, space == Heap::kNew);
intptr_t counter_offset = ClassTable::CounterOffsetFor(cid, /*is_new=*/true);
LoadIsolate(TMP2);
intptr_t table_offset =
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
@ -1429,19 +1437,13 @@ void Assembler::UpdateAllocationStats(intptr_t cid, Heap::Space space) {
str(TMP, Address(TMP2, 0));
}
void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
Register size_reg,
Heap::Space space) {
void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg) {
ASSERT(cid > 0);
const uword class_offset = ClassTable::ClassOffsetFor(cid);
const uword count_field_offset =
(space == Heap::kNew)
? ClassHeapStats::allocated_since_gc_new_space_offset()
: ClassHeapStats::allocated_since_gc_old_space_offset();
ClassHeapStats::allocated_since_gc_new_space_offset();
const uword size_field_offset =
(space == Heap::kNew)
? ClassHeapStats::allocated_size_since_gc_new_space_offset()
: ClassHeapStats::allocated_size_since_gc_old_space_offset();
ClassHeapStats::allocated_size_since_gc_new_space_offset();
LoadIsolate(TMP2);
intptr_t table_offset =
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
@ -1462,18 +1464,19 @@ void Assembler::TryAllocate(const Class& cls,
Register top_reg,
bool tag_result) {
ASSERT(failure != NULL);
const intptr_t instance_size = cls.instance_size();
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(
MaybeTraceAllocation(cls.id(), /*temp_reg=*/top_reg, failure));
const classid_t cid = target::Class::GetId(cls);
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, /*temp_reg=*/top_reg, failure));
const Register kEndReg = TMP;
// instance_reg: potential next object start.
RELEASE_ASSERT((Thread::top_offset() + kWordSize) == Thread::end_offset());
RELEASE_ASSERT((Thread::top_offset() + target::kWordSize) ==
Thread::end_offset());
ldp(instance_reg, kEndReg,
Address(THR, Thread::top_offset(), Address::PairOffset));
@ -1486,18 +1489,14 @@ void Assembler::TryAllocate(const Class& cls,
// next object start and store the class in the class field of object.
str(top_reg, Address(THR, Thread::top_offset()));
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
NOT_IN_PRODUCT(UpdateAllocationStats(cls.id(), space));
NOT_IN_PRODUCT(UpdateAllocationStats(cid));
uint32_t tags = 0;
tags = RawObject::SizeTag::update(instance_size, tags);
ASSERT(cls.id() != kIllegalCid);
tags = RawObject::ClassIdTag::update(cls.id(), tags);
tags = RawObject::NewBit::update(true, tags);
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, instance_size);
// Extends the 32 bit tags with zeros, which is the uninitialized
// hash code.
LoadImmediate(TMP, tags);
StoreToOffset(TMP, instance_reg, Object::tags_offset());
StoreToOffset(TMP, instance_reg, target::Object::tags_offset());
if (tag_result) {
AddImmediate(instance_reg, kHeapObjectTag);
@ -1519,7 +1518,6 @@ void Assembler::TryAllocateArray(intptr_t cid,
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp1, failure));
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
// Potential new object start.
ldr(instance, Address(THR, Thread::top_offset()));
AddImmediateSetFlags(end_address, instance, instance_size);
@ -1537,18 +1535,16 @@ void Assembler::TryAllocateArray(intptr_t cid,
str(end_address, Address(THR, Thread::top_offset()));
add(instance, instance, Operand(kHeapObjectTag));
LoadImmediate(temp2, instance_size);
NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, temp2, space));
NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, temp2));
// Initialize the tags.
// instance: new object start as a tagged pointer.
uint32_t tags = 0;
tags = RawObject::ClassIdTag::update(cid, tags);
tags = RawObject::SizeTag::update(instance_size, tags);
tags = RawObject::NewBit::update(true, tags);
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, instance_size);
// Extends the 32 bit tags with zeros, which is the uninitialized
// hash code.
LoadImmediate(temp2, tags);
str(temp2, FieldAddress(instance, Array::tags_offset())); // Store tags.
str(temp2, FieldAddress(instance, target::Object::tags_offset()));
} else {
b(failure);
}
@ -1777,6 +1773,8 @@ void Assembler::PopRegisters(const RegisterSet& regs) {
}
}
} // namespace compiler
} // namespace dart
#endif // defined(TARGET_ARCH_ARM64) && !defined(DART_PRECOMPILED_RUNTIME)
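
A stand-alone model (not VM code) of the fast path TryAllocate emits above: bump the thread-local top pointer and fall back to the allocation stub when the bump would pass end. The field names are stand-ins for the real Thread slots.

#include <cstdint>

struct ThreadSketch {
  uintptr_t top;  // next free address in the new-space allocation buffer
  uintptr_t end;  // first address past the buffer
};

inline bool TryAllocateSketch(ThreadSketch* thread,
                              intptr_t instance_size,
                              uint32_t tag_word,
                              uintptr_t* tagged_result) {
  const uintptr_t start = thread->top;
  const uintptr_t new_top = start + instance_size;
  if (new_top > thread->end) {
    return false;  // jump to `failure`: the allocation stub handles it
  }
  thread->top = new_top;
  // The first word of every object is its tag word (class id, size, new bit).
  *reinterpret_cast<uint32_t*>(start) = tag_word;
  *tagged_result = start + 1;  // add kHeapObjectTag
  return true;
}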


@ -13,18 +13,20 @@
#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/class_id.h"
#include "vm/constants_arm64.h"
#include "vm/hash_map.h"
#include "vm/longjump.h"
#include "vm/object.h"
#include "vm/simulator.h"
namespace dart {
// Forward declarations.
class FlowGraphCompiler;
class RuntimeEntry;
class RegisterSet;
namespace compiler {
class Immediate : public ValueObject {
public:
explicit Immediate(int64_t value) : value_(value) {}
@ -426,7 +428,7 @@ class Operand : public ValueObject {
class Assembler : public AssemblerBase {
public:
explicit Assembler(ObjectPoolWrapper* object_pool_wrapper,
explicit Assembler(ObjectPoolBuilder* object_pool_builder,
bool use_far_branches = false);
~Assembler() {}
@ -445,7 +447,7 @@ class Assembler : public AssemblerBase {
void Drop(intptr_t stack_elements) {
ASSERT(stack_elements >= 0);
if (stack_elements > 0) {
add(SP, SP, Operand(stack_elements * kWordSize));
add(SP, SP, Operand(stack_elements * target::kWordSize));
}
}
@ -497,7 +499,7 @@ class Assembler : public AssemblerBase {
// On some other platforms, we draw a distinction between safe and unsafe
// smis.
static bool IsSafe(const Object& object) { return true; }
static bool IsSafeSmi(const Object& object) { return object.IsSmi(); }
static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
// Addition and subtraction.
// For add and sub, to use CSP for rn, o must be of type Operand::Extend.
@ -1256,19 +1258,19 @@ class Assembler : public AssemblerBase {
}
void Push(Register reg) {
ASSERT(reg != PP); // Only push PP with TagAndPushPP().
str(reg, Address(SP, -1 * kWordSize, Address::PreIndex));
str(reg, Address(SP, -1 * target::kWordSize, Address::PreIndex));
}
void Pop(Register reg) {
ASSERT(reg != PP); // Only pop PP with PopAndUntagPP().
ldr(reg, Address(SP, 1 * kWordSize, Address::PostIndex));
ldr(reg, Address(SP, 1 * target::kWordSize, Address::PostIndex));
}
void PushPair(Register low, Register high) {
ASSERT((low != PP) && (high != PP));
stp(low, high, Address(SP, -2 * kWordSize, Address::PairPreIndex));
stp(low, high, Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
}
void PopPair(Register low, Register high) {
ASSERT((low != PP) && (high != PP));
ldp(low, high, Address(SP, 2 * kWordSize, Address::PairPostIndex));
ldp(low, high, Address(SP, 2 * target::kWordSize, Address::PairPostIndex));
}
void PushFloat(VRegister reg) {
fstrs(reg, Address(SP, -1 * kFloatSize, Address::PreIndex));
@ -1291,16 +1293,17 @@ class Assembler : public AssemblerBase {
void TagAndPushPP() {
// Add the heap object tag back to PP before putting it on the stack.
add(TMP, PP, Operand(kHeapObjectTag));
str(TMP, Address(SP, -1 * kWordSize, Address::PreIndex));
str(TMP, Address(SP, -1 * target::kWordSize, Address::PreIndex));
}
void TagAndPushPPAndPcMarker() {
COMPILE_ASSERT(CODE_REG != TMP2);
// Add the heap object tag back to PP before putting it on the stack.
add(TMP2, PP, Operand(kHeapObjectTag));
stp(TMP2, CODE_REG, Address(SP, -2 * kWordSize, Address::PairPreIndex));
stp(TMP2, CODE_REG,
Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
}
void PopAndUntagPP() {
ldr(PP, Address(SP, 1 * kWordSize, Address::PostIndex));
ldr(PP, Address(SP, 1 * target::kWordSize, Address::PostIndex));
sub(PP, PP, Operand(kHeapObjectTag));
// The caller of PopAndUntagPP() must explicitly allow use of popped PP.
set_constant_pool_allowed(false);
@ -1353,15 +1356,16 @@ class Assembler : public AssemblerBase {
void Branch(const Code& code,
Register pp,
ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
ObjectPoolBuilderEntry::Patchability patchable =
ObjectPoolBuilderEntry::kNotPatchable);
void BranchPatchable(const Code& code);
void BranchLink(
const Code& code,
ObjectPool::Patchability patchable = ObjectPool::kNotPatchable);
void BranchLink(const Code& code,
ObjectPoolBuilderEntry::Patchability patchable =
ObjectPoolBuilderEntry::kNotPatchable);
void BranchLinkPatchable(const Code& code) {
BranchLink(code, ObjectPool::kPatchable);
BranchLink(code, ObjectPoolBuilderEntry::kPatchable);
}
void BranchLinkToRuntime();
@ -1481,7 +1485,7 @@ class Assembler : public AssemblerBase {
bool CanLoadFromObjectPool(const Object& object) const;
void LoadNativeEntry(Register dst,
const ExternalLabel* label,
ObjectPool::Patchability patchable);
ObjectPoolBuilderEntry::Patchability patchable);
void LoadFunctionFromCalleePool(Register dst,
const Function& function,
Register new_pp);
@ -1538,11 +1542,9 @@ class Assembler : public AssemblerBase {
void MonomorphicCheckedEntry();
void UpdateAllocationStats(intptr_t cid, Heap::Space space);
void UpdateAllocationStats(intptr_t cid);
void UpdateAllocationStatsWithSize(intptr_t cid,
Register size_reg,
Heap::Space space);
void UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg);
// If allocation tracing for |cid| is enabled, will jump to |trace| label,
// which will allocate in the runtime where tracing occurs.
@ -1761,8 +1763,7 @@ class Assembler : public AssemblerBase {
int32_t EncodeImm19BranchOffset(int64_t imm, int32_t instr) {
if (!CanEncodeImm19BranchOffset(imm)) {
ASSERT(!use_far_branches());
Thread::Current()->long_jump_base()->Jump(1,
Object::branch_offset_error());
BailoutWithBranchOffsetError();
}
const int32_t imm32 = static_cast<int32_t>(imm);
const int32_t off = (((imm32 >> 2) << kImm19Shift) & kImm19Mask);
@ -1777,8 +1778,7 @@ class Assembler : public AssemblerBase {
int32_t EncodeImm14BranchOffset(int64_t imm, int32_t instr) {
if (!CanEncodeImm14BranchOffset(imm)) {
ASSERT(!use_far_branches());
Thread::Current()->long_jump_base()->Jump(1,
Object::branch_offset_error());
BailoutWithBranchOffsetError();
}
const int32_t imm32 = static_cast<int32_t>(imm);
const int32_t off = (((imm32 >> 2) << kImm14Shift) & kImm14Mask);
@ -2239,14 +2239,21 @@ class Assembler : public AssemblerBase {
CanBeSmi can_be_smi,
BarrierFilterMode barrier_filter_mode);
friend class FlowGraphCompiler;
friend class dart::FlowGraphCompiler;
std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
std::function<void()> invoke_array_write_barrier_;
std::function<void()> generate_invoke_array_write_barrier_;
DISALLOW_ALLOCATION();
DISALLOW_COPY_AND_ASSIGN(Assembler);
};
} // namespace compiler
using compiler::Address;
using compiler::FieldAddress;
using compiler::Immediate;
using compiler::Operand;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM64_H_
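
EncodeImm19BranchOffset above bails out when a branch offset does not fit its field. A small self-contained checker of that range, based on my reading of the ARM64 encoding (imm19 counts 4-byte instructions, so the byte offset must be a word-aligned signed 21-bit value); this is not code copied from the tree.

#include <cstdint>

inline bool FitsInSignedBits(int64_t value, int bits) {
  const int64_t limit = int64_t{1} << (bits - 1);
  return value >= -limit && value < limit;
}

inline bool CanEncodeImm19BranchOffsetSketch(int64_t byte_offset) {
  return (byte_offset & 3) == 0 && FitsInSignedBits(byte_offset, 21);
}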


@ -12,7 +12,7 @@
#include "vm/virtual_memory.h"
namespace dart {
namespace compiler {
#define __ assembler->
ASSEMBLER_TEST_GENERATE(Simple, assembler) {
@ -339,12 +339,13 @@ ASSEMBLER_TEST_RUN(WordOverflow, test) {
ASSEMBLER_TEST_GENERATE(SimpleLoadStore, assembler) {
__ SetupDartSP();
__ sub(CSP, CSP, Operand(2 * kWordSize)); // Must not access beyond CSP.
__ sub(CSP, CSP,
Operand(2 * target::kWordSize)); // Must not access beyond CSP.
__ movz(R0, Immediate(43), 0);
__ movz(R1, Immediate(42), 0);
__ str(R1, Address(SP, -1 * kWordSize, Address::PreIndex));
__ ldr(R0, Address(SP, 1 * kWordSize, Address::PostIndex));
__ str(R1, Address(SP, -1 * target::kWordSize, Address::PreIndex));
__ ldr(R0, Address(SP, 1 * target::kWordSize, Address::PostIndex));
__ RestoreCSP();
__ ret();
}
@ -373,16 +374,17 @@ ASSEMBLER_TEST_RUN(SimpleLoadStoreHeapTag, test) {
ASSEMBLER_TEST_GENERATE(LoadStoreLargeIndex, assembler) {
__ SetupDartSP();
__ sub(CSP, CSP, Operand(32 * kWordSize)); // Must not access beyond CSP.
__ sub(CSP, CSP,
Operand(32 * target::kWordSize)); // Must not access beyond CSP.
__ movz(R0, Immediate(43), 0);
__ movz(R1, Immediate(42), 0);
// Largest negative offset that can fit in the signed 9-bit immediate field.
__ str(R1, Address(SP, -32 * kWordSize, Address::PreIndex));
__ str(R1, Address(SP, -32 * target::kWordSize, Address::PreIndex));
// Largest positive kWordSize aligned offset that we can fit.
__ ldr(R0, Address(SP, 31 * kWordSize, Address::PostIndex));
__ ldr(R0, Address(SP, 31 * target::kWordSize, Address::PostIndex));
// Correction.
__ add(SP, SP, Operand(kWordSize)); // Restore SP.
__ add(SP, SP, Operand(target::kWordSize)); // Restore SP.
__ RestoreCSP();
__ ret();
}
@ -396,10 +398,10 @@ ASSEMBLER_TEST_GENERATE(LoadStoreLargeOffset, assembler) {
__ SetupDartSP();
__ movz(R0, Immediate(43), 0);
__ movz(R1, Immediate(42), 0);
__ sub(SP, SP, Operand(512 * kWordSize));
__ sub(SP, SP, Operand(512 * target::kWordSize));
__ andi(CSP, SP, Immediate(~15)); // Must not access beyond CSP.
__ str(R1, Address(SP, 512 * kWordSize, Address::Offset));
__ add(SP, SP, Operand(512 * kWordSize));
__ str(R1, Address(SP, 512 * target::kWordSize, Address::Offset));
__ add(SP, SP, Operand(512 * target::kWordSize));
__ ldr(R0, Address(SP));
__ RestoreCSP();
__ ret();
@ -419,10 +421,10 @@ ASSEMBLER_TEST_GENERATE(LoadStoreExtReg, assembler) {
// This should sign extend R2, and add to SP to get address,
// i.e. SP - kWordSize.
__ str(R1, Address(SP, R2, SXTW));
__ sub(SP, SP, Operand(kWordSize));
__ sub(SP, SP, Operand(target::kWordSize));
__ andi(CSP, SP, Immediate(~15)); // Must not access beyond CSP.
__ ldr(R0, Address(SP));
__ add(SP, SP, Operand(kWordSize));
__ add(SP, SP, Operand(target::kWordSize));
__ RestoreCSP();
__ ret();
}
@ -437,12 +439,12 @@ ASSEMBLER_TEST_GENERATE(LoadStoreScaledReg, assembler) {
__ movz(R0, Immediate(43), 0);
__ movz(R1, Immediate(42), 0);
__ movz(R2, Immediate(10), 0);
__ sub(SP, SP, Operand(10 * kWordSize));
__ sub(SP, SP, Operand(10 * target::kWordSize));
__ andi(CSP, SP, Immediate(~15)); // Must not access beyond CSP.
// Store R1 into SP + R2 * kWordSize.
__ str(R1, Address(SP, R2, UXTX, Address::Scaled));
__ ldr(R0, Address(SP, R2, UXTX, Address::Scaled));
__ add(SP, SP, Operand(10 * kWordSize));
__ add(SP, SP, Operand(10 * target::kWordSize));
__ RestoreCSP();
__ ret();
}
@ -455,7 +457,8 @@ ASSEMBLER_TEST_RUN(LoadStoreScaledReg, test) {
ASSEMBLER_TEST_GENERATE(LoadSigned32Bit, assembler) {
__ SetupDartSP();
__ sub(CSP, CSP, Operand(2 * kWordSize)); // Must not access beyond CSP.
__ sub(CSP, CSP,
Operand(2 * target::kWordSize)); // Must not access beyond CSP.
__ LoadImmediate(R1, 0xffffffff);
__ str(R1, Address(SP, -4, Address::PreIndex, kWord), kWord);
@ -473,12 +476,13 @@ ASSEMBLER_TEST_RUN(LoadSigned32Bit, test) {
ASSEMBLER_TEST_GENERATE(SimpleLoadStorePair, assembler) {
__ SetupDartSP();
__ sub(CSP, CSP, Operand(2 * kWordSize)); // Must not access beyond CSP.
__ sub(CSP, CSP,
Operand(2 * target::kWordSize)); // Must not access beyond CSP.
__ LoadImmediate(R2, 43);
__ LoadImmediate(R3, 42);
__ stp(R2, R3, Address(SP, -2 * kWordSize, Address::PairPreIndex));
__ ldp(R0, R1, Address(SP, 2 * kWordSize, Address::PairPostIndex));
__ stp(R2, R3, Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
__ ldp(R0, R1, Address(SP, 2 * target::kWordSize, Address::PairPostIndex));
__ sub(R0, R0, Operand(R1));
__ RestoreCSP();
__ ret();
@ -493,11 +497,11 @@ ASSEMBLER_TEST_GENERATE(LoadStorePairOffset, assembler) {
__ SetupDartSP();
__ LoadImmediate(R2, 43);
__ LoadImmediate(R3, 42);
__ sub(SP, SP, Operand(4 * kWordSize));
__ sub(SP, SP, Operand(4 * target::kWordSize));
__ andi(CSP, SP, Immediate(~15)); // Must not access beyond CSP.
__ stp(R2, R3, Address::Pair(SP, 2 * kWordSize));
__ ldp(R0, R1, Address::Pair(SP, 2 * kWordSize));
__ add(SP, SP, Operand(4 * kWordSize));
__ stp(R2, R3, Address::Pair(SP, 2 * target::kWordSize));
__ ldp(R0, R1, Address::Pair(SP, 2 * target::kWordSize));
__ add(SP, SP, Operand(4 * target::kWordSize));
__ sub(R0, R0, Operand(R1));
__ RestoreCSP();
__ ret();
@ -2520,11 +2524,12 @@ ASSEMBLER_TEST_RUN(Fmovsr, test) {
ASSEMBLER_TEST_GENERATE(FldrdFstrdPrePostIndex, assembler) {
__ SetupDartSP();
__ sub(CSP, CSP, Operand(2 * kWordSize)); // Must not access beyond CSP.
__ sub(CSP, CSP,
Operand(2 * target::kWordSize)); // Must not access beyond CSP.
__ LoadDImmediate(V1, 42.0);
__ fstrd(V1, Address(SP, -1 * kWordSize, Address::PreIndex));
__ fldrd(V0, Address(SP, 1 * kWordSize, Address::PostIndex));
__ fstrd(V1, Address(SP, -1 * target::kWordSize, Address::PreIndex));
__ fldrd(V0, Address(SP, 1 * target::kWordSize, Address::PostIndex));
__ RestoreCSP();
__ ret();
}
@ -2537,12 +2542,13 @@ ASSEMBLER_TEST_RUN(FldrdFstrdPrePostIndex, test) {
ASSEMBLER_TEST_GENERATE(FldrsFstrsPrePostIndex, assembler) {
__ SetupDartSP();
__ sub(CSP, CSP, Operand(2 * kWordSize)); // Must not access beyond CSP.
__ sub(CSP, CSP,
Operand(2 * target::kWordSize)); // Must not access beyond CSP.
__ LoadDImmediate(V1, 42.0);
__ fcvtsd(V2, V1);
__ fstrs(V2, Address(SP, -1 * kWordSize, Address::PreIndex));
__ fldrs(V3, Address(SP, 1 * kWordSize, Address::PostIndex));
__ fstrs(V2, Address(SP, -1 * target::kWordSize, Address::PreIndex));
__ fldrs(V3, Address(SP, 1 * target::kWordSize, Address::PostIndex));
__ fcvtds(V0, V3);
__ RestoreCSP();
__ ret();
@ -2556,7 +2562,8 @@ ASSEMBLER_TEST_RUN(FldrsFstrsPrePostIndex, test) {
ASSEMBLER_TEST_GENERATE(FldrqFstrqPrePostIndex, assembler) {
__ SetupDartSP();
__ sub(CSP, CSP, Operand(2 * kWordSize)); // Must not access beyond CSP.
__ sub(CSP, CSP,
Operand(2 * target::kWordSize)); // Must not access beyond CSP.
__ LoadDImmediate(V1, 21.0);
__ LoadDImmediate(V2, 21.0);
@ -2564,9 +2571,9 @@ ASSEMBLER_TEST_GENERATE(FldrqFstrqPrePostIndex, assembler) {
__ Push(R1);
__ PushDouble(V1);
__ PushDouble(V2);
__ fldrq(V3, Address(SP, 2 * kWordSize, Address::PostIndex));
__ fldrq(V3, Address(SP, 2 * target::kWordSize, Address::PostIndex));
__ Pop(R0);
__ fstrq(V3, Address(SP, -2 * kWordSize, Address::PreIndex));
__ fstrq(V3, Address(SP, -2 * target::kWordSize, Address::PreIndex));
__ PopDouble(V0);
__ PopDouble(V1);
__ faddd(V0, V0, V1);
@ -2720,11 +2727,11 @@ ASSEMBLER_TEST_GENERATE(FldrdFstrdHeapTag, assembler) {
__ SetupDartSP();
__ LoadDImmediate(V0, 43.0);
__ LoadDImmediate(V1, 42.0);
__ AddImmediate(SP, SP, -1 * kWordSize);
__ AddImmediate(SP, SP, -1 * target::kWordSize);
__ add(R2, SP, Operand(1));
__ fstrd(V1, Address(R2, -1));
__ fldrd(V0, Address(R2, -1));
__ AddImmediate(SP, 1 * kWordSize);
__ AddImmediate(SP, 1 * target::kWordSize);
__ RestoreCSP();
__ ret();
}
@ -2737,16 +2744,17 @@ ASSEMBLER_TEST_RUN(FldrdFstrdHeapTag, test) {
ASSEMBLER_TEST_GENERATE(FldrdFstrdLargeIndex, assembler) {
__ SetupDartSP();
__ sub(CSP, CSP, Operand(32 * kWordSize)); // Must not access beyond CSP.
__ sub(CSP, CSP,
Operand(32 * target::kWordSize)); // Must not access beyond CSP.
__ LoadDImmediate(V0, 43.0);
__ LoadDImmediate(V1, 42.0);
// Largest negative offset that can fit in the signed 9-bit immediate field.
__ fstrd(V1, Address(SP, -32 * kWordSize, Address::PreIndex));
__ fstrd(V1, Address(SP, -32 * target::kWordSize, Address::PreIndex));
// Largest positive kWordSize aligned offset that we can fit.
__ fldrd(V0, Address(SP, 31 * kWordSize, Address::PostIndex));
__ fldrd(V0, Address(SP, 31 * target::kWordSize, Address::PostIndex));
// Correction.
__ add(SP, SP, Operand(kWordSize)); // Restore SP.
__ add(SP, SP, Operand(target::kWordSize)); // Restore SP.
__ RestoreCSP();
__ ret();
}
@ -2760,10 +2768,10 @@ ASSEMBLER_TEST_GENERATE(FldrdFstrdLargeOffset, assembler) {
__ SetupDartSP();
__ LoadDImmediate(V0, 43.0);
__ LoadDImmediate(V1, 42.0);
__ sub(SP, SP, Operand(512 * kWordSize));
__ sub(SP, SP, Operand(512 * target::kWordSize));
__ andi(CSP, SP, Immediate(~15)); // Must not access beyond CSP.
__ fstrd(V1, Address(SP, 512 * kWordSize, Address::Offset));
__ add(SP, SP, Operand(512 * kWordSize));
__ fstrd(V1, Address(SP, 512 * target::kWordSize, Address::Offset));
__ add(SP, SP, Operand(512 * target::kWordSize));
__ fldrd(V0, Address(SP));
__ RestoreCSP();
__ ret();
@ -2783,10 +2791,10 @@ ASSEMBLER_TEST_GENERATE(FldrdFstrdExtReg, assembler) {
// This should sign extend R2, and add to SP to get address,
// i.e. SP - kWordSize.
__ fstrd(V1, Address(SP, R2, SXTW));
__ sub(SP, SP, Operand(kWordSize));
__ sub(SP, SP, Operand(target::kWordSize));
__ andi(CSP, SP, Immediate(~15)); // Must not access beyond CSP.
__ fldrd(V0, Address(SP));
__ add(SP, SP, Operand(kWordSize));
__ add(SP, SP, Operand(target::kWordSize));
__ RestoreCSP();
__ ret();
}
@ -2801,12 +2809,12 @@ ASSEMBLER_TEST_GENERATE(FldrdFstrdScaledReg, assembler) {
__ LoadDImmediate(V0, 43.0);
__ LoadDImmediate(V1, 42.0);
__ movz(R2, Immediate(10), 0);
__ sub(SP, SP, Operand(10 * kWordSize));
__ sub(SP, SP, Operand(10 * target::kWordSize));
__ andi(CSP, SP, Immediate(~15)); // Must not access beyond CSP.
// Store V1 into SP + R2 * kWordSize.
__ fstrd(V1, Address(SP, R2, UXTX, Address::Scaled));
__ fldrd(V0, Address(SP, R2, UXTX, Address::Scaled));
__ add(SP, SP, Operand(10 * kWordSize));
__ add(SP, SP, Operand(10 * target::kWordSize));
__ RestoreCSP();
__ ret();
}
@ -4107,6 +4115,7 @@ ASSEMBLER_TEST_GENERATE(StoreIntoObject, assembler) {
__ ret();
}
} // namespace compiler
} // namespace dart
#endif // defined(TARGET_ARCH_ARM64)
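
A minimal test in the same shape as the ones above, showing the target::kWordSize spelling the migrated tests use. Only the GENERATE half mirrors instructions visible in this diff; the RUN half is an assumed use of the usual harness macros.

ASSEMBLER_TEST_GENERATE(PushPopWordSketch, assembler) {
  __ SetupDartSP();
  __ sub(CSP, CSP,
         Operand(2 * target::kWordSize));  // Must not access beyond CSP.
  __ LoadImmediate(R0, 41);
  __ LoadImmediate(R1, 1);
  __ str(R1, Address(SP, -1 * target::kWordSize, Address::PreIndex));
  __ ldr(R2, Address(SP, 1 * target::kWordSize, Address::PostIndex));
  __ add(R0, R0, Operand(R2));
  __ RestoreCSP();
  __ ret();
}

ASSEMBLER_TEST_RUN(PushPopWordSketch, test) {
  // Assumed harness usage, following the pattern of the existing RUN bodies.
  typedef int64_t (*Int64Return)() DART_UNUSED;
  EXPECT_EQ(42, EXECUTE_TEST_CODE_INT64(Int64Return, test->entry()));
}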


@ -12,6 +12,7 @@
#include "vm/virtual_memory.h"
namespace dart {
namespace compiler {
#define __ assembler->
@ -262,11 +263,11 @@ ASSEMBLER_TEST_GENERATE(SingleVLoadStore, assembler) {
if (TargetCPUFeatures::vfp_supported()) {
__ LoadImmediate(R0, bit_cast<int32_t, float>(12.3f));
__ mov(R2, Operand(SP));
__ str(R0, Address(SP, (-kWordSize * 30), Address::PreIndex));
__ vldrs(S0, Address(R2, (-kWordSize * 30)));
__ str(R0, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
__ vldrs(S0, Address(R2, (-target::kWordSize * 30)));
__ vadds(S0, S0, S0);
__ vstrs(S0, Address(R2, (-kWordSize * 30)));
__ ldr(R0, Address(SP, (kWordSize * 30), Address::PostIndex));
__ vstrs(S0, Address(R2, (-target::kWordSize * 30)));
__ ldr(R0, Address(SP, (target::kWordSize * 30), Address::PostIndex));
}
__ bx(LR);
}
@ -286,11 +287,11 @@ ASSEMBLER_TEST_GENERATE(SingleVShiftLoadStore, assembler) {
__ mov(R2, Operand(SP));
// Expressing __str(R0, Address(SP, (-kWordSize * 32), Address::PreIndex));
// as:
__ mov(R1, Operand(kWordSize));
__ mov(R1, Operand(target::kWordSize));
__ str(R0, Address(SP, R1, LSL, 5, Address::NegPreIndex));
__ vldrs(S0, Address(R2, (-kWordSize * 32)));
__ vldrs(S0, Address(R2, (-target::kWordSize * 32)));
__ vadds(S0, S0, S0);
__ vstrs(S0, Address(R2, (-kWordSize * 32)));
__ vstrs(S0, Address(R2, (-target::kWordSize * 32)));
// Expressing __ldr(R0, Address(SP, (kWordSize * 32), Address::PostIndex));
// as:
__ ldr(R0, Address(SP, R1, LSL, 5, Address::PostIndex));
@ -313,13 +314,13 @@ ASSEMBLER_TEST_GENERATE(DoubleVLoadStore, assembler) {
__ LoadImmediate(R0, Utils::Low32Bits(value));
__ LoadImmediate(R1, Utils::High32Bits(value));
__ mov(R2, Operand(SP));
__ str(R0, Address(SP, (-kWordSize * 30), Address::PreIndex));
__ str(R1, Address(R2, (-kWordSize * 29)));
__ vldrd(D0, Address(R2, (-kWordSize * 30)));
__ str(R0, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
__ str(R1, Address(R2, (-target::kWordSize * 29)));
__ vldrd(D0, Address(R2, (-target::kWordSize * 30)));
__ vaddd(D0, D0, D0);
__ vstrd(D0, Address(R2, (-kWordSize * 30)));
__ ldr(R1, Address(R2, (-kWordSize * 29)));
__ ldr(R0, Address(SP, (kWordSize * 30), Address::PostIndex));
__ vstrd(D0, Address(R2, (-target::kWordSize * 30)));
__ ldr(R1, Address(R2, (-target::kWordSize * 29)));
__ ldr(R0, Address(SP, (target::kWordSize * 30), Address::PostIndex));
}
__ bx(LR);
}
@ -1082,8 +1083,8 @@ ASSEMBLER_TEST_GENERATE(Ldrh, assembler) {
__ mov(R1, Operand(0x11));
__ mov(R2, Operand(SP));
__ str(R1, Address(SP, (-kWordSize * 30), Address::PreIndex));
__ ldrh(R0, Address(R2, (-kWordSize * 30)));
__ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
__ ldrh(R0, Address(R2, (-target::kWordSize * 30)));
__ cmp(R0, Operand(0x11));
__ b(&Test1, EQ);
__ mov(R0, Operand(1));
@ -1091,8 +1092,8 @@ ASSEMBLER_TEST_GENERATE(Ldrh, assembler) {
__ Bind(&Test1);
__ mov(R0, Operand(0x22));
__ strh(R0, Address(R2, (-kWordSize * 30)));
__ ldrh(R1, Address(R2, (-kWordSize * 30)));
__ strh(R0, Address(R2, (-target::kWordSize * 30)));
__ ldrh(R1, Address(R2, (-target::kWordSize * 30)));
__ cmp(R1, Operand(0x22));
__ b(&Test2, EQ);
__ mov(R0, Operand(1));
@ -1100,7 +1101,7 @@ ASSEMBLER_TEST_GENERATE(Ldrh, assembler) {
__ Bind(&Test2);
__ mov(R0, Operand(0));
__ AddImmediate(R2, (-kWordSize * 30));
__ AddImmediate(R2, (-target::kWordSize * 30));
__ strh(R0, Address(R2));
__ ldrh(R1, Address(R2));
__ cmp(R1, Operand(0));
@ -1111,7 +1112,7 @@ ASSEMBLER_TEST_GENERATE(Ldrh, assembler) {
__ mov(R0, Operand(0));
__ Bind(&Done);
__ ldr(R1, Address(SP, (kWordSize * 30), Address::PostIndex));
__ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex));
__ bx(LR);
}
@ -1124,9 +1125,9 @@ ASSEMBLER_TEST_RUN(Ldrh, test) {
ASSEMBLER_TEST_GENERATE(Ldrsb, assembler) {
__ mov(R1, Operand(0xFF));
__ mov(R2, Operand(SP));
__ str(R1, Address(SP, (-kWordSize * 30), Address::PreIndex));
__ ldrsb(R0, Address(R2, (-kWordSize * 30)));
__ ldr(R1, Address(SP, (kWordSize * 30), Address::PostIndex));
__ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
__ ldrsb(R0, Address(R2, (-target::kWordSize * 30)));
__ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex));
__ bx(LR);
}
@ -1139,9 +1140,9 @@ ASSEMBLER_TEST_RUN(Ldrsb, test) {
ASSEMBLER_TEST_GENERATE(Ldrb, assembler) {
__ mov(R1, Operand(0xFF));
__ mov(R2, Operand(SP));
__ str(R1, Address(SP, (-kWordSize * 30), Address::PreIndex));
__ ldrb(R0, Address(R2, (-kWordSize * 30)));
__ ldr(R1, Address(SP, (kWordSize * 30), Address::PostIndex));
__ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
__ ldrb(R0, Address(R2, (-target::kWordSize * 30)));
__ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex));
__ bx(LR);
}
@ -1154,9 +1155,9 @@ ASSEMBLER_TEST_RUN(Ldrb, test) {
ASSEMBLER_TEST_GENERATE(Ldrsh, assembler) {
__ mov(R1, Operand(0xFF));
__ mov(R2, Operand(SP));
__ str(R1, Address(SP, (-kWordSize * 30), Address::PreIndex));
__ ldrsh(R0, Address(R2, (-kWordSize * 30)));
__ ldr(R1, Address(SP, (kWordSize * 30), Address::PostIndex));
__ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
__ ldrsh(R0, Address(R2, (-target::kWordSize * 30)));
__ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex));
__ bx(LR);
}
@ -1169,9 +1170,9 @@ ASSEMBLER_TEST_RUN(Ldrsh, test) {
ASSEMBLER_TEST_GENERATE(Ldrh1, assembler) {
__ mov(R1, Operand(0xFF));
__ mov(R2, Operand(SP));
__ str(R1, Address(SP, (-kWordSize * 30), Address::PreIndex));
__ ldrh(R0, Address(R2, (-kWordSize * 30)));
__ ldr(R1, Address(SP, (kWordSize * 30), Address::PostIndex));
__ str(R1, Address(SP, (-target::kWordSize * 30), Address::PreIndex));
__ ldrh(R0, Address(R2, (-target::kWordSize * 30)));
__ ldr(R1, Address(SP, (target::kWordSize * 30), Address::PostIndex));
__ bx(LR);
}
@ -1183,12 +1184,12 @@ ASSEMBLER_TEST_RUN(Ldrh1, test) {
ASSEMBLER_TEST_GENERATE(Ldrd, assembler) {
__ mov(IP, Operand(SP));
__ sub(SP, SP, Operand(kWordSize * 30));
__ sub(SP, SP, Operand(target::kWordSize * 30));
__ strd(R2, R3, SP, 0);
__ strd(R0, R1, IP, (-kWordSize * 28));
__ ldrd(R2, R3, IP, (-kWordSize * 28));
__ strd(R0, R1, IP, (-target::kWordSize * 28));
__ ldrd(R2, R3, IP, (-target::kWordSize * 28));
__ ldrd(R0, R1, SP, 0);
__ add(SP, SP, Operand(kWordSize * 30));
__ add(SP, SP, Operand(target::kWordSize * 30));
__ sub(R0, R0, Operand(R2));
__ add(R1, R1, Operand(R3));
__ bx(LR);
@ -1215,12 +1216,12 @@ ASSEMBLER_TEST_GENERATE(Ldm_stm_da, assembler) {
__ Push(R0); // Make room, so we can decrement after.
__ stm(DA_W, SP, (1 << R0 | 1 << R1 | 1 << R2 | 1 << R3));
__ str(R2, Address(SP)); // Should be a free slot.
__ ldr(R9, Address(SP, 1 * kWordSize)); // R0. R9 = +1.
__ ldr(IP, Address(SP, 2 * kWordSize)); // R1.
__ ldr(R9, Address(SP, 1 * target::kWordSize)); // R0. R9 = +1.
__ ldr(IP, Address(SP, 2 * target::kWordSize)); // R1.
__ sub(R9, R9, Operand(IP)); // -R1. R9 = -6.
__ ldr(IP, Address(SP, 3 * kWordSize)); // R2.
__ ldr(IP, Address(SP, 3 * target::kWordSize)); // R2.
__ add(R9, R9, Operand(IP)); // +R2. R9 = +5.
__ ldr(IP, Address(SP, 4 * kWordSize)); // R3.
__ ldr(IP, Address(SP, 4 * target::kWordSize)); // R3.
__ sub(R9, R9, Operand(IP)); // -R3. R9 = -26.
__ ldm(IB_W, SP, (1 << R0 | 1 << R1 | 1 << R2 | 1 << R3));
// Same operations again. But this time from the restore registers.
@ -1245,9 +1246,9 @@ ASSEMBLER_TEST_RUN(Ldm_stm_da, test) {
ASSEMBLER_TEST_GENERATE(AddressShiftStrLSL1NegOffset, assembler) {
__ mov(R2, Operand(42));
__ mov(R1, Operand(kWordSize));
__ mov(R1, Operand(target::kWordSize));
__ str(R2, Address(SP, R1, LSL, 1, Address::NegOffset));
__ ldr(R0, Address(SP, (-kWordSize * 2), Address::Offset));
__ ldr(R0, Address(SP, (-target::kWordSize * 2), Address::Offset));
__ bx(LR);
}
@ -1259,8 +1260,8 @@ ASSEMBLER_TEST_RUN(AddressShiftStrLSL1NegOffset, test) {
ASSEMBLER_TEST_GENERATE(AddressShiftLdrLSL5NegOffset, assembler) {
__ mov(R2, Operand(42));
__ mov(R1, Operand(kWordSize));
__ str(R2, Address(SP, (-kWordSize * 32), Address::Offset));
__ mov(R1, Operand(target::kWordSize));
__ str(R2, Address(SP, (-target::kWordSize * 32), Address::Offset));
__ ldr(R0, Address(SP, R1, LSL, 5, Address::NegOffset));
__ bx(LR);
}
@ -1273,9 +1274,9 @@ ASSEMBLER_TEST_RUN(AddressShiftLdrLSL5NegOffset, test) {
ASSEMBLER_TEST_GENERATE(AddressShiftStrLRS1NegOffset, assembler) {
__ mov(R2, Operand(42));
__ mov(R1, Operand(kWordSize * 2));
__ mov(R1, Operand(target::kWordSize * 2));
__ str(R2, Address(SP, R1, LSR, 1, Address::NegOffset));
__ ldr(R0, Address(SP, -kWordSize, Address::Offset));
__ ldr(R0, Address(SP, -target::kWordSize, Address::Offset));
__ bx(LR);
}
@ -1287,8 +1288,8 @@ ASSEMBLER_TEST_RUN(AddressShiftStrLRS1NegOffset, test) {
ASSEMBLER_TEST_GENERATE(AddressShiftLdrLRS1NegOffset, assembler) {
__ mov(R2, Operand(42));
__ mov(R1, Operand(kWordSize * 2));
__ str(R2, Address(SP, -kWordSize, Address::Offset));
__ mov(R1, Operand(target::kWordSize * 2));
__ str(R2, Address(SP, -target::kWordSize, Address::Offset));
__ ldr(R0, Address(SP, R1, LSR, 1, Address::NegOffset));
__ bx(LR);
}
@ -1301,10 +1302,10 @@ ASSEMBLER_TEST_RUN(AddressShiftLdrLRS1NegOffset, test) {
ASSEMBLER_TEST_GENERATE(AddressShiftStrLSLNegPreIndex, assembler) {
__ mov(R2, Operand(42));
__ mov(R1, Operand(kWordSize));
__ mov(R1, Operand(target::kWordSize));
__ mov(R3, Operand(SP));
__ str(R2, Address(SP, R1, LSL, 5, Address::NegPreIndex));
__ ldr(R0, Address(R3, (-kWordSize * 32), Address::Offset));
__ ldr(R0, Address(R3, (-target::kWordSize * 32), Address::Offset));
__ mov(SP, Operand(R3));
__ bx(LR);
}
@ -1317,8 +1318,8 @@ ASSEMBLER_TEST_RUN(AddressShiftStrLSLNegPreIndex, test) {
ASSEMBLER_TEST_GENERATE(AddressShiftLdrLSLNegPreIndex, assembler) {
__ mov(R2, Operand(42));
__ mov(R1, Operand(kWordSize));
__ str(R2, Address(SP, (-kWordSize * 32), Address::PreIndex));
__ mov(R1, Operand(target::kWordSize));
__ str(R2, Address(SP, (-target::kWordSize * 32), Address::PreIndex));
__ ldr(R0, Address(SP, R1, LSL, 5, Address::PostIndex));
__ bx(LR);
}
@ -3845,6 +3846,7 @@ ASSEMBLER_TEST_GENERATE(StoreIntoObject, assembler) {
__ Ret();
}
} // namespace compiler
} // namespace dart
#endif // defined TARGET_ARCH_ARM

View file

@ -5,19 +5,19 @@
#include "vm/globals.h" // NOLINT
#if defined(TARGET_ARCH_DBC)
#define SHOULD_NOT_INCLUDE_RUNTIME
#include "vm/compiler/assembler/assembler.h"
#include "vm/cpu.h"
#include "vm/longjump.h"
#include "vm/runtime_entry.h"
#include "vm/simulator.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
namespace dart {
DECLARE_FLAG(bool, check_code_pointer);
DECLARE_FLAG(bool, inline_alloc);
namespace compiler {
void Assembler::InitializeMemoryWithBreakpoints(uword data, intptr_t length) {
const uword end = data + length;
while (data < end) {
@ -72,11 +72,11 @@ void Assembler::Emit(int32_t value) {
}
const char* Assembler::RegisterName(Register reg) {
return Thread::Current()->zone()->PrintToString("R%d", reg);
return ThreadState::Current()->zone()->PrintToString("R%d", reg);
}
const char* Assembler::FpuRegisterName(FpuRegister reg) {
return Thread::Current()->zone()->PrintToString("F%d", reg);
return ThreadState::Current()->zone()->PrintToString("F%d", reg);
}
static int32_t EncodeJump(int32_t relative_pc) {
@ -125,9 +125,11 @@ void Assembler::LoadConstant(uintptr_t ra, const Object& obj) {
}
intptr_t Assembler::AddConstant(const Object& obj) {
return object_pool_wrapper().FindObject(Object::ZoneHandle(obj.raw()));
return object_pool_builder().FindObject(
NewZoneHandle(ThreadState::Current()->zone(), obj));
}
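A hedged sketch of the NewZoneHandle helper used above: the name and call site come from this diff, but the body shown here is only an assumption that it reproduces the Object::ZoneHandle allocation it replaces.

// Sketch only: assumed equivalent of the replaced Object::ZoneHandle call.
static Object& NewZoneHandleSketch(Zone* zone, const Object& obj) {
  // Allocate a handle in the given zone referring to the same raw object.
  return Object::ZoneHandle(zone, obj.raw());
}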
} // namespace compiler
} // namespace dart
#endif // defined TARGET_ARCH_DBC

View file

@ -14,11 +14,12 @@
#include "vm/constants_dbc.h"
#include "vm/cpu.h"
#include "vm/hash_map.h"
#include "vm/object.h"
#include "vm/simulator.h"
namespace dart {
namespace compiler {
// Dummy declaration to make things compile.
class Address : public ValueObject {
private:
@ -27,9 +28,9 @@ class Address : public ValueObject {
class Assembler : public AssemblerBase {
public:
explicit Assembler(ObjectPoolWrapper* object_pool_wrapper,
explicit Assembler(ObjectPoolBuilder* object_pool_builder,
bool use_far_branches = false)
: AssemblerBase(object_pool_wrapper) {}
: AssemblerBase(object_pool_builder) {}
~Assembler() {}
void Bind(Label* label);
@ -99,6 +100,10 @@ class Assembler : public AssemblerBase {
DISALLOW_COPY_AND_ASSIGN(Assembler);
};
} // namespace compiler
using compiler::Address;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_DBC_H_

View file

@ -12,6 +12,7 @@
#include "vm/unit_test.h"
namespace dart {
namespace compiler {
static RawObject* ExecuteTest(const Code& code) {
const intptr_t kTypeArgsLen = 0;
@ -68,8 +69,8 @@ static void GenerateDummyCode(Assembler* assembler, const Object& result) {
static void MakeDummyInstanceCall(Assembler* assembler, const Object& result) {
// Make a dummy function.
ObjectPoolWrapper object_pool_wrapper;
Assembler _assembler_(&object_pool_wrapper);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
GenerateDummyCode(&_assembler_, result);
const char* dummy_function_name = "dummy_instance_function";
const Function& dummy_instance_function =
@ -134,7 +135,7 @@ ASSEMBLER_TEST_GENERATE(StoreIntoObject, assembler) {
__ Frame(2);
__ Move(0, -kParamEndSlotFromFp - 1);
__ Move(1, -kParamEndSlotFromFp - 2);
__ StoreField(0, GrowableObjectArray::data_offset() / kWordSize, 1);
__ StoreField(0, GrowableObjectArray::data_offset() / target::kWordSize, 1);
__ Return(0);
}
@ -2513,6 +2514,7 @@ ASSEMBLER_TEST_RUN(DMax, test) {
#endif // defined(ARCH_IS_64_BIT)
} // namespace compiler
} // namespace dart
#endif // defined(TARGET_ARCH_DBC)

View file

@ -5,20 +5,30 @@
#include "vm/globals.h" // NOLINT
#if defined(TARGET_ARCH_IA32)
#define SHOULD_NOT_INCLUDE_RUNTIME
#include "vm/class_id.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/cpu.h"
#include "vm/heap/heap.h"
#include "vm/instructions.h"
#include "vm/memory_region.h"
#include "vm/runtime_entry.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
namespace dart {
#if !defined(DART_PRECOMPILED_RUNTIME)
DECLARE_FLAG(bool, inline_alloc);
#endif
namespace compiler {
using target::ClassTable;
using target::Heap;
using target::Instance;
using target::Instructions;
using target::Isolate;
using target::RawObject;
using target::Thread;
#if !defined(DART_PRECOMPILED_RUNTIME)
class DirectCallRelocation : public AssemblerFixup {
public:
@ -34,8 +44,7 @@ class DirectCallRelocation : public AssemblerFixup {
int32_t Assembler::jit_cookie() {
if (jit_cookie_ == 0) {
jit_cookie_ =
static_cast<int32_t>(Isolate::Current()->random()->NextUInt32());
jit_cookie_ = CreateJitCookie();
}
return jit_cookie_;
}
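CreateJitCookie() presumably wraps the random-cookie computation removed above behind the new compiler/runtime API surface; a minimal sketch under that assumption:

// Sketch only: assumed to produce the same value the assembler previously
// computed inline - a random 32-bit cookie used to obfuscate JIT immediates.
static int32_t CreateJitCookieSketch() {
  return static_cast<int32_t>(Isolate::Current()->random()->NextUInt32());
}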
@ -1759,7 +1768,7 @@ void Assembler::SubImmediate(Register reg, const Immediate& imm) {
void Assembler::Drop(intptr_t stack_elements) {
ASSERT(stack_elements >= 0);
if (stack_elements > 0) {
addl(ESP, Immediate(stack_elements * kWordSize));
addl(ESP, Immediate(stack_elements * target::kWordSize));
}
}
@ -1770,17 +1779,18 @@ void Assembler::LoadIsolate(Register dst) {
void Assembler::LoadObject(Register dst,
const Object& object,
bool movable_referent) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
ASSERT(IsOriginalObject(object));
// movable_referent: some references to VM heap objects may be patched with
// references to isolate-local objects (e.g., optimized static calls).
// We need to track such references since the latter may move during
// compaction.
if (object.IsSmi() || (object.InVMHeap() && !movable_referent)) {
movl(dst, Immediate(reinterpret_cast<int32_t>(object.raw())));
if (target::CanEmbedAsRawPointerInGeneratedCode(object) &&
!movable_referent) {
movl(dst, Immediate(target::ToRawPointer(object)));
} else {
ASSERT(object.IsNotTemporaryScopedHandle());
ASSERT(object.IsOld());
ASSERT(IsNotTemporaryScopedHandle(object));
ASSERT(IsInOldSpace(object));
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xB8 + dst);
buffer_.EmitObject(object);
@ -1788,25 +1798,23 @@ void Assembler::LoadObject(Register dst,
}
void Assembler::LoadObjectSafely(Register dst, const Object& object) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
if (Assembler::IsSafe(object)) {
LoadObject(dst, object);
} else {
int32_t cookie = jit_cookie();
movl(dst, Immediate(reinterpret_cast<int32_t>(object.raw()) ^ cookie));
ASSERT(IsOriginalObject(object));
if (target::IsSmi(object) && !IsSafeSmi(object)) {
const int32_t cookie = jit_cookie();
movl(dst, Immediate(target::ToRawSmi(object) ^ cookie));
xorl(dst, Immediate(cookie));
} else {
LoadObject(dst, object);
}
}
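The two instructions emitted for an unsafe smi rely on the XOR involution (x ^ c) ^ c == x, so the raw immediate never appears verbatim in the instruction stream; a small standalone check of that invariant:

#include <cassert>
#include <cstdint>

void CheckCookieObfuscation(int32_t raw_smi, int32_t cookie) {
  const int32_t emitted = raw_smi ^ cookie;  // immediate embedded by movl
  const int32_t decoded = emitted ^ cookie;  // value produced by xorl
  assert(decoded == raw_smi);
}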
void Assembler::PushObject(const Object& object) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
if (object.IsSmi() || object.InVMHeap()) {
pushl(Immediate(reinterpret_cast<int32_t>(object.raw())));
ASSERT(IsOriginalObject(object));
if (target::CanEmbedAsRawPointerInGeneratedCode(object)) {
pushl(Immediate(target::ToRawPointer(object)));
} else {
ASSERT(object.IsNotTemporaryScopedHandle());
ASSERT(object.IsOld());
ASSERT(IsNotTemporaryScopedHandle(object));
ASSERT(IsInOldSpace(object));
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x68);
buffer_.EmitObject(object);
@ -1814,13 +1822,12 @@ void Assembler::PushObject(const Object& object) {
}
void Assembler::CompareObject(Register reg, const Object& object) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
if (object.IsSmi() || object.InVMHeap()) {
cmpl(reg, Immediate(reinterpret_cast<int32_t>(object.raw())));
ASSERT(IsOriginalObject(object));
if (target::CanEmbedAsRawPointerInGeneratedCode(object)) {
cmpl(reg, Immediate(target::ToRawPointer(object)));
} else {
ASSERT(object.IsNotTemporaryScopedHandle());
ASSERT(object.IsOld());
ASSERT(IsNotTemporaryScopedHandle(object));
ASSERT(IsInOldSpace(object));
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
if (reg == EAX) {
EmitUint8(0x05 + (7 << 3));
@ -1846,8 +1853,9 @@ void Assembler::StoreIntoObjectFilter(Register object,
Stop("Unexpected Smi!");
Bind(&okay);
#endif
COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
(kOldObjectAlignmentOffset == 0));
COMPILE_ASSERT((target::ObjectAlignment::kNewObjectAlignmentOffset ==
target::kWordSize) &&
(target::ObjectAlignment::kOldObjectAlignmentOffset == 0));
// Write-barrier triggers if the value is in the new space (has bit set) and
// the object is in the old space (has bit cleared).
// To check that we could compute value & ~object and skip the write barrier
@ -1856,9 +1864,9 @@ void Assembler::StoreIntoObjectFilter(Register object,
// ~value | object instead and skip the write barrier if the bit is set.
notl(value);
orl(value, object);
testl(value, Immediate(kNewObjectAlignmentOffset));
testl(value, Immediate(target::ObjectAlignment::kNewObjectAlignmentOffset));
} else {
ASSERT(kNewObjectAlignmentOffset == 4);
ASSERT(target::ObjectAlignment::kNewObjectAlignmentOffset == 4);
ASSERT(kHeapObjectTag == 1);
// Detect value being ...101 and object being ...001.
andl(value, Immediate(7));
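A sketch (not the emitted code) of the predicate the notl/orl/testl sequence above evaluates, assuming the new-object alignment bit separates new-space from old-space addresses as the COMPILE_ASSERT states:

#include <cstdint>

// The barrier is required only when the value is new (bit set) and the
// holder object is old (bit clear); in every other combination the bit
// survives in ~value | object and the store may skip the barrier.
bool CanSkipWriteBarrierSketch(uintptr_t object, uintptr_t value,
                               uintptr_t new_space_bit) {
  return ((~value | object) & new_space_bit) != 0;
}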
@ -1951,28 +1959,18 @@ void Assembler::StoreIntoArray(Register object,
Bind(&done);
}
void Assembler::UnverifiedStoreOldObject(const Address& dest,
const Object& value) {
ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
ASSERT(value.IsOld());
ASSERT(!value.InVMHeap());
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xC7);
EmitOperand(0, dest);
buffer_.EmitObject(value);
}
void Assembler::StoreIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value) {
ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
if (value.IsSmi() || value.InVMHeap()) {
Immediate imm_value(reinterpret_cast<int32_t>(value.raw()));
ASSERT(IsOriginalObject(value));
if (target::CanEmbedAsRawPointerInGeneratedCode(value)) {
Immediate imm_value(target::ToRawPointer(value));
movl(dest, imm_value);
} else {
UnverifiedStoreOldObject(dest, value);
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0xC7);
EmitOperand(0, dest);
buffer_.EmitObject(value);
}
// No store buffer update.
}
@ -1989,14 +1987,14 @@ void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
}
void Assembler::ZeroInitSmiField(const Address& dest) {
Immediate zero(Smi::RawValue(0));
Immediate zero(target::ToRawSmi(0));
movl(dest, zero);
}
void Assembler::IncrementSmiField(const Address& dest, int32_t increment) {
// Note: FlowGraphCompiler::EdgeCounterIncrementSizeInBytes depends on
// the length of this instruction sequence.
Immediate inc_imm(Smi::RawValue(increment));
Immediate inc_imm(target::ToRawSmi(increment));
addl(dest, inc_imm);
}
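target::ToRawSmi presumably produces the tagged smi encoding used by these immediates; given the assembler's assumption that kHeapObjectTag == 1 (so smis carry a clear tag bit), a hypothetical sketch of that encoding:

#include <cstdint>

// Sketch only: smis keep a 0 in the tag (least significant) bit, so the raw
// form is the untagged value shifted left by one.
intptr_t ToRawSmiSketch(intptr_t value) {
  return value << 1;
}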
@ -2006,7 +2004,7 @@ void Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
pushl(Immediate(Utils::High32Bits(constant)));
pushl(Immediate(Utils::Low32Bits(constant)));
movsd(dst, Address(ESP, 0));
addl(ESP, Immediate(2 * kWordSize));
addl(ESP, Immediate(2 * target::kWordSize));
}
void Assembler::FloatNegate(XmmRegister f) {
@ -2105,7 +2103,7 @@ void Assembler::LeaveCallRuntimeFrame() {
// and ensure proper alignment of the stack frame.
// We need to restore it before restoring registers.
const intptr_t kPushedRegistersSize =
kNumberOfVolatileCpuRegisters * kWordSize +
kNumberOfVolatileCpuRegisters * target::kWordSize +
kNumberOfVolatileXmmRegisters * kFpuRegisterSize;
leal(ESP, Address(EBP, -kPushedRegistersSize));
@ -2133,8 +2131,8 @@ void Assembler::CallRuntime(const RuntimeEntry& entry,
}
void Assembler::Call(const Code& target, bool movable_target) {
LoadObject(CODE_REG, target, movable_target);
call(FieldAddress(CODE_REG, Code::entry_point_offset()));
LoadObject(CODE_REG, ToObject(target), movable_target);
call(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
}
void Assembler::CallToRuntime() {
@ -2142,12 +2140,12 @@ void Assembler::CallToRuntime() {
}
void Assembler::Jmp(const Code& target) {
const ExternalLabel label(target.EntryPoint());
const ExternalLabel label(target::Code::EntryPointOf(target));
jmp(&label);
}
void Assembler::J(Condition condition, const Code& target) {
const ExternalLabel label(target.EntryPoint());
const ExternalLabel label(target::Code::EntryPointOf(target));
j(condition, &label);
}
@ -2201,18 +2199,17 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
movl(temp_reg, Address(temp_reg, table_offset));
state_address = Address(temp_reg, state_offset);
testb(state_address, Immediate(ClassHeapStats::TraceAllocationMask()));
testb(state_address,
Immediate(target::ClassHeapStats::TraceAllocationMask()));
// We are tracing for this class, jump to the trace label which will use
// the allocation stub.
j(NOT_ZERO, trace, near_jump);
}
void Assembler::UpdateAllocationStats(intptr_t cid,
Register temp_reg,
Heap::Space space) {
void Assembler::UpdateAllocationStats(intptr_t cid, Register temp_reg) {
ASSERT(cid > 0);
intptr_t counter_offset =
ClassTable::CounterOffsetFor(cid, space == Heap::kNew);
ClassTable::CounterOffsetFor(cid, /*is_new_space=*/true);
ASSERT(temp_reg != kNoRegister);
LoadIsolate(temp_reg);
intptr_t table_offset =
@ -2223,23 +2220,21 @@ void Assembler::UpdateAllocationStats(intptr_t cid,
void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
Register size_reg,
Register temp_reg,
Heap::Space space) {
Register temp_reg) {
ASSERT(cid > 0);
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid, temp_reg, space);
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, space == Heap::kNew);
UpdateAllocationStats(cid, temp_reg);
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
addl(Address(temp_reg, size_offset), size_reg);
}
void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
intptr_t size_in_bytes,
Register temp_reg,
Heap::Space space) {
Register temp_reg) {
ASSERT(cid > 0);
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid, temp_reg, space);
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, space == Heap::kNew);
UpdateAllocationStats(cid, temp_reg);
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
addl(Address(temp_reg, size_offset), Immediate(size_in_bytes));
}
#endif // !PRODUCT
@ -2251,14 +2246,13 @@ void Assembler::TryAllocate(const Class& cls,
Register temp_reg) {
ASSERT(failure != NULL);
ASSERT(temp_reg != kNoRegister);
const intptr_t instance_size = cls.instance_size();
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
// If this allocation is traced, the program will jump to the failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(
MaybeTraceAllocation(cls.id(), temp_reg, failure, near_jump));
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
const classid_t cid = target::Class::GetId(cls);
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, near_jump));
movl(instance_reg, Address(THR, Thread::top_offset()));
addl(instance_reg, Immediate(instance_size));
// instance_reg: potential next object start.
@ -2267,15 +2261,13 @@ void Assembler::TryAllocate(const Class& cls,
// Successfully allocated the object, now update top to point to
// next object start and store the class in the class field of object.
movl(Address(THR, Thread::top_offset()), instance_reg);
NOT_IN_PRODUCT(UpdateAllocationStats(cls.id(), temp_reg, space));
NOT_IN_PRODUCT(UpdateAllocationStats(cid, temp_reg));
ASSERT(instance_size >= kHeapObjectTag);
subl(instance_reg, Immediate(instance_size - kHeapObjectTag));
uint32_t tags = 0;
tags = RawObject::SizeTag::update(instance_size, tags);
ASSERT(cls.id() != kIllegalCid);
tags = RawObject::ClassIdTag::update(cls.id(), tags);
tags = RawObject::NewBit::update(true, tags);
movl(FieldAddress(instance_reg, Object::tags_offset()), Immediate(tags));
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, instance_size);
movl(FieldAddress(instance_reg, target::Object::tags_offset()),
Immediate(tags));
} else {
jmp(failure);
}
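The inline tag construction removed above suggests what target::MakeTagWordForNewSpaceObject computes; a sketch under that assumption (the real helper lives in the compiler::target API and may differ in detail):

// Sketch mirroring the removed RawObject::*Tag::update sequence.
uint32_t MakeTagWordForNewSpaceObjectSketch(classid_t cid, intptr_t size) {
  uint32_t tags = 0;
  tags = RawObject::SizeTag::update(size, tags);    // encoded instance size
  tags = RawObject::ClassIdTag::update(cid, tags);  // class id of the object
  tags = RawObject::NewBit::update(true, tags);     // allocated in new space
  return tags;
}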
@ -2295,7 +2287,6 @@ void Assembler::TryAllocateArray(intptr_t cid,
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, near_jump));
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
movl(instance, Address(THR, Thread::top_offset()));
movl(end_address, instance);
@ -2312,22 +2303,20 @@ void Assembler::TryAllocateArray(intptr_t cid,
// next object start and initialize the object.
movl(Address(THR, Thread::top_offset()), end_address);
addl(instance, Immediate(kHeapObjectTag));
NOT_IN_PRODUCT(
UpdateAllocationStatsWithSize(cid, instance_size, temp_reg, space));
NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, instance_size, temp_reg));
// Initialize the tags.
uint32_t tags = 0;
tags = RawObject::ClassIdTag::update(cid, tags);
tags = RawObject::SizeTag::update(instance_size, tags);
tags = RawObject::NewBit::update(true, tags);
movl(FieldAddress(instance, Object::tags_offset()), Immediate(tags));
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, instance_size);
movl(FieldAddress(instance, target::Object::tags_offset()),
Immediate(tags));
} else {
jmp(failure);
}
}
void Assembler::PushCodeObject() {
ASSERT(code_.IsNotTemporaryScopedHandle());
ASSERT(IsNotTemporaryScopedHandle(code_));
AssemblerBuffer::EnsureCapacity ensured(&buffer_);
EmitUint8(0x68);
buffer_.EmitObject(code_);
@ -2363,17 +2352,6 @@ void Assembler::EnterStubFrame() {
EnterDartFrame(0);
}
void Assembler::Stop(const char* message) {
if (FLAG_print_stop_message) {
pushl(EAX); // Preserve EAX.
movl(EAX, Immediate(reinterpret_cast<int32_t>(message)));
Call(StubCode::PrintStopMessage()); // Passing message in EAX.
popl(EAX); // Restore EAX.
}
// Emit the int3 instruction.
int3(); // Execution can be resumed with the 'cont' command in gdb.
}
void Assembler::EmitOperand(int rm, const Operand& operand) {
ASSERT(rm >= 0 && rm < 8);
const intptr_t length = operand.length_;
@ -2461,7 +2439,7 @@ void Assembler::LoadClassId(Register result, Register object) {
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
movzxw(result, FieldAddress(object, class_id_offset));
}
@ -2471,7 +2449,7 @@ void Assembler::LoadClassById(Register result, Register class_id) {
const intptr_t offset =
Isolate::class_table_offset() + ClassTable::table_offset();
movl(result, Address(result, offset));
ASSERT(kSizeOfClassPairLog2 == 3);
ASSERT(ClassTable::kSizeOfClassPairLog2 == 3);
movl(result, Address(result, class_id, TIMES_8, 0));
}
@ -2490,7 +2468,7 @@ void Assembler::SmiUntagOrCheckClass(Register object,
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
// Untag optimistically. Tag bit is shifted into the CARRY.
SmiUntag(object);
@ -2542,7 +2520,7 @@ void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
jmp(&join, Assembler::kNearJump);
Bind(&smi);
movl(result, Immediate(Smi::RawValue(kSmiCid)));
movl(result, Immediate(target::ToRawSmi(kSmiCid)));
Bind(&join);
} else {
@ -2623,6 +2601,7 @@ const char* Assembler::FpuRegisterName(FpuRegister reg) {
return xmm_reg_names[reg];
}
} // namespace compiler
} // namespace dart
#endif // defined(TARGET_ARCH_IA32)

View file

@ -13,11 +13,10 @@
#include "platform/utils.h"
#include "vm/constants_ia32.h"
#include "vm/constants_x86.h"
#include "vm/pointer_tagging.h"
namespace dart {
// Forward declarations.
class RuntimeEntry;
namespace compiler {
class Immediate : public ValueObject {
public:
@ -222,11 +221,11 @@ class FieldAddress : public Address {
class Assembler : public AssemblerBase {
public:
explicit Assembler(ObjectPoolWrapper* object_pool_wrapper,
explicit Assembler(ObjectPoolBuilder* object_pool_builder,
bool use_far_branches = false)
: AssemblerBase(object_pool_wrapper),
: AssemblerBase(object_pool_builder),
jit_cookie_(0),
code_(Code::ZoneHandle()) {
code_(NewZoneHandle(ThreadState::Current()->zone())) {
// This mode is only needed and implemented for ARM.
ASSERT(!use_far_branches);
}
@ -689,7 +688,7 @@ class Assembler : public AssemblerBase {
intptr_t extra_disp = 0);
static Address VMTagAddress() {
return Address(THR, Thread::vm_tag_offset());
return Address(THR, target::Thread::vm_tag_offset());
}
/*
@ -774,18 +773,14 @@ class Assembler : public AssemblerBase {
Label* trace,
bool near_jump);
void UpdateAllocationStats(intptr_t cid,
Register temp_reg,
Heap::Space space);
void UpdateAllocationStats(intptr_t cid, Register temp_reg);
void UpdateAllocationStatsWithSize(intptr_t cid,
Register size_reg,
Register temp_reg,
Heap::Space space);
Register temp_reg);
void UpdateAllocationStatsWithSize(intptr_t cid,
intptr_t instance_size,
Register temp_reg,
Heap::Space space);
Register temp_reg);
// Inlined allocation of an instance of class 'cls', code has no runtime
// calls. Jump to 'failure' if the instance cannot be allocated here.
@ -814,29 +809,23 @@ class Assembler : public AssemblerBase {
static const char* RegisterName(Register reg);
static const char* FpuRegisterName(FpuRegister reg);
// Smis that do not fit into 17 bits (16 bits of payload) are unsafe.
// Check if the given value is an integer value that can be directly
// embedded into the code without additional XORing with jit_cookie.
// We consider 16-bit integers, powers of two and corresponding masks
// as safe values that can be embedded into the code object.
static bool IsSafeSmi(const Object& object) {
if (!object.IsSmi()) {
return false;
int64_t value;
if (HasIntegerValue(object, &value)) {
return Utils::IsInt(16, value) || Utils::IsPowerOfTwo(value) ||
Utils::IsPowerOfTwo(value + 1);
}
if (Utils::IsInt(17, reinterpret_cast<intptr_t>(object.raw()))) {
return true;
}
// Single bit smis (powers of two) and corresponding masks are safe.
const intptr_t value = Smi::Cast(object).Value();
if (Utils::IsPowerOfTwo(value) || Utils::IsPowerOfTwo(value + 1)) {
return true;
}
return false;
}
static bool IsSafe(const Object& object) {
return !object.IsSmi() || IsSafeSmi(object);
return !target::IsSmi(object) || IsSafeSmi(object);
}
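A standalone restatement of the rewritten IsSafeSmi predicate with a few example values, assuming HasIntegerValue simply extracts the integer payload of the handle:

#include <cassert>
#include <cstdint>

bool IsSafeSmiValueSketch(int64_t value) {
  auto is_pow2 = [](int64_t v) { return v > 0 && (v & (v - 1)) == 0; };
  const bool fits16 = value >= INT16_MIN && value <= INT16_MAX;
  return fits16 || is_pow2(value) || is_pow2(value + 1);
}

int main() {
  assert(IsSafeSmiValueSketch(42));        // fits in 16 signed bits
  assert(IsSafeSmiValueSketch(0x10000));   // power of two
  assert(IsSafeSmiValueSketch(0xFFFF));    // mask: power of two minus one
  assert(!IsSafeSmiValueSketch(0x12345));  // needs the jit-cookie XOR path
  return 0;
}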
void set_code_object(const Code& code) { code_ ^= code.raw(); }
Object& GetSelfHandle() const { return code_; }
void PushCodeObject();
@ -880,12 +869,10 @@ class Assembler : public AssemblerBase {
CanBeSmi can_be_smi,
BarrierFilterMode barrier_filter_mode);
void UnverifiedStoreOldObject(const Address& dest, const Object& value);
int32_t jit_cookie();
int32_t jit_cookie_;
Code& code_;
Object& code_;
DISALLOW_ALLOCATION();
DISALLOW_COPY_AND_ASSIGN(Assembler);
@ -916,6 +903,12 @@ inline void Assembler::EmitOperandSizeOverride() {
EmitUint8(0x66);
}
} // namespace compiler
using compiler::Address;
using compiler::FieldAddress;
using compiler::Immediate;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_IA32_H_

View file

@ -19,6 +19,7 @@
#endif
namespace dart {
namespace compiler {
#define __ assembler->
@ -36,7 +37,7 @@ ASSEMBLER_TEST_RUN(Simple, test) {
}
ASSEMBLER_TEST_GENERATE(ReadArgument, assembler) {
__ movl(EAX, Address(ESP, kWordSize));
__ movl(EAX, Address(ESP, target::kWordSize));
__ ret();
}
@ -54,21 +55,21 @@ ASSEMBLER_TEST_GENERATE(AddressingModes, assembler) {
__ movl(EAX, Address(EBP, 0));
__ movl(EAX, Address(EAX, 0));
__ movl(EAX, Address(ESP, kWordSize));
__ movl(EAX, Address(EBP, kWordSize));
__ movl(EAX, Address(EAX, kWordSize));
__ movl(EAX, Address(ESP, target::kWordSize));
__ movl(EAX, Address(EBP, target::kWordSize));
__ movl(EAX, Address(EAX, target::kWordSize));
__ movl(EAX, Address(ESP, -kWordSize));
__ movl(EAX, Address(EBP, -kWordSize));
__ movl(EAX, Address(EAX, -kWordSize));
__ movl(EAX, Address(ESP, -target::kWordSize));
__ movl(EAX, Address(EBP, -target::kWordSize));
__ movl(EAX, Address(EAX, -target::kWordSize));
__ movl(EAX, Address(ESP, 256 * kWordSize));
__ movl(EAX, Address(EBP, 256 * kWordSize));
__ movl(EAX, Address(EAX, 256 * kWordSize));
__ movl(EAX, Address(ESP, 256 * target::kWordSize));
__ movl(EAX, Address(EBP, 256 * target::kWordSize));
__ movl(EAX, Address(EAX, 256 * target::kWordSize));
__ movl(EAX, Address(ESP, -256 * kWordSize));
__ movl(EAX, Address(EBP, -256 * kWordSize));
__ movl(EAX, Address(EAX, -256 * kWordSize));
__ movl(EAX, Address(ESP, -256 * target::kWordSize));
__ movl(EAX, Address(EBP, -256 * target::kWordSize));
__ movl(EAX, Address(EAX, -256 * target::kWordSize));
__ movl(EAX, Address(EAX, TIMES_1));
__ movl(EAX, Address(EAX, TIMES_2));
@ -78,11 +79,11 @@ ASSEMBLER_TEST_GENERATE(AddressingModes, assembler) {
__ movl(EAX, Address(EBP, TIMES_2));
__ movl(EAX, Address(EAX, TIMES_2));
__ movl(EAX, Address(EBP, TIMES_2, kWordSize));
__ movl(EAX, Address(EAX, TIMES_2, kWordSize));
__ movl(EAX, Address(EBP, TIMES_2, target::kWordSize));
__ movl(EAX, Address(EAX, TIMES_2, target::kWordSize));
__ movl(EAX, Address(EBP, TIMES_2, 256 * kWordSize));
__ movl(EAX, Address(EAX, TIMES_2, 256 * kWordSize));
__ movl(EAX, Address(EBP, TIMES_2, 256 * target::kWordSize));
__ movl(EAX, Address(EAX, TIMES_2, 256 * target::kWordSize));
__ movl(EAX, Address(EAX, EBP, TIMES_2, 0));
__ movl(EAX, Address(EAX, EAX, TIMES_2, 0));
@ -91,19 +92,19 @@ ASSEMBLER_TEST_GENERATE(AddressingModes, assembler) {
__ movl(EAX, Address(ESP, EBP, TIMES_2, 0));
__ movl(EAX, Address(ESP, EAX, TIMES_2, 0));
__ movl(EAX, Address(EAX, EBP, TIMES_2, kWordSize));
__ movl(EAX, Address(EAX, EAX, TIMES_2, kWordSize));
__ movl(EAX, Address(EBP, EBP, TIMES_2, kWordSize));
__ movl(EAX, Address(EBP, EAX, TIMES_2, kWordSize));
__ movl(EAX, Address(ESP, EBP, TIMES_2, kWordSize));
__ movl(EAX, Address(ESP, EAX, TIMES_2, kWordSize));
__ movl(EAX, Address(EAX, EBP, TIMES_2, target::kWordSize));
__ movl(EAX, Address(EAX, EAX, TIMES_2, target::kWordSize));
__ movl(EAX, Address(EBP, EBP, TIMES_2, target::kWordSize));
__ movl(EAX, Address(EBP, EAX, TIMES_2, target::kWordSize));
__ movl(EAX, Address(ESP, EBP, TIMES_2, target::kWordSize));
__ movl(EAX, Address(ESP, EAX, TIMES_2, target::kWordSize));
__ movl(EAX, Address(EAX, EBP, TIMES_2, 256 * kWordSize));
__ movl(EAX, Address(EAX, EAX, TIMES_2, 256 * kWordSize));
__ movl(EAX, Address(EBP, EBP, TIMES_2, 256 * kWordSize));
__ movl(EAX, Address(EBP, EAX, TIMES_2, 256 * kWordSize));
__ movl(EAX, Address(ESP, EBP, TIMES_2, 256 * kWordSize));
__ movl(EAX, Address(ESP, EAX, TIMES_2, 256 * kWordSize));
__ movl(EAX, Address(EAX, EBP, TIMES_2, 256 * target::kWordSize));
__ movl(EAX, Address(EAX, EAX, TIMES_2, 256 * target::kWordSize));
__ movl(EAX, Address(EBP, EBP, TIMES_2, 256 * target::kWordSize));
__ movl(EAX, Address(EBP, EAX, TIMES_2, 256 * target::kWordSize));
__ movl(EAX, Address(ESP, EBP, TIMES_2, 256 * target::kWordSize));
__ movl(EAX, Address(ESP, EAX, TIMES_2, 256 * target::kWordSize));
}
ASSEMBLER_TEST_RUN(AddressingModes, test) {
@ -376,11 +377,11 @@ ASSEMBLER_TEST_RUN(Decrement, test) {
}
ASSEMBLER_TEST_GENERATE(AddressBinOp, assembler) {
__ movl(EAX, Address(ESP, kWordSize));
__ addl(EAX, Address(ESP, kWordSize));
__ movl(EAX, Address(ESP, target::kWordSize));
__ addl(EAX, Address(ESP, target::kWordSize));
__ incl(EAX);
__ subl(EAX, Address(ESP, kWordSize));
__ imull(EAX, Address(ESP, kWordSize));
__ subl(EAX, Address(ESP, target::kWordSize));
__ imull(EAX, Address(ESP, target::kWordSize));
__ ret();
}
@ -480,7 +481,7 @@ ASSEMBLER_TEST_RUN(Negate, test) {
}
ASSEMBLER_TEST_GENERATE(BitScanReverse, assembler) {
__ movl(ECX, Address(ESP, kWordSize));
__ movl(ECX, Address(ESP, target::kWordSize));
__ movl(EAX, Immediate(666)); // Marker for conditional write.
__ bsrl(EAX, ECX);
__ ret();
@ -538,7 +539,7 @@ ASSEMBLER_TEST_GENERATE(MoveExtendMemory, assembler) {
__ movzxb(EAX, Address(ESP, 0)); // EAX = 0xff
__ movsxw(EBX, Address(ESP, 0)); // EBX = -1
__ movzxw(ECX, Address(ESP, 0)); // ECX = 0xffff
__ addl(ESP, Immediate(kWordSize));
__ addl(ESP, Immediate(target::kWordSize));
__ addl(EBX, ECX);
__ addl(EAX, EBX);
@ -584,7 +585,7 @@ ASSEMBLER_TEST_GENERATE(Bitwise, assembler) {
__ pushl(Immediate(0x1C));
__ xorl(ECX, Address(ESP, 0)); // 0x65B.
__ popl(EAX); // Discard.
__ movl(EAX, Address(ESP, kWordSize));
__ movl(EAX, Address(ESP, target::kWordSize));
__ movl(EDX, Immediate(0xB0));
__ orl(Address(EAX, 0), EDX);
__ movl(EAX, ECX);
@ -762,7 +763,7 @@ ASSEMBLER_TEST_GENERATE(LogicalOps, assembler) {
__ Bind(&donetest13);
Label donetest14;
__ subl(ESP, Immediate(kWordSize));
__ subl(ESP, Immediate(target::kWordSize));
__ movl(Address(ESP, 0), Immediate(0x80000000));
__ movl(EAX, Immediate(0));
__ movl(ECX, Immediate(3));
@ -772,10 +773,10 @@ ASSEMBLER_TEST_GENERATE(LogicalOps, assembler) {
__ j(EQUAL, &donetest14);
__ int3();
__ Bind(&donetest14);
__ addl(ESP, Immediate(kWordSize));
__ addl(ESP, Immediate(target::kWordSize));
Label donetest15;
__ subl(ESP, Immediate(kWordSize));
__ subl(ESP, Immediate(target::kWordSize));
__ movl(Address(ESP, 0), Immediate(0xFF000000));
__ movl(EAX, Immediate(-1));
__ movl(ECX, Immediate(2));
@ -785,7 +786,7 @@ ASSEMBLER_TEST_GENERATE(LogicalOps, assembler) {
__ j(EQUAL, &donetest15);
__ int3();
__ Bind(&donetest15);
__ addl(ESP, Immediate(kWordSize));
__ addl(ESP, Immediate(target::kWordSize));
Label donetest16;
__ movl(EDX, Immediate(0x80000000));
@ -2645,10 +2646,10 @@ ASSEMBLER_TEST_RUN(PackedSingleToDouble, test) {
ASSEMBLER_TEST_GENERATE(SingleFPOperationsStack, assembler) {
__ movl(EAX, Immediate(bit_cast<int32_t, float>(12.3f)));
__ movd(XMM0, EAX);
__ addss(XMM0, Address(ESP, kWordSize)); // 15.7f
__ mulss(XMM0, Address(ESP, kWordSize)); // 53.38f
__ subss(XMM0, Address(ESP, kWordSize)); // 49.98f
__ divss(XMM0, Address(ESP, kWordSize)); // 14.7f
__ addss(XMM0, Address(ESP, target::kWordSize)); // 15.7f
__ mulss(XMM0, Address(ESP, target::kWordSize)); // 53.38f
__ subss(XMM0, Address(ESP, target::kWordSize)); // 49.98f
__ divss(XMM0, Address(ESP, target::kWordSize)); // 14.7f
__ pushl(EAX);
__ movss(Address(ESP, 0), XMM0);
__ flds(Address(ESP, 0));
@ -2689,7 +2690,7 @@ ASSEMBLER_TEST_GENERATE(DoubleFPMoves, assembler) {
__ movsd(XMM6, XMM5);
__ movsd(XMM7, XMM6);
__ movl(Address(ESP, 0), Immediate(0));
__ movl(Address(ESP, kWordSize), Immediate(0));
__ movl(Address(ESP, target::kWordSize), Immediate(0));
__ movsd(XMM0, Address(ESP, 0));
__ movsd(Address(ESP, 0), XMM7);
__ movsd(XMM7, Address(ESP, 0));
@ -2701,7 +2702,7 @@ ASSEMBLER_TEST_GENERATE(DoubleFPMoves, assembler) {
__ movaps(XMM1, XMM2);
__ movaps(XMM0, XMM1);
__ movl(Address(ESP, 0), Immediate(0));
__ movl(Address(ESP, kWordSize), Immediate(0));
__ movl(Address(ESP, target::kWordSize), Immediate(0));
__ movsd(Address(ESP, 0), XMM0);
__ fldl(Address(ESP, 0));
__ popl(EAX);
@ -2755,7 +2756,7 @@ ASSEMBLER_TEST_GENERATE(DoubleFPUStackMoves, assembler) {
__ pushl(EAX);
__ fldl(Address(ESP, 0));
__ movl(Address(ESP, 0), Immediate(0));
__ movl(Address(ESP, kWordSize), Immediate(0));
__ movl(Address(ESP, target::kWordSize), Immediate(0));
__ fstpl(Address(ESP, 0));
__ popl(EAX);
__ popl(EDX);
@ -2844,10 +2845,10 @@ ASSEMBLER_TEST_GENERATE(DoubleFPOperationsStack, assembler) {
__ popl(EAX);
__ popl(EAX);
__ addsd(XMM0, Address(ESP, kWordSize)); // 15.7
__ mulsd(XMM0, Address(ESP, kWordSize)); // 53.38
__ subsd(XMM0, Address(ESP, kWordSize)); // 49.98
__ divsd(XMM0, Address(ESP, kWordSize)); // 14.7
__ addsd(XMM0, Address(ESP, target::kWordSize)); // 15.7
__ mulsd(XMM0, Address(ESP, target::kWordSize)); // 53.38
__ subsd(XMM0, Address(ESP, target::kWordSize)); // 49.98
__ divsd(XMM0, Address(ESP, target::kWordSize)); // 14.7
__ pushl(EAX);
__ pushl(EAX);
@ -2913,7 +2914,7 @@ ASSEMBLER_TEST_RUN(IntToDoubleConversion, test) {
}
ASSEMBLER_TEST_GENERATE(IntToDoubleConversion2, assembler) {
__ filds(Address(ESP, kWordSize));
__ filds(Address(ESP, target::kWordSize));
__ ret();
}
@ -3004,7 +3005,7 @@ ASSEMBLER_TEST_RUN(IntToFloatConversion, test) {
}
ASSEMBLER_TEST_GENERATE(FloatToIntConversionRound, assembler) {
__ movsd(XMM1, Address(ESP, kWordSize));
__ movsd(XMM1, Address(ESP, target::kWordSize));
__ cvtss2si(EDX, XMM1);
__ movl(EAX, EDX);
__ ret();
@ -3025,7 +3026,7 @@ ASSEMBLER_TEST_RUN(FloatToIntConversionRound, test) {
}
ASSEMBLER_TEST_GENERATE(FloatToIntConversionTrunc, assembler) {
__ movsd(XMM1, Address(ESP, kWordSize));
__ movsd(XMM1, Address(ESP, target::kWordSize));
__ cvttss2si(EDX, XMM1);
__ movl(EAX, EDX);
__ ret();
@ -3286,7 +3287,7 @@ ASSEMBLER_TEST_RUN(DoubleToFloatConversion, test) {
}
ASSEMBLER_TEST_GENERATE(DoubleToIntConversionRound, assembler) {
__ movsd(XMM3, Address(ESP, kWordSize));
__ movsd(XMM3, Address(ESP, target::kWordSize));
__ cvtsd2si(EAX, XMM3);
__ ret();
}
@ -3305,7 +3306,7 @@ ASSEMBLER_TEST_RUN(DoubleToIntConversionRound, test) {
}
ASSEMBLER_TEST_GENERATE(DoubleToIntConversionTrunc, assembler) {
__ movsd(XMM3, Address(ESP, kWordSize));
__ movsd(XMM3, Address(ESP, target::kWordSize));
__ cvttsd2si(EAX, XMM3);
__ ret();
}
@ -3324,7 +3325,7 @@ ASSEMBLER_TEST_RUN(DoubleToIntConversionTrunc, test) {
}
ASSEMBLER_TEST_GENERATE(DoubleToDoubleTrunc, assembler) {
__ movsd(XMM3, Address(ESP, kWordSize));
__ movsd(XMM3, Address(ESP, target::kWordSize));
__ roundsd(XMM2, XMM3, Assembler::kRoundToZero);
__ pushl(EAX);
__ pushl(EAX);
@ -3386,7 +3387,7 @@ ASSEMBLER_TEST_RUN(GlobalAddress, test) {
}
ASSEMBLER_TEST_GENERATE(Sine, assembler) {
__ flds(Address(ESP, kWordSize));
__ flds(Address(ESP, target::kWordSize));
__ fsin();
__ ret();
}
@ -3403,7 +3404,7 @@ ASSEMBLER_TEST_RUN(Sine, test) {
}
ASSEMBLER_TEST_GENERATE(Cosine, assembler) {
__ flds(Address(ESP, kWordSize));
__ flds(Address(ESP, target::kWordSize));
__ fcos();
__ ret();
}
@ -3420,9 +3421,9 @@ ASSEMBLER_TEST_RUN(Cosine, test) {
}
ASSEMBLER_TEST_GENERATE(SinCos, assembler) {
__ fldl(Address(ESP, kWordSize));
__ fldl(Address(ESP, target::kWordSize));
__ fsincos();
__ subl(ESP, Immediate(2 * kWordSize));
__ subl(ESP, Immediate(2 * target::kWordSize));
__ fstpl(Address(ESP, 0)); // cos result.
__ movsd(XMM0, Address(ESP, 0));
__ fstpl(Address(ESP, 0)); // sin result.
@ -3430,7 +3431,7 @@ ASSEMBLER_TEST_GENERATE(SinCos, assembler) {
__ subsd(XMM1, XMM0); // sin - cos.
__ movsd(Address(ESP, 0), XMM1);
__ fldl(Address(ESP, 0));
__ addl(ESP, Immediate(2 * kWordSize));
__ addl(ESP, Immediate(2 * target::kWordSize));
__ ret();
}
@ -3456,7 +3457,7 @@ ASSEMBLER_TEST_RUN(SinCos, test) {
}
ASSEMBLER_TEST_GENERATE(Tangent, assembler) {
__ fldl(Address(ESP, kWordSize));
__ fldl(Address(ESP, target::kWordSize));
__ fptan();
__ ffree(0);
__ fincstp();
@ -3477,7 +3478,7 @@ ASSEMBLER_TEST_RUN(Tangent, test) {
}
ASSEMBLER_TEST_GENERATE(SquareRootFloat, assembler) {
__ movss(XMM0, Address(ESP, kWordSize));
__ movss(XMM0, Address(ESP, target::kWordSize));
__ sqrtss(XMM1, XMM0);
__ pushl(EAX);
__ movss(Address(ESP, 0), XMM1);
@ -3502,7 +3503,7 @@ ASSEMBLER_TEST_RUN(SquareRootFloat, test) {
}
ASSEMBLER_TEST_GENERATE(SquareRootDouble, assembler) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ sqrtsd(XMM1, XMM0);
__ pushl(EAX);
__ pushl(EAX);
@ -3563,7 +3564,7 @@ ASSEMBLER_TEST_RUN(XmmAlu, test) {
}
ASSEMBLER_TEST_GENERATE(FloatNegate, assembler) {
__ movss(XMM0, Address(ESP, kWordSize));
__ movss(XMM0, Address(ESP, target::kWordSize));
__ FloatNegate(XMM0);
__ pushl(EAX);
__ movss(Address(ESP, 0), XMM0);
@ -3588,7 +3589,7 @@ ASSEMBLER_TEST_RUN(FloatNegate, test) {
}
ASSEMBLER_TEST_GENERATE(DoubleNegate, assembler) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ DoubleNegate(XMM0);
__ pushl(EAX);
__ pushl(EAX);
@ -3617,8 +3618,8 @@ ASSEMBLER_TEST_RUN(DoubleNegate, test) {
}
ASSEMBLER_TEST_GENERATE(LongMulReg, assembler) {
__ movl(ECX, Address(ESP, kWordSize));
__ movl(EAX, Address(ESP, 2 * kWordSize));
__ movl(ECX, Address(ESP, target::kWordSize));
__ movl(EAX, Address(ESP, 2 * target::kWordSize));
__ imull(ECX);
__ ret();
}
@ -3638,8 +3639,8 @@ ASSEMBLER_TEST_RUN(LongMulReg, test) {
}
ASSEMBLER_TEST_GENERATE(LongMulAddress, assembler) {
__ movl(EAX, Address(ESP, 2 * kWordSize));
__ imull(Address(ESP, kWordSize));
__ movl(EAX, Address(ESP, 2 * target::kWordSize));
__ imull(Address(ESP, target::kWordSize));
__ ret();
}
@ -3657,8 +3658,8 @@ ASSEMBLER_TEST_RUN(LongMulAddress, test) {
}
ASSEMBLER_TEST_GENERATE(LongUnsignedMulReg, assembler) {
__ movl(ECX, Address(ESP, kWordSize));
__ movl(EAX, Address(ESP, 2 * kWordSize));
__ movl(ECX, Address(ESP, target::kWordSize));
__ movl(EAX, Address(ESP, 2 * target::kWordSize));
__ mull(ECX);
__ ret();
}
@ -3683,8 +3684,8 @@ ASSEMBLER_TEST_RUN(LongUnsignedMulReg, test) {
}
ASSEMBLER_TEST_GENERATE(LongUnsignedMulAddress, assembler) {
__ movl(EAX, Address(ESP, 2 * kWordSize));
__ mull(Address(ESP, kWordSize));
__ movl(EAX, Address(ESP, 2 * target::kWordSize));
__ mull(Address(ESP, target::kWordSize));
__ ret();
}
@ -3710,10 +3711,10 @@ ASSEMBLER_TEST_RUN(LongUnsignedMulAddress, test) {
ASSEMBLER_TEST_GENERATE(LongAddReg, assembler) {
// Preserve clobbered callee-saved register (EBX).
__ pushl(EBX);
__ movl(EAX, Address(ESP, 2 * kWordSize)); // left low.
__ movl(EDX, Address(ESP, 3 * kWordSize)); // left high.
__ movl(ECX, Address(ESP, 4 * kWordSize)); // right low.
__ movl(EBX, Address(ESP, 5 * kWordSize)); // right high
__ movl(EAX, Address(ESP, 2 * target::kWordSize)); // left low.
__ movl(EDX, Address(ESP, 3 * target::kWordSize)); // left high.
__ movl(ECX, Address(ESP, 4 * target::kWordSize)); // right low.
__ movl(EBX, Address(ESP, 5 * target::kWordSize)); // right high
__ addl(EAX, ECX);
__ adcl(EDX, EBX);
__ popl(EBX);
@ -3744,10 +3745,10 @@ ASSEMBLER_TEST_RUN(LongAddReg, test) {
}
ASSEMBLER_TEST_GENERATE(LongAddAddress, assembler) {
__ movl(EAX, Address(ESP, 1 * kWordSize)); // left low.
__ movl(EDX, Address(ESP, 2 * kWordSize)); // left high.
__ addl(EAX, Address(ESP, 3 * kWordSize)); // low.
__ adcl(EDX, Address(ESP, 4 * kWordSize)); // high.
__ movl(EAX, Address(ESP, 1 * target::kWordSize)); // left low.
__ movl(EDX, Address(ESP, 2 * target::kWordSize)); // left high.
__ addl(EAX, Address(ESP, 3 * target::kWordSize)); // low.
__ adcl(EDX, Address(ESP, 4 * target::kWordSize)); // high.
// Result is in EAX/EDX.
__ ret();
}
@ -3773,10 +3774,10 @@ ASSEMBLER_TEST_RUN(LongAddAddress, test) {
ASSEMBLER_TEST_GENERATE(LongSubReg, assembler) {
// Preserve clobbered callee-saved register (EBX).
__ pushl(EBX);
__ movl(EAX, Address(ESP, 2 * kWordSize)); // left low.
__ movl(EDX, Address(ESP, 3 * kWordSize)); // left high.
__ movl(ECX, Address(ESP, 4 * kWordSize)); // right low.
__ movl(EBX, Address(ESP, 5 * kWordSize)); // right high
__ movl(EAX, Address(ESP, 2 * target::kWordSize)); // left low.
__ movl(EDX, Address(ESP, 3 * target::kWordSize)); // left high.
__ movl(ECX, Address(ESP, 4 * target::kWordSize)); // right low.
__ movl(EBX, Address(ESP, 5 * target::kWordSize)); // right high
__ subl(EAX, ECX);
__ sbbl(EDX, EBX);
__ popl(EBX);
@ -3807,10 +3808,10 @@ ASSEMBLER_TEST_RUN(LongSubReg, test) {
}
ASSEMBLER_TEST_GENERATE(LongSubAddress, assembler) {
__ movl(EAX, Address(ESP, 1 * kWordSize)); // left low.
__ movl(EDX, Address(ESP, 2 * kWordSize)); // left high.
__ subl(EAX, Address(ESP, 3 * kWordSize)); // low.
__ sbbl(EDX, Address(ESP, 4 * kWordSize)); // high.
__ movl(EAX, Address(ESP, 1 * target::kWordSize)); // left low.
__ movl(EDX, Address(ESP, 2 * target::kWordSize)); // left high.
__ subl(EAX, Address(ESP, 3 * target::kWordSize)); // low.
__ sbbl(EDX, Address(ESP, 4 * target::kWordSize)); // high.
// Result is in EAX/EDX.
__ ret();
}
@ -3836,18 +3837,18 @@ ASSEMBLER_TEST_RUN(LongSubAddress, test) {
ASSEMBLER_TEST_GENERATE(LongSubAddress2, assembler) {
// Preserve clobbered callee-saved register (EBX).
__ pushl(EBX);
__ movl(EAX, Address(ESP, 2 * kWordSize)); // left low.
__ movl(EDX, Address(ESP, 3 * kWordSize)); // left high.
__ movl(ECX, Address(ESP, 4 * kWordSize)); // right low.
__ movl(EBX, Address(ESP, 5 * kWordSize)); // right high
__ subl(ESP, Immediate(2 * kWordSize));
__ movl(Address(ESP, 0 * kWordSize), EAX); // left low.
__ movl(Address(ESP, 1 * kWordSize), EDX); // left high.
__ subl(Address(ESP, 0 * kWordSize), ECX);
__ sbbl(Address(ESP, 1 * kWordSize), EBX);
__ movl(EAX, Address(ESP, 0 * kWordSize));
__ movl(EDX, Address(ESP, 1 * kWordSize));
__ addl(ESP, Immediate(2 * kWordSize));
__ movl(EAX, Address(ESP, 2 * target::kWordSize)); // left low.
__ movl(EDX, Address(ESP, 3 * target::kWordSize)); // left high.
__ movl(ECX, Address(ESP, 4 * target::kWordSize)); // right low.
__ movl(EBX, Address(ESP, 5 * target::kWordSize)); // right high
__ subl(ESP, Immediate(2 * target::kWordSize));
__ movl(Address(ESP, 0 * target::kWordSize), EAX); // left low.
__ movl(Address(ESP, 1 * target::kWordSize), EDX); // left high.
__ subl(Address(ESP, 0 * target::kWordSize), ECX);
__ sbbl(Address(ESP, 1 * target::kWordSize), EBX);
__ movl(EAX, Address(ESP, 0 * target::kWordSize));
__ movl(EDX, Address(ESP, 1 * target::kWordSize));
__ addl(ESP, Immediate(2 * target::kWordSize));
__ popl(EBX);
// Result is in EAX/EDX.
__ ret();
@ -3884,18 +3885,18 @@ ASSEMBLER_TEST_RUN(LongSubAddress2, test) {
ASSEMBLER_TEST_GENERATE(LongAddAddress2, assembler) {
// Preserve clobbered callee-saved register (EBX).
__ pushl(EBX);
__ movl(EAX, Address(ESP, 2 * kWordSize)); // left low.
__ movl(EDX, Address(ESP, 3 * kWordSize)); // left high.
__ movl(ECX, Address(ESP, 4 * kWordSize)); // right low.
__ movl(EBX, Address(ESP, 5 * kWordSize)); // right high
__ subl(ESP, Immediate(2 * kWordSize));
__ movl(Address(ESP, 0 * kWordSize), EAX); // left low.
__ movl(Address(ESP, 1 * kWordSize), EDX); // left high.
__ addl(Address(ESP, 0 * kWordSize), ECX);
__ adcl(Address(ESP, 1 * kWordSize), EBX);
__ movl(EAX, Address(ESP, 0 * kWordSize));
__ movl(EDX, Address(ESP, 1 * kWordSize));
__ addl(ESP, Immediate(2 * kWordSize));
__ movl(EAX, Address(ESP, 2 * target::kWordSize)); // left low.
__ movl(EDX, Address(ESP, 3 * target::kWordSize)); // left high.
__ movl(ECX, Address(ESP, 4 * target::kWordSize)); // right low.
__ movl(EBX, Address(ESP, 5 * target::kWordSize)); // right high
__ subl(ESP, Immediate(2 * target::kWordSize));
__ movl(Address(ESP, 0 * target::kWordSize), EAX); // left low.
__ movl(Address(ESP, 1 * target::kWordSize), EDX); // left high.
__ addl(Address(ESP, 0 * target::kWordSize), ECX);
__ adcl(Address(ESP, 1 * target::kWordSize), EBX);
__ movl(EAX, Address(ESP, 0 * target::kWordSize));
__ movl(EDX, Address(ESP, 1 * target::kWordSize));
__ addl(ESP, Immediate(2 * target::kWordSize));
__ popl(EBX);
// Result is in EAX/EDX.
__ ret();
@ -3931,7 +3932,7 @@ ASSEMBLER_TEST_RUN(LongAddAddress2, test) {
// Testing only the lower 64-bit value of 'cvtdq2pd'.
ASSEMBLER_TEST_GENERATE(IntegerToDoubleConversion, assembler) {
__ movsd(XMM1, Address(ESP, kWordSize));
__ movsd(XMM1, Address(ESP, target::kWordSize));
__ cvtdq2pd(XMM2, XMM1);
__ pushl(EAX);
__ pushl(EAX);
@ -3962,21 +3963,21 @@ ASSEMBLER_TEST_RUN(IntegerToDoubleConversion, test) {
// Implement with truncation.
ASSEMBLER_TEST_GENERATE(FPUStoreLong, assembler) {
__ fldl(Address(ESP, kWordSize));
__ fldl(Address(ESP, target::kWordSize));
__ pushl(EAX);
__ pushl(EAX);
__ fnstcw(Address(ESP, 0));
__ movzxw(EAX, Address(ESP, 0));
__ orl(EAX, Immediate(0x0c00));
__ movw(Address(ESP, kWordSize), EAX);
__ fldcw(Address(ESP, kWordSize));
__ movw(Address(ESP, target::kWordSize), EAX);
__ fldcw(Address(ESP, target::kWordSize));
__ pushl(EAX);
__ pushl(EAX);
__ fistpl(Address(ESP, 0));
__ popl(EAX);
__ popl(EDX);
__ fldcw(Address(ESP, 0));
__ addl(ESP, Immediate(kWordSize * 2));
__ addl(ESP, Immediate(target::kWordSize * 2));
__ ret();
}
@ -4014,7 +4015,7 @@ ASSEMBLER_TEST_RUN(FPUStoreLong, test) {
}
ASSEMBLER_TEST_GENERATE(XorpdZeroing, assembler) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ xorpd(XMM0, XMM0);
__ pushl(EAX);
__ pushl(EAX);
@ -4042,7 +4043,7 @@ ASSEMBLER_TEST_RUN(XorpdZeroing, test) {
}
ASSEMBLER_TEST_GENERATE(Pxor, assembler) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ pxor(XMM0, XMM0);
__ pushl(EAX);
__ pushl(EAX);
@ -4070,7 +4071,7 @@ ASSEMBLER_TEST_RUN(Pxor, test) {
}
ASSEMBLER_TEST_GENERATE(Orpd, assembler) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ xorpd(XMM1, XMM1);
__ DoubleNegate(XMM1);
__ orpd(XMM0, XMM1);
@ -4103,7 +4104,7 @@ ASSEMBLER_TEST_RUN(Orpd, test) {
ASSEMBLER_TEST_GENERATE(Pextrd0, assembler) {
if (TargetCPUFeatures::sse4_1_supported()) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ pextrd(EAX, XMM0, Immediate(0));
}
__ ret();
@ -4123,7 +4124,7 @@ ASSEMBLER_TEST_RUN(Pextrd0, test) {
ASSEMBLER_TEST_GENERATE(Pextrd1, assembler) {
if (TargetCPUFeatures::sse4_1_supported()) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ pextrd(EAX, XMM0, Immediate(1));
}
__ ret();
@ -4143,7 +4144,7 @@ ASSEMBLER_TEST_RUN(Pextrd1, test) {
ASSEMBLER_TEST_GENERATE(Pmovsxdq, assembler) {
if (TargetCPUFeatures::sse4_1_supported()) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ pmovsxdq(XMM0, XMM0);
__ pextrd(EAX, XMM0, Immediate(1));
}
@ -4165,7 +4166,7 @@ ASSEMBLER_TEST_RUN(Pmovsxdq, test) {
ASSEMBLER_TEST_GENERATE(Pcmpeqq, assembler) {
if (TargetCPUFeatures::sse4_1_supported()) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ xorpd(XMM1, XMM1);
__ pcmpeqq(XMM0, XMM1);
__ movd(EAX, XMM0);
@ -4188,7 +4189,7 @@ ASSEMBLER_TEST_RUN(Pcmpeqq, test) {
}
ASSEMBLER_TEST_GENERATE(AndPd, assembler) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ andpd(XMM0, XMM0);
__ pushl(EAX);
__ pushl(EAX);
@ -4216,7 +4217,7 @@ ASSEMBLER_TEST_RUN(AndPd, test) {
}
ASSEMBLER_TEST_GENERATE(Movq, assembler) {
__ movq(XMM0, Address(ESP, kWordSize));
__ movq(XMM0, Address(ESP, target::kWordSize));
__ subl(ESP, Immediate(kDoubleSize));
__ movq(Address(ESP, 0), XMM0);
__ fldl(Address(ESP, 0));
@ -4238,7 +4239,7 @@ ASSEMBLER_TEST_RUN(Movq, test) {
}
ASSEMBLER_TEST_GENERATE(DoubleAbs, assembler) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ DoubleAbs(XMM0);
__ pushl(EAX);
__ pushl(EAX);
@ -4270,7 +4271,7 @@ ASSEMBLER_TEST_RUN(DoubleAbs, test) {
}
ASSEMBLER_TEST_GENERATE(ExtractSignBits, assembler) {
__ movsd(XMM0, Address(ESP, kWordSize));
__ movsd(XMM0, Address(ESP, target::kWordSize));
__ movmskpd(EAX, XMM0);
__ andl(EAX, Immediate(0x1));
__ ret();
@ -4296,7 +4297,7 @@ ASSEMBLER_TEST_GENERATE(ConditionalMovesSign, assembler) {
// Preserve clobbered callee-saved register (EBX).
__ pushl(EBX);
__ movl(EDX, Address(ESP, 2 * kWordSize));
__ movl(EDX, Address(ESP, 2 * target::kWordSize));
__ xorl(EAX, EAX);
__ movl(EBX, Immediate(1));
__ movl(ECX, Immediate(-1));
@ -4332,8 +4333,8 @@ ASSEMBLER_TEST_RUN(ConditionalMovesSign, test) {
// Return 1 if overflow, 0 if no overflow.
ASSEMBLER_TEST_GENERATE(ConditionalMovesNoOverflow, assembler) {
__ movl(EDX, Address(ESP, 1 * kWordSize));
__ addl(EDX, Address(ESP, 2 * kWordSize));
__ movl(EDX, Address(ESP, 1 * target::kWordSize));
__ addl(EDX, Address(ESP, 2 * target::kWordSize));
__ movl(EAX, Immediate(1));
__ movl(ECX, Immediate(0));
__ cmovno(EAX, ECX);
@ -4360,7 +4361,7 @@ ASSEMBLER_TEST_RUN(ConditionalMovesNoOverflow, test) {
ASSEMBLER_TEST_GENERATE(ConditionalMovesEqual, assembler) {
__ xorl(EAX, EAX);
__ movl(ECX, Immediate(1));
__ movl(EDX, Address(ESP, 1 * kWordSize));
__ movl(EDX, Address(ESP, 1 * target::kWordSize));
__ cmpl(EDX, Immediate(785));
__ cmove(EAX, ECX);
__ ret();
@ -4385,7 +4386,7 @@ ASSEMBLER_TEST_RUN(ConditionalMovesEqual, test) {
ASSEMBLER_TEST_GENERATE(ConditionalMovesNotEqual, assembler) {
__ xorl(EAX, EAX);
__ movl(ECX, Immediate(1));
__ movl(EDX, Address(ESP, 1 * kWordSize));
__ movl(EDX, Address(ESP, 1 * target::kWordSize));
__ cmpl(EDX, Immediate(785));
__ cmovne(EAX, ECX);
__ ret();
@ -4409,8 +4410,8 @@ ASSEMBLER_TEST_RUN(ConditionalMovesNotEqual, test) {
ASSEMBLER_TEST_GENERATE(ConditionalMovesCompare, assembler) {
__ movl(EDX, Immediate(1)); // Greater equal.
__ movl(ECX, Immediate(-1)); // Less
__ movl(EAX, Address(ESP, 1 * kWordSize));
__ cmpl(EAX, Address(ESP, 2 * kWordSize));
__ movl(EAX, Address(ESP, 1 * target::kWordSize));
__ cmpl(EAX, Address(ESP, 2 * target::kWordSize));
__ cmovlessl(EAX, ECX);
__ cmovgel(EAX, EDX);
__ ret();
@ -4620,9 +4621,9 @@ ASSEMBLER_TEST_GENERATE(TestRepMovsBytes, assembler) {
__ pushl(ESI);
__ pushl(EDI);
__ pushl(ECX);
__ movl(ESI, Address(ESP, 4 * kWordSize)); // from.
__ movl(EDI, Address(ESP, 5 * kWordSize)); // to.
__ movl(ECX, Address(ESP, 6 * kWordSize)); // count.
__ movl(ESI, Address(ESP, 4 * target::kWordSize)); // from.
__ movl(EDI, Address(ESP, 5 * target::kWordSize)); // to.
__ movl(ECX, Address(ESP, 6 * target::kWordSize)); // count.
__ rep_movsb();
__ popl(ECX);
__ popl(EDI);
@ -4657,9 +4658,9 @@ ASSEMBLER_TEST_RUN(TestRepMovsBytes, test) {
// Called from assembler_test.cc.
ASSEMBLER_TEST_GENERATE(StoreIntoObject, assembler) {
__ pushl(THR);
__ movl(EAX, Address(ESP, 2 * kWordSize));
__ movl(ECX, Address(ESP, 3 * kWordSize));
__ movl(THR, Address(ESP, 4 * kWordSize));
__ movl(EAX, Address(ESP, 2 * target::kWordSize));
__ movl(ECX, Address(ESP, 3 * target::kWordSize));
__ movl(THR, Address(ESP, 4 * target::kWordSize));
__ pushl(EAX);
__ StoreIntoObject(ECX, FieldAddress(ECX, GrowableObjectArray::data_offset()),
EAX);
@ -4804,6 +4805,7 @@ IMMEDIATE_TEST(AddrImmEAXByte,
Address(ESP, 0),
__ popl(EAX))
} // namespace compiler
} // namespace dart
#endif // defined TARGET_ARCH_IA32

View file

@ -11,7 +11,9 @@
namespace dart {
namespace compiler {
ASSEMBLER_TEST_EXTERN(StoreIntoObject);
} // namespace compiler
ASSEMBLER_TEST_RUN(StoreIntoObject, test) {
#define TEST_CODE(value, growable_array, thread) \

View file

@ -5,34 +5,43 @@
#include "vm/globals.h" // NOLINT
#if defined(TARGET_ARCH_X64)
#define SHOULD_NOT_INCLUDE_RUNTIME
#include "vm/class_id.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/cpu.h"
#include "vm/heap/heap.h"
#include "vm/instructions.h"
#include "vm/memory_region.h"
#include "vm/runtime_entry.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
namespace dart {
#if !defined(DART_PRECOMPILED_RUNTIME)
DECLARE_FLAG(bool, check_code_pointer);
DECLARE_FLAG(bool, inline_alloc);
DECLARE_FLAG(bool, precompiled_mode);
#endif
Assembler::Assembler(ObjectPoolWrapper* object_pool_wrapper,
namespace compiler {
using target::ClassTable;
using target::Heap;
using target::Instance;
using target::Instructions;
using target::Isolate;
using target::RawObject;
using target::Thread;
#if !defined(DART_PRECOMPILED_RUNTIME)
Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
bool use_far_branches)
: AssemblerBase(object_pool_wrapper), constant_pool_allowed_(false) {
: AssemblerBase(object_pool_builder), constant_pool_allowed_(false) {
// Far branching mode is only needed and implemented for ARM.
ASSERT(!use_far_branches);
generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
call(Address(THR, Thread::write_barrier_wrappers_thread_offset(reg)));
};
invoke_array_write_barrier_ = [&]() {
generate_invoke_array_write_barrier_ = [&]() {
call(Address(THR, Thread::array_write_barrier_entry_point_offset()));
};
}
@ -48,11 +57,12 @@ void Assembler::call(Label* label) {
EmitLabel(label, kSize);
}
void Assembler::LoadNativeEntry(Register dst,
const ExternalLabel* label,
ObjectPool::Patchability patchable) {
const int32_t offset = ObjectPool::element_offset(
object_pool_wrapper().FindNativeFunction(label, patchable));
void Assembler::LoadNativeEntry(
Register dst,
const ExternalLabel* label,
ObjectPoolBuilderEntry::Patchability patchable) {
const int32_t offset = target::ObjectPool::element_offset(
object_pool_builder().FindNativeFunction(label, patchable));
LoadWordFromPoolOffset(dst, offset - kHeapObjectTag);
}
@ -66,32 +76,33 @@ void Assembler::call(const ExternalLabel* label) {
call(TMP);
}
void Assembler::CallPatchable(const Code& target, Code::EntryKind entry_kind) {
void Assembler::CallPatchable(const Code& target, CodeEntryKind entry_kind) {
ASSERT(constant_pool_allowed());
const intptr_t idx =
object_pool_wrapper().AddObject(target, ObjectPool::kPatchable);
const int32_t offset = ObjectPool::element_offset(idx);
const intptr_t idx = object_pool_builder().AddObject(
ToObject(target), ObjectPoolBuilderEntry::kPatchable);
const int32_t offset = target::ObjectPool::element_offset(idx);
LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
call(FieldAddress(CODE_REG, Code::entry_point_offset(entry_kind)));
call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
}
void Assembler::CallWithEquivalence(const Code& target,
const Object& equivalence,
Code::EntryKind entry_kind) {
CodeEntryKind entry_kind) {
ASSERT(constant_pool_allowed());
const intptr_t idx = object_pool_wrapper().FindObject(target, equivalence);
const int32_t offset = ObjectPool::element_offset(idx);
const intptr_t idx =
object_pool_builder().FindObject(ToObject(target), equivalence);
const int32_t offset = target::ObjectPool::element_offset(idx);
LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
call(FieldAddress(CODE_REG, Code::entry_point_offset(entry_kind)));
call(FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
}
void Assembler::Call(const Code& target) {
ASSERT(constant_pool_allowed());
const intptr_t idx =
object_pool_wrapper().FindObject(target, ObjectPool::kNotPatchable);
const int32_t offset = ObjectPool::element_offset(idx);
const intptr_t idx = object_pool_builder().FindObject(
ToObject(target), ObjectPoolBuilderEntry::kNotPatchable);
const int32_t offset = target::ObjectPool::element_offset(idx);
LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag);
call(FieldAddress(CODE_REG, Code::entry_point_offset()));
call(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
}
void Assembler::CallToRuntime() {
@ -925,21 +936,21 @@ void Assembler::jmp(const ExternalLabel* label) {
void Assembler::JmpPatchable(const Code& target, Register pp) {
ASSERT((pp != PP) || constant_pool_allowed());
const intptr_t idx =
object_pool_wrapper().AddObject(target, ObjectPool::kPatchable);
const int32_t offset = ObjectPool::element_offset(idx);
const intptr_t idx = object_pool_builder().AddObject(
ToObject(target), ObjectPoolBuilderEntry::kPatchable);
const int32_t offset = target::ObjectPool::element_offset(idx);
movq(CODE_REG, Address::AddressBaseImm32(pp, offset - kHeapObjectTag));
movq(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
movq(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
jmp(TMP);
}
void Assembler::Jmp(const Code& target, Register pp) {
ASSERT((pp != PP) || constant_pool_allowed());
const intptr_t idx =
object_pool_wrapper().FindObject(target, ObjectPool::kNotPatchable);
const int32_t offset = ObjectPool::element_offset(idx);
const intptr_t idx = object_pool_builder().FindObject(
ToObject(target), ObjectPoolBuilderEntry::kNotPatchable);
const int32_t offset = target::ObjectPool::element_offset(idx);
movq(CODE_REG, FieldAddress(pp, offset));
movq(TMP, FieldAddress(CODE_REG, Code::entry_point_offset()));
movq(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
jmp(TMP);
}
@ -1078,26 +1089,23 @@ void Assembler::Drop(intptr_t stack_elements, Register tmp) {
}
return;
}
addq(RSP, Immediate(stack_elements * kWordSize));
addq(RSP, Immediate(stack_elements * target::kWordSize));
}
bool Assembler::CanLoadFromObjectPool(const Object& object) const {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
ASSERT(!Thread::CanLoadFromThread(object));
ASSERT(IsOriginalObject(object));
ASSERT(!target::CanLoadFromThread(object));
if (!constant_pool_allowed()) {
return false;
}
// TODO(zra, kmillikin): Also load other large immediates from the object
// pool
if (object.IsSmi()) {
if (target::IsSmi(object)) {
// If the raw smi does not fit into a 32-bit signed int, then we'll keep
// the raw value in the object pool.
return !Utils::IsInt(32, reinterpret_cast<int64_t>(object.raw()));
return !Utils::IsInt(32, target::ToRawSmi(object));
}
ASSERT(object.IsNotTemporaryScopedHandle());
ASSERT(object.IsOld());
ASSERT(IsNotTemporaryScopedHandle(object));
ASSERT(IsInOldSpace(object));
return true;
}
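The check above decides whether a Smi can be emitted as an inline 32-bit immediate or must go through the object pool. A minimal standalone sketch of that decision, assuming the usual tagging scheme (kSmiTagShift == 1, tag bit zero) rather than the VM's real target::ToRawSmi:

#include <cstdint>
#include <cstdio>

// Illustrative only: a raw Smi is the value shifted left by one (low tag bit
// clear); it can be used as an immediate only if the *tagged* word fits int32.
static int64_t ToRawSmi(int64_t value) {
  return value << 1;
}

static bool FitsInt32(int64_t value) {
  return value >= INT32_MIN && value <= INT32_MAX;
}

int main() {
  printf("%d\n", FitsInt32(ToRawSmi(1000)));          // 1: inline immediate
  printf("%d\n", FitsInt32(ToRawSmi(0x40000000LL)));  // 0: 2^30 << 1 overflows int32
  return 0;
}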
@ -1115,18 +1123,19 @@ void Assembler::LoadIsolate(Register dst) {
void Assembler::LoadObjectHelper(Register dst,
const Object& object,
bool is_unique) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
if (Thread::CanLoadFromThread(object)) {
movq(dst, Address(THR, Thread::OffsetFromThread(object)));
ASSERT(IsOriginalObject(object));
target::word offset_from_thread;
if (target::CanLoadFromThread(object, &offset_from_thread)) {
movq(dst, Address(THR, offset_from_thread));
} else if (CanLoadFromObjectPool(object)) {
const intptr_t idx = is_unique ? object_pool_wrapper().AddObject(object)
: object_pool_wrapper().FindObject(object);
const int32_t offset = ObjectPool::element_offset(idx);
const intptr_t idx = is_unique ? object_pool_builder().AddObject(object)
: object_pool_builder().FindObject(object);
const int32_t offset = target::ObjectPool::element_offset(idx);
LoadWordFromPoolOffset(dst, offset - kHeapObjectTag);
} else {
ASSERT(object.IsSmi());
LoadImmediate(dst, Immediate(reinterpret_cast<int64_t>(object.raw())));
ASSERT(target::IsSmi(object));
LoadImmediate(dst, Immediate(target::ToRawSmi(object)));
}
}
@ -1135,9 +1144,9 @@ void Assembler::LoadFunctionFromCalleePool(Register dst,
Register new_pp) {
ASSERT(!constant_pool_allowed());
ASSERT(new_pp != PP);
const intptr_t idx =
object_pool_wrapper().FindObject(function, ObjectPool::kNotPatchable);
const int32_t offset = ObjectPool::element_offset(idx);
const intptr_t idx = object_pool_builder().FindObject(
ToObject(function), ObjectPoolBuilderEntry::kNotPatchable);
const int32_t offset = target::ObjectPool::element_offset(idx);
movq(dst, Address::AddressBaseImm32(new_pp, offset - kHeapObjectTag));
}
@ -1150,52 +1159,55 @@ void Assembler::LoadUniqueObject(Register dst, const Object& object) {
}
void Assembler::StoreObject(const Address& dst, const Object& object) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
if (Thread::CanLoadFromThread(object)) {
movq(TMP, Address(THR, Thread::OffsetFromThread(object)));
ASSERT(IsOriginalObject(object));
target::word offset_from_thread;
if (target::CanLoadFromThread(object, &offset_from_thread)) {
movq(TMP, Address(THR, offset_from_thread));
movq(dst, TMP);
} else if (CanLoadFromObjectPool(object)) {
LoadObject(TMP, object);
movq(dst, TMP);
} else {
ASSERT(object.IsSmi());
MoveImmediate(dst, Immediate(reinterpret_cast<int64_t>(object.raw())));
ASSERT(target::IsSmi(object));
MoveImmediate(dst, Immediate(target::ToRawSmi(object)));
}
}
void Assembler::PushObject(const Object& object) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
if (Thread::CanLoadFromThread(object)) {
pushq(Address(THR, Thread::OffsetFromThread(object)));
ASSERT(IsOriginalObject(object));
target::word offset_from_thread;
if (target::CanLoadFromThread(object, &offset_from_thread)) {
pushq(Address(THR, offset_from_thread));
} else if (CanLoadFromObjectPool(object)) {
LoadObject(TMP, object);
pushq(TMP);
} else {
ASSERT(object.IsSmi());
PushImmediate(Immediate(reinterpret_cast<int64_t>(object.raw())));
ASSERT(target::IsSmi(object));
PushImmediate(Immediate(target::ToRawSmi(object)));
}
}
void Assembler::CompareObject(Register reg, const Object& object) {
ASSERT(!object.IsICData() || ICData::Cast(object).IsOriginal());
ASSERT(!object.IsField() || Field::Cast(object).IsOriginal());
if (Thread::CanLoadFromThread(object)) {
cmpq(reg, Address(THR, Thread::OffsetFromThread(object)));
ASSERT(IsOriginalObject(object));
target::word offset_from_thread;
if (target::CanLoadFromThread(object, &offset_from_thread)) {
cmpq(reg, Address(THR, offset_from_thread));
} else if (CanLoadFromObjectPool(object)) {
const intptr_t idx =
object_pool_wrapper().FindObject(object, ObjectPool::kNotPatchable);
const int32_t offset = ObjectPool::element_offset(idx);
const intptr_t idx = object_pool_builder().FindObject(
object, ObjectPoolBuilderEntry::kNotPatchable);
const int32_t offset = target::ObjectPool::element_offset(idx);
cmpq(reg, Address(PP, offset - kHeapObjectTag));
} else {
ASSERT(object.IsSmi());
CompareImmediate(reg, Immediate(reinterpret_cast<int64_t>(object.raw())));
ASSERT(target::IsSmi(object));
CompareImmediate(reg, Immediate(target::ToRawSmi(object)));
}
}
intptr_t Assembler::FindImmediate(int64_t imm) {
return object_pool_wrapper().FindImmediate(imm);
return object_pool_builder().FindImmediate(imm);
}
void Assembler::LoadImmediate(Register reg, const Immediate& imm) {
@ -1204,7 +1216,8 @@ void Assembler::LoadImmediate(Register reg, const Immediate& imm) {
} else if (imm.is_int32() || !constant_pool_allowed()) {
movq(reg, imm);
} else {
int32_t offset = ObjectPool::element_offset(FindImmediate(imm.value()));
int32_t offset =
target::ObjectPool::element_offset(FindImmediate(imm.value()));
LoadWordFromPoolOffset(reg, offset - kHeapObjectTag);
}
}
@ -1224,8 +1237,9 @@ void Assembler::StoreIntoObjectFilter(Register object,
Label* label,
CanBeSmi can_be_smi,
BarrierFilterMode how_to_jump) {
COMPILE_ASSERT((kNewObjectAlignmentOffset == kWordSize) &&
(kOldObjectAlignmentOffset == 0));
COMPILE_ASSERT((target::ObjectAlignment::kNewObjectAlignmentOffset ==
target::kWordSize) &&
(target::ObjectAlignment::kOldObjectAlignmentOffset == 0));
if (can_be_smi == kValueIsNotSmi) {
#if defined(DEBUG)
@ -1242,7 +1256,7 @@ void Assembler::StoreIntoObjectFilter(Register object,
// ~value | object instead and skip the write barrier if the bit is set.
notl(value);
orl(value, object);
testl(value, Immediate(kNewObjectAlignmentOffset));
testl(value, Immediate(target::ObjectAlignment::kNewObjectAlignmentOffset));
} else {
ASSERT(kHeapObjectTag == 1);
// Detect value being ...1001 and object being ...0001.
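The alignment trick used by this filter can be shown in isolation. Under the assumption stated by the COMPILE_ASSERT above (new-space objects carry the kNewObjectAlignmentOffset bit in their address, old-space objects do not), a single bit test on ~value | object tells whether the barrier can be skipped. The sketch below uses a hypothetical constant, not the real target::ObjectAlignment values.

#include <cstdint>
#include <cstdio>

// Hypothetical value for illustration; the real constant comes from
// target::ObjectAlignment::kNewObjectAlignmentOffset.
constexpr uintptr_t kNewObjectAlignmentOffset = 8;

// A store needs the write barrier only when the value is in new space and the
// holder object is in old space. In (~value | object) the alignment bit is
// clear exactly in that case, so a set bit means the barrier can be skipped.
bool CanSkipBarrier(uintptr_t object_addr, uintptr_t value_addr) {
  return ((~value_addr | object_addr) & kNewObjectAlignmentOffset) != 0;
}

int main() {
  const uintptr_t old_obj = 0x1000;                              // bit clear
  const uintptr_t new_obj = 0x2000 | kNewObjectAlignmentOffset;  // bit set
  printf("%d\n", CanSkipBarrier(old_obj, new_obj));  // 0: barrier required
  printf("%d\n", CanSkipBarrier(new_obj, old_obj));  // 1: skip
  printf("%d\n", CanSkipBarrier(old_obj, old_obj));  // 1: skip
  return 0;
}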
@ -1278,13 +1292,13 @@ void Assembler::StoreIntoObject(Register object,
testq(value, Immediate(kSmiTagMask));
j(ZERO, &done, kNearJump);
}
movb(TMP, FieldAddress(object, Object::tags_offset()));
movb(TMP, FieldAddress(object, target::Object::tags_offset()));
shrl(TMP, Immediate(RawObject::kBarrierOverlapShift));
andl(TMP, Address(THR, Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, Object::tags_offset()), TMP);
testb(FieldAddress(value, target::Object::tags_offset()), TMP);
j(ZERO, &done, kNearJump);
Register objectForCall = object;
Register object_for_call = object;
if (value != kWriteBarrierValueReg) {
// Unlikely. Only non-graph intrinsics.
// TODO(rmacnak): Shuffle registers in intrinsics.
@ -1292,16 +1306,16 @@ void Assembler::StoreIntoObject(Register object,
if (object == kWriteBarrierValueReg) {
COMPILE_ASSERT(RBX != kWriteBarrierValueReg);
COMPILE_ASSERT(RCX != kWriteBarrierValueReg);
objectForCall = (value == RBX) ? RCX : RBX;
pushq(objectForCall);
movq(objectForCall, object);
object_for_call = (value == RBX) ? RCX : RBX;
pushq(object_for_call);
movq(object_for_call, object);
}
movq(kWriteBarrierValueReg, value);
}
generate_invoke_write_barrier_wrapper_(objectForCall);
generate_invoke_write_barrier_wrapper_(object_for_call);
if (value != kWriteBarrierValueReg) {
if (object == kWriteBarrierValueReg) {
popq(objectForCall);
popq(object_for_call);
}
popq(kWriteBarrierValueReg);
}
@ -1330,10 +1344,10 @@ void Assembler::StoreIntoArray(Register object,
testq(value, Immediate(kSmiTagMask));
j(ZERO, &done, kNearJump);
}
movb(TMP, FieldAddress(object, Object::tags_offset()));
shrl(TMP, Immediate(RawObject::kBarrierOverlapShift));
movb(TMP, FieldAddress(object, target::Object::tags_offset()));
shrl(TMP, Immediate(target::RawObject::kBarrierOverlapShift));
andl(TMP, Address(THR, Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, Object::tags_offset()), TMP);
testb(FieldAddress(value, target::Object::tags_offset()), TMP);
j(ZERO, &done, kNearJump);
if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
@ -1344,7 +1358,7 @@ void Assembler::StoreIntoArray(Register object,
UNIMPLEMENTED();
}
invoke_array_write_barrier_();
generate_invoke_array_write_barrier_();
Bind(&done);
}
@ -1367,8 +1381,6 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
void Assembler::StoreIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value) {
ASSERT(!value.IsICData() || ICData::Cast(value).IsOriginal());
ASSERT(!value.IsField() || Field::Cast(value).IsOriginal());
StoreObject(dest, value);
}
@ -1384,32 +1396,17 @@ void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
}
void Assembler::ZeroInitSmiField(const Address& dest) {
Immediate zero(Smi::RawValue(0));
Immediate zero(target::ToRawSmi(0));
movq(dest, zero);
}
void Assembler::IncrementSmiField(const Address& dest, int64_t increment) {
// Note: FlowGraphCompiler::EdgeCounterIncrementSizeInBytes depends on
// the length of this instruction sequence.
Immediate inc_imm(Smi::RawValue(increment));
Immediate inc_imm(target::ToRawSmi(increment));
addq(dest, inc_imm);
}
void Assembler::Stop(const char* message) {
if (FLAG_print_stop_message) {
int64_t message_address = reinterpret_cast<int64_t>(message);
pushq(TMP); // Preserve TMP register.
pushq(RDI); // Preserve RDI register.
LoadImmediate(RDI, Immediate(message_address));
ExternalLabel label(StubCode::PrintStopMessage().EntryPoint());
call(&label);
popq(RDI); // Restore RDI register.
popq(TMP); // Restore TMP register.
}
// Emit the int3 instruction.
int3(); // Execution can be resumed with the 'cont' command in gdb.
}
void Assembler::Bind(Label* label) {
intptr_t bound = buffer_.Size();
ASSERT(!label->IsBound()); // Labels can only be bound once.
@ -1537,10 +1534,10 @@ void Assembler::LeaveCallRuntimeFrame() {
const intptr_t kPushedXmmRegistersCount =
RegisterSet::RegisterCount(CallingConventions::kVolatileXmmRegisters);
const intptr_t kPushedRegistersSize =
kPushedCpuRegistersCount * kWordSize +
kPushedCpuRegistersCount * target::kWordSize +
kPushedXmmRegistersCount * kFpuRegisterSize +
(compiler_frame_layout.dart_fixed_frame_size - 2) *
kWordSize; // From EnterStubFrame (excluding PC / FP)
(target::frame_layout.dart_fixed_frame_size - 2) *
target::kWordSize; // From EnterStubFrame (excluding PC / FP)
leaq(RSP, Address(RBP, -kPushedRegistersSize));
@ -1565,13 +1562,14 @@ void Assembler::CallRuntime(const RuntimeEntry& entry,
}
void Assembler::RestoreCodePointer() {
movq(CODE_REG, Address(RBP, compiler_frame_layout.code_from_fp * kWordSize));
movq(CODE_REG,
Address(RBP, target::frame_layout.code_from_fp * target::kWordSize));
}
void Assembler::LoadPoolPointer(Register pp) {
// Load new pool pointer.
CheckCodePointer();
movq(pp, FieldAddress(CODE_REG, Code::object_pool_offset()));
movq(pp, FieldAddress(CODE_REG, target::Code::object_pool_offset()));
set_constant_pool_allowed(pp == PP);
}
@ -1597,8 +1595,8 @@ void Assembler::LeaveDartFrame(RestorePP restore_pp) {
// Restore caller's PP register that was pushed in EnterDartFrame.
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
if (restore_pp == kRestoreCallerPP) {
movq(PP, Address(RBP, (compiler_frame_layout.saved_caller_pp_from_fp *
kWordSize)));
movq(PP, Address(RBP, (target::frame_layout.saved_caller_pp_from_fp *
target::kWordSize)));
}
}
set_constant_pool_allowed(false);
@ -1627,7 +1625,7 @@ void Assembler::CheckCodePointer() {
leaq(RAX, Address::AddressRIPRelative(-header_to_rip_offset));
ASSERT(CodeSize() == (header_to_rip_offset - header_to_entry_offset));
}
cmpq(RAX, FieldAddress(CODE_REG, Code::saved_instructions_offset()));
cmpq(RAX, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
j(EQUAL, &instructions_ok);
int3();
Bind(&instructions_ok);
@ -1711,16 +1709,16 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
movq(temp_reg, Address(temp_reg, table_offset));
testb(Address(temp_reg, state_offset),
Immediate(ClassHeapStats::TraceAllocationMask()));
Immediate(target::ClassHeapStats::TraceAllocationMask()));
// We are tracing for this class, jump to the trace label which will use
// the allocation stub.
j(NOT_ZERO, trace, near_jump);
}
void Assembler::UpdateAllocationStats(intptr_t cid, Heap::Space space) {
void Assembler::UpdateAllocationStats(intptr_t cid) {
ASSERT(cid > 0);
intptr_t counter_offset =
ClassTable::CounterOffsetFor(cid, space == Heap::kNew);
ClassTable::CounterOffsetFor(cid, /*is_new_space=*/true);
Register temp_reg = TMP;
LoadIsolate(temp_reg);
intptr_t table_offset =
@ -1729,25 +1727,22 @@ void Assembler::UpdateAllocationStats(intptr_t cid, Heap::Space space) {
incq(Address(temp_reg, counter_offset));
}
void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
Register size_reg,
Heap::Space space) {
void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg) {
ASSERT(cid > 0);
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid, space);
UpdateAllocationStats(cid);
Register temp_reg = TMP;
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, space == Heap::kNew);
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
addq(Address(temp_reg, size_offset), size_reg);
}
void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
intptr_t size_in_bytes,
Heap::Space space) {
intptr_t size_in_bytes) {
ASSERT(cid > 0);
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid, space);
UpdateAllocationStats(cid);
Register temp_reg = TMP;
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, space == Heap::kNew);
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
addq(Address(temp_reg, size_offset), Immediate(size_in_bytes));
}
#endif // !PRODUCT
@ -1758,13 +1753,13 @@ void Assembler::TryAllocate(const Class& cls,
Register instance_reg,
Register temp) {
ASSERT(failure != NULL);
const intptr_t instance_size = cls.instance_size();
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
const classid_t cid = target::Class::GetId(cls);
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cls.id(), failure, near_jump));
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, near_jump));
movq(instance_reg, Address(THR, Thread::top_offset()));
addq(instance_reg, Immediate(instance_size));
// instance_reg: potential next object start.
@ -1773,17 +1768,14 @@ void Assembler::TryAllocate(const Class& cls,
// Successfully allocated the object, now update top to point to
// next object start and store the class in the class field of object.
movq(Address(THR, Thread::top_offset()), instance_reg);
NOT_IN_PRODUCT(UpdateAllocationStats(cls.id(), space));
NOT_IN_PRODUCT(UpdateAllocationStats(cid));
ASSERT(instance_size >= kHeapObjectTag);
AddImmediate(instance_reg, Immediate(kHeapObjectTag - instance_size));
uint32_t tags = 0;
tags = RawObject::SizeTag::update(instance_size, tags);
ASSERT(cls.id() != kIllegalCid);
tags = RawObject::ClassIdTag::update(cls.id(), tags);
tags = RawObject::NewBit::update(true, tags);
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, instance_size);
// Extends the 32 bit tags with zeros, which is the uninitialized
// hash code.
MoveImmediate(FieldAddress(instance_reg, Object::tags_offset()),
MoveImmediate(FieldAddress(instance_reg, target::Object::tags_offset()),
Immediate(tags));
} else {
jmp(failure);
@ -1803,7 +1795,6 @@ void Assembler::TryAllocateArray(intptr_t cid,
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, near_jump));
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
movq(instance, Address(THR, Thread::top_offset()));
movq(end_address, instance);
@ -1820,17 +1811,16 @@ void Assembler::TryAllocateArray(intptr_t cid,
// next object start and initialize the object.
movq(Address(THR, Thread::top_offset()), end_address);
addq(instance, Immediate(kHeapObjectTag));
NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, instance_size, space));
NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, instance_size));
// Initialize the tags.
// instance: new object start as a tagged pointer.
uint32_t tags = 0;
tags = RawObject::ClassIdTag::update(cid, tags);
tags = RawObject::SizeTag::update(instance_size, tags);
tags = RawObject::NewBit::update(true, tags);
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, instance_size);
// Extends the 32 bit tags with zeros, which is the uninitialized
// hash code.
movq(FieldAddress(instance, Array::tags_offset()), Immediate(tags));
movq(FieldAddress(instance, target::Object::tags_offset()),
Immediate(tags));
} else {
jmp(failure);
}
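Both allocation fast paths now take their header word from a single helper. A rough sketch of what a MakeTagWordForNewSpaceObject-style helper packs: the class-id position matches the asserts elsewhere in this file, while the size-tag and new-bit positions below are hypothetical, and the upper 32 hash bits stay zero as the comments note.

#include <cstdint>
#include <cstdio>

// Illustrative layout only; the real one lives in RawObject's bit fields.
constexpr uint32_t kClassIdTagPos = 16;    // matches the asserts in this file
constexpr uint32_t kSizeTagPos = 8;        // hypothetical
constexpr uint32_t kNewBitPos = 0;         // hypothetical
constexpr uint32_t kObjectAlignment = 16;  // sizes are stored scaled

uint32_t MakeTagWordForNewSpaceObject(uint32_t cid, uint32_t instance_size) {
  uint32_t tags = 0;
  tags |= (instance_size / kObjectAlignment) << kSizeTagPos;  // size tag
  tags |= cid << kClassIdTagPos;                              // class id
  tags |= 1u << kNewBitPos;                                   // new-space bit
  return tags;
}

int main() {
  printf("0x%08x\n", MakeTagWordForNewSpaceObject(/*cid=*/42, /*size=*/64));
  return 0;
}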
@ -1974,6 +1964,7 @@ void Assembler::EmitGenericShift(bool wide,
}
void Assembler::LoadClassId(Register result, Register object) {
using target::Object;
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
ASSERT(sizeof(classid_t) == sizeof(uint16_t));
@ -1988,7 +1979,7 @@ void Assembler::LoadClassById(Register result, Register class_id) {
const intptr_t offset =
Isolate::class_table_offset() + ClassTable::table_offset();
movq(result, Address(result, offset));
ASSERT(kSizeOfClassPairLog2 == 4);
ASSERT(ClassTable::kSizeOfClassPairLog2 == 4);
// TIMES_16 is not a real scale factor on x64, so we double the class id
// and use TIMES_8.
addq(class_id, class_id);
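The doubling above is plain address arithmetic: x64 addressing modes have no *16 scale, so an index into a table of 16-byte class pairs (kSizeOfClassPairLog2 == 4) is added to itself and then scaled by the available *8 factor. A quick standalone check of the equivalence:

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t table = 0x100000;  // hypothetical class-table base
  for (intptr_t cid = 0; cid < 1000; ++cid) {
    // base + cid * 16 rewritten as base + (cid + cid) * 8, as LoadClassById does.
    assert(table + cid * 16 == table + (cid + cid) * 8);
  }
  return 0;
}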
@ -2006,6 +1997,7 @@ void Assembler::CompareClassId(Register object,
void Assembler::SmiUntagOrCheckClass(Register object,
intptr_t class_id,
Label* is_smi) {
using target::Object;
ASSERT(kSmiTagShift == 1);
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
@ -2060,7 +2052,7 @@ void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
jmp(&join, Assembler::kNearJump);
Bind(&smi);
movq(result, Immediate(Smi::RawValue(kSmiCid)));
movq(result, Immediate(target::ToRawSmi(kSmiCid)));
Bind(&join);
} else {
@ -2074,6 +2066,10 @@ void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
}
}
Address Assembler::VMTagAddress() {
return Address(THR, Thread::vm_tag_offset());
}
Address Assembler::ElementAddressForIntIndex(bool is_external,
intptr_t cid,
intptr_t index_scale,
@ -2146,6 +2142,7 @@ const char* Assembler::RegisterName(Register reg) {
return cpu_reg_names[reg];
}
} // namespace compiler
} // namespace dart
#endif // defined(TARGET_ARCH_X64)

View file

@ -16,12 +16,14 @@
#include "vm/constants_x64.h"
#include "vm/constants_x86.h"
#include "vm/hash_map.h"
#include "vm/object.h"
#include "vm/pointer_tagging.h"
namespace dart {
// Forward declarations.
class RuntimeEntry;
class FlowGraphCompiler;
namespace compiler {
class Immediate : public ValueObject {
public:
@ -278,7 +280,7 @@ class FieldAddress : public Address {
class Assembler : public AssemblerBase {
public:
explicit Assembler(ObjectPoolWrapper* object_pool_wrapper,
explicit Assembler(ObjectPoolBuilder* object_pool_builder,
bool use_far_branches = false);
~Assembler() {}
@ -686,7 +688,7 @@ class Assembler : public AssemblerBase {
void LoadUniqueObject(Register dst, const Object& obj);
void LoadNativeEntry(Register dst,
const ExternalLabel* label,
ObjectPool::Patchability patchable);
ObjectPoolBuilderEntry::Patchability patchable);
void LoadFunctionFromCalleePool(Register dst,
const Function& function,
Register new_pp);
@ -694,7 +696,7 @@ class Assembler : public AssemblerBase {
void Jmp(const Code& code, Register pp = PP);
void J(Condition condition, const Code& code, Register pp);
void CallPatchable(const Code& code,
Code::EntryKind entry_kind = Code::EntryKind::kNormal);
CodeEntryKind entry_kind = CodeEntryKind::kNormal);
void Call(const Code& stub_entry);
void CallToRuntime();
@ -702,10 +704,9 @@ class Assembler : public AssemblerBase {
// Emit a call that shares its object pool entries with other calls
// that have the same equivalence marker.
void CallWithEquivalence(
const Code& code,
const Object& equivalence,
Code::EntryKind entry_kind = Code::EntryKind::kNormal);
void CallWithEquivalence(const Code& code,
const Object& equivalence,
CodeEntryKind entry_kind = CodeEntryKind::kNormal);
// Unaware of write barrier (use StoreInto* methods for storing to objects).
// TODO(koda): Add StackAddress/HeapAddress types to prevent misuse.
@ -774,8 +775,6 @@ class Assembler : public AssemblerBase {
void LeaveCallRuntimeFrame();
void CallRuntime(const RuntimeEntry& entry, intptr_t argument_count);
void CallRuntimeSavingRegisters(const RuntimeEntry& entry,
intptr_t argument_count);
// Call runtime function. Reserves shadow space on the stack before calling
// if platform ABI requires that. Does not restore RSP after the call itself.
@ -871,14 +870,10 @@ class Assembler : public AssemblerBase {
void MonomorphicCheckedEntry();
void UpdateAllocationStats(intptr_t cid, Heap::Space space);
void UpdateAllocationStats(intptr_t cid);
void UpdateAllocationStatsWithSize(intptr_t cid,
Register size_reg,
Heap::Space space);
void UpdateAllocationStatsWithSize(intptr_t cid,
intptr_t instance_size,
Heap::Space space);
void UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg);
void UpdateAllocationStatsWithSize(intptr_t cid, intptr_t instance_size);
// If allocation tracing for |cid| is enabled, will jump to |trace| label,
// which will allocate in the runtime where tracing occurs.
@ -940,14 +935,12 @@ class Assembler : public AssemblerBase {
Register array,
Register index);
static Address VMTagAddress() {
return Address(THR, Thread::vm_tag_offset());
}
static Address VMTagAddress();
// On some other platforms, we draw a distinction between safe and unsafe
// smis.
static bool IsSafe(const Object& object) { return true; }
static bool IsSafeSmi(const Object& object) { return object.IsSmi(); }
static bool IsSafeSmi(const Object& object) { return target::IsSmi(object); }
private:
bool constant_pool_allowed_;
@ -1046,14 +1039,9 @@ class Assembler : public AssemblerBase {
// Unaware of write barrier (use StoreInto* methods for storing to objects).
void MoveImmediate(const Address& dst, const Immediate& imm);
void ComputeCounterAddressesForCid(intptr_t cid,
Heap::Space space,
Address* count_address,
Address* size_address);
friend class FlowGraphCompiler;
friend class dart::FlowGraphCompiler;
std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;
std::function<void()> invoke_array_write_barrier_;
std::function<void()> generate_invoke_array_write_barrier_;
DISALLOW_ALLOCATION();
DISALLOW_COPY_AND_ASSIGN(Assembler);
@ -1106,6 +1094,13 @@ inline void Assembler::EmitOperandSizeOverride() {
EmitUint8(0x66);
}
} // namespace compiler
using compiler::Address;
using compiler::FieldAddress;
using compiler::Immediate;
using compiler::Label;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_

View file

@ -11,6 +11,7 @@
#include "vm/virtual_memory.h"
namespace dart {
namespace compiler {
#define __ assembler->
@ -72,33 +73,33 @@ ASSEMBLER_TEST_GENERATE(AddressingModes, assembler) {
__ movq(RAX, Address(R13, 0));
__ movq(R10, Address(RAX, 0));
__ movq(RAX, Address(RSP, kWordSize));
__ movq(RAX, Address(RBP, kWordSize));
__ movq(RAX, Address(RAX, kWordSize));
__ movq(RAX, Address(R10, kWordSize));
__ movq(RAX, Address(R12, kWordSize));
__ movq(RAX, Address(R13, kWordSize));
__ movq(RAX, Address(RSP, target::kWordSize));
__ movq(RAX, Address(RBP, target::kWordSize));
__ movq(RAX, Address(RAX, target::kWordSize));
__ movq(RAX, Address(R10, target::kWordSize));
__ movq(RAX, Address(R12, target::kWordSize));
__ movq(RAX, Address(R13, target::kWordSize));
__ movq(RAX, Address(RSP, -kWordSize));
__ movq(RAX, Address(RBP, -kWordSize));
__ movq(RAX, Address(RAX, -kWordSize));
__ movq(RAX, Address(R10, -kWordSize));
__ movq(RAX, Address(R12, -kWordSize));
__ movq(RAX, Address(R13, -kWordSize));
__ movq(RAX, Address(RSP, -target::kWordSize));
__ movq(RAX, Address(RBP, -target::kWordSize));
__ movq(RAX, Address(RAX, -target::kWordSize));
__ movq(RAX, Address(R10, -target::kWordSize));
__ movq(RAX, Address(R12, -target::kWordSize));
__ movq(RAX, Address(R13, -target::kWordSize));
__ movq(RAX, Address(RSP, 256 * kWordSize));
__ movq(RAX, Address(RBP, 256 * kWordSize));
__ movq(RAX, Address(RAX, 256 * kWordSize));
__ movq(RAX, Address(R10, 256 * kWordSize));
__ movq(RAX, Address(R12, 256 * kWordSize));
__ movq(RAX, Address(R13, 256 * kWordSize));
__ movq(RAX, Address(RSP, 256 * target::kWordSize));
__ movq(RAX, Address(RBP, 256 * target::kWordSize));
__ movq(RAX, Address(RAX, 256 * target::kWordSize));
__ movq(RAX, Address(R10, 256 * target::kWordSize));
__ movq(RAX, Address(R12, 256 * target::kWordSize));
__ movq(RAX, Address(R13, 256 * target::kWordSize));
__ movq(RAX, Address(RSP, -256 * kWordSize));
__ movq(RAX, Address(RBP, -256 * kWordSize));
__ movq(RAX, Address(RAX, -256 * kWordSize));
__ movq(RAX, Address(R10, -256 * kWordSize));
__ movq(RAX, Address(R12, -256 * kWordSize));
__ movq(RAX, Address(R13, -256 * kWordSize));
__ movq(RAX, Address(RSP, -256 * target::kWordSize));
__ movq(RAX, Address(RBP, -256 * target::kWordSize));
__ movq(RAX, Address(RAX, -256 * target::kWordSize));
__ movq(RAX, Address(R10, -256 * target::kWordSize));
__ movq(RAX, Address(R12, -256 * target::kWordSize));
__ movq(RAX, Address(R13, -256 * target::kWordSize));
__ movq(RAX, Address(RAX, TIMES_1, 0));
__ movq(RAX, Address(RAX, TIMES_2, 0));
@ -111,17 +112,17 @@ ASSEMBLER_TEST_GENERATE(AddressingModes, assembler) {
__ movq(RAX, Address(R12, TIMES_2, 0));
__ movq(RAX, Address(R13, TIMES_2, 0));
__ movq(RAX, Address(RBP, TIMES_2, kWordSize));
__ movq(RAX, Address(RAX, TIMES_2, kWordSize));
__ movq(RAX, Address(R10, TIMES_2, kWordSize));
__ movq(RAX, Address(R12, TIMES_2, kWordSize));
__ movq(RAX, Address(R13, TIMES_2, kWordSize));
__ movq(RAX, Address(RBP, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RAX, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R10, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R12, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R13, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RBP, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RAX, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R10, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R12, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R13, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RBP, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RAX, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R10, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R12, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R13, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RAX, RBP, TIMES_2, 0));
__ movq(RAX, Address(RAX, RAX, TIMES_2, 0));
@ -159,77 +160,77 @@ ASSEMBLER_TEST_GENERATE(AddressingModes, assembler) {
__ movq(RAX, Address(R13, R12, TIMES_2, 0));
__ movq(RAX, Address(R13, R13, TIMES_2, 0));
__ movq(RAX, Address(RAX, RBP, TIMES_2, kWordSize));
__ movq(RAX, Address(RAX, RAX, TIMES_2, kWordSize));
__ movq(RAX, Address(RAX, R10, TIMES_2, kWordSize));
__ movq(RAX, Address(RAX, R12, TIMES_2, kWordSize));
__ movq(RAX, Address(RAX, R13, TIMES_2, kWordSize));
__ movq(RAX, Address(RAX, RBP, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RAX, RAX, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RAX, R10, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RAX, R12, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RAX, R13, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RBP, RBP, TIMES_2, kWordSize));
__ movq(RAX, Address(RBP, RAX, TIMES_2, kWordSize));
__ movq(RAX, Address(RBP, R10, TIMES_2, kWordSize));
__ movq(RAX, Address(RBP, R12, TIMES_2, kWordSize));
__ movq(RAX, Address(RBP, R13, TIMES_2, kWordSize));
__ movq(RAX, Address(RBP, RBP, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RBP, RAX, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RBP, R10, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RBP, R12, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RBP, R13, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RSP, RBP, TIMES_2, kWordSize));
__ movq(RAX, Address(RSP, RAX, TIMES_2, kWordSize));
__ movq(RAX, Address(RSP, R10, TIMES_2, kWordSize));
__ movq(RAX, Address(RSP, R12, TIMES_2, kWordSize));
__ movq(RAX, Address(RSP, R13, TIMES_2, kWordSize));
__ movq(RAX, Address(RSP, RBP, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RSP, RAX, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RSP, R10, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RSP, R12, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RSP, R13, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R10, RBP, TIMES_2, kWordSize));
__ movq(RAX, Address(R10, RAX, TIMES_2, kWordSize));
__ movq(RAX, Address(R10, R10, TIMES_2, kWordSize));
__ movq(RAX, Address(R10, R12, TIMES_2, kWordSize));
__ movq(RAX, Address(R10, R13, TIMES_2, kWordSize));
__ movq(RAX, Address(R10, RBP, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R10, RAX, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R10, R10, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R10, R12, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R10, R13, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R12, RBP, TIMES_2, kWordSize));
__ movq(RAX, Address(R12, RAX, TIMES_2, kWordSize));
__ movq(RAX, Address(R12, R10, TIMES_2, kWordSize));
__ movq(RAX, Address(R12, R12, TIMES_2, kWordSize));
__ movq(RAX, Address(R12, R13, TIMES_2, kWordSize));
__ movq(RAX, Address(R12, RBP, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R12, RAX, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R12, R10, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R12, R12, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R12, R13, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R13, RBP, TIMES_2, kWordSize));
__ movq(RAX, Address(R13, RAX, TIMES_2, kWordSize));
__ movq(RAX, Address(R13, R10, TIMES_2, kWordSize));
__ movq(RAX, Address(R13, R12, TIMES_2, kWordSize));
__ movq(RAX, Address(R13, R13, TIMES_2, kWordSize));
__ movq(RAX, Address(R13, RBP, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R13, RAX, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R13, R10, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R13, R12, TIMES_2, target::kWordSize));
__ movq(RAX, Address(R13, R13, TIMES_2, target::kWordSize));
__ movq(RAX, Address(RAX, RBP, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RAX, RAX, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RAX, R10, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RAX, R12, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RAX, R13, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RAX, RBP, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RAX, RAX, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RAX, R10, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RAX, R12, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RAX, R13, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RBP, RBP, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RBP, RAX, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RBP, R10, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RBP, R12, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RBP, R13, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RBP, RBP, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RBP, RAX, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RBP, R10, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RBP, R12, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RBP, R13, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RSP, RBP, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RSP, RAX, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RSP, R10, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RSP, R12, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RSP, R13, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(RSP, RBP, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RSP, RAX, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RSP, R10, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RSP, R12, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(RSP, R13, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R10, RBP, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R10, RAX, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R10, R10, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R10, R12, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R10, R13, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R10, RBP, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R10, RAX, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R10, R10, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R10, R12, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R10, R13, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R12, RBP, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R12, RAX, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R12, R10, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R12, R12, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R12, R13, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R12, RBP, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R12, RAX, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R12, R10, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R12, R12, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R12, R13, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R13, RBP, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R13, RAX, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R13, R10, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R13, R12, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R13, R13, TIMES_2, 256 * kWordSize));
__ movq(RAX, Address(R13, RBP, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R13, RAX, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R13, R10, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R13, R12, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address(R13, R13, TIMES_2, 256 * target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RSP, 0));
__ movq(RAX, Address::AddressBaseImm32(RBP, 0));
@ -239,19 +240,19 @@ ASSEMBLER_TEST_GENERATE(AddressingModes, assembler) {
__ movq(RAX, Address::AddressBaseImm32(R13, 0));
__ movq(R10, Address::AddressBaseImm32(RAX, 0));
__ movq(RAX, Address::AddressBaseImm32(RSP, kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RBP, kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RAX, kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R10, kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R12, kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R13, kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RSP, target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RBP, target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RAX, target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R10, target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R12, target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R13, target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RSP, -kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RBP, -kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RAX, -kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R10, -kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R12, -kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R13, -kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RSP, -target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RBP, -target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(RAX, -target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R10, -target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R12, -target::kWordSize));
__ movq(RAX, Address::AddressBaseImm32(R13, -target::kWordSize));
}
ASSEMBLER_TEST_RUN(AddressingModes, test) {
@ -1163,7 +1164,7 @@ ASSEMBLER_TEST_GENERATE(MoveExtendMemory, assembler) {
__ movzxb(RAX, Address(RSP, 0)); // RAX = 0xff
__ movsxw(R8, Address(RSP, 0)); // R8 = -1
__ movzxw(RCX, Address(RSP, 0)); // RCX = 0xffff
__ addq(RSP, Immediate(kWordSize));
__ addq(RSP, Immediate(target::kWordSize));
__ addq(R8, RCX);
__ addq(RAX, R8);
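The expected register values in the comments (RAX = 0xff, R8 = -1, RCX = 0xffff) come from zero- versus sign-extension of the same 16-bit word. A standalone C++ check of those loads, assuming the word at the top of the stack is 0xffff as the comments imply:

#include <cstdint>
#include <cstdio>

int main() {
  const uint16_t word = 0xffff;                      // the stack slot contents
  const uint64_t rax = static_cast<uint8_t>(word);   // movzxb: 0xff
  const int64_t r8 = static_cast<int16_t>(word);     // movsxw: -1
  const uint64_t rcx = static_cast<uint16_t>(word);  // movzxw: 0xffff
  printf("%llx %lld %llx\n", (unsigned long long)rax, (long long)r8,
         (unsigned long long)rcx);
  // The test then adds them up, so its final RAX would be 0x100fd.
  printf("0x%llx\n", (unsigned long long)(rax + r8 + rcx));
  return 0;
}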
@ -1189,9 +1190,9 @@ ASSEMBLER_TEST_RUN(MoveExtendMemory, test) {
ASSEMBLER_TEST_GENERATE(MoveExtend32Memory, assembler) {
__ pushq(Immediate(0xffffffff));
__ pushq(Immediate(0x7fffffff));
__ movsxd(RDX, Address(RSP, kWordSize));
__ movsxd(RDX, Address(RSP, target::kWordSize));
__ movsxd(RAX, Address(RSP, 0));
__ addq(RSP, Immediate(kWordSize * 2));
__ addq(RSP, Immediate(target::kWordSize * 2));
__ addq(RAX, RDX);
__ ret();
@ -1218,7 +1219,7 @@ ASSEMBLER_TEST_GENERATE(MoveWord, assembler) {
__ movq(RCX, Immediate(-1));
__ movw(Address(RAX, 0), RCX);
__ movzxw(RAX, Address(RAX, 0)); // RAX = 0xffff
__ addq(RSP, Immediate(kWordSize));
__ addq(RSP, Immediate(target::kWordSize));
__ ret();
}
@ -1298,7 +1299,7 @@ ASSEMBLER_TEST_GENERATE(MoveWordRex, assembler) {
__ movzxw(R8, Address(R8, 0)); // 0xffff
__ xorq(RAX, RAX);
__ addq(RAX, R8); // RAX = 0xffff
__ addq(RSP, Immediate(kWordSize));
__ addq(RSP, Immediate(target::kWordSize));
__ ret();
}
@ -1615,10 +1616,10 @@ ASSEMBLER_TEST_GENERATE(AddAddress, assembler) {
__ pushq(CallingConventions::kArg3Reg);
__ pushq(CallingConventions::kArg2Reg);
__ pushq(CallingConventions::kArg1Reg);
__ movq(R10, Address(RSP, 0 * kWordSize)); // al.
__ addq(R10, Address(RSP, 2 * kWordSize)); // bl.
__ movq(RAX, Address(RSP, 1 * kWordSize)); // ah.
__ adcq(RAX, Address(RSP, 3 * kWordSize)); // bh.
__ movq(R10, Address(RSP, 0 * target::kWordSize)); // al.
__ addq(R10, Address(RSP, 2 * target::kWordSize)); // bl.
__ movq(RAX, Address(RSP, 1 * target::kWordSize)); // ah.
__ adcq(RAX, Address(RSP, 3 * target::kWordSize)); // bh.
// RAX = high64(ah:al + bh:bl).
__ Drop(4);
__ ret();
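The addq/adcq pair in this test is a 128-bit add split across two 64-bit halves, with the carry from the low words propagated into the high words; the test keeps only the high half. A hedged standalone equivalent of the arithmetic:

#include <cstdint>
#include <cstdio>

// Returns high64(ah:al + bh:bl): add the low words, then add the high words
// plus the carry out of the low-word add, mirroring addq followed by adcq.
uint64_t HighOfSum(uint64_t al, uint64_t ah, uint64_t bl, uint64_t bh) {
  const uint64_t low = al + bl;
  const uint64_t carry = (low < al) ? 1 : 0;
  return ah + bh + carry;
}

int main() {
  // Adding 1 to an all-ones low word carries into the high word.
  printf("%llu\n", (unsigned long long)HighOfSum(~0ull, 0, 1, 0));  // 1
  printf("%llu\n", (unsigned long long)HighOfSum(5, 7, 9, 11));     // 18
  return 0;
}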
@ -1711,10 +1712,10 @@ ASSEMBLER_TEST_GENERATE(SubAddress, assembler) {
__ pushq(CallingConventions::kArg3Reg);
__ pushq(CallingConventions::kArg2Reg);
__ pushq(CallingConventions::kArg1Reg);
__ movq(R10, Address(RSP, 0 * kWordSize)); // al.
__ subq(R10, Address(RSP, 2 * kWordSize)); // bl.
__ movq(RAX, Address(RSP, 1 * kWordSize)); // ah.
__ sbbq(RAX, Address(RSP, 3 * kWordSize)); // bh.
__ movq(R10, Address(RSP, 0 * target::kWordSize)); // al.
__ subq(R10, Address(RSP, 2 * target::kWordSize)); // bl.
__ movq(RAX, Address(RSP, 1 * target::kWordSize)); // ah.
__ sbbq(RAX, Address(RSP, 3 * target::kWordSize)); // bh.
// RAX = high64(ah:al - bh:bl).
__ Drop(4);
__ ret();
@ -5417,9 +5418,9 @@ ASSEMBLER_TEST_GENERATE(TestRepMovsBytes, assembler) {
__ pushq(CallingConventions::kArg1Reg); // from.
__ pushq(CallingConventions::kArg2Reg); // to.
__ pushq(CallingConventions::kArg3Reg); // count.
__ movq(RSI, Address(RSP, 2 * kWordSize)); // from.
__ movq(RDI, Address(RSP, 1 * kWordSize)); // to.
__ movq(RCX, Address(RSP, 0 * kWordSize)); // count.
__ movq(RSI, Address(RSP, 2 * target::kWordSize)); // from.
__ movq(RDI, Address(RSP, 1 * target::kWordSize)); // to.
__ movq(RCX, Address(RSP, 0 * target::kWordSize)); // count.
__ rep_movsb();
// Remove saved arguments.
__ popq(RAX);
@ -5835,6 +5836,8 @@ IMMEDIATE_TEST(AddrImmRAXByte,
__ pushq(RAX),
Address(RSP, 0),
__ popq(RAX))
} // namespace compiler
} // namespace dart
#endif // defined TARGET_ARCH_X64

View file

@ -9,6 +9,7 @@
#include "vm/compiler/assembler/assembler.h"
#include "vm/globals.h"
#include "vm/log.h"
#include "vm/object.h"
namespace dart {

View file

@ -655,16 +655,6 @@ void ARMDecoder::DecodeType01(Instr* instr) {
case 7: {
if ((instr->Bits(21, 2) == 0x1) && (instr->ConditionField() == AL)) {
Format(instr, "bkpt #'imm12_4");
if (instr->BkptField() == Instr::kStopMessageCode) {
const char* message = "Stop messages not enabled";
if (FLAG_print_stop_message) {
message = *reinterpret_cast<const char**>(
reinterpret_cast<intptr_t>(instr) - Instr::kInstrSize);
}
buffer_pos_ += Utils::SNPrint(current_position_in_buffer(),
remaining_size_in_buffer(),
" ; \"%s\"", message);
}
} else {
// Format(instr, "smc'cond");
Unknown(instr); // Not used.

View file

@ -885,16 +885,6 @@ void ARM64Decoder::DecodeExceptionGen(Instr* instr) {
} else if ((instr->Bits(0, 2) == 0) && (instr->Bits(2, 3) == 0) &&
(instr->Bits(21, 3) == 1)) {
Format(instr, "brk 'imm16");
if (instr->Imm16Field() == Instr::kStopMessageCode) {
const char* message = "Stop messages not enabled";
if (FLAG_print_stop_message) {
message = *reinterpret_cast<const char**>(
reinterpret_cast<intptr_t>(instr) - 2 * Instr::kInstrSize);
}
buffer_pos_ +=
Utils::SNPrint(current_position_in_buffer(),
remaining_size_in_buffer(), " ; \"%s\"", message);
}
} else if ((instr->Bits(0, 2) == 0) && (instr->Bits(2, 3) == 0) &&
(instr->Bits(21, 3) == 2)) {
Format(instr, "hlt 'imm16");

View file

@ -234,7 +234,7 @@ static bool GetLoadedObjectAt(uword pc,
KBCInstr instr = KernelBytecode::At(pc);
if (HasLoadFromPool(instr)) {
uint16_t index = KernelBytecode::DecodeD(instr);
if (object_pool.TypeAt(index) == ObjectPool::kTaggedObject) {
if (object_pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
*obj = object_pool.ObjectAt(index);
return true;
}

View file

@ -14,8 +14,8 @@ namespace dart {
#if !defined(PRODUCT) && !defined(TARGET_ARCH_DBC)
ISOLATE_UNIT_TEST_CASE(Disassembler) {
ObjectPoolWrapper object_pool_wrapper;
Assembler assembler(&object_pool_wrapper);
ObjectPoolBuilder object_pool_builder;
Assembler assembler(&object_pool_builder);
// The used instructions work on all platforms.
Register reg = static_cast<Register>(0);

View file

@ -308,7 +308,7 @@ class DisassemblerX64 : public ValueObject {
}
const char* NameOfCPURegister(int reg) const {
return Assembler::RegisterName(static_cast<Register>(reg));
return compiler::Assembler::RegisterName(static_cast<Register>(reg));
}
const char* NameOfByteCPURegister(int reg) const {
@ -344,7 +344,6 @@ class DisassemblerX64 : public ValueObject {
const char* TwoByteMnemonic(uint8_t opcode);
int TwoByteOpcodeInstruction(uint8_t* data);
int Print660F38Instruction(uint8_t* data);
void CheckPrintStop(uint8_t* data);
int F6F7Instruction(uint8_t* data);
int ShiftInstruction(uint8_t* data);
@ -1229,20 +1228,6 @@ int DisassemblerX64::Print660F38Instruction(uint8_t* current) {
}
}
// Called when disassembling test eax, 0xXXXXX.
void DisassemblerX64::CheckPrintStop(uint8_t* data) {
#if defined(TARGET_ARCH_IA32)
// Recognize stop pattern.
if (*data == 0xCC) {
const char* message = "Stop messages not enabled";
if (FLAG_print_stop_message) {
message = *reinterpret_cast<const char**>(data - 4);
}
Print(" STOP:'%s'", message);
}
#endif
}
// Handle all two-byte opcodes, which start with 0x0F.
// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
@ -1854,12 +1839,8 @@ int DisassemblerX64::InstructionDecode(uword pc) {
case 0xA9: {
data++;
bool check_for_stop = operand_size() == DOUBLEWORD_SIZE;
Print("test%s %s,", operand_size_code(), Rax());
data += PrintImmediate(data, operand_size());
if (check_for_stop) {
CheckPrintStop(data);
}
break;
}
case 0xD1: // fall through

View file

@ -0,0 +1,187 @@
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_COMPILER_ASSEMBLER_OBJECT_POOL_BUILDER_H_
#define RUNTIME_VM_COMPILER_ASSEMBLER_OBJECT_POOL_BUILDER_H_
#include "platform/globals.h"
#include "vm/bitfield.h"
#include "vm/hash_map.h"
namespace dart {
class Object;
namespace compiler {
class ExternalLabel;
bool IsSameObject(const Object& a, const Object& b);
struct ObjectPoolBuilderEntry {
enum Patchability {
kPatchable,
kNotPatchable,
};
enum EntryType {
kTaggedObject,
kImmediate,
kNativeFunction,
kNativeFunctionWrapper,
kNativeEntryData,
};
using TypeBits = BitField<uint8_t, EntryType, 0, 7>;
using PatchableBit = BitField<uint8_t, Patchability, TypeBits::kNextBit, 1>;
static inline uint8_t EncodeTraits(EntryType type, Patchability patchable) {
return TypeBits::encode(type) | PatchableBit::encode(patchable);
}
ObjectPoolBuilderEntry() : raw_value_(), entry_bits_(0), equivalence_() {}
ObjectPoolBuilderEntry(const Object* obj, Patchability patchable)
: ObjectPoolBuilderEntry(obj, obj, patchable) {}
ObjectPoolBuilderEntry(const Object* obj,
const Object* eqv,
Patchability patchable)
: obj_(obj),
entry_bits_(EncodeTraits(kTaggedObject, patchable)),
equivalence_(eqv) {}
ObjectPoolBuilderEntry(uword value, EntryType info, Patchability patchable)
: raw_value_(value),
entry_bits_(EncodeTraits(info, patchable)),
equivalence_() {}
EntryType type() const { return TypeBits::decode(entry_bits_); }
Patchability patchable() const { return PatchableBit::decode(entry_bits_); }
union {
const Object* obj_;
uword raw_value_;
};
uint8_t entry_bits_;
const Object* equivalence_;
};
// Pair type parameter for DirectChainedHashMap used for the constant pool.
class ObjIndexPair {
public:
// Typedefs needed for the DirectChainedHashMap template.
typedef ObjectPoolBuilderEntry Key;
typedef intptr_t Value;
typedef ObjIndexPair Pair;
static const intptr_t kNoIndex = -1;
ObjIndexPair()
: key_(reinterpret_cast<uword>(nullptr),
ObjectPoolBuilderEntry::kTaggedObject,
ObjectPoolBuilderEntry::kPatchable),
value_(kNoIndex) {}
ObjIndexPair(Key key, Value value) : value_(value) {
key_.entry_bits_ = key.entry_bits_;
if (key.type() == ObjectPoolBuilderEntry::kTaggedObject) {
key_.obj_ = key.obj_;
key_.equivalence_ = key.equivalence_;
} else {
key_.raw_value_ = key.raw_value_;
}
}
static Key KeyOf(Pair kv) { return kv.key_; }
static Value ValueOf(Pair kv) { return kv.value_; }
static intptr_t Hashcode(Key key);
static inline bool IsKeyEqual(Pair kv, Key key) {
if (kv.key_.entry_bits_ != key.entry_bits_) return false;
if (kv.key_.type() == ObjectPoolBuilderEntry::kTaggedObject) {
return IsSameObject(*kv.key_.obj_, *key.obj_) &&
IsSameObject(*kv.key_.equivalence_, *key.equivalence_);
}
return kv.key_.raw_value_ == key.raw_value_;
}
private:
Key key_;
Value value_;
};
class ObjectPoolBuilder : public ValueObject {
public:
ObjectPoolBuilder() : zone_(nullptr) {}
~ObjectPoolBuilder() {
if (zone_ != nullptr) {
Reset();
zone_ = nullptr;
}
}
// Clears all existing entries in this object pool builder.
//
// Note: Any code which has been compiled via this builder might use offsets
// into the pool which are not correct anymore.
void Reset();
// Initialize this object pool builder with a [zone].
//
// Any objects added later on will be referenced using handles from [zone].
void InitializeWithZone(Zone* zone) {
ASSERT(object_pool_.length() == 0);
ASSERT(zone_ == nullptr && zone != nullptr);
zone_ = zone;
}
intptr_t AddObject(const Object& obj,
ObjectPoolBuilderEntry::Patchability patchable =
ObjectPoolBuilderEntry::kNotPatchable);
intptr_t AddImmediate(uword imm);
intptr_t FindObject(const Object& obj,
ObjectPoolBuilderEntry::Patchability patchable =
ObjectPoolBuilderEntry::kNotPatchable);
intptr_t FindObject(const Object& obj, const Object& equivalence);
intptr_t FindImmediate(uword imm);
intptr_t FindNativeFunction(const ExternalLabel* label,
ObjectPoolBuilderEntry::Patchability patchable);
intptr_t FindNativeFunctionWrapper(
const ExternalLabel* label,
ObjectPoolBuilderEntry::Patchability patchable);
intptr_t CurrentLength() const { return object_pool_.length(); }
ObjectPoolBuilderEntry& EntryAt(intptr_t i) { return object_pool_[i]; }
const ObjectPoolBuilderEntry& EntryAt(intptr_t i) const {
return object_pool_[i];
}
intptr_t AddObject(ObjectPoolBuilderEntry entry);
private:
intptr_t FindObject(ObjectPoolBuilderEntry entry);
// Objects and jump targets.
GrowableArray<ObjectPoolBuilderEntry> object_pool_;
// Hashmap for fast lookup in object pool.
DirectChainedHashMap<ObjIndexPair> object_pool_index_table_;
// The zone used for allocating the handles we keep in the map and array (or
// NULL, in which case allocations happen using the zone active at the point
// of insertion).
Zone* zone_;
};
} // namespace compiler
} // namespace dart
namespace dart {
using compiler::ObjectPoolBuilder;
}
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_OBJECT_POOL_BUILDER_H_
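To make the trait encoding in this new header concrete: TypeBits and PatchableBit pack the entry type into the low seven bits and the patchability flag into the next bit of a single byte, which is why ObjIndexPair can reject mismatched entries by comparing entry_bits_ alone. A hand-rolled sketch of the same packing, using plain shifts instead of the BitField helper and intended as illustration only:

#include <cassert>
#include <cstdint>

enum EntryType : uint8_t { kTaggedObject, kImmediate, kNativeFunction };
enum Patchability : uint8_t { kPatchable, kNotPatchable };

constexpr unsigned kTypeBitsPos = 0, kTypeBitsSize = 7;
constexpr unsigned kPatchablePos = kTypeBitsPos + kTypeBitsSize;  // bit 7

uint8_t EncodeTraits(EntryType type, Patchability patchable) {
  return static_cast<uint8_t>((type << kTypeBitsPos) |
                              (patchable << kPatchablePos));
}

EntryType DecodeType(uint8_t bits) {
  return static_cast<EntryType>(bits & ((1u << kTypeBitsSize) - 1));
}

Patchability DecodePatchable(uint8_t bits) {
  return static_cast<Patchability>(bits >> kPatchablePos);
}

int main() {
  const uint8_t bits = EncodeTraits(kImmediate, kNotPatchable);
  assert(DecodeType(bits) == kImmediate);
  assert(DecodePatchable(bits) == kNotPatchable);
  // Entries differing only in patchability never compare equal, mirroring
  // the entry_bits_ check in ObjIndexPair::IsKeyEqual.
  assert(bits != EncodeTraits(kImmediate, kPatchable));
  return 0;
}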

View file

@ -80,7 +80,8 @@ void CompilerDeoptInfo::AllocateIncomingParametersRecursive(
if (it.CurrentLocation().IsInvalid() &&
it.CurrentValue()->definition()->IsPushArgument()) {
it.SetCurrentLocation(Location::StackSlot(
compiler_frame_layout.FrameSlotForVariableIndex(-*stack_height)));
compiler::target::frame_layout.FrameSlotForVariableIndex(
-*stack_height)));
(*stack_height)++;
}
}
@ -356,7 +357,8 @@ intptr_t FlowGraphCompiler::UncheckedEntryOffset() const {
#if defined(DART_PRECOMPILER)
static intptr_t LocationToStackIndex(const Location& src) {
ASSERT(src.HasStackIndex());
return -compiler_frame_layout.VariableIndexForFrameSlot(src.stack_index());
return -compiler::target::frame_layout.VariableIndexForFrameSlot(
src.stack_index());
}
static CatchEntryMove CatchEntryMoveFor(Assembler* assembler,
@ -369,7 +371,7 @@ static CatchEntryMove CatchEntryMoveFor(Assembler* assembler,
return CatchEntryMove();
}
const intptr_t pool_index =
assembler->object_pool_wrapper().FindObject(src.constant());
assembler->object_pool_builder().FindObject(src.constant());
return CatchEntryMove::FromSlot(CatchEntryMove::SourceKind::kConstant,
pool_index, dst_index);
}
@ -1073,7 +1075,7 @@ void FlowGraphCompiler::FinalizeVarDescriptors(const Code& code) {
info.scope_id = 0;
info.begin_pos = TokenPosition::kMinSource;
info.end_pos = TokenPosition::kMinSource;
info.set_index(compiler_frame_layout.FrameSlotForVariable(
info.set_index(compiler::target::frame_layout.FrameSlotForVariable(
parsed_function().current_context_var()));
var_descs.SetVar(0, Symbols::CurrentContextVar(), &info);
}

View file

@ -47,10 +47,11 @@ void FlowGraphCompiler::ArchSpecificInitialization() {
const auto& array_stub =
Code::ZoneHandle(object_store->array_write_barrier_stub());
if (!array_stub.InVMHeap()) {
assembler_->invoke_array_write_barrier_ = [&](Condition condition) {
AddPcRelativeCallStubTarget(array_stub);
assembler_->GenerateUnRelocatedPcRelativeCall(condition);
};
assembler_->generate_invoke_array_write_barrier_ =
[&](Condition condition) {
AddPcRelativeCallStubTarget(array_stub);
assembler_->GenerateUnRelocatedPcRelativeCall(condition);
};
}
}
}
@ -770,11 +771,11 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
// (see runtime/vm/runtime_entry.cc:TypeCheck). It will use pattern matching
// on the call site to find out at which pool index the destination name is
// located.
const intptr_t sub_type_cache_index = __ object_pool_wrapper().AddObject(
const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
Object::null_object(), ObjectPool::Patchability::kPatchable);
const intptr_t sub_type_cache_offset =
ObjectPool::element_offset(sub_type_cache_index) - kHeapObjectTag;
const intptr_t dst_name_index = __ object_pool_wrapper().AddObject(
const intptr_t dst_name_index = __ object_pool_builder().AddObject(
dst_name, ObjectPool::Patchability::kPatchable);
ASSERT((sub_type_cache_index + 1) == dst_name_index);
ASSERT(__ constant_pool_allowed());
@ -809,9 +810,9 @@ void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
const Code& build_method_extractor = Code::ZoneHandle(
isolate()->object_store()->build_method_extractor_code());
const intptr_t stub_index = __ object_pool_wrapper().AddObject(
const intptr_t stub_index = __ object_pool_builder().AddObject(
build_method_extractor, ObjectPool::Patchability::kNotPatchable);
const intptr_t function_index = __ object_pool_wrapper().AddObject(
const intptr_t function_index = __ object_pool_builder().AddObject(
extracted_method, ObjectPool::Patchability::kNotPatchable);
// We use a custom pool register to preserve caller PP.
@ -882,8 +883,8 @@ void FlowGraphCompiler::EmitFrameEntry() {
}
__ CompareImmediate(R3, GetOptimizationThreshold());
ASSERT(function_reg == R8);
__ Branch(StubCode::OptimizeFunction(), ObjectPool::kNotPatchable, new_pp,
GE);
__ Branch(StubCode::OptimizeFunction(),
compiler::ObjectPoolBuilderEntry::kNotPatchable, new_pp, GE);
}
__ Comment("Enter frame");
if (flow_graph().IsCompiledForOsr()) {
@ -906,7 +907,7 @@ void FlowGraphCompiler::EmitPrologue() {
intptr_t args_desc_slot = -1;
if (parsed_function().has_arg_desc_var()) {
args_desc_slot = compiler_frame_layout.FrameSlotForVariable(
args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
parsed_function().arg_desc_var());
}
@ -916,7 +917,7 @@ void FlowGraphCompiler::EmitPrologue() {
}
for (intptr_t i = 0; i < num_locals; ++i) {
const intptr_t slot_index =
compiler_frame_layout.FrameSlotForVariableIndex(-i);
compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
__ StoreToOffset(kWord, value_reg, FP, slot_index * kWordSize);
}

View file

@ -44,7 +44,7 @@ void FlowGraphCompiler::ArchSpecificInitialization() {
const auto& array_stub =
Code::ZoneHandle(object_store->array_write_barrier_stub());
if (!array_stub.InVMHeap()) {
assembler_->invoke_array_write_barrier_ = [&]() {
assembler_->generate_invoke_array_write_barrier_ = [&]() {
AddPcRelativeCallStubTarget(array_stub);
assembler_->GenerateUnRelocatedPcRelativeCall();
};
@ -748,11 +748,11 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
// (see runtime/vm/runtime_entry.cc:TypeCheck). It will use pattern matching
// on the call site to find out at which pool index the destination name is
// located.
const intptr_t sub_type_cache_index = __ object_pool_wrapper().AddObject(
const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
Object::null_object(), ObjectPool::Patchability::kPatchable);
const intptr_t sub_type_cache_offset =
ObjectPool::element_offset(sub_type_cache_index);
const intptr_t dst_name_index = __ object_pool_wrapper().AddObject(
const intptr_t dst_name_index = __ object_pool_builder().AddObject(
dst_name, ObjectPool::Patchability::kPatchable);
ASSERT((sub_type_cache_index + 1) == dst_name_index);
ASSERT(__ constant_pool_allowed());
@ -786,9 +786,9 @@ void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
const Code& build_method_extractor = Code::ZoneHandle(
isolate()->object_store()->build_method_extractor_code());
const intptr_t stub_index = __ object_pool_wrapper().AddObject(
const intptr_t stub_index = __ object_pool_builder().AddObject(
build_method_extractor, ObjectPool::Patchability::kNotPatchable);
const intptr_t function_index = __ object_pool_wrapper().AddObject(
const intptr_t function_index = __ object_pool_builder().AddObject(
extracted_method, ObjectPool::Patchability::kNotPatchable);
// We use a custom pool register to preserve caller PP.
@ -918,7 +918,7 @@ void FlowGraphCompiler::CompileGraph() {
intptr_t args_desc_slot = -1;
if (parsed_function().has_arg_desc_var()) {
args_desc_slot = compiler_frame_layout.FrameSlotForVariable(
args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
parsed_function().arg_desc_var());
}
@ -928,7 +928,7 @@ void FlowGraphCompiler::CompileGraph() {
}
for (intptr_t i = 0; i < num_locals; ++i) {
const intptr_t slot_index =
compiler_frame_layout.FrameSlotForVariableIndex(-i);
compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
__ StoreToOffset(value_reg, FP, slot_index * kWordSize);
}
@ -1111,7 +1111,7 @@ void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
ASSERT(ic_data.NumArgsTested() == 1);
const Code& initial_stub = StubCode::ICCallThroughFunction();
auto& op = __ object_pool_wrapper();
auto& op = __ object_pool_builder();
__ Comment("SwitchableCall");
__ LoadFromOffset(R0, SP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize);

View file

@ -112,7 +112,8 @@ RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
builder->AddCopy(
NULL,
Location::StackSlot(
compiler_frame_layout.FrameSlotForVariableIndex(-stack_height)),
compiler::target::frame_layout.FrameSlotForVariableIndex(
-stack_height)),
slot_ix++);
}
@ -282,7 +283,7 @@ void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
}
void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
__ Move(0, -(1 + compiler_frame_layout.param_end_from_fp));
__ Move(0, -(1 + compiler::target::frame_layout.param_end_from_fp));
ASSERT(offset % kWordSize == 0);
if (Utils::IsInt(8, offset / kWordSize)) {
__ LoadField(0, 0, offset / kWordSize);
@ -294,8 +295,8 @@ void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
}
void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
__ Move(0, -(2 + compiler_frame_layout.param_end_from_fp));
__ Move(1, -(1 + compiler_frame_layout.param_end_from_fp));
__ Move(0, -(2 + compiler::target::frame_layout.param_end_from_fp));
__ Move(1, -(1 + compiler::target::frame_layout.param_end_from_fp));
ASSERT(offset % kWordSize == 0);
if (Utils::IsInt(8, offset / kWordSize)) {
__ StoreField(0, offset / kWordSize, 1);
@ -328,8 +329,9 @@ void FlowGraphCompiler::EmitFrameEntry() {
if (parsed_function().has_arg_desc_var()) {
// TODO(kustermann): If dbc simulator put the args_desc_ into the
// _special_regs, we could replace these 3 with the MoveSpecial bytecode.
const intptr_t slot_index = compiler_frame_layout.FrameSlotForVariable(
parsed_function().arg_desc_var());
const intptr_t slot_index =
compiler::target::frame_layout.FrameSlotForVariable(
parsed_function().arg_desc_var());
__ LoadArgDescriptor();
__ StoreLocal(LocalVarIndex(0, slot_index));
__ Drop(1);
@ -369,7 +371,8 @@ void ParallelMoveResolver::EmitMove(int index) {
// Only allow access to the arguments (which have in the non-inverted stack
// positive indices).
ASSERT(source.base_reg() == FPREG);
ASSERT(source.stack_index() > compiler_frame_layout.param_end_from_fp);
ASSERT(source.stack_index() >
compiler::target::frame_layout.param_end_from_fp);
__ Move(destination.reg(), -source.stack_index());
} else if (source.IsRegister() && destination.IsRegister()) {
__ Move(destination.reg(), source.reg());

View file

@ -806,7 +806,7 @@ void FlowGraphCompiler::CompileGraph() {
intptr_t args_desc_slot = -1;
if (parsed_function().has_arg_desc_var()) {
args_desc_slot = compiler_frame_layout.FrameSlotForVariable(
args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
parsed_function().arg_desc_var());
}
@ -818,7 +818,7 @@ void FlowGraphCompiler::CompileGraph() {
}
for (intptr_t i = 0; i < num_locals; ++i) {
const intptr_t slot_index =
compiler_frame_layout.FrameSlotForVariableIndex(-i);
compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : EAX;
__ movl(Address(EBP, slot_index * kWordSize), value_reg);
}

View file

@ -43,7 +43,7 @@ void FlowGraphCompiler::ArchSpecificInitialization() {
const auto& array_stub =
Code::ZoneHandle(object_store->array_write_barrier_stub());
if (!array_stub.InVMHeap()) {
assembler_->invoke_array_write_barrier_ = [&]() {
assembler_->generate_invoke_array_write_barrier_ = [&]() {
AddPcRelativeCallStubTarget(array_stub);
assembler_->GenerateUnRelocatedPcRelativeCall();
};
@ -761,12 +761,12 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
// (see runtime/vm/runtime_entry.cc:TypeCheck). It will use pattern matching
// on the call site to find out at which pool index the destination name is
// located.
const intptr_t sub_type_cache_index = __ object_pool_wrapper().AddObject(
Object::null_object(), ObjectPool::Patchability::kPatchable);
const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
Object::null_object(), compiler::ObjectPoolBuilderEntry::kPatchable);
const intptr_t sub_type_cache_offset =
ObjectPool::element_offset(sub_type_cache_index) - kHeapObjectTag;
const intptr_t dst_name_index = __ object_pool_wrapper().AddObject(
dst_name, ObjectPool::Patchability::kPatchable);
const intptr_t dst_name_index = __ object_pool_builder().AddObject(
dst_name, compiler::ObjectPoolBuilderEntry::kPatchable);
ASSERT((sub_type_cache_index + 1) == dst_name_index);
ASSERT(__ constant_pool_allowed());
@ -805,10 +805,10 @@ void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
isolate()->object_store()->build_method_extractor_code());
ASSERT(!build_method_extractor.IsNull());
const intptr_t stub_index = __ object_pool_wrapper().AddObject(
build_method_extractor, ObjectPool::Patchability::kNotPatchable);
const intptr_t function_index = __ object_pool_wrapper().AddObject(
extracted_method, ObjectPool::Patchability::kNotPatchable);
const intptr_t stub_index = __ object_pool_builder().AddObject(
build_method_extractor, compiler::ObjectPoolBuilderEntry::kNotPatchable);
const intptr_t function_index = __ object_pool_builder().AddObject(
extracted_method, compiler::ObjectPoolBuilderEntry::kNotPatchable);
// We use a custom pool register to preserve caller PP.
Register kPoolReg = RAX;
@ -901,7 +901,7 @@ void FlowGraphCompiler::EmitPrologue() {
intptr_t args_desc_slot = -1;
if (parsed_function().has_arg_desc_var()) {
args_desc_slot = compiler_frame_layout.FrameSlotForVariable(
args_desc_slot = compiler::target::frame_layout.FrameSlotForVariable(
parsed_function().arg_desc_var());
}
@ -911,7 +911,7 @@ void FlowGraphCompiler::EmitPrologue() {
}
for (intptr_t i = 0; i < num_locals; ++i) {
const intptr_t slot_index =
compiler_frame_layout.FrameSlotForVariableIndex(-i);
compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : RAX;
__ movq(Address(RBP, slot_index * kWordSize), value_reg);
}
@ -1307,13 +1307,13 @@ void ParallelMoveResolver::EmitMove(int index) {
} else {
ASSERT(destination.IsStackSlot());
ASSERT((destination.base_reg() != FPREG) ||
((-compiler_frame_layout.VariableIndexForFrameSlot(
((-compiler::target::frame_layout.VariableIndexForFrameSlot(
destination.stack_index())) < compiler_->StackSize()));
__ movq(destination.ToStackSlotAddress(), source.reg());
}
} else if (source.IsStackSlot()) {
ASSERT((source.base_reg() != FPREG) ||
((-compiler_frame_layout.VariableIndexForFrameSlot(
((-compiler::target::frame_layout.VariableIndexForFrameSlot(
source.stack_index())) < compiler_->StackSize()));
if (destination.IsRegister()) {
__ movq(destination.reg(), source.ToStackSlotAddress());

View file

@ -4537,7 +4537,7 @@ void CheckNullInstr::AddMetadataForRuntimeCall(CheckNullInstr* check_null,
FlowGraphCompiler* compiler) {
const String& function_name = check_null->function_name();
const intptr_t name_index =
compiler->assembler()->object_pool_wrapper().FindObject(function_name);
compiler->assembler()->object_pool_builder().FindObject(function_name);
compiler->AddNullCheck(compiler->assembler()->CodeSize(),
check_null->token_pos(), name_index);
}

View file

@ -127,7 +127,8 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label stack_ok;
__ Comment("Stack Check");
const intptr_t fp_sp_dist =
(compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
(compiler::target::frame_layout.first_local_from_fp + 1 -
compiler->StackSize()) *
kWordSize;
ASSERT(fp_sp_dist <= 0);
__ sub(R2, SP, Operand(FP));
@ -282,9 +283,8 @@ LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register result = locs()->out(0).reg();
__ LoadFromOffset(
kWord, result, FP,
compiler_frame_layout.FrameOffsetInBytesForVariable(&local()));
__ LoadFromOffset(kWord, result, FP,
compiler::target::FrameOffsetInBytesForVariable(&local()));
}
LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
@ -297,9 +297,8 @@ void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
ASSERT(result == value); // Assert that register assignment is correct.
__ StoreToOffset(
kWord, value, FP,
compiler_frame_layout.FrameOffsetInBytesForVariable(&local()));
__ StoreToOffset(kWord, value, FP,
compiler::target::FrameOffsetInBytesForVariable(&local()));
}
LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
@ -967,9 +966,10 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
__ LoadImmediate(R1, argc_tag);
ExternalLabel label(entry);
__ LoadNativeEntry(
R9, &label,
link_lazily() ? ObjectPool::kPatchable : ObjectPool::kNotPatchable);
__ LoadNativeEntry(R9, &label,
link_lazily()
? compiler::ObjectPoolBuilderEntry::kPatchable
: compiler::ObjectPoolBuilderEntry::kNotPatchable);
if (link_lazily()) {
compiler->GeneratePatchableCall(token_pos(), *stub,
RawPcDescriptors::kOther, locs());
@ -2965,21 +2965,22 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Restore SP from FP as we are coming from a throw and the code for
// popping arguments has not been run.
const intptr_t fp_sp_dist =
(compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
(compiler::target::frame_layout.first_local_from_fp + 1 -
compiler->StackSize()) *
kWordSize;
ASSERT(fp_sp_dist <= 0);
__ AddImmediate(SP, FP, fp_sp_dist);
if (!compiler->is_optimizing()) {
if (raw_exception_var_ != nullptr) {
__ StoreToOffset(kWord, kExceptionObjectReg, FP,
compiler_frame_layout.FrameOffsetInBytesForVariable(
raw_exception_var_));
__ StoreToOffset(
kWord, kExceptionObjectReg, FP,
compiler::target::FrameOffsetInBytesForVariable(raw_exception_var_));
}
if (raw_stacktrace_var_ != nullptr) {
__ StoreToOffset(kWord, kStackTraceObjectReg, FP,
compiler_frame_layout.FrameOffsetInBytesForVariable(
raw_stacktrace_var_));
__ StoreToOffset(
kWord, kStackTraceObjectReg, FP,
compiler::target::FrameOffsetInBytesForVariable(raw_stacktrace_var_));
}
}
}
@ -3029,11 +3030,9 @@ class CheckStackOverflowSlowPath
compiler->pending_deoptimization_env_ = env;
if (using_shared_stub) {
uword entry_point_offset =
instruction()->locs()->live_registers()->FpuRegisterCount() > 0
? Thread::stack_overflow_shared_with_fpu_regs_entry_point_offset()
: Thread::
stack_overflow_shared_without_fpu_regs_entry_point_offset();
const uword entry_point_offset =
Thread::stack_overflow_shared_stub_entry_point_offset(
instruction()->locs()->live_registers()->FpuRegisterCount() > 0);
__ ldr(LR, Address(THR, entry_point_offset));
__ blx(LR);
compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
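The back-ends previously open-coded the with/without-FPU choice; the new Thread helper centralizes it. A plausible shape for that helper, assuming it just wraps the two entry-point getters visible in the removed lines (the actual definition lives in vm/thread.h and is not shown here):
// In class dart::Thread (sketch):
static uword stack_overflow_shared_stub_entry_point_offset(bool with_fpu_regs) {
  return with_fpu_regs
             ? stack_overflow_shared_with_fpu_regs_entry_point_offset()
             : stack_overflow_shared_without_fpu_regs_entry_point_offset();
}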

View file

@ -127,7 +127,8 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label stack_ok;
__ Comment("Stack Check");
const intptr_t fp_sp_dist =
(compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
(compiler::target::frame_layout.first_local_from_fp + 1 -
compiler->StackSize()) *
kWordSize;
ASSERT(fp_sp_dist <= 0);
__ sub(R2, SP, Operand(FP));
@ -281,9 +282,8 @@ LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
void LoadLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register result = locs()->out(0).reg();
__ LoadFromOffset(
result, FP,
compiler_frame_layout.FrameOffsetInBytesForVariable(&local()));
__ LoadFromOffset(result, FP,
compiler::target::FrameOffsetInBytesForVariable(&local()));
}
LocationSummary* StoreLocalInstr::MakeLocationSummary(Zone* zone,
@ -296,8 +296,8 @@ void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
ASSERT(result == value); // Assert that register assignment is correct.
__ StoreToOffset(
value, FP, compiler_frame_layout.FrameOffsetInBytesForVariable(&local()));
__ StoreToOffset(value, FP,
compiler::target::FrameOffsetInBytesForVariable(&local()));
}
LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
@ -857,9 +857,9 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
__ LoadImmediate(R1, argc_tag);
ExternalLabel label(entry);
__ LoadNativeEntry(
R5, &label,
link_lazily() ? ObjectPool::kPatchable : ObjectPool::kNotPatchable);
__ LoadNativeEntry(R5, &label,
link_lazily() ? ObjectPool::Patchability::kPatchable
: ObjectPool::Patchability::kNotPatchable);
if (link_lazily()) {
compiler->GeneratePatchableCall(token_pos(), *stub,
RawPcDescriptors::kOther, locs());
@ -2653,21 +2653,22 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Restore SP from FP as we are coming from a throw and the code for
// popping arguments has not been run.
const intptr_t fp_sp_dist =
(compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
(compiler::target::frame_layout.first_local_from_fp + 1 -
compiler->StackSize()) *
kWordSize;
ASSERT(fp_sp_dist <= 0);
__ AddImmediate(SP, FP, fp_sp_dist);
if (!compiler->is_optimizing()) {
if (raw_exception_var_ != nullptr) {
__ StoreToOffset(kExceptionObjectReg, FP,
compiler_frame_layout.FrameOffsetInBytesForVariable(
raw_exception_var_));
__ StoreToOffset(
kExceptionObjectReg, FP,
compiler::target::FrameOffsetInBytesForVariable(raw_exception_var_));
}
if (raw_stacktrace_var_ != nullptr) {
__ StoreToOffset(kStackTraceObjectReg, FP,
compiler_frame_layout.FrameOffsetInBytesForVariable(
raw_stacktrace_var_));
__ StoreToOffset(
kStackTraceObjectReg, FP,
compiler::target::FrameOffsetInBytesForVariable(raw_stacktrace_var_));
}
}
}
@ -2731,12 +2732,9 @@ class CheckStackOverflowSlowPath
__ GenerateUnRelocatedPcRelativeCall();
} else {
uword entry_point_offset =
locs->live_registers()->FpuRegisterCount() > 0
? Thread::
stack_overflow_shared_with_fpu_regs_entry_point_offset()
: Thread::
stack_overflow_shared_without_fpu_regs_entry_point_offset();
const uword entry_point_offset =
Thread::stack_overflow_shared_stub_entry_point_offset(
locs->live_registers()->FpuRegisterCount() > 0);
__ ldr(LR, Address(THR, entry_point_offset));
__ blr(LR);
}

View file

@ -332,14 +332,14 @@ EMIT_NATIVE_CODE(PushArgument, 1) {
EMIT_NATIVE_CODE(LoadLocal, 0) {
ASSERT(!compiler->is_optimizing());
const intptr_t slot_index =
compiler_frame_layout.FrameSlotForVariable(&local());
compiler::target::frame_layout.FrameSlotForVariable(&local());
__ Push(LocalVarIndex(0, slot_index));
}
EMIT_NATIVE_CODE(StoreLocal, 0) {
ASSERT(!compiler->is_optimizing());
const intptr_t slot_index =
compiler_frame_layout.FrameSlotForVariable(&local());
compiler::target::frame_layout.FrameSlotForVariable(&local());
if (HasTemp()) {
__ StoreLocal(LocalVarIndex(0, slot_index));
} else {
@ -984,13 +984,13 @@ EMIT_NATIVE_CODE(NativeCall,
const ExternalLabel trampoline_label(reinterpret_cast<uword>(trampoline));
const intptr_t trampoline_kidx =
__ object_pool_wrapper().FindNativeFunctionWrapper(
&trampoline_label, ObjectPool::kPatchable);
__ object_pool_builder().FindNativeFunctionWrapper(
&trampoline_label, ObjectPool::Patchability::kPatchable);
const ExternalLabel label(reinterpret_cast<uword>(function));
const intptr_t target_kidx = __ object_pool_wrapper().FindNativeFunction(
&label, ObjectPool::kPatchable);
const intptr_t target_kidx = __ object_pool_builder().FindNativeFunction(
&label, ObjectPool::Patchability::kPatchable);
const intptr_t argc_tag_kidx =
__ object_pool_wrapper().FindImmediate(static_cast<uword>(argc_tag));
__ object_pool_builder().FindImmediate(static_cast<uword>(argc_tag));
__ NativeCall(trampoline_kidx, target_kidx, argc_tag_kidx);
compiler->RecordSafepoint(locs());
compiler->AddCurrentDescriptor(RawPcDescriptors::kOther, DeoptId::kNone,
@ -1199,13 +1199,13 @@ EMIT_NATIVE_CODE(CatchBlockEntry, 0) {
if (!compiler->is_optimizing()) {
if (raw_exception_var_ != nullptr) {
__ MoveSpecial(
LocalVarIndex(0, compiler_frame_layout.FrameSlotForVariable(
LocalVarIndex(0, compiler::target::frame_layout.FrameSlotForVariable(
raw_exception_var_)),
Simulator::kExceptionSpecialIndex);
}
if (raw_stacktrace_var_ != nullptr) {
__ MoveSpecial(
LocalVarIndex(0, compiler_frame_layout.FrameSlotForVariable(
LocalVarIndex(0, compiler::target::frame_layout.FrameSlotForVariable(
raw_stacktrace_var_)),
Simulator::kStackTraceSpecialIndex);
}

View file

@ -118,7 +118,8 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("Stack Check");
Label done;
const intptr_t fp_sp_dist =
(compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
(compiler::target::frame_layout.first_local_from_fp + 1 -
compiler->StackSize()) *
kWordSize;
ASSERT(fp_sp_dist <= 0);
__ movl(EDI, ESP);
@ -136,7 +137,7 @@ LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 0;
const intptr_t stack_index =
compiler_frame_layout.FrameSlotForVariable(&local());
compiler::target::frame_layout.FrameSlotForVariable(&local());
return LocationSummary::Make(zone, kNumInputs,
Location::StackSlot(stack_index),
LocationSummary::kNoCall);
@ -158,9 +159,9 @@ void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register result = locs()->out(0).reg();
ASSERT(result == value); // Assert that register assignment is correct.
__ movl(Address(EBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
&local())),
value);
__ movl(
Address(EBP, compiler::target::FrameOffsetInBytesForVariable(&local())),
value);
}
LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
@ -2544,19 +2545,20 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Restore ESP from EBP as we are coming from a throw and the code for
// popping arguments has not been run.
const intptr_t fp_sp_dist =
(compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
(compiler::target::frame_layout.first_local_from_fp + 1 -
compiler->StackSize()) *
kWordSize;
ASSERT(fp_sp_dist <= 0);
__ leal(ESP, Address(EBP, fp_sp_dist));
if (!compiler->is_optimizing()) {
if (raw_exception_var_ != nullptr) {
__ movl(Address(EBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
__ movl(Address(EBP, compiler::target::FrameOffsetInBytesForVariable(
raw_exception_var_)),
kExceptionObjectReg);
}
if (raw_stacktrace_var_ != nullptr) {
__ movl(Address(EBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
__ movl(Address(EBP, compiler::target::FrameOffsetInBytesForVariable(
raw_stacktrace_var_)),
kStackTraceObjectReg);
}
@ -5989,8 +5991,8 @@ void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register target_reg = locs()->temp_slot(0)->reg();
// Load code object from frame.
__ movl(target_reg,
Address(EBP, compiler_frame_layout.code_from_fp * kWordSize));
__ movl(target_reg, Address(EBP, compiler::target::frame_layout.code_from_fp *
kWordSize));
// Load instructions object (active_instructions and Code::entry_point() may
// not point to this instruction object any more; see Code::DisableDartCode).
__ movl(target_reg,

View file

@ -118,7 +118,8 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("Stack Check");
Label done;
const intptr_t fp_sp_dist =
(compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
(compiler::target::frame_layout.first_local_from_fp + 1 -
compiler->StackSize()) *
kWordSize;
ASSERT(fp_sp_dist <= 0);
__ movq(RDI, RSP);
@ -238,7 +239,7 @@ LocationSummary* LoadLocalInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 0;
const intptr_t stack_index =
compiler_frame_layout.FrameSlotForVariable(&local());
compiler::target::frame_layout.FrameSlotForVariable(&local());
return LocationSummary::Make(zone, kNumInputs,
Location::StackSlot(stack_index),
LocationSummary::kNoCall);
@ -260,9 +261,9 @@ void StoreLocalInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Register result = locs()->out(0).reg();
ASSERT(result == value); // Assert that register assignment is correct.
__ movq(Address(RBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
&local())),
value);
__ movq(
Address(RBP, compiler::target::FrameOffsetInBytesForVariable(&local())),
value);
}
LocationSummary* ConstantInstr::MakeLocationSummary(Zone* zone,
@ -853,7 +854,8 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (link_lazily()) {
stub = &StubCode::CallBootstrapNative();
ExternalLabel label(NativeEntry::LinkNativeCallEntry());
__ LoadNativeEntry(RBX, &label, ObjectPool::kPatchable);
__ LoadNativeEntry(RBX, &label,
compiler::ObjectPoolBuilderEntry::kPatchable);
compiler->GeneratePatchableCall(token_pos(), *stub,
RawPcDescriptors::kOther, locs());
} else {
@ -865,7 +867,8 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
stub = &StubCode::CallNoScopeNative();
}
const ExternalLabel label(reinterpret_cast<uword>(native_c_function()));
__ LoadNativeEntry(RBX, &label, ObjectPool::kNotPatchable);
__ LoadNativeEntry(RBX, &label,
compiler::ObjectPoolBuilderEntry::kNotPatchable);
compiler->GenerateCall(token_pos(), *stub, RawPcDescriptors::kOther,
locs());
}
@ -2668,19 +2671,20 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Restore RSP from RBP as we are coming from a throw and the code for
// popping arguments has not been run.
const intptr_t fp_sp_dist =
(compiler_frame_layout.first_local_from_fp + 1 - compiler->StackSize()) *
(compiler::target::frame_layout.first_local_from_fp + 1 -
compiler->StackSize()) *
kWordSize;
ASSERT(fp_sp_dist <= 0);
__ leaq(RSP, Address(RBP, fp_sp_dist));
if (!compiler->is_optimizing()) {
if (raw_exception_var_ != nullptr) {
__ movq(Address(RBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
__ movq(Address(RBP, compiler::target::FrameOffsetInBytesForVariable(
raw_exception_var_)),
kExceptionObjectReg);
}
if (raw_stacktrace_var_ != nullptr) {
__ movq(Address(RBP, compiler_frame_layout.FrameOffsetInBytesForVariable(
__ movq(Address(RBP, compiler::target::FrameOffsetInBytesForVariable(
raw_stacktrace_var_)),
kStackTraceObjectReg);
}
@ -2730,11 +2734,9 @@ class CheckStackOverflowSlowPath
compiler->pending_deoptimization_env_ = env;
if (using_shared_stub) {
uword entry_point_offset =
instruction()->locs()->live_registers()->FpuRegisterCount() > 0
? Thread::stack_overflow_shared_with_fpu_regs_entry_point_offset()
: Thread::
stack_overflow_shared_without_fpu_regs_entry_point_offset();
const uword entry_point_offset =
Thread::stack_overflow_shared_stub_entry_point_offset(
instruction()->locs()->live_registers()->FpuRegisterCount() > 0);
__ call(Address(THR, entry_point_offset));
compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
compiler->RecordCatchEntryMoves();
@ -6185,7 +6187,7 @@ void IndirectGotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(__ CodeSize() == entry_to_rip_offset);
}
// Load from [current frame pointer] + compiler_frame_layout.code_from_fp.
// Load from FP+compiler::target::frame_layout.code_from_fp.
// Calculate the final absolute address.
if (offset()->definition()->representation() == kTagged) {

View file

@ -474,7 +474,7 @@ void LiveRange::Print() {
assigned_location().Print();
if (spill_slot_.HasStackIndex()) {
const intptr_t stack_slot =
-compiler_frame_layout.VariableIndexForFrameSlot(
-compiler::target::frame_layout.VariableIndexForFrameSlot(
spill_slot_.stack_index());
THR_Print(" allocated spill slot: %" Pd "", stack_slot);
}
@ -750,7 +750,8 @@ void FlowGraphAllocator::ProcessInitialDefinition(Definition* defn,
}
#endif // defined(TARGET_ARCH_DBC)
if (param->base_reg() == FPREG) {
slot_index = compiler_frame_layout.FrameSlotForVariableIndex(-slot_index);
slot_index =
compiler::target::frame_layout.FrameSlotForVariableIndex(-slot_index);
}
range->set_assigned_location(
Location::StackSlot(slot_index, param->base_reg()));
@ -793,7 +794,8 @@ void FlowGraphAllocator::ProcessInitialDefinition(Definition* defn,
ConvertAllUses(range);
Location spill_slot = range->spill_slot();
if (spill_slot.IsStackSlot() && spill_slot.base_reg() == FPREG &&
spill_slot.stack_index() <= compiler_frame_layout.first_local_from_fp) {
spill_slot.stack_index() <=
compiler::target::frame_layout.first_local_from_fp) {
// On entry to the function, range is stored on the stack above the FP in
// the same space which is used for spill slots. Update spill slot state to
// reflect that and prevent register allocator from reusing this space as a
@ -2039,15 +2041,16 @@ void FlowGraphAllocator::AllocateSpillSlotFor(LiveRange* range) {
// Assign spill slot to the range.
if (register_kind_ == Location::kRegister) {
const intptr_t slot_index =
compiler_frame_layout.FrameSlotForVariableIndex(-idx);
compiler::target::frame_layout.FrameSlotForVariableIndex(-idx);
range->set_spill_slot(Location::StackSlot(slot_index));
} else {
// We use the index of the slot with the lowest address as an index for the
// FPU register spill slot. In terms of indexes this relation is inverted:
// so we have to take the highest index.
const intptr_t slot_idx = compiler_frame_layout.FrameSlotForVariableIndex(
-(cpu_spill_slot_count_ + idx * kDoubleSpillFactor +
(kDoubleSpillFactor - 1)));
const intptr_t slot_idx =
compiler::target::frame_layout.FrameSlotForVariableIndex(
-(cpu_spill_slot_count_ + idx * kDoubleSpillFactor +
(kDoubleSpillFactor - 1)));
Location location;
if ((range->representation() == kUnboxedFloat32x4) ||
@ -2069,7 +2072,7 @@ void FlowGraphAllocator::MarkAsObjectAtSafepoints(LiveRange* range) {
Location spill_slot = range->spill_slot();
intptr_t stack_index = spill_slot.stack_index();
if (spill_slot.base_reg() == FPREG) {
stack_index = -compiler_frame_layout.VariableIndexForFrameSlot(
stack_index = -compiler::target::frame_layout.VariableIndexForFrameSlot(
spill_slot.stack_index());
}
ASSERT(stack_index >= 0);

View file

@ -233,20 +233,20 @@ Location Location::RemapForSlowPath(Definition* def,
intptr_t index = cpu_reg_slots[reg()];
ASSERT(index >= 0);
return Location::StackSlot(
compiler_frame_layout.FrameSlotForVariableIndex(-index));
compiler::target::frame_layout.FrameSlotForVariableIndex(-index));
} else if (IsFpuRegister()) {
intptr_t index = fpu_reg_slots[fpu_reg()];
ASSERT(index >= 0);
switch (def->representation()) {
case kUnboxedDouble:
return Location::DoubleStackSlot(
compiler_frame_layout.FrameSlotForVariableIndex(-index));
compiler::target::frame_layout.FrameSlotForVariableIndex(-index));
case kUnboxedFloat32x4:
case kUnboxedInt32x4:
case kUnboxedFloat64x2:
return Location::QuadStackSlot(
compiler_frame_layout.FrameSlotForVariableIndex(-index));
compiler::target::frame_layout.FrameSlotForVariableIndex(-index));
default:
UNREACHABLE();
@ -258,7 +258,7 @@ Location Location::RemapForSlowPath(Definition* def,
intptr_t index_hi;
if (value_pair->At(0).IsRegister()) {
index_lo = compiler_frame_layout.FrameSlotForVariableIndex(
index_lo = compiler::target::frame_layout.FrameSlotForVariableIndex(
-cpu_reg_slots[value_pair->At(0).reg()]);
} else {
ASSERT(value_pair->At(0).IsStackSlot());
@ -266,7 +266,7 @@ Location Location::RemapForSlowPath(Definition* def,
}
if (value_pair->At(1).IsRegister()) {
index_hi = compiler_frame_layout.FrameSlotForVariableIndex(
index_hi = compiler::target::frame_layout.FrameSlotForVariableIndex(
-cpu_reg_slots[value_pair->At(1).reg()]);
} else {
ASSERT(value_pair->At(1).IsStackSlot());

View file

@ -7,6 +7,7 @@
#include "vm/allocation.h"
#include "vm/bitfield.h"
#include "vm/bitmap.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/log.h"
@ -17,7 +18,6 @@ class ConstantInstr;
class Definition;
class PairLocation;
class Value;
struct FrameLayout;
enum Representation {
kNoRepresentation,

View file

@ -52,6 +52,7 @@ class Function;
class Precompiler;
class SpeculativeInliningPolicy;
class TimelineStream;
class Thread;
struct CompilerPassState {
CompilerPassState(Thread* thread,

View file

@ -29,6 +29,7 @@ compiler_sources = [
"assembler/disassembler_kbc.cc",
"assembler/disassembler_kbc.h",
"assembler/disassembler_x86.cc",
"assembler/object_pool_builder.h",
"backend/block_scheduler.cc",
"backend/block_scheduler.h",
"backend/branch_optimizer.cc",
@ -119,6 +120,8 @@ compiler_sources = [
"method_recognizer.h",
"relocation.cc",
"relocation.h",
"runtime_api.cc",
"runtime_api.h",
]
compiler_sources_tests = [

View file

@ -597,7 +597,7 @@ void BytecodeFlowGraphBuilder::BuildCheckFunctionTypeArgs() {
store_type_args += B->LoadArgDescriptor();
store_type_args += B->LoadNativeField(Slot::ArgumentsDescriptor_count());
store_type_args += B->LoadFpRelativeSlot(
kWordSize * (1 + compiler_frame_layout.param_end_from_fp));
kWordSize * (1 + compiler::target::frame_layout.param_end_from_fp));
store_type_args +=
B->StoreLocalRaw(TokenPosition::kNoSource, type_args_var);
store_type_args += B->Drop();
@ -1364,7 +1364,7 @@ void BytecodeFlowGraphBuilder::BuildCompareIntLe() {
}
static bool IsICDataEntry(const ObjectPool& object_pool, intptr_t index) {
if (object_pool.TypeAt(index) != ObjectPool::kTaggedObject) {
if (object_pool.TypeAt(index) != ObjectPool::EntryType::kTaggedObject) {
return false;
}
RawObject* entry = object_pool.ObjectAt(index);

View file

@ -478,7 +478,8 @@ void BytecodeMetadataHelper::ReadConstantPool(const Function& function,
// InstanceField constant occupies 2 entries.
// The first entry is used for field offset.
obj = Smi::New(field.Offset() / kWordSize);
pool.SetTypeAt(i, ObjectPool::kTaggedObject, ObjectPool::kNotPatchable);
pool.SetTypeAt(i, ObjectPool::EntryType::kTaggedObject,
ObjectPool::Patchability::kNotPatchable);
pool.SetObjectAt(i, obj);
++i;
ASSERT(i < obj_count);
@ -562,8 +563,8 @@ void BytecodeMetadataHelper::ReadConstantPool(const Function& function,
case ConstantPoolTag::kNativeEntry: {
name = ReadString();
obj = NativeEntry(function, name);
pool.SetTypeAt(i, ObjectPool::kNativeEntryData,
ObjectPool::kNotPatchable);
pool.SetTypeAt(i, ObjectPool::EntryType::kNativeEntryData,
ObjectPool::Patchability::kNotPatchable);
pool.SetObjectAt(i, obj);
continue;
}
@ -620,7 +621,8 @@ void BytecodeMetadataHelper::ReadConstantPool(const Function& function,
array ^= pool.ObjectAt(arg_desc_index);
// InterfaceCall constant occupies 2 entries.
// The first entry is used for selector name.
pool.SetTypeAt(i, ObjectPool::kTaggedObject, ObjectPool::kNotPatchable);
pool.SetTypeAt(i, ObjectPool::EntryType::kTaggedObject,
ObjectPool::Patchability::kNotPatchable);
pool.SetObjectAt(i, name);
++i;
ASSERT(i < obj_count);
@ -630,7 +632,8 @@ void BytecodeMetadataHelper::ReadConstantPool(const Function& function,
default:
UNREACHABLE();
}
pool.SetTypeAt(i, ObjectPool::kTaggedObject, ObjectPool::kNotPatchable);
pool.SetTypeAt(i, ObjectPool::EntryType::kTaggedObject,
ObjectPool::Patchability::kNotPatchable);
pool.SetObjectAt(i, obj);
}
}

View file

@ -692,15 +692,15 @@ FlowGraph* StreamingFlowGraphBuilder::BuildGraphOfNoSuchMethodForwarder(
body += LoadLocal(parsed_function()->current_context_var());
body += B->LoadNativeField(
Slot::GetContextVariableSlotFor(thread(), *scopes()->this_variable));
body += B->StoreFpRelativeSlot(kWordSize *
compiler_frame_layout.param_end_from_fp);
body += B->StoreFpRelativeSlot(
kWordSize * compiler::target::frame_layout.param_end_from_fp);
} else {
body += LoadLocal(parsed_function()->current_context_var());
body += B->LoadNativeField(
Slot::GetContextVariableSlotFor(thread(), *scopes()->this_variable));
body += B->StoreFpRelativeSlot(
kWordSize *
(compiler_frame_layout.param_end_from_fp + function.NumParameters()));
kWordSize * (compiler::target::frame_layout.param_end_from_fp +
function.NumParameters()));
}
}
@ -804,8 +804,8 @@ FlowGraph* StreamingFlowGraphBuilder::BuildGraphOfNoSuchMethodForwarder(
loop_body += LoadLocal(argument_count);
loop_body += LoadLocal(index);
loop_body += B->SmiBinaryOp(Token::kSUB, /*truncate=*/true);
loop_body += B->LoadFpRelativeSlot(kWordSize *
compiler_frame_layout.param_end_from_fp);
loop_body += B->LoadFpRelativeSlot(
kWordSize * compiler::target::frame_layout.param_end_from_fp);
loop_body += StoreIndexed(kArrayCid);
// ++i

View file

@ -181,8 +181,8 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
for (; param < num_fixed_params; ++param) {
copy_args_prologue += LoadLocal(optional_count_var);
copy_args_prologue += LoadFpRelativeSlot(
kWordSize *
(compiler_frame_layout.param_end_from_fp + num_fixed_params - param));
kWordSize * (compiler::target::frame_layout.param_end_from_fp +
num_fixed_params - param));
copy_args_prologue +=
StoreLocalRaw(TokenPosition::kNoSource, ParameterVariable(param));
copy_args_prologue += Drop();
@ -201,8 +201,8 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
Fragment good(supplied);
good += LoadLocal(optional_count_var);
good += LoadFpRelativeSlot(
kWordSize *
(compiler_frame_layout.param_end_from_fp + num_fixed_params - param));
kWordSize * (compiler::target::frame_layout.param_end_from_fp +
num_fixed_params - param));
good += StoreLocalRaw(TokenPosition::kNoSource, ParameterVariable(param));
good += Drop();
@ -286,7 +286,7 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
Fragment good(supplied);
{
// fp[compiler_frame_layout.param_end_from_fp + (count_var - pos)]
// fp[target::frame_layout.param_end_from_fp + (count_var - pos)]
good += LoadLocal(count_var);
{
// pos = arg_desc[names_offset + arg_desc_name_index + positionOffset]
@ -299,8 +299,8 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
good += LoadIndexed(/* index_scale = */ kWordSize);
}
good += SmiBinaryOp(Token::kSUB, /* truncate= */ true);
good += LoadFpRelativeSlot(kWordSize *
compiler_frame_layout.param_end_from_fp);
good += LoadFpRelativeSlot(
kWordSize * compiler::target::frame_layout.param_end_from_fp);
// Copy down.
good += StoreLocalRaw(TokenPosition::kNoSource,
@ -407,7 +407,7 @@ Fragment PrologueBuilder::BuildTypeArgumentsHandling(JoinEntryInstr* nsm) {
store_type_args += LoadArgDescriptor();
store_type_args += LoadNativeField(Slot::ArgumentsDescriptor_count());
store_type_args += LoadFpRelativeSlot(
kWordSize * (1 + compiler_frame_layout.param_end_from_fp));
kWordSize * (1 + compiler::target::frame_layout.param_end_from_fp));
store_type_args += StoreLocal(TokenPosition::kNoSource, type_args_var);
store_type_args += Drop();

View file

@ -12,14 +12,19 @@
namespace dart {
// Forward declarations.
namespace compiler {
class Assembler;
class Label;
} // namespace compiler
class FlowGraphCompiler;
class Function;
class TargetEntryInstr;
class ParsedFunction;
class FlowGraph;
using compiler::Assembler;
using compiler::Label;
class Intrinsifier : public AllStatic {
public:
static bool Intrinsify(const ParsedFunction& parsed_function,

View file

@ -109,7 +109,6 @@ void Intrinsifier::GrowableArray_Allocate(Assembler* assembler,
sizeof(Raw##type_name) + kObjectAlignment - 1; \
__ AddImmediate(R2, fixed_size_plus_alignment_padding); \
__ bic(R2, R2, Operand(kObjectAlignment - 1)); \
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew); \
__ ldr(R0, Address(THR, Thread::top_offset())); \
\
/* R2: allocation size. */ \
@ -176,7 +175,7 @@ void Intrinsifier::GrowableArray_Allocate(Assembler* assembler,
__ b(&init_loop, CC); \
__ str(R8, Address(R3, -2 * kWordSize), HI); \
\
NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space)); \
NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2)); \
__ Ret(); \
__ Bind(normal_ir_body);
@ -1943,7 +1942,6 @@ static void TryAllocateOnebyteString(Assembler* assembler,
__ bic(length_reg, length_reg, Operand(kObjectAlignment - 1));
const intptr_t cid = kOneByteStringCid;
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
__ ldr(R0, Address(THR, Thread::top_offset()));
// length_reg: allocation size.
@ -1993,7 +1991,7 @@ static void TryAllocateOnebyteString(Assembler* assembler,
__ LoadImmediate(TMP, 0);
__ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, String::hash_offset()), TMP);
NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2, space));
NOT_IN_PRODUCT(__ IncrementAllocationStatsWithSize(R4, R2));
__ b(ok);
__ Bind(&fail);

View file

@ -127,7 +127,6 @@ static int GetScaleFactor(intptr_t size) {
sizeof(Raw##type_name) + kObjectAlignment - 1; \
__ AddImmediate(R2, fixed_size_plus_alignment_padding); \
__ andi(R2, R2, Immediate(~(kObjectAlignment - 1))); \
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew); \
__ ldr(R0, Address(THR, Thread::top_offset())); \
\
/* R2: allocation size. */ \
@ -146,7 +145,7 @@ static int GetScaleFactor(intptr_t size) {
/* next object start and initialize the object. */ \
__ str(R1, Address(THR, Thread::top_offset())); \
__ AddImmediate(R0, kHeapObjectTag); \
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2, space)); \
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2)); \
/* Initialize the tags. */ \
/* R0: new object start as a tagged pointer. */ \
/* R1: new object end address. */ \
@ -2009,7 +2008,6 @@ static void TryAllocateOnebyteString(Assembler* assembler,
__ andi(length_reg, length_reg, Immediate(~(kObjectAlignment - 1)));
const intptr_t cid = kOneByteStringCid;
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
__ ldr(R0, Address(THR, Thread::top_offset()));
// length_reg: allocation size.
@ -2028,7 +2026,7 @@ static void TryAllocateOnebyteString(Assembler* assembler,
// next object start and initialize the object.
__ str(R1, Address(THR, Thread::top_offset()));
__ AddImmediate(R0, kHeapObjectTag);
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2, space));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2));
// Initialize the tags.
// R0: new object start as a tagged pointer.

View file

@ -113,7 +113,6 @@ void Intrinsifier::GrowableArray_Allocate(Assembler* assembler,
sizeof(Raw##type_name) + kObjectAlignment - 1; \
__ leal(EDI, Address(EDI, scale_factor, fixed_size_plus_alignment_padding)); \
__ andl(EDI, Immediate(-kObjectAlignment)); \
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew); \
__ movl(EAX, Address(THR, Thread::top_offset())); \
__ movl(EBX, EAX); \
\
@ -132,7 +131,7 @@ void Intrinsifier::GrowableArray_Allocate(Assembler* assembler,
/* next object start and initialize the object. */ \
__ movl(Address(THR, Thread::top_offset()), EBX); \
__ addl(EAX, Immediate(kHeapObjectTag)); \
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EDI, ECX, space)); \
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EDI, ECX)); \
\
/* Initialize the tags. */ \
/* EAX: new object start as a tagged pointer. */ \
@ -1936,7 +1935,6 @@ static void TryAllocateOnebyteString(Assembler* assembler,
__ andl(EDI, Immediate(-kObjectAlignment));
const intptr_t cid = kOneByteStringCid;
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
__ movl(EAX, Address(THR, Thread::top_offset()));
__ movl(EBX, EAX);
@ -1956,7 +1954,7 @@ static void TryAllocateOnebyteString(Assembler* assembler,
__ movl(Address(THR, Thread::top_offset()), EBX);
__ addl(EAX, Immediate(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EDI, ECX, space));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, EDI, ECX));
// Initialize the tags.
// EAX: new object start as a tagged pointer.

View file

@ -114,7 +114,6 @@ void Intrinsifier::GrowableArray_Allocate(Assembler* assembler,
sizeof(Raw##type_name) + kObjectAlignment - 1; \
__ leaq(RDI, Address(RDI, scale_factor, fixed_size_plus_alignment_padding)); \
__ andq(RDI, Immediate(-kObjectAlignment)); \
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew); \
__ movq(RAX, Address(THR, Thread::top_offset())); \
__ movq(RCX, RAX); \
\
@ -133,7 +132,7 @@ void Intrinsifier::GrowableArray_Allocate(Assembler* assembler,
/* next object start and initialize the object. */ \
__ movq(Address(THR, Thread::top_offset()), RCX); \
__ addq(RAX, Immediate(kHeapObjectTag)); \
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI, space)); \
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI)); \
/* Initialize the tags. */ \
/* RAX: new object start as a tagged pointer. */ \
/* RCX: new object end address. */ \
@ -1965,7 +1964,6 @@ static void TryAllocateOnebyteString(Assembler* assembler,
__ andq(RDI, Immediate(-kObjectAlignment));
const intptr_t cid = kOneByteStringCid;
NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
__ movq(RAX, Address(THR, Thread::top_offset()));
// RDI: allocation size.
@ -1984,7 +1982,7 @@ static void TryAllocateOnebyteString(Assembler* assembler,
// next object start and initialize the object.
__ movq(Address(THR, Thread::top_offset()), RCX);
__ addq(RAX, Immediate(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI, space));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, RDI));
// Initialize the tags.
// RAX: new object start as a tagged pointer.

View file

@ -125,7 +125,6 @@ static void PrecompilationModeHandler(bool value) {
FLAG_deoptimize_alot = false; // Used in some tests.
FLAG_deoptimize_every = 0; // Used in some tests.
FLAG_load_deferred_eagerly = true;
FLAG_print_stop_message = false;
FLAG_use_osr = false;
#endif
}
@ -667,8 +666,8 @@ RawCode* CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
ASSERT(pass_state.inline_id_to_function.length() ==
pass_state.caller_inline_id.length());
ObjectPoolWrapper object_pool_wrapper;
Assembler assembler(&object_pool_wrapper, use_far_branches);
ObjectPoolBuilder object_pool_builder;
Assembler assembler(&object_pool_builder, use_far_branches);
FlowGraphCompiler graph_compiler(
&assembler, flow_graph, *parsed_function(), optimized(),
&speculative_policy, pass_state.inline_id_to_function,

View file

@ -0,0 +1,321 @@
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/compiler/runtime_api.h"
#if !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/longjump.h"
#include "vm/object.h"
namespace dart {
namespace compiler {
bool IsSameObject(const Object& a, const Object& b) {
return a.raw() == b.raw();
}
bool IsNotTemporaryScopedHandle(const Object& obj) {
return obj.IsNotTemporaryScopedHandle();
}
bool IsInOldSpace(const Object& obj) {
return obj.IsOld();
}
intptr_t ObjectHash(const Object& obj) {
if (obj.IsNull()) {
return 2011;
}
if (obj.IsString() || obj.IsNumber()) {
return Instance::Cast(obj).CanonicalizeHash();
}
if (obj.IsCode()) {
// Instructions don't move during compaction.
return Code::Cast(obj).PayloadStart();
}
if (obj.IsFunction()) {
return Function::Cast(obj).Hash();
}
if (obj.IsField()) {
return dart::String::HashRawSymbol(Field::Cast(obj).name());
}
// Unlikely.
return obj.GetClassId();
}
void SetToNull(Object* obj) {
*obj = Object::null();
}
Object& NewZoneHandle(Zone* zone) {
return Object::ZoneHandle(zone, Object::null());
}
Object& NewZoneHandle(Zone* zone, const Object& obj) {
return Object::ZoneHandle(zone, obj.raw());
}
bool IsOriginalObject(const Object& object) {
if (object.IsICData()) {
return ICData::Cast(object).IsOriginal();
} else if (object.IsField()) {
return Field::Cast(object).IsOriginal();
}
return true;
}
const String& AllocateString(const char* buffer) {
return String::ZoneHandle(String::New(buffer));
}
bool HasIntegerValue(const dart::Object& object, int64_t* value) {
if (object.IsInteger()) {
*value = Integer::Cast(object).AsInt64Value();
return true;
}
return false;
}
int32_t CreateJitCookie() {
return static_cast<int32_t>(Isolate::Current()->random()->NextUInt32());
}
void BailoutWithBranchOffsetError() {
Thread::Current()->long_jump_base()->Jump(1, Object::branch_offset_error());
}
namespace target {
uint32_t MakeTagWordForNewSpaceObject(classid_t cid, uword instance_size) {
return dart::RawObject::SizeTag::encode(instance_size) |
dart::RawObject::ClassIdTag::encode(cid) |
dart::RawObject::NewBit::encode(true);
}
word Object::tags_offset() {
return dart::Object::tags_offset();
}
const word RawObject::kClassIdTagPos = dart::RawObject::kClassIdTagPos;
const word RawObject::kClassIdTagSize = dart::RawObject::kClassIdTagSize;
const word RawObject::kBarrierOverlapShift =
dart::RawObject::kBarrierOverlapShift;
intptr_t ObjectPool::element_offset(intptr_t index) {
return dart::ObjectPool::element_offset(index);
}
classid_t Class::GetId(const dart::Class& handle) {
return handle.id();
}
uword Class::GetInstanceSize(const dart::Class& handle) {
return handle.instance_size();
}
word Instance::DataOffsetFor(intptr_t cid) {
return dart::Instance::DataOffsetFor(cid);
}
bool Heap::IsAllocatableInNewSpace(intptr_t instance_size) {
return dart::Heap::IsAllocatableInNewSpace(instance_size);
}
word Thread::top_offset() {
return dart::Thread::top_offset();
}
word Thread::end_offset() {
return dart::Thread::end_offset();
}
word Thread::isolate_offset() {
return dart::Thread::isolate_offset();
}
#if !defined(TARGET_ARCH_DBC)
word Thread::call_to_runtime_entry_point_offset() {
return dart::Thread::call_to_runtime_entry_point_offset();
}
word Thread::null_error_shared_with_fpu_regs_entry_point_offset() {
return dart::Thread::null_error_shared_with_fpu_regs_entry_point_offset();
}
word Thread::null_error_shared_without_fpu_regs_entry_point_offset() {
return dart::Thread::null_error_shared_without_fpu_regs_entry_point_offset();
}
word Thread::monomorphic_miss_entry_offset() {
return dart::Thread::monomorphic_miss_entry_offset();
}
word Thread::write_barrier_mask_offset() {
return dart::Thread::write_barrier_mask_offset();
}
word Thread::write_barrier_entry_point_offset() {
return dart::Thread::write_barrier_entry_point_offset();
}
word Thread::array_write_barrier_entry_point_offset() {
return dart::Thread::array_write_barrier_entry_point_offset();
}
#endif // !defined(TARGET_ARCH_DBC)
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \
defined(TARGET_ARCH_X64)
word Thread::write_barrier_wrappers_thread_offset(intptr_t regno) {
return dart::Thread::write_barrier_wrappers_thread_offset(
static_cast<Register>(regno));
}
#endif
word Thread::vm_tag_offset() {
return dart::Thread::vm_tag_offset();
}
#define DECLARE_CONSTANT_OFFSET_GETTER(name) \
word Thread::name##_address_offset() { \
return dart::Thread::name##_address_offset(); \
}
THREAD_XMM_CONSTANT_LIST(DECLARE_CONSTANT_OFFSET_GETTER)
#undef DECLARE_CONSTANT_OFFSET_GETTER
word Isolate::class_table_offset() {
return dart::Isolate::class_table_offset();
}
word ClassTable::table_offset() {
return dart::ClassTable::table_offset();
}
word ClassTable::ClassOffsetFor(intptr_t cid) {
return dart::ClassTable::ClassOffsetFor(cid);
}
#if !defined(PRODUCT)
word ClassTable::StateOffsetFor(intptr_t cid) {
return dart::ClassTable::StateOffsetFor(cid);
}
word ClassTable::TableOffsetFor(intptr_t cid) {
return dart::ClassTable::TableOffsetFor(cid);
}
word ClassTable::CounterOffsetFor(intptr_t cid, bool is_new) {
return dart::ClassTable::CounterOffsetFor(cid, is_new);
}
word ClassTable::SizeOffsetFor(intptr_t cid, bool is_new) {
return dart::ClassTable::SizeOffsetFor(cid, is_new);
}
#endif // !defined(PRODUCT)
const word ClassTable::kSizeOfClassPairLog2 = dart::kSizeOfClassPairLog2;
const intptr_t Instructions::kPolymorphicEntryOffset =
dart::Instructions::kPolymorphicEntryOffset;
const intptr_t Instructions::kMonomorphicEntryOffset =
dart::Instructions::kMonomorphicEntryOffset;
intptr_t Instructions::HeaderSize() {
return dart::Instructions::HeaderSize();
}
intptr_t Code::object_pool_offset() {
return dart::Code::object_pool_offset();
}
intptr_t Code::saved_instructions_offset() {
return dart::Code::saved_instructions_offset();
}
intptr_t Code::entry_point_offset(CodeEntryKind kind) {
return dart::Code::entry_point_offset(kind);
}
#if !defined(PRODUCT)
word ClassHeapStats::TraceAllocationMask() {
return dart::ClassHeapStats::TraceAllocationMask();
}
word ClassHeapStats::state_offset() {
return dart::ClassHeapStats::state_offset();
}
word ClassHeapStats::allocated_since_gc_new_space_offset() {
return dart::ClassHeapStats::allocated_since_gc_new_space_offset();
}
word ClassHeapStats::allocated_size_since_gc_new_space_offset() {
return dart::ClassHeapStats::allocated_size_since_gc_new_space_offset();
}
#endif // !defined(PRODUCT)
word Double::value_offset() {
return dart::Double::value_offset();
}
word Float32x4::value_offset() {
return dart::Float32x4::value_offset();
}
word Float64x2::value_offset() {
return dart::Float64x2::value_offset();
}
bool IsSmi(const dart::Object& a) {
return a.IsSmi();
}
word ToRawSmi(const dart::Object& a) {
ASSERT(a.IsSmi());
return reinterpret_cast<word>(a.raw());
}
word ToRawSmi(intptr_t value) {
return dart::Smi::RawValue(value);
}
bool CanLoadFromThread(const dart::Object& object,
word* offset /* = nullptr */) {
if (dart::Thread::CanLoadFromThread(object)) {
if (offset != nullptr) {
*offset = dart::Thread::OffsetFromThread(object);
}
return true;
}
return false;
}
#if defined(TARGET_ARCH_IA32)
uword Code::EntryPointOf(const dart::Code& code) {
static_assert(kHostWordSize == kWordSize,
"Can't embed raw pointers to runtime objects when host and "
"target word sizes are different");
return code.EntryPoint();
}
bool CanEmbedAsRawPointerInGeneratedCode(const dart::Object& obj) {
return obj.IsSmi() || obj.InVMHeap();
}
word ToRawPointer(const dart::Object& a) {
static_assert(kHostWordSize == kWordSize,
"Can't embed raw pointers to runtime objects when host and "
"target word sizes are different");
return reinterpret_cast<word>(a.raw());
}
#endif // defined(TARGET_ARCH_IA32)
} // namespace target
} // namespace compiler
} // namespace dart
#endif // !defined(DART_PRECOMPILED_RUNTIME)

View file

@ -0,0 +1,350 @@
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_COMPILER_RUNTIME_API_H_
#define RUNTIME_VM_COMPILER_RUNTIME_API_H_
// This header defines the API that compiler can use to interact with the
// underlying Dart runtime that it is embedded into.
//
// Compiler is not allowed to directly interact with any objects - it can only
// use classes like dart::Object, dart::Code, dart::Function and similar as
// opaque handles. All interactions should be done through helper methods
// provided by this header.
//
// This header also provides ways to get word sizes, frame layout, field
// offsets for the target runtime. Note that these can be different from
// those on the host. Helpers providing access to these values live
// in compiler::target namespace.
#include "platform/globals.h"
#include "vm/allocation.h"
#include "vm/bitfield.h"
#include "vm/code_entry_kind.h"
#include "vm/frame_layout.h"
#include "vm/pointer_tagging.h"
namespace dart {
// Forward declarations.
class Class;
class Code;
class Function;
class LocalVariable;
class Object;
class String;
class Zone;
namespace compiler {
class Assembler;
}
namespace compiler {
// Host word sizes.
//
// Code in the compiler namespace should not use kWordSize and derived
// constants directly because the word size on host and target might
// be different.
//
// To prevent this we introduce variables that shadow these constants and
// cause compilation errors when they are used.
//
// target::kWordSize and target::ObjectAlignment give access to
// word size and object alignment offsets for the target.
//
// Similarly kHostWordSize gives access to the host word size.
class InvalidClass {};
extern InvalidClass kWordSize;
extern InvalidClass kWordSizeLog2;
extern InvalidClass kNewObjectAlignmentOffset;
extern InvalidClass kOldObjectAlignmentOffset;
extern InvalidClass kNewObjectBitPosition;
extern InvalidClass kObjectAlignment;
extern InvalidClass kObjectAlignmentLog2;
extern InvalidClass kObjectAlignmentMask;
static constexpr intptr_t kHostWordSize = dart::kWordSize;
static constexpr intptr_t kHostWordSizeLog2 = dart::kWordSizeLog2;
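A small illustration of the shadowing trick, assuming runtime_api.h is included and the code sits inside the dart::compiler namespace (the function name is made up):
namespace dart {
namespace compiler {

intptr_t BytesForSlots(intptr_t num_slots) {
  // return num_slots * kWordSize;       // Does not compile: 'kWordSize' here
  //                                     // resolves to the InvalidClass shadow.
  return num_slots * target::kWordSize;  // Uses the target word size instead.
}

}  // namespace compiler
}  // namespace dart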
//
// Object handles.
//
// Create an empty handle.
Object& NewZoneHandle(Zone* zone);
// Clone the given handle.
Object& NewZoneHandle(Zone* zone, const Object&);
// Returns true if [a] and [b] are the same object.
bool IsSameObject(const Object& a, const Object& b);
// Returns true if the given handle is a zone handle or one of the global
// cached handles.
bool IsNotTemporaryScopedHandle(const Object& obj);
// Returns true if [obj] resides in old space.
bool IsInOldSpace(const Object& obj);
// Returns true if [obj] is not a Field/ICData clone.
//
// Used to assert that pointers to objects cloned for the background compiler
// are never embedded into object pools or generated code.
bool IsOriginalObject(const Object& object);
// Clear the given handle.
void SetToNull(Object* obj);
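A minimal sketch of how compiler code is expected to work with these opaque handles (zone and original are assumed to be in scope):
Object& copy = NewZoneHandle(zone, original);  // Zone-allocated clone.
ASSERT(IsSameObject(copy, original));
ASSERT(IsNotTemporaryScopedHandle(copy));
SetToNull(&copy);                              // Clear the handle again.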
// Helper functions to upcast handles.
//
// Note: compiler code cannot include object.h so it cannot see that Object is
// a superclass of Code or Function - thus we have to cast these pointers using
// reinterpret_cast.
inline const Object& ToObject(const Code& handle) {
return *reinterpret_cast<const Object*>(&handle);
}
inline const Object& ToObject(const Function& handle) {
return *reinterpret_cast<const Object*>(&handle);
}
// Returns some hash value for the given object.
//
// Note: the given hash value does not necessarily match Object.get:hashCode,
// or canonical hash.
intptr_t ObjectHash(const Object& obj);
// If the given object represents a Dart integer returns true and sets [value]
// to the value of the integer.
bool HasIntegerValue(const dart::Object& obj, int64_t* value);
// Creates a random cookie to be used for masking constants embedded in the
// generated code.
int32_t CreateJitCookie();
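A hedged sketch of the masking idea (the concrete scheme lives in the IA32 assembler and is not part of this change; reg and raw_value stand in for an actual register and the constant being hidden): the value embedded in the instruction stream is XOR-ed with the cookie and re-derived at run time.
const int32_t cookie = CreateJitCookie();
__ movl(reg, Immediate(raw_value ^ cookie));  // Obfuscated immediate in code.
__ xorl(reg, Immediate(cookie));              // Recovers the real value.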
class RuntimeEntry : public ValueObject {
public:
virtual ~RuntimeEntry() {}
virtual void Call(compiler::Assembler* assembler,
intptr_t argument_count) const = 0;
};
// Allocate a string object with the given content in the runtime heap.
const String& AllocateString(const char* buffer);
DART_NORETURN void BailoutWithBranchOffsetError();
// compiler::target namespace contains information about the target platform:
//
// - word sizes and derived constants
// - offsets of fields
// - sizes of structures
namespace target {
// Currently we define target::word to match dart::word, which represents a
// host word.
//
// Once refactoring of the compiler is complete we will switch target::word
// to be independent from host word.
typedef dart::word word;
typedef dart::uword uword;
static constexpr word kWordSize = dart::kWordSize;
static constexpr word kWordSizeLog2 = dart::kWordSizeLog2;
static_assert((1 << kWordSizeLog2) == kWordSize,
"kWordSizeLog2 should match kWordSize");
using ObjectAlignment = dart::ObjectAlignment<kWordSize, kWordSizeLog2>;
// Information about frame_layout that compiler should be targeting.
extern FrameLayout frame_layout;
// Returns the FP-relative offset (in bytes) where [variable] can be found
// (assumes [variable] is not captured).
inline int FrameOffsetInBytesForVariable(const LocalVariable* variable) {
return frame_layout.FrameSlotForVariable(variable) * kWordSize;
}
// Encode tag word for a heap allocated object with the given class id and
// size.
//
// Note: even on 64-bit platforms we only use lower 32-bits of the tag word.
uint32_t MakeTagWordForNewSpaceObject(classid_t cid, uword instance_size);
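// For example (a hypothetical sketch; the register name and assembler helper
// are illustrative), an allocation fast path could materialize the tags for a
// freshly allocated instance as an immediate:
//
//   __ LoadImmediate(kTagsReg,
//                    target::MakeTagWordForNewSpaceObject(cid, instance_size));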
//
// Target specific information about objects.
//
// Returns true if the given object can be represented as a Smi on the
// target platform.
bool IsSmi(const dart::Object& a);
// Return raw Smi representation of the given object for the target platform.
word ToRawSmi(const dart::Object& a);
// Return raw Smi representation of the given integer value for the target
// platform.
//
// Note: method assumes that caller has validated that value is representable
// as a Smi.
word ToRawSmi(intptr_t value);
// If the given object can be loaded from the thread on the target then
// return true and set offset (if provided) to the offset from the
// thread pointer to a field that contains the object.
bool CanLoadFromThread(const dart::Object& object, word* offset = nullptr);
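// For example (a hypothetical sketch; assembler helper names are
// illustrative), a code generator could prefer a cheap Thread-relative load
// over an object pool load:
//
//   word offset = 0;
//   if (target::CanLoadFromThread(object, &offset)) {
//     __ LoadFromOffset(dst, THR, offset);  // load via the thread register
//   } else {
//     __ LoadObject(dst, object);           // fall back to the object pool
//   }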
// On IA32 we can embed raw pointers into generated code.
#if defined(TARGET_ARCH_IA32)
// Returns true if the pointer to the given object can be directly embedded
// into the generated code (because the object is immortal and immovable).
bool CanEmbedAsRawPointerInGeneratedCode(const dart::Object& obj);
// Returns raw pointer value for the given object. Should only be invoked
// if CanEmbedAsRawPointerInGeneratedCode returns true.
word ToRawPointer(const dart::Object& a);
#endif // defined(TARGET_ARCH_IA32)
//
// Target specific offsets and constants.
//
// Currently we use the same names for classes, constants and getters to make
// migration easier.
class RawObject : public AllStatic {
public:
static const word kClassIdTagPos;
static const word kClassIdTagSize;
static const word kBarrierOverlapShift;
};
class Object : public AllStatic {
public:
// Offset of the tags word.
static word tags_offset();
};
class ObjectPool : public AllStatic {
public:
// Return offset to the element with the given [index] in the object pool.
static intptr_t element_offset(intptr_t index);
};
class Class : public AllStatic {
public:
// Return class id of the given class on the target.
static classid_t GetId(const dart::Class& handle);
// Return instance size for the given class on the target.
static uword GetInstanceSize(const dart::Class& handle);
};
class Instance : public AllStatic {
public:
static word DataOffsetFor(intptr_t cid);
};
class Double : public AllStatic {
public:
static word value_offset();
};
class Float32x4 : public AllStatic {
public:
static word value_offset();
};
class Float64x2 : public AllStatic {
public:
static word value_offset();
};
class Thread : public AllStatic {
public:
static word top_offset();
static word end_offset();
static word isolate_offset();
static word call_to_runtime_entry_point_offset();
static word null_error_shared_with_fpu_regs_entry_point_offset();
static word null_error_shared_without_fpu_regs_entry_point_offset();
static word write_barrier_mask_offset();
static word monomorphic_miss_entry_offset();
static word write_barrier_wrappers_thread_offset(intptr_t regno);
static word array_write_barrier_entry_point_offset();
static word write_barrier_entry_point_offset();
static word vm_tag_offset();
#define THREAD_XMM_CONSTANT_LIST(V) \
V(float_not) \
V(float_negate) \
V(float_absolute) \
V(float_zerow) \
V(double_negate) \
V(double_abs)
#define DECLARE_CONSTANT_OFFSET_GETTER(name) \
static word name##_address_offset();
THREAD_XMM_CONSTANT_LIST(DECLARE_CONSTANT_OFFSET_GETTER)
#undef DECLARE_CONSTANT_OFFSET_GETTER
};
class Isolate : public AllStatic {
public:
static word class_table_offset();
};
class ClassTable : public AllStatic {
public:
static word table_offset();
static word ClassOffsetFor(intptr_t cid);
#if !defined(PRODUCT)
static word StateOffsetFor(intptr_t cid);
static word TableOffsetFor(intptr_t cid);
static word CounterOffsetFor(intptr_t cid, bool is_new);
static word SizeOffsetFor(intptr_t cid, bool is_new);
#endif // !defined(PRODUCT)
static const word kSizeOfClassPairLog2;
};
#if !defined(PRODUCT)
class ClassHeapStats : public AllStatic {
public:
static word TraceAllocationMask();
static word state_offset();
static word allocated_since_gc_new_space_offset();
static word allocated_size_since_gc_new_space_offset();
};
#endif // !defined(PRODUCT)
class Instructions : public AllStatic {
public:
static const intptr_t kPolymorphicEntryOffset;
static const intptr_t kMonomorphicEntryOffset;
static intptr_t HeaderSize();
};
class Code : public AllStatic {
public:
#if defined(TARGET_ARCH_IA32)
static uword EntryPointOf(const dart::Code& code);
#endif // defined(TARGET_ARCH_IA32)
static intptr_t object_pool_offset();
static intptr_t entry_point_offset(
CodeEntryKind kind = CodeEntryKind::kNormal);
static intptr_t saved_instructions_offset();
};
class Heap : public AllStatic {
public:
// Return true if an object with the given instance size is allocatable
// in new space on the target.
static bool IsAllocatableInNewSpace(intptr_t instance_size);
};
} // namespace target
} // namespace compiler
} // namespace dart
#endif // RUNTIME_VM_COMPILER_RUNTIME_API_H_


@ -496,7 +496,6 @@ class Instr {
((AL << kConditionShift) | (0x32 << 20) | (0xf << 12));
static const int32_t kBreakPointCode = 0xdeb0; // For breakpoint.
static const int32_t kStopMessageCode = 0xdeb1; // For Stop(message).
static const int32_t kSimulatorBreakCode = 0xdeb2; // For breakpoint in sim.
static const int32_t kSimulatorRedirectCode = 0xca11; // For redirection.


@ -882,7 +882,6 @@ class Instr {
// Reserved brk and hlt instruction codes.
static const int32_t kBreakPointCode = 0xdeb0; // For breakpoint.
static const int32_t kStopMessageCode = 0xdeb1; // For Stop(message).
static const int32_t kSimulatorBreakCode = 0xdeb2; // For breakpoint in sim.
static const int32_t kSimulatorRedirectCode = 0xca11; // For redirection.


@ -95,8 +95,8 @@ static void CheckOffsets() {
#if defined(TARGET_ARCH_ARM)
// These offsets are embedded in precompiled instructions. We need simarm
// (compiler) and arm (runtime) to agree.
CHECK_OFFSET(Thread::stack_limit_offset(), 28);
CHECK_OFFSET(Thread::object_null_offset(), 88);
CHECK_OFFSET(Thread::stack_limit_offset(), 36);
CHECK_OFFSET(Thread::object_null_offset(), 96);
CHECK_OFFSET(SingleTargetCache::upper_limit_offset(), 14);
CHECK_OFFSET(Isolate::object_store_offset(), 20);
NOT_IN_PRODUCT(CHECK_OFFSET(sizeof(ClassHeapStats), 168));
@ -104,8 +104,8 @@ static void CheckOffsets() {
#if defined(TARGET_ARCH_ARM64)
// These offsets are embedded in precompiled instructions. We need simarm64
// (compiler) and arm64 (runtime) to agree.
CHECK_OFFSET(Thread::stack_limit_offset(), 56);
CHECK_OFFSET(Thread::object_null_offset(), 168);
CHECK_OFFSET(Thread::stack_limit_offset(), 72);
CHECK_OFFSET(Thread::object_null_offset(), 184);
CHECK_OFFSET(SingleTargetCache::upper_limit_offset(), 26);
CHECK_OFFSET(Isolate::object_store_offset(), 40);
NOT_IN_PRODUCT(CHECK_OFFSET(sizeof(ClassHeapStats), 288));


@ -1057,12 +1057,12 @@ DeoptInfoBuilder::DeoptInfoBuilder(Zone* zone,
materializations_() {}
intptr_t DeoptInfoBuilder::FindOrAddObjectInTable(const Object& obj) const {
return assembler_->object_pool_wrapper().FindObject(obj);
return assembler_->object_pool_builder().FindObject(obj);
}
intptr_t DeoptInfoBuilder::CalculateStackIndex(
const Location& source_loc) const {
intptr_t index = -compiler_frame_layout.VariableIndexForFrameSlot(
intptr_t index = -compiler::target::frame_layout.VariableIndexForFrameSlot(
source_loc.stack_index());
return index < 0 ? index + num_args_
: index + num_args_ + kDartFrameFixedSize;


@ -147,7 +147,6 @@ constexpr bool kDartPrecompiledRuntime = false;
"Print live ranges after allocation.") \
R(print_stacktrace_at_api_error, false, bool, false, \
"Attempt to print a native stack trace when an API error is created.") \
C(print_stop_message, false, false, bool, false, "Print stop message.") \
D(print_variable_descriptors, bool, false, \
"Print variable descriptors in disassembly.") \
R(profiler, false, bool, false, "Enable the profiler.") \

runtime/vm/frame_layout.h

@ -0,0 +1,69 @@
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_FRAME_LAYOUT_H_
#define RUNTIME_VM_FRAME_LAYOUT_H_
// FrameLayout structure captures configuration specific properties of the
// frame layout used by the runtime system and compiler.
//
// Runtime system uses runtime_frame_layout defined in stack_frame.h.
// Compiler uses compiler::target::frame_layout defined in runtime_api.h
namespace dart {
// Forward declarations.
class LocalVariable;
struct FrameLayout {
// The offset (in words) from FP to the first object.
int first_object_from_fp;
// The offset (in words) from FP to the last fixed object.
int last_fixed_object_from_fp;
// The offset (in words) from FP to the slot just past the last parameter.
int param_end_from_fp;
// The offset (in words) from FP to the first local.
int first_local_from_fp;
// The fixed size of the frame.
int dart_fixed_frame_size;
// The offset (in words) from FP to the saved pool (if applicable).
int saved_caller_pp_from_fp;
// The offset (in words) from FP to the code object (if applicable).
int code_from_fp;
// The number of fixed slots below the saved PC.
int saved_below_pc() const { return -first_local_from_fp; }
// Returns the FP-relative index where [variable] can be found (assumes
// [variable] is not captured), in words.
int FrameSlotForVariable(const LocalVariable* variable) const;
// Returns the FP-relative index where [variable_index] can be found (assumes
// [variable_index] comes from a [LocalVariable::index()], which is not
// captured).
int FrameSlotForVariableIndex(int index) const;
// Returns the variable index from a FP-relative index.
intptr_t VariableIndexForFrameSlot(intptr_t frame_slot) const {
if (frame_slot <= first_local_from_fp) {
return frame_slot - first_local_from_fp;
} else {
ASSERT(frame_slot > param_end_from_fp);
return frame_slot - param_end_from_fp;
}
}
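// For example (layout values are illustrative only): with
// param_end_from_fp == 1 and first_local_from_fp == -2,
// VariableIndexForFrameSlot(-2) == 0, VariableIndexForFrameSlot(-3) == -1,
// and VariableIndexForFrameSlot(2) == 1.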
// Called to initialize the stack frame layout during startup.
static void Init();
};
} // namespace dart
#endif // RUNTIME_VM_FRAME_LAYOUT_H_


@ -0,0 +1,33 @@
// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_HANDLE_VISITOR_H_
#define RUNTIME_VM_HANDLE_VISITOR_H_
#include "vm/allocation.h"
#include "vm/flags.h"
#include "vm/os.h"
namespace dart {
class Thread;
class HandleVisitor {
public:
explicit HandleVisitor(Thread* thread) : thread_(thread) {}
virtual ~HandleVisitor() {}
Thread* thread() const { return thread_; }
virtual void VisitHandle(uword addr) = 0;
private:
Thread* thread_;
DISALLOW_IMPLICIT_CONSTRUCTORS(HandleVisitor);
};
} // namespace dart
#endif // RUNTIME_VM_HANDLE_VISITOR_H_


@ -84,7 +84,7 @@ void HandleScope::Initialize() {
#endif
}
HandleScope::HandleScope(Thread* thread) : ThreadStackResource(thread) {
HandleScope::HandleScope(ThreadState* thread) : StackResource(thread) {
Initialize();
}


@ -8,7 +8,6 @@
#include "vm/allocation.h"
#include "vm/flags.h"
#include "vm/os.h"
#include "vm/thread_stack_resource.h"
namespace dart {
@ -49,25 +48,10 @@ namespace dart {
// Forward declarations.
class ObjectPointerVisitor;
class Thread;
class HandleVisitor;
DECLARE_FLAG(bool, verify_handles);
class HandleVisitor {
public:
explicit HandleVisitor(Thread* thread) : thread_(thread) {}
virtual ~HandleVisitor() {}
Thread* thread() const { return thread_; }
virtual void VisitHandle(uword addr) = 0;
private:
Thread* thread_;
DISALLOW_IMPLICIT_CONSTRUCTORS(HandleVisitor);
};
template <int kHandleSizeInWords, int kHandlesPerChunk, int kOffsetOfRawPtr>
class Handles {
public:
@ -108,7 +92,6 @@ class Handles {
// Returns true if specified handle is a zone handle.
static bool IsZoneHandle(uword handle);
protected:
// Allocates space for a scoped handle.
uword AllocateScopedHandle() {
if (scoped_blocks_->IsFull()) {
@ -117,6 +100,7 @@ class Handles {
return scoped_blocks_->AllocateHandle();
}
protected:
// Returns a count of active handles (used for testing purposes).
int CountScopedHandles() const;
int CountZoneHandles() const;
@ -224,7 +208,7 @@ class Handles {
friend class HandleScope;
friend class Dart;
friend class ObjectStore;
friend class Thread;
friend class ThreadState;
DISALLOW_ALLOCATION();
DISALLOW_COPY_AND_ASSIGN(Handles);
};
@ -279,9 +263,9 @@ class VMHandles : public Handles<kVMHandleSizeInWords,
// code that creates some scoped handles.
// ....
// }
class HandleScope : public ThreadStackResource {
class HandleScope : public StackResource {
public:
explicit HandleScope(Thread* thread);
explicit HandleScope(ThreadState* thread);
~HandleScope();
private:


@ -270,7 +270,7 @@ void FreeList::PrintSmall() const {
OS::PrintErr(
"small %3d [%8d bytes] : "
"%8" Pd " objs; %8.1f KB; %8.1f cum KB\n",
i, i * kObjectAlignment, list_length,
i, static_cast<int>(i * kObjectAlignment), list_length,
list_bytes / static_cast<double>(KB),
small_bytes / static_cast<double>(KB));
}


@ -5,6 +5,10 @@
#ifndef RUNTIME_VM_HEAP_HEAP_H_
#define RUNTIME_VM_HEAP_HEAP_H_
#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
#error "Should not include runtime"
#endif
#include "platform/assert.h"
#include "vm/allocation.h"
#include "vm/flags.h"


@ -7,6 +7,7 @@
#include "vm/flags.h"
#include "vm/globals.h"
#include "vm/handle_visitor.h"
#include "vm/handles.h"
#include "vm/thread.h"
#include "vm/visitor.h"


@ -232,7 +232,7 @@ bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
intptr_t index = ObjectPool::IndexFromOffset(offset);
const ObjectPool& pool = ObjectPool::Handle(code.object_pool());
if (!pool.IsNull()) {
if (pool.TypeAt(index) == ObjectPool::kTaggedObject) {
if (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
*obj = pool.ObjectAt(index);
return true;
}
@ -331,7 +331,8 @@ RawCode* BareSwitchableCallPattern::target() const {
}
void BareSwitchableCallPattern::SetTarget(const Code& target) const {
ASSERT(object_pool_.TypeAt(target_pool_index_) == ObjectPool::kImmediate);
ASSERT(object_pool_.TypeAt(target_pool_index_) ==
ObjectPool::EntryType::kImmediate);
object_pool_.SetRawValueAt(target_pool_index_,
target.MonomorphicEntryPoint());
}


@ -10,12 +10,21 @@
#error Do not include instructions_arm.h directly; use instructions.h instead.
#endif
#include "vm/allocation.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/constants_arm.h"
#include "vm/native_entry.h"
#include "vm/object.h"
#include "vm/native_function.h"
namespace dart {
class ICData;
class Code;
class Object;
class ObjectPool;
class RawCode;
class RawICData;
class RawObject;
class InstructionPattern : public AllStatic {
public:
// Decodes a load sequence ending at 'end' (the last instruction of the


@ -12,6 +12,7 @@
#include "vm/constants_arm64.h"
#include "vm/cpu.h"
#include "vm/object.h"
#include "vm/reverse_pc_lookup_cache.h"
namespace dart {
@ -318,7 +319,7 @@ bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
intptr_t index = ObjectPool::IndexFromOffset(offset - kHeapObjectTag);
const ObjectPool& pool = ObjectPool::Handle(code.object_pool());
if (!pool.IsNull()) {
if (pool.TypeAt(index) == ObjectPool::kTaggedObject) {
if (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
*obj = pool.ObjectAt(index);
return true;
}
@ -443,7 +444,8 @@ RawCode* BareSwitchableCallPattern::target() const {
}
void BareSwitchableCallPattern::SetTarget(const Code& target) const {
ASSERT(object_pool_.TypeAt(target_pool_index_) == ObjectPool::kImmediate);
ASSERT(object_pool_.TypeAt(target_pool_index_) ==
ObjectPool::EntryType::kImmediate);
object_pool_.SetRawValueAt(target_pool_index_,
target.MonomorphicEntryPoint());
}


@ -10,13 +10,19 @@
#error Do not include instructions_arm64.h directly; use instructions.h instead.
#endif
#include "vm/allocation.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/constants_arm64.h"
#include "vm/native_entry.h"
#include "vm/object.h"
#include "vm/reverse_pc_lookup_cache.h"
#include "vm/native_function.h"
namespace dart {
class Code;
class ObjectPool;
class ICData;
class RawICData;
class RawCode;
class InstructionPattern : public AllStatic {
public:
// Decodes a load sequence ending at 'end' (the last instruction of the


@ -43,7 +43,7 @@ static bool GetLoadedObjectAt(uword pc,
Instr instr = SimulatorBytecode::At(pc);
if (HasLoadFromPool(instr)) {
uint16_t index = SimulatorBytecode::DecodeD(instr);
if (object_pool.TypeAt(index) == ObjectPool::kTaggedObject) {
if (object_pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
*obj = object_pool.ObjectAt(index);
return true;
}


@ -12,15 +12,9 @@
#include "vm/allocation.h"
#include "vm/cpu.h"
#include "vm/object.h"
namespace dart {
// Forward declarations.
class RawClass;
class Immediate;
class RawObject;
// Template class for all instruction pattern classes.
// P has to specify a static pattern and a pattern length method.
template <class P>


@ -38,7 +38,7 @@ bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
intptr_t index = IndexFromPPLoadDisp32(pc + 3);
const ObjectPool& pool = ObjectPool::Handle(code.object_pool());
if (!pool.IsNull()) {
if (pool.TypeAt(index) == ObjectPool::kTaggedObject) {
if (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
*obj = pool.ObjectAt(index);
return true;
}
@ -48,7 +48,7 @@ bool DecodeLoadObjectFromPoolOrThread(uword pc, const Code& code, Object* obj) {
intptr_t index = IndexFromPPLoadDisp8(pc + 3);
const ObjectPool& pool = ObjectPool::Handle(code.object_pool());
if (!pool.IsNull()) {
if (pool.TypeAt(index) == ObjectPool::kTaggedObject) {
if (pool.TypeAt(index) == ObjectPool::EntryType::kTaggedObject) {
*obj = pool.ObjectAt(index);
return true;
}


@ -7,19 +7,13 @@
#define RUNTIME_VM_INSTRUCTIONS_X64_H_
#ifndef RUNTIME_VM_INSTRUCTIONS_H_
#error Do not include instructions_ia32.h directly; use instructions.h instead.
#error "Do not include instructions_x64.h directly; use instructions.h instead."
#endif
#include "vm/allocation.h"
#include "vm/object.h"
namespace dart {
// Forward declarations.
class RawClass;
class Immediate;
class RawObject;
intptr_t IndexFromPPLoadDisp8(uword start);
intptr_t IndexFromPPLoadDisp32(uword start);


@ -89,10 +89,8 @@ static void DeterministicModeHandler(bool value) {
FLAG_random_seed = 0x44617274; // "Dart"
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
FLAG_load_deferred_eagerly = true;
FLAG_print_stop_message = false;  // Embeds addresses in instructions.
#else
COMPILE_ASSERT(FLAG_load_deferred_eagerly);
COMPILE_ASSERT(!FLAG_print_stop_message);
#endif
}
}


@ -5,6 +5,10 @@
#ifndef RUNTIME_VM_ISOLATE_H_
#define RUNTIME_VM_ISOLATE_H_
#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
#error "Should not include runtime"
#endif
#include "include/dart_api.h"
#include "platform/assert.h"
#include "platform/atomic.h"


@ -11,8 +11,8 @@
namespace dart {
class Isolate;
class LogBlock;
class Thread;
#if defined(_MSC_VER)
#define THR_Print(format, ...) Log::Current()->Print(format, __VA_ARGS__)


@ -52,13 +52,13 @@ RawFunction* MegamorphicCacheTable::miss_handler(Isolate* isolate) {
void MegamorphicCacheTable::InitMissHandler(Isolate* isolate) {
// The miss handler for a class ID not found in the table is invoked as a
// normal Dart function.
ObjectPoolWrapper object_pool_wrapper;
ObjectPoolBuilder object_pool_builder;
const Code& code = Code::Handle(
StubCode::Generate("_stub_MegamorphicMiss", &object_pool_wrapper,
StubCode::Generate("_stub_MegamorphicMiss", &object_pool_builder,
StubCode::GenerateMegamorphicMissStub));
const auto& object_pool =
ObjectPool::Handle(object_pool_wrapper.MakeObjectPool());
ObjectPool::Handle(ObjectPool::NewFromBuilder(object_pool_builder));
code.set_object_pool(object_pool.raw());
// When FLAG_lazy_dispatchers=false, this stub can be on the stack during
@ -88,7 +88,7 @@ void MegamorphicCacheTable::InitMissHandler(Isolate* isolate) {
}
void MegamorphicCacheTable::ReInitMissHandlerCode(Isolate* isolate,
ObjectPoolWrapper* wrapper) {
ObjectPoolBuilder* wrapper) {
ASSERT(FLAG_precompiled_mode && FLAG_use_bare_instructions);
const Code& code = Code::Handle(StubCode::Generate(


@ -9,11 +9,14 @@
namespace dart {
namespace compiler {
class ObjectPoolBuilder;
}
class Array;
class Function;
class Isolate;
class ObjectPointerVisitor;
class ObjectPoolWrapper;
class RawArray;
class RawFunction;
class RawCode;
@ -33,7 +36,7 @@ class MegamorphicCacheTable : public AllStatic {
// re-generate the handler to ensure it uses the common object pool.
NOT_IN_PRECOMPILED(
static void ReInitMissHandlerCode(Isolate* isolate,
ObjectPoolWrapper* wrapper));
compiler::ObjectPoolBuilder* wrapper));
static RawMegamorphicCache* Lookup(Isolate* isolate,
const String& name,


@ -8,14 +8,13 @@
#include "platform/memory_sanitizer.h"
#include "vm/allocation.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/exceptions.h"
#include "vm/heap/verifier.h"
#include "vm/log.h"
#include "vm/native_arguments.h"
#include "vm/native_function.h"
#include "vm/runtime_entry.h"
#include "include/dart_api.h"
namespace dart {
@ -23,22 +22,6 @@ namespace dart {
class Class;
class String;
// We have three variants of native functions:
// - bootstrap natives, which are called directly from stub code. The callee is
// responsible for safepoint transitions and setting up handle scopes as
// needed. Only VM-defined natives are bootstrap natives; they cannot be
// defined by embedders or native extensions.
// - no scope natives, which are called through a wrapper function. The wrapper
// function handles the safepoint transition. The callee is responsible for
// setting up API scopes as needed.
// - auto scope natives, which are called through a wrapper function. The
// wrapper function handles the safepoint transition and sets up an API
// scope.
typedef void (*NativeFunction)(NativeArguments* arguments);
typedef void (*NativeFunctionWrapper)(Dart_NativeArguments args,
Dart_NativeFunction func);
#ifdef DEBUG
#define TRACE_NATIVE_CALL(format, name) \
if (FLAG_trace_natives) { \


@ -0,0 +1,35 @@
// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_NATIVE_FUNCTION_H_
#define RUNTIME_VM_NATIVE_FUNCTION_H_
#include "vm/allocation.h"
#include "include/dart_api.h"
namespace dart {
// Forward declarations.
class NativeArguments;
// We have three variants of native functions:
// - bootstrap natives, which are called directly from stub code. The callee is
// responsible for safepoint transitions and setting up handle scopes as
// needed. Only VM-defined natives are bootstrap natives; they cannot be
// defined by embedders or native extensions.
// - no scope natives, which are called through a wrapper function. The wrapper
// function handles the safepoint transition. The callee is responsible for
// setting up API scopes as needed.
// - auto scope natives, which are called through a wrapper function. The
// wrapper function handles the safepoint transition and sets up an API
// scope.
typedef void (*NativeFunction)(NativeArguments* arguments);
typedef void (*NativeFunctionWrapper)(Dart_NativeArguments args,
Dart_NativeFunction func);
} // namespace dart
#endif // RUNTIME_VM_NATIVE_FUNCTION_H_


@ -12142,13 +12142,65 @@ RawObjectPool* ObjectPool::New(intptr_t len) {
result ^= raw;
result.SetLength(len);
for (intptr_t i = 0; i < len; i++) {
result.SetTypeAt(i, ObjectPool::kImmediate, ObjectPool::kPatchable);
result.SetTypeAt(i, ObjectPool::EntryType::kImmediate,
ObjectPool::Patchability::kPatchable);
}
}
return result.raw();
}
#if !defined(DART_PRECOMPILED_RUNTIME)
RawObjectPool* ObjectPool::NewFromBuilder(
const compiler::ObjectPoolBuilder& builder) {
const intptr_t len = builder.CurrentLength();
if (len == 0) {
return Object::empty_object_pool().raw();
}
const ObjectPool& result = ObjectPool::Handle(ObjectPool::New(len));
for (intptr_t i = 0; i < len; i++) {
auto entry = builder.EntryAt(i);
auto type = entry.type();
auto patchable = entry.patchable();
result.SetTypeAt(i, type, patchable);
if (type == EntryType::kTaggedObject) {
result.SetObjectAt(i, *entry.obj_);
} else {
result.SetRawValueAt(i, entry.raw_value_);
}
}
return result.raw();
}
void ObjectPool::CopyInto(compiler::ObjectPoolBuilder* builder) const {
ASSERT(builder->CurrentLength() == 0);
for (intptr_t i = 0; i < Length(); i++) {
auto type = TypeAt(i);
auto patchable = PatchableAt(i);
switch (type) {
case compiler::ObjectPoolBuilderEntry::kTaggedObject: {
compiler::ObjectPoolBuilderEntry entry(&Object::ZoneHandle(ObjectAt(i)),
patchable);
builder->AddObject(entry);
break;
}
case compiler::ObjectPoolBuilderEntry::kImmediate:
case compiler::ObjectPoolBuilderEntry::kNativeFunction:
case compiler::ObjectPoolBuilderEntry::kNativeFunctionWrapper: {
compiler::ObjectPoolBuilderEntry entry(RawValueAt(i), type, patchable);
builder->AddObject(entry);
break;
}
default:
UNREACHABLE();
}
}
ASSERT(builder->CurrentLength() == Length());
}
#endif
const char* ObjectPool::ToCString() const {
Zone* zone = Thread::Current()->zone();
return zone->PrintToString("ObjectPool len:%" Pd, Length());
@ -12159,13 +12211,14 @@ void ObjectPool::DebugPrint() const {
for (intptr_t i = 0; i < Length(); i++) {
intptr_t offset = OffsetFromIndex(i);
THR_Print(" %" Pd " PP+0x%" Px ": ", i, offset);
if ((TypeAt(i) == kTaggedObject) || (TypeAt(i) == kNativeEntryData)) {
if ((TypeAt(i) == EntryType::kTaggedObject) ||
(TypeAt(i) == EntryType::kNativeEntryData)) {
RawObject* obj = ObjectAt(i);
THR_Print("0x%" Px " %s (obj)\n", reinterpret_cast<uword>(obj),
Object::Handle(obj).ToCString());
} else if (TypeAt(i) == kNativeFunction) {
} else if (TypeAt(i) == EntryType::kNativeFunction) {
THR_Print("0x%" Px " (native function)\n", RawValueAt(i));
} else if (TypeAt(i) == kNativeFunctionWrapper) {
} else if (TypeAt(i) == EntryType::kNativeFunctionWrapper) {
THR_Print("0x%" Px " (native function wrapper)\n", RawValueAt(i));
} else {
THR_Print("0x%" Px " (raw)\n", RawValueAt(i));
@ -14226,6 +14279,19 @@ class CodeCommentsWrapper final : public CodeComments {
const Code::Comments& comments_;
String& string_;
};
static const Code::Comments& CreateCommentsFrom(
compiler::Assembler* assembler) {
const auto& comments = assembler->comments();
Code::Comments& result = Code::Comments::New(comments.length());
for (intptr_t i = 0; i < comments.length(); i++) {
result.SetPCOffsetAt(i, comments[i]->pc_offset());
result.SetCommentAt(i, comments[i]->comment());
}
return result;
}
#endif
RawCode* Code::FinalizeCode(const char* name,
@ -14242,7 +14308,10 @@ RawCode* Code::FinalizeCode(const char* name,
ASSERT(assembler != NULL);
const auto object_pool =
pool_attachment == PoolAttachment::kAttachPool
? &ObjectPool::Handle(assembler->MakeObjectPool())
? &ObjectPool::Handle(assembler->HasObjectPoolBuilder()
? ObjectPool::NewFromBuilder(
assembler->object_pool_builder())
: ObjectPool::empty_object_pool().raw())
: nullptr;
// Allocate the Code and Instructions objects. Code is allocated first
@ -14251,7 +14320,7 @@ RawCode* Code::FinalizeCode(const char* name,
intptr_t pointer_offset_count = assembler->CountPointerOffsets();
Code& code = Code::ZoneHandle(Code::New(pointer_offset_count));
#ifdef TARGET_ARCH_IA32
assembler->set_code_object(code);
assembler->GetSelfHandle() = code.raw();
#endif
Instructions& instrs = Instructions::ZoneHandle(Instructions::New(
assembler->CodeSize(), assembler->has_single_entry_point(),
@ -14314,7 +14383,7 @@ RawCode* Code::FinalizeCode(const char* name,
#endif
#ifndef PRODUCT
const Code::Comments& comments = assembler->GetCodeComments();
const Code::Comments& comments = CreateCommentsFrom(assembler);
code.set_compile_timestamp(OS::GetCurrentMonotonicMicros());
CodeCommentsWrapper comments_wrapper(comments);


@ -5,12 +5,19 @@
#ifndef RUNTIME_VM_OBJECT_H_
#define RUNTIME_VM_OBJECT_H_
#if defined(SHOULD_NOT_INCLUDE_RUNTIME)
#error "Should not include runtime"
#endif
#include <tuple>
#include "include/dart_api.h"
#include "platform/assert.h"
#include "platform/utils.h"
#include "vm/bitmap.h"
#include "vm/code_entry_kind.h"
#include "vm/compiler/assembler/object_pool_builder.h"
#include "vm/compiler/method_recognizer.h"
#include "vm/compiler/runtime_api.h"
#include "vm/dart.h"
#include "vm/flags.h"
#include "vm/globals.h"
@ -29,6 +36,10 @@
namespace dart {
// Forward declarations.
namespace compiler {
class Assembler;
}
namespace kernel {
class Program;
class TreeNode;
@ -39,7 +50,6 @@ CLASS_LIST(DEFINE_FORWARD_DECLARATION)
#undef DEFINE_FORWARD_DECLARATION
class Api;
class ArgumentsDescriptor;
class Assembler;
class Closure;
class Code;
class DeoptInstr;
@ -4052,26 +4062,15 @@ class KernelProgramInfo : public Object {
// with it which is stored in-inline after all the entries.
class ObjectPool : public Object {
public:
enum EntryType {
kTaggedObject,
kImmediate,
kNativeFunction,
kNativeFunctionWrapper,
kNativeEntryData,
};
enum Patchability {
kPatchable,
kNotPatchable,
};
class TypeBits : public BitField<uint8_t, EntryType, 0, 7> {};
class PatchableBit
: public BitField<uint8_t, Patchability, TypeBits::kNextBit, 1> {};
using EntryType = compiler::ObjectPoolBuilderEntry::EntryType;
using Patchability = compiler::ObjectPoolBuilderEntry::Patchability;
using TypeBits = compiler::ObjectPoolBuilderEntry::TypeBits;
using PatchableBit = compiler::ObjectPoolBuilderEntry::PatchableBit;
struct Entry {
Entry() : raw_value_(), type_() {}
explicit Entry(const Object* obj) : obj_(obj), type_(kTaggedObject) {}
explicit Entry(const Object* obj)
: obj_(obj), type_(EntryType::kTaggedObject) {}
Entry(uword value, EntryType info) : raw_value_(value), type_(info) {}
union {
const Object* obj_;
@ -4109,23 +4108,23 @@ class ObjectPool : public Object {
}
RawObject* ObjectAt(intptr_t index) const {
ASSERT((TypeAt(index) == kTaggedObject) ||
(TypeAt(index) == kNativeEntryData));
ASSERT((TypeAt(index) == EntryType::kTaggedObject) ||
(TypeAt(index) == EntryType::kNativeEntryData));
return EntryAddr(index)->raw_obj_;
}
void SetObjectAt(intptr_t index, const Object& obj) const {
ASSERT((TypeAt(index) == kTaggedObject) ||
(TypeAt(index) == kNativeEntryData) ||
(TypeAt(index) == kImmediate && obj.IsSmi()));
ASSERT((TypeAt(index) == EntryType::kTaggedObject) ||
(TypeAt(index) == EntryType::kNativeEntryData) ||
(TypeAt(index) == EntryType::kImmediate && obj.IsSmi()));
StorePointer(&EntryAddr(index)->raw_obj_, obj.raw());
}
uword RawValueAt(intptr_t index) const {
ASSERT(TypeAt(index) != kTaggedObject);
ASSERT(TypeAt(index) != EntryType::kTaggedObject);
return EntryAddr(index)->raw_value_;
}
void SetRawValueAt(intptr_t index, uword raw_value) const {
ASSERT(TypeAt(index) != kTaggedObject);
ASSERT(TypeAt(index) != EntryType::kTaggedObject);
StoreNonPointer(&EntryAddr(index)->raw_value_, raw_value);
}
@ -4150,8 +4149,12 @@ class ObjectPool : public Object {
(len * kBytesPerElement));
}
static RawObjectPool* NewFromBuilder(
const compiler::ObjectPoolBuilder& builder);
static RawObjectPool* New(intptr_t len);
void CopyInto(compiler::ObjectPoolBuilder* builder) const;
// Returns the pool index from the offset relative to a tagged RawObjectPool*,
// adjusting for the tag-bit.
static intptr_t IndexFromOffset(intptr_t offset) {
@ -4731,12 +4734,7 @@ class Code : public Object {
return OFFSET_OF(RawCode, instructions_);
}
enum class EntryKind {
kNormal,
kUnchecked,
kMonomorphic,
kMonomorphicUnchecked,
};
using EntryKind = CodeEntryKind;
static intptr_t entry_point_offset(EntryKind kind = EntryKind::kNormal) {
switch (kind) {
@ -4755,9 +4753,9 @@ class Code : public Object {
static intptr_t function_entry_point_offset(EntryKind kind) {
switch (kind) {
case Code::EntryKind::kNormal:
case EntryKind::kNormal:
return Function::entry_point_offset();
case Code::EntryKind::kUnchecked:
case EntryKind::kUnchecked:
return Function::unchecked_entry_point_offset();
default:
ASSERT(false && "Invalid entry kind.");
@ -5050,13 +5048,13 @@ class Code : public Object {
// `Object::set_object_pool()`.
static RawCode* FinalizeCode(const Function& function,
FlowGraphCompiler* compiler,
Assembler* assembler,
compiler::Assembler* assembler,
PoolAttachment pool_attachment,
bool optimized = false,
CodeStatistics* stats = nullptr);
static RawCode* FinalizeCode(const char* name,
FlowGraphCompiler* compiler,
Assembler* assembler,
compiler::Assembler* assembler,
PoolAttachment pool_attachment,
bool optimized,
CodeStatistics* stats = nullptr);


@ -101,7 +101,7 @@ void ObjectPool::ResetICDatas(Zone* zone) const {
Object& object = Object::Handle(zone);
for (intptr_t i = 0; i < Length(); i++) {
ObjectPool::EntryType entry_type = TypeAt(i);
if (entry_type != ObjectPool::kTaggedObject) {
if (entry_type != ObjectPool::EntryType::kTaggedObject) {
continue;
}
object = ObjectAt(i);


@ -628,27 +628,27 @@ void ObjectPool::PrintJSONImpl(JSONStream* stream, bool ref) const {
JSONObject jsentry(stream);
jsentry.AddProperty("offset", OffsetFromIndex(i));
switch (TypeAt(i)) {
case ObjectPool::kTaggedObject:
case ObjectPool::EntryType::kTaggedObject:
obj = ObjectAt(i);
jsentry.AddProperty("kind", "Object");
jsentry.AddProperty("value", obj);
break;
case ObjectPool::kImmediate:
case ObjectPool::EntryType::kImmediate:
imm = RawValueAt(i);
jsentry.AddProperty("kind", "Immediate");
jsentry.AddProperty64("value", imm);
break;
case ObjectPool::kNativeEntryData:
case ObjectPool::EntryType::kNativeEntryData:
obj = ObjectAt(i);
jsentry.AddProperty("kind", "NativeEntryData");
jsentry.AddProperty("value", obj);
break;
case ObjectPool::kNativeFunction:
case ObjectPool::EntryType::kNativeFunction:
imm = RawValueAt(i);
jsentry.AddProperty("kind", "NativeFunction");
jsentry.AddProperty64("value", imm);
break;
case ObjectPool::kNativeFunctionWrapper:
case ObjectPool::EntryType::kNativeFunctionWrapper:
imm = RawValueAt(i);
jsentry.AddProperty("kind", "NativeFunctionWrapper");
jsentry.AddProperty64("value", imm);


@ -2470,8 +2470,8 @@ static RawFunction* CreateFunction(const char* name) {
// Test for Code and Instruction object creation.
ISOLATE_UNIT_TEST_CASE(Code) {
extern void GenerateIncrement(Assembler * assembler);
ObjectPoolWrapper object_pool_wrapper;
Assembler _assembler_(&object_pool_wrapper);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
const Function& function = Function::Handle(CreateFunction("Test_Code"));
Code& code = Code::Handle(Code::FinalizeCode(
@ -2492,8 +2492,8 @@ ISOLATE_UNIT_TEST_CASE(CodeImmutability) {
MallocHooks::stack_trace_collection_enabled();
MallocHooks::set_stack_trace_collection_enabled(false);
extern void GenerateIncrement(Assembler * assembler);
ObjectPoolWrapper object_pool_wrapper;
Assembler _assembler_(&object_pool_wrapper);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
const Function& function = Function::Handle(CreateFunction("Test_Code"));
Code& code = Code::Handle(Code::FinalizeCode(
@ -2519,8 +2519,8 @@ ISOLATE_UNIT_TEST_CASE(EmbedStringInCode) {
extern void GenerateEmbedStringInCode(Assembler * assembler, const char* str);
const char* kHello = "Hello World!";
word expected_length = static_cast<word>(strlen(kHello));
ObjectPoolWrapper object_pool_wrapper;
Assembler _assembler_(&object_pool_wrapper);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
GenerateEmbedStringInCode(&_assembler_, kHello);
const Function& function =
Function::Handle(CreateFunction("Test_EmbedStringInCode"));
@ -2542,8 +2542,8 @@ ISOLATE_UNIT_TEST_CASE(EmbedStringInCode) {
ISOLATE_UNIT_TEST_CASE(EmbedSmiInCode) {
extern void GenerateEmbedSmiInCode(Assembler * assembler, intptr_t value);
const intptr_t kSmiTestValue = 5;
ObjectPoolWrapper object_pool_wrapper;
Assembler _assembler_(&object_pool_wrapper);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
GenerateEmbedSmiInCode(&_assembler_, kSmiTestValue);
const Function& function =
Function::Handle(CreateFunction("Test_EmbedSmiInCode"));
@ -2560,8 +2560,8 @@ ISOLATE_UNIT_TEST_CASE(EmbedSmiInCode) {
ISOLATE_UNIT_TEST_CASE(EmbedSmiIn64BitCode) {
extern void GenerateEmbedSmiInCode(Assembler * assembler, intptr_t value);
const intptr_t kSmiTestValue = DART_INT64_C(5) << 32;
ObjectPoolWrapper object_pool_wrapper;
Assembler _assembler_(&object_pool_wrapper);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
GenerateEmbedSmiInCode(&_assembler_, kSmiTestValue);
const Function& function =
Function::Handle(CreateFunction("Test_EmbedSmiIn64BitCode"));
@ -2591,8 +2591,8 @@ ISOLATE_UNIT_TEST_CASE(ExceptionHandlers) {
TokenPosition::kNoSource, true);
extern void GenerateIncrement(Assembler * assembler);
ObjectPoolWrapper object_pool_wrapper;
Assembler _assembler_(&object_pool_wrapper);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
Code& code = Code::Handle(
Code::FinalizeCode(Function::Handle(CreateFunction("Test_Code")), nullptr,
@ -2633,8 +2633,8 @@ ISOLATE_UNIT_TEST_CASE(PcDescriptors) {
descriptors ^= builder->FinalizePcDescriptors(0);
extern void GenerateIncrement(Assembler * assembler);
ObjectPoolWrapper object_pool_wrapper;
Assembler _assembler_(&object_pool_wrapper);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
Code& code = Code::Handle(
Code::FinalizeCode(Function::Handle(CreateFunction("Test_Code")), nullptr,
@ -2696,8 +2696,8 @@ ISOLATE_UNIT_TEST_CASE(PcDescriptorsLargeDeltas) {
descriptors ^= builder->FinalizePcDescriptors(0);
extern void GenerateIncrement(Assembler * assembler);
ObjectPoolWrapper object_pool_wrapper;
Assembler _assembler_(&object_pool_wrapper);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
Code& code = Code::Handle(
Code::FinalizeCode(Function::Handle(CreateFunction("Test_Code")), nullptr,


@ -13,7 +13,6 @@ struct tm;
namespace dart {
// Forward declarations.
class Isolate;
class Zone;
// Interface to the underlying OS platform.


@ -44,12 +44,11 @@ class BaseThread {
private:
explicit BaseThread(bool is_os_thread) : is_os_thread_(is_os_thread) {}
~BaseThread() {}
virtual ~BaseThread() {}
bool is_os_thread_;
friend class ThreadState;
friend class Thread;
friend class OSThread;
DISALLOW_IMPLICIT_CONSTRUCTORS(BaseThread);


@ -0,0 +1,68 @@
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_POINTER_TAGGING_H_
#define RUNTIME_VM_POINTER_TAGGING_H_
// This header defines constants associated with pointer tagging:
//
// * which bits determine whether or not this is a Smi value or a heap
// pointer;
// * which bits determine whether this is a pointer into a new or an old
// space.
namespace dart {
// The Dart VM aligns all objects by 2 words in the old space and misaligns
// them in the new space. This makes it possible to distinguish new and old
// pointers by their low bits.
//
// Note: these bits depend on the word size.
template <intptr_t word_size, intptr_t word_size_log2>
struct ObjectAlignment {
// Alignment offsets are used to determine object age.
static constexpr intptr_t kNewObjectAlignmentOffset = word_size;
static constexpr intptr_t kOldObjectAlignmentOffset = 0;
static constexpr intptr_t kNewObjectBitPosition = word_size_log2;
// Object sizes are aligned to kObjectAlignment.
static constexpr intptr_t kObjectAlignment = 2 * word_size;
static constexpr intptr_t kObjectAlignmentLog2 = word_size_log2 + 1;
static constexpr intptr_t kObjectAlignmentMask = kObjectAlignment - 1;
};
using HostObjectAlignment = ObjectAlignment<kWordSize, kWordSizeLog2>;
static constexpr intptr_t kNewObjectAlignmentOffset =
HostObjectAlignment::kNewObjectAlignmentOffset;
static constexpr intptr_t kOldObjectAlignmentOffset =
HostObjectAlignment::kOldObjectAlignmentOffset;
static constexpr intptr_t kNewObjectBitPosition =
HostObjectAlignment::kNewObjectBitPosition;
static constexpr intptr_t kObjectAlignment =
HostObjectAlignment::kObjectAlignment;
static constexpr intptr_t kObjectAlignmentLog2 =
HostObjectAlignment::kObjectAlignmentLog2;
static constexpr intptr_t kObjectAlignmentMask =
HostObjectAlignment::kObjectAlignmentMask;
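// A minimal illustration (hypothetical helpers, not part of this header):
// for an untagged object address the alignment bits reveal the object's age.
//
//   inline bool IsNewSpaceAddress(uword addr) {
//     return (addr & kObjectAlignmentMask) == kNewObjectAlignmentOffset;
//   }
//   inline bool IsOldSpaceAddress(uword addr) {
//     return (addr & kObjectAlignmentMask) == kOldObjectAlignmentOffset;
//   }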
// On all targets heap pointers are tagged by setting the least significant
// bit.
//
// To recover the address of the actual heap object, kHeapObjectTag needs to
// be subtracted from the tagged pointer value.
//
// Smis (small integers) have the least significant bit cleared.
//
// To recover the integer value, the tagged value needs to be shifted right
// by kSmiTagShift.
enum {
kSmiTag = 0,
kHeapObjectTag = 1,
kSmiTagSize = 1,
kSmiTagMask = 1,
kSmiTagShift = 1,
};
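// A minimal illustration (hypothetical helpers, not part of this header) of
// how the constants above are applied to tagged values:
//
//   inline bool IsSmiValue(uword tagged) {
//     return (tagged & kSmiTagMask) == kSmiTag;
//   }
//   inline intptr_t UntagSmiValue(uword tagged) {
//     return static_cast<intptr_t>(tagged) >> kSmiTagShift;
//   }
//   inline uword UntagHeapObject(uword tagged) {
//     return tagged - kHeapObjectTag;  // clear the least significant bit
//   }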
} // namespace dart
#endif // RUNTIME_VM_POINTER_TAGGING_H_

View file

@ -572,8 +572,8 @@ intptr_t RawObjectPool::VisitObjectPoolPointers(RawObjectPool* raw_obj,
for (intptr_t i = 0; i < length; ++i) {
ObjectPool::EntryType entry_type =
ObjectPool::TypeBits::decode(entry_bits[i]);
if ((entry_type == ObjectPool::kTaggedObject) ||
(entry_type == ObjectPool::kNativeEntryData)) {
if ((entry_type == ObjectPool::EntryType::kTaggedObject) ||
(entry_type == ObjectPool::EntryType::kNativeEntryData)) {
visitor->VisitPointer(&entries[i].raw_obj_);
}
}

Some files were not shown because too many files have changed in this diff.