[vm] Remove misleading usings.

Patchset 1 contains just the usings I deleted (<100 lines). The rest of
the CL just fixes all the places that were broken by removing them.

Bug: https://github.com/dart-lang/sdk/issues/36839
Change-Id: I3bb4fa62ab4363ded81fd7c2815b857f91886dd6
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/108502
Commit-Queue: Liam Appelbe <liama@google.com>
Reviewed-by: Siva Annamalai <asiva@google.com>
Liam Appelbe 2019-07-10 22:20:10 +00:00 committed by commit-bot@chromium.org
parent 0104a62595
commit d49bf6f25a
71 changed files with 3220 additions and 2969 deletions
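
For context: the deleted usings re-export compiler:: and compiler::target:: names into enclosing namespaces, so an unqualified name such as Thread inside namespace compiler silently resolves to the target VM's Thread layout rather than the runtime's own dart::Thread. The self-contained sketch below is not VM code (the vm namespace, the offset values, and the function names are invented); it only illustrates that hazard and the explicit target:: qualification the diffs in this CL switch to.

#include <cstdio>

namespace vm {                      // stands in for namespace dart
struct Thread {                     // the host runtime's Thread
  static int top_offset() { return 8; }
};

namespace compiler {
namespace target {
struct Thread {                     // layout of the target VM being compiled for
  static int top_offset() { return 16; }
};
}  // namespace target

// A using-declaration like the ones this CL deletes: inside namespace
// compiler, unqualified Thread now names target::Thread instead of the
// enclosing vm::Thread a reader might expect.
using target::Thread;

int MisleadingOffset() { return Thread::top_offset(); }        // yields 16, not 8

int ExplicitOffset() { return target::Thread::top_offset(); }  // what the CL writes

}  // namespace compiler
}  // namespace vm

int main() {
  std::printf("%d %d\n", vm::compiler::MisleadingOffset(),
              vm::compiler::ExplicitOffset());
  return 0;
}

With the using removed, the first function stops compiling until it is rewritten in the explicit form, which is exactly what the bulk of the diffs below do to the assembler sources.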


@ -44,7 +44,7 @@ ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
// Code accessing pp is generated, but not executed. Uninitialized pp is OK.
__ set_constant_pool_allowed(true);
ObjectPoolBuilder& op = __ object_pool_builder();
compiler::ObjectPoolBuilder& op = __ object_pool_builder();
const intptr_t ic_data_index =
op.AddObject(ic_data, ObjectPool::Patchability::kPatchable);
const intptr_t stub_index =
@ -52,8 +52,9 @@ ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
ASSERT((ic_data_index + 1) == stub_index);
__ LoadDoubleWordFromPoolOffset(R5, CODE_REG,
ObjectPool::element_offset(ic_data_index));
__ ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset(
Code::EntryKind::kMonomorphic)));
__ ldr(LR, compiler::FieldAddress(
CODE_REG,
Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
__ blr(LR);
__ ret();
}


@ -108,7 +108,7 @@ class PrecompileParsedFunctionHelper : public ValueObject {
Thread* thread() const { return thread_; }
Isolate* isolate() const { return thread_->isolate(); }
void FinalizeCompilation(Assembler* assembler,
void FinalizeCompilation(compiler::Assembler* assembler,
FlowGraphCompiler* graph_compiler,
FlowGraph* flow_graph,
CodeStatistics* stats);
@ -2152,9 +2152,8 @@ void Precompiler::FinalizeAllClasses() {
I->set_all_classes_finalized(true);
}
void PrecompileParsedFunctionHelper::FinalizeCompilation(
Assembler* assembler,
compiler::Assembler* assembler,
FlowGraphCompiler* graph_compiler,
FlowGraph* flow_graph,
CodeStatistics* stats) {
@ -2292,12 +2291,13 @@ bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
ASSERT(!FLAG_use_bare_instructions || precompiler_ != nullptr);
ObjectPoolBuilder object_pool;
ObjectPoolBuilder* active_object_pool_builder =
compiler::ObjectPoolBuilder object_pool;
compiler::ObjectPoolBuilder* active_object_pool_builder =
FLAG_use_bare_instructions
? precompiler_->global_object_pool_builder()
: &object_pool;
Assembler assembler(active_object_pool_builder, use_far_branches);
compiler::Assembler assembler(active_object_pool_builder,
use_far_branches);
CodeStatistics* function_stats = NULL;
if (FLAG_print_instruction_stats) {


@ -382,11 +382,4 @@ class AssemblerBase : public StackResource {
#error Unknown architecture.
#endif
namespace dart {
using compiler::Assembler;
using compiler::ExternalLabel;
using compiler::Label;
using compiler::ObjectPoolBuilder;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_H_


@ -28,33 +28,20 @@ DECLARE_FLAG(bool, use_slow_path);
namespace compiler {
#ifndef PRODUCT
using target::ClassHeapStats;
#endif
using target::ClassTable;
using target::Double;
using target::Float32x4;
using target::Float64x2;
using target::Heap;
using target::Instance;
using target::Instructions;
using target::Isolate;
using target::ObjectPool;
using target::RawObject;
using target::Thread;
Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
bool use_far_branches)
: AssemblerBase(object_pool_builder),
use_far_branches_(use_far_branches),
constant_pool_allowed_(false) {
generate_invoke_write_barrier_wrapper_ = [&](Condition cond, Register reg) {
ldr(LR, Address(THR, Thread::write_barrier_wrappers_thread_offset(reg)),
ldr(LR,
Address(THR, target::Thread::write_barrier_wrappers_thread_offset(reg)),
cond);
blx(LR, cond);
};
generate_invoke_array_write_barrier_ = [&](Condition cond) {
ldr(LR, Address(THR, Thread::array_write_barrier_entry_point_offset()),
ldr(LR,
Address(THR, target::Thread::array_write_barrier_entry_point_offset()),
cond);
blx(LR, cond);
};
@ -560,25 +547,26 @@ void Assembler::TransitionGeneratedToNative(Register destination_address,
Register state) {
// Save exit frame information to enable stack walking.
StoreToOffset(kWord, exit_frame_fp, THR,
Thread::top_exit_frame_info_offset());
target::Thread::top_exit_frame_info_offset());
// Mark that the thread is executing native code.
StoreToOffset(kWord, destination_address, THR, Thread::vm_tag_offset());
LoadImmediate(state, compiler::target::Thread::native_execution_state());
StoreToOffset(kWord, state, THR, Thread::execution_state_offset());
StoreToOffset(kWord, destination_address, THR,
target::Thread::vm_tag_offset());
LoadImmediate(state, target::Thread::native_execution_state());
StoreToOffset(kWord, state, THR, target::Thread::execution_state_offset());
if (FLAG_use_slow_path || TargetCPUFeatures::arm_version() == ARMv5TE) {
EnterSafepointSlowly();
} else {
Label slow_path, done, retry;
LoadImmediate(addr, compiler::target::Thread::safepoint_state_offset());
LoadImmediate(addr, target::Thread::safepoint_state_offset());
add(addr, THR, Operand(addr));
Bind(&retry);
ldrex(state, addr);
cmp(state, Operand(Thread::safepoint_state_unacquired()));
cmp(state, Operand(target::Thread::safepoint_state_unacquired()));
b(&slow_path, NE);
mov(state, Operand(Thread::safepoint_state_acquired()));
mov(state, Operand(target::Thread::safepoint_state_acquired()));
strex(TMP, state, addr);
cmp(TMP, Operand(0)); // 0 means strex was successful.
b(&done, EQ);
@ -592,9 +580,8 @@ void Assembler::TransitionGeneratedToNative(Register destination_address,
}
void Assembler::EnterSafepointSlowly() {
ldr(TMP,
Address(THR, compiler::target::Thread::enter_safepoint_stub_offset()));
ldr(TMP, FieldAddress(TMP, compiler::target::Code::entry_point_offset()));
ldr(TMP, Address(THR, target::Thread::enter_safepoint_stub_offset()));
ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
blx(TMP);
}
@ -603,14 +590,14 @@ void Assembler::TransitionNativeToGenerated(Register addr, Register state) {
ExitSafepointSlowly();
} else {
Label slow_path, done, retry;
LoadImmediate(addr, compiler::target::Thread::safepoint_state_offset());
LoadImmediate(addr, target::Thread::safepoint_state_offset());
add(addr, THR, Operand(addr));
Bind(&retry);
ldrex(state, addr);
cmp(state, Operand(Thread::safepoint_state_acquired()));
cmp(state, Operand(target::Thread::safepoint_state_acquired()));
b(&slow_path, NE);
mov(state, Operand(Thread::safepoint_state_unacquired()));
mov(state, Operand(target::Thread::safepoint_state_unacquired()));
strex(TMP, state, addr);
cmp(TMP, Operand(0)); // 0 means strex was successful.
b(&done, EQ);
@ -623,20 +610,20 @@ void Assembler::TransitionNativeToGenerated(Register addr, Register state) {
}
// Mark that the thread is executing Dart code.
LoadImmediate(state, compiler::target::Thread::vm_tag_compiled_id());
StoreToOffset(kWord, state, THR, Thread::vm_tag_offset());
LoadImmediate(state, compiler::target::Thread::generated_execution_state());
StoreToOffset(kWord, state, THR, Thread::execution_state_offset());
LoadImmediate(state, target::Thread::vm_tag_compiled_id());
StoreToOffset(kWord, state, THR, target::Thread::vm_tag_offset());
LoadImmediate(state, target::Thread::generated_execution_state());
StoreToOffset(kWord, state, THR, target::Thread::execution_state_offset());
// Reset exit frame information in Isolate structure.
LoadImmediate(state, 0);
StoreToOffset(kWord, state, THR, Thread::top_exit_frame_info_offset());
StoreToOffset(kWord, state, THR,
target::Thread::top_exit_frame_info_offset());
}
void Assembler::ExitSafepointSlowly() {
ldr(TMP,
Address(THR, compiler::target::Thread::exit_safepoint_stub_offset()));
ldr(TMP, FieldAddress(TMP, compiler::target::Code::entry_point_offset()));
ldr(TMP, Address(THR, target::Thread::exit_safepoint_stub_offset()));
ldr(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
blx(TMP);
}
@ -1549,7 +1536,7 @@ void Assembler::CheckCodePointer() {
Bind(&cid_ok);
const intptr_t offset = CodeSize() + Instr::kPCReadOffset +
Instructions::HeaderSize() - kHeapObjectTag;
target::Instructions::HeaderSize() - kHeapObjectTag;
mov(R0, Operand(PC));
AddImmediate(R0, -offset);
ldr(IP, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
@ -1576,7 +1563,7 @@ void Assembler::LoadPoolPointer(Register reg) {
}
void Assembler::LoadIsolate(Register rd) {
ldr(rd, Address(THR, Thread::isolate_offset()));
ldr(rd, Address(THR, target::Thread::isolate_offset()));
}
bool Assembler::CanLoadFromObjectPool(const Object& object) const {
@ -1610,7 +1597,7 @@ void Assembler::LoadObjectHelper(Register rd,
// object pool.
const auto index = is_unique ? object_pool_builder().AddObject(object)
: object_pool_builder().FindObject(object);
const int32_t offset = ObjectPool::element_offset(index);
const int32_t offset = target::ObjectPool::element_offset(index);
LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond);
} else {
UNREACHABLE();
@ -1631,7 +1618,7 @@ void Assembler::LoadNativeEntry(Register rd,
const ExternalLabel* label,
ObjectPoolBuilderEntry::Patchability patchable,
Condition cond) {
const int32_t offset = ObjectPool::element_offset(
const int32_t offset = target::ObjectPool::element_offset(
object_pool_builder().FindNativeFunction(label, patchable));
LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond);
}
@ -1732,8 +1719,8 @@ void Assembler::StoreIntoObject(Register object,
if (!lr_reserved) Push(LR);
ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
and_(TMP, LR, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
ldr(LR, Address(THR, Thread::write_barrier_mask_offset()));
and_(TMP, LR, Operand(TMP, LSR, target::RawObject::kBarrierOverlapShift));
ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
tst(TMP, Operand(LR));
if (value != kWriteBarrierValueReg) {
// Unlikely. Only non-graph intrinsics.
@ -1796,8 +1783,8 @@ void Assembler::StoreIntoArray(Register object,
if (!lr_reserved) Push(LR);
ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
and_(TMP, LR, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
ldr(LR, Address(THR, Thread::write_barrier_mask_offset()));
and_(TMP, LR, Operand(TMP, LSR, target::RawObject::kBarrierOverlapShift));
ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
tst(TMP, Operand(LR));
if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
@ -1957,20 +1944,22 @@ void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
}
void Assembler::LoadClassId(Register result, Register object, Condition cond) {
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
ldrh(result, FieldAddress(object, class_id_offset), cond);
}
void Assembler::LoadClassById(Register result, Register class_id) {
ASSERT(result != class_id);
LoadIsolate(result);
const intptr_t offset =
Isolate::class_table_offset() + ClassTable::table_offset();
const intptr_t offset = target::Isolate::class_table_offset() +
target::ClassTable::table_offset();
LoadFromOffset(kWord, result, result, offset);
ldr(result, Address(result, class_id, LSL, ClassTable::kSizeOfClassPairLog2));
ldr(result,
Address(result, class_id, LSL, target::ClassTable::kSizeOfClassPairLog2));
}
void Assembler::CompareClassId(Register object,
@ -2679,7 +2668,7 @@ void Assembler::Branch(const Code& target,
ObjectPoolBuilderEntry::Patchability patchable,
Register pp,
Condition cond) {
const int32_t offset = ObjectPool::element_offset(
const int32_t offset = target::ObjectPool::element_offset(
object_pool_builder().FindObject(ToObject(target), patchable));
LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, pp, cond);
Branch(FieldAddress(CODE_REG, target::Code::entry_point_offset()), cond);
@ -2696,7 +2685,7 @@ void Assembler::BranchLink(const Code& target,
// to by this code sequence.
// For added code robustness, use 'blx lr' in a patchable sequence and
// use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
const int32_t offset = ObjectPool::element_offset(
const int32_t offset = target::ObjectPool::element_offset(
object_pool_builder().FindObject(ToObject(target), patchable));
LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL);
ldr(LR, FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
@ -2709,15 +2698,16 @@ void Assembler::BranchLinkPatchable(const Code& target,
}
void Assembler::BranchLinkToRuntime() {
ldr(IP, Address(THR, Thread::call_to_runtime_entry_point_offset()));
ldr(IP, Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
blx(IP);
}
void Assembler::CallNullErrorShared(bool save_fpu_registers) {
uword entry_point_offset =
save_fpu_registers
? Thread::null_error_shared_with_fpu_regs_entry_point_offset()
: Thread::null_error_shared_without_fpu_regs_entry_point_offset();
? target::Thread::null_error_shared_with_fpu_regs_entry_point_offset()
: target::Thread::
null_error_shared_without_fpu_regs_entry_point_offset();
ldr(LR, Address(THR, entry_point_offset));
blx(LR);
}
@ -2729,7 +2719,7 @@ void Assembler::BranchLinkWithEquivalence(const Code& target,
// to by this code sequence.
// For added code robustness, use 'blx lr' in a patchable sequence and
// use 'blx ip' in a non-patchable sequence (see other BranchLink flavors).
const int32_t offset = ObjectPool::element_offset(
const int32_t offset = target::ObjectPool::element_offset(
object_pool_builder().FindObject(ToObject(target), equivalence));
LoadWordFromPoolOffset(CODE_REG, offset - kHeapObjectTag, PP, AL);
ldr(LR, FieldAddress(CODE_REG, target::Code::entry_point_offset(entry_kind)));
@ -2777,7 +2767,8 @@ void Assembler::LoadDecodableImmediate(Register rd,
const ARMVersion version = TargetCPUFeatures::arm_version();
if ((version == ARMv5TE) || (version == ARMv6)) {
if (constant_pool_allowed()) {
const int32_t offset = ObjectPool::element_offset(FindImmediate(value));
const int32_t offset =
target::ObjectPool::element_offset(FindImmediate(value));
LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, PP, cond);
} else {
LoadPatchableImmediate(rd, value, cond);
@ -2971,15 +2962,19 @@ void Assembler::CopyDoubleField(Register dst,
Register tmp2,
DRegister dtmp) {
if (TargetCPUFeatures::vfp_supported()) {
LoadDFromOffset(dtmp, src, Double::value_offset() - kHeapObjectTag);
StoreDToOffset(dtmp, dst, Double::value_offset() - kHeapObjectTag);
LoadDFromOffset(dtmp, src, target::Double::value_offset() - kHeapObjectTag);
StoreDToOffset(dtmp, dst, target::Double::value_offset() - kHeapObjectTag);
} else {
LoadFromOffset(kWord, tmp1, src, Double::value_offset() - kHeapObjectTag);
LoadFromOffset(kWord, tmp2, src,
Double::value_offset() + target::kWordSize - kHeapObjectTag);
StoreToOffset(kWord, tmp1, dst, Double::value_offset() - kHeapObjectTag);
StoreToOffset(kWord, tmp2, dst,
Double::value_offset() + target::kWordSize - kHeapObjectTag);
LoadFromOffset(kWord, tmp1, src,
target::Double::value_offset() - kHeapObjectTag);
LoadFromOffset(
kWord, tmp2, src,
target::Double::value_offset() + target::kWordSize - kHeapObjectTag);
StoreToOffset(kWord, tmp1, dst,
target::Double::value_offset() - kHeapObjectTag);
StoreToOffset(
kWord, tmp2, dst,
target::Double::value_offset() + target::kWordSize - kHeapObjectTag);
}
}
@ -2990,35 +2985,35 @@ void Assembler::CopyFloat32x4Field(Register dst,
DRegister dtmp) {
if (TargetCPUFeatures::neon_supported()) {
LoadMultipleDFromOffset(dtmp, 2, src,
Float32x4::value_offset() - kHeapObjectTag);
target::Float32x4::value_offset() - kHeapObjectTag);
StoreMultipleDToOffset(dtmp, 2, dst,
Float32x4::value_offset() - kHeapObjectTag);
target::Float32x4::value_offset() - kHeapObjectTag);
} else {
LoadFromOffset(
kWord, tmp1, src,
(Float32x4::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(
kWord, tmp2, src,
(Float32x4::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp1, dst,
(Float32x4::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp2, dst,
(Float32x4::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(kWord, tmp1, src,
(target::Float32x4::value_offset() + 0 * target::kWordSize) -
kHeapObjectTag);
LoadFromOffset(kWord, tmp2, src,
(target::Float32x4::value_offset() + 1 * target::kWordSize) -
kHeapObjectTag);
StoreToOffset(kWord, tmp1, dst,
(target::Float32x4::value_offset() + 0 * target::kWordSize) -
kHeapObjectTag);
StoreToOffset(kWord, tmp2, dst,
(target::Float32x4::value_offset() + 1 * target::kWordSize) -
kHeapObjectTag);
LoadFromOffset(
kWord, tmp1, src,
(Float32x4::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(
kWord, tmp2, src,
(Float32x4::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp1, dst,
(Float32x4::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp2, dst,
(Float32x4::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(kWord, tmp1, src,
(target::Float32x4::value_offset() + 2 * target::kWordSize) -
kHeapObjectTag);
LoadFromOffset(kWord, tmp2, src,
(target::Float32x4::value_offset() + 3 * target::kWordSize) -
kHeapObjectTag);
StoreToOffset(kWord, tmp1, dst,
(target::Float32x4::value_offset() + 2 * target::kWordSize) -
kHeapObjectTag);
StoreToOffset(kWord, tmp2, dst,
(target::Float32x4::value_offset() + 3 * target::kWordSize) -
kHeapObjectTag);
}
}
@ -3029,35 +3024,35 @@ void Assembler::CopyFloat64x2Field(Register dst,
DRegister dtmp) {
if (TargetCPUFeatures::neon_supported()) {
LoadMultipleDFromOffset(dtmp, 2, src,
Float64x2::value_offset() - kHeapObjectTag);
target::Float64x2::value_offset() - kHeapObjectTag);
StoreMultipleDToOffset(dtmp, 2, dst,
Float64x2::value_offset() - kHeapObjectTag);
target::Float64x2::value_offset() - kHeapObjectTag);
} else {
LoadFromOffset(
kWord, tmp1, src,
(Float64x2::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(
kWord, tmp2, src,
(Float64x2::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp1, dst,
(Float64x2::value_offset() + 0 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp2, dst,
(Float64x2::value_offset() + 1 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(kWord, tmp1, src,
(target::Float64x2::value_offset() + 0 * target::kWordSize) -
kHeapObjectTag);
LoadFromOffset(kWord, tmp2, src,
(target::Float64x2::value_offset() + 1 * target::kWordSize) -
kHeapObjectTag);
StoreToOffset(kWord, tmp1, dst,
(target::Float64x2::value_offset() + 0 * target::kWordSize) -
kHeapObjectTag);
StoreToOffset(kWord, tmp2, dst,
(target::Float64x2::value_offset() + 1 * target::kWordSize) -
kHeapObjectTag);
LoadFromOffset(
kWord, tmp1, src,
(Float64x2::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(
kWord, tmp2, src,
(Float64x2::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp1, dst,
(Float64x2::value_offset() + 2 * target::kWordSize) - kHeapObjectTag);
StoreToOffset(
kWord, tmp2, dst,
(Float64x2::value_offset() + 3 * target::kWordSize) - kHeapObjectTag);
LoadFromOffset(kWord, tmp1, src,
(target::Float64x2::value_offset() + 2 * target::kWordSize) -
kHeapObjectTag);
LoadFromOffset(kWord, tmp2, src,
(target::Float64x2::value_offset() + 3 * target::kWordSize) -
kHeapObjectTag);
StoreToOffset(kWord, tmp1, dst,
(target::Float64x2::value_offset() + 2 * target::kWordSize) -
kHeapObjectTag);
StoreToOffset(kWord, tmp2, dst,
(target::Float64x2::value_offset() + 3 * target::kWordSize) -
kHeapObjectTag);
}
}
@ -3257,9 +3252,8 @@ void Assembler::EmitEntryFrameVerification(Register scratch) {
#if defined(DEBUG)
Label done;
ASSERT(!constant_pool_allowed());
LoadImmediate(scratch,
compiler::target::frame_layout.exit_link_slot_from_entry_fp *
compiler::target::kWordSize);
LoadImmediate(scratch, target::frame_layout.exit_link_slot_from_entry_fp *
target::kWordSize);
add(scratch, scratch, Operand(FPREG));
cmp(scratch, Operand(SPREG));
b(&done, EQ);
@ -3410,7 +3404,8 @@ void Assembler::MonomorphicCheckedEntryJIT() {
intptr_t start = CodeSize();
Comment("MonomorphicCheckedEntry");
ASSERT(CodeSize() - start == Instructions::kMonomorphicEntryOffsetJIT);
ASSERT(CodeSize() - start ==
target::Instructions::kMonomorphicEntryOffsetJIT);
const intptr_t cid_offset = target::Array::element_offset(0);
const intptr_t count_offset = target::Array::element_offset(1);
@ -3421,12 +3416,13 @@ void Assembler::MonomorphicCheckedEntryJIT() {
LoadClassIdMayBeSmi(IP, R0);
add(R2, R2, Operand(target::ToRawSmi(1)));
cmp(R1, Operand(IP, LSL, 1));
Branch(Address(THR, Thread::monomorphic_miss_entry_offset()), NE);
Branch(Address(THR, target::Thread::monomorphic_miss_entry_offset()), NE);
str(R2, FieldAddress(R9, count_offset));
LoadImmediate(R4, 0); // GC-safe for OptimizeInvokedFunction.
// Fall through to unchecked entry.
ASSERT(CodeSize() - start == Instructions::kPolymorphicEntryOffsetJIT);
ASSERT(CodeSize() - start ==
target::Instructions::kPolymorphicEntryOffsetJIT);
#if defined(TESTING) || defined(DEBUG)
set_use_far_branches(saved_use_far_branches);
@ -3444,14 +3440,16 @@ void Assembler::MonomorphicCheckedEntryAOT() {
intptr_t start = CodeSize();
Comment("MonomorphicCheckedEntry");
ASSERT(CodeSize() - start == Instructions::kMonomorphicEntryOffsetAOT);
ASSERT(CodeSize() - start ==
target::Instructions::kMonomorphicEntryOffsetAOT);
LoadClassIdMayBeSmi(IP, R0);
cmp(R9, Operand(IP, LSL, 1));
Branch(Address(THR, Thread::monomorphic_miss_entry_offset()), NE);
Branch(Address(THR, target::Thread::monomorphic_miss_entry_offset()), NE);
// Fall through to unchecked entry.
ASSERT(CodeSize() - start == Instructions::kPolymorphicEntryOffsetAOT);
ASSERT(CodeSize() - start ==
target::Instructions::kPolymorphicEntryOffsetAOT);
#if defined(TESTING) || defined(DEBUG)
set_use_far_branches(saved_use_far_branches);
@ -3460,11 +3458,11 @@ void Assembler::MonomorphicCheckedEntryAOT() {
void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
has_single_entry_point_ = false;
while (CodeSize() < Instructions::kMonomorphicEntryOffsetJIT) {
while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
bkpt(0);
}
b(label);
while (CodeSize() < Instructions::kPolymorphicEntryOffsetJIT) {
while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
bkpt(0);
}
}
@ -3473,9 +3471,9 @@ void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
void Assembler::MaybeTraceAllocation(Register stats_addr_reg, Label* trace) {
ASSERT(stats_addr_reg != kNoRegister);
ASSERT(stats_addr_reg != TMP);
const uword state_offset = ClassHeapStats::state_offset();
const uword state_offset = target::ClassHeapStats::state_offset();
ldr(TMP, Address(stats_addr_reg, state_offset));
tst(TMP, Operand(ClassHeapStats::TraceAllocationMask()));
tst(TMP, Operand(target::ClassHeapStats::TraceAllocationMask()));
b(trace, NE);
}
@ -3483,10 +3481,10 @@ void Assembler::LoadAllocationStatsAddress(Register dest, intptr_t cid) {
ASSERT(dest != kNoRegister);
ASSERT(dest != TMP);
ASSERT(cid > 0);
const intptr_t class_offset = ClassTable::ClassOffsetFor(cid);
const intptr_t class_offset = target::ClassTable::ClassOffsetFor(cid);
LoadIsolate(dest);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
intptr_t table_offset = target::Isolate::class_table_offset() +
target::ClassTable::class_heap_stats_table_offset();
ldr(dest, Address(dest, table_offset));
AddImmediate(dest, class_offset);
}
@ -3497,7 +3495,7 @@ void Assembler::IncrementAllocationStats(Register stats_addr_reg,
ASSERT(stats_addr_reg != TMP);
ASSERT(cid > 0);
const uword count_field_offset =
ClassHeapStats::allocated_since_gc_new_space_offset();
target::ClassHeapStats::allocated_since_gc_new_space_offset();
const Address& count_address = Address(stats_addr_reg, count_field_offset);
ldr(TMP, count_address);
AddImmediate(TMP, 1);
@ -3509,9 +3507,9 @@ void Assembler::IncrementAllocationStatsWithSize(Register stats_addr_reg,
ASSERT(stats_addr_reg != kNoRegister);
ASSERT(stats_addr_reg != TMP);
const uword count_field_offset =
ClassHeapStats::allocated_since_gc_new_space_offset();
target::ClassHeapStats::allocated_since_gc_new_space_offset();
const uword size_field_offset =
ClassHeapStats::allocated_size_since_gc_new_space_offset();
target::ClassHeapStats::allocated_size_since_gc_new_space_offset();
const Address& count_address = Address(stats_addr_reg, count_field_offset);
const Address& size_address = Address(stats_addr_reg, size_field_offset);
ldr(TMP, count_address);
@ -3529,18 +3527,19 @@ void Assembler::TryAllocate(const Class& cls,
Register temp_reg) {
ASSERT(failure != NULL);
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
if (FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size)) {
const classid_t cid = target::Class::GetId(cls);
ASSERT(instance_reg != temp_reg);
ASSERT(temp_reg != IP);
ASSERT(instance_size != 0);
NOT_IN_PRODUCT(LoadAllocationStatsAddress(temp_reg, cid));
ldr(instance_reg, Address(THR, Thread::top_offset()));
ldr(instance_reg, Address(THR, target::Thread::top_offset()));
// TODO(koda): Protect against unsigned overflow here.
AddImmediateSetFlags(instance_reg, instance_reg, instance_size);
// instance_reg: potential next object start.
ldr(IP, Address(THR, Thread::end_offset()));
ldr(IP, Address(THR, target::Thread::end_offset()));
cmp(IP, Operand(instance_reg));
// fail if heap end unsigned less than or equal to instance_reg.
b(failure, LS);
@ -3552,7 +3551,7 @@ void Assembler::TryAllocate(const Class& cls,
// Successfully allocated the object, now update top to point to
// next object start and store the class in the class field of object.
str(instance_reg, Address(THR, Thread::top_offset()));
str(instance_reg, Address(THR, target::Thread::top_offset()));
ASSERT(instance_size >= kHeapObjectTag);
AddImmediate(instance_reg, -instance_size + kHeapObjectTag);
@ -3575,17 +3574,18 @@ void Assembler::TryAllocateArray(intptr_t cid,
Register end_address,
Register temp1,
Register temp2) {
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
if (FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size)) {
NOT_IN_PRODUCT(LoadAllocationStatsAddress(temp1, cid));
// Potential new object start.
ldr(instance, Address(THR, Thread::top_offset()));
ldr(instance, Address(THR, target::Thread::top_offset()));
AddImmediateSetFlags(end_address, instance, instance_size);
b(failure, CS); // Branch if unsigned overflow.
// Check if the allocation fits into the remaining space.
// instance: potential new object start.
// end_address: potential next object start.
ldr(temp2, Address(THR, Thread::end_offset()));
ldr(temp2, Address(THR, target::Thread::end_offset()));
cmp(end_address, Operand(temp2));
b(failure, CS);
@ -3596,7 +3596,7 @@ void Assembler::TryAllocateArray(intptr_t cid,
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
str(end_address, Address(THR, Thread::top_offset()));
str(end_address, Address(THR, target::Thread::top_offset()));
add(instance, instance, Operand(kHeapObjectTag));
// Initialize the tags.
@ -3632,7 +3632,8 @@ Address Assembler::ElementAddressForIntIndex(bool is_load,
intptr_t index,
Register temp) {
const int64_t offset_base =
(is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
(is_external ? 0
: (target::Instance::DataOffsetFor(cid) - kHeapObjectTag));
const int64_t offset =
offset_base + static_cast<int64_t>(index) * index_scale;
ASSERT(Utils::IsInt(32, offset));
@ -3654,7 +3655,8 @@ void Assembler::LoadElementAddressForIntIndex(Register address,
Register array,
intptr_t index) {
const int64_t offset_base =
(is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
(is_external ? 0
: (target::Instance::DataOffsetFor(cid) - kHeapObjectTag));
const int64_t offset =
offset_base + static_cast<int64_t>(index) * index_scale;
ASSERT(Utils::IsInt(32, offset));
@ -3670,7 +3672,7 @@ Address Assembler::ElementAddressForRegIndex(bool is_load,
// Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays.
const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift;
int32_t offset =
is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
is_external ? 0 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
const OperandSize size = Address::OperandSizeFor(cid);
ASSERT(array != IP);
ASSERT(index != IP);
@ -3711,7 +3713,7 @@ void Assembler::LoadElementAddressForRegIndex(Register address,
// Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays.
const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift;
int32_t offset =
is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
is_external ? 0 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
if (shift < 0) {
ASSERT(shift == -1);
add(address, array, Operand(index, ASR, 1));


@ -1341,13 +1341,6 @@ class Assembler : public AssemblerBase {
};
} // namespace compiler
// TODO(vegorov) temporary export commonly used classes into dart namespace
// to ease migration.
using compiler::Address;
using compiler::FieldAddress;
using compiler::Operand;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM_H_


@ -24,32 +24,19 @@ DEFINE_FLAG(bool, use_far_branches, false, "Always use far branches");
namespace compiler {
#ifndef PRODUCT
using target::ClassHeapStats;
#endif
using target::ClassTable;
using target::Double;
using target::Float32x4;
using target::Float64x2;
using target::Heap;
using target::Instance;
using target::Instructions;
using target::Isolate;
using target::ObjectPool;
using target::RawObject;
using target::Thread;
Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
bool use_far_branches)
: AssemblerBase(object_pool_builder),
use_far_branches_(use_far_branches),
constant_pool_allowed_(false) {
generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
ldr(LR, Address(THR, Thread::write_barrier_wrappers_thread_offset(reg)));
ldr(LR, Address(THR,
target::Thread::write_barrier_wrappers_thread_offset(reg)));
blr(LR);
};
generate_invoke_array_write_barrier_ = [&]() {
ldr(LR, Address(THR, Thread::array_write_barrier_entry_point_offset()));
ldr(LR,
Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
blr(LR);
};
}
@ -469,13 +456,13 @@ void Assembler::LoadNativeEntry(
Register dst,
const ExternalLabel* label,
ObjectPoolBuilderEntry::Patchability patchable) {
const int32_t offset = ObjectPool::element_offset(
const int32_t offset = target::ObjectPool::element_offset(
object_pool_builder().FindNativeFunction(label, patchable));
LoadWordFromPoolOffset(dst, offset);
}
void Assembler::LoadIsolate(Register dst) {
ldr(dst, Address(THR, Thread::isolate_offset()));
ldr(dst, Address(THR, target::Thread::isolate_offset()));
}
void Assembler::LoadObjectHelper(Register dst,
@ -486,7 +473,7 @@ void Assembler::LoadObjectHelper(Register dst,
if (target::CanLoadFromThread(object, &offset)) {
ldr(dst, Address(THR, offset));
} else if (CanLoadFromObjectPool(object)) {
const int32_t offset = ObjectPool::element_offset(
const int32_t offset = target::ObjectPool::element_offset(
is_unique ? object_pool_builder().AddObject(object)
: object_pool_builder().FindObject(object));
LoadWordFromPoolOffset(dst, offset);
@ -569,7 +556,8 @@ void Assembler::LoadImmediate(Register reg, int64_t imm) {
// Use constant pool if allowed, unless we can load imm with 2 instructions.
if ((w1 != 0) && constant_pool_allowed()) {
const int32_t offset = ObjectPool::element_offset(FindImmediate(imm));
const int32_t offset =
target::ObjectPool::element_offset(FindImmediate(imm));
LoadWordFromPoolOffset(reg, offset);
return;
}
@ -615,7 +603,7 @@ void Assembler::LoadDImmediate(VRegister vd, double immd) {
void Assembler::Branch(const Code& target,
Register pp,
ObjectPoolBuilderEntry::Patchability patchable) {
const int32_t offset = ObjectPool::element_offset(
const int32_t offset = target::ObjectPool::element_offset(
object_pool_builder().FindObject(ToObject(target), patchable));
LoadWordFromPoolOffset(CODE_REG, offset, pp);
ldr(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
@ -628,7 +616,7 @@ void Assembler::BranchPatchable(const Code& code) {
void Assembler::BranchLink(const Code& target,
ObjectPoolBuilderEntry::Patchability patchable) {
const int32_t offset = ObjectPool::element_offset(
const int32_t offset = target::ObjectPool::element_offset(
object_pool_builder().FindObject(ToObject(target), patchable));
LoadWordFromPoolOffset(CODE_REG, offset);
ldr(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
@ -636,13 +624,13 @@ void Assembler::BranchLink(const Code& target,
}
void Assembler::BranchLinkToRuntime() {
ldr(LR, Address(THR, Thread::call_to_runtime_entry_point_offset()));
ldr(LR, Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
blr(LR);
}
void Assembler::BranchLinkWithEquivalence(const Code& target,
const Object& equivalence) {
const int32_t offset = ObjectPool::element_offset(
const int32_t offset = target::ObjectPool::element_offset(
object_pool_builder().FindObject(ToObject(target), equivalence));
LoadWordFromPoolOffset(CODE_REG, offset);
ldr(TMP, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
@ -652,8 +640,9 @@ void Assembler::BranchLinkWithEquivalence(const Code& target,
void Assembler::CallNullErrorShared(bool save_fpu_registers) {
uword entry_point_offset =
save_fpu_registers
? Thread::null_error_shared_with_fpu_regs_entry_point_offset()
: Thread::null_error_shared_without_fpu_regs_entry_point_offset();
? target::Thread::null_error_shared_with_fpu_regs_entry_point_offset()
: target::Thread::
null_error_shared_without_fpu_regs_entry_point_offset();
ldr(LR, Address(THR, entry_point_offset));
blr(LR);
}
@ -976,7 +965,7 @@ void Assembler::StoreIntoObject(Register object,
}
ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
and_(TMP, TMP2, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
and_(TMP, TMP2, Operand(TMP, LSR, target::RawObject::kBarrierOverlapShift));
tst(TMP, Operand(BARRIER_MASK));
b(&done, ZERO);
@ -1037,7 +1026,7 @@ void Assembler::StoreIntoArray(Register object,
}
ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
and_(TMP, TMP2, Operand(TMP, LSR, RawObject::kBarrierOverlapShift));
and_(TMP, TMP2, Operand(TMP, LSR, target::RawObject::kBarrierOverlapShift));
tst(TMP, Operand(BARRIER_MASK));
b(&done, ZERO);
if (!lr_reserved) Push(LR);
@ -1111,10 +1100,11 @@ void Assembler::StoreInternalPointer(Register object,
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
LoadFromOffset(result, object, class_id_offset - kHeapObjectTag,
kUnsignedHalfword);
}
@ -1122,10 +1112,10 @@ void Assembler::LoadClassId(Register result, Register object) {
void Assembler::LoadClassById(Register result, Register class_id) {
ASSERT(result != class_id);
LoadIsolate(result);
const intptr_t offset =
Isolate::class_table_offset() + ClassTable::table_offset();
const intptr_t offset = target::Isolate::class_table_offset() +
target::ClassTable::table_offset();
LoadFromOffset(result, result, offset);
ASSERT(ClassTable::kSizeOfClassPairLog2 == 4);
ASSERT(target::ClassTable::kSizeOfClassPairLog2 == 4);
add(class_id, class_id, Operand(class_id));
ldr(result, Address(result, class_id, UXTX, Address::Scaled));
}
@ -1169,9 +1159,8 @@ void Assembler::EmitEntryFrameVerification() {
#if defined(DEBUG)
Label done;
ASSERT(!constant_pool_allowed());
LoadImmediate(TMP,
compiler::target::frame_layout.exit_link_slot_from_entry_fp *
compiler::target::kWordSize);
LoadImmediate(TMP, target::frame_layout.exit_link_slot_from_entry_fp *
target::kWordSize);
add(TMP, TMP, Operand(FPREG));
cmp(TMP, Operand(SPREG));
b(&done, EQ);
@ -1183,8 +1172,8 @@ void Assembler::EmitEntryFrameVerification() {
}
void Assembler::RestoreCodePointer() {
ldr(CODE_REG, Address(FP, compiler::target::frame_layout.code_from_fp *
target::kWordSize));
ldr(CODE_REG,
Address(FP, target::frame_layout.code_from_fp * target::kWordSize));
CheckCodePointer();
}
@ -1202,7 +1191,7 @@ void Assembler::CheckCodePointer() {
Bind(&cid_ok);
const intptr_t entry_offset =
CodeSize() + Instructions::HeaderSize() - kHeapObjectTag;
CodeSize() + target::Instructions::HeaderSize() - kHeapObjectTag;
adr(R0, Immediate(-entry_offset));
ldr(TMP, FieldAddress(CODE_REG, target::Code::saved_instructions_offset()));
cmp(R0, Operand(TMP));
@ -1302,9 +1291,9 @@ void Assembler::LeaveDartFrame(RestorePP restore_pp) {
if (!(FLAG_precompiled_mode && FLAG_use_bare_instructions)) {
if (restore_pp == kRestoreCallerPP) {
// Restore and untag PP.
LoadFromOffset(PP, FP,
compiler::target::frame_layout.saved_caller_pp_from_fp *
target::kWordSize);
LoadFromOffset(
PP, FP,
target::frame_layout.saved_caller_pp_from_fp * target::kWordSize);
sub(PP, PP, Operand(kHeapObjectTag));
}
}
@ -1320,33 +1309,31 @@ void Assembler::TransitionGeneratedToNative(Register destination,
// Save exit frame information to enable stack walking.
StoreToOffset(new_exit_frame, THR,
compiler::target::Thread::top_exit_frame_info_offset());
target::Thread::top_exit_frame_info_offset());
// Mark that the thread is executing native code.
StoreToOffset(destination, THR, compiler::target::Thread::vm_tag_offset());
LoadImmediate(state, Thread::native_execution_state());
StoreToOffset(state, THR, compiler::target::Thread::execution_state_offset());
StoreToOffset(destination, THR, target::Thread::vm_tag_offset());
LoadImmediate(state, target::Thread::native_execution_state());
StoreToOffset(state, THR, target::Thread::execution_state_offset());
Label slow_path, done, retry;
if (!FLAG_use_slow_path) {
movz(addr, Immediate(compiler::target::Thread::safepoint_state_offset()),
0);
movz(addr, Immediate(target::Thread::safepoint_state_offset()), 0);
add(addr, THR, Operand(addr));
Bind(&retry);
ldxr(state, addr);
cmp(state, Operand(Thread::safepoint_state_unacquired()));
cmp(state, Operand(target::Thread::safepoint_state_unacquired()));
b(&slow_path, NE);
movz(state, Immediate(Thread::safepoint_state_acquired()), 0);
movz(state, Immediate(target::Thread::safepoint_state_acquired()), 0);
stxr(TMP, state, addr);
cbz(&done, TMP); // 0 means stxr was successful.
b(&retry);
}
Bind(&slow_path);
ldr(addr,
Address(THR, compiler::target::Thread::enter_safepoint_stub_offset()));
ldr(addr, FieldAddress(addr, compiler::target::Code::entry_point_offset()));
ldr(addr, Address(THR, target::Thread::enter_safepoint_stub_offset()));
ldr(addr, FieldAddress(addr, target::Code::entry_point_offset()));
blr(addr);
Bind(&done);
@ -1358,37 +1345,34 @@ void Assembler::TransitionNativeToGenerated(Register state) {
Label slow_path, done, retry;
if (!FLAG_use_slow_path) {
movz(addr, Immediate(compiler::target::Thread::safepoint_state_offset()),
0);
movz(addr, Immediate(target::Thread::safepoint_state_offset()), 0);
add(addr, THR, Operand(addr));
Bind(&retry);
ldxr(state, addr);
cmp(state, Operand(Thread::safepoint_state_acquired()));
cmp(state, Operand(target::Thread::safepoint_state_acquired()));
b(&slow_path, NE);
movz(state, Immediate(Thread::safepoint_state_unacquired()), 0);
movz(state, Immediate(target::Thread::safepoint_state_unacquired()), 0);
stxr(TMP, state, addr);
cbz(&done, TMP); // 0 means stxr was successful.
b(&retry);
}
Bind(&slow_path);
ldr(addr,
Address(THR, compiler::target::Thread::exit_safepoint_stub_offset()));
ldr(addr, FieldAddress(addr, compiler::target::Code::entry_point_offset()));
ldr(addr, Address(THR, target::Thread::exit_safepoint_stub_offset()));
ldr(addr, FieldAddress(addr, target::Code::entry_point_offset()));
blr(addr);
Bind(&done);
// Mark that the thread is executing Dart code.
LoadImmediate(state, compiler::target::Thread::vm_tag_compiled_id());
StoreToOffset(state, THR, compiler::target::Thread::vm_tag_offset());
LoadImmediate(state, compiler::target::Thread::generated_execution_state());
StoreToOffset(state, THR, compiler::target::Thread::execution_state_offset());
LoadImmediate(state, target::Thread::vm_tag_compiled_id());
StoreToOffset(state, THR, target::Thread::vm_tag_offset());
LoadImmediate(state, target::Thread::generated_execution_state());
StoreToOffset(state, THR, target::Thread::execution_state_offset());
// Reset exit frame information in Isolate structure.
StoreToOffset(ZR, THR,
compiler::target::Thread::top_exit_frame_info_offset());
StoreToOffset(ZR, THR, target::Thread::top_exit_frame_info_offset());
}
void Assembler::EnterCallRuntimeFrame(intptr_t frame_size) {
@ -1426,7 +1410,7 @@ void Assembler::LeaveCallRuntimeFrame() {
const intptr_t kPushedRegistersSize =
kDartVolatileCpuRegCount * target::kWordSize +
kDartVolatileFpuRegCount * target::kWordSize +
(compiler::target::frame_layout.dart_fixed_frame_size - 2) *
(target::frame_layout.dart_fixed_frame_size - 2) *
target::kWordSize; // From EnterStubFrame (excluding PC / FP)
AddImmediate(SP, FP, -kPushedRegistersSize);
for (int i = kDartLastVolatileCpuReg; i >= kDartFirstVolatileCpuReg; i--) {
@ -1471,11 +1455,11 @@ void Assembler::MonomorphicCheckedEntryJIT() {
Label immediate, miss;
Bind(&miss);
ldr(IP0, Address(THR, Thread::monomorphic_miss_entry_offset()));
ldr(IP0, Address(THR, target::Thread::monomorphic_miss_entry_offset()));
br(IP0);
Comment("MonomorphicCheckedEntry");
ASSERT(CodeSize() == Instructions::kMonomorphicEntryOffsetJIT);
ASSERT(CodeSize() == target::Instructions::kMonomorphicEntryOffsetJIT);
const intptr_t cid_offset = target::Array::element_offset(0);
const intptr_t count_offset = target::Array::element_offset(1);
@ -1491,7 +1475,7 @@ void Assembler::MonomorphicCheckedEntryJIT() {
LoadImmediate(R4, 0); // GC-safe for OptimizeInvokedFunction.
// Fall through to unchecked entry.
ASSERT(CodeSize() == Instructions::kPolymorphicEntryOffsetJIT);
ASSERT(CodeSize() == target::Instructions::kPolymorphicEntryOffsetJIT);
set_use_far_branches(saved_use_far_branches);
}
@ -1508,28 +1492,30 @@ void Assembler::MonomorphicCheckedEntryAOT() {
Label immediate, miss;
Bind(&miss);
ldr(IP0, Address(THR, Thread::monomorphic_miss_entry_offset()));
ldr(IP0, Address(THR, target::Thread::monomorphic_miss_entry_offset()));
br(IP0);
Comment("MonomorphicCheckedEntry");
ASSERT(CodeSize() - start == Instructions::kMonomorphicEntryOffsetAOT);
ASSERT(CodeSize() - start ==
target::Instructions::kMonomorphicEntryOffsetAOT);
LoadClassIdMayBeSmi(IP0, R0);
cmp(R5, Operand(IP0, LSL, 1));
b(&miss, NE);
// Fall through to unchecked entry.
ASSERT(CodeSize() - start == Instructions::kPolymorphicEntryOffsetAOT);
ASSERT(CodeSize() - start ==
target::Instructions::kPolymorphicEntryOffsetAOT);
set_use_far_branches(saved_use_far_branches);
}
void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
has_single_entry_point_ = false;
while (CodeSize() < Instructions::kMonomorphicEntryOffsetJIT) {
while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
brk(0);
}
b(label);
while (CodeSize() < Instructions::kPolymorphicEntryOffsetJIT) {
while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
brk(0);
}
}
@ -1539,14 +1525,14 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
Register temp_reg,
Label* trace) {
ASSERT(cid > 0);
intptr_t state_offset = ClassTable::StateOffsetFor(cid);
intptr_t state_offset = target::ClassTable::StateOffsetFor(cid);
LoadIsolate(temp_reg);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
intptr_t table_offset = target::Isolate::class_table_offset() +
target::ClassTable::class_heap_stats_table_offset();
ldr(temp_reg, Address(temp_reg, table_offset));
AddImmediate(temp_reg, state_offset);
ldr(temp_reg, Address(temp_reg, 0));
tsti(temp_reg, Immediate(ClassHeapStats::TraceAllocationMask()));
tsti(temp_reg, Immediate(target::ClassHeapStats::TraceAllocationMask()));
b(trace, NE);
}
@ -1554,8 +1540,8 @@ void Assembler::UpdateAllocationStats(intptr_t cid) {
ASSERT(cid > 0);
intptr_t counter_offset = target::ClassTable::NewSpaceCounterOffsetFor(cid);
LoadIsolate(TMP2);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
intptr_t table_offset = target::Isolate::class_table_offset() +
target::ClassTable::class_heap_stats_table_offset();
ldr(TMP, Address(TMP2, table_offset));
AddImmediate(TMP2, TMP, counter_offset);
ldr(TMP, Address(TMP2, 0));
@ -1565,14 +1551,14 @@ void Assembler::UpdateAllocationStats(intptr_t cid) {
void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg) {
ASSERT(cid > 0);
const uword class_offset = ClassTable::ClassOffsetFor(cid);
const uword class_offset = target::ClassTable::ClassOffsetFor(cid);
const uword count_field_offset =
ClassHeapStats::allocated_since_gc_new_space_offset();
target::ClassHeapStats::allocated_since_gc_new_space_offset();
const uword size_field_offset =
ClassHeapStats::allocated_size_since_gc_new_space_offset();
target::ClassHeapStats::allocated_size_since_gc_new_space_offset();
LoadIsolate(TMP2);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
intptr_t table_offset = target::Isolate::class_table_offset() +
target::ClassTable::class_heap_stats_table_offset();
ldr(TMP, Address(TMP2, table_offset));
AddImmediate(TMP2, TMP, class_offset);
ldr(TMP, Address(TMP2, count_field_offset));
@ -1591,7 +1577,8 @@ void Assembler::TryAllocate(const Class& cls,
bool tag_result) {
ASSERT(failure != NULL);
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
if (FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size)) {
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
@ -1601,10 +1588,10 @@ void Assembler::TryAllocate(const Class& cls,
const Register kEndReg = TMP;
// instance_reg: potential next object start.
RELEASE_ASSERT((Thread::top_offset() + target::kWordSize) ==
Thread::end_offset());
RELEASE_ASSERT((target::Thread::top_offset() + target::kWordSize) ==
target::Thread::end_offset());
ldp(instance_reg, kEndReg,
Address(THR, Thread::top_offset(), Address::PairOffset));
Address(THR, target::Thread::top_offset(), Address::PairOffset));
// TODO(koda): Protect against unsigned overflow here.
AddImmediate(top_reg, instance_reg, instance_size);
@ -1613,7 +1600,7 @@ void Assembler::TryAllocate(const Class& cls,
// Successfully allocated the object, now update top to point to
// next object start and store the class in the class field of object.
str(top_reg, Address(THR, Thread::top_offset()));
str(top_reg, Address(THR, target::Thread::top_offset()));
NOT_IN_PRODUCT(UpdateAllocationStats(cid));
@ -1639,26 +1626,27 @@ void Assembler::TryAllocateArray(intptr_t cid,
Register end_address,
Register temp1,
Register temp2) {
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
if (FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size)) {
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp1, failure));
// Potential new object start.
ldr(instance, Address(THR, Thread::top_offset()));
ldr(instance, Address(THR, target::Thread::top_offset()));
AddImmediateSetFlags(end_address, instance, instance_size);
b(failure, CS); // Fail on unsigned overflow.
// Check if the allocation fits into the remaining space.
// instance: potential new object start.
// end_address: potential next object start.
ldr(temp2, Address(THR, Thread::end_offset()));
ldr(temp2, Address(THR, target::Thread::end_offset()));
cmp(end_address, Operand(temp2));
b(failure, CS);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
str(end_address, Address(THR, Thread::top_offset()));
str(end_address, Address(THR, target::Thread::top_offset()));
add(instance, instance, Operand(kHeapObjectTag));
LoadImmediate(temp2, instance_size);
NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, temp2));
@ -1692,7 +1680,8 @@ Address Assembler::ElementAddressForIntIndex(bool is_external,
intptr_t index) const {
const int64_t offset =
index * index_scale +
(is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
(is_external ? 0
: (target::Instance::DataOffsetFor(cid) - kHeapObjectTag));
ASSERT(Utils::IsInt(32, offset));
const OperandSize size = Address::OperandSizeFor(cid);
ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
@ -1707,7 +1696,8 @@ void Assembler::LoadElementAddressForIntIndex(Register address,
intptr_t index) {
const int64_t offset =
index * index_scale +
(is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
(is_external ? 0
: (target::Instance::DataOffsetFor(cid) - kHeapObjectTag));
AddImmediate(address, array, offset);
}
@ -1720,7 +1710,7 @@ Address Assembler::ElementAddressForRegIndex(bool is_load,
// Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays.
const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift;
const int32_t offset =
is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
is_external ? 0 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
ASSERT(array != TMP);
ASSERT(index != TMP);
const Register base = is_load ? TMP : index;
@ -1747,7 +1737,7 @@ void Assembler::LoadElementAddressForRegIndex(Register address,
// Note that index is expected smi-tagged, (i.e, LSL 1) for all arrays.
const intptr_t shift = Utils::ShiftForPowerOfTwo(index_scale) - kSmiTagShift;
const int32_t offset =
is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag);
is_external ? 0 : (target::Instance::DataOffsetFor(cid) - kHeapObjectTag);
if (shift == 0) {
add(address, array, Operand(index));
} else if (shift < 0) {


@ -2283,12 +2283,6 @@ class Assembler : public AssemblerBase {
};
} // namespace compiler
using compiler::Address;
using compiler::FieldAddress;
using compiler::Immediate;
using compiler::Operand;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_ARM64_H_


@ -100,9 +100,6 @@ class Assembler : public AssemblerBase {
};
} // namespace compiler
using compiler::Address;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_DBC_H_


@ -21,14 +21,6 @@ DECLARE_FLAG(bool, use_slow_path);
namespace compiler {
using target::ClassTable;
using target::Heap;
using target::Instance;
using target::Instructions;
using target::Isolate;
using target::RawObject;
using target::Thread;
#if !defined(DART_PRECOMPILED_RUNTIME)
class DirectCallRelocation : public AssemblerFixup {
@ -1780,7 +1772,7 @@ void Assembler::Drop(intptr_t stack_elements) {
}
void Assembler::LoadIsolate(Register dst) {
movl(dst, Address(THR, Thread::isolate_offset()));
movl(dst, Address(THR, target::Thread::isolate_offset()));
}
void Assembler::LoadObject(Register dst,
@ -1902,7 +1894,7 @@ void Assembler::StoreIntoObject(Register object,
if (object != EDX) {
movl(EDX, object);
}
call(Address(THR, Thread::write_barrier_entry_point_offset()));
call(Address(THR, target::Thread::write_barrier_entry_point_offset()));
if (value != EDX) {
popl(EDX); // Restore EDX.
}
@ -1961,7 +1953,7 @@ void Assembler::StoreIntoArray(Register object,
} else if (slot != kWriteBarrierSlotReg) {
movl(kWriteBarrierSlotReg, slot);
}
call(Address(THR, Thread::array_write_barrier_entry_point_offset()));
call(Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
if (value != kWriteBarrierSlotReg && slot != kWriteBarrierSlotReg) {
popl(kWriteBarrierSlotReg); // Restore kWriteBarrierSlotReg.
}
@ -2104,10 +2096,11 @@ void Assembler::MonomorphicCheckedEntryJIT() {
intptr_t start = CodeSize();
Label have_cid, miss;
Bind(&miss);
jmp(Address(THR, Thread::monomorphic_miss_entry_offset()));
jmp(Address(THR, target::Thread::monomorphic_miss_entry_offset()));
Comment("MonomorphicCheckedEntry");
ASSERT(CodeSize() - start == Instructions::kMonomorphicEntryOffsetJIT);
ASSERT(CodeSize() - start ==
target::Instructions::kMonomorphicEntryOffsetJIT);
const intptr_t cid_offset = target::Array::element_offset(0);
const intptr_t count_offset = target::Array::element_offset(1);
@ -2127,7 +2120,8 @@ void Assembler::MonomorphicCheckedEntryJIT() {
nop(1);
// Fall through to unchecked entry.
ASSERT(CodeSize() - start == Instructions::kPolymorphicEntryOffsetJIT);
ASSERT(CodeSize() - start ==
target::Instructions::kPolymorphicEntryOffsetJIT);
}
// EBX receiver, ECX guarded cid as Smi.
@ -2138,11 +2132,11 @@ void Assembler::MonomorphicCheckedEntryAOT() {
void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
has_single_entry_point_ = false;
while (CodeSize() < Instructions::kMonomorphicEntryOffsetJIT) {
while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
int3();
}
jmp(label);
while (CodeSize() < Instructions::kPolymorphicEntryOffsetJIT) {
while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
int3();
}
}
@ -2151,31 +2145,31 @@ void Assembler::TransitionGeneratedToNative(Register destination_address,
Register new_exit_frame,
Register scratch) {
// Save exit frame information to enable stack walking.
movl(Address(THR, Thread::top_exit_frame_info_offset()), new_exit_frame);
movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
new_exit_frame);
// Mark that the thread is executing native code.
movl(VMTagAddress(), destination_address);
movl(Address(THR, Thread::execution_state_offset()),
Immediate(compiler::target::Thread::native_execution_state()));
movl(Address(THR, target::Thread::execution_state_offset()),
Immediate(target::Thread::native_execution_state()));
// Compare and swap the value at Thread::safepoint_state from unacquired to
// acquired. On success, jump to 'success'; otherwise, fallthrough.
Label done;
if (!FLAG_use_slow_path) {
pushl(EAX);
movl(EAX, Immediate(Thread::safepoint_state_unacquired()));
movl(scratch, Immediate(Thread::safepoint_state_acquired()));
LockCmpxchgl(Address(THR, Thread::safepoint_state_offset()), scratch);
movl(EAX, Immediate(target::Thread::safepoint_state_unacquired()));
movl(scratch, Immediate(target::Thread::safepoint_state_acquired()));
LockCmpxchgl(Address(THR, target::Thread::safepoint_state_offset()),
scratch);
movl(scratch, EAX);
popl(EAX);
cmpl(scratch, Immediate(Thread::safepoint_state_unacquired()));
cmpl(scratch, Immediate(target::Thread::safepoint_state_unacquired()));
j(EQUAL, &done);
}
movl(scratch,
Address(THR, compiler::target::Thread::enter_safepoint_stub_offset()));
movl(scratch,
FieldAddress(scratch, compiler::target::Code::entry_point_offset()));
movl(scratch, Address(THR, target::Thread::enter_safepoint_stub_offset()));
movl(scratch, FieldAddress(scratch, target::Code::entry_point_offset()));
call(scratch);
Bind(&done);
@ -2187,34 +2181,31 @@ void Assembler::TransitionNativeToGenerated(Register scratch) {
Label done;
if (!FLAG_use_slow_path) {
pushl(EAX);
movl(EAX, Immediate(compiler::target::Thread::safepoint_state_acquired()));
movl(scratch,
Immediate(compiler::target::Thread::safepoint_state_unacquired()));
LockCmpxchgl(
Address(THR, compiler::target::Thread::safepoint_state_offset()),
scratch);
movl(EAX, Immediate(target::Thread::safepoint_state_acquired()));
movl(scratch, Immediate(target::Thread::safepoint_state_unacquired()));
LockCmpxchgl(Address(THR, target::Thread::safepoint_state_offset()),
scratch);
movl(scratch, EAX);
popl(EAX);
cmpl(scratch, Immediate(Thread::safepoint_state_acquired()));
cmpl(scratch, Immediate(target::Thread::safepoint_state_acquired()));
j(EQUAL, &done);
}
movl(scratch,
Address(THR, compiler::target::Thread::exit_safepoint_stub_offset()));
movl(scratch,
FieldAddress(scratch, compiler::target::Code::entry_point_offset()));
movl(scratch, Address(THR, target::Thread::exit_safepoint_stub_offset()));
movl(scratch, FieldAddress(scratch, target::Code::entry_point_offset()));
call(scratch);
Bind(&done);
// Mark that the thread is executing Dart code.
movl(Assembler::VMTagAddress(),
Immediate(compiler::target::Thread::vm_tag_compiled_id()));
movl(Address(THR, Thread::execution_state_offset()),
Immediate(compiler::target::Thread::generated_execution_state()));
Immediate(target::Thread::vm_tag_compiled_id()));
movl(Address(THR, target::Thread::execution_state_offset()),
Immediate(target::Thread::generated_execution_state()));
// Reset exit frame information in Isolate structure.
movl(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
movl(Address(THR, target::Thread::top_exit_frame_info_offset()),
Immediate(0));
}
static const intptr_t kNumberOfVolatileCpuRegisters = 3;
@ -2286,7 +2277,7 @@ void Assembler::Call(const Code& target, bool movable_target) {
}
void Assembler::CallToRuntime() {
call(Address(THR, Thread::call_to_runtime_entry_point_offset()));
call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
}
void Assembler::Jmp(const Code& target) {
@ -2347,11 +2338,11 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
bool near_jump) {
ASSERT(cid > 0);
Address state_address(kNoRegister, 0);
intptr_t state_offset = ClassTable::StateOffsetFor(cid);
intptr_t state_offset = target::ClassTable::StateOffsetFor(cid);
ASSERT(temp_reg != kNoRegister);
LoadIsolate(temp_reg);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
intptr_t table_offset = target::Isolate::class_table_offset() +
target::ClassTable::class_heap_stats_table_offset();
movl(temp_reg, Address(temp_reg, table_offset));
state_address = Address(temp_reg, state_offset);
testb(state_address,
@ -2363,11 +2354,11 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
void Assembler::UpdateAllocationStats(intptr_t cid, Register temp_reg) {
ASSERT(cid > 0);
intptr_t counter_offset = ClassTable::NewSpaceCounterOffsetFor(cid);
intptr_t counter_offset = target::ClassTable::NewSpaceCounterOffsetFor(cid);
ASSERT(temp_reg != kNoRegister);
LoadIsolate(temp_reg);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
intptr_t table_offset = target::Isolate::class_table_offset() +
target::ClassTable::class_heap_stats_table_offset();
movl(temp_reg, Address(temp_reg, table_offset));
incl(Address(temp_reg, counter_offset));
}
@ -2378,7 +2369,7 @@ void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
ASSERT(cid > 0);
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid, temp_reg);
intptr_t size_offset = ClassTable::NewSpaceSizeOffsetFor(cid);
intptr_t size_offset = target::ClassTable::NewSpaceSizeOffsetFor(cid);
addl(Address(temp_reg, size_offset), size_reg);
}
@ -2388,7 +2379,7 @@ void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
ASSERT(cid > 0);
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid, temp_reg);
intptr_t size_offset = ClassTable::NewSpaceSizeOffsetFor(cid);
intptr_t size_offset = target::ClassTable::NewSpaceSizeOffsetFor(cid);
addl(Address(temp_reg, size_offset), Immediate(size_in_bytes));
}
#endif // !PRODUCT
@ -2401,20 +2392,21 @@ void Assembler::TryAllocate(const Class& cls,
ASSERT(failure != NULL);
ASSERT(temp_reg != kNoRegister);
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
if (FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size)) {
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
const classid_t cid = target::Class::GetId(cls);
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, near_jump));
movl(instance_reg, Address(THR, Thread::top_offset()));
movl(instance_reg, Address(THR, target::Thread::top_offset()));
addl(instance_reg, Immediate(instance_size));
// instance_reg: potential next object start.
cmpl(instance_reg, Address(THR, Thread::end_offset()));
cmpl(instance_reg, Address(THR, target::Thread::end_offset()));
j(ABOVE_EQUAL, failure, near_jump);
// Successfully allocated the object, now update top to point to
// next object start and store the class in the class field of object.
movl(Address(THR, Thread::top_offset()), instance_reg);
movl(Address(THR, target::Thread::top_offset()), instance_reg);
NOT_IN_PRODUCT(UpdateAllocationStats(cid, temp_reg));
ASSERT(instance_size >= kHeapObjectTag);
subl(instance_reg, Immediate(instance_size - kHeapObjectTag));
@ -2436,12 +2428,13 @@ void Assembler::TryAllocateArray(intptr_t cid,
Register temp_reg) {
ASSERT(failure != NULL);
ASSERT(temp_reg != kNoRegister);
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
if (FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size)) {
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, temp_reg, failure, near_jump));
movl(instance, Address(THR, Thread::top_offset()));
movl(instance, Address(THR, target::Thread::top_offset()));
movl(end_address, instance);
addl(end_address, Immediate(instance_size));
@ -2450,12 +2443,12 @@ void Assembler::TryAllocateArray(intptr_t cid,
// Check if the allocation fits into the remaining space.
// EAX: potential new object start.
// EBX: potential next object start.
cmpl(end_address, Address(THR, Thread::end_offset()));
cmpl(end_address, Address(THR, target::Thread::end_offset()));
j(ABOVE_EQUAL, failure);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
movl(Address(THR, Thread::top_offset()), end_address);
movl(Address(THR, target::Thread::top_offset()), end_address);
addl(instance, Immediate(kHeapObjectTag));
NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, instance_size, temp_reg));
@ -2590,20 +2583,21 @@ void Assembler::EmitGenericShift(int rm,
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
movzxw(result, FieldAddress(object, class_id_offset));
}
void Assembler::LoadClassById(Register result, Register class_id) {
ASSERT(result != class_id);
LoadIsolate(result);
const intptr_t offset =
Isolate::class_table_offset() + ClassTable::table_offset();
const intptr_t offset = target::Isolate::class_table_offset() +
target::ClassTable::table_offset();
movl(result, Address(result, offset));
ASSERT(ClassTable::kSizeOfClassPairLog2 == 3);
ASSERT(target::ClassTable::kSizeOfClassPairLog2 == 3);
movl(result, Address(result, class_id, TIMES_8, 0));
}
@ -2619,10 +2613,11 @@ void Assembler::SmiUntagOrCheckClass(Register object,
Register scratch,
Label* is_smi) {
ASSERT(kSmiTagShift == 1);
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
// Untag optimistically. Tag bit is shifted into the CARRY.
SmiUntag(object);
@ -2648,7 +2643,8 @@ void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
Bind(&join);
} else {
ASSERT(result != object);
static const intptr_t kSmiCidSource = kSmiCid << RawObject::kClassIdTagPos;
static const intptr_t kSmiCidSource = kSmiCid
<< target::RawObject::kClassIdTagPos;
// Make a dummy "Object" whose cid is kSmiCid.
movl(result, Immediate(reinterpret_cast<int32_t>(&kSmiCidSource) + 1));
@ -2693,7 +2689,7 @@ Address Assembler::ElementAddressForIntIndex(bool is_external,
return Address(array, index * index_scale + extra_disp);
} else {
const int64_t disp = static_cast<int64_t>(index) * index_scale +
Instance::DataOffsetFor(cid) + extra_disp;
target::Instance::DataOffsetFor(cid) + extra_disp;
ASSERT(Utils::IsInt(32, disp));
return FieldAddress(array, static_cast<int32_t>(disp));
}
@ -2731,7 +2727,7 @@ Address Assembler::ElementAddressForRegIndex(bool is_external,
return Address(array, index, ToScaleFactor(index_scale), extra_disp);
} else {
return FieldAddress(array, index, ToScaleFactor(index_scale),
Instance::DataOffsetFor(cid) + extra_disp);
target::Instance::DataOffsetFor(cid) + extra_disp);
}
}
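
The TryAllocate and TryAllocateArray hunks above only spell out target::Thread::top_offset() and target::Thread::end_offset(); the bump-pointer fast path itself is unchanged. As a reading aid, here is a minimal standalone C++ sketch of that fast path. ThreadModel and TryAllocateModel are illustrative names, not Dart VM identifiers, and the real code emits the equivalent instruction sequence instead of calling a helper.

#include <cstddef>
#include <cstdint>

// Illustrative stand-ins for the new-space bounds the generated code reads
// through THR (Thread::top_offset() / Thread::end_offset()).
struct ThreadModel {
  std::uintptr_t top;  // next free address in new-space
  std::uintptr_t end;  // first address past new-space
};

// Mirrors the emitted sequence: load top, add the instance size, compare
// against end, bail out on overflow (the j(ABOVE_EQUAL, failure) branch),
// otherwise publish the new top. Returns 0 when the fast path fails.
inline std::uintptr_t TryAllocateModel(ThreadModel* thr,
                                       std::size_t instance_size) {
  const std::uintptr_t start = thr->top;
  const std::uintptr_t next = start + instance_size;
  if (next >= thr->end) {
    return 0;  // fall back to the allocation stub slow path
  }
  thr->top = next;
  return start;  // untagged start; the real code then applies kHeapObjectTag
}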


@ -936,11 +936,6 @@ inline void Assembler::EmitOperandSizeOverride() {
}
} // namespace compiler
using compiler::Address;
using compiler::FieldAddress;
using compiler::Immediate;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_IA32_H_


@ -23,14 +23,6 @@ DECLARE_FLAG(bool, use_slow_path);
namespace compiler {
using target::ClassTable;
using target::Heap;
using target::Instance;
using target::Instructions;
using target::Isolate;
using target::RawObject;
using target::Thread;
#if !defined(DART_PRECOMPILED_RUNTIME)
Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
@ -40,10 +32,12 @@ Assembler::Assembler(ObjectPoolBuilder* object_pool_builder,
ASSERT(!use_far_branches);
generate_invoke_write_barrier_wrapper_ = [&](Register reg) {
call(Address(THR, Thread::write_barrier_wrappers_thread_offset(reg)));
call(Address(THR,
target::Thread::write_barrier_wrappers_thread_offset(reg)));
};
generate_invoke_array_write_barrier_ = [&]() {
call(Address(THR, Thread::array_write_barrier_entry_point_offset()));
call(
Address(THR, target::Thread::array_write_barrier_entry_point_offset()));
};
}
@ -107,14 +101,15 @@ void Assembler::Call(const Code& target) {
}
void Assembler::CallToRuntime() {
call(Address(THR, Thread::call_to_runtime_entry_point_offset()));
call(Address(THR, target::Thread::call_to_runtime_entry_point_offset()));
}
void Assembler::CallNullErrorShared(bool save_fpu_registers) {
uword entry_point_offset =
save_fpu_registers
? Thread::null_error_shared_with_fpu_regs_entry_point_offset()
: Thread::null_error_shared_without_fpu_regs_entry_point_offset();
? target::Thread::null_error_shared_with_fpu_regs_entry_point_offset()
: target::Thread::
null_error_shared_without_fpu_regs_entry_point_offset();
call(Address(THR, entry_point_offset));
}
@ -168,29 +163,29 @@ void Assembler::setcc(Condition condition, ByteRegister dst) {
void Assembler::TransitionGeneratedToNative(Register destination_address,
Register new_exit_frame) {
// Save exit frame information to enable stack walking.
movq(Address(THR, Thread::top_exit_frame_info_offset()), new_exit_frame);
movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
new_exit_frame);
movq(Assembler::VMTagAddress(), destination_address);
movq(Address(THR, compiler::target::Thread::execution_state_offset()),
Immediate(compiler::target::Thread::native_execution_state()));
movq(Address(THR, target::Thread::execution_state_offset()),
Immediate(target::Thread::native_execution_state()));
// Compare and swap the value at Thread::safepoint_state from unacquired to
// acquired. If the CAS fails, go to a slow-path stub.
Label done;
if (!FLAG_use_slow_path) {
pushq(RAX);
movq(RAX, Immediate(Thread::safepoint_state_unacquired()));
movq(TMP, Immediate(Thread::safepoint_state_acquired()));
LockCmpxchgq(Address(THR, Thread::safepoint_state_offset()), TMP);
movq(RAX, Immediate(target::Thread::safepoint_state_unacquired()));
movq(TMP, Immediate(target::Thread::safepoint_state_acquired()));
LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
movq(TMP, RAX);
popq(RAX);
cmpq(TMP, Immediate(Thread::safepoint_state_unacquired()));
cmpq(TMP, Immediate(target::Thread::safepoint_state_unacquired()));
j(EQUAL, &done);
}
movq(TMP,
Address(THR, compiler::target::Thread::enter_safepoint_stub_offset()));
movq(TMP, FieldAddress(TMP, compiler::target::Code::entry_point_offset()));
movq(TMP, Address(THR, target::Thread::enter_safepoint_stub_offset()));
movq(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
// Use call instead of CFunctionCall to prevent having to clean up shadow
// space afterwards. This is possible because safepoint stub has no arguments.
call(TMP);
@ -204,18 +199,17 @@ void Assembler::TransitionNativeToGenerated() {
Label done;
if (!FLAG_use_slow_path) {
pushq(RAX);
movq(RAX, Immediate(Thread::safepoint_state_acquired()));
movq(TMP, Immediate(Thread::safepoint_state_unacquired()));
LockCmpxchgq(Address(THR, Thread::safepoint_state_offset()), TMP);
movq(RAX, Immediate(target::Thread::safepoint_state_acquired()));
movq(TMP, Immediate(target::Thread::safepoint_state_unacquired()));
LockCmpxchgq(Address(THR, target::Thread::safepoint_state_offset()), TMP);
movq(TMP, RAX);
popq(RAX);
cmpq(TMP, Immediate(Thread::safepoint_state_acquired()));
cmpq(TMP, Immediate(target::Thread::safepoint_state_acquired()));
j(EQUAL, &done);
}
movq(TMP,
Address(THR, compiler::target::Thread::exit_safepoint_stub_offset()));
movq(TMP, FieldAddress(TMP, compiler::target::Code::entry_point_offset()));
movq(TMP, Address(THR, target::Thread::exit_safepoint_stub_offset()));
movq(TMP, FieldAddress(TMP, target::Code::entry_point_offset()));
// Use call instead of CFunctionCall to prevent having to clean up shadow
// space afterwards. This is possible because safepoint stub has no arguments.
call(TMP);
@ -223,12 +217,13 @@ void Assembler::TransitionNativeToGenerated() {
Bind(&done);
movq(Assembler::VMTagAddress(),
Immediate(compiler::target::Thread::vm_tag_compiled_id()));
movq(Address(THR, Thread::execution_state_offset()),
Immediate(compiler::target::Thread::generated_execution_state()));
Immediate(target::Thread::vm_tag_compiled_id()));
movq(Address(THR, target::Thread::execution_state_offset()),
Immediate(target::Thread::generated_execution_state()));
// Reset exit frame information in Isolate structure.
movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
Immediate(0));
}
void Assembler::EmitQ(int reg,
@ -419,7 +414,7 @@ void Assembler::EmitW(Register dst,
#define UNARY_XMM_WITH_CONSTANT(name, constant, op) \
void Assembler::name(XmmRegister dst, XmmRegister src) { \
movq(TMP, Address(THR, Thread::constant##_address_offset())); \
movq(TMP, Address(THR, target::Thread::constant##_address_offset())); \
if (dst == src) { \
op(dst, Address(TMP, 0)); \
} else { \
@ -1183,7 +1178,7 @@ void Assembler::LoadWordFromPoolOffset(Register dst, int32_t offset) {
}
void Assembler::LoadIsolate(Register dst) {
movq(dst, Address(THR, Thread::isolate_offset()));
movq(dst, Address(THR, target::Thread::isolate_offset()));
}
void Assembler::LoadObjectHelper(Register dst,
@ -1348,8 +1343,8 @@ void Assembler::StoreIntoObject(Register object,
j(ZERO, &done, kNearJump);
}
movb(TMP, FieldAddress(object, target::Object::tags_offset()));
shrl(TMP, Immediate(RawObject::kBarrierOverlapShift));
andl(TMP, Address(THR, Thread::write_barrier_mask_offset()));
shrl(TMP, Immediate(target::RawObject::kBarrierOverlapShift));
andl(TMP, Address(THR, target::Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, target::Object::tags_offset()), TMP);
j(ZERO, &done, kNearJump);
@ -1401,7 +1396,7 @@ void Assembler::StoreIntoArray(Register object,
}
movb(TMP, FieldAddress(object, target::Object::tags_offset()));
shrl(TMP, Immediate(target::RawObject::kBarrierOverlapShift));
andl(TMP, Address(THR, Thread::write_barrier_mask_offset()));
andl(TMP, Address(THR, target::Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, target::Object::tags_offset()), TMP);
j(ZERO, &done, kNearJump);
@ -1701,7 +1696,7 @@ void Assembler::CheckCodePointer() {
{
const intptr_t kRIPRelativeLeaqSize = 7;
const intptr_t header_to_entry_offset =
(Instructions::HeaderSize() - kHeapObjectTag);
(target::Instructions::HeaderSize() - kHeapObjectTag);
const intptr_t header_to_rip_offset =
CodeSize() + kRIPRelativeLeaqSize + header_to_entry_offset;
leaq(RAX, Address::AddressRIPRelative(-header_to_rip_offset));
@ -1749,14 +1744,15 @@ void Assembler::MonomorphicCheckedEntryJIT() {
intptr_t start = CodeSize();
Label have_cid, miss;
Bind(&miss);
jmp(Address(THR, Thread::monomorphic_miss_entry_offset()));
jmp(Address(THR, target::Thread::monomorphic_miss_entry_offset()));
// Ensure the monomorphic entry is 2-byte aligned (so GC can see them if we
// store them in ICData / MegamorphicCache arrays)
nop(1);
Comment("MonomorphicCheckedEntry");
ASSERT(CodeSize() - start == Instructions::kMonomorphicEntryOffsetJIT);
ASSERT(CodeSize() - start ==
target::Instructions::kMonomorphicEntryOffsetJIT);
ASSERT((CodeSize() & kSmiTagMask) == kSmiTag);
const intptr_t cid_offset = target::Array::element_offset(0);
@ -1771,7 +1767,8 @@ void Assembler::MonomorphicCheckedEntryJIT() {
nop(1);
// Fall through to unchecked entry.
ASSERT(CodeSize() - start == Instructions::kPolymorphicEntryOffsetJIT);
ASSERT(CodeSize() - start ==
target::Instructions::kPolymorphicEntryOffsetJIT);
ASSERT(((CodeSize() - start) & kSmiTagMask) == kSmiTag);
}
@ -1780,14 +1777,15 @@ void Assembler::MonomorphicCheckedEntryAOT() {
intptr_t start = CodeSize();
Label have_cid, miss;
Bind(&miss);
jmp(Address(THR, Thread::monomorphic_miss_entry_offset()));
jmp(Address(THR, target::Thread::monomorphic_miss_entry_offset()));
// Ensure the monomorphic entry is 2-byte aligned (so GC can see them if we
// store them in ICData / MegamorphicCache arrays)
nop(1);
Comment("MonomorphicCheckedEntry");
ASSERT(CodeSize() - start == Instructions::kMonomorphicEntryOffsetAOT);
ASSERT(CodeSize() - start ==
target::Instructions::kMonomorphicEntryOffsetAOT);
ASSERT((CodeSize() & kSmiTagMask) == kSmiTag);
movq(RAX, Immediate(kSmiCid));
@ -1805,17 +1803,18 @@ void Assembler::MonomorphicCheckedEntryAOT() {
nop(1);
// Fall through to unchecked entry.
ASSERT(CodeSize() - start == Instructions::kPolymorphicEntryOffsetAOT);
ASSERT(CodeSize() - start ==
target::Instructions::kPolymorphicEntryOffsetAOT);
ASSERT(((CodeSize() - start) & kSmiTagMask) == kSmiTag);
}
void Assembler::BranchOnMonomorphicCheckedEntryJIT(Label* label) {
has_single_entry_point_ = false;
while (CodeSize() < Instructions::kMonomorphicEntryOffsetJIT) {
while (CodeSize() < target::Instructions::kMonomorphicEntryOffsetJIT) {
int3();
}
jmp(label);
while (CodeSize() < Instructions::kPolymorphicEntryOffsetJIT) {
while (CodeSize() < target::Instructions::kPolymorphicEntryOffsetJIT) {
int3();
}
}
@ -1825,11 +1824,11 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
Label* trace,
bool near_jump) {
ASSERT(cid > 0);
intptr_t state_offset = ClassTable::StateOffsetFor(cid);
intptr_t state_offset = target::ClassTable::StateOffsetFor(cid);
Register temp_reg = TMP;
LoadIsolate(temp_reg);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
intptr_t table_offset = target::Isolate::class_table_offset() +
target::ClassTable::class_heap_stats_table_offset();
movq(temp_reg, Address(temp_reg, table_offset));
testb(Address(temp_reg, state_offset),
Immediate(target::ClassHeapStats::TraceAllocationMask()));
@ -1840,11 +1839,11 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
void Assembler::UpdateAllocationStats(intptr_t cid) {
ASSERT(cid > 0);
intptr_t counter_offset = ClassTable::NewSpaceCounterOffsetFor(cid);
intptr_t counter_offset = target::ClassTable::NewSpaceCounterOffsetFor(cid);
Register temp_reg = TMP;
LoadIsolate(temp_reg);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
intptr_t table_offset = target::Isolate::class_table_offset() +
target::ClassTable::class_heap_stats_table_offset();
movq(temp_reg, Address(temp_reg, table_offset));
incq(Address(temp_reg, counter_offset));
}
@ -1854,7 +1853,7 @@ void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg) {
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid);
Register temp_reg = TMP;
intptr_t size_offset = ClassTable::NewSpaceSizeOffsetFor(cid);
intptr_t size_offset = target::ClassTable::NewSpaceSizeOffsetFor(cid);
addq(Address(temp_reg, size_offset), size_reg);
}
@ -1864,7 +1863,7 @@ void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid);
Register temp_reg = TMP;
intptr_t size_offset = ClassTable::NewSpaceSizeOffsetFor(cid);
intptr_t size_offset = target::ClassTable::NewSpaceSizeOffsetFor(cid);
addq(Address(temp_reg, size_offset), Immediate(size_in_bytes));
}
#endif // !PRODUCT
@ -1876,20 +1875,21 @@ void Assembler::TryAllocate(const Class& cls,
Register temp) {
ASSERT(failure != NULL);
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
if (FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size)) {
const classid_t cid = target::Class::GetId(cls);
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, near_jump));
movq(instance_reg, Address(THR, Thread::top_offset()));
movq(instance_reg, Address(THR, target::Thread::top_offset()));
addq(instance_reg, Immediate(instance_size));
// instance_reg: potential next object start.
cmpq(instance_reg, Address(THR, Thread::end_offset()));
cmpq(instance_reg, Address(THR, target::Thread::end_offset()));
j(ABOVE_EQUAL, failure, near_jump);
// Successfully allocated the object, now update top to point to
// next object start and store the class in the class field of object.
movq(Address(THR, Thread::top_offset()), instance_reg);
movq(Address(THR, target::Thread::top_offset()), instance_reg);
NOT_IN_PRODUCT(UpdateAllocationStats(cid));
ASSERT(instance_size >= kHeapObjectTag);
AddImmediate(instance_reg, Immediate(kHeapObjectTag - instance_size));
@ -1912,12 +1912,13 @@ void Assembler::TryAllocateArray(intptr_t cid,
Register end_address,
Register temp) {
ASSERT(failure != NULL);
if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size)) {
if (FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size)) {
// If this allocation is traced, program will jump to failure path
// (i.e. the allocation stub) which will allocate the object and trace the
// allocation call site.
NOT_IN_PRODUCT(MaybeTraceAllocation(cid, failure, near_jump));
movq(instance, Address(THR, Thread::top_offset()));
movq(instance, Address(THR, target::Thread::top_offset()));
movq(end_address, instance);
addq(end_address, Immediate(instance_size));
@ -1926,12 +1927,12 @@ void Assembler::TryAllocateArray(intptr_t cid,
// Check if the allocation fits into the remaining space.
// instance: potential new object start.
// end_address: potential next object start.
cmpq(end_address, Address(THR, Thread::end_offset()));
cmpq(end_address, Address(THR, target::Thread::end_offset()));
j(ABOVE_EQUAL, failure);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
movq(Address(THR, Thread::top_offset()), end_address);
movq(Address(THR, target::Thread::top_offset()), end_address);
addq(instance, Immediate(kHeapObjectTag));
NOT_IN_PRODUCT(UpdateAllocationStatsWithSize(cid, instance_size));
@ -2086,22 +2087,22 @@ void Assembler::EmitGenericShift(bool wide,
}
void Assembler::LoadClassId(Register result, Register object) {
using target::Object;
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
ASSERT(sizeof(classid_t) == sizeof(uint16_t));
const intptr_t class_id_offset =
Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
movzxw(result, FieldAddress(object, class_id_offset));
}
void Assembler::LoadClassById(Register result, Register class_id) {
ASSERT(result != class_id);
LoadIsolate(result);
const intptr_t offset =
Isolate::class_table_offset() + ClassTable::table_offset();
const intptr_t offset = target::Isolate::class_table_offset() +
target::ClassTable::table_offset();
movq(result, Address(result, offset));
ASSERT(ClassTable::kSizeOfClassPairLog2 == 4);
ASSERT(target::ClassTable::kSizeOfClassPairLog2 == 4);
// TIMES_16 is not a real scale factor on x64, so we double the class id
// and use TIMES_8.
addq(class_id, class_id);
@ -2119,13 +2120,13 @@ void Assembler::CompareClassId(Register object,
void Assembler::SmiUntagOrCheckClass(Register object,
intptr_t class_id,
Label* is_smi) {
using target::Object;
ASSERT(kSmiTagShift == 1);
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
ASSERT(sizeof(classid_t) == sizeof(uint16_t));
const intptr_t class_id_offset =
Object::tags_offset() + RawObject::kClassIdTagPos / kBitsPerByte;
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
// Untag optimistically. Tag bit is shifted into the CARRY.
SmiUntag(object);
@ -2189,7 +2190,7 @@ void Assembler::LoadTaggedClassIdMayBeSmi(Register result, Register object) {
}
Address Assembler::VMTagAddress() {
return Address(THR, Thread::vm_tag_offset());
return Address(THR, target::Thread::vm_tag_offset());
}
Address Assembler::ElementAddressForIntIndex(bool is_external,
@ -2201,7 +2202,7 @@ Address Assembler::ElementAddressForIntIndex(bool is_external,
return Address(array, index * index_scale);
} else {
const int64_t disp = static_cast<int64_t>(index) * index_scale +
Instance::DataOffsetFor(cid);
target::Instance::DataOffsetFor(cid);
ASSERT(Utils::IsInt(32, disp));
return FieldAddress(array, static_cast<int32_t>(disp));
}
@ -2238,7 +2239,7 @@ Address Assembler::ElementAddressForRegIndex(bool is_external,
return Address(array, index, ToScaleFactor(index_scale), 0);
} else {
return FieldAddress(array, index, ToScaleFactor(index_scale),
Instance::DataOffsetFor(cid));
target::Instance::DataOffsetFor(cid));
}
}
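
The TransitionGeneratedToNative and TransitionNativeToGenerated hunks above now name target::Thread::safepoint_state_unacquired() / safepoint_state_acquired() explicitly; the LockCmpxchgq handshake is untouched. The sketch below models that handshake with std::atomic purely for illustration. SafepointModel and the constant values are assumptions for the example, not VM declarations, and on CAS failure the real code calls the enter/exit safepoint stubs rather than returning a bool.

#include <atomic>
#include <cstdint>

// Illustrative values; the generated code loads the real constants via
// target::Thread::safepoint_state_unacquired() / _acquired().
constexpr std::uintptr_t kUnacquired = 0;
constexpr std::uintptr_t kAcquired = 1;

struct SafepointModel {
  std::atomic<std::uintptr_t> state{kUnacquired};

  // Models the CAS in TransitionGeneratedToNative: unacquired -> acquired.
  bool TryEnterSafepoint() {
    std::uintptr_t expected = kUnacquired;
    return state.compare_exchange_strong(expected, kAcquired);
  }

  // Models the CAS in TransitionNativeToGenerated: acquired -> unacquired.
  bool TryExitSafepoint() {
    std::uintptr_t expected = kAcquired;
    return state.compare_exchange_strong(expected, kUnacquired);
  }
};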


@ -1110,12 +1110,6 @@ inline void Assembler::EmitOperandSizeOverride() {
}
} // namespace compiler
using compiler::Address;
using compiler::FieldAddress;
using compiler::Immediate;
using compiler::Label;
} // namespace dart
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_ASSEMBLER_X64_H_


@ -14,8 +14,8 @@ namespace dart {
#if !defined(PRODUCT) && !defined(TARGET_ARCH_DBC)
ISOLATE_UNIT_TEST_CASE(Disassembler) {
ObjectPoolBuilder object_pool_builder;
Assembler assembler(&object_pool_builder);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(&object_pool_builder);
// The used instructions work on all platforms.
Register reg = static_cast<Register>(0);


@ -180,8 +180,4 @@ class ObjectPoolBuilder : public ValueObject {
} // namespace dart
namespace dart {
using compiler::ObjectPoolBuilder;
}
#endif // RUNTIME_VM_COMPILER_ASSEMBLER_OBJECT_POOL_BUILDER_H_


@ -119,7 +119,8 @@ int CombinedCodeStatistics::CompareEntries(const void* a, const void* b) {
}
}
CodeStatistics::CodeStatistics(Assembler* assembler) : assembler_(assembler) {
CodeStatistics::CodeStatistics(compiler::Assembler* assembler)
: assembler_(assembler) {
memset(entries_, 0, CombinedCodeStatistics::kNumEntries * sizeof(Entry));
instruction_bytes_ = 0;
unaccounted_bytes_ = 0;


@ -67,7 +67,7 @@ class CombinedCodeStatistics {
class CodeStatistics {
public:
explicit CodeStatistics(Assembler* assembler);
explicit CodeStatistics(compiler::Assembler* assembler);
void Begin(Instruction* instruction);
void End(Instruction* instruction);
@ -82,7 +82,7 @@ class CodeStatistics {
private:
static const int kStackSize = 8;
Assembler* assembler_;
compiler::Assembler* assembler_;
typedef struct {
intptr_t bytes;


@ -103,7 +103,7 @@ void CompilerDeoptInfo::EmitMaterializations(Environment* env,
}
FlowGraphCompiler::FlowGraphCompiler(
Assembler* assembler,
compiler::Assembler* assembler,
FlowGraph* flow_graph,
const ParsedFunction& parsed_function,
bool is_optimizing,
@ -298,7 +298,7 @@ void FlowGraphCompiler::CompactBlocks() {
// This algorithm does not garbage collect blocks in place, but merely
// records forwarding label information. In this way it avoids having to
// change join and target entries.
Label* nonempty_label = NULL;
compiler::Label* nonempty_label = NULL;
for (intptr_t i = block_order().length() - 1; i >= 1; --i) {
BlockEntryInstr* block = block_order()[i];
@ -331,7 +331,7 @@ intptr_t FlowGraphCompiler::UncheckedEntryOffset() const {
entry = flow_graph().graph_entry()->osr_entry();
}
ASSERT(entry != nullptr);
Label* target = GetJumpLabel(entry);
compiler::Label* target = GetJumpLabel(entry);
if (target->IsBound()) {
return target->Position();
@ -353,7 +353,7 @@ static intptr_t LocationToStackIndex(const Location& src) {
src.stack_index());
}
static CatchEntryMove CatchEntryMoveFor(Assembler* assembler,
static CatchEntryMove CatchEntryMoveFor(compiler::Assembler* assembler,
Representation src_rep,
const Location& src,
intptr_t dst_index) {
@ -545,7 +545,7 @@ bool FlowGraphCompiler::IsPeephole(Instruction* instr) const {
void FlowGraphCompiler::VisitBlocks() {
CompactBlocks();
if (Assembler::EmittingComments()) {
if (compiler::Assembler::EmittingComments()) {
// The loop_info fields were cleared, recompute.
flow_graph().ComputeLoops();
}
@ -573,7 +573,7 @@ void FlowGraphCompiler::VisitBlocks() {
}
#endif
if (Assembler::EmittingComments()) {
if (compiler::Assembler::EmittingComments()) {
for (LoopInfo* l = entry->loop_info(); l != nullptr; l = l->outer()) {
assembler()->Comment(" Loop %" Pd "", l->id());
}
@ -662,7 +662,8 @@ intptr_t FlowGraphCompiler::ExtraStackSlotsOnOsrEntry() const {
return StackSize() - stack_depth - num_stack_locals;
}
Label* FlowGraphCompiler::GetJumpLabel(BlockEntryInstr* block_entry) const {
compiler::Label* FlowGraphCompiler::GetJumpLabel(
BlockEntryInstr* block_entry) const {
const intptr_t block_index = block_entry->postorder_number();
return block_info_[block_index]->jump_label();
}
@ -672,7 +673,7 @@ bool FlowGraphCompiler::WasCompacted(BlockEntryInstr* block_entry) const {
return block_info_[block_index]->WasCompacted();
}
Label* FlowGraphCompiler::NextNonEmptyLabel() const {
compiler::Label* FlowGraphCompiler::NextNonEmptyLabel() const {
const intptr_t current_index = current_block()->postorder_number();
return block_info_[current_index]->next_nonempty_label();
}
@ -682,9 +683,9 @@ bool FlowGraphCompiler::CanFallThroughTo(BlockEntryInstr* block_entry) const {
}
BranchLabels FlowGraphCompiler::CreateBranchLabels(BranchInstr* branch) const {
Label* true_label = GetJumpLabel(branch->true_successor());
Label* false_label = GetJumpLabel(branch->false_successor());
Label* fall_through = NextNonEmptyLabel();
compiler::Label* true_label = GetJumpLabel(branch->true_successor());
compiler::Label* false_label = GetJumpLabel(branch->false_successor());
compiler::Label* fall_through = NextNonEmptyLabel();
BranchLabels result = {true_label, false_label, fall_through};
return result;
}
@ -1009,9 +1010,9 @@ Environment* FlowGraphCompiler::SlowPathEnvironmentFor(
return env;
}
Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id,
ICData::DeoptReasonId reason,
uint32_t flags) {
compiler::Label* FlowGraphCompiler::AddDeoptStub(intptr_t deopt_id,
ICData::DeoptReasonId reason,
uint32_t flags) {
if (intrinsic_mode()) {
return intrinsic_slow_path_label_;
}
@ -1068,7 +1069,7 @@ void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) {
code.set_pc_descriptors(descriptors);
}
RawArray* FlowGraphCompiler::CreateDeoptInfo(Assembler* assembler) {
RawArray* FlowGraphCompiler::CreateDeoptInfo(compiler::Assembler* assembler) {
// No deopt information if we precompile (no deoptimization allowed).
if (FLAG_precompiled_mode) {
return Array::empty_array().raw();
@ -1209,7 +1210,7 @@ bool FlowGraphCompiler::TryIntrinsify() {
}
bool FlowGraphCompiler::TryIntrinsifyHelper() {
Label exit;
compiler::Label exit;
set_intrinsic_slow_path_label(&exit);
if (FLAG_intrinsify) {
@ -1420,10 +1421,11 @@ void FlowGraphCompiler::GenerateStaticCall(intptr_t deopt_id,
}
}
void FlowGraphCompiler::GenerateNumberTypeCheck(Register class_id_reg,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
void FlowGraphCompiler::GenerateNumberTypeCheck(
Register class_id_reg,
const AbstractType& type,
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
assembler()->Comment("NumberTypeCheck");
GrowableArray<intptr_t> args;
if (type.IsNumberType()) {
@ -1437,9 +1439,10 @@ void FlowGraphCompiler::GenerateNumberTypeCheck(Register class_id_reg,
CheckClassIds(class_id_reg, args, is_instance_lbl, is_not_instance_lbl);
}
void FlowGraphCompiler::GenerateStringTypeCheck(Register class_id_reg,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
void FlowGraphCompiler::GenerateStringTypeCheck(
Register class_id_reg,
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
assembler()->Comment("StringTypeCheck");
GrowableArray<intptr_t> args;
args.Add(kOneByteStringCid);
@ -1449,10 +1452,11 @@ void FlowGraphCompiler::GenerateStringTypeCheck(Register class_id_reg,
CheckClassIds(class_id_reg, args, is_instance_lbl, is_not_instance_lbl);
}
void FlowGraphCompiler::GenerateListTypeCheck(Register class_id_reg,
Label* is_instance_lbl) {
void FlowGraphCompiler::GenerateListTypeCheck(
Register class_id_reg,
compiler::Label* is_instance_lbl) {
assembler()->Comment("ListTypeCheck");
Label unknown;
compiler::Label unknown;
GrowableArray<intptr_t> args;
args.Add(kArrayCid);
args.Add(kGrowableObjectArrayCid);
@ -2065,9 +2069,9 @@ void FlowGraphCompiler::EmitPolymorphicInstanceCall(
bool complete,
intptr_t total_ic_calls) {
if (FLAG_polymorphic_with_deopt) {
Label* deopt =
compiler::Label* deopt =
AddDeoptStub(deopt_id, ICData::kDeoptPolymorphicInstanceCallTestFail);
Label ok;
compiler::Label ok;
EmitTestAndCall(targets, original_call.function_name(), args_info,
deopt, // No cid match.
&ok, // Found cid.
@ -2076,7 +2080,7 @@ void FlowGraphCompiler::EmitPolymorphicInstanceCall(
assembler()->Bind(&ok);
} else {
if (complete) {
Label ok;
compiler::Label ok;
EmitTestAndCall(targets, original_call.function_name(), args_info,
NULL, // No cid match.
&ok, // Found cid.
@ -2097,8 +2101,8 @@ void FlowGraphCompiler::EmitPolymorphicInstanceCall(
void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
const String& function_name,
ArgumentsInfo args_info,
Label* failed,
Label* match_found,
compiler::Label* failed,
compiler::Label* match_found,
intptr_t deopt_id,
TokenPosition token_index,
LocationSummary* locs,
@ -2140,7 +2144,7 @@ void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
}
if (smi_case != kNoCase) {
Label after_smi_test;
compiler::Label after_smi_test;
// If the call is complete and there are no other possible receiver
// classes - then receiver can only be a smi value and we don't need
// to check if it is a smi.
@ -2192,7 +2196,7 @@ void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
add_megamorphic_call = true;
break;
}
Label next_test;
compiler::Label next_test;
if (!complete || !is_last_check) {
bias = EmitTestAndCallCheckCid(assembler(),
is_last_check ? failed : &next_test,
@ -2219,7 +2223,7 @@ void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
bool FlowGraphCompiler::GenerateSubtypeRangeCheck(Register class_id_reg,
const Class& type_class,
Label* is_subtype) {
compiler::Label* is_subtype) {
HierarchyInfo* hi = Thread::Current()->hierarchy_info();
if (hi != NULL) {
const CidRangeVector& ranges =
@ -2241,12 +2245,13 @@ bool FlowGraphCompiler::GenerateSubtypeRangeCheck(Register class_id_reg,
return false;
}
void FlowGraphCompiler::GenerateCidRangesCheck(Assembler* assembler,
Register class_id_reg,
const CidRangeVector& cid_ranges,
Label* inside_range_lbl,
Label* outside_range_lbl,
bool fall_through_if_inside) {
void FlowGraphCompiler::GenerateCidRangesCheck(
compiler::Assembler* assembler,
Register class_id_reg,
const CidRangeVector& cid_ranges,
compiler::Label* inside_range_lbl,
compiler::Label* outside_range_lbl,
bool fall_through_if_inside) {
// If there are no valid class ranges, the check will fail. If we are
// supposed to fall-through in the positive case, we'll explicitly jump to
// the [outside_range_lbl].
@ -2263,8 +2268,9 @@ void FlowGraphCompiler::GenerateCidRangesCheck(Assembler* assembler,
RELEASE_ASSERT(!range.IsIllegalRange());
const bool last_round = i == (cid_ranges.length() - 1);
Label* jump_label = last_round && fall_through_if_inside ? outside_range_lbl
: inside_range_lbl;
compiler::Label* jump_label = last_round && fall_through_if_inside
? outside_range_lbl
: inside_range_lbl;
const bool jump_on_miss = last_round && fall_through_if_inside;
bias = EmitTestAndCallCheckCid(assembler, jump_label, class_id_reg, range,
@ -2288,7 +2294,7 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
const Register subtype_cache_reg,
const Register dst_type_reg,
const Register scratch_reg,
Label* done) {
compiler::Label* done) {
TypeUsageInfo* type_usage_info = thread()->type_usage_info();
// If the int type is assignable to [dst_type] we special case it on the
@ -2311,10 +2317,11 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
// Check if type arguments are null, i.e. equivalent to vector of dynamic.
__ CompareObject(kTypeArgumentsReg, Object::null_object());
__ BranchIf(EQUAL, done);
__ LoadField(dst_type_reg,
FieldAddress(kTypeArgumentsReg,
compiler::target::TypeArguments::type_at_offset(
type_param.index())));
__ LoadField(
dst_type_reg,
compiler::FieldAddress(kTypeArgumentsReg,
compiler::target::TypeArguments::type_at_offset(
type_param.index())));
if (type_usage_info != NULL) {
type_usage_info->UseTypeInAssertAssignable(dst_type);
}
@ -2443,7 +2450,7 @@ void FlowGraphCompiler::FrameStateClear() {
#define __ compiler->assembler()->
void ThrowErrorSlowPathCode::EmitNativeCode(FlowGraphCompiler* compiler) {
if (Assembler::EmittingComments()) {
if (compiler::Assembler::EmittingComments()) {
__ Comment("slow path %s operation", name());
}
const bool use_shared_stub =


@ -139,9 +139,10 @@ class ParallelMoveResolver : public ValueObject {
// Helpers for non-trivial source-destination combinations that cannot
// be handled by a single instruction.
void MoveMemoryToMemory(const Address& dst, const Address& src);
void Exchange(Register reg, const Address& mem);
void Exchange(const Address& mem1, const Address& mem2);
void MoveMemoryToMemory(const compiler::Address& dst,
const compiler::Address& src);
void Exchange(Register reg, const compiler::Address& mem);
void Exchange(const compiler::Address& mem1, const compiler::Address& mem2);
void Exchange(Register reg, Register base_reg, intptr_t stack_offset);
void Exchange(Register base_reg1,
intptr_t stack_offset1,
@ -229,7 +230,7 @@ class CompilerDeoptInfoWithStub : public CompilerDeoptInfo {
ASSERT(reason != ICData::kDeoptAtCall);
}
Label* entry_label() { return &entry_label_; }
compiler::Label* entry_label() { return &entry_label_; }
// Implementation is in architecture specific file.
virtual void GenerateCode(FlowGraphCompiler* compiler, intptr_t stub_ix);
@ -246,7 +247,7 @@ class CompilerDeoptInfoWithStub : public CompilerDeoptInfo {
}
private:
Label entry_label_;
compiler::Label entry_label_;
DISALLOW_COPY_AND_ASSIGN(CompilerDeoptInfoWithStub);
};
@ -258,8 +259,8 @@ class SlowPathCode : public ZoneAllocated {
virtual ~SlowPathCode() {}
Instruction* instruction() const { return instruction_; }
Label* entry_label() { return &entry_label_; }
Label* exit_label() { return &exit_label_; }
compiler::Label* entry_label() { return &entry_label_; }
compiler::Label* exit_label() { return &exit_label_; }
void GenerateCode(FlowGraphCompiler* compiler) {
EmitNativeCode(compiler);
@ -270,8 +271,8 @@ class SlowPathCode : public ZoneAllocated {
virtual void EmitNativeCode(FlowGraphCompiler* compiler) = 0;
Instruction* instruction_;
Label entry_label_;
Label exit_label_;
compiler::Label entry_label_;
compiler::Label exit_label_;
DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
@ -352,13 +353,17 @@ class FlowGraphCompiler : public ValueObject {
// The label to jump to when control is transferred to this block. For
// nonempty blocks it is the label of the block itself. For empty
// blocks it is the label of the first nonempty successor block.
Label* jump_label() const { return jump_label_; }
void set_jump_label(Label* label) { jump_label_ = label; }
compiler::Label* jump_label() const { return jump_label_; }
void set_jump_label(compiler::Label* label) { jump_label_ = label; }
// The label of the first nonempty block after this one in the block
// order, or NULL if there is no nonempty block following this one.
Label* next_nonempty_label() const { return next_nonempty_label_; }
void set_next_nonempty_label(Label* label) { next_nonempty_label_ = label; }
compiler::Label* next_nonempty_label() const {
return next_nonempty_label_;
}
void set_next_nonempty_label(compiler::Label* label) {
next_nonempty_label_ = label;
}
bool WasCompacted() const { return jump_label_ != &block_label_; }
@ -368,16 +373,16 @@ class FlowGraphCompiler : public ValueObject {
void mark() { is_marked_ = true; }
private:
Label block_label_;
compiler::Label block_label_;
Label* jump_label_;
Label* next_nonempty_label_;
compiler::Label* jump_label_;
compiler::Label* next_nonempty_label_;
bool is_marked_;
};
public:
FlowGraphCompiler(Assembler* assembler,
FlowGraphCompiler(compiler::Assembler* assembler,
FlowGraph* flow_graph,
const ParsedFunction& parsed_function,
bool is_optimizing,
@ -402,7 +407,7 @@ class FlowGraphCompiler : public ValueObject {
static bool IsPotentialUnboxedField(const Field& field);
// Accessors.
Assembler* assembler() const { return assembler_; }
compiler::Assembler* assembler() const { return assembler_; }
const ParsedFunction& parsed_function() const { return parsed_function_; }
const Function& function() const { return parsed_function_.function(); }
const GrowableArray<BlockEntryInstr*>& block_order() const {
@ -436,11 +441,11 @@ class FlowGraphCompiler : public ValueObject {
void ExitIntrinsicMode();
bool intrinsic_mode() const { return intrinsic_mode_; }
void set_intrinsic_slow_path_label(Label* label) {
void set_intrinsic_slow_path_label(compiler::Label* label) {
ASSERT(intrinsic_slow_path_label_ == nullptr || label == nullptr);
intrinsic_slow_path_label_ = label;
}
Label* intrinsic_slow_path_label() const {
compiler::Label* intrinsic_slow_path_label() const {
ASSERT(intrinsic_slow_path_label_ != nullptr);
return intrinsic_slow_path_label_;
}
@ -513,7 +518,7 @@ class FlowGraphCompiler : public ValueObject {
const Register subtype_cache_reg,
const Register dst_type_reg,
const Register scratch_reg,
Label* done);
compiler::Label* done);
// DBC emits calls very differently from all other architectures due to its
// interpreted nature.
@ -579,19 +584,20 @@ class FlowGraphCompiler : public ValueObject {
void GenerateNumberTypeCheck(Register kClassIdReg,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl);
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl);
void GenerateStringTypeCheck(Register kClassIdReg,
Label* is_instance_lbl,
Label* is_not_instance_lbl);
void GenerateListTypeCheck(Register kClassIdReg, Label* is_instance_lbl);
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl);
void GenerateListTypeCheck(Register kClassIdReg,
compiler::Label* is_instance_lbl);
// Returns true if no further checks are necessary but the code coming after
// the emitted code here is still required to do a runtime call (for the negative
// case of throwing an exception).
bool GenerateSubtypeRangeCheck(Register class_id_reg,
const Class& type_class,
Label* is_subtype_lbl);
compiler::Label* is_subtype_lbl);
// We test up to 4 different cid ranges, if we would need to test more in
// order to get a definite answer we fall back to the old mechanism (namely
@ -601,11 +607,11 @@ class FlowGraphCompiler : public ValueObject {
// If [fall_through_if_inside] is `true`, then [outside_range_lbl] must be
// supplied, since it will be jumped to in the last case if the cid is outside
// the range.
static void GenerateCidRangesCheck(Assembler* assembler,
static void GenerateCidRangesCheck(compiler::Assembler* assembler,
Register class_id_reg,
const CidRangeVector& cid_ranges,
Label* inside_range_lbl,
Label* outside_range_lbl = NULL,
compiler::Label* inside_range_lbl,
compiler::Label* outside_range_lbl = NULL,
bool fall_through_if_inside = false);
void EmitOptimizedInstanceCall(
@ -652,8 +658,8 @@ class FlowGraphCompiler : public ValueObject {
void EmitTestAndCall(const CallTargets& targets,
const String& function_name,
ArgumentsInfo args_info,
Label* failed,
Label* match_found,
compiler::Label* failed,
compiler::Label* match_found,
intptr_t deopt_id,
TokenPosition token_index,
LocationSummary* locs,
@ -706,11 +712,11 @@ class FlowGraphCompiler : public ValueObject {
intptr_t ExtraStackSlotsOnOsrEntry() const;
// Returns assembler label associated with the given block entry.
Label* GetJumpLabel(BlockEntryInstr* block_entry) const;
compiler::Label* GetJumpLabel(BlockEntryInstr* block_entry) const;
bool WasCompacted(BlockEntryInstr* block_entry) const;
// Returns the label of the fall-through of the current block.
Label* NextNonEmptyLabel() const;
compiler::Label* NextNonEmptyLabel() const;
// Returns true if there is a next block after the current one in
// the block order and if it is the given block.
@ -743,9 +749,9 @@ class FlowGraphCompiler : public ValueObject {
void RecordSafepoint(LocationSummary* locs,
intptr_t slow_path_argument_count = 0);
Label* AddDeoptStub(intptr_t deopt_id,
ICData::DeoptReasonId reason,
uint32_t flags = 0);
compiler::Label* AddDeoptStub(intptr_t deopt_id,
ICData::DeoptReasonId reason,
uint32_t flags = 0);
#if defined(TARGET_ARCH_DBC)
void EmitDeopt(intptr_t deopt_id,
@ -763,7 +769,7 @@ class FlowGraphCompiler : public ValueObject {
void FinalizeExceptionHandlers(const Code& code);
void FinalizePcDescriptors(const Code& code);
RawArray* CreateDeoptInfo(Assembler* assembler);
RawArray* CreateDeoptInfo(compiler::Assembler* assembler);
void FinalizeStackMaps(const Code& code);
void FinalizeVarDescriptors(const Code& code);
void FinalizeCatchEntryMovesMap(const Code& code);
@ -853,8 +859,8 @@ class FlowGraphCompiler : public ValueObject {
// Returns new class-id bias.
//
// TODO(kustermann): We should move this code out of the [FlowGraphCompiler]!
static int EmitTestAndCallCheckCid(Assembler* assembler,
Label* label,
static int EmitTestAndCallCheckCid(compiler::Assembler* assembler,
compiler::Label* label,
Register class_id_reg,
const CidRange& range,
int bias,
@ -920,7 +926,7 @@ class FlowGraphCompiler : public ValueObject {
void EmitTestAndCallLoadReceiver(intptr_t count_without_type_args,
const Array& arguments_descriptor);
void EmitTestAndCallSmiBranch(Label* label, bool jump_if_smi);
void EmitTestAndCallSmiBranch(compiler::Label* label, bool jump_if_smi);
void EmitTestAndCallLoadCid(Register class_id_reg);
@ -930,41 +936,44 @@ class FlowGraphCompiler : public ValueObject {
// Type checking helper methods.
void CheckClassIds(Register class_id_reg,
const GrowableArray<intptr_t>& class_ids,
Label* is_instance_lbl,
Label* is_not_instance_lbl);
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl);
RawSubtypeTestCache* GenerateInlineInstanceof(TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl);
RawSubtypeTestCache* GenerateInlineInstanceof(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl);
RawSubtypeTestCache* GenerateInstantiatedTypeWithArgumentsTest(
TokenPosition token_pos,
const AbstractType& dst_type,
Label* is_instance_lbl,
Label* is_not_instance_lbl);
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl);
bool GenerateInstantiatedTypeNoArgumentsTest(TokenPosition token_pos,
const AbstractType& dst_type,
Label* is_instance_lbl,
Label* is_not_instance_lbl);
bool GenerateInstantiatedTypeNoArgumentsTest(
TokenPosition token_pos,
const AbstractType& dst_type,
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl);
RawSubtypeTestCache* GenerateUninstantiatedTypeTest(
TokenPosition token_pos,
const AbstractType& dst_type,
Label* is_instance_lbl,
Label* is_not_instance_label);
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_label);
RawSubtypeTestCache* GenerateFunctionTypeTest(TokenPosition token_pos,
const AbstractType& dst_type,
Label* is_instance_lbl,
Label* is_not_instance_label);
RawSubtypeTestCache* GenerateFunctionTypeTest(
TokenPosition token_pos,
const AbstractType& dst_type,
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_label);
RawSubtypeTestCache* GenerateSubtype1TestCacheLookup(
TokenPosition token_pos,
const Class& type_class,
Label* is_instance_lbl,
Label* is_not_instance_lbl);
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl);
enum TypeTestStubKind {
kTestTypeOneArg,
@ -979,10 +988,12 @@ class FlowGraphCompiler : public ValueObject {
Register instantiator_type_arguments_reg,
Register function_type_arguments_reg,
Register temp_reg,
Label* is_instance_lbl,
Label* is_not_instance_lbl);
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl);
void GenerateBoolToJump(Register bool_reg, Label* is_true, Label* is_false);
void GenerateBoolToJump(Register bool_reg,
compiler::Label* is_true,
compiler::Label* is_false);
void GenerateMethodExtractorIntrinsic(const Function& extracted_method,
intptr_t type_arguments_field_offset);
@ -1064,7 +1075,7 @@ class FlowGraphCompiler : public ValueObject {
Thread* thread_;
Zone* zone_;
Assembler* assembler_;
compiler::Assembler* assembler_;
const ParsedFunction& parsed_function_;
const FlowGraph& flow_graph_;
const GrowableArray<BlockEntryInstr*>& block_order_;
@ -1095,7 +1106,7 @@ class FlowGraphCompiler : public ValueObject {
bool may_reoptimize_;
// True while emitting intrinsic code.
bool intrinsic_mode_;
Label* intrinsic_slow_path_label_ = nullptr;
compiler::Label* intrinsic_slow_path_label_ = nullptr;
bool fully_intrinsified_ = false;
CodeStatistics* stats_;
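
The GenerateCidRangesCheck and EmitTestAndCallCheckCid declarations above change only their Label/Assembler spellings to compiler::; their contract (branch to inside_range_lbl on a hit, otherwise to outside_range_lbl or fall through) stays the same. As a reading aid, a minimal sketch of that contract as a plain predicate, under the assumption that cid ranges are inclusive; CidRangeModel and InsideAnyCidRange are illustrative names, not VM code.

#include <cstdint>
#include <vector>

// Hypothetical mirror of CidRange; only the bounds matter for this sketch.
struct CidRangeModel {
  std::int64_t cid_start;
  std::int64_t cid_end;  // assumed inclusive
};

// Boolean model of GenerateCidRangesCheck: the emitted code branches to
// inside_range_lbl when the class id lands in any range; here that branch
// simply becomes a true return value.
inline bool InsideAnyCidRange(const std::vector<CidRangeModel>& ranges,
                              std::int64_t class_id) {
  for (const CidRangeModel& range : ranges) {
    if (class_id >= range.cid_start && class_id <= range.cid_end) {
      return true;
    }
  }
  return false;
}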


@ -187,7 +187,7 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
intptr_t stub_ix) {
// Calls do not need stubs, they share a deoptimization trampoline.
ASSERT(reason() != ICData::kDeoptAtCall);
Assembler* assembler = compiler->assembler();
compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
__ Comment("%s", Name());
__ Bind(entry_label());
@ -196,7 +196,8 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
}
ASSERT(deopt_env() != NULL);
__ ldr(LR, Address(THR, compiler::target::Thread::deoptimize_entry_offset()));
__ ldr(LR, compiler::Address(
THR, compiler::target::Thread::deoptimize_entry_offset()));
__ blx(LR);
ASSERT(kReservedCpuRegisters & (1 << LR));
set_pc_offset(assembler->CodeSize());
@ -207,9 +208,9 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
Label* is_true,
Label* is_false) {
Label fall_through;
compiler::Label* is_true,
compiler::Label* is_false) {
compiler::Label fall_through;
__ CompareObject(bool_register, Object::null_object());
__ b(&fall_through, EQ);
__ CompareObject(bool_register, Bool::True());
@ -228,8 +229,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
Register instantiator_type_arguments_reg,
Register function_type_arguments_reg,
Register temp_reg,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
ASSERT(instance_reg == R0);
ASSERT(temp_reg == kNoRegister); // Unused on ARM.
const SubtypeTestCache& type_test_cache =
@ -268,8 +269,8 @@ RawSubtypeTestCache*
FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InstantiatedTypeWithArgumentsTest");
ASSERT(type.IsInstantiated());
ASSERT(!type.IsFunctionType());
@ -278,7 +279,7 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
const Register kInstanceReg = R0;
const Type& smi_type = Type::Handle(zone(), Type::SmiType());
const bool smi_is_ok = smi_type.IsSubtypeOf(type, Heap::kOld);
__ tst(kInstanceReg, Operand(kSmiTagMask));
__ tst(kInstanceReg, compiler::Operand(kSmiTagMask));
if (smi_is_ok) {
// Fast case for type = FutureOr<int/num/top-type>.
__ b(is_instance_lbl, EQ);
@ -334,8 +335,8 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
const GrowableArray<intptr_t>& class_ids,
Label* is_equal_lbl,
Label* is_not_equal_lbl) {
compiler::Label* is_equal_lbl,
compiler::Label* is_not_equal_lbl) {
for (intptr_t i = 0; i < class_ids.length(); i++) {
__ CompareImmediate(class_id_reg, class_ids[i]);
__ b(is_equal_lbl, EQ);
@ -351,8 +352,8 @@ void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InstantiatedTypeNoArgumentsTest");
ASSERT(type.IsInstantiated());
ASSERT(!type.IsFunctionType());
@ -360,7 +361,7 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
ASSERT(type_class.NumTypeArguments() == 0);
const Register kInstanceReg = R0;
__ tst(kInstanceReg, Operand(kSmiTagMask));
__ tst(kInstanceReg, compiler::Operand(kSmiTagMask));
// If instance is Smi, check directly.
const Class& smi_class = Class::Handle(zone(), Smi::Class());
if (Class::IsSubtypeOf(smi_class, Object::null_type_arguments(), type_class,
@ -417,12 +418,12 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
TokenPosition token_pos,
const Class& type_class,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("Subtype1TestCacheLookup");
const Register kInstanceReg = R0;
#if defined(DEBUG)
Label ok;
compiler::Label ok;
__ BranchIfNotSmi(kInstanceReg, &ok);
__ Breakpoint();
__ Bind(&ok);
@ -431,8 +432,10 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
__ LoadClassById(R1, R2);
// R1: instance class.
// Check immediate superclass equality.
__ ldr(R2, FieldAddress(R1, compiler::target::Class::super_type_offset()));
__ ldr(R2, FieldAddress(R2, compiler::target::Type::type_class_id_offset()));
__ ldr(R2, compiler::FieldAddress(
R1, compiler::target::Class::super_type_offset()));
__ ldr(R2, compiler::FieldAddress(
R2, compiler::target::Type::type_class_id_offset()));
__ CompareImmediate(R2, Smi::RawValue(type_class.id()));
__ b(is_instance_lbl, EQ);
@ -450,8 +453,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("UninstantiatedTypeTest");
const Register kInstanceReg = R0;
const Register kInstantiatorTypeArgumentsReg = R2;
@ -474,9 +477,10 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
// Check if type arguments are null, i.e. equivalent to vector of dynamic.
__ CompareObject(kTypeArgumentsReg, Object::null_object());
__ b(is_instance_lbl, EQ);
__ ldr(R3, FieldAddress(kTypeArgumentsReg,
compiler::target::TypeArguments::type_at_offset(
type_param.index())));
__ ldr(R3, compiler::FieldAddress(
kTypeArgumentsReg,
compiler::target::TypeArguments::type_at_offset(
type_param.index())));
// R3: concrete type of type.
// Check if type argument is dynamic, Object, or void.
__ CompareObject(R3, Object::dynamic_type());
@ -487,8 +491,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
__ b(is_instance_lbl, EQ);
// For Smi check quickly against int and num interfaces.
Label not_smi;
__ tst(R0, Operand(kSmiTagMask)); // Value is Smi?
compiler::Label not_smi;
__ tst(R0, compiler::Operand(kSmiTagMask)); // Value is Smi?
__ b(&not_smi, NE);
__ CompareObject(R3, Type::ZoneHandle(zone(), Type::IntType()));
__ b(is_instance_lbl, EQ);
@ -536,8 +540,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
const Register kInstanceReg = R0;
const Register kInstantiatorTypeArgumentsReg = R2;
const Register kFunctionTypeArgumentsReg = R1;
@ -568,8 +572,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InlineInstanceof");
if (type.IsFunctionType()) {
@ -623,7 +627,7 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
const Register kFunctionTypeArgumentsReg = R1;
__ PushList((1 << kInstantiatorTypeArgumentsReg) |
(1 << kFunctionTypeArgumentsReg));
Label is_instance, is_not_instance;
compiler::Label is_instance, is_not_instance;
// If type is instantiated and non-parameterized, we can inline code
// checking whether the tested instance is a Smi.
if (type.IsInstantiated()) {
@ -644,7 +648,7 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance);
// test_cache is null if there is no fall-through.
Label done;
compiler::Label done;
if (!test_cache.IsNull()) {
// Generate runtime call.
__ ldm(IA, SP,
@ -705,7 +709,7 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
GenerateAssertAssignableViaTypeTestingStub(token_pos, deopt_id, dst_type,
dst_name, locs);
} else {
Label is_assignable_fast, is_assignable, runtime_call;
compiler::Label is_assignable_fast, is_assignable, runtime_call;
// A null object is always assignable and is returned as result.
__ CompareObject(R0, Object::null_object());
@ -758,7 +762,7 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
const Register kDstTypeReg = R8;
const Register kScratchReg = R4;
Label done;
compiler::Label done;
GenerateAssertAssignableViaTypeTestingStub(
dst_type, dst_name, kInstanceReg, kInstantiatorTypeArgumentsReg,
@ -782,7 +786,7 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
__ LoadField(
R9,
FieldAddress(
compiler::FieldAddress(
kDstTypeReg,
compiler::target::AbstractType::type_test_stub_entry_point_offset()));
__ LoadWordFromPoolOffset(kSubtypeTestCacheReg, sub_type_cache_offset, PP,
@ -835,8 +839,9 @@ void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
__ LoadFieldFromOffset(
kWord, CODE_REG, kPoolReg,
compiler::target::ObjectPool::element_offset(stub_index));
__ Branch(FieldAddress(CODE_REG, compiler::target::Code::entry_point_offset(
Code::EntryKind::kUnchecked)));
__ Branch(compiler::FieldAddress(
CODE_REG,
compiler::target::Code::entry_point_offset(Code::EntryKind::kUnchecked)));
}
void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
@ -844,7 +849,7 @@ void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
// SP: receiver.
// Sequence node has one return node, its input is load field node.
__ Comment("Intrinsic Getter");
__ ldr(R0, Address(SP, 0 * compiler::target::kWordSize));
__ ldr(R0, compiler::Address(SP, 0 * compiler::target::kWordSize));
__ LoadFieldFromOffset(kWord, R0, R0, offset);
__ Ret();
}
@ -855,8 +860,9 @@ void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
// SP+0: value.
// Sequence node has one store node and one return NULL node.
__ Comment("Intrinsic Setter");
__ ldr(R0, Address(SP, 1 * compiler::target::kWordSize)); // Receiver.
__ ldr(R1, Address(SP, 0 * compiler::target::kWordSize)); // Value.
__ ldr(R0,
compiler::Address(SP, 1 * compiler::target::kWordSize)); // Receiver.
__ ldr(R1, compiler::Address(SP, 0 * compiler::target::kWordSize)); // Value.
__ StoreIntoObjectOffset(R0, offset, R1);
__ LoadObject(R0, Object::null_object());
__ Ret();
@ -868,22 +874,23 @@ void FlowGraphCompiler::EmitFrameEntry() {
(!is_optimizing() || may_reoptimize())) {
__ Comment("Invocation Count Check");
const Register function_reg = R8;
__ ldr(function_reg,
FieldAddress(CODE_REG, compiler::target::Code::owner_offset()));
__ ldr(R3,
FieldAddress(function_reg,
compiler::target::Function::usage_counter_offset()));
__ ldr(function_reg, compiler::FieldAddress(
CODE_REG, compiler::target::Code::owner_offset()));
__ ldr(R3, compiler::FieldAddress(
function_reg,
compiler::target::Function::usage_counter_offset()));
// Reoptimization of an optimized function is triggered by counting in
// IC stubs, but not at the entry of the function.
if (!is_optimizing()) {
__ add(R3, R3, Operand(1));
__ str(R3,
FieldAddress(function_reg,
compiler::target::Function::usage_counter_offset()));
__ add(R3, R3, compiler::Operand(1));
__ str(R3, compiler::FieldAddress(
function_reg,
compiler::target::Function::usage_counter_offset()));
}
__ CompareImmediate(R3, GetOptimizationThreshold());
ASSERT(function_reg == R8);
__ Branch(Address(THR, compiler::target::Thread::optimize_entry_offset()),
__ Branch(compiler::Address(
THR, compiler::target::Thread::optimize_entry_offset()),
GE);
}
__ Comment("Enter frame");
@ -1036,7 +1043,7 @@ void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
#endif // DEBUG
__ LoadFieldFromOffset(kWord, R1, R0,
compiler::target::Array::element_offset(edge_id));
__ add(R1, R1, Operand(Smi::RawValue(1)));
__ add(R1, R1, compiler::Operand(Smi::RawValue(1)));
__ StoreIntoObjectNoBarrierOffset(
R0, compiler::target::Array::element_offset(edge_id), R1);
#if defined(DEBUG)
@ -1084,7 +1091,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
entry_kind == Code::EntryKind::kNormal
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ ldr(LR, FieldAddress(CODE_REG, entry_point_offset));
__ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset));
__ blx(LR);
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kIcCall, locs);
__ Drop(ic_data.CountWithTypeArgs());
@ -1111,7 +1118,7 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
__ LoadObject(R9, cache);
__ ldr(
LR,
Address(
compiler::Address(
THR,
compiler::target::Thread::megamorphic_call_checked_entry_offset()));
__ blx(LR);
@ -1167,7 +1174,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
Code::EntryKind::kMonomorphic)
: compiler::target::Code::entry_point_offset(
Code::EntryKind::kMonomorphicUnchecked);
__ ldr(LR, FieldAddress(CODE_REG, entry_point_offset));
__ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset));
}
__ LoadUniqueObject(R9, data);
__ blx(LR);
@ -1256,7 +1263,7 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
__ Pop(right);
__ Pop(left);
} else {
__ cmp(left, Operand(right));
__ cmp(left, compiler::Operand(right));
}
return EQ;
}
@ -1284,7 +1291,7 @@ void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
// TODO(zerny): clobber non-live temporary FPU registers.
if (tmp.IsRegister() &&
!locs->live_registers()->ContainsRegister(tmp.reg())) {
__ mov(tmp.reg(), Operand(0xf7));
__ mov(tmp.reg(), compiler::Operand(0xf7));
}
}
}
@ -1305,8 +1312,9 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
__ LoadObject(R4, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) {
__ tst(R0, Operand(kSmiTagMask));
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
bool if_smi) {
__ tst(R0, compiler::Operand(kSmiTagMask));
// Jump if receiver is not Smi.
__ b(label, if_smi ? EQ : NE);
}
@ -1319,8 +1327,8 @@ void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
#undef __
#define __ assembler->
int FlowGraphCompiler::EmitTestAndCallCheckCid(Assembler* assembler,
Label* label,
int FlowGraphCompiler::EmitTestAndCallCheckCid(compiler::Assembler* assembler,
compiler::Label* label,
Register class_id_reg,
const CidRange& range,
int bias,
@ -1349,7 +1357,7 @@ void FlowGraphCompiler::EmitMove(Location destination,
if (source.IsRegister()) {
if (destination.IsRegister()) {
__ mov(destination.reg(), Operand(source.reg()));
__ mov(destination.reg(), compiler::Operand(source.reg()));
} else {
ASSERT(destination.IsStackSlot());
const intptr_t dest_offset = destination.ToStackSlotOffset();
@ -1453,9 +1461,9 @@ void ParallelMoveResolver::EmitSwap(int index) {
if (source.IsRegister() && destination.IsRegister()) {
ASSERT(source.reg() != IP);
ASSERT(destination.reg() != IP);
__ mov(IP, Operand(source.reg()));
__ mov(source.reg(), Operand(destination.reg()));
__ mov(destination.reg(), Operand(IP));
__ mov(IP, compiler::Operand(source.reg()));
__ mov(source.reg(), compiler::Operand(destination.reg()));
__ mov(destination.reg(), compiler::Operand(IP));
} else if (source.IsRegister() && destination.IsStackSlot()) {
Exchange(source.reg(), destination.base_reg(),
destination.ToStackSlotOffset());
@ -1545,20 +1553,22 @@ void ParallelMoveResolver::EmitSwap(int index) {
}
}
void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
const Address& src) {
void ParallelMoveResolver::MoveMemoryToMemory(const compiler::Address& dst,
const compiler::Address& src) {
UNREACHABLE();
}
// Do not call or implement this function. Instead, use the form below that
// uses an offset from the frame pointer instead of an Address.
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
void ParallelMoveResolver::Exchange(Register reg,
const compiler::Address& mem) {
UNREACHABLE();
}
// Do not call or implement this function. Instead, use the form below that
// uses offsets from the frame pointer instead of Addresses.
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
void ParallelMoveResolver::Exchange(const compiler::Address& mem1,
const compiler::Address& mem2) {
UNREACHABLE();
}
@ -1566,7 +1576,7 @@ void ParallelMoveResolver::Exchange(Register reg,
Register base_reg,
intptr_t stack_offset) {
ScratchRegisterScope tmp(this, reg);
__ mov(tmp.reg(), Operand(reg));
__ mov(tmp.reg(), compiler::Operand(reg));
__ LoadFromOffset(kWord, reg, base_reg, stack_offset);
__ StoreToOffset(kWord, tmp.reg(), base_reg, stack_offset);
}
@ -1593,12 +1603,14 @@ void ParallelMoveResolver::RestoreScratch(Register reg) {
void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
DRegister dreg = EvenDRegisterOf(reg);
__ vstrd(dreg, Address(SP, -kDoubleSize, Address::PreIndex));
__ vstrd(dreg,
compiler::Address(SP, -kDoubleSize, compiler::Address::PreIndex));
}
void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
DRegister dreg = EvenDRegisterOf(reg);
__ vldrd(dreg, Address(SP, kDoubleSize, Address::PostIndex));
__ vldrd(dreg,
compiler::Address(SP, kDoubleSize, compiler::Address::PostIndex));
}
#undef __


@ -181,7 +181,7 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
intptr_t stub_ix) {
// Calls do not need stubs, they share a deoptimization trampoline.
ASSERT(reason() != ICData::kDeoptAtCall);
Assembler* assembler = compiler->assembler();
compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
__ Comment("%s", Name());
__ Bind(entry_label());
@ -190,7 +190,7 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
}
ASSERT(deopt_env() != NULL);
__ ldr(LR, Address(THR, Thread::deoptimize_entry_offset()));
__ ldr(LR, compiler::Address(THR, Thread::deoptimize_entry_offset()));
__ blr(LR);
set_pc_offset(assembler->CodeSize());
#undef __
@ -200,9 +200,9 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
Label* is_true,
Label* is_false) {
Label fall_through;
compiler::Label* is_true,
compiler::Label* is_false) {
compiler::Label fall_through;
__ CompareObject(bool_register, Object::null_object());
__ b(&fall_through, EQ);
__ CompareObject(bool_register, Bool::True());
@ -220,8 +220,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
Register instantiator_type_arguments_reg,
Register function_type_arguments_reg,
Register temp_reg,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
ASSERT(instance_reg == R0);
ASSERT(temp_reg == kNoRegister); // Unused on ARM64.
const SubtypeTestCache& type_test_cache =
@ -260,8 +260,8 @@ RawSubtypeTestCache*
FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InstantiatedTypeWithArgumentsTest");
ASSERT(type.IsInstantiated());
ASSERT(!type.IsFunctionType());
@ -321,8 +321,8 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
const GrowableArray<intptr_t>& class_ids,
Label* is_equal_lbl,
Label* is_not_equal_lbl) {
compiler::Label* is_equal_lbl,
compiler::Label* is_not_equal_lbl) {
for (intptr_t i = 0; i < class_ids.length(); i++) {
__ CompareImmediate(class_id_reg, class_ids[i]);
__ b(is_equal_lbl, EQ);
@ -338,8 +338,8 @@ void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InstantiatedTypeNoArgumentsTest");
ASSERT(type.IsInstantiated());
ASSERT(!type.IsFunctionType());
@ -403,12 +403,12 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
TokenPosition token_pos,
const Class& type_class,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("Subtype1TestCacheLookup");
const Register kInstanceReg = R0;
#if defined(DEBUG)
Label ok;
compiler::Label ok;
__ BranchIfNotSmi(kInstanceReg, &ok);
__ Breakpoint();
__ Bind(&ok);
@ -436,8 +436,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("UninstantiatedTypeTest");
const Register kInstanceReg = R0;
const Register kInstantiatorTypeArgumentsReg = R1;
@ -451,7 +451,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
const AbstractType& bound = AbstractType::Handle(type_param.bound());
// Get instantiator type args (high, R1) and function type args (low, R2).
__ ldp(R2, R1, Address(SP, 0 * kWordSize, Address::PairOffset));
__ ldp(R2, R1,
compiler::Address(SP, 0 * kWordSize, compiler::Address::PairOffset));
const Register kTypeArgumentsReg = type_param.IsClassTypeParameter()
? kInstantiatorTypeArgumentsReg
: kFunctionTypeArgumentsReg;
@ -470,7 +471,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
__ b(is_instance_lbl, EQ);
// For Smi check quickly against int and num interfaces.
Label not_smi;
compiler::Label not_smi;
__ BranchIfNotSmi(R0, &not_smi);
__ CompareObject(R3, Type::ZoneHandle(zone(), Type::IntType()));
__ b(is_instance_lbl, EQ);
@ -500,7 +501,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
__ BranchIfSmi(kInstanceReg, is_not_instance_lbl);
}
__ ldp(kFunctionTypeArgumentsReg, kInstantiatorTypeArgumentsReg,
Address(SP, 0 * kWordSize, Address::PairOffset));
compiler::Address(SP, 0 * kWordSize, compiler::Address::PairOffset));
// Uninstantiated type class is known at compile time, but the type
// arguments are determined at runtime by the instantiator.
return GenerateCallSubtypeTestStub(kTestTypeFourArgs, kInstanceReg,
@ -517,14 +518,14 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
const Register kInstanceReg = R0;
const Register kInstantiatorTypeArgumentsReg = R1;
const Register kFunctionTypeArgumentsReg = R2;
__ BranchIfSmi(kInstanceReg, is_not_instance_lbl);
__ ldp(kFunctionTypeArgumentsReg, kInstantiatorTypeArgumentsReg,
Address(SP, 0 * kWordSize, Address::PairOffset));
compiler::Address(SP, 0 * kWordSize, compiler::Address::PairOffset));
// Uninstantiated type class is known at compile time, but the type
// arguments are determined at runtime by the instantiator(s).
const Register kTempReg = kNoRegister;
@ -548,8 +549,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InlineInstanceof");
if (type.IsFunctionType()) {
@ -602,7 +603,7 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
const Register kInstantiatorTypeArgumentsReg = R1;
const Register kFunctionTypeArgumentsReg = R2;
__ PushPair(kFunctionTypeArgumentsReg, kInstantiatorTypeArgumentsReg);
Label is_instance, is_not_instance;
compiler::Label is_instance, is_not_instance;
// If type is instantiated and non-parameterized, we can inline code
// checking whether the tested instance is a Smi.
if (type.IsInstantiated()) {
@ -623,13 +624,13 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance);
// test_cache is null if there is no fall-through.
Label done;
compiler::Label done;
if (!test_cache.IsNull()) {
// Generate runtime call.
const Register kInstantiatorTypeArgumentsReg = R1;
const Register kFunctionTypeArgumentsReg = R2;
__ ldp(kFunctionTypeArgumentsReg, kInstantiatorTypeArgumentsReg,
Address(SP, 0 * kWordSize, Address::PairOffset));
compiler::Address(SP, 0 * kWordSize, compiler::Address::PairOffset));
__ PushObject(Object::null_object()); // Make room for the result.
__ Push(R0); // Push the instance.
__ PushObject(type); // Push the type.
@ -684,7 +685,7 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
GenerateAssertAssignableViaTypeTestingStub(token_pos, deopt_id, dst_type,
dst_name, locs);
} else {
Label is_assignable_fast, is_assignable, runtime_call;
compiler::Label is_assignable_fast, is_assignable, runtime_call;
// A null object is always assignable and is returned as result.
__ CompareObject(R0, Object::null_object());
@ -699,7 +700,7 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
__ Bind(&runtime_call);
__ ldp(kFunctionTypeArgumentsReg, kInstantiatorTypeArgumentsReg,
Address(SP, 0 * kWordSize, Address::PairOffset));
compiler::Address(SP, 0 * kWordSize, compiler::Address::PairOffset));
__ PushObject(Object::null_object()); // Make room for the result.
__ Push(R0); // Push the source object.
__ PushObject(dst_type); // Push the type of the destination.
@ -733,7 +734,7 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
const Register kDstTypeReg = R8;
const Register kScratchReg = R4;
Label done;
compiler::Label done;
GenerateAssertAssignableViaTypeTestingStub(
dst_type, dst_name, kInstanceReg, kInstantiatorTypeArgumentsReg,
@ -755,9 +756,9 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
ASSERT((sub_type_cache_index + 1) == dst_name_index);
ASSERT(__ constant_pool_allowed());
__ LoadField(R9,
FieldAddress(kDstTypeReg,
AbstractType::type_test_stub_entry_point_offset()));
__ LoadField(
R9, compiler::FieldAddress(
kDstTypeReg, AbstractType::type_test_stub_entry_point_offset()));
__ LoadWordFromPoolOffset(kSubtypeTestCacheReg, sub_type_cache_offset);
__ blr(R9);
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
@ -841,22 +842,23 @@ void FlowGraphCompiler::EmitFrameEntry() {
(!is_optimizing() || may_reoptimize())) {
__ Comment("Invocation Count Check");
const Register function_reg = R6;
__ ldr(function_reg, FieldAddress(CODE_REG, Code::owner_offset()));
__ ldr(function_reg,
compiler::FieldAddress(CODE_REG, Code::owner_offset()));
__ LoadFieldFromOffset(R7, function_reg, Function::usage_counter_offset(),
kWord);
// Reoptimization of an optimized function is triggered by counting in
// IC stubs, but not at the entry of the function.
if (!is_optimizing()) {
__ add(R7, R7, Operand(1));
__ add(R7, R7, compiler::Operand(1));
__ StoreFieldToOffset(R7, function_reg, Function::usage_counter_offset(),
kWord);
}
__ CompareImmediate(R7, GetOptimizationThreshold());
ASSERT(function_reg == R6);
Label dont_optimize;
compiler::Label dont_optimize;
__ b(&dont_optimize, LT);
__ ldr(TMP, Address(THR, Thread::optimize_entry_offset()));
__ ldr(TMP, compiler::Address(THR, Thread::optimize_entry_offset()));
__ br(TMP);
__ Bind(&dont_optimize);
}
@ -1006,7 +1008,7 @@ void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
__ Comment("Edge counter");
__ LoadObject(R0, edge_counters_array_);
__ LoadFieldFromOffset(TMP, R0, Array::element_offset(edge_id));
__ add(TMP, TMP, Operand(Smi::RawValue(1)));
__ add(TMP, TMP, compiler::Operand(Smi::RawValue(1)));
__ StoreFieldToOffset(TMP, R0, Array::element_offset(edge_id));
}
@ -1043,7 +1045,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
__ LoadFromOffset(R0, SP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize);
ObjectPoolBuilder& op = __ object_pool_builder();
compiler::ObjectPoolBuilder& op = __ object_pool_builder();
const intptr_t ic_data_index =
op.AddObject(ic_data, ObjectPool::Patchability::kPatchable);
const intptr_t stub_index =
@ -1055,7 +1057,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
entry_kind == Code::EntryKind::kNormal
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ ldr(LR, FieldAddress(CODE_REG, entry_point_offset));
__ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset));
__ blr(LR);
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kIcCall, locs);
__ Drop(ic_data.CountWithTypeArgs());
@ -1080,7 +1082,8 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
__ LoadFromOffset(R0, SP, (args_desc.Count() - 1) * kWordSize);
__ LoadObject(R5, cache);
__ ldr(LR, Address(THR, Thread::megamorphic_call_checked_entry_offset()));
__ ldr(LR, compiler::Address(
THR, Thread::megamorphic_call_checked_entry_offset()));
__ blr(LR);
RecordSafepoint(locs, slow_path_argument_count);
@ -1117,7 +1120,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
const UnlinkedCall& data =
UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());
ObjectPoolBuilder& op = __ object_pool_builder();
compiler::ObjectPoolBuilder& op = __ object_pool_builder();
__ Comment("InstanceCallAOT");
__ LoadFromOffset(R0, SP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize);
@ -1136,8 +1139,9 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
} else {
__ LoadDoubleWordFromPoolOffset(R5, CODE_REG,
ObjectPool::element_offset(data_index));
__ ldr(LR, FieldAddress(CODE_REG, Code::entry_point_offset(
Code::EntryKind::kMonomorphic)));
__ ldr(LR, compiler::FieldAddress(
CODE_REG,
Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
}
__ blr(LR);
@ -1254,7 +1258,7 @@ void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
// TODO(zerny): clobber non-live temporary FPU registers.
if (tmp.IsRegister() &&
!locs->live_registers()->ContainsRegister(tmp.reg())) {
__ movz(tmp.reg(), Immediate(0xf7), 0);
__ movz(tmp.reg(), compiler::Immediate(0xf7), 0);
}
}
}
@ -1273,7 +1277,8 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
__ LoadObject(R4, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) {
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
bool if_smi) {
if (if_smi) {
__ BranchIfSmi(R0, label);
} else {
@ -1289,8 +1294,8 @@ void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
#undef __
#define __ assembler->
int FlowGraphCompiler::EmitTestAndCallCheckCid(Assembler* assembler,
Label* label,
int FlowGraphCompiler::EmitTestAndCallCheckCid(compiler::Assembler* assembler,
compiler::Label* label,
Register class_id_reg,
const CidRange& range,
int bias,
@ -1486,20 +1491,22 @@ void ParallelMoveResolver::EmitSwap(int index) {
}
}
void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
const Address& src) {
void ParallelMoveResolver::MoveMemoryToMemory(const compiler::Address& dst,
const compiler::Address& src) {
UNREACHABLE();
}
// Do not call or implement this function. Instead, use the form below that
// uses an offset from the frame pointer instead of an Address.
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
void ParallelMoveResolver::Exchange(Register reg,
const compiler::Address& mem) {
UNREACHABLE();
}
// Do not call or implement this function. Instead, use the form below that
// uses offsets from the frame pointer instead of Addresses.
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
void ParallelMoveResolver::Exchange(const compiler::Address& mem1,
const compiler::Address& mem2) {
UNREACHABLE();
}


@ -428,20 +428,22 @@ void ParallelMoveResolver::EmitSwap(int index) {
}
}
void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
const Address& src) {
void ParallelMoveResolver::MoveMemoryToMemory(const compiler::Address& dst,
const compiler::Address& src) {
UNREACHABLE();
}
// Do not call or implement this function. Instead, use the form below that
// uses an offset from the frame pointer instead of an Address.
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
void ParallelMoveResolver::Exchange(Register reg,
const compiler::Address& mem) {
UNREACHABLE();
}
// Do not call or implement this function. Instead, use the form below that
// uses offsets from the frame pointer instead of Addresses.
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
void ParallelMoveResolver::Exchange(const compiler::Address& mem1,
const compiler::Address& mem2) {
UNREACHABLE();
}


@ -158,7 +158,7 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
intptr_t stub_ix) {
// Calls do not need stubs, they share a deoptimization trampoline.
ASSERT(reason() != ICData::kDeoptAtCall);
Assembler* assembler = compiler->assembler();
compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
__ Comment("%s", Name());
__ Bind(entry_label());
@ -178,13 +178,13 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
Label* is_true,
Label* is_false) {
const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
Label fall_through;
compiler::Label* is_true,
compiler::Label* is_false) {
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Label fall_through;
__ cmpl(bool_register, raw_null);
__ j(EQUAL, &fall_through, Assembler::kNearJump);
__ j(EQUAL, &fall_through, compiler::Assembler::kNearJump);
__ CompareObject(bool_register, Bool::True());
__ j(EQUAL, is_true);
__ jmp(is_false);
@ -198,12 +198,12 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
Register instantiator_type_arguments_reg,
Register function_type_arguments_reg,
Register temp_reg,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
const SubtypeTestCache& type_test_cache =
SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New());
const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
__ LoadObject(temp_reg, type_test_cache);
__ pushl(temp_reg); // Subtype test cache.
__ pushl(instance_reg); // Instance.
@ -249,8 +249,8 @@ RawSubtypeTestCache*
FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InstantiatedTypeWithArgumentsTest");
ASSERT(type.IsInstantiated());
ASSERT(!type.IsFunctionType());
@ -259,7 +259,7 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
const Register kInstanceReg = EAX;
const Type& smi_type = Type::Handle(zone(), Type::SmiType());
const bool smi_is_ok = smi_type.IsSubtypeOf(type, Heap::kOld);
__ testl(kInstanceReg, Immediate(kSmiTagMask));
__ testl(kInstanceReg, compiler::Immediate(kSmiTagMask));
if (smi_is_ok) {
// Fast case for type = FutureOr<int/num/top-type>.
__ j(ZERO, is_instance_lbl);
@ -277,7 +277,7 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
const Register kClassIdReg = ECX;
// dynamic type argument, check only classes.
__ LoadClassId(kClassIdReg, kInstanceReg);
__ cmpl(kClassIdReg, Immediate(type_class.id()));
__ cmpl(kClassIdReg, compiler::Immediate(type_class.id()));
__ j(EQUAL, is_instance_lbl);
// List is a very common case.
if (IsListClass(type_class)) {
@ -312,10 +312,10 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
const GrowableArray<intptr_t>& class_ids,
Label* is_equal_lbl,
Label* is_not_equal_lbl) {
compiler::Label* is_equal_lbl,
compiler::Label* is_not_equal_lbl) {
for (intptr_t i = 0; i < class_ids.length(); i++) {
__ cmpl(class_id_reg, Immediate(class_ids[i]));
__ cmpl(class_id_reg, compiler::Immediate(class_ids[i]));
__ j(EQUAL, is_equal_lbl);
}
__ jmp(is_not_equal_lbl);
@ -329,8 +329,8 @@ void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InstantiatedTypeNoArgumentsTest");
ASSERT(type.IsInstantiated());
ASSERT(!type.IsFunctionType());
@ -338,7 +338,7 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
ASSERT(type_class.NumTypeArguments() == 0);
const Register kInstanceReg = EAX;
__ testl(kInstanceReg, Immediate(kSmiTagMask));
__ testl(kInstanceReg, compiler::Immediate(kSmiTagMask));
// If instance is Smi, check directly.
const Class& smi_class = Class::Handle(zone(), Smi::Class());
if (Class::IsSubtypeOf(smi_class, Object::null_type_arguments(), type_class,
@ -352,7 +352,7 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
__ LoadClassId(kClassIdReg, kInstanceReg);
// Bool interface can be implemented only by core class Bool.
if (type.IsBoolType()) {
__ cmpl(kClassIdReg, Immediate(kBoolCid));
__ cmpl(kClassIdReg, compiler::Immediate(kBoolCid));
__ j(EQUAL, is_instance_lbl);
__ jmp(is_not_instance_lbl);
return false;
@ -370,7 +370,7 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
}
if (type.IsDartFunctionType()) {
// Check if instance is a closure.
__ cmpl(kClassIdReg, Immediate(kClosureCid));
__ cmpl(kClassIdReg, compiler::Immediate(kClosureCid));
__ j(EQUAL, is_instance_lbl);
return true; // Fall through
}
@ -395,12 +395,12 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
TokenPosition token_pos,
const Class& type_class,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("Subtype1TestCacheLookup");
const Register kInstanceReg = EAX;
#if defined(DEBUG)
Label ok;
compiler::Label ok;
__ BranchIfNotSmi(kInstanceReg, &ok);
__ Breakpoint();
__ Bind(&ok);
@ -409,9 +409,9 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
__ LoadClassById(ECX, EDI);
// ECX: instance class.
// Check immediate superclass equality.
__ movl(EDI, FieldAddress(ECX, Class::super_type_offset()));
__ movl(EDI, FieldAddress(EDI, Type::type_class_id_offset()));
__ cmpl(EDI, Immediate(Smi::RawValue(type_class.id())));
__ movl(EDI, compiler::FieldAddress(ECX, Class::super_type_offset()));
__ movl(EDI, compiler::FieldAddress(EDI, Type::type_class_id_offset()));
__ cmpl(EDI, compiler::Immediate(Smi::RawValue(type_class.id())));
__ j(EQUAL, is_instance_lbl);
const Register kInstantiatorTypeArgumentsReg = kNoRegister;
@ -429,8 +429,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("UninstantiatedTypeTest");
const Register kInstanceReg = EAX;
const Register kInstantiatorTypeArgumentsReg = EDX;
@ -439,14 +439,16 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
ASSERT(!type.IsInstantiated());
ASSERT(!type.IsFunctionType());
// Skip check if destination is a dynamic type.
const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
if (type.IsTypeParameter()) {
const TypeParameter& type_param = TypeParameter::Cast(type);
const AbstractType& bound = AbstractType::Handle(type_param.bound());
__ movl(EDX, Address(ESP, 1 * kWordSize)); // Get instantiator type args.
__ movl(ECX, Address(ESP, 0 * kWordSize)); // Get function type args.
__ movl(EDX, compiler::Address(
ESP, 1 * kWordSize)); // Get instantiator type args.
__ movl(ECX,
compiler::Address(ESP, 0 * kWordSize)); // Get function type args.
// EDX: instantiator type arguments.
// ECX: function type arguments.
const Register kTypeArgumentsReg =
@ -454,8 +456,9 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
// Check if type arguments are null, i.e. equivalent to vector of dynamic.
__ cmpl(kTypeArgumentsReg, raw_null);
__ j(EQUAL, is_instance_lbl);
__ movl(EDI, FieldAddress(kTypeArgumentsReg, TypeArguments::type_at_offset(
type_param.index())));
__ movl(EDI, compiler::FieldAddress(
kTypeArgumentsReg,
TypeArguments::type_at_offset(type_param.index())));
// EDI: concrete type of type.
// Check if type argument is dynamic, Object, or void.
__ CompareObject(EDI, Object::dynamic_type());
@ -466,9 +469,9 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
__ j(EQUAL, is_instance_lbl);
// For Smi check quickly against int and num interfaces.
Label not_smi;
__ testl(EAX, Immediate(kSmiTagMask)); // Value is Smi?
__ j(NOT_ZERO, &not_smi, Assembler::kNearJump);
compiler::Label not_smi;
__ testl(EAX, compiler::Immediate(kSmiTagMask)); // Value is Smi?
__ j(NOT_ZERO, &not_smi, compiler::Assembler::kNearJump);
__ CompareObject(EDI, Type::ZoneHandle(zone(), Type::IntType()));
__ j(EQUAL, is_instance_lbl);
__ CompareObject(EDI, Type::ZoneHandle(zone(), Type::Number()));
@ -493,11 +496,13 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
if (type.IsType()) {
// Smi is FutureOr<T>, when T is a top type or int or num.
if (!Class::Handle(type.type_class()).IsFutureOrClass()) {
__ testl(kInstanceReg, Immediate(kSmiTagMask)); // Is instance Smi?
__ testl(kInstanceReg,
compiler::Immediate(kSmiTagMask)); // Is instance Smi?
__ j(ZERO, is_not_instance_lbl);
}
__ movl(kInstantiatorTypeArgumentsReg, Address(ESP, 1 * kWordSize));
__ movl(kFunctionTypeArgumentsReg, Address(ESP, 0 * kWordSize));
__ movl(kInstantiatorTypeArgumentsReg,
compiler::Address(ESP, 1 * kWordSize));
__ movl(kFunctionTypeArgumentsReg, compiler::Address(ESP, 0 * kWordSize));
// Uninstantiated type class is known at compile time, but the type
// arguments are determined at runtime by the instantiator(s).
return GenerateCallSubtypeTestStub(kTestTypeFourArgs, kInstanceReg,
@ -514,14 +519,14 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
const Register kInstanceReg = EAX;
const Register kInstantiatorTypeArgumentsReg = EDX;
const Register kFunctionTypeArgumentsReg = ECX;
__ Comment("FunctionTypeTest");
__ testl(kInstanceReg, Immediate(kSmiTagMask));
__ testl(kInstanceReg, compiler::Immediate(kSmiTagMask));
__ j(ZERO, is_not_instance_lbl);
// Uninstantiated type class is known at compile time, but the type
// arguments are determined at runtime by the instantiator(s).
@ -546,8 +551,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InlineInstanceof");
if (type.IsFunctionType()) {
@ -601,9 +606,9 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
__ pushl(EDX); // Store instantiator type arguments.
__ pushl(ECX); // Store function type arguments.
const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
Label is_instance, is_not_instance;
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Label is_instance, is_not_instance;
// If type is instantiated and non-parameterized, we can inline code
// checking whether the tested instance is a Smi.
if (type.IsInstantiated()) {
@ -624,11 +629,13 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance);
// test_cache is null if there is no fall-through.
Label done;
compiler::Label done;
if (!test_cache.IsNull()) {
// Generate runtime call.
__ movl(EDX, Address(ESP, 1 * kWordSize)); // Get instantiator type args.
__ movl(ECX, Address(ESP, 0 * kWordSize)); // Get function type args.
__ movl(EDX, compiler::Address(
ESP, 1 * kWordSize)); // Get instantiator type args.
__ movl(ECX,
compiler::Address(ESP, 0 * kWordSize)); // Get function type args.
__ PushObject(Object::null_object()); // Make room for the result.
__ pushl(EAX); // Push the instance.
__ PushObject(type); // Push the type.
@ -641,11 +648,11 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
// instanceof runtime call will be left as the result of the operation.
__ Drop(5);
__ popl(EAX);
__ jmp(&done, Assembler::kNearJump);
__ jmp(&done, compiler::Assembler::kNearJump);
}
__ Bind(&is_not_instance);
__ LoadObject(EAX, Bool::Get(false));
__ jmp(&done, Assembler::kNearJump);
__ jmp(&done, compiler::Assembler::kNearJump);
__ Bind(&is_instance);
__ LoadObject(EAX, Bool::Get(true));
@ -680,9 +687,9 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
__ pushl(EDX); // Store instantiator type arguments.
__ pushl(ECX); // Store function type arguments.
// A null object is always assignable and is returned as result.
const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
Label is_assignable, runtime_call;
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Label is_assignable, runtime_call;
__ cmpl(EAX, raw_null);
__ j(EQUAL, &is_assignable);
@ -692,8 +699,10 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
&runtime_call);
__ Bind(&runtime_call);
__ movl(EDX, Address(ESP, 1 * kWordSize)); // Get instantiator type args.
__ movl(ECX, Address(ESP, 0 * kWordSize)); // Get function type args.
__ movl(EDX, compiler::Address(
ESP, 1 * kWordSize)); // Get instantiator type args.
__ movl(ECX,
compiler::Address(ESP, 0 * kWordSize)); // Get function type args.
__ PushObject(Object::null_object()); // Make room for the result.
__ pushl(EAX); // Push the source object.
__ PushObject(dst_type); // Push the type of the destination.
@ -737,8 +746,8 @@ void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
// +1 : receiver.
// Sequence node has one return node, its input is load field node.
__ Comment("Intrinsic Getter");
__ movl(EAX, Address(ESP, 1 * kWordSize));
__ movl(EAX, FieldAddress(EAX, offset));
__ movl(EAX, compiler::Address(ESP, 1 * kWordSize));
__ movl(EAX, compiler::FieldAddress(EAX, offset));
__ ret();
}
@ -748,11 +757,11 @@ void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
// +2 : receiver.
// Sequence node has one store node and one return NULL node.
__ Comment("Intrinsic Setter");
__ movl(EAX, Address(ESP, 2 * kWordSize)); // Receiver.
__ movl(EBX, Address(ESP, 1 * kWordSize)); // Value.
__ StoreIntoObject(EAX, FieldAddress(EAX, offset), EBX);
const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
__ movl(EAX, compiler::Address(ESP, 2 * kWordSize)); // Receiver.
__ movl(EBX, compiler::Address(ESP, 1 * kWordSize)); // Value.
__ StoreIntoObject(EAX, compiler::FieldAddress(EAX, offset), EBX);
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
__ movl(EAX, raw_null);
__ ret();
}
@ -770,14 +779,16 @@ void FlowGraphCompiler::EmitFrameEntry() {
// Reoptimization of an optimized function is triggered by counting in
// IC stubs, but not at the entry of the function.
if (!is_optimizing()) {
__ incl(FieldAddress(function_reg, Function::usage_counter_offset()));
__ incl(compiler::FieldAddress(function_reg,
Function::usage_counter_offset()));
}
__ cmpl(FieldAddress(function_reg, Function::usage_counter_offset()),
Immediate(GetOptimizationThreshold()));
__ cmpl(
compiler::FieldAddress(function_reg, Function::usage_counter_offset()),
compiler::Immediate(GetOptimizationThreshold()));
ASSERT(function_reg == EBX);
Label dont_optimize;
__ j(LESS, &dont_optimize, Assembler::kNearJump);
__ jmp(Address(THR, Thread::optimize_entry_offset()));
compiler::Label dont_optimize;
__ j(LESS, &dont_optimize, compiler::Assembler::kNearJump);
__ jmp(compiler::Address(THR, Thread::optimize_entry_offset()));
__ Bind(&dont_optimize);
}
__ Comment("Enter frame");
@ -806,15 +817,15 @@ void FlowGraphCompiler::EmitPrologue() {
__ Comment("Initialize spill slots");
if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
__ movl(EAX, raw_null);
}
for (intptr_t i = 0; i < num_locals; ++i) {
const intptr_t slot_index =
compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : EAX;
__ movl(Address(EBP, slot_index * kWordSize), value_reg);
__ movl(compiler::Address(EBP, slot_index * kWordSize), value_reg);
}
}
@ -897,7 +908,8 @@ void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
ASSERT(!edge_counters_array_.IsNull());
__ Comment("Edge counter");
__ LoadObject(EAX, edge_counters_array_);
__ IncrementSmiField(FieldAddress(EAX, Array::element_offset(edge_id)), 1);
__ IncrementSmiField(
compiler::FieldAddress(EAX, Array::element_offset(edge_id)), 1);
}
void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
@ -916,7 +928,8 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
// Pass the function explicitly, it is used in IC stub.
__ LoadObject(EAX, parsed_function().function());
// Load receiver into EBX.
__ movl(EBX, Address(ESP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
__ movl(EBX, compiler::Address(
ESP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
__ LoadObject(ECX, ic_data);
GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs);
__ Drop(ic_data.CountWithTypeArgs());
@ -932,14 +945,15 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(ic_data.arguments_descriptor()).Length() > 0);
// Load receiver into EBX.
__ movl(EBX, Address(ESP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
__ movl(EBX, compiler::Address(
ESP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
__ LoadObject(ECX, ic_data, true);
__ LoadObject(CODE_REG, stub, true);
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ call(FieldAddress(CODE_REG, entry_point_offset));
__ call(compiler::FieldAddress(CODE_REG, entry_point_offset));
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kIcCall, locs);
__ Drop(ic_data.CountWithTypeArgs());
}
@ -960,9 +974,10 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
__ Comment("MegamorphicCall");
// Load receiver into EBX.
__ movl(EBX, Address(ESP, (args_desc.Count() - 1) * kWordSize));
__ movl(EBX, compiler::Address(ESP, (args_desc.Count() - 1) * kWordSize));
__ LoadObject(ECX, cache);
__ call(Address(THR, Thread::megamorphic_call_checked_entry_offset()));
__ call(
compiler::Address(THR, Thread::megamorphic_call_checked_entry_offset()));
AddCurrentDescriptor(RawPcDescriptors::kOther, DeoptId::kNone, token_pos);
RecordSafepoint(locs, slow_path_argument_count);
@ -1076,14 +1091,14 @@ void FlowGraphCompiler::SaveLiveRegisters(LocationSummary* locs) {
// TODO(vegorov): consider saving only caller save (volatile) registers.
const intptr_t xmm_regs_count = locs->live_registers()->FpuRegisterCount();
if (xmm_regs_count > 0) {
__ subl(ESP, Immediate(xmm_regs_count * kFpuRegisterSize));
__ subl(ESP, compiler::Immediate(xmm_regs_count * kFpuRegisterSize));
// Store XMM registers with the lowest register number at the lowest
// address.
intptr_t offset = 0;
for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
XmmRegister xmm_reg = static_cast<XmmRegister>(i);
if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
__ movups(Address(ESP, offset), xmm_reg);
__ movups(compiler::Address(ESP, offset), xmm_reg);
offset += kFpuRegisterSize;
}
}
@ -1115,12 +1130,12 @@ void FlowGraphCompiler::RestoreLiveRegisters(LocationSummary* locs) {
for (intptr_t i = 0; i < kNumberOfXmmRegisters; ++i) {
XmmRegister xmm_reg = static_cast<XmmRegister>(i);
if (locs->live_registers()->ContainsFpuRegister(xmm_reg)) {
__ movups(xmm_reg, Address(ESP, offset));
__ movups(xmm_reg, compiler::Address(ESP, offset));
offset += kFpuRegisterSize;
}
}
ASSERT(offset == (xmm_regs_count * kFpuRegisterSize));
__ addl(ESP, Immediate(offset));
__ addl(ESP, compiler::Immediate(offset));
}
}
@ -1132,7 +1147,7 @@ void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
// TODO(zerny): clobber non-live temporary FPU registers.
if (tmp.IsRegister() &&
!locs->live_registers()->ContainsRegister(tmp.reg())) {
__ movl(tmp.reg(), Immediate(0xf7));
__ movl(tmp.reg(), compiler::Immediate(0xf7));
}
}
}
@ -1147,12 +1162,14 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
const Array& arguments_descriptor) {
__ Comment("EmitTestAndCall");
// Load receiver into EAX.
__ movl(EAX, Address(ESP, (count_without_type_args - 1) * kWordSize));
__ movl(EAX,
compiler::Address(ESP, (count_without_type_args - 1) * kWordSize));
__ LoadObject(EDX, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) {
__ testl(EAX, Immediate(kSmiTagMask));
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
bool if_smi) {
__ testl(EAX, compiler::Immediate(kSmiTagMask));
// Jump if receiver is (not) Smi.
__ j(if_smi ? ZERO : NOT_ZERO, label);
}
@ -1165,20 +1182,20 @@ void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
#undef __
#define __ assembler->
int FlowGraphCompiler::EmitTestAndCallCheckCid(Assembler* assembler,
Label* label,
int FlowGraphCompiler::EmitTestAndCallCheckCid(compiler::Assembler* assembler,
compiler::Label* label,
Register class_id_reg,
const CidRange& range,
int bias,
bool jump_on_miss) {
intptr_t cid_start = range.cid_start;
if (range.IsSingleCid()) {
__ cmpl(class_id_reg, Immediate(cid_start - bias));
__ cmpl(class_id_reg, compiler::Immediate(cid_start - bias));
__ j(jump_on_miss ? NOT_EQUAL : EQUAL, label);
} else {
__ addl(class_id_reg, Immediate(bias - cid_start));
__ addl(class_id_reg, compiler::Immediate(bias - cid_start));
bias = cid_start;
__ cmpl(class_id_reg, Immediate(range.Extent()));
__ cmpl(class_id_reg, compiler::Immediate(range.Extent()));
__ j(jump_on_miss ? ABOVE : BELOW_EQUAL, label); // Unsigned higher.
}
return bias;
@ -1287,9 +1304,9 @@ void ParallelMoveResolver::EmitSwap(int index) {
destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
XmmRegister reg =
source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
const Address& slot_address = source.IsFpuRegister()
? LocationToStackSlotAddress(destination)
: LocationToStackSlotAddress(source);
const compiler::Address& slot_address =
source.IsFpuRegister() ? LocationToStackSlotAddress(destination)
: LocationToStackSlotAddress(source);
if (double_width) {
__ movsd(FpuTMP, slot_address);
@ -1300,8 +1317,9 @@ void ParallelMoveResolver::EmitSwap(int index) {
}
__ movaps(reg, FpuTMP);
} else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
const Address& source_slot_address = LocationToStackSlotAddress(source);
const Address& destination_slot_address =
const compiler::Address& source_slot_address =
LocationToStackSlotAddress(source);
const compiler::Address& destination_slot_address =
LocationToStackSlotAddress(destination);
ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
@ -1310,8 +1328,9 @@ void ParallelMoveResolver::EmitSwap(int index) {
__ movsd(destination_slot_address, FpuTMP);
__ movsd(source_slot_address, ensure_scratch.reg());
} else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
const Address& source_slot_address = LocationToStackSlotAddress(source);
const Address& destination_slot_address =
const compiler::Address& source_slot_address =
LocationToStackSlotAddress(source);
const compiler::Address& destination_slot_address =
LocationToStackSlotAddress(destination);
ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
@ -1340,20 +1359,22 @@ void ParallelMoveResolver::EmitSwap(int index) {
}
}
void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
const Address& src) {
void ParallelMoveResolver::MoveMemoryToMemory(const compiler::Address& dst,
const compiler::Address& src) {
ScratchRegisterScope ensure_scratch(this, kNoRegister);
__ MoveMemoryToMemory(dst, src, ensure_scratch.reg());
}
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
void ParallelMoveResolver::Exchange(Register reg,
const compiler::Address& mem) {
ScratchRegisterScope ensure_scratch(this, reg);
__ movl(ensure_scratch.reg(), mem);
__ movl(mem, reg);
__ movl(reg, ensure_scratch.reg());
}
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
void ParallelMoveResolver::Exchange(const compiler::Address& mem1,
const compiler::Address& mem2) {
ScratchRegisterScope ensure_scratch1(this, kNoRegister);
ScratchRegisterScope ensure_scratch2(this, ensure_scratch1.reg());
__ movl(ensure_scratch1.reg(), mem1);
@ -1384,13 +1405,13 @@ void ParallelMoveResolver::RestoreScratch(Register reg) {
}
void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
__ subl(ESP, Immediate(kFpuRegisterSize));
__ movups(Address(ESP, 0), reg);
__ subl(ESP, compiler::Immediate(kFpuRegisterSize));
__ movups(compiler::Address(ESP, 0), reg);
}
void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
__ movups(reg, Address(ESP, 0));
__ addl(ESP, Immediate(kFpuRegisterSize));
__ movups(reg, compiler::Address(ESP, 0));
__ addl(ESP, compiler::Immediate(kFpuRegisterSize));
}
#undef __


@ -181,7 +181,7 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
intptr_t stub_ix) {
// Calls do not need stubs, they share a deoptimization trampoline.
ASSERT(reason() != ICData::kDeoptAtCall);
Assembler* assembler = compiler->assembler();
compiler::Assembler* assembler = compiler->assembler();
#define __ assembler->
__ Comment("%s", Name());
__ Bind(entry_label());
@ -190,7 +190,7 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
}
ASSERT(deopt_env() != NULL);
__ call(Address(THR, Thread::deoptimize_entry_offset()));
__ call(compiler::Address(THR, Thread::deoptimize_entry_offset()));
set_pc_offset(assembler->CodeSize());
__ int3();
#undef __
@ -200,11 +200,11 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
// Fall through if bool_register contains null.
void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
Label* is_true,
Label* is_false) {
Label fall_through;
compiler::Label* is_true,
compiler::Label* is_false) {
compiler::Label fall_through;
__ CompareObject(bool_register, Object::null_object());
__ j(EQUAL, &fall_through, Assembler::kNearJump);
__ j(EQUAL, &fall_through, compiler::Assembler::kNearJump);
__ CompareObject(bool_register, Bool::True());
__ j(EQUAL, is_true);
__ jmp(is_false);
@ -226,8 +226,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
Register instantiator_type_arguments_reg,
Register function_type_arguments_reg,
Register temp_reg,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
ASSERT(temp_reg == kNoRegister);
const SubtypeTestCache& type_test_cache =
SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New());
@ -265,8 +265,8 @@ RawSubtypeTestCache*
FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InstantiatedTypeWithArgumentsTest");
ASSERT(type.IsInstantiated());
ASSERT(!type.IsFunctionType());
@ -275,7 +275,7 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
const Register kInstanceReg = RAX;
const Type& smi_type = Type::Handle(zone(), Type::SmiType());
const bool smi_is_ok = smi_type.IsSubtypeOf(type, Heap::kOld);
__ testq(kInstanceReg, Immediate(kSmiTagMask));
__ testq(kInstanceReg, compiler::Immediate(kSmiTagMask));
if (smi_is_ok) {
// Fast case for type = FutureOr<int/num/top-type>.
__ j(ZERO, is_instance_lbl);
@ -294,7 +294,7 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
const Register kClassIdReg = R10;
// dynamic type argument, check only classes.
__ LoadClassId(kClassIdReg, kInstanceReg);
__ cmpl(kClassIdReg, Immediate(type_class.id()));
__ cmpl(kClassIdReg, compiler::Immediate(type_class.id()));
__ j(EQUAL, is_instance_lbl);
// List is a very common case.
if (IsListClass(type_class)) {
@ -331,10 +331,10 @@ FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
const GrowableArray<intptr_t>& class_ids,
Label* is_equal_lbl,
Label* is_not_equal_lbl) {
compiler::Label* is_equal_lbl,
compiler::Label* is_not_equal_lbl) {
for (intptr_t i = 0; i < class_ids.length(); i++) {
__ cmpl(class_id_reg, Immediate(class_ids[i]));
__ cmpl(class_id_reg, compiler::Immediate(class_ids[i]));
__ j(EQUAL, is_equal_lbl);
}
__ jmp(is_not_equal_lbl);
@ -352,8 +352,8 @@ void FlowGraphCompiler::CheckClassIds(Register class_id_reg,
bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InstantiatedTypeNoArgumentsTest");
ASSERT(type.IsInstantiated());
ASSERT(!type.IsFunctionType());
@ -361,7 +361,7 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
ASSERT(type_class.NumTypeArguments() == 0);
const Register kInstanceReg = RAX;
__ testq(kInstanceReg, Immediate(kSmiTagMask));
__ testq(kInstanceReg, compiler::Immediate(kSmiTagMask));
// If instance is Smi, check directly.
const Class& smi_class = Class::Handle(zone(), Smi::Class());
if (Class::IsSubtypeOf(smi_class, Object::null_type_arguments(), type_class,
@ -375,7 +375,7 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
__ LoadClassId(kClassIdReg, kInstanceReg);
// Bool interface can be implemented only by core class Bool.
if (type.IsBoolType()) {
__ cmpl(kClassIdReg, Immediate(kBoolCid));
__ cmpl(kClassIdReg, compiler::Immediate(kBoolCid));
__ j(EQUAL, is_instance_lbl);
__ jmp(is_not_instance_lbl);
return false;
@ -393,7 +393,7 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
}
if (type.IsDartFunctionType()) {
// Check if instance is a closure.
__ cmpq(kClassIdReg, Immediate(kClosureCid));
__ cmpq(kClassIdReg, compiler::Immediate(kClosureCid));
__ j(EQUAL, is_instance_lbl);
return true;
}
@ -422,12 +422,12 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
TokenPosition token_pos,
const Class& type_class,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("Subtype1TestCacheLookup");
const Register kInstanceReg = RAX;
#if defined(DEBUG)
Label ok;
compiler::Label ok;
__ BranchIfNotSmi(kInstanceReg, &ok);
__ Breakpoint();
__ Bind(&ok);
@ -436,9 +436,9 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
__ LoadClassById(R10, TMP);
// R10: instance class.
// Check immediate superclass equality.
__ movq(R13, FieldAddress(R10, Class::super_type_offset()));
__ movq(R13, FieldAddress(R13, Type::type_class_id_offset()));
__ CompareImmediate(R13, Immediate(Smi::RawValue(type_class.id())));
__ movq(R13, compiler::FieldAddress(R10, Class::super_type_offset()));
__ movq(R13, compiler::FieldAddress(R13, Type::type_class_id_offset()));
__ CompareImmediate(R13, compiler::Immediate(Smi::RawValue(type_class.id())));
__ j(EQUAL, is_instance_lbl);
const Register kInstantiatorTypeArgumentsReg = kNoRegister;
@ -461,8 +461,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
const Register kInstanceReg = RAX;
const Register kInstantiatorTypeArgumentsReg = RDX;
const Register kFunctionTypeArgumentsReg = RCX;
@ -482,8 +482,9 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
// Check if type arguments are null, i.e. equivalent to vector of dynamic.
__ CompareObject(kTypeArgumentsReg, Object::null_object());
__ j(EQUAL, is_instance_lbl);
__ movq(RDI, FieldAddress(kTypeArgumentsReg, TypeArguments::type_at_offset(
type_param.index())));
__ movq(RDI, compiler::FieldAddress(
kTypeArgumentsReg,
TypeArguments::type_at_offset(type_param.index())));
// RDI: Concrete type of type.
// Check if type argument is dynamic, Object, or void.
__ CompareObject(RDI, Object::dynamic_type());
@ -495,9 +496,9 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
__ j(EQUAL, is_instance_lbl);
// For Smi check quickly against int and num interfaces.
Label not_smi;
__ testq(RAX, Immediate(kSmiTagMask)); // Value is Smi?
__ j(NOT_ZERO, &not_smi, Assembler::kNearJump);
compiler::Label not_smi;
__ testq(RAX, compiler::Immediate(kSmiTagMask)); // Value is Smi?
__ j(NOT_ZERO, &not_smi, compiler::Assembler::kNearJump);
__ CompareObject(RDI, Type::ZoneHandle(zone(), Type::IntType()));
__ j(EQUAL, is_instance_lbl);
__ CompareObject(RDI, Type::ZoneHandle(zone(), Type::Number()));
@ -523,7 +524,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
if (type.IsType()) {
// Smi is FutureOr<T>, when T is a top type or int or num.
if (!Class::Handle(type.type_class()).IsFutureOrClass()) {
__ testq(kInstanceReg, Immediate(kSmiTagMask)); // Is instance Smi?
__ testq(kInstanceReg,
compiler::Immediate(kSmiTagMask)); // Is instance Smi?
__ j(ZERO, is_not_instance_lbl);
}
// Uninstantiated type class is known at compile time, but the type
@ -542,15 +544,15 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
const Register kInstanceReg = RAX;
const Register kInstantiatorTypeArgumentsReg = RDX;
const Register kFunctionTypeArgumentsReg = RCX;
const Register kTempReg = kNoRegister;
__ Comment("FunctionTypeTest");
__ testq(kInstanceReg, Immediate(kSmiTagMask));
__ testq(kInstanceReg, compiler::Immediate(kSmiTagMask));
__ j(ZERO, is_not_instance_lbl);
return GenerateCallSubtypeTestStub(kTestTypeSixArgs, kInstanceReg,
kInstantiatorTypeArgumentsReg,
@ -571,8 +573,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
TokenPosition token_pos,
const AbstractType& type,
Label* is_instance_lbl,
Label* is_not_instance_lbl) {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl) {
__ Comment("InlineInstanceof");
if (type.IsFunctionType()) {
@ -623,7 +625,7 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
ASSERT(type.IsFinalized());
ASSERT(!type.IsObjectType() && !type.IsDynamicType() && !type.IsVoidType());
Label is_instance, is_not_instance;
compiler::Label is_instance, is_not_instance;
// If type is instantiated and non-parameterized, we can inline code
// checking whether the tested instance is a Smi.
if (type.IsInstantiated()) {
@ -645,7 +647,7 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
GenerateInlineInstanceof(token_pos, type, &is_instance, &is_not_instance);
// test_cache is null if there is no fall-through.
Label done;
compiler::Label done;
if (!test_cache.IsNull()) {
// Generate runtime call.
__ PushObject(Object::null_object()); // Make room for the result.
@ -660,11 +662,11 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
// instanceof runtime call will be left as the result of the operation.
__ Drop(5);
__ popq(RAX);
__ jmp(&done, Assembler::kNearJump);
__ jmp(&done, compiler::Assembler::kNearJump);
}
__ Bind(&is_not_instance);
__ LoadObject(RAX, Bool::Get(false));
__ jmp(&done, Assembler::kNearJump);
__ jmp(&done, compiler::Assembler::kNearJump);
__ Bind(&is_instance);
__ LoadObject(RAX, Bool::Get(true));
@ -702,7 +704,7 @@ void FlowGraphCompiler::GenerateAssertAssignable(TokenPosition token_pos,
GenerateAssertAssignableViaTypeTestingStub(token_pos, deopt_id, dst_type,
dst_name, locs);
} else {
Label is_assignable, runtime_call;
compiler::Label is_assignable, runtime_call;
// A null object is always assignable and is returned as result.
__ CompareObject(RAX, Object::null_object());
@ -743,7 +745,7 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
const Register kInstantiatorTypeArgumentsReg = RDX;
const Register kFunctionTypeArgumentsReg = RCX;
Label done;
compiler::Label done;
const Register subtype_cache_reg = R9;
const Register kScratchReg = RBX;
@ -768,8 +770,9 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
ASSERT((sub_type_cache_index + 1) == dst_name_index);
ASSERT(__ constant_pool_allowed());
__ movq(subtype_cache_reg, Address(PP, sub_type_cache_offset));
__ call(FieldAddress(RBX, AbstractType::type_test_stub_entry_point_offset()));
__ movq(subtype_cache_reg, compiler::Address(PP, sub_type_cache_offset));
__ call(compiler::FieldAddress(
RBX, AbstractType::type_test_stub_entry_point_offset()));
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
__ Bind(&done);
}
@ -816,15 +819,16 @@ void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
kPoolReg = PP;
} else {
__ movq(kPoolReg, FieldAddress(CODE_REG, Code::object_pool_offset()));
__ movq(kPoolReg,
compiler::FieldAddress(CODE_REG, Code::object_pool_offset()));
}
__ movq(RDX, Immediate(type_arguments_field_offset));
__ movq(RBX,
FieldAddress(kPoolReg, ObjectPool::element_offset(function_index)));
__ movq(CODE_REG,
FieldAddress(kPoolReg, ObjectPool::element_offset(stub_index)));
__ jmp(FieldAddress(CODE_REG,
Code::entry_point_offset(Code::EntryKind::kUnchecked)));
__ movq(RDX, compiler::Immediate(type_arguments_field_offset));
__ movq(RBX, compiler::FieldAddress(
kPoolReg, ObjectPool::element_offset(function_index)));
__ movq(CODE_REG, compiler::FieldAddress(
kPoolReg, ObjectPool::element_offset(stub_index)));
__ jmp(compiler::FieldAddress(
CODE_REG, Code::entry_point_offset(Code::EntryKind::kUnchecked)));
}
void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
@ -832,8 +836,8 @@ void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
// +1 : receiver.
// Sequence node has one return node, its input is load field node.
__ Comment("Intrinsic Getter");
__ movq(RAX, Address(RSP, 1 * kWordSize));
__ movq(RAX, FieldAddress(RAX, offset));
__ movq(RAX, compiler::Address(RSP, 1 * kWordSize));
__ movq(RAX, compiler::FieldAddress(RAX, offset));
__ ret();
}
@ -843,9 +847,9 @@ void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
// +2 : receiver.
// Sequence node has one store node and one return NULL node.
__ Comment("Intrinsic Setter");
__ movq(RAX, Address(RSP, 2 * kWordSize)); // Receiver.
__ movq(RBX, Address(RSP, 1 * kWordSize)); // Value.
__ StoreIntoObject(RAX, FieldAddress(RAX, offset), RBX);
__ movq(RAX, compiler::Address(RSP, 2 * kWordSize)); // Receiver.
__ movq(RBX, compiler::Address(RSP, 1 * kWordSize)); // Value.
__ StoreIntoObject(RAX, compiler::FieldAddress(RAX, offset), RBX);
__ LoadObject(RAX, Object::null_object());
__ ret();
}
@ -863,19 +867,22 @@ void FlowGraphCompiler::EmitFrameEntry() {
(!is_optimizing() || may_reoptimize())) {
__ Comment("Invocation Count Check");
const Register function_reg = RDI;
__ movq(function_reg, FieldAddress(CODE_REG, Code::owner_offset()));
__ movq(function_reg,
compiler::FieldAddress(CODE_REG, Code::owner_offset()));
// Reoptimization of an optimized function is triggered by counting in
// IC stubs, but not at the entry of the function.
if (!is_optimizing()) {
__ incl(FieldAddress(function_reg, Function::usage_counter_offset()));
__ incl(compiler::FieldAddress(function_reg,
Function::usage_counter_offset()));
}
__ cmpl(FieldAddress(function_reg, Function::usage_counter_offset()),
Immediate(GetOptimizationThreshold()));
__ cmpl(compiler::FieldAddress(function_reg,
Function::usage_counter_offset()),
compiler::Immediate(GetOptimizationThreshold()));
ASSERT(function_reg == RDI);
Label dont_optimize;
__ j(LESS, &dont_optimize, Assembler::kNearJump);
__ jmp(Address(THR, Thread::optimize_entry_offset()));
compiler::Label dont_optimize;
__ j(LESS, &dont_optimize, compiler::Assembler::kNearJump);
__ jmp(compiler::Address(THR, Thread::optimize_entry_offset()));
__ Bind(&dont_optimize);
}
ASSERT(StackSize() >= 0);
@ -908,7 +915,7 @@ void FlowGraphCompiler::EmitPrologue() {
const intptr_t slot_index =
compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : RAX;
__ movq(Address(RBP, slot_index * kWordSize), value_reg);
__ movq(compiler::Address(RBP, slot_index * kWordSize), value_reg);
}
}
@ -1026,7 +1033,8 @@ void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
ASSERT(assembler_->constant_pool_allowed());
__ Comment("Edge counter");
__ LoadObject(RAX, edge_counters_array_);
__ IncrementSmiField(FieldAddress(RAX, Array::element_offset(edge_id)), 1);
__ IncrementSmiField(
compiler::FieldAddress(RAX, Array::element_offset(edge_id)), 1);
}
void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
@ -1044,7 +1052,8 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
// Pass the function explicitly; it is used in the IC stub.
__ LoadObject(RDI, parsed_function().function());
// Load receiver into RDX.
__ movq(RDX, Address(RSP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
__ movq(RDX, compiler::Address(
RSP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
__ LoadUniqueObject(RBX, ic_data);
GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs,
entry_kind);
@ -1061,14 +1070,15 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
// Load receiver into RDX.
__ movq(RDX, Address(RSP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
__ movq(RDX, compiler::Address(
RSP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
__ LoadUniqueObject(RBX, ic_data);
__ LoadUniqueObject(CODE_REG, stub);
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ call(FieldAddress(CODE_REG, entry_point_offset));
__ call(compiler::FieldAddress(CODE_REG, entry_point_offset));
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kIcCall, locs);
__ Drop(ic_data.CountWithTypeArgs(), RCX);
}
@ -1088,9 +1098,10 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
MegamorphicCacheTable::Lookup(isolate(), name, arguments_descriptor));
__ Comment("MegamorphicCall");
// Load receiver into RDX.
__ movq(RDX, Address(RSP, (args_desc.Count() - 1) * kWordSize));
__ movq(RDX, compiler::Address(RSP, (args_desc.Count() - 1) * kWordSize));
__ LoadObject(RBX, cache);
__ call(Address(THR, Thread::megamorphic_call_checked_entry_offset()));
__ call(
compiler::Address(THR, Thread::megamorphic_call_checked_entry_offset()));
RecordSafepoint(locs, slow_path_argument_count);
const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
@ -1128,7 +1139,8 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
UnlinkedCall::ZoneHandle(zone(), ic_data.AsUnlinkedCall());
__ Comment("InstanceCallAOT");
__ movq(RDX, Address(RSP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
__ movq(RDX, compiler::Address(
RSP, (ic_data.CountWithoutTypeArgs() - 1) * kWordSize));
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
// The AOT runtime will replace the slot in the object pool with the
// entrypoint address - see clustered_snapshot.cc.
@ -1139,7 +1151,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ LoadUniqueObject(CODE_REG, initial_stub);
__ movq(RCX, FieldAddress(CODE_REG, entry_point_offset));
__ movq(RCX, compiler::FieldAddress(CODE_REG, entry_point_offset));
}
__ LoadUniqueObject(RBX, data);
__ call(RCX);
@ -1252,7 +1264,7 @@ void FlowGraphCompiler::ClobberDeadTempRegisters(LocationSummary* locs) {
// TODO(zerny): clobber non-live temporary FPU registers.
if (tmp.IsRegister() &&
!locs->live_registers()->ContainsRegister(tmp.reg())) {
__ movq(tmp.reg(), Immediate(0xf7));
__ movq(tmp.reg(), compiler::Immediate(0xf7));
}
}
}
@ -1267,12 +1279,14 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
const Array& arguments_descriptor) {
__ Comment("EmitTestAndCall");
// Load receiver into RAX.
__ movq(RAX, Address(RSP, (count_without_type_args - 1) * kWordSize));
__ movq(RAX,
compiler::Address(RSP, (count_without_type_args - 1) * kWordSize));
__ LoadObject(R10, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(Label* label, bool if_smi) {
__ testq(RAX, Immediate(kSmiTagMask));
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,
bool if_smi) {
__ testq(RAX, compiler::Immediate(kSmiTagMask));
// Jump if receiver is (not) Smi.
__ j(if_smi ? ZERO : NOT_ZERO, label);
}
@ -1285,8 +1299,8 @@ void FlowGraphCompiler::EmitTestAndCallLoadCid(Register class_id_reg) {
#undef __
#define __ assembler->
int FlowGraphCompiler::EmitTestAndCallCheckCid(Assembler* assembler,
Label* label,
int FlowGraphCompiler::EmitTestAndCallCheckCid(compiler::Assembler* assembler,
compiler::Label* label,
Register class_id_reg,
const CidRange& range,
int bias,
@ -1296,12 +1310,12 @@ int FlowGraphCompiler::EmitTestAndCallCheckCid(Assembler* assembler,
// 32-bit (since the subtraction instruction is as well).
intptr_t cid_start = range.cid_start;
if (range.IsSingleCid()) {
__ cmpl(class_id_reg, Immediate(cid_start - bias));
__ cmpl(class_id_reg, compiler::Immediate(cid_start - bias));
__ BranchIf(jump_on_miss ? NOT_EQUAL : EQUAL, label);
} else {
__ addl(class_id_reg, Immediate(bias - cid_start));
__ addl(class_id_reg, compiler::Immediate(bias - cid_start));
bias = cid_start;
__ cmpl(class_id_reg, Immediate(range.Extent()));
__ cmpl(class_id_reg, compiler::Immediate(range.Extent()));
__ BranchIf(jump_on_miss ? UNSIGNED_GREATER : UNSIGNED_LESS_EQUAL, label);
}
return bias;
@ -1405,8 +1419,8 @@ void ParallelMoveResolver::EmitSwap(int index) {
destination.IsDoubleStackSlot() || source.IsDoubleStackSlot();
XmmRegister reg =
source.IsFpuRegister() ? source.fpu_reg() : destination.fpu_reg();
Address slot_address = source.IsFpuRegister()
? LocationToStackSlotAddress(destination)
compiler::Address slot_address =
source.IsFpuRegister() ? LocationToStackSlotAddress(destination)
: LocationToStackSlotAddress(source);
if (double_width) {
@ -1418,8 +1432,9 @@ void ParallelMoveResolver::EmitSwap(int index) {
}
__ movaps(reg, FpuTMP);
} else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
const Address& source_slot_address = LocationToStackSlotAddress(source);
const Address& destination_slot_address =
const compiler::Address& source_slot_address =
LocationToStackSlotAddress(source);
const compiler::Address& destination_slot_address =
LocationToStackSlotAddress(destination);
ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
@ -1428,8 +1443,9 @@ void ParallelMoveResolver::EmitSwap(int index) {
__ movsd(destination_slot_address, FpuTMP);
__ movsd(source_slot_address, ensure_scratch.reg());
} else if (source.IsQuadStackSlot() && destination.IsQuadStackSlot()) {
const Address& source_slot_address = LocationToStackSlotAddress(source);
const Address& destination_slot_address =
const compiler::Address& source_slot_address =
LocationToStackSlotAddress(source);
const compiler::Address& destination_slot_address =
LocationToStackSlotAddress(destination);
ScratchFpuRegisterScope ensure_scratch(this, FpuTMP);
@ -1458,16 +1474,18 @@ void ParallelMoveResolver::EmitSwap(int index) {
}
}
void ParallelMoveResolver::MoveMemoryToMemory(const Address& dst,
const Address& src) {
void ParallelMoveResolver::MoveMemoryToMemory(const compiler::Address& dst,
const compiler::Address& src) {
__ MoveMemoryToMemory(dst, src);
}
void ParallelMoveResolver::Exchange(Register reg, const Address& mem) {
void ParallelMoveResolver::Exchange(Register reg,
const compiler::Address& mem) {
__ Exchange(reg, mem);
}
void ParallelMoveResolver::Exchange(const Address& mem1, const Address& mem2) {
void ParallelMoveResolver::Exchange(const compiler::Address& mem1,
const compiler::Address& mem2) {
__ Exchange(mem1, mem2);
}
@ -1493,13 +1511,13 @@ void ParallelMoveResolver::RestoreScratch(Register reg) {
}
void ParallelMoveResolver::SpillFpuScratch(FpuRegister reg) {
__ AddImmediate(RSP, Immediate(-kFpuRegisterSize));
__ movups(Address(RSP, 0), reg);
__ AddImmediate(RSP, compiler::Immediate(-kFpuRegisterSize));
__ movups(compiler::Address(RSP, 0), reg);
}
void ParallelMoveResolver::RestoreFpuScratch(FpuRegister reg) {
__ movups(reg, Address(RSP, 0));
__ AddImmediate(RSP, Immediate(kFpuRegisterSize));
__ movups(reg, compiler::Address(RSP, 0));
__ AddImmediate(RSP, compiler::Immediate(kFpuRegisterSize));
}
#undef __

View file

@ -3932,7 +3932,7 @@ void TargetEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
TokenPosition::kNoSource);
}
if (HasParallelMove()) {
if (Assembler::EmittingComments()) {
if (compiler::Assembler::EmittingComments()) {
compiler->EmitComment(parallel_move());
}
compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
@ -4005,7 +4005,7 @@ void FunctionEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
TokenPosition::kNoSource);
}
if (HasParallelMove()) {
if (Assembler::EmittingComments()) {
if (compiler::Assembler::EmittingComments()) {
compiler->EmitComment(parallel_move());
}
compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
@ -4042,7 +4042,7 @@ void OsrEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
#endif
if (HasParallelMove()) {
if (Assembler::EmittingComments()) {
if (compiler::Assembler::EmittingComments()) {
compiler->EmitComment(parallel_move());
}
compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
@ -4767,8 +4767,9 @@ void DeoptimizeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
#if !defined(TARGET_ARCH_DBC)
void CheckClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass,
licm_hoisted_ ? ICData::kHoisted : 0);
compiler::Label* deopt =
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass,
licm_hoisted_ ? ICData::kHoisted : 0);
if (IsNullCheck()) {
EmitNullCheck(compiler, deopt);
return;
@ -4777,7 +4778,7 @@ void CheckClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(!cids_.IsMonomorphic() || !cids_.HasClassId(kSmiCid));
Register value = locs()->in(0).reg();
Register temp = locs()->temp(0).reg();
Label is_ok;
compiler::Label is_ok;
__ BranchIfSmi(value, cids_.HasClassId(kSmiCid) ? &is_ok : deopt);
@ -4880,8 +4881,9 @@ void UnboxInstr::EmitLoadFromBoxWithDeopt(FlowGraphCompiler* compiler) {
const Register box = locs()->in(0).reg();
const Register temp =
(locs()->temp_count() > 0) ? locs()->temp(0).reg() : kNoRegister;
Label* deopt = compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnbox);
Label is_smi;
compiler::Label* deopt =
compiler->AddDeoptStub(GetDeoptId(), ICData::kDeoptUnbox);
compiler::Label is_smi;
if ((value()->Type()->ToNullableCid() == box_cid) &&
value()->Type()->is_nullable()) {
@ -4896,7 +4898,7 @@ void UnboxInstr::EmitLoadFromBoxWithDeopt(FlowGraphCompiler* compiler) {
EmitLoadFromBox(compiler);
if (is_smi.IsLinked()) {
Label done;
compiler::Label done;
__ Jump(&done);
__ Bind(&is_smi);
EmitSmiConversion(compiler);

View file

@ -989,9 +989,9 @@ class Instruction : public ZoneAllocated {
};
struct BranchLabels {
Label* true_label;
Label* false_label;
Label* fall_through;
compiler::Label* true_label;
compiler::Label* false_label;
compiler::Label* fall_through;
};
class PureInstruction : public Instruction {
@ -4502,12 +4502,12 @@ class StoreInstanceFieldInstr : public TemplateInstruction<2, NoThrow> {
intptr_t OffsetInBytes() const { return slot().offset_in_bytes(); }
Assembler::CanBeSmi CanValueBeSmi() const {
compiler::Assembler::CanBeSmi CanValueBeSmi() const {
const intptr_t cid = value()->Type()->ToNullableCid();
// Write barrier is skipped for nullable and non-nullable smis.
ASSERT(cid != kSmiCid);
return cid == kDynamicCid ? Assembler::kValueCanBeSmi
: Assembler::kValueIsNotSmi;
return cid == kDynamicCid ? compiler::Assembler::kValueCanBeSmi
: compiler::Assembler::kValueIsNotSmi;
}
const Slot& slot_;
@ -4667,12 +4667,12 @@ class StoreStaticFieldInstr : public TemplateDefinition<1, NoThrow> {
PRINT_OPERANDS_TO_SUPPORT
private:
Assembler::CanBeSmi CanValueBeSmi() const {
compiler::Assembler::CanBeSmi CanValueBeSmi() const {
const intptr_t cid = value()->Type()->ToNullableCid();
// Write barrier is skipped for nullable and non-nullable smis.
ASSERT(cid != kSmiCid);
return cid == kDynamicCid ? Assembler::kValueCanBeSmi
: Assembler::kValueIsNotSmi;
return cid == kDynamicCid ? compiler::Assembler::kValueCanBeSmi
: compiler::Assembler::kValueIsNotSmi;
}
const Field& field_;
@ -4939,8 +4939,8 @@ class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
virtual bool HasUnknownSideEffects() const { return false; }
private:
Assembler::CanBeSmi CanValueBeSmi() const {
return Assembler::kValueCanBeSmi;
compiler::Assembler::CanBeSmi CanValueBeSmi() const {
return compiler::Assembler::kValueCanBeSmi;
}
const StoreBarrierType emit_store_barrier_;
@ -7490,15 +7490,15 @@ class CheckClassInstr : public TemplateInstruction<1, NoThrow> {
intptr_t cid_start,
intptr_t cid_end,
bool is_last,
Label* is_ok,
Label* deopt,
compiler::Label* is_ok,
compiler::Label* deopt,
bool use_near_jump);
void EmitBitTest(FlowGraphCompiler* compiler,
intptr_t min,
intptr_t max,
intptr_t mask,
Label* deopt);
void EmitNullCheck(FlowGraphCompiler* compiler, Label* deopt);
compiler::Label* deopt);
void EmitNullCheck(FlowGraphCompiler* compiler, compiler::Label* deopt);
DISALLOW_COPY_AND_ASSIGN(CheckClassInstr);
};

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -560,7 +560,7 @@ void ComparisonInstr::EmitBranchCode(FlowGraphCompiler* compiler,
}
void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label is_true, is_false;
compiler::Label is_true, is_false;
BranchLabels labels = {&is_true, &is_false, &is_false};
Condition true_condition =
this->GetNextInstructionCondition(compiler, labels);
@ -571,7 +571,7 @@ void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (true_condition != INVALID_CONDITION) {
EmitBranchOnCondition(compiler, true_condition, labels);
}
Label done;
compiler::Label done;
__ Bind(&is_false);
__ PushConstant(Bool::False());
__ Jump(&done);
@ -598,7 +598,7 @@ void ComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// the correct boolean.
if ((next_is_true && is_false.IsLinked()) ||
(!next_is_true && is_true.IsLinked())) {
Label done;
compiler::Label done;
__ Jump(&done);
__ Bind(next_is_true ? &is_false : &is_true);
__ LoadConstant(result, Bool::Get(!next_is_true));
@ -687,7 +687,7 @@ Condition TestCidsInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
// If the cid is not in the list, jump to the opposite label from the cids
// that are in the list. These must be all the same (see asserts in the
// constructor).
Label* target = result ? labels.false_label : labels.true_label;
compiler::Label* target = result ? labels.false_label : labels.true_label;
__ Jump(target);
}
@ -1035,11 +1035,12 @@ EMIT_NATIVE_CODE(NativeCall,
function = native_c_function();
}
const ExternalLabel trampoline_label(reinterpret_cast<uword>(trampoline));
const compiler::ExternalLabel trampoline_label(
reinterpret_cast<uword>(trampoline));
const intptr_t trampoline_kidx =
__ object_pool_builder().FindNativeFunctionWrapper(
&trampoline_label, ObjectPool::Patchability::kPatchable);
const ExternalLabel label(reinterpret_cast<uword>(function));
const compiler::ExternalLabel label(reinterpret_cast<uword>(function));
const intptr_t target_kidx = __ object_pool_builder().FindNativeFunction(
&label, ObjectPool::Patchability::kPatchable);
const intptr_t argc_tag_kidx =
@ -1818,7 +1819,7 @@ EMIT_NATIVE_CODE(BoxInteger32, 1, Location::RequiresRegister()) {
EMIT_NATIVE_CODE(BoxInt64, 1, Location::RequiresRegister()) {
#if defined(ARCH_IS_64_BIT)
Label done;
compiler::Label done;
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
__ BoxInt64(out, value);

File diff suppressed because it is too large

View file

@ -140,8 +140,8 @@ void TestPipeline::CompileGraphAndAttachFunction() {
ASSERT(pass_state_->inline_id_to_function.length() ==
pass_state_->caller_inline_id.length());
ObjectPoolBuilder object_pool_builder;
Assembler assembler(&object_pool_builder, use_far_branches);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(&object_pool_builder, use_far_branches);
FlowGraphCompiler graph_compiler(
&assembler, flow_graph_, *parsed_function_, optimized,
&speculative_policy, pass_state_->inline_id_to_function,

File diff suppressed because it is too large

View file

@ -96,50 +96,53 @@ TemplateLocation<Register, FpuRegister>::AsPairLocation() const {
Location LocationRegisterOrConstant(Value* value) {
ConstantInstr* constant = value->definition()->AsConstant();
return ((constant != NULL) && Assembler::IsSafe(constant->value()))
return ((constant != NULL) && compiler::Assembler::IsSafe(constant->value()))
? Location::Constant(constant)
: Location::RequiresRegister();
}
Location LocationRegisterOrSmiConstant(Value* value) {
ConstantInstr* constant = value->definition()->AsConstant();
return ((constant != NULL) && Assembler::IsSafeSmi(constant->value()))
return ((constant != NULL) &&
compiler::Assembler::IsSafeSmi(constant->value()))
? Location::Constant(constant)
: Location::RequiresRegister();
}
Location LocationWritableRegisterOrSmiConstant(Value* value) {
ConstantInstr* constant = value->definition()->AsConstant();
return ((constant != NULL) && Assembler::IsSafeSmi(constant->value()))
return ((constant != NULL) &&
compiler::Assembler::IsSafeSmi(constant->value()))
? Location::Constant(constant)
: Location::WritableRegister();
}
Location LocationFixedRegisterOrConstant(Value* value, Register reg) {
ConstantInstr* constant = value->definition()->AsConstant();
return ((constant != NULL) && Assembler::IsSafe(constant->value()))
return ((constant != NULL) && compiler::Assembler::IsSafe(constant->value()))
? Location::Constant(constant)
: Location::RegisterLocation(reg);
}
Location LocationFixedRegisterOrSmiConstant(Value* value, Register reg) {
ConstantInstr* constant = value->definition()->AsConstant();
return ((constant != NULL) && Assembler::IsSafeSmi(constant->value()))
return ((constant != NULL) &&
compiler::Assembler::IsSafeSmi(constant->value()))
? Location::Constant(constant)
: Location::RegisterLocation(reg);
}
Location LocationAnyOrConstant(Value* value) {
ConstantInstr* constant = value->definition()->AsConstant();
return ((constant != NULL) && Assembler::IsSafe(constant->value()))
return ((constant != NULL) && compiler::Assembler::IsSafe(constant->value()))
? Location::Constant(constant)
: Location::Any();
}
// DBC does not have a notion of 'address' in its instruction set.
#if !defined(TARGET_ARCH_DBC)
Address LocationToStackSlotAddress(Location loc) {
return Address(loc.base_reg(), loc.ToStackSlotOffset());
compiler::Address LocationToStackSlotAddress(Location loc) {
return compiler::Address(loc.base_reg(), loc.ToStackSlotOffset());
}
#endif

View file

@ -496,7 +496,7 @@ Location LocationRemapForSlowPath(Location loc,
// DBC does not have a notion of 'address' in its instruction set.
#if !defined(TARGET_ARCH_DBC)
// Return a memory operand for stack slot locations.
Address LocationToStackSlotAddress(Location loc);
compiler::Address LocationToStackSlotAddress(Location loc);
#endif
template <class Location>

View file

@ -184,10 +184,9 @@ class ArgumentAllocator : public ValueObject {
case kUnboxedInt64:
case kUnboxedUint32:
case kUnboxedInt32: {
Location result =
rep == kUnboxedInt64 && compiler::target::kWordSize == 4
? AllocateAlignedRegisterPair()
: AllocateCpuRegister();
Location result = rep == kUnboxedInt64 && target::kWordSize == 4
? AllocateAlignedRegisterPair()
: AllocateCpuRegister();
if (!result.IsUnallocated()) return result;
break;
}
@ -196,7 +195,7 @@ class ArgumentAllocator : public ValueObject {
}
// Argument must be spilled.
if (rep == kUnboxedInt64 && compiler::target::kWordSize == 4) {
if (rep == kUnboxedInt64 && target::kWordSize == 4) {
return AllocateAlignedStackSlots(rep);
} else if (rep == kUnboxedDouble) {
// By convention, we always use DoubleStackSlot for doubles, even on
@ -217,15 +216,14 @@ class ArgumentAllocator : public ValueObject {
Location AllocateDoubleStackSlot() {
const Location result = Location::DoubleStackSlot(
stack_height_in_slots, CallingConventions::kStackPointerRegister);
stack_height_in_slots += 8 / compiler::target::kWordSize;
stack_height_in_slots += 8 / target::kWordSize;
return result;
}
// Allocates a pair of stack slots where the first stack slot is aligned to an
// 8-byte boundary, if necessary.
Location AllocateAlignedStackSlots(Representation rep) {
if (CallingConventions::kAlignArguments &&
compiler::target::kWordSize == 4) {
if (CallingConventions::kAlignArguments && target::kWordSize == 4) {
stack_height_in_slots += stack_height_in_slots % 2;
}
@ -314,7 +312,7 @@ void CallbackArgumentTranslator::AllocateArgument(Location arg) {
if (arg.IsRegister()) {
argument_slots_required_++;
} else {
argument_slots_required_ += 8 / compiler::target::kWordSize;
argument_slots_required_ += 8 / target::kWordSize;
}
}
@ -345,7 +343,7 @@ Location CallbackArgumentTranslator::TranslateArgument(Location arg) {
ASSERT(arg.IsFpuRegister());
const Location result =
Location::DoubleStackSlot(argument_slots_used_, SPREG);
argument_slots_used_ += 8 / compiler::target::kWordSize;
argument_slots_used_ += 8 / target::kWordSize;
return result;
}
@ -376,7 +374,7 @@ ZoneGrowableArray<Location>* ArgumentLocations(
return ArgumentLocationsBase<dart::CallingConventions, Location,
dart::Register, dart::FpuRegister>(arg_reps);
#else
intptr_t next_free_register = compiler::ffi::kFirstArgumentRegister;
intptr_t next_free_register = ffi::kFirstArgumentRegister;
intptr_t num_arguments = arg_reps.length();
auto result = new ZoneGrowableArray<Location>(num_arguments);
for (intptr_t i = 0; i < num_arguments; i++) {
@ -413,7 +411,7 @@ Location ResultLocation(Representation result_rep) {
case kUnboxedUint32:
return Location::RegisterLocation(CallingConventions::kReturnReg);
case kUnboxedInt64:
if (compiler::target::kWordSize == 4) {
if (target::kWordSize == 4) {
return Location::Pair(
Location::RegisterLocation(CallingConventions::kReturnReg),
Location::RegisterLocation(CallingConventions::kSecondReturnReg));
@ -478,7 +476,7 @@ intptr_t TemplateNumStackSlots(const ZoneGrowableArray<Location>& locations) {
if (locations.At(i).IsStackSlot()) {
height = locations.At(i).stack_index() + 1;
} else if (locations.At(i).IsDoubleStackSlot()) {
height = locations.At(i).stack_index() + 8 / compiler::target::kWordSize;
height = locations.At(i).stack_index() + 8 / target::kWordSize;
} else if (locations.At(i).IsPairLocation()) {
const Location first = locations.At(i).AsPairLocation()->At(0);
const Location second = locations.At(i).AsPairLocation()->At(1);

View file

@ -350,7 +350,7 @@ class CompileParsedFunctionHelper : public ValueObject {
intptr_t loading_invalidation_gen_at_start() const {
return loading_invalidation_gen_at_start_;
}
RawCode* FinalizeCompilation(Assembler* assembler,
RawCode* FinalizeCompilation(compiler::Assembler* assembler,
FlowGraphCompiler* graph_compiler,
FlowGraph* flow_graph);
void CheckIfBackgroundCompilerIsBeingStopped(bool optimizing_compiler);
@ -365,7 +365,7 @@ class CompileParsedFunctionHelper : public ValueObject {
};
RawCode* CompileParsedFunctionHelper::FinalizeCompilation(
Assembler* assembler,
compiler::Assembler* assembler,
FlowGraphCompiler* graph_compiler,
FlowGraph* flow_graph) {
ASSERT(!FLAG_precompiled_mode);
@ -651,8 +651,8 @@ RawCode* CompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
ASSERT(pass_state.inline_id_to_function.length() ==
pass_state.caller_inline_id.length());
ObjectPoolBuilder object_pool_builder;
Assembler assembler(&object_pool_builder, use_far_branches);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(&object_pool_builder, use_far_branches);
FlowGraphCompiler graph_compiler(
&assembler, flow_graph, *parsed_function(), optimized(),
&speculative_policy, pass_state.inline_id_to_function,

View file

@ -185,7 +185,7 @@ uword SymbolsPredefinedAddress();
#endif
typedef void (*RuntimeEntryCallInternal)(const dart::RuntimeEntry*,
compiler::Assembler*,
Assembler*,
intptr_t);
#if !defined(TARGET_ARCH_DBC)
@ -198,7 +198,7 @@ class RuntimeEntry : public ValueObject {
public:
virtual ~RuntimeEntry() {}
void Call(compiler::Assembler* assembler, intptr_t argument_count) const {
void Call(Assembler* assembler, intptr_t argument_count) const {
ASSERT(call_ != NULL);
ASSERT(runtime_entry_ != NULL);

View file

@ -198,8 +198,7 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Object& closure_allocation_stub,
const Object& context_allocation_stub) {
const intptr_t kReceiverOffset =
compiler::target::frame_layout.param_end_from_fp + 1;
const intptr_t kReceiverOffset = target::frame_layout.param_end_from_fp + 1;
__ EnterStubFrame();
@ -710,13 +709,13 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
// and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
const intptr_t saved_result_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - R0);
const intptr_t saved_exception_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - R0);
const intptr_t saved_stacktrace_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - R1);
// Result in R0 is preserved as part of pushing all registers below.
@ -784,14 +783,13 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in R0.
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1.
__ ldr(R1, Address(FP, compiler::target::frame_layout.first_local_from_fp *
__ ldr(R1, Address(FP, target::frame_layout.first_local_from_fp *
target::kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore result into R1.
__ ldr(R1, Address(FP, compiler::target::frame_layout.first_local_from_fp *
__ ldr(R1, Address(FP, target::frame_layout.first_local_from_fp *
target::kWordSize));
__ ldr(R2, Address(FP, (compiler::target::frame_layout.first_local_from_fp -
1) *
__ ldr(R2, Address(FP, (target::frame_layout.first_local_from_fp - 1) *
target::kWordSize));
}
// Code above cannot cause GC.
@ -904,7 +902,7 @@ void StubCodeCompiler::GenerateMegamorphicMissStub(Assembler* assembler) {
// Load the receiver.
__ ldr(R2, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
__ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
__ ldr(R8, Address(IP, compiler::target::frame_layout.param_end_from_fp *
__ ldr(R8, Address(IP, target::frame_layout.param_end_from_fp *
target::kWordSize));
// Preserve IC data and arguments descriptor.

View file

@ -269,8 +269,7 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Object& closure_allocation_stub,
const Object& context_allocation_stub) {
const intptr_t kReceiverOffset =
compiler::target::frame_layout.param_end_from_fp + 1;
const intptr_t kReceiverOffset = target::frame_layout.param_end_from_fp + 1;
__ EnterStubFrame();
@ -762,13 +761,13 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
// and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
const intptr_t saved_result_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - R0);
const intptr_t saved_exception_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - R0);
const intptr_t saved_stacktrace_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - R1);
// Result in R0 is preserved as part of pushing all registers below.
@ -837,16 +836,14 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
if (kind == kLazyDeoptFromReturn) {
// Restore result into R1.
__ LoadFromOffset(
R1, FP,
compiler::target::frame_layout.first_local_from_fp * target::kWordSize);
R1, FP, target::frame_layout.first_local_from_fp * target::kWordSize);
} else if (kind == kLazyDeoptFromThrow) {
// Restore result into R1.
__ LoadFromOffset(
R1, FP,
compiler::target::frame_layout.first_local_from_fp * target::kWordSize);
__ LoadFromOffset(R2, FP,
(compiler::target::frame_layout.first_local_from_fp - 1) *
target::kWordSize);
R1, FP, target::frame_layout.first_local_from_fp * target::kWordSize);
__ LoadFromOffset(
R2, FP,
(target::frame_layout.first_local_from_fp - 1) * target::kWordSize);
}
// Code above cannot cause GC.
// There is a Dart Frame on the stack. We must restore PP and leave frame.
@ -961,9 +958,8 @@ void StubCodeCompiler::GenerateMegamorphicMissStub(Assembler* assembler) {
// Load the receiver.
__ LoadFieldFromOffset(R2, R4, target::ArgumentsDescriptor::count_offset());
__ add(TMP, FP, Operand(R2, LSL, 2)); // R2 is Smi.
__ LoadFromOffset(
R6, TMP,
compiler::target::frame_layout.param_end_from_fp * target::kWordSize);
__ LoadFromOffset(R6, TMP,
target::frame_layout.param_end_from_fp * target::kWordSize);
// Preserve IC data and arguments descriptor.
__ Push(R5);

View file

@ -514,13 +514,13 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
// and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
const intptr_t saved_result_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - EAX);
const intptr_t saved_exception_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - EAX);
const intptr_t saved_stacktrace_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - EDX);
// Result in EAX is preserved as part of pushing all registers below.
@ -583,18 +583,14 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
if (kind == kLazyDeoptFromReturn) {
// Restore result into EBX.
__ movl(EBX,
Address(EBP, compiler::target::frame_layout.first_local_from_fp *
target::kWordSize));
__ movl(EBX, Address(EBP, target::frame_layout.first_local_from_fp *
target::kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore result into EBX.
__ movl(EBX,
Address(EBP, compiler::target::frame_layout.first_local_from_fp *
target::kWordSize));
__ movl(
ECX,
Address(EBP, (compiler::target::frame_layout.first_local_from_fp - 1) *
target::kWordSize));
__ movl(EBX, Address(EBP, target::frame_layout.first_local_from_fp *
target::kWordSize));
__ movl(ECX, Address(EBP, (target::frame_layout.first_local_from_fp - 1) *
target::kWordSize));
}
// Code above cannot cause GC.
__ LeaveFrame();

View file

@ -252,7 +252,7 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
const Object& closure_allocation_stub,
const Object& context_allocation_stub) {
const intptr_t kReceiverOffsetInWords =
compiler::target::frame_layout.param_end_from_fp + 1;
target::frame_layout.param_end_from_fp + 1;
__ EnterStubFrame();
@ -261,8 +261,7 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
__ movq(RCX, Address(THR, target::Thread::object_null_offset()));
__ cmpq(RDX, Immediate(0));
__ j(EQUAL, &no_type_args, Assembler::kNearJump);
__ movq(RAX,
Address(RBP, compiler::target::kWordSize * kReceiverOffsetInWords));
__ movq(RAX, Address(RBP, target::kWordSize * kReceiverOffsetInWords));
__ movq(RCX, Address(RAX, RDX, TIMES_1, 0));
__ Bind(&no_type_args);
__ pushq(RCX);
@ -294,8 +293,7 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
}
// Store receiver in context
__ movq(RSI,
Address(RBP, compiler::target::kWordSize * kReceiverOffsetInWords));
__ movq(RSI, Address(RBP, target::kWordSize * kReceiverOffsetInWords));
__ StoreIntoObject(
RAX, FieldAddress(RAX, target::Context::variable_offset(0)), RSI);
@ -705,13 +703,13 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
// The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry
// and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls.
const intptr_t saved_result_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - RAX);
const intptr_t saved_exception_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - RAX);
const intptr_t saved_stacktrace_slot_from_fp =
compiler::target::frame_layout.first_local_from_fp + 1 -
target::frame_layout.first_local_from_fp + 1 -
(kNumberOfCpuRegisters - RDX);
// Result in RAX is preserved as part of pushing all registers below.
@ -779,19 +777,15 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
__ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1);
if (kind == kLazyDeoptFromReturn) {
// Restore result into RBX.
__ movq(RBX,
Address(RBP, compiler::target::frame_layout.first_local_from_fp *
target::kWordSize));
__ movq(RBX, Address(RBP, target::frame_layout.first_local_from_fp *
target::kWordSize));
} else if (kind == kLazyDeoptFromThrow) {
// Restore exception into RBX.
__ movq(RBX,
Address(RBP, compiler::target::frame_layout.first_local_from_fp *
target::kWordSize));
__ movq(RBX, Address(RBP, target::frame_layout.first_local_from_fp *
target::kWordSize));
// Restore stacktrace into RDX.
__ movq(
RDX,
Address(RBP, (compiler::target::frame_layout.first_local_from_fp - 1) *
target::kWordSize));
__ movq(RDX, Address(RBP, (target::frame_layout.first_local_from_fp - 1) *
target::kWordSize));
}
// Code above cannot cause GC.
// There is a Dart Frame on the stack. We must restore PP and leave frame.
@ -910,9 +904,9 @@ void StubCodeCompiler::GenerateMegamorphicMissStub(Assembler* assembler) {
__ movq(RAX, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
// Three words (saved pp, saved fp, stub's pc marker)
// in the stack above the return address.
__ movq(RAX, Address(RSP, RAX, TIMES_4,
compiler::target::frame_layout.saved_below_pc() *
target::kWordSize));
__ movq(RAX,
Address(RSP, RAX, TIMES_4,
target::frame_layout.saved_below_pc() * target::kWordSize));
// Preserve IC data and arguments descriptor.
__ pushq(RBX);
__ pushq(R10);

View file

@ -1046,7 +1046,7 @@ class DeoptInfoBuilder::TrieNode : public ZoneAllocated {
DeoptInfoBuilder::DeoptInfoBuilder(Zone* zone,
const intptr_t num_args,
Assembler* assembler)
compiler::Assembler* assembler)
: zone_(zone),
instructions_(),
num_args_(num_args),

View file

@ -482,7 +482,9 @@ typedef RegisterSource<FpuRegister> FpuRegisterSource;
// the heap and reset the builder's internal state for the next DeoptInfo.
class DeoptInfoBuilder : public ValueObject {
public:
DeoptInfoBuilder(Zone* zone, const intptr_t num_args, Assembler* assembler);
DeoptInfoBuilder(Zone* zone,
const intptr_t num_args,
compiler::Assembler* assembler);
// Return address before instruction.
void AddReturnAddress(const Function& function,
@ -549,7 +551,7 @@ class DeoptInfoBuilder : public ValueObject {
GrowableArray<DeoptInstr*> instructions_;
const intptr_t num_args_;
Assembler* assembler_;
compiler::Assembler* assembler_;
// Used to compress entries by sharing suffixes.
TrieNode* trie_root_;

View file

@ -53,7 +53,7 @@ bool GCSweeper::SweepPage(HeapPage* page, FreeList* freelist, bool locked) {
uword end = current + obj_size;
while (cursor < end) {
*reinterpret_cast<uword*>(cursor) =
Assembler::GetBreakInstructionFiller();
compiler::Assembler::GetBreakInstructionFiller();
cursor += kWordSize;
}
} else {

View file

@ -200,7 +200,8 @@ class PcRelativeCallPattern : public ValueObject {
int32_t distance() {
#if !defined(DART_PRECOMPILED_RUNTIME)
return Assembler::DecodeBranchOffset(*reinterpret_cast<int32_t*>(pc_));
return compiler::Assembler::DecodeBranchOffset(
*reinterpret_cast<int32_t*>(pc_));
#else
UNREACHABLE();
return 0;
@ -210,7 +211,7 @@ class PcRelativeCallPattern : public ValueObject {
void set_distance(int32_t distance) {
#if !defined(DART_PRECOMPILED_RUNTIME)
int32_t* word = reinterpret_cast<int32_t*>(pc_);
*word = Assembler::EncodeBranchOffset(distance, *word);
*word = compiler::Assembler::EncodeBranchOffset(distance, *word);
#else
UNREACHABLE();
#endif

View file

@ -210,7 +210,8 @@ class PcRelativeCallPattern : public ValueObject {
int32_t distance() {
#if !defined(DART_PRECOMPILED_RUNTIME)
return Assembler::DecodeImm26BranchOffset(*reinterpret_cast<int32_t*>(pc_));
return compiler::Assembler::DecodeImm26BranchOffset(
*reinterpret_cast<int32_t*>(pc_));
#else
UNREACHABLE();
return 0;
@ -220,7 +221,7 @@ class PcRelativeCallPattern : public ValueObject {
void set_distance(int32_t distance) {
#if !defined(DART_PRECOMPILED_RUNTIME)
int32_t* word = reinterpret_cast<int32_t*>(pc_);
*word = Assembler::EncodeImm26BranchOffset(distance, *word);
*word = compiler::Assembler::EncodeImm26BranchOffset(distance, *word);
#else
UNREACHABLE();
#endif

View file

@ -17,7 +17,7 @@ namespace dart {
#define __ assembler->
ASSEMBLER_TEST_GENERATE(Call, assembler) {
ExternalLabel label(StubCode::InvokeDartCode().EntryPoint());
compiler::ExternalLabel label(StubCode::InvokeDartCode().EntryPoint());
__ call(&label);
__ ret();
}

View file

@ -52,7 +52,7 @@ RawFunction* MegamorphicCacheTable::miss_handler(Isolate* isolate) {
void MegamorphicCacheTable::InitMissHandler(Isolate* isolate) {
// The miss handler for a class ID not found in the table is invoked as a
// normal Dart function.
ObjectPoolBuilder object_pool_builder;
compiler::ObjectPoolBuilder object_pool_builder;
const Code& code = Code::Handle(StubCode::Generate(
"_stub_MegamorphicMiss", &object_pool_builder,
compiler::StubCodeCompiler::GenerateMegamorphicMissStub));
@ -87,8 +87,9 @@ void MegamorphicCacheTable::InitMissHandler(Isolate* isolate) {
isolate->object_store()->SetMegamorphicMissHandler(code, function);
}
void MegamorphicCacheTable::ReInitMissHandlerCode(Isolate* isolate,
ObjectPoolBuilder* wrapper) {
void MegamorphicCacheTable::ReInitMissHandlerCode(
Isolate* isolate,
compiler::ObjectPoolBuilder* wrapper) {
ASSERT(FLAG_precompiled_mode && FLAG_use_bare_instructions);
const Code& code = Code::Handle(StubCode::Generate(

View file

@ -2087,7 +2087,7 @@ RawString* Object::DictionaryName() const {
void Object::InitializeObject(uword address, intptr_t class_id, intptr_t size) {
uword initial_value = (class_id == kInstructionsCid)
? Assembler::GetBreakInstructionFiller()
? compiler::Assembler::GetBreakInstructionFiller()
: reinterpret_cast<uword>(null_);
uword cur = address;
uword end = address + size;
@ -14832,7 +14832,7 @@ RawCode* Code::FinalizeCodeAndNotify(const char* name,
}
RawCode* Code::FinalizeCode(FlowGraphCompiler* compiler,
Assembler* assembler,
compiler::Assembler* assembler,
PoolAttachment pool_attachment,
bool optimized,
CodeStatistics* stats /* = nullptr */) {

View file

@ -16,14 +16,14 @@ namespace dart {
// Generate a simple dart code sequence.
// This is used to test Code and Instruction object creation.
void GenerateIncrement(Assembler* assembler) {
void GenerateIncrement(compiler::Assembler* assembler) {
__ EnterFrame(1 * kWordSize);
__ movz(R0, Immediate(0), 0);
__ movz(R0, compiler::Immediate(0), 0);
__ Push(R0);
__ add(R0, R0, Operand(1));
__ str(R0, Address(SP));
__ ldr(R1, Address(SP));
__ add(R1, R1, Operand(1));
__ add(R0, R0, compiler::Operand(1));
__ str(R0, compiler::Address(SP));
__ ldr(R1, compiler::Address(SP));
__ add(R1, R1, compiler::Operand(1));
__ Pop(R0);
__ mov(R0, R1);
__ LeaveFrame();
@ -32,7 +32,8 @@ void GenerateIncrement(Assembler* assembler) {
// Generate a dart code sequence that embeds a string object in it.
// This is used to test Embedded String objects in the instructions.
void GenerateEmbedStringInCode(Assembler* assembler, const char* str) {
void GenerateEmbedStringInCode(compiler::Assembler* assembler,
const char* str) {
const String& string_object =
String::ZoneHandle(String::New(str, Heap::kOld));
__ EnterStubFrame();
@ -43,7 +44,7 @@ void GenerateEmbedStringInCode(Assembler* assembler, const char* str) {
// Generate a dart code sequence that embeds a smi object in it.
// This is used to test Embedded Smi objects in the instructions.
void GenerateEmbedSmiInCode(Assembler* assembler, intptr_t value) {
void GenerateEmbedSmiInCode(compiler::Assembler* assembler, intptr_t value) {
const Smi& smi_object = Smi::ZoneHandle(Smi::New(value));
const int64_t val = reinterpret_cast<int64_t>(smi_object.raw());
__ LoadImmediate(R0, val);

View file

@ -16,22 +16,23 @@ namespace dart {
// Generate a simple dart code sequence.
// This is used to test Code and Instruction object creation.
void GenerateIncrement(Assembler* assembler) {
void GenerateIncrement(compiler::Assembler* assembler) {
__ LoadImmediate(R0, 0);
__ Push(R0);
__ ldr(IP, Address(SP, 0));
__ add(IP, IP, Operand(1));
__ str(IP, Address(SP, 0));
__ ldr(IP, Address(SP, 0));
__ add(IP, IP, Operand(1));
__ ldr(IP, compiler::Address(SP, 0));
__ add(IP, IP, compiler::Operand(1));
__ str(IP, compiler::Address(SP, 0));
__ ldr(IP, compiler::Address(SP, 0));
__ add(IP, IP, compiler::Operand(1));
__ Pop(R0);
__ mov(R0, Operand(IP));
__ mov(R0, compiler::Operand(IP));
__ Ret();
}
// Generate a dart code sequence that embeds a string object in it.
// This is used to test Embedded String objects in the instructions.
void GenerateEmbedStringInCode(Assembler* assembler, const char* str) {
void GenerateEmbedStringInCode(compiler::Assembler* assembler,
const char* str) {
__ EnterDartFrame(0); // To setup pp.
const String& string_object =
String::ZoneHandle(String::New(str, Heap::kOld));
@ -41,7 +42,7 @@ void GenerateEmbedStringInCode(Assembler* assembler, const char* str) {
// Generate a dart code sequence that embeds a smi object in it.
// This is used to test Embedded Smi objects in the instructions.
void GenerateEmbedSmiInCode(Assembler* assembler, intptr_t value) {
void GenerateEmbedSmiInCode(compiler::Assembler* assembler, intptr_t value) {
// No need to setup pp, since Smis are not stored in the object pool.
const Smi& smi_object = Smi::ZoneHandle(Smi::New(value));
__ LoadObject(R0, smi_object);

View file

@ -19,7 +19,7 @@ namespace dart {
// For other architectures, this sequence does do an increment, hence the name.
// On DBC, we don't do an increment because generating an instance call here
// would be too complex.
void GenerateIncrement(Assembler* assembler) {
void GenerateIncrement(compiler::Assembler* assembler) {
__ Frame(1);
__ LoadConstant(0, Smi::Handle(Smi::New(1)));
__ Return(0);
@ -27,7 +27,8 @@ void GenerateIncrement(Assembler* assembler) {
// Generate a dart code sequence that embeds a string object in it.
// This is used to test Embedded String objects in the instructions.
void GenerateEmbedStringInCode(Assembler* assembler, const char* str) {
void GenerateEmbedStringInCode(compiler::Assembler* assembler,
const char* str) {
const String& string_object =
String::ZoneHandle(String::New(str, Heap::kOld));
__ PushConstant(string_object);
@ -36,7 +37,7 @@ void GenerateEmbedStringInCode(Assembler* assembler, const char* str) {
// Generate a dart code sequence that embeds a smi object in it.
// This is used to test Embedded Smi objects in the instructions.
void GenerateEmbedSmiInCode(Assembler* assembler, intptr_t value) {
void GenerateEmbedSmiInCode(compiler::Assembler* assembler, intptr_t value) {
const Smi& smi_object = Smi::ZoneHandle(Smi::New(value));
__ PushConstant(smi_object);
__ ReturnTOS();


@ -16,11 +16,11 @@ namespace dart {
// Generate a simple dart code sequence.
// This is used to test Code and Instruction object creation.
void GenerateIncrement(Assembler* assembler) {
__ movl(EAX, Immediate(0));
void GenerateIncrement(compiler::Assembler* assembler) {
__ movl(EAX, compiler::Immediate(0));
__ pushl(EAX);
__ incl(Address(ESP, 0));
__ movl(ECX, Address(ESP, 0));
__ incl(compiler::Address(ESP, 0));
__ movl(ECX, compiler::Address(ESP, 0));
__ incl(ECX);
__ popl(EAX);
__ movl(EAX, ECX);
@ -29,7 +29,8 @@ void GenerateIncrement(Assembler* assembler) {
// Generate a dart code sequence that embeds a string object in it.
// This is used to test Embedded String objects in the instructions.
void GenerateEmbedStringInCode(Assembler* assembler, const char* str) {
void GenerateEmbedStringInCode(compiler::Assembler* assembler,
const char* str) {
const String& string_object =
String::ZoneHandle(String::New(str, Heap::kOld));
__ LoadObject(EAX, string_object);
@ -38,7 +39,7 @@ void GenerateEmbedStringInCode(Assembler* assembler, const char* str) {
// Generate a dart code sequence that embeds a smi object in it.
// This is used to test Embedded Smi objects in the instructions.
void GenerateEmbedSmiInCode(Assembler* assembler, intptr_t value) {
void GenerateEmbedSmiInCode(compiler::Assembler* assembler, intptr_t value) {
const Smi& smi_object = Smi::ZoneHandle(Smi::New(value));
__ LoadObject(EAX, smi_object);
__ ret();


@ -2478,9 +2478,9 @@ static RawFunction* CreateFunction(const char* name) {
// Test for Code and Instruction object creation.
ISOLATE_UNIT_TEST_CASE(Code) {
extern void GenerateIncrement(Assembler * assembler);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
extern void GenerateIncrement(compiler::Assembler * assembler);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
const Function& function = Function::Handle(CreateFunction("Test_Code"));
Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
@ -2500,9 +2500,9 @@ ISOLATE_UNIT_TEST_CASE_WITH_EXPECTATION(CodeImmutability, "Crash") {
bool stack_trace_collection_enabled =
MallocHooks::stack_trace_collection_enabled();
MallocHooks::set_stack_trace_collection_enabled(false);
extern void GenerateIncrement(Assembler * assembler);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
extern void GenerateIncrement(compiler::Assembler * assembler);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
const Function& function = Function::Handle(CreateFunction("Test_Code"));
Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
@ -2537,9 +2537,9 @@ ISOLATE_UNIT_TEST_CASE_WITH_EXPECTATION(CodeExecutability, "Crash") {
bool stack_trace_collection_enabled =
MallocHooks::stack_trace_collection_enabled();
MallocHooks::set_stack_trace_collection_enabled(false);
extern void GenerateIncrement(Assembler * assembler);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
extern void GenerateIncrement(compiler::Assembler * assembler);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
const Function& function = Function::Handle(CreateFunction("Test_Code"));
Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
@ -2573,11 +2573,12 @@ ISOLATE_UNIT_TEST_CASE_WITH_EXPECTATION(CodeExecutability, "Crash") {
// Test for Embedded String object in the instructions.
ISOLATE_UNIT_TEST_CASE(EmbedStringInCode) {
extern void GenerateEmbedStringInCode(Assembler * assembler, const char* str);
extern void GenerateEmbedStringInCode(compiler::Assembler * assembler,
const char* str);
const char* kHello = "Hello World!";
word expected_length = static_cast<word>(strlen(kHello));
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler _assembler_(&object_pool_builder);
GenerateEmbedStringInCode(&_assembler_, kHello);
const Function& function =
Function::Handle(CreateFunction("Test_EmbedStringInCode"));
@ -2597,10 +2598,11 @@ ISOLATE_UNIT_TEST_CASE(EmbedStringInCode) {
// Test for Embedded Smi object in the instructions.
ISOLATE_UNIT_TEST_CASE(EmbedSmiInCode) {
extern void GenerateEmbedSmiInCode(Assembler * assembler, intptr_t value);
extern void GenerateEmbedSmiInCode(compiler::Assembler * assembler,
intptr_t value);
const intptr_t kSmiTestValue = 5;
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler _assembler_(&object_pool_builder);
GenerateEmbedSmiInCode(&_assembler_, kSmiTestValue);
const Function& function =
Function::Handle(CreateFunction("Test_EmbedSmiInCode"));
@ -2615,10 +2617,11 @@ ISOLATE_UNIT_TEST_CASE(EmbedSmiInCode) {
#if defined(ARCH_IS_64_BIT)
// Test for Embedded Smi object in the instructions.
ISOLATE_UNIT_TEST_CASE(EmbedSmiIn64BitCode) {
extern void GenerateEmbedSmiInCode(Assembler * assembler, intptr_t value);
extern void GenerateEmbedSmiInCode(compiler::Assembler * assembler,
intptr_t value);
const intptr_t kSmiTestValue = DART_INT64_C(5) << 32;
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler _assembler_(&object_pool_builder);
GenerateEmbedSmiInCode(&_assembler_, kSmiTestValue);
const Function& function =
Function::Handle(CreateFunction("Test_EmbedSmiIn64BitCode"));
@ -2647,9 +2650,9 @@ ISOLATE_UNIT_TEST_CASE(ExceptionHandlers) {
exception_handlers.SetHandlerInfo(3, 1, 150u, kNoStackTrace, true,
TokenPosition::kNoSource, true);
extern void GenerateIncrement(Assembler * assembler);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
extern void GenerateIncrement(compiler::Assembler * assembler);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
Function::Handle(CreateFunction("Test_Code")), nullptr, &_assembler_,
@ -2689,9 +2692,9 @@ ISOLATE_UNIT_TEST_CASE(PcDescriptors) {
PcDescriptors& descriptors = PcDescriptors::Handle();
descriptors ^= builder->FinalizePcDescriptors(0);
extern void GenerateIncrement(Assembler * assembler);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
extern void GenerateIncrement(compiler::Assembler * assembler);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
Function::Handle(CreateFunction("Test_Code")), nullptr, &_assembler_,
@ -2752,9 +2755,9 @@ ISOLATE_UNIT_TEST_CASE(PcDescriptorsLargeDeltas) {
PcDescriptors& descriptors = PcDescriptors::Handle();
descriptors ^= builder->FinalizePcDescriptors(0);
extern void GenerateIncrement(Assembler * assembler);
ObjectPoolBuilder object_pool_builder;
Assembler _assembler_(&object_pool_builder);
extern void GenerateIncrement(compiler::Assembler * assembler);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler _assembler_(&object_pool_builder);
GenerateIncrement(&_assembler_);
Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
Function::Handle(CreateFunction("Test_Code")), nullptr, &_assembler_,

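The object_test.cc hunks above all stop at the truncated FinalizeCodeAndNotify call. As a hedged sketch of how such a test typically exercises the finalized code afterwards, using the names from the Code test above — the AttachCode/DartEntry steps and the expectation are assumptions about the unchanged remainder of the test, not part of this CL:

  // Sketch only (assumed follow-up, unchanged by this CL): attach the
  // finalized code to the test function, invoke it, and check the result
  // produced by GenerateIncrement.
  function.AttachCode(code);
  const Object& result = Object::Handle(
      DartEntry::InvokeFunction(function, Object::empty_array()));
  EXPECT(result.IsSmi());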

@ -16,11 +16,11 @@ namespace dart {
// Generate a simple dart code sequence.
// This is used to test Code and Instruction object creation.
void GenerateIncrement(Assembler* assembler) {
__ movq(RAX, Immediate(0));
void GenerateIncrement(compiler::Assembler* assembler) {
__ movq(RAX, compiler::Immediate(0));
__ pushq(RAX);
__ incq(Address(RSP, 0));
__ movq(RCX, Address(RSP, 0));
__ incq(compiler::Address(RSP, 0));
__ movq(RCX, compiler::Address(RSP, 0));
__ incq(RCX);
__ popq(RAX);
__ movq(RAX, RCX);
@ -29,7 +29,8 @@ void GenerateIncrement(Assembler* assembler) {
// Generate a dart code sequence that embeds a string object in it.
// This is used to test Embedded String objects in the instructions.
void GenerateEmbedStringInCode(Assembler* assembler, const char* str) {
void GenerateEmbedStringInCode(compiler::Assembler* assembler,
const char* str) {
const String& string_object =
String::ZoneHandle(String::New(str, Heap::kOld));
__ EnterStubFrame();
@ -40,9 +41,10 @@ void GenerateEmbedStringInCode(Assembler* assembler, const char* str) {
// Generate a dart code sequence that embeds a smi object in it.
// This is used to test Embedded Smi objects in the instructions.
void GenerateEmbedSmiInCode(Assembler* assembler, intptr_t value) {
void GenerateEmbedSmiInCode(compiler::Assembler* assembler, intptr_t value) {
const Smi& smi_object = Smi::ZoneHandle(Smi::New(value));
__ movq(RAX, Immediate(reinterpret_cast<int64_t>(smi_object.raw())));
__ movq(RAX,
compiler::Immediate(reinterpret_cast<int64_t>(smi_object.raw())));
__ ret();
}


@ -43,17 +43,19 @@ uword RuntimeEntry::GetEntryPoint() const {
// R9 : address of the runtime function to call.
// R4 : number of arguments to the call.
void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
Assembler* assembler,
compiler::Assembler* assembler,
intptr_t argument_count) {
if (runtime_entry->is_leaf()) {
ASSERT(argument_count == runtime_entry->argument_count());
__ LoadFromOffset(
kWord, TMP, THR,
compiler::target::Thread::OffsetFromThread(runtime_entry));
__ str(TMP, Address(THR, compiler::target::Thread::vm_tag_offset()));
__ str(TMP,
compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
__ blx(TMP);
__ LoadImmediate(TMP, VMTag::kDartCompiledTagId);
__ str(TMP, Address(THR, compiler::target::Thread::vm_tag_offset()));
__ str(TMP,
compiler::Address(THR, compiler::target::Thread::vm_tag_offset()));
ASSERT((kAbiPreservedCpuRegs & (1 << THR)) != 0);
ASSERT((kAbiPreservedCpuRegs & (1 << PP)) != 0);
} else {


@ -43,7 +43,7 @@ uword RuntimeEntry::GetEntryPoint() const {
// R5 : address of the runtime function to call.
// R4 : number of arguments to the call.
void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
Assembler* assembler,
compiler::Assembler* assembler,
intptr_t argument_count) {
if (runtime_entry->is_leaf()) {
ASSERT(argument_count == runtime_entry->argument_count());
@ -62,11 +62,12 @@ void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
__ mov(R25, SP);
__ ReserveAlignedFrameSpace(0);
__ mov(CSP, SP);
__ ldr(TMP, Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ str(TMP, Address(THR, Thread::vm_tag_offset()));
__ ldr(TMP,
compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ str(TMP, compiler::Address(THR, Thread::vm_tag_offset()));
__ blr(TMP);
__ LoadImmediate(TMP, VMTag::kDartCompiledTagId);
__ str(TMP, Address(THR, Thread::vm_tag_offset()));
__ str(TMP, compiler::Address(THR, Thread::vm_tag_offset()));
__ mov(SP, R25);
__ mov(CSP, R23);
ASSERT((kAbiPreservedCpuRegs & (1 << THR)) != 0);
@ -74,7 +75,7 @@ void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
} else {
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
__ ldr(R5, Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ ldr(R5, compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ LoadImmediate(R4, argument_count);
__ BranchLinkToRuntime();
}


@ -19,7 +19,7 @@ uword RuntimeEntry::GetEntryPoint() const {
#if !defined(DART_PRECOMPILED_RUNTIME)
void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
Assembler* assembler,
compiler::Assembler* assembler,
intptr_t argument_count) {
UNIMPLEMENTED();
}


@ -28,19 +28,20 @@ uword RuntimeEntry::GetEntryPoint() const {
// For leaf calls the caller is responsible to setup the arguments
// and look for return values based on the C calling convention.
void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
Assembler* assembler,
compiler::Assembler* assembler,
intptr_t argument_count) {
if (runtime_entry->is_leaf()) {
ASSERT(argument_count == runtime_entry->argument_count());
__ movl(EAX, Immediate(runtime_entry->GetEntryPoint()));
__ movl(Assembler::VMTagAddress(), EAX);
__ movl(EAX, compiler::Immediate(runtime_entry->GetEntryPoint()));
__ movl(compiler::Assembler::VMTagAddress(), EAX);
__ call(EAX);
__ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
__ movl(compiler::Assembler::VMTagAddress(),
compiler::Immediate(VMTag::kDartCompiledTagId));
} else {
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
__ movl(ECX, Immediate(runtime_entry->GetEntryPoint()));
__ movl(EDX, Immediate(argument_count));
__ movl(ECX, compiler::Immediate(runtime_entry->GetEntryPoint()));
__ movl(EDX, compiler::Immediate(argument_count));
__ CallToRuntime();
}
}


@ -25,22 +25,25 @@ uword RuntimeEntry::GetEntryPoint() const {
// RBX : address of the runtime function to call.
// R10 : number of arguments to the call.
void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
Assembler* assembler,
compiler::Assembler* assembler,
intptr_t argument_count) {
if (runtime_entry->is_leaf()) {
ASSERT(argument_count == runtime_entry->argument_count());
COMPILE_ASSERT(CallingConventions::kVolatileCpuRegisters & (1 << RAX));
__ movq(RAX, Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ movq(Assembler::VMTagAddress(), RAX);
__ movq(RAX,
compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ movq(compiler::Assembler::VMTagAddress(), RAX);
__ CallCFunction(RAX);
__ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
__ movq(compiler::Assembler::VMTagAddress(),
compiler::Immediate(VMTag::kDartCompiledTagId));
ASSERT((CallingConventions::kCalleeSaveCpuRegisters & (1 << THR)) != 0);
ASSERT((CallingConventions::kCalleeSaveCpuRegisters & (1 << PP)) != 0);
} else {
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
__ movq(RBX, Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ LoadImmediate(R10, Immediate(argument_count));
__ movq(RBX,
compiler::Address(THR, Thread::OffsetFromThread(runtime_entry)));
__ LoadImmediate(R10, compiler::Immediate(argument_count));
__ CallToRuntime();
}
}


@ -20,8 +20,6 @@
namespace dart {
using compiler::ObjectPoolBuilder;
DEFINE_FLAG(bool, disassemble_stubs, false, "Disassemble generated stubs.");
DECLARE_FLAG(bool, precompiled_mode);
@ -51,7 +49,7 @@ void StubCode::Init() {
entries_[k##name##Index]->set_object_pool(object_pool.raw());
void StubCode::Init() {
ObjectPoolBuilder object_pool_builder;
compiler::ObjectPoolBuilder object_pool_builder;
// Generate all the stubs.
VM_STUB_CODE_LIST(STUB_CODE_GENERATE);
@ -65,10 +63,11 @@ void StubCode::Init() {
#undef STUB_CODE_GENERATE
#undef STUB_CODE_SET_OBJECT_POOL
RawCode* StubCode::Generate(const char* name,
ObjectPoolBuilder* object_pool_builder,
void (*GenerateStub)(Assembler* assembler)) {
Assembler assembler(object_pool_builder);
RawCode* StubCode::Generate(
const char* name,
compiler::ObjectPoolBuilder* object_pool_builder,
void (*GenerateStub)(compiler::Assembler* assembler)) {
compiler::Assembler assembler(object_pool_builder);
GenerateStub(&assembler);
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
name, nullptr, &assembler, Code::PoolAttachment::kNotAttachPool,
@ -164,10 +163,10 @@ RawCode* StubCode::GetAllocationStubForClass(const Class& cls) {
Code& stub = Code::Handle(zone, cls.allocation_stub());
#if !defined(DART_PRECOMPILED_RUNTIME)
if (stub.IsNull()) {
ObjectPoolBuilder object_pool_builder;
compiler::ObjectPoolBuilder object_pool_builder;
Precompiler* precompiler = Precompiler::Instance();
ObjectPoolBuilder* wrapper =
compiler::ObjectPoolBuilder* wrapper =
FLAG_use_bare_instructions && precompiler != NULL
? precompiler->global_object_pool_builder()
: &object_pool_builder;
@ -177,7 +176,7 @@ RawCode* StubCode::GetAllocationStubForClass(const Class& cls) {
? Code::PoolAttachment::kNotAttachPool
: Code::PoolAttachment::kAttachPool;
Assembler assembler(wrapper);
compiler::Assembler assembler(wrapper);
const char* name = cls.ToCString();
compiler::StubCodeCompiler::GenerateAllocationStubForClass(&assembler, cls);
@ -245,7 +244,8 @@ RawCode* StubCode::GetAllocationStubForClass(const Class& cls) {
}
#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
RawCode* StubCode::GetBuildMethodExtractorStub(ObjectPoolBuilder* pool) {
RawCode* StubCode::GetBuildMethodExtractorStub(
compiler::ObjectPoolBuilder* pool) {
#if !defined(DART_PRECOMPILED_RUNTIME)
auto thread = Thread::Current();
auto Z = thread->zone();
@ -257,8 +257,8 @@ RawCode* StubCode::GetBuildMethodExtractorStub(ObjectPoolBuilder* pool) {
Code::ZoneHandle(Z, StubCode::GetAllocationStubForClass(closure_class));
const auto& context_allocation_stub = StubCode::AllocateContext();
ObjectPoolBuilder object_pool_builder;
Assembler assembler(pool != nullptr ? pool : &object_pool_builder);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(pool != nullptr ? pool : &object_pool_builder);
compiler::StubCodeCompiler::GenerateBuildMethodExtractorStub(
&assembler, closure_allocation_stub, context_allocation_stub);

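StubCode::Generate above is the funnel every stub goes through. A hedged sketch of driving it directly with a local object pool — the "_stub_AllocateContext" name is illustrative, GenerateAllocateContextStub is assumed from the AllocateContext stub referenced above, and a current Thread/Zone is assumed as in StubCode::Init:

  // Sketch only: generate a single stub by hand into a fresh object pool.
  compiler::ObjectPoolBuilder object_pool_builder;
  const Code& stub = Code::Handle(StubCode::Generate(
      "_stub_AllocateContext", &object_pool_builder,
      compiler::StubCodeCompiler::GenerateAllocateContextStub));

In-tree, the same call is made once per entry of VM_STUB_CODE_LIST via the STUB_CODE_GENERATE macro in Init.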

@ -61,14 +61,15 @@ class StubCode : public AllStatic {
static RawCode* GetAllocationStubForClass(const Class& cls);
#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
static RawCode* GetBuildMethodExtractorStub(ObjectPoolBuilder* pool);
static RawCode* GetBuildMethodExtractorStub(
compiler::ObjectPoolBuilder* pool);
#endif
// Generate the stub and finalize the generated code into the stub
// code executable area.
static RawCode* Generate(
const char* name,
ObjectPoolBuilder* object_pool_builder,
compiler::ObjectPoolBuilder* object_pool_builder,
void (*GenerateStub)(compiler::Assembler* assembler));
static const Code& UnoptimizedStaticCallEntry(intptr_t num_args_tested);
@ -86,7 +87,8 @@ class StubCode : public AllStatic {
#if !defined(DART_PRECOMPILED_RUNTIME)
#define GENERATE_STUB(name) \
static RawCode* BuildIsolateSpecific##name##Stub(ObjectPoolBuilder* opw) { \
static RawCode* BuildIsolateSpecific##name##Stub( \
compiler::ObjectPoolBuilder* opw) { \
return StubCode::Generate( \
"_iso_stub_" #name, opw, \
compiler::StubCodeCompiler::Generate##name##Stub); \

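As a reading aid, a hedged sketch of what the reworked GENERATE_STUB macro expands to for a hypothetical stub named Foo (Foo is illustrative and not an entry in VM_STUB_CODE_LIST):

// GENERATE_STUB(Foo) expands to roughly:
static RawCode* BuildIsolateSpecificFooStub(compiler::ObjectPoolBuilder* opw) {
  return StubCode::Generate("_iso_stub_Foo", opw,
                            compiler::StubCodeCompiler::GenerateFooStub);
}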

@ -35,7 +35,8 @@ static Function* CreateFunction(const char* name) {
}
// Test calls to stub code which calls into the runtime.
static void GenerateCallToCallRuntimeStub(Assembler* assembler, int length) {
static void GenerateCallToCallRuntimeStub(compiler::Assembler* assembler,
int length) {
const int argc = 2;
const Smi& smi_length = Smi::ZoneHandle(Smi::New(length));
__ EnterDartFrame(0);
@ -44,7 +45,7 @@ static void GenerateCallToCallRuntimeStub(Assembler* assembler, int length) {
__ PushObject(Object::null_object()); // Push argument 2: type arguments.
ASSERT(kAllocateArrayRuntimeEntry.argument_count() == argc);
__ CallRuntime(kAllocateArrayRuntimeEntry, argc);
__ add(SP, SP, Operand(argc * kWordSize));
__ add(SP, SP, compiler::Operand(argc * kWordSize));
__ Pop(R0); // Pop return value from return slot.
__ LeaveDartFrame();
__ ret();
@ -55,8 +56,8 @@ ISOLATE_UNIT_TEST_CASE(CallRuntimeStubCode) {
const Code& code);
const int length = 10;
const char* kName = "Test_CallRuntimeStubCode";
ObjectPoolBuilder object_pool_builder;
Assembler assembler(&object_pool_builder);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(&object_pool_builder);
GenerateCallToCallRuntimeStub(&assembler, length);
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
*CreateFunction("Test_CallRuntimeStubCode"), nullptr, &assembler,
@ -68,7 +69,7 @@ ISOLATE_UNIT_TEST_CASE(CallRuntimeStubCode) {
}
// Test calls to stub code which calls into a leaf runtime entry.
static void GenerateCallToCallLeafRuntimeStub(Assembler* assembler,
static void GenerateCallToCallLeafRuntimeStub(compiler::Assembler* assembler,
const char* str_value,
intptr_t lhs_index_value,
intptr_t rhs_index_value,
@ -96,8 +97,8 @@ ISOLATE_UNIT_TEST_CASE(CallLeafRuntimeStubCode) {
intptr_t rhs_index_value = 2;
intptr_t length_value = 2;
const char* kName = "Test_CallLeafRuntimeStubCode";
ObjectPoolBuilder object_pool_builder;
Assembler assembler(&object_pool_builder);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(&object_pool_builder);
GenerateCallToCallLeafRuntimeStub(&assembler, str_value, lhs_index_value,
rhs_index_value, length_value);
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(


@ -35,7 +35,8 @@ static Function* CreateFunction(const char* name) {
}
// Test calls to stub code which calls into the runtime.
static void GenerateCallToCallRuntimeStub(Assembler* assembler, int length) {
static void GenerateCallToCallRuntimeStub(compiler::Assembler* assembler,
int length) {
const int argc = 2;
const Smi& smi_length = Smi::ZoneHandle(Smi::New(length));
__ EnterDartFrame(0);
@ -54,8 +55,8 @@ ISOLATE_UNIT_TEST_CASE(CallRuntimeStubCode) {
const Code& code);
const int length = 10;
const char* kName = "Test_CallRuntimeStubCode";
ObjectPoolBuilder object_pool_builder;
Assembler assembler(&object_pool_builder);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(&object_pool_builder);
GenerateCallToCallRuntimeStub(&assembler, length);
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
*CreateFunction("Test_CallRuntimeStubCode"), nullptr, &assembler,
@ -67,7 +68,7 @@ ISOLATE_UNIT_TEST_CASE(CallRuntimeStubCode) {
}
// Test calls to stub code which calls into a leaf runtime entry.
static void GenerateCallToCallLeafRuntimeStub(Assembler* assembler,
static void GenerateCallToCallLeafRuntimeStub(compiler::Assembler* assembler,
const char* str_value,
intptr_t lhs_index_value,
intptr_t rhs_index_value,
@ -94,8 +95,8 @@ ISOLATE_UNIT_TEST_CASE(CallLeafRuntimeStubCode) {
intptr_t rhs_index_value = 2;
intptr_t length_value = 2;
const char* kName = "Test_CallLeafRuntimeStubCode";
ObjectPoolBuilder object_pool_builder;
Assembler assembler(&object_pool_builder);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(&object_pool_builder);
GenerateCallToCallLeafRuntimeStub(&assembler, str_value, lhs_index_value,
rhs_index_value, length_value);
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(


@ -35,16 +35,17 @@ static Function* CreateFunction(const char* name) {
}
// Test calls to stub code which calls into the runtime.
static void GenerateCallToCallRuntimeStub(Assembler* assembler, int length) {
static void GenerateCallToCallRuntimeStub(compiler::Assembler* assembler,
int length) {
const int argc = 2;
const Smi& smi_length = Smi::ZoneHandle(Smi::New(length));
__ enter(Immediate(0));
__ enter(compiler::Immediate(0));
__ PushObject(Object::null_object()); // Push Null object for return value.
__ PushObject(smi_length); // Push argument 1: length.
__ PushObject(Object::null_object()); // Push argument 2: type arguments.
ASSERT(kAllocateArrayRuntimeEntry.argument_count() == argc);
__ CallRuntime(kAllocateArrayRuntimeEntry, argc);
__ AddImmediate(ESP, Immediate(argc * kWordSize));
__ AddImmediate(ESP, compiler::Immediate(argc * kWordSize));
__ popl(EAX); // Pop return value from return slot.
__ leave();
__ ret();
@ -55,7 +56,7 @@ ISOLATE_UNIT_TEST_CASE(CallRuntimeStubCode) {
const Code& code);
const int length = 10;
const char* kName = "Test_CallRuntimeStubCode";
Assembler assembler(nullptr);
compiler::Assembler assembler(nullptr);
GenerateCallToCallRuntimeStub(&assembler, length);
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
*CreateFunction("Test_CallRuntimeStubCode"), nullptr, &assembler,
@ -67,7 +68,7 @@ ISOLATE_UNIT_TEST_CASE(CallRuntimeStubCode) {
}
// Test calls to stub code which calls into a leaf runtime entry.
static void GenerateCallToCallLeafRuntimeStub(Assembler* assembler,
static void GenerateCallToCallLeafRuntimeStub(compiler::Assembler* assembler,
const char* str_value,
intptr_t lhs_index_value,
intptr_t rhs_index_value,
@ -76,16 +77,16 @@ static void GenerateCallToCallLeafRuntimeStub(Assembler* assembler,
const Smi& lhs_index = Smi::ZoneHandle(Smi::New(lhs_index_value));
const Smi& rhs_index = Smi::ZoneHandle(Smi::New(rhs_index_value));
const Smi& length = Smi::ZoneHandle(Smi::New(length_value));
__ enter(Immediate(0));
__ enter(compiler::Immediate(0));
__ ReserveAlignedFrameSpace(4 * kWordSize);
__ LoadObject(EAX, str);
__ movl(Address(ESP, 0), EAX); // Push argument 1.
__ movl(compiler::Address(ESP, 0), EAX); // Push argument 1.
__ LoadObject(EAX, lhs_index);
__ movl(Address(ESP, kWordSize), EAX); // Push argument 2.
__ movl(compiler::Address(ESP, kWordSize), EAX); // Push argument 2.
__ LoadObject(EAX, rhs_index);
__ movl(Address(ESP, 2 * kWordSize), EAX); // Push argument 3.
__ movl(compiler::Address(ESP, 2 * kWordSize), EAX); // Push argument 3.
__ LoadObject(EAX, length);
__ movl(Address(ESP, 3 * kWordSize), EAX); // Push argument 4.
__ movl(compiler::Address(ESP, 3 * kWordSize), EAX); // Push argument 4.
__ CallRuntime(kCaseInsensitiveCompareUCS2RuntimeEntry, 4);
__ leave();
__ ret(); // Return value is in EAX.
@ -99,7 +100,7 @@ ISOLATE_UNIT_TEST_CASE(CallLeafRuntimeStubCode) {
intptr_t rhs_index_value = 2;
intptr_t length_value = 2;
const char* kName = "Test_CallLeafRuntimeStubCode";
Assembler assembler(nullptr);
compiler::Assembler assembler(nullptr);
GenerateCallToCallLeafRuntimeStub(&assembler, str_value, lhs_index_value,
rhs_index_value, length_value);
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(


@ -35,7 +35,8 @@ static Function* CreateFunction(const char* name) {
}
// Test calls to stub code which calls into the runtime.
static void GenerateCallToCallRuntimeStub(Assembler* assembler, int length) {
static void GenerateCallToCallRuntimeStub(compiler::Assembler* assembler,
int length) {
const int argc = 2;
const Smi& smi_length = Smi::ZoneHandle(Smi::New(length));
__ EnterStubFrame();
@ -44,7 +45,7 @@ static void GenerateCallToCallRuntimeStub(Assembler* assembler, int length) {
__ PushObject(Object::null_object()); // Push argument 2: type arguments.
ASSERT(kAllocateArrayRuntimeEntry.argument_count() == argc);
__ CallRuntime(kAllocateArrayRuntimeEntry, argc);
__ AddImmediate(RSP, Immediate(argc * kWordSize));
__ AddImmediate(RSP, compiler::Immediate(argc * kWordSize));
__ popq(RAX); // Pop return value from return slot.
__ LeaveStubFrame();
__ ret();
@ -55,8 +56,8 @@ ISOLATE_UNIT_TEST_CASE(CallRuntimeStubCode) {
const Code& code);
const int length = 10;
const char* kName = "Test_CallRuntimeStubCode";
ObjectPoolBuilder object_pool_builder;
Assembler assembler(&object_pool_builder);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(&object_pool_builder);
GenerateCallToCallRuntimeStub(&assembler, length);
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
*CreateFunction("Test_CallRuntimeStubCode"), nullptr, &assembler,
@ -68,7 +69,7 @@ ISOLATE_UNIT_TEST_CASE(CallRuntimeStubCode) {
}
// Test calls to stub code which calls into a leaf runtime entry.
static void GenerateCallToCallLeafRuntimeStub(Assembler* assembler,
static void GenerateCallToCallLeafRuntimeStub(compiler::Assembler* assembler,
const char* str_value,
intptr_t lhs_index_value,
intptr_t rhs_index_value,
@ -96,8 +97,8 @@ ISOLATE_UNIT_TEST_CASE(CallLeafRuntimeStubCode) {
intptr_t rhs_index_value = 2;
intptr_t length_value = 2;
const char* kName = "Test_CallLeafRuntimeStubCode";
ObjectPoolBuilder object_pool_builder;
Assembler assembler(&object_pool_builder);
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(&object_pool_builder);
GenerateCallToCallLeafRuntimeStub(&assembler, str_value, lhs_index_value,
rhs_index_value, length_value);
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(


@ -188,7 +188,7 @@ RawCode* TypeTestingStubGenerator::BuildCodeForType(const Type& type) {
ASSERT(!type_class.IsNull());
// To use the already-defined __ Macro !
Assembler assembler(nullptr);
compiler::Assembler assembler(nullptr);
BuildOptimizedTypeTestStub(&assembler, hi, type, type_class);
const char* name = namer_.StubNameForType(type);
@ -216,7 +216,7 @@ RawCode* TypeTestingStubGenerator::BuildCodeForType(const Type& type) {
}
void TypeTestingStubGenerator::BuildOptimizedTypeTestStubFastCases(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Type& type,
const Class& type_class,
@ -228,12 +228,12 @@ void TypeTestingStubGenerator::BuildOptimizedTypeTestStubFastCases(
// Fast case for 'int'.
if (type.raw() == Type::IntType()) {
Label non_smi_value;
compiler::Label non_smi_value;
__ BranchIfNotSmi(instance_reg, &non_smi_value);
__ Ret();
__ Bind(&non_smi_value);
} else if (type.IsDartFunctionType()) {
Label continue_checking;
compiler::Label continue_checking;
__ CompareImmediate(class_id_reg, kClosureCid);
__ BranchIf(NOT_EQUAL, &continue_checking);
__ Ret();
@ -274,7 +274,7 @@ void TypeTestingStubGenerator::BuildOptimizedTypeTestStubFastCases(
}
// Fast case for 'null'.
Label non_null;
compiler::Label non_null;
__ CompareObject(instance_reg, Object::null_object());
__ BranchIf(NOT_EQUAL, &non_null);
__ Ret();
@ -282,12 +282,12 @@ void TypeTestingStubGenerator::BuildOptimizedTypeTestStubFastCases(
}
void TypeTestingStubGenerator::BuildOptimizedSubtypeRangeCheck(
Assembler* assembler,
compiler::Assembler* assembler,
const CidRangeVector& ranges,
Register class_id_reg,
Register instance_reg,
bool smi_is_ok) {
Label cid_range_failed, is_subtype;
compiler::Label cid_range_failed, is_subtype;
if (smi_is_ok) {
__ LoadClassIdMayBeSmi(class_id_reg, instance_reg);
@ -305,7 +305,7 @@ void TypeTestingStubGenerator::BuildOptimizedSubtypeRangeCheck(
void TypeTestingStubGenerator::
BuildOptimizedSubclassRangeCheckWithTypeArguments(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Class& type_class,
const TypeArguments& tp,
@ -314,7 +314,7 @@ void TypeTestingStubGenerator::
const Register instance_reg,
const Register instance_type_args_reg) {
// a) First we make a quick sub*class* cid-range check.
Label check_failed;
compiler::Label check_failed;
ASSERT(!type_class.is_implemented());
const CidRangeVector& ranges = hi->SubclassRangesForClass(type_class);
BuildOptimizedSubclassRangeCheck(assembler, ranges, class_id_reg,
@ -322,10 +322,11 @@ void TypeTestingStubGenerator::
// fall through to continue
// b) Then we'll load the values for the type parameters.
__ LoadField(instance_type_args_reg,
FieldAddress(instance_reg,
compiler::target::Class::TypeArgumentsFieldOffset(
type_class)));
__ LoadField(
instance_type_args_reg,
compiler::FieldAddress(
instance_reg,
compiler::target::Class::TypeArgumentsFieldOffset(type_class)));
// The kernel frontend should fill in any non-assigned type parameters on
// construction with dynamic/Object, so we should never get the null type
@ -334,7 +335,7 @@ void TypeTestingStubGenerator::
// TODO(kustermann): We could consider not using "null" as type argument
// vector representing all-dynamic to avoid this extra check (which will be
// uncommon because most Dart code in 2.0 will be strongly typed)!
Label process_done;
compiler::Label process_done;
__ CompareObject(instance_type_args_reg, Object::null_object());
__ BranchIf(NOT_EQUAL, &process_done);
__ Ret();
@ -363,21 +364,21 @@ void TypeTestingStubGenerator::
}
void TypeTestingStubGenerator::BuildOptimizedSubclassRangeCheck(
Assembler* assembler,
compiler::Assembler* assembler,
const CidRangeVector& ranges,
Register class_id_reg,
Register instance_reg,
Label* check_failed) {
compiler::Label* check_failed) {
__ LoadClassIdMayBeSmi(class_id_reg, instance_reg);
Label is_subtype;
compiler::Label is_subtype;
FlowGraphCompiler::GenerateCidRangesCheck(assembler, class_id_reg, ranges,
&is_subtype, check_failed, true);
__ Bind(&is_subtype);
}
void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const AbstractType& type_arg,
intptr_t type_param_value_offset_i,
@ -386,19 +387,19 @@ void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
const Register instantiator_type_args_reg,
const Register function_type_args_reg,
const Register own_type_arg_reg,
Label* check_failed) {
compiler::Label* check_failed) {
if (type_arg.raw() != Type::ObjectType() &&
type_arg.raw() != Type::DynamicType()) {
// TODO(kustermann): Even though it should be safe to use TMP here, we
// should avoid using TMP outside the assembler. Try to find a free
// register to use here!
__ LoadField(TMP,
FieldAddress(instance_type_args_reg,
compiler::target::TypeArguments::type_at_offset(
type_param_value_offset_i)));
__ LoadField(
class_id_reg,
FieldAddress(TMP, compiler::target::Type::type_class_id_offset()));
__ LoadField(TMP, compiler::FieldAddress(
instance_type_args_reg,
compiler::target::TypeArguments::type_at_offset(
type_param_value_offset_i)));
__ LoadField(class_id_reg,
compiler::FieldAddress(
TMP, compiler::target::Type::type_class_id_offset()));
if (type_arg.IsTypeParameter()) {
const TypeParameter& type_param = TypeParameter::Cast(type_arg);
@ -406,13 +407,14 @@ void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
? instantiator_type_args_reg
: function_type_args_reg;
__ LoadField(own_type_arg_reg,
FieldAddress(kTypeArgumentsReg,
compiler::target::TypeArguments::type_at_offset(
type_param.index())));
compiler::FieldAddress(
kTypeArgumentsReg,
compiler::target::TypeArguments::type_at_offset(
type_param.index())));
__ CompareWithFieldValue(
class_id_reg,
FieldAddress(own_type_arg_reg,
compiler::target::Type::type_class_id_offset()));
class_id_reg, compiler::FieldAddress(
own_type_arg_reg,
compiler::target::Type::type_class_id_offset()));
__ BranchIf(NOT_EQUAL, check_failed);
} else {
const Class& type_class = Class::Handle(type_arg.type_class());
@ -421,7 +423,7 @@ void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
/*include_abstract=*/true,
/*exclude_null=*/false);
Label is_subtype;
compiler::Label is_subtype;
__ SmiUntag(class_id_reg);
FlowGraphCompiler::GenerateCidRangesCheck(
assembler, class_id_reg, ranges, &is_subtype, check_failed, true);

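The range-check helpers above all reduce to comparing a loaded class id against one or more [cid_start, cid_end] intervals. A hedged, architecture-neutral sketch of the single-range case — kCidStart and kCidEnd are hypothetical constants, and the real instruction selection is done by FlowGraphCompiler::GenerateCidRangesCheck, which this CL does not touch:

  // Sketch only: accept the instance when its class id lies in one range.
  compiler::Label check_failed, is_subtype;
  __ LoadClassIdMayBeSmi(class_id_reg, instance_reg);
  __ CompareImmediate(class_id_reg, kCidStart);
  __ BranchIf(LESS, &check_failed);
  __ CompareImmediate(class_id_reg, kCidEnd);
  __ BranchIf(GREATER, &check_failed);
  __ Bind(&is_subtype);
  __ Ret();
  __ Bind(&check_failed);  // Falls through to the slow type-test path.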

@ -54,33 +54,34 @@ class TypeTestingStubGenerator {
#if !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)
#if !defined(DART_PRECOMPILED_RUNTIME)
RawCode* BuildCodeForType(const Type& type);
static void BuildOptimizedTypeTestStub(Assembler* assembler,
static void BuildOptimizedTypeTestStub(compiler::Assembler* assembler,
HierarchyInfo* hi,
const Type& type,
const Class& type_class);
static void BuildOptimizedTypeTestStubFastCases(Assembler* assembler,
HierarchyInfo* hi,
const Type& type,
const Class& type_class,
Register instance_reg,
Register class_id_reg);
static void BuildOptimizedTypeTestStubFastCases(
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Type& type,
const Class& type_class,
Register instance_reg,
Register class_id_reg);
static void BuildOptimizedSubtypeRangeCheck(Assembler* assembler,
static void BuildOptimizedSubtypeRangeCheck(compiler::Assembler* assembler,
const CidRangeVector& ranges,
Register class_id_reg,
Register instance_reg,
bool smi_is_ok);
static void BuildOptimizedSubclassRangeCheckWithTypeArguments(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Class& type_class,
const TypeArguments& type_parameters,
const TypeArguments& type_arguments);
static void BuildOptimizedSubclassRangeCheckWithTypeArguments(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Class& type_class,
const TypeArguments& type_parameters,
@ -89,21 +90,21 @@ class TypeTestingStubGenerator {
const Register instance_reg,
const Register instance_type_args_reg);
static void BuildOptimizedSubclassRangeCheck(Assembler* assembler,
static void BuildOptimizedSubclassRangeCheck(compiler::Assembler* assembler,
const CidRangeVector& ranges,
Register class_id_reg,
Register instance_reg,
Label* check_failed);
compiler::Label* check_failed);
static void BuildOptimizedTypeArgumentValueCheck(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const AbstractType& type_arg,
intptr_t type_param_value_offset_i,
Label* check_failed);
compiler::Label* check_failed);
static void BuildOptimizedTypeArgumentValueCheck(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const AbstractType& type_arg,
intptr_t type_param_value_offset_i,
@ -112,7 +113,7 @@ class TypeTestingStubGenerator {
const Register instantiator_type_args_reg,
const Register function_type_args_reg,
const Register type_arg_reg,
Label* check_failed);
compiler::Label* check_failed);
#endif // !defined(DART_PRECOMPILED_RUNTIME)
#endif // !defined(TARGET_ARCH_DBC) && !defined(TARGET_ARCH_IA32)


@ -13,7 +13,7 @@
namespace dart {
void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Type& type,
const Class& type_class) {
@ -24,17 +24,19 @@ void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
kInstanceReg, kClassIdReg);
__ ldr(CODE_REG,
Address(THR, compiler::target::Thread::slow_type_test_stub_offset()));
__ Branch(
FieldAddress(CODE_REG, compiler::target::Code::entry_point_offset()));
compiler::Address(
THR, compiler::target::Thread::slow_type_test_stub_offset()));
__ Branch(compiler::FieldAddress(
CODE_REG, compiler::target::Code::entry_point_offset()));
}
void TypeTestingStubGenerator::
BuildOptimizedSubclassRangeCheckWithTypeArguments(Assembler* assembler,
HierarchyInfo* hi,
const Class& type_class,
const TypeArguments& tp,
const TypeArguments& ta) {
BuildOptimizedSubclassRangeCheckWithTypeArguments(
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Class& type_class,
const TypeArguments& tp,
const TypeArguments& ta) {
const Register kInstanceReg = R0;
const Register kInstanceTypeArguments = NOTFP;
const Register kClassIdReg = R9;
@ -45,11 +47,11 @@ void TypeTestingStubGenerator::
}
void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const AbstractType& type_arg,
intptr_t type_param_value_offset_i,
Label* check_failed) {
compiler::Label* check_failed) {
const Register kInstantiatorTypeArgumentsReg = R2;
const Register kFunctionTypeArgumentsReg = R1;
const Register kInstanceTypeArguments = NOTFP;


@ -13,7 +13,7 @@
namespace dart {
void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Type& type,
const Class& type_class) {
@ -23,17 +23,19 @@ void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class,
kInstanceReg, kClassIdReg);
__ ldr(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
__ ldr(R9, FieldAddress(CODE_REG, Code::entry_point_offset()));
__ ldr(CODE_REG,
compiler::Address(THR, Thread::slow_type_test_stub_offset()));
__ ldr(R9, compiler::FieldAddress(CODE_REG, Code::entry_point_offset()));
__ br(R9);
}
void TypeTestingStubGenerator::
BuildOptimizedSubclassRangeCheckWithTypeArguments(Assembler* assembler,
HierarchyInfo* hi,
const Class& type_class,
const TypeArguments& tp,
const TypeArguments& ta) {
BuildOptimizedSubclassRangeCheckWithTypeArguments(
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Class& type_class,
const TypeArguments& tp,
const TypeArguments& ta) {
const Register kInstanceReg = R0;
const Register kInstanceTypeArguments = R7;
const Register kClassIdReg = R9;
@ -44,11 +46,11 @@ void TypeTestingStubGenerator::
}
void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const AbstractType& type_arg,
intptr_t type_param_value_offset_i,
Label* check_failed) {
compiler::Label* check_failed) {
const Register kInstantiatorTypeArgumentsReg = R1;
const Register kFunctionTypeArgumentsReg = R2;
const Register kInstanceTypeArguments = R7;


@ -13,7 +13,7 @@
namespace dart {
void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Type& type,
const Class& type_class) {
@ -23,16 +23,18 @@ void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class,
kInstanceReg, kClassIdReg);
__ movq(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
__ jmp(FieldAddress(CODE_REG, Code::entry_point_offset()));
__ movq(CODE_REG,
compiler::Address(THR, Thread::slow_type_test_stub_offset()));
__ jmp(compiler::FieldAddress(CODE_REG, Code::entry_point_offset()));
}
void TypeTestingStubGenerator::
BuildOptimizedSubclassRangeCheckWithTypeArguments(Assembler* assembler,
HierarchyInfo* hi,
const Class& type_class,
const TypeArguments& tp,
const TypeArguments& ta) {
BuildOptimizedSubclassRangeCheckWithTypeArguments(
compiler::Assembler* assembler,
HierarchyInfo* hi,
const Class& type_class,
const TypeArguments& tp,
const TypeArguments& ta) {
const Register kInstanceReg = RAX;
const Register kInstanceTypeArguments = RSI;
const Register kClassIdReg = TMP;
@ -43,11 +45,11 @@ void TypeTestingStubGenerator::
}
void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
Assembler* assembler,
compiler::Assembler* assembler,
HierarchyInfo* hi,
const AbstractType& type_arg,
intptr_t type_param_value_offset_i,
Label* check_failed) {
compiler::Label* check_failed) {
const Register kInstanceTypeArguments = RSI;
const Register kInstantiatorTypeArgumentsReg = RDX;
const Register kFunctionTypeArgumentsReg = RCX;


@ -86,12 +86,12 @@
// The ASSEMBLER_TEST_GENERATE macro is used to generate a unit test
// for the assembler.
#define ASSEMBLER_TEST_GENERATE(name, assembler) \
void AssemblerTestGenerate##name(Assembler* assembler)
void AssemblerTestGenerate##name(compiler::Assembler* assembler)
// The ASSEMBLER_TEST_EXTERN macro is used to declare a unit test
// for the assembler.
#define ASSEMBLER_TEST_EXTERN(name) \
extern void AssemblerTestGenerate##name(Assembler* assembler);
extern void AssemblerTestGenerate##name(compiler::Assembler* assembler);
// The ASSEMBLER_TEST_RUN macro is used to execute the assembler unit
// test generated using the ASSEMBLER_TEST_GENERATE macro.
@ -103,8 +103,8 @@
bool use_far_branches = false; \
LongJumpScope jump; \
if (setjmp(*jump.Set()) == 0) { \
ObjectPoolBuilder object_pool_builder; \
Assembler assembler(&object_pool_builder, use_far_branches); \
compiler::ObjectPoolBuilder object_pool_builder; \
compiler::Assembler assembler(&object_pool_builder, use_far_branches); \
AssemblerTest test("" #name, &assembler); \
AssemblerTestGenerate##name(test.assembler()); \
test.Assemble(); \
@ -116,8 +116,8 @@
const Error& error = Error::Handle(Thread::Current()->sticky_error()); \
if (error.raw() == Object::branch_offset_error().raw()) { \
bool use_far_branches = true; \
ObjectPoolBuilder object_pool_builder; \
Assembler assembler(&object_pool_builder, use_far_branches); \
compiler::ObjectPoolBuilder object_pool_builder; \
compiler::Assembler assembler(&object_pool_builder, use_far_branches); \
AssemblerTest test("" #name, &assembler); \
AssemblerTestGenerate##name(test.assembler()); \
test.Assemble(); \
@ -462,14 +462,14 @@ struct is_double<double> {
class AssemblerTest {
public:
AssemblerTest(const char* name, Assembler* assembler)
AssemblerTest(const char* name, compiler::Assembler* assembler)
: name_(name), assembler_(assembler), code_(Code::ZoneHandle()) {
ASSERT(name != NULL);
ASSERT(assembler != NULL);
}
~AssemblerTest() {}
Assembler* assembler() const { return assembler_; }
compiler::Assembler* assembler() const { return assembler_; }
const Code& code() const { return code_; }
@ -590,7 +590,7 @@ class AssemblerTest {
private:
const char* name_;
Assembler* assembler_;
compiler::Assembler* assembler_;
Code& code_;
static const intptr_t DISASSEMBLY_SIZE = 10240;
char disassembly_[DISASSEMBLY_SIZE];
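Putting the macros and the AssemblerTest helper together, a hedged sketch of an architecture-specific assembler test after this change — the test name, the x64-flavoured body, and the EXECUTE_TEST_CODE_INT64/entry() plumbing are assumptions drawn from the existing assembler tests rather than from this CL:

ASSEMBLER_TEST_GENERATE(Simple, assembler) {
  // Immediate must now be spelled compiler::Immediate inside namespace dart.
  __ movq(RAX, compiler::Immediate(42));
  __ ret();
}

ASSEMBLER_TEST_RUN(Simple, test) {
  typedef int64_t (*SimpleCode)();
  EXPECT_EQ(42, EXECUTE_TEST_CODE_INT64(SimpleCode, test->entry()));
}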