[vm] Namespace constants like offsets and sizeofs to use runtime_api.h

Second sub-CL of https://dart-review.googlesource.com/c/sdk/+/100644
The first was here https://dart-review.googlesource.com/c/sdk/+/103487

Now that offsets_extractor is checked in, and we have a big header full
of hard-coded constants, the next step is to make sure everything is
using those constants. This is essentially everything in the original
CL, except the new simarm_x64 architecture.

Bug: https://github.com/dart-lang/sdk/issues/36839
Change-Id: I236e4f30aa1df6d92209891c983b792d1835b608
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/104286
Commit-Queue: Liam Appelbe <liama@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Authored by Liam Appelbe on 2019-06-10 02:55:25 +00:00; committed by commit-bot@chromium.org
parent 51046368c5
commit e4196ce8c6
58 changed files with 2916 additions and 2208 deletions
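The whole CL follows one pattern: call sites that previously used host-side constants and layout helpers (kWordSize, kSmiMax, Smi::RawValue, Field offsets, and so on) now go through the compiler::target namespace backed by runtime_api.h, so generated code is described in terms of the target architecture rather than the host. The following is only an illustrative sketch of that idea, not code from this CL: the namespace and constant names mirror runtime_api.h, but the values are placeholders for a hypothetical 32-bit target (the real header derives them via offsets_extractor).

```cpp
// Illustrative sketch only -- not part of this CL. Values are placeholders
// for an assumed 32-bit target; the real constants are generated.
#include <cassert>
#include <cstdint>

namespace compiler {
namespace target {

// Constants describing the *target* architecture being compiled for, which
// may differ from the host when cross-compiling (e.g. an x64 host targeting
// 32-bit ARM).
constexpr int kWordSize = 4;                 // 32-bit target word.
constexpr int kSmiBits = kWordSize * 8 - 2;  // Payload bits of a tagged Smi.
constexpr int64_t kSmiMax = (static_cast<int64_t>(1) << kSmiBits) - 1;
constexpr int64_t kSmiMin = -(static_cast<int64_t>(1) << kSmiBits);

// Checks whether a value fits in a *target* Smi, independent of the host
// word size -- the kind of helper the call sites in this CL switch to.
constexpr bool IsSmi(int64_t value) {
  return value >= kSmiMin && value <= kSmiMax;
}

}  // namespace target
}  // namespace compiler

int main() {
  // A value that is a Smi on a 64-bit host but not on the 32-bit target.
  const int64_t v = static_cast<int64_t>(1) << 40;
  assert(!compiler::target::IsSmi(v));
  assert(compiler::target::IsSmi(12345));
  return 0;
}
```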


@ -464,19 +464,14 @@ void ClassTable::UpdatePromoted() {
}
}
intptr_t ClassTable::TableOffsetFor(intptr_t cid) {
return OFFSET_OF(ClassTable, class_heap_stats_table_);
}
intptr_t ClassTable::ClassOffsetFor(intptr_t cid) {
return cid * sizeof(ClassHeapStats); // NOLINT
}
intptr_t ClassTable::CounterOffsetFor(intptr_t cid, bool is_new_space) {
intptr_t ClassTable::NewSpaceCounterOffsetFor(intptr_t cid) {
const intptr_t class_offset = ClassOffsetFor(cid);
const intptr_t count_field_offset =
is_new_space ? ClassHeapStats::allocated_since_gc_new_space_offset()
: ClassHeapStats::allocated_since_gc_old_space_offset();
ClassHeapStats::allocated_since_gc_new_space_offset();
return class_offset + count_field_offset;
}
@ -484,11 +479,10 @@ intptr_t ClassTable::StateOffsetFor(intptr_t cid) {
return ClassOffsetFor(cid) + ClassHeapStats::state_offset();
}
intptr_t ClassTable::SizeOffsetFor(intptr_t cid, bool is_new_space) {
intptr_t ClassTable::NewSpaceSizeOffsetFor(intptr_t cid) {
const uword class_offset = ClassOffsetFor(cid);
const uword size_field_offset =
is_new_space ? ClassHeapStats::allocated_size_since_gc_new_space_offset()
: ClassHeapStats::allocated_size_since_gc_old_space_offset();
ClassHeapStats::allocated_size_since_gc_new_space_offset();
return class_offset + size_field_offset;
}


@ -301,16 +301,13 @@ class ClassTable {
}
// Used by the generated code.
static intptr_t TableOffsetFor(intptr_t cid);
// Used by the generated code.
static intptr_t CounterOffsetFor(intptr_t cid, bool is_new_space);
static intptr_t NewSpaceCounterOffsetFor(intptr_t cid);
// Used by the generated code.
static intptr_t StateOffsetFor(intptr_t cid);
// Used by the generated code.
static intptr_t SizeOffsetFor(intptr_t cid, bool is_new_space);
static intptr_t NewSpaceSizeOffsetFor(intptr_t cid);
ClassHeapStats* StatsWithUpdatedSize(intptr_t cid);


@ -1818,7 +1818,8 @@ class RODataSerializationCluster : public SerializationCluster {
s->TraceDataOffset(offset);
ASSERT(Utils::IsAligned(offset, kObjectAlignment));
ASSERT(offset > running_offset);
s->WriteUnsigned((offset - running_offset) >> kObjectAlignmentLog2);
s->WriteUnsigned((offset - running_offset) >>
compiler::target::ObjectAlignment::kObjectAlignmentLog2);
running_offset = offset;
s->TraceEndWritingObject();
}


@ -19,7 +19,7 @@ void DescriptorList::AddDescriptor(RawPcDescriptors::Kind kind,
// When precompiling, we only use pc descriptors for exceptions.
if (!FLAG_precompiled_mode || try_index != -1) {
intptr_t merged_kind_try =
int32_t merged_kind_try =
RawPcDescriptors::MergedKindTry::Encode(kind, try_index);
PcDescriptors::EncodeInteger(&encoded_data_, merged_kind_try);


@ -431,7 +431,7 @@ Definition* AotCallSpecializer::TryOptimizeMod(TemplateDartCall<0>* instr,
return nullptr; // non-smi mask
}
const int64_t modulus = Utils::Abs(value);
if (!Utils::IsPowerOfTwo(modulus) || !Smi::IsValid(modulus - 1)) {
if (!Utils::IsPowerOfTwo(modulus) || !compiler::target::IsSmi(modulus - 1)) {
return nullptr;
}


@ -430,7 +430,7 @@ void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
ASSERT(kSmiTagShift == 1);
ASSERT(kSmiTag == 0);
TestBothArgumentsSmis(assembler, normal_ir_body);
__ CompareImmediate(R0, target::ToRawSmi(target::Smi::kBits));
__ CompareImmediate(R0, target::ToRawSmi(target::kSmiBits));
__ b(normal_ir_body, HI);
__ SmiUntag(R0);
@ -1450,7 +1450,7 @@ void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
__ ldr(R0, FieldAddress(R1, target::Double::value_offset()));
__ ldr(R1, FieldAddress(R1, target::Double::value_offset() + 4));
__ eor(R0, R0, Operand(R1));
__ AndImmediate(R0, R0, kSmiMax);
__ AndImmediate(R0, R0, target::kSmiMax);
__ SmiTag(R0);
__ Ret();
@ -1495,7 +1495,7 @@ void AsmIntrinsifier::Random_nextState(Assembler* assembler,
// Receiver.
__ ldr(R0, Address(SP, 0 * target::kWordSize));
// Field '_state'.
__ ldr(R1, FieldAddress(R0, LookupFieldOffsetInBytes(state_field)));
__ ldr(R1, FieldAddress(R0, target::Field::OffsetOf(state_field)));
// Addresses of _state[0] and _state[1].
const int64_t disp_0 =
@ -1608,8 +1608,7 @@ void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
__ Bind(&use_declaration_type);
__ LoadClassById(R2, R1); // Overwrites R1.
__ ldrh(R3, FieldAddress(
R2, target::Class::num_type_arguments_offset_in_bytes()));
__ ldrh(R3, FieldAddress(R2, target::Class::num_type_arguments_offset()));
__ CompareImmediate(R3, 0);
__ b(normal_ir_body, NE);
@ -1644,8 +1643,7 @@ void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
// Check if there are no type arguments. In this case we can return true.
// Otherwise fall through into the runtime to handle comparison.
__ LoadClassById(R3, R1);
__ ldrh(R3, FieldAddress(
R3, target::Class::num_type_arguments_offset_in_bytes()));
__ ldrh(R3, FieldAddress(R3, target::Class::num_type_arguments_offset()));
__ CompareImmediate(R3, 0);
__ b(normal_ir_body, NE);


@ -430,7 +430,7 @@ void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
const Register result = R0;
TestBothArgumentsSmis(assembler, normal_ir_body);
__ CompareImmediate(right, target::ToRawSmi(target::Smi::kBits));
__ CompareImmediate(right, target::ToRawSmi(target::kSmiBits));
__ b(normal_ir_body, CS);
// Left is not a constant.
@ -1520,7 +1520,7 @@ void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
__ Bind(&double_hash);
__ fmovrd(R0, V0);
__ eor(R0, R0, Operand(R0, LSR, 32));
__ AndImmediate(R0, R0, kSmiMax);
__ AndImmediate(R0, R0, target::kSmiMax);
__ SmiTag(R0);
__ ret();
@ -1669,8 +1669,7 @@ void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
__ Bind(&use_declaration_type);
__ LoadClassById(R2, R1); // Overwrites R1.
__ ldr(R3,
FieldAddress(R2, target::Class::num_type_arguments_offset_in_bytes()),
__ ldr(R3, FieldAddress(R2, target::Class::num_type_arguments_offset()),
kHalfword);
__ CompareImmediate(R3, 0);
__ b(normal_ir_body, NE);
@ -1706,8 +1705,7 @@ void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
// Check if there are no type arguments. In this case we can return true.
// Otherwise fall through into the runtime to handle comparison.
__ LoadClassById(R3, R1); // Overwrites R1.
__ ldr(R3,
FieldAddress(R3, target::Class::num_type_arguments_offset_in_bytes()),
__ ldr(R3, FieldAddress(R3, target::Class::num_type_arguments_offset()),
kHalfword);
__ CompareImmediate(R3, 0);
__ b(normal_ir_body, NE);


@ -440,7 +440,7 @@ void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
Label overflow;
TestBothArgumentsSmis(assembler, normal_ir_body);
// Shift value is in EAX. Compare with tagged Smi.
__ cmpl(EAX, Immediate(target::ToRawSmi(target::Smi::kBits)));
__ cmpl(EAX, Immediate(target::ToRawSmi(target::kSmiBits)));
__ j(ABOVE_EQUAL, normal_ir_body, Assembler::kNearJump);
__ SmiUntag(EAX);
@ -1553,7 +1553,7 @@ void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
__ movl(EAX, FieldAddress(ECX, target::Double::value_offset()));
__ movl(ECX, FieldAddress(ECX, target::Double::value_offset() + 4));
__ xorl(EAX, ECX);
__ andl(EAX, Immediate(kSmiMax));
__ andl(EAX, Immediate(target::kSmiMax));
__ SmiTag(EAX);
__ ret();
@ -1711,8 +1711,7 @@ void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
// Object is neither double, nor integer, nor string.
__ Bind(&use_declaration_type);
__ LoadClassById(EBX, EDI);
__ movzxw(EDI, FieldAddress(
EBX, target::Class::num_type_arguments_offset_in_bytes()));
__ movzxw(EDI, FieldAddress(EBX, target::Class::num_type_arguments_offset()));
__ cmpl(EDI, Immediate(0));
__ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
__ movl(EAX, FieldAddress(EBX, target::Class::declaration_type_offset()));
@ -1747,8 +1746,7 @@ void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
// Check if there are no type arguments. In this case we can return true.
// Otherwise fall through into the runtime to handle comparison.
__ LoadClassById(EBX, EDI);
__ movzxw(EBX, FieldAddress(
EBX, target::Class::num_type_arguments_offset_in_bytes()));
__ movzxw(EBX, FieldAddress(EBX, target::Class::num_type_arguments_offset()));
__ cmpl(EBX, Immediate(0));
__ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);


@ -494,7 +494,7 @@ void AsmIntrinsifier::Integer_shl(Assembler* assembler, Label* normal_ir_body) {
Label overflow;
TestBothArgumentsSmis(assembler, normal_ir_body);
// Shift value is in RAX. Compare with tagged Smi.
__ cmpq(RAX, Immediate(target::ToRawSmi(target::Smi::kBits)));
__ cmpq(RAX, Immediate(target::ToRawSmi(target::kSmiBits)));
__ j(ABOVE_EQUAL, normal_ir_body, Assembler::kNearJump);
__ SmiUntag(RAX);
@ -1465,7 +1465,7 @@ void AsmIntrinsifier::Double_hashCode(Assembler* assembler,
__ movq(RCX, RAX);
__ shrq(RCX, Immediate(32));
__ xorq(RAX, RCX);
__ andq(RAX, Immediate(kSmiMax));
__ andq(RAX, Immediate(target::kSmiMax));
__ SmiTag(RAX);
__ ret();
@ -1623,8 +1623,7 @@ void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
// Object is neither double, nor integer, nor string.
__ Bind(&use_declaration_type);
__ LoadClassById(RDI, RCX);
__ movzxw(RCX, FieldAddress(
RDI, target::Class::num_type_arguments_offset_in_bytes()));
__ movzxw(RCX, FieldAddress(RDI, target::Class::num_type_arguments_offset()));
__ cmpq(RCX, Immediate(0));
__ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);
__ movq(RAX, FieldAddress(RDI, target::Class::declaration_type_offset()));
@ -1659,8 +1658,7 @@ void AsmIntrinsifier::ObjectHaveSameRuntimeType(Assembler* assembler,
// Check if there are no type arguments. In this case we can return true.
// Otherwise fall through into the runtime to handle comparison.
__ LoadClassById(RDI, RCX);
__ movzxw(RCX, FieldAddress(
RDI, target::Class::num_type_arguments_offset_in_bytes()));
__ movzxw(RCX, FieldAddress(RDI, target::Class::num_type_arguments_offset()));
__ cmpq(RCX, Immediate(0));
__ j(NOT_EQUAL, normal_ir_body, Assembler::kNearJump);


@ -1602,9 +1602,9 @@ void Assembler::LoadObjectHelper(Register rd,
} else if (CanLoadFromObjectPool(object)) {
// Make sure that class CallPattern is able to decode this load from the
// object pool.
const int32_t offset = ObjectPool::element_offset(
is_unique ? object_pool_builder().AddObject(object)
: object_pool_builder().FindObject(object));
const auto index = is_unique ? object_pool_builder().AddObject(object)
: object_pool_builder().FindObject(object);
const int32_t offset = ObjectPool::element_offset(index);
LoadWordFromPoolOffset(rd, offset - kHeapObjectTag, pp, cond);
} else {
UNREACHABLE();
@ -3433,8 +3433,8 @@ void Assembler::LoadAllocationStatsAddress(Register dest, intptr_t cid) {
ASSERT(cid > 0);
const intptr_t class_offset = ClassTable::ClassOffsetFor(cid);
LoadIsolate(dest);
intptr_t table_offset =
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
ldr(dest, Address(dest, table_offset));
AddImmediate(dest, class_offset);
}


@ -1495,8 +1495,8 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
ASSERT(cid > 0);
intptr_t state_offset = ClassTable::StateOffsetFor(cid);
LoadIsolate(temp_reg);
intptr_t table_offset =
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
ldr(temp_reg, Address(temp_reg, table_offset));
AddImmediate(temp_reg, state_offset);
ldr(temp_reg, Address(temp_reg, 0));
@ -1506,10 +1506,10 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
void Assembler::UpdateAllocationStats(intptr_t cid) {
ASSERT(cid > 0);
intptr_t counter_offset = ClassTable::CounterOffsetFor(cid, /*is_new=*/true);
intptr_t counter_offset = target::ClassTable::NewSpaceCounterOffsetFor(cid);
LoadIsolate(TMP2);
intptr_t table_offset =
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
ldr(TMP, Address(TMP2, table_offset));
AddImmediate(TMP2, TMP, counter_offset);
ldr(TMP, Address(TMP2, 0));
@ -1525,8 +1525,8 @@ void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg) {
const uword size_field_offset =
ClassHeapStats::allocated_size_since_gc_new_space_offset();
LoadIsolate(TMP2);
intptr_t table_offset =
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
ldr(TMP, Address(TMP2, table_offset));
AddImmediate(TMP2, TMP, class_offset);
ldr(TMP, Address(TMP2, count_field_offset));


@ -2300,8 +2300,8 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
intptr_t state_offset = ClassTable::StateOffsetFor(cid);
ASSERT(temp_reg != kNoRegister);
LoadIsolate(temp_reg);
intptr_t table_offset =
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
movl(temp_reg, Address(temp_reg, table_offset));
state_address = Address(temp_reg, state_offset);
testb(state_address,
@ -2313,12 +2313,11 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
void Assembler::UpdateAllocationStats(intptr_t cid, Register temp_reg) {
ASSERT(cid > 0);
intptr_t counter_offset =
ClassTable::CounterOffsetFor(cid, /*is_new_space=*/true);
intptr_t counter_offset = ClassTable::NewSpaceCounterOffsetFor(cid);
ASSERT(temp_reg != kNoRegister);
LoadIsolate(temp_reg);
intptr_t table_offset =
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
movl(temp_reg, Address(temp_reg, table_offset));
incl(Address(temp_reg, counter_offset));
}
@ -2329,7 +2328,7 @@ void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
ASSERT(cid > 0);
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid, temp_reg);
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
intptr_t size_offset = ClassTable::NewSpaceSizeOffsetFor(cid);
addl(Address(temp_reg, size_offset), size_reg);
}
@ -2339,7 +2338,7 @@ void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
ASSERT(cid > 0);
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid, temp_reg);
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
intptr_t size_offset = ClassTable::NewSpaceSizeOffsetFor(cid);
addl(Address(temp_reg, size_offset), Immediate(size_in_bytes));
}
#endif // !PRODUCT


@ -1191,7 +1191,7 @@ void Assembler::LoadObjectHelper(Register dst,
bool is_unique) {
ASSERT(IsOriginalObject(object));
target::word offset_from_thread;
intptr_t offset_from_thread;
if (target::CanLoadFromThread(object, &offset_from_thread)) {
movq(dst, Address(THR, offset_from_thread));
} else if (CanLoadFromObjectPool(object)) {
@ -1216,7 +1216,7 @@ void Assembler::LoadUniqueObject(Register dst, const Object& object) {
void Assembler::StoreObject(const Address& dst, const Object& object) {
ASSERT(IsOriginalObject(object));
target::word offset_from_thread;
intptr_t offset_from_thread;
if (target::CanLoadFromThread(object, &offset_from_thread)) {
movq(TMP, Address(THR, offset_from_thread));
movq(dst, TMP);
@ -1232,7 +1232,7 @@ void Assembler::StoreObject(const Address& dst, const Object& object) {
void Assembler::PushObject(const Object& object) {
ASSERT(IsOriginalObject(object));
target::word offset_from_thread;
intptr_t offset_from_thread;
if (target::CanLoadFromThread(object, &offset_from_thread)) {
pushq(Address(THR, offset_from_thread));
} else if (CanLoadFromObjectPool(object)) {
@ -1247,7 +1247,7 @@ void Assembler::PushObject(const Object& object) {
void Assembler::CompareObject(Register reg, const Object& object) {
ASSERT(IsOriginalObject(object));
target::word offset_from_thread;
intptr_t offset_from_thread;
if (target::CanLoadFromThread(object, &offset_from_thread)) {
cmpq(reg, Address(THR, offset_from_thread));
} else if (CanLoadFromObjectPool(object)) {
@ -1786,8 +1786,8 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
intptr_t state_offset = ClassTable::StateOffsetFor(cid);
Register temp_reg = TMP;
LoadIsolate(temp_reg);
intptr_t table_offset =
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
movq(temp_reg, Address(temp_reg, table_offset));
testb(Address(temp_reg, state_offset),
Immediate(target::ClassHeapStats::TraceAllocationMask()));
@ -1798,12 +1798,11 @@ void Assembler::MaybeTraceAllocation(intptr_t cid,
void Assembler::UpdateAllocationStats(intptr_t cid) {
ASSERT(cid > 0);
intptr_t counter_offset =
ClassTable::CounterOffsetFor(cid, /*is_new_space=*/true);
intptr_t counter_offset = ClassTable::NewSpaceCounterOffsetFor(cid);
Register temp_reg = TMP;
LoadIsolate(temp_reg);
intptr_t table_offset =
Isolate::class_table_offset() + ClassTable::TableOffsetFor(cid);
intptr_t table_offset = Isolate::class_table_offset() +
ClassTable::class_heap_stats_table_offset();
movq(temp_reg, Address(temp_reg, table_offset));
incq(Address(temp_reg, counter_offset));
}
@ -1813,7 +1812,7 @@ void Assembler::UpdateAllocationStatsWithSize(intptr_t cid, Register size_reg) {
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid);
Register temp_reg = TMP;
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
intptr_t size_offset = ClassTable::NewSpaceSizeOffsetFor(cid);
addq(Address(temp_reg, size_offset), size_reg);
}
@ -1823,7 +1822,7 @@ void Assembler::UpdateAllocationStatsWithSize(intptr_t cid,
ASSERT(cid < kNumPredefinedCids);
UpdateAllocationStats(cid);
Register temp_reg = TMP;
intptr_t size_offset = ClassTable::SizeOffsetFor(cid, /*is_new_space=*/true);
intptr_t size_offset = ClassTable::NewSpaceSizeOffsetFor(cid);
addq(Address(temp_reg, size_offset), Immediate(size_in_bytes));
}
#endif // !PRODUCT


@ -340,8 +340,8 @@ void FlowGraphChecker::VisitConstant(ConstantInstr* constant) {
const Object& value = constant->value();
if (value.IsSmi()) {
const int64_t smi_value = Integer::Cast(value).AsInt64Value();
ASSERT(kSmiMin <= smi_value);
ASSERT(smi_value <= kSmiMax);
ASSERT(compiler::target::kSmiMin <= smi_value);
ASSERT(smi_value <= compiler::target::kSmiMax);
}
// Any constant involved in SSA should appear in the entry (making it more
// likely it was inserted by the utility that avoids duplication).


@ -824,7 +824,8 @@ void FlowGraphCompiler::RecordSafepoint(LocationSummary* locs,
RegisterSet* registers = locs->live_registers();
ASSERT(registers != NULL);
const intptr_t kFpuRegisterSpillFactor = kFpuRegisterSize / kWordSize;
const intptr_t kFpuRegisterSpillFactor =
kFpuRegisterSize / compiler::target::kWordSize;
intptr_t saved_registers_size = 0;
const bool using_shared_stub = locs->call_on_shared_slow_path();
if (using_shared_stub) {
@ -965,7 +966,8 @@ Environment* FlowGraphCompiler::SlowPathEnvironmentFor(
RegisterSet* regs = instruction->locs()->live_registers();
intptr_t fpu_reg_slots[kNumberOfFpuRegisters];
intptr_t cpu_reg_slots[kNumberOfCpuRegisters];
const intptr_t kFpuRegisterSpillFactor = kFpuRegisterSize / kWordSize;
const intptr_t kFpuRegisterSpillFactor =
kFpuRegisterSize / compiler::target::kWordSize;
// FPU registers are spilled first from highest to lowest register number.
for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; --i) {
FpuRegister reg = static_cast<FpuRegister>(i);
@ -1234,7 +1236,7 @@ bool FlowGraphCompiler::TryIntrinsifyHelper() {
if (field.is_instance() &&
(FLAG_precompiled_mode || !IsPotentialUnboxedField(field))) {
SpecialStatsBegin(CombinedCodeStatistics::kTagIntrinsics);
GenerateGetterIntrinsic(field.Offset());
GenerateGetterIntrinsic(compiler::target::Field::OffsetOf(field));
SpecialStatsEnd(CombinedCodeStatistics::kTagIntrinsics);
return !isolate()->use_field_guards();
}
@ -1253,7 +1255,7 @@ bool FlowGraphCompiler::TryIntrinsifyHelper() {
if (field.is_instance() &&
(FLAG_precompiled_mode || field.guarded_cid() == kDynamicCid)) {
SpecialStatsBegin(CombinedCodeStatistics::kTagIntrinsics);
GenerateSetterIntrinsic(field.Offset());
GenerateSetterIntrinsic(compiler::target::Field::OffsetOf(field));
SpecialStatsEnd(CombinedCodeStatistics::kTagIntrinsics);
return !isolate()->use_field_guards();
}
@ -1267,8 +1269,9 @@ bool FlowGraphCompiler::TryIntrinsifyHelper() {
parsed_function().function().extracted_method_closure());
auto& klass = Class::Handle(extracted_method.Owner());
const intptr_t type_arguments_field_offset =
klass.NumTypeArguments() > 0
? (klass.type_arguments_field_offset() - kHeapObjectTag)
compiler::target::Class::HasTypeArgumentsField(klass)
? (compiler::target::Class::TypeArgumentsFieldOffset(klass) -
kHeapObjectTag)
: 0;
SpecialStatsBegin(CombinedCodeStatistics::kTagIntrinsics);
@ -2308,8 +2311,9 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
__ CompareObject(kTypeArgumentsReg, Object::null_object());
__ BranchIf(EQUAL, done);
__ LoadField(dst_type_reg,
FieldAddress(kTypeArgumentsReg, TypeArguments::type_at_offset(
type_param.index())));
FieldAddress(kTypeArgumentsReg,
compiler::target::TypeArguments::type_at_offset(
type_param.index())));
if (type_usage_info != NULL) {
type_usage_info->UseTypeInAssertAssignable(dst_type);
}


@ -196,7 +196,7 @@ void CompilerDeoptInfoWithStub::GenerateCode(FlowGraphCompiler* compiler,
}
ASSERT(deopt_env() != NULL);
__ ldr(LR, Address(THR, Thread::deoptimize_entry_offset()));
__ ldr(LR, Address(THR, compiler::target::Thread::deoptimize_entry_offset()));
__ blx(LR);
ASSERT(kReservedCpuRegisters & (1 << LR));
set_pc_offset(assembler->CodeSize());
@ -431,8 +431,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
__ LoadClassById(R1, R2);
// R1: instance class.
// Check immediate superclass equality.
__ ldr(R2, FieldAddress(R1, Class::super_type_offset()));
__ ldr(R2, FieldAddress(R2, Type::type_class_id_offset()));
__ ldr(R2, FieldAddress(R1, compiler::target::Class::super_type_offset()));
__ ldr(R2, FieldAddress(R2, compiler::target::Type::type_class_id_offset()));
__ CompareImmediate(R2, Smi::RawValue(type_class.id()));
__ b(is_instance_lbl, EQ);
@ -475,7 +475,8 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
__ CompareObject(kTypeArgumentsReg, Object::null_object());
__ b(is_instance_lbl, EQ);
__ ldr(R3, FieldAddress(kTypeArgumentsReg,
TypeArguments::type_at_offset(type_param.index())));
compiler::target::TypeArguments::type_at_offset(
type_param.index())));
// R3: concrete type of type.
// Check if type argument is dynamic, Object, or void.
__ CompareObject(R3, Object::dynamic_type());
@ -772,15 +773,18 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
const intptr_t sub_type_cache_index = __ object_pool_builder().AddObject(
Object::null_object(), ObjectPool::Patchability::kPatchable);
const intptr_t sub_type_cache_offset =
ObjectPool::element_offset(sub_type_cache_index) - kHeapObjectTag;
compiler::target::ObjectPool::element_offset(sub_type_cache_index) -
kHeapObjectTag;
const intptr_t dst_name_index = __ object_pool_builder().AddObject(
dst_name, ObjectPool::Patchability::kPatchable);
ASSERT((sub_type_cache_index + 1) == dst_name_index);
ASSERT(__ constant_pool_allowed());
__ LoadField(R9,
FieldAddress(kDstTypeReg,
AbstractType::type_test_stub_entry_point_offset()));
__ LoadField(
R9,
FieldAddress(
kDstTypeReg,
compiler::target::AbstractType::type_test_stub_entry_point_offset()));
__ LoadWordFromPoolOffset(kSubtypeTestCacheReg, sub_type_cache_offset, PP,
AL);
__ blx(R9);
@ -822,15 +826,17 @@ void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
kPoolReg = PP;
} else {
__ LoadFieldFromOffset(kWord, kPoolReg, CODE_REG,
Code::object_pool_offset());
compiler::target::Code::object_pool_offset());
}
__ LoadImmediate(R4, type_arguments_field_offset);
__ LoadFieldFromOffset(kWord, R1, kPoolReg,
ObjectPool::element_offset(function_index));
__ LoadFieldFromOffset(kWord, CODE_REG, kPoolReg,
ObjectPool::element_offset(stub_index));
__ Branch(FieldAddress(
CODE_REG, Code::entry_point_offset(Code::EntryKind::kUnchecked)));
__ LoadFieldFromOffset(
kWord, R1, kPoolReg,
compiler::target::ObjectPool::element_offset(function_index));
__ LoadFieldFromOffset(
kWord, CODE_REG, kPoolReg,
compiler::target::ObjectPool::element_offset(stub_index));
__ Branch(FieldAddress(CODE_REG, compiler::target::Code::entry_point_offset(
Code::EntryKind::kUnchecked)));
}
void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
@ -838,7 +844,7 @@ void FlowGraphCompiler::GenerateGetterIntrinsic(intptr_t offset) {
// SP: receiver.
// Sequence node has one return node, its input is load field node.
__ Comment("Inlined Getter");
__ ldr(R0, Address(SP, 0 * kWordSize));
__ ldr(R0, Address(SP, 0 * compiler::target::kWordSize));
__ LoadFieldFromOffset(kWord, R0, R0, offset);
__ Ret();
}
@ -849,8 +855,8 @@ void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
// SP+0: value.
// Sequence node has one store node and one return NULL node.
__ Comment("Inlined Setter");
__ ldr(R0, Address(SP, 1 * kWordSize)); // Receiver.
__ ldr(R1, Address(SP, 0 * kWordSize)); // Value.
__ ldr(R0, Address(SP, 1 * compiler::target::kWordSize)); // Receiver.
__ ldr(R1, Address(SP, 0 * compiler::target::kWordSize)); // Value.
__ StoreIntoObjectOffset(R0, offset, R1);
__ LoadObject(R0, Object::null_object());
__ Ret();
@ -862,13 +868,18 @@ void FlowGraphCompiler::EmitFrameEntry() {
(!is_optimizing() || may_reoptimize())) {
__ Comment("Invocation Count Check");
const Register function_reg = R8;
__ ldr(function_reg, FieldAddress(CODE_REG, Code::owner_offset()));
__ ldr(R3, FieldAddress(function_reg, Function::usage_counter_offset()));
__ ldr(function_reg,
FieldAddress(CODE_REG, compiler::target::Code::owner_offset()));
__ ldr(R3,
FieldAddress(function_reg,
compiler::target::Function::usage_counter_offset()));
// Reoptimization of an optimized function is triggered by counting in
// IC stubs, but not at the entry of the function.
if (!is_optimizing()) {
__ add(R3, R3, Operand(1));
__ str(R3, FieldAddress(function_reg, Function::usage_counter_offset()));
__ str(R3,
FieldAddress(function_reg,
compiler::target::Function::usage_counter_offset()));
}
__ CompareImmediate(R3, GetOptimizationThreshold());
ASSERT(function_reg == R8);
@ -878,10 +889,10 @@ void FlowGraphCompiler::EmitFrameEntry() {
if (flow_graph().IsCompiledForOsr()) {
const intptr_t extra_slots = ExtraStackSlotsOnOsrEntry();
ASSERT(extra_slots >= 0);
__ EnterOsrFrame(extra_slots * kWordSize);
__ EnterOsrFrame(extra_slots * compiler::target::kWordSize);
} else {
ASSERT(StackSize() >= 0);
__ EnterDartFrame(StackSize() * kWordSize);
__ EnterDartFrame(StackSize() * compiler::target::kWordSize);
}
}
@ -907,7 +918,8 @@ void FlowGraphCompiler::EmitPrologue() {
const intptr_t slot_index =
compiler::target::frame_layout.FrameSlotForVariableIndex(-i);
Register value_reg = slot_index == args_desc_slot ? ARGS_DESC_REG : R0;
__ StoreToOffset(kWord, value_reg, FP, slot_index * kWordSize);
__ StoreToOffset(kWord, value_reg, FP,
slot_index * compiler::target::kWordSize);
}
}
@ -1021,9 +1033,11 @@ void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
bool old_use_far_branches = assembler_->use_far_branches();
assembler_->set_use_far_branches(true);
#endif // DEBUG
__ LoadFieldFromOffset(kWord, R1, R0, Array::element_offset(edge_id));
__ LoadFieldFromOffset(kWord, R1, R0,
compiler::target::Array::element_offset(edge_id));
__ add(R1, R1, Operand(Smi::RawValue(1)));
__ StoreIntoObjectNoBarrierOffset(R0, Array::element_offset(edge_id), R1);
__ StoreIntoObjectNoBarrierOffset(
R0, compiler::target::Array::element_offset(edge_id), R1);
#if defined(DEBUG)
assembler_->set_use_far_branches(old_use_far_branches);
#endif // DEBUG
@ -1082,9 +1096,14 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
__ Comment("MegamorphicCall");
// Load receiver into R0.
__ LoadFromOffset(kWord, R0, SP, (args_desc.Count() - 1) * kWordSize);
__ LoadFromOffset(kWord, R0, SP,
(args_desc.Count() - 1) * compiler::target::kWordSize);
__ LoadObject(R9, cache);
__ ldr(LR, Address(THR, Thread::megamorphic_call_checked_entry_offset()));
__ ldr(
LR,
Address(
THR,
compiler::target::Thread::megamorphic_call_checked_entry_offset()));
__ blx(LR);
RecordSafepoint(locs, slow_path_argument_count);
@ -1121,8 +1140,9 @@ void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
const Code& initial_stub = StubCode::ICCallThroughFunction();
__ Comment("SwitchableCall");
__ LoadFromOffset(kWord, R0, SP,
(ic_data.CountWithoutTypeArgs() - 1) * kWordSize);
__ LoadFromOffset(
kWord, R0, SP,
(ic_data.CountWithoutTypeArgs() - 1) * compiler::target::kWordSize);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
// The AOT runtime will replace the slot in the object pool with the
// entrypoint address - see clustered_snapshot.cc.
@ -1131,8 +1151,10 @@ void FlowGraphCompiler::EmitSwitchableInstanceCall(const ICData& ic_data,
__ LoadUniqueObject(CODE_REG, initial_stub);
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
? compiler::target::Code::entry_point_offset(
Code::EntryKind::kMonomorphic)
: compiler::target::Code::entry_point_offset(
Code::EntryKind::kMonomorphicUnchecked);
__ ldr(LR, FieldAddress(CODE_REG, entry_point_offset));
}
__ LoadUniqueObject(R9, ic_data);
@ -1265,7 +1287,9 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
const Array& arguments_descriptor) {
__ Comment("EmitTestAndCall");
// Load receiver into R0.
__ LoadFromOffset(kWord, R0, SP, (count_without_type_args - 1) * kWordSize);
__ LoadFromOffset(
kWord, R0, SP,
(count_without_type_args - 1) * compiler::target::kWordSize);
__ LoadObject(R4, arguments_descriptor);
}


@ -823,7 +823,7 @@ bool CheckClassInstr::IsCompactCidRange(const Cids& cids) {
intptr_t min = cids.ComputeLowestCid();
intptr_t max = cids.ComputeHighestCid();
return (max - min) < kBitsPerWord;
return (max - min) < compiler::target::kBitsPerWord;
}
bool CheckClassInstr::IsBitTest() const {
@ -837,7 +837,7 @@ intptr_t CheckClassInstr::ComputeCidMask() const {
for (intptr_t i = 0; i < cids_.length(); ++i) {
intptr_t run;
uintptr_t range = 1ul + cids_[i].Extent();
if (range >= static_cast<uintptr_t>(kBitsPerWord)) {
if (range >= static_cast<uintptr_t>(compiler::target::kBitsPerWord)) {
run = -1;
} else {
run = (1 << range) - 1;
@ -1835,7 +1835,7 @@ bool UnboxInt32Instr::ComputeCanDeoptimize() const {
}
const intptr_t value_cid = value()->Type()->ToCid();
if (value_cid == kSmiCid) {
return (kSmiBits > 32) && !is_truncating() &&
return (compiler::target::kSmiBits > 32) && !is_truncating() &&
!RangeUtils::Fits(value()->definition()->range(),
RangeBoundary::kRangeBoundaryInt32);
} else if (value_cid == kMintCid) {
@ -1844,7 +1844,7 @@ bool UnboxInt32Instr::ComputeCanDeoptimize() const {
RangeBoundary::kRangeBoundaryInt32);
} else if (is_truncating() && value()->definition()->IsBoxInteger()) {
return false;
} else if ((kSmiBits < 32) && value()->Type()->IsInt()) {
} else if ((compiler::target::kSmiBits < 32) && value()->Type()->IsInt()) {
return !RangeUtils::Fits(value()->definition()->range(),
RangeBoundary::kRangeBoundaryInt32);
} else {
@ -1935,7 +1935,7 @@ bool BinaryIntegerOpInstr::RightIsPowerOfTwoConstant() const {
static intptr_t RepresentationBits(Representation r) {
switch (r) {
case kTagged:
return kBitsPerWord - 1;
return compiler::target::kBitsPerWord - 1;
case kUnboxedInt32:
case kUnboxedUint32:
return 32;
@ -3184,7 +3184,7 @@ Definition* UnboxInt64Instr::Canonicalize(FlowGraph* flow_graph) {
// (on simdbc64 the [UnboxedConstantInstr] handling is only implemented for
// doubles and causes a bailout for everthing else)
#if !defined(TARGET_ARCH_DBC)
if (kBitsPerWord == 64) {
if (compiler::target::kBitsPerWord == 64) {
ConstantInstr* c = value()->definition()->AsConstant();
if (c != NULL && (c->value().IsSmi() || c->value().IsMint())) {
UnboxedConstantInstr* uc =
@ -5093,18 +5093,18 @@ intptr_t CheckArrayBoundInstr::LengthOffsetFor(intptr_t class_id) {
if (RawObject::IsTypedDataClassId(class_id) ||
RawObject::IsTypedDataViewClassId(class_id) ||
RawObject::IsExternalTypedDataClassId(class_id)) {
return TypedDataBase::length_offset();
return compiler::target::TypedDataBase::length_offset();
}
switch (class_id) {
case kGrowableObjectArrayCid:
return GrowableObjectArray::length_offset();
return compiler::target::GrowableObjectArray::length_offset();
case kOneByteStringCid:
case kTwoByteStringCid:
return String::length_offset();
return compiler::target::String::length_offset();
case kArrayCid:
case kImmutableArrayCid:
return Array::length_offset();
return compiler::target::Array::length_offset();
default:
UNREACHABLE();
return -1;


@ -3018,6 +3018,8 @@ class ConstantInstr : public TemplateDefinition<0, NoThrow, Pure> {
const Object& value() const { return value_; }
bool IsSmi() const { return compiler::target::IsSmi(value()); }
virtual bool ComputeCanDeoptimize() const { return false; }
virtual void InferRange(RangeAnalysis* analysis, Range* range);
@ -4775,7 +4777,11 @@ class LoadCodeUnitsInstr : public TemplateDefinition<2, NoThrow> {
Value* array() const { return inputs_[0]; }
Value* index() const { return inputs_[1]; }
intptr_t index_scale() const { return Instance::ElementSizeFor(class_id_); }
intptr_t index_scale() const {
return compiler::target::Instance::ElementSizeFor(class_id_);
}
intptr_t class_id() const { return class_id_; }
intptr_t element_count() const { return element_count_; }
@ -6137,7 +6143,10 @@ class CaseInsensitiveCompareInstr
const RuntimeEntry& TargetFunction() const { return entry_; }
bool IsExternal() const { return cid_ == kExternalTwoByteStringCid; }
intptr_t class_id() const { return cid_; }
intptr_t index_scale() const { return Instance::ElementSizeFor(cid_); }
intptr_t index_scale() const {
return compiler::target::Instance::ElementSizeFor(cid_);
}
virtual bool ComputeCanDeoptimize() const { return false; }


@ -64,7 +64,8 @@ DEFINE_BACKEND(TailCall,
Temp<Register> temp)) {
__ LoadObject(CODE_REG, instr->code());
__ LeaveDartFrame(); // The arguments are still on the stack.
__ Branch(FieldAddress(CODE_REG, Code::entry_point_offset()));
__ Branch(
FieldAddress(CODE_REG, compiler::target::Code::entry_point_offset()));
// Even though the TailCallInstr will be the last instruction in a basic
// block, the flow graph compiler will emit native code for other blocks after
@ -129,7 +130,7 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t fp_sp_dist =
(compiler::target::frame_layout.first_local_from_fp + 1 -
compiler->StackSize()) *
kWordSize;
compiler::target::kWordSize;
ASSERT(fp_sp_dist <= 0);
__ sub(R2, SP, Operand(FP));
__ CompareImmediate(R2, fp_sp_dist);
@ -234,10 +235,11 @@ void IfThenElseInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Lsl(result, result, Operand(shift + kSmiTagSize));
} else {
__ sub(result, result, Operand(1));
const int32_t val = Smi::RawValue(true_value) - Smi::RawValue(false_value);
const int32_t val = compiler::target::ToRawSmi(true_value) -
compiler::target::ToRawSmi(false_value);
__ AndImmediate(result, result, val);
if (false_value != 0) {
__ AddImmediate(result, Smi::RawValue(false_value));
__ AddImmediate(result, compiler::target::ToRawSmi(false_value));
}
}
}
@ -263,8 +265,10 @@ void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// R4: Arguments descriptor.
// R0: Function.
ASSERT(locs()->in(0).reg() == R0);
__ ldr(CODE_REG, FieldAddress(R0, Function::code_offset()));
__ ldr(R2, FieldAddress(R0, Code::function_entry_point_offset(entry_kind())));
__ ldr(CODE_REG, FieldAddress(R0, compiler::target::Function::code_offset()));
__ ldr(R2,
FieldAddress(R0, compiler::target::Code::function_entry_point_offset(
entry_kind())));
// R2: instructions entry point.
// R9: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
@ -320,7 +324,10 @@ void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
Register tmp) {
if (destination.IsRegister()) {
if (representation() == kUnboxedInt32) {
__ LoadImmediate(destination.reg(), Smi::Cast(value_).Value());
int64_t v;
const bool ok = compiler::HasIntegerValue(value_, &v);
RELEASE_ASSERT(ok);
__ LoadImmediate(destination.reg(), v);
} else {
ASSERT(representation() == kTagged);
__ LoadObject(destination.reg(), value_);
@ -350,7 +357,10 @@ void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
ASSERT(tmp != kNoRegister);
const intptr_t dest_offset = destination.ToStackSlotOffset();
if (representation() == kUnboxedInt32) {
__ LoadImmediate(tmp, Smi::Cast(value_).Value());
int64_t v;
const bool ok = compiler::HasIntegerValue(value_, &v);
RELEASE_ASSERT(ok);
__ LoadImmediate(tmp, v);
} else {
__ LoadObject(tmp, value_);
}
@ -803,8 +813,8 @@ Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
const Register left = locs()->in(0).reg();
Location right = locs()->in(1);
if (right.IsConstant()) {
ASSERT(right.constant().IsSmi());
const int32_t imm = reinterpret_cast<int32_t>(right.constant().raw());
ASSERT(compiler::target::IsSmi(right.constant()));
const int32_t imm = compiler::target::ToRawSmi(right.constant());
__ TestImmediate(left, imm);
} else {
__ tst(left, Operand(right.reg()));
@ -933,7 +943,7 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ PushObject(Object::null_object());
// Pass a pointer to the first argument in R2.
__ add(R2, SP, Operand(ArgumentCount() * kWordSize));
__ add(R2, SP, Operand(ArgumentCount() * compiler::target::kWordSize));
// Compute the effective address. When running under the simulator,
// this is a redirection address that forces the simulator to call
@ -1179,9 +1189,8 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
CallingConventions::ArgumentRegisters[0] != TMP2 &&
CallingConventions::ArgumentRegisters[0] != R1);
__ LoadImmediate(CallingConventions::ArgumentRegisters[0], callback_id_);
__ LoadFromOffset(
kWord, R1, THR,
compiler::target::Thread::verify_callback_isolate_entry_point_offset());
__ LoadFromOffset(kWord, R1, THR,
compiler::target::Thread::verify_callback_entry_offset());
__ blx(R1);
// Load the code object.
@ -1231,8 +1240,12 @@ void OneByteStringFromCharCodeInstr::EmitNativeCode(
const Register char_code = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
__ ldr(result, Address(THR, Thread::predefined_symbols_address_offset()));
__ AddImmediate(result, Symbols::kNullCharCodeSymbolOffset * kWordSize);
__ ldr(
result,
Address(THR,
compiler::target::Thread::predefined_symbols_address_offset()));
__ AddImmediate(
result, Symbols::kNullCharCodeSymbolOffset * compiler::target::kWordSize);
__ ldr(result, Address(result, char_code, LSL, 1)); // Char code is a smi.
}
@ -1247,10 +1260,12 @@ void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(cid_ == kOneByteStringCid);
const Register str = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
__ ldr(result, FieldAddress(str, String::length_offset()));
__ cmp(result, Operand(Smi::RawValue(1)));
__ ldr(result, FieldAddress(str, compiler::target::String::length_offset()));
__ cmp(result, Operand(compiler::target::ToRawSmi(1)));
__ LoadImmediate(result, -1, NE);
__ ldrb(result, FieldAddress(str, OneByteString::data_offset()), EQ);
__ ldrb(result,
FieldAddress(str, compiler::target::OneByteString::data_offset()),
EQ);
__ SmiTag(result);
}
@ -1416,8 +1431,8 @@ static bool CanBeImmediateIndex(Value* value,
if ((constant == NULL) || !Assembler::IsSafeSmi(constant->value())) {
return false;
}
const int64_t index = Smi::Cast(constant->value()).AsInt64Value();
const intptr_t scale = Instance::ElementSizeFor(cid);
const int64_t index = compiler::target::SmiValue(constant->value());
const intptr_t scale = compiler::target::Instance::ElementSizeFor(cid);
const intptr_t base_offset =
(is_external ? 0 : (Instance::DataOffsetFor(cid) - kHeapObjectTag));
const int64_t offset = index * scale + base_offset;
@ -1510,7 +1525,7 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
: __ ElementAddressForIntIndex(
true, // Load.
IsExternal(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value(),
compiler::target::SmiValue(index.constant()),
IP); // Temp register.
// Warning: element_address may use register IP as base.
} else {
@ -1520,11 +1535,11 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
IsExternal(), class_id(), index_scale(),
array, index.reg());
} else {
__ LoadElementAddressForIntIndex(address,
true, // Load.
IsExternal(), class_id(), index_scale(),
array,
Smi::Cast(index.constant()).Value());
__ LoadElementAddressForIntIndex(
address,
true, // Load.
IsExternal(), class_id(), index_scale(), array,
compiler::target::SmiValue(index.constant()));
}
}
@ -1603,10 +1618,10 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register result_hi = result_pair->At(1).reg();
if (aligned()) {
__ ldr(result_lo, Address(address));
__ ldr(result_hi, Address(address, kWordSize));
__ ldr(result_hi, Address(address, compiler::target::kWordSize));
} else {
__ LoadWordUnaligned(result_lo, address, TMP);
__ AddImmediate(address, address, kWordSize);
__ AddImmediate(address, address, compiler::target::kWordSize);
__ LoadWordUnaligned(result_hi, address, TMP);
}
break;
@ -1802,15 +1817,16 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Address element_address(kNoRegister);
if (directly_addressable) {
element_address = index.IsRegister()
? __ ElementAddressForRegIndex(
false, // Store.
IsExternal(), class_id(), index_scale(), array,
index.reg())
: __ ElementAddressForIntIndex(
false, // Store.
IsExternal(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value(), temp);
element_address =
index.IsRegister()
? __ ElementAddressForRegIndex(false, // Store.
IsExternal(), class_id(),
index_scale(), array,
index.reg())
: __ ElementAddressForIntIndex(
false, // Store.
IsExternal(), class_id(), index_scale(), array,
compiler::target::SmiValue(index.constant()), temp);
} else {
if (index.IsRegister()) {
__ LoadElementAddressForRegIndex(temp,
@ -1818,11 +1834,11 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
IsExternal(), class_id(), index_scale(),
array, index.reg());
} else {
__ LoadElementAddressForIntIndex(temp,
false, // Store.
IsExternal(), class_id(), index_scale(),
array,
Smi::Cast(index.constant()).Value());
__ LoadElementAddressForIntIndex(
temp,
false, // Store.
IsExternal(), class_id(), index_scale(), array,
compiler::target::SmiValue(index.constant()));
}
}
@ -1846,8 +1862,8 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case kOneByteStringCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
__ LoadImmediate(IP, static_cast<int8_t>(constant.Value()));
__ LoadImmediate(IP,
compiler::target::SmiValue(locs()->in(2).constant()));
__ strb(IP, element_address);
} else {
const Register value = locs()->in(2).reg();
@ -1859,8 +1875,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case kExternalTypedDataUint8ClampedArrayCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
intptr_t value = constant.Value();
intptr_t value = compiler::target::SmiValue(locs()->in(2).constant());
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
@ -1910,10 +1925,10 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value_hi = value_pair->At(1).reg();
if (aligned()) {
__ str(value_lo, Address(temp));
__ str(value_hi, Address(temp, kWordSize));
__ str(value_hi, Address(temp, compiler::target::kWordSize));
} else {
__ StoreWordUnaligned(value_lo, temp, temp2);
__ AddImmediate(temp, temp, kWordSize);
__ AddImmediate(temp, temp, compiler::target::kWordSize);
__ StoreWordUnaligned(value_hi, temp, temp2);
}
break;
@ -2033,9 +2048,10 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (emit_full_guard) {
__ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
FieldAddress field_cid_operand(field_reg, Field::guarded_cid_offset());
FieldAddress field_nullability_operand(field_reg,
Field::is_nullable_offset());
FieldAddress field_cid_operand(
field_reg, compiler::target::Field::guarded_cid_offset());
FieldAddress field_nullability_operand(
field_reg, compiler::target::Field::is_nullable_offset());
if (value_cid == kDynamicCid) {
LoadValueCid(compiler, value_cid_reg, value_reg);
@ -2082,7 +2098,8 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(!compiler->is_optimizing());
__ Bind(fail);
__ ldrh(IP, FieldAddress(field_reg, Field::guarded_cid_offset()));
__ ldrh(IP, FieldAddress(field_reg,
compiler::target::Field::guarded_cid_offset()));
__ CompareImmediate(IP, kDynamicCid);
__ b(&ok, EQ);
@ -2174,12 +2191,13 @@ void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadObject(field_reg, Field::ZoneHandle(field().Original()));
__ ldrsb(
offset_reg,
FieldAddress(field_reg,
Field::guarded_list_length_in_object_offset_offset()));
__ ldrsb(offset_reg,
FieldAddress(field_reg,
compiler::target::Field::
guarded_list_length_in_object_offset_offset()));
__ ldr(length_reg,
FieldAddress(field_reg, Field::guarded_list_length_offset()));
FieldAddress(field_reg,
compiler::target::Field::guarded_list_length_offset()));
__ tst(offset_reg, Operand(offset_reg));
__ b(&ok, MI);
@ -2214,8 +2232,8 @@ void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ ldr(length_reg,
FieldAddress(value_reg,
field().guarded_list_length_in_object_offset()));
__ CompareImmediate(length_reg,
Smi::RawValue(field().guarded_list_length()));
__ CompareImmediate(
length_reg, compiler::target::ToRawSmi(field().guarded_list_length()));
__ b(deopt, NE);
}
}
@ -2387,9 +2405,10 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
result, temp);
__ eor(temp, temp, Operand(temp));
__ StoreToOffset(kWord, value, result,
Mint::value_offset() - kHeapObjectTag);
compiler::target::Mint::value_offset() - kHeapObjectTag);
__ StoreToOffset(kWord, temp, result,
Mint::value_offset() - kHeapObjectTag + kWordSize);
compiler::target::Mint::value_offset() - kHeapObjectTag +
compiler::target::kWordSize);
__ Bind(&done);
}
}
@ -2487,17 +2506,21 @@ void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleStoreInstanceFieldInstr");
__ StoreDToOffset(value, temp, Double::value_offset() - kHeapObjectTag);
__ StoreDToOffset(
value, temp,
compiler::target::Double::value_offset() - kHeapObjectTag);
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4StoreInstanceFieldInstr");
__ StoreMultipleDToOffset(value, 2, temp,
Float32x4::value_offset() - kHeapObjectTag);
__ StoreMultipleDToOffset(
value, 2, temp,
compiler::target::Float32x4::value_offset() - kHeapObjectTag);
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2StoreInstanceFieldInstr");
__ StoreMultipleDToOffset(value, 2, temp,
Float64x2::value_offset() - kHeapObjectTag);
__ StoreMultipleDToOffset(
value, 2, temp,
compiler::target::Float64x2::value_offset() - kHeapObjectTag);
break;
default:
UNREACHABLE();
@ -2525,23 +2548,28 @@ void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
__ ldrh(temp2, FieldAddress(temp, Field::is_nullable_offset()));
__ ldrh(temp2,
FieldAddress(temp, compiler::target::Field::is_nullable_offset()));
__ CompareImmediate(temp2, kNullCid);
__ b(&store_pointer, EQ);
__ ldrb(temp2, FieldAddress(temp, Field::kind_bits_offset()));
__ ldrb(temp2,
FieldAddress(temp, compiler::target::Field::kind_bits_offset()));
__ tst(temp2, Operand(1 << Field::kUnboxingCandidateBit));
__ b(&store_pointer, EQ);
__ ldrh(temp2, FieldAddress(temp, Field::guarded_cid_offset()));
__ ldrh(temp2,
FieldAddress(temp, compiler::target::Field::guarded_cid_offset()));
__ CompareImmediate(temp2, kDoubleCid);
__ b(&store_double, EQ);
__ ldrh(temp2, FieldAddress(temp, Field::guarded_cid_offset()));
__ ldrh(temp2,
FieldAddress(temp, compiler::target::Field::guarded_cid_offset()));
__ CompareImmediate(temp2, kFloat32x4Cid);
__ b(&store_float32x4, EQ);
__ ldrh(temp2, FieldAddress(temp, Field::guarded_cid_offset()));
__ ldrh(temp2,
FieldAddress(temp, compiler::target::Field::guarded_cid_offset()));
__ CompareImmediate(temp2, kFloat64x2Cid);
__ b(&store_float64x2, EQ);
@ -2621,7 +2649,8 @@ LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(Zone* zone,
void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register field = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
__ LoadFieldFromOffset(kWord, result, field, Field::static_value_offset());
__ LoadFieldFromOffset(kWord, result, field,
compiler::target::Field::static_value_offset());
}
LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
@ -2641,12 +2670,16 @@ void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadObject(temp, Field::ZoneHandle(Z, field().Original()));
if (this->value()->NeedsWriteBarrier()) {
__ StoreIntoObject(temp, FieldAddress(temp, Field::static_value_offset()),
value, CanValueBeSmi(),
/*lr_reserved=*/!compiler->intrinsic_mode());
__ StoreIntoObject(
temp,
FieldAddress(temp, compiler::target::Field::static_value_offset()),
value, CanValueBeSmi(),
/*lr_reserved=*/!compiler->intrinsic_mode());
} else {
__ StoreIntoObjectNoBarrier(
temp, FieldAddress(temp, Field::static_value_offset()), value);
temp,
FieldAddress(temp, compiler::target::Field::static_value_offset()),
value);
}
}
@ -2703,11 +2736,13 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
// Store the type argument field.
__ StoreIntoObjectNoBarrier(
R0, FieldAddress(R0, Array::type_arguments_offset()), kElemTypeReg);
R0, FieldAddress(R0, compiler::target::Array::type_arguments_offset()),
kElemTypeReg);
// Set the length field.
__ StoreIntoObjectNoBarrier(R0, FieldAddress(R0, Array::length_offset()),
kLengthReg);
__ StoreIntoObjectNoBarrier(
R0, FieldAddress(R0, compiler::target::Array::length_offset()),
kLengthReg);
// Initialize all array elements to raw_null.
// R0: new object start as a tagged pointer.
@ -2727,9 +2762,9 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
#endif // DEBUG
}
__ AddImmediate(R6, R0, sizeof(RawArray) - kHeapObjectTag);
if (array_size < (kInlineArraySize * kWordSize)) {
__ InitializeFieldsNoBarrierUnrolled(R0, R6, 0, num_elements * kWordSize,
R8, R9);
if (array_size < (kInlineArraySize * compiler::target::kWordSize)) {
__ InitializeFieldsNoBarrierUnrolled(
R0, R6, 0, num_elements * compiler::target::kWordSize, R8, R9);
} else {
__ InitializeFieldsNoBarrier(R0, R6, R3, R8, R9);
}
@ -2755,8 +2790,9 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (compiler->is_optimizing() && !FLAG_precompiled_mode &&
num_elements()->BindsToConstant() &&
num_elements()->BoundConstant().IsSmi()) {
const intptr_t length = Smi::Cast(num_elements()->BoundConstant()).Value();
compiler::target::IsSmi(num_elements()->BoundConstant())) {
const intptr_t length =
compiler::target::SmiValue(num_elements()->BoundConstant());
if (Array::IsValidLength(length)) {
Label slow_path, done;
InlineArrayAllocation(compiler, length, &slow_path, &done);
@ -2815,18 +2851,21 @@ void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleLoadFieldInstr");
__ LoadDFromOffset(result, temp,
Double::value_offset() - kHeapObjectTag);
__ LoadDFromOffset(
result, temp,
compiler::target::Double::value_offset() - kHeapObjectTag);
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4LoadFieldInstr");
__ LoadMultipleDFromOffset(result, 2, temp,
Float32x4::value_offset() - kHeapObjectTag);
__ LoadMultipleDFromOffset(
result, 2, temp,
compiler::target::Float32x4::value_offset() - kHeapObjectTag);
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2LoadFieldInstr");
__ LoadMultipleDFromOffset(result, 2, temp,
Float64x2::value_offset() - kHeapObjectTag);
__ LoadMultipleDFromOffset(
result, 2, temp,
compiler::target::Float64x2::value_offset() - kHeapObjectTag);
break;
default:
UNREACHABLE();
@ -2848,9 +2887,10 @@ void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadObject(result_reg, Field::ZoneHandle(slot().field().Original()));
FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset());
FieldAddress field_nullability_operand(result_reg,
Field::is_nullable_offset());
FieldAddress field_cid_operand(
result_reg, compiler::target::Field::guarded_cid_offset());
FieldAddress field_nullability_operand(
result_reg, compiler::target::Field::is_nullable_offset());
__ ldrh(temp, field_nullability_operand);
__ CompareImmediate(temp, kNullCid);
@ -2982,25 +3022,38 @@ void InstantiateTypeArgumentsInstr::EmitNativeCode(
// TODO(regis): Consider moving this into a shared stub to reduce
// generated code size.
__ LoadObject(R3, type_arguments());
__ ldr(R3, FieldAddress(R3, TypeArguments::instantiations_offset()));
__ AddImmediate(R3, Array::data_offset() - kHeapObjectTag);
__ ldr(R3, FieldAddress(
R3, compiler::target::TypeArguments::instantiations_offset()));
__ AddImmediate(R3, compiler::target::Array::data_offset() - kHeapObjectTag);
// The instantiations cache is initialized with Object::zero_array() and is
// therefore guaranteed to contain kNoInstantiator. No length check needed.
Label loop, next, found, slow_case;
__ Bind(&loop);
__ ldr(R2, Address(R3, 0 * kWordSize)); // Cached instantiator type args.
__ ldr(
R2,
Address(
R3,
0 * compiler::target::kWordSize)); // Cached instantiator type args.
__ cmp(R2, Operand(instantiator_type_args_reg));
__ b(&next, NE);
__ ldr(IP, Address(R3, 1 * kWordSize)); // Cached function type args.
__ ldr(
IP,
Address(R3,
1 * compiler::target::kWordSize)); // Cached function type args.
__ cmp(IP, Operand(function_type_args_reg));
__ b(&found, EQ);
__ Bind(&next);
__ AddImmediate(R3, StubCode::kInstantiationSizeInWords * kWordSize);
__ CompareImmediate(R2, Smi::RawValue(StubCode::kNoInstantiator));
__ AddImmediate(
R3, StubCode::kInstantiationSizeInWords * compiler::target::kWordSize);
__ CompareImmediate(R2,
compiler::target::ToRawSmi(StubCode::kNoInstantiator));
__ b(&loop, NE);
__ b(&slow_case);
__ Bind(&found);
__ ldr(result_reg, Address(R3, 2 * kWordSize)); // Cached instantiated args.
__ ldr(
result_reg,
Address(R3,
2 * compiler::target::kWordSize)); // Cached instantiated args.
__ b(&type_arguments_instantiated);
__ Bind(&slow_case);
@ -3076,7 +3129,8 @@ void AllocateUninitializedContextInstr::EmitNativeCode(
// Setup up number of context variables field.
__ LoadImmediate(temp0, num_context_variables());
__ str(temp0, FieldAddress(result, Context::num_variables_offset()));
__ str(temp0, FieldAddress(
result, compiler::target::Context::num_variables_offset()));
__ Bind(slow_path->exit_label());
}
@ -3117,7 +3171,8 @@ void InitStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register temp = locs()->temp(0).reg();
Label call_runtime, no_call;
__ ldr(temp, FieldAddress(field, Field::static_value_offset()));
__ ldr(temp,
FieldAddress(field, compiler::target::Field::static_value_offset()));
__ CompareObject(temp, Object::sentinel());
__ b(&call_runtime, EQ);
@ -3186,7 +3241,7 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t fp_sp_dist =
(compiler::target::frame_layout.first_local_from_fp + 1 -
compiler->StackSize()) *
kWordSize;
compiler::target::kWordSize;
ASSERT(fp_sp_dist <= 0);
__ AddImmediate(SP, FP, fp_sp_dist);
@ -3232,7 +3287,9 @@ class CheckStackOverflowSlowPath
__ Comment("CheckStackOverflowSlowPathOsr");
__ Bind(osr_entry_label());
__ LoadImmediate(value, Thread::kOsrRequest);
__ str(value, Address(THR, Thread::stack_overflow_flags_offset()));
__ str(value,
Address(THR,
compiler::target::Thread::stack_overflow_flags_offset()));
}
__ Comment("CheckStackOverflowSlowPath");
__ Bind(entry_label());
@ -3249,8 +3306,8 @@ class CheckStackOverflowSlowPath
compiler->pending_deoptimization_env_ = env;
if (using_shared_stub) {
const uword entry_point_offset =
Thread::stack_overflow_shared_stub_entry_point_offset(
const uword entry_point_offset = compiler::target::Thread::
stack_overflow_shared_stub_entry_point_offset(
instruction()->locs()->live_registers()->FpuRegisterCount() > 0);
__ ldr(LR, Address(THR, entry_point_offset));
__ blx(LR);
@ -3290,7 +3347,7 @@ class CheckStackOverflowSlowPath
};
void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ ldr(IP, Address(THR, Thread::stack_limit_offset()));
__ ldr(IP, Address(THR, compiler::target::Thread::stack_limit_offset()));
__ cmp(SP, Operand(IP));
auto object_store = compiler->isolate()->object_store();
@ -3327,7 +3384,8 @@ void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadObject(temp, compiler->parsed_function().function());
intptr_t threshold =
FLAG_optimization_counter_threshold * (loop_depth() + 1);
__ ldr(temp, FieldAddress(temp, Function::usage_counter_offset()));
__ ldr(temp, FieldAddress(
temp, compiler::target::Function::usage_counter_offset()));
__ CompareImmediate(temp, threshold);
__ b(slow_path->osr_entry_label(), GE);
}
@ -3348,10 +3406,10 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
: NULL;
if (locs.in(1).IsConstant()) {
const Object& constant = locs.in(1).constant();
ASSERT(constant.IsSmi());
ASSERT(compiler::target::IsSmi(constant));
// Immediate shift operation takes 5 bits for the count.
const intptr_t kCountLimit = 0x1F;
const intptr_t value = Smi::Cast(constant).Value();
const intptr_t value = compiler::target::SmiValue(constant);
ASSERT((0 < value) && (value < kCountLimit));
if (shift_left->can_overflow()) {
// Check for overflow (preserve left).
@ -3371,19 +3429,20 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
// TODO(srdjan): Implement code below for is_truncating().
// If left is constant, we know the maximal allowed size for right.
const Object& obj = shift_left->left()->BoundConstant();
if (obj.IsSmi()) {
const intptr_t left_int = Smi::Cast(obj).Value();
if (compiler::target::IsSmi(obj)) {
const intptr_t left_int = compiler::target::SmiValue(obj);
if (left_int == 0) {
__ cmp(right, Operand(0));
__ b(deopt, MI);
__ mov(result, Operand(0));
return;
}
const intptr_t max_right = kSmiBits - Utils::HighestBit(left_int);
const intptr_t max_right =
compiler::target::kSmiBits - Utils::HighestBit(left_int);
const bool right_needs_check =
!RangeUtils::IsWithin(right_range, 0, max_right - 1);
if (right_needs_check) {
__ cmp(right, Operand(reinterpret_cast<int32_t>(Smi::New(max_right))));
__ cmp(right, Operand(compiler::target::ToRawSmi(max_right)));
__ b(deopt, CS);
}
__ SmiUntag(IP, right);
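The compiler::target::ToRawSmi calls above rely on the Smi encoding: a small integer is stored shifted left by the tag size with a zero tag bit, so tagging is monotone and comparing two tagged Smis preserves the order of the underlying integers. A minimal sketch of that encoding, assuming the usual one-bit tag (names here are illustrative, not the VM's helpers):

#include <cstdint>

constexpr intptr_t kSmiTagSizeSketch = 1;  // assumed: one tag bit, value shifted left by 1

inline intptr_t ToRawSmiSketch(intptr_t value) {
  return value << kSmiTagSizeSketch;  // tag: shift in a zero bit
}

inline intptr_t SmiValueSketch(intptr_t raw) {
  return raw >> kSmiTagSizeSketch;  // untag: arithmetic shift right
}

inline bool HasSmiTagSketch(intptr_t raw) {
  return (raw & ((intptr_t{1} << kSmiTagSizeSketch) - 1)) == 0;  // low bit clear
}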
@ -3393,7 +3452,7 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
}
const bool right_needs_check =
!RangeUtils::IsWithin(right_range, 0, (Smi::kBits - 1));
!RangeUtils::IsWithin(right_range, 0, (compiler::target::kSmiBits - 1));
if (!shift_left->can_overflow()) {
if (right_needs_check) {
if (!RangeUtils::IsPositive(right_range)) {
@ -3402,7 +3461,8 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
__ b(deopt, MI);
}
__ cmp(right, Operand(reinterpret_cast<int32_t>(Smi::New(Smi::kBits))));
__ cmp(right,
Operand(compiler::target::ToRawSmi(compiler::target::kSmiBits)));
__ mov(result, Operand(0), CS);
__ SmiUntag(IP, right, CC); // SmiUntag right into IP if CC.
__ Lsl(result, left, IP, CC);
@ -3413,7 +3473,8 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
} else {
if (right_needs_check) {
ASSERT(shift_left->CanDeoptimize());
__ cmp(right, Operand(reinterpret_cast<int32_t>(Smi::New(Smi::kBits))));
__ cmp(right,
Operand(compiler::target::ToRawSmi(compiler::target::kSmiBits)));
__ b(deopt, CS);
}
// Left is not a constant.
@ -3535,7 +3596,8 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case Token::kSHL:
ASSERT(result != left);
ASSERT(result != right);
__ CompareImmediate(right, Smi::RawValue(Smi::kBits));
__ CompareImmediate(
right, compiler::target::ToRawSmi(compiler::target::kSmiBits));
__ b(slow_path->entry_label(), HI);
__ SmiUntag(TMP, right);
@ -3548,7 +3610,8 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case Token::kSHR:
ASSERT(result != left);
ASSERT(result != right);
__ CompareImmediate(right, Smi::RawValue(Smi::kBits));
__ CompareImmediate(
right, compiler::target::ToRawSmi(compiler::target::kSmiBits));
__ b(slow_path->entry_label(), HI);
__ SmiUntag(result, right);
@ -3764,8 +3827,8 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (locs()->in(1).IsConstant()) {
const Object& constant = locs()->in(1).constant();
ASSERT(constant.IsSmi());
const int32_t imm = reinterpret_cast<int32_t>(constant.raw());
ASSERT(compiler::target::IsSmi(constant));
const int32_t imm = compiler::target::ToRawSmi(constant);
switch (op_kind()) {
case Token::kADD: {
if (deopt == NULL) {
@ -3789,7 +3852,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
case Token::kMUL: {
// Keep left value tagged and untag right value.
const intptr_t value = Smi::Cast(constant).Value();
const intptr_t value = compiler::target::SmiValue(constant);
if (deopt == NULL) {
__ LoadImmediate(IP, value);
__ mul(result, left, IP);
@ -3803,7 +3866,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
break;
}
case Token::kTRUNCDIV: {
const intptr_t value = Smi::Cast(constant).Value();
const intptr_t value = compiler::target::SmiValue(constant);
ASSERT(value != kIntptrMin);
ASSERT(Utils::IsPowerOfTwo(Utils::Abs(value)));
const intptr_t shift_count =
@ -3859,7 +3922,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case Token::kSHR: {
// sarl operation masks the count to 5 bits.
const intptr_t kCountLimit = 0x1F;
intptr_t value = Smi::Cast(constant).Value();
intptr_t value = compiler::target::SmiValue(constant);
__ Asr(result, left,
Operand(Utils::Minimum(value + kSmiTagSize, kCountLimit)));
__ SmiTag(result);
@ -4023,10 +4086,10 @@ static void EmitInt32ShiftLeft(FlowGraphCompiler* compiler,
: NULL;
ASSERT(locs.in(1).IsConstant());
const Object& constant = locs.in(1).constant();
ASSERT(constant.IsSmi());
ASSERT(compiler::target::IsSmi(constant));
// Immediate shift operation takes 5 bits for the count.
const intptr_t kCountLimit = 0x1F;
const intptr_t value = Smi::Cast(constant).Value();
const intptr_t value = compiler::target::SmiValue(constant);
ASSERT((0 < value) && (value < kCountLimit));
if (shift_left->can_overflow()) {
// Check for overflow (preserve left).
@ -4076,8 +4139,8 @@ void BinaryInt32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (locs()->in(1).IsConstant()) {
const Object& constant = locs()->in(1).constant();
ASSERT(constant.IsSmi());
const intptr_t value = Smi::Cast(constant).Value();
ASSERT(compiler::target::IsSmi(constant));
const intptr_t value = compiler::target::SmiValue(constant);
switch (op_kind()) {
case Token::kADD: {
if (deopt == NULL) {
@ -4318,7 +4381,7 @@ void UnboxInstr::EmitLoadFromBox(FlowGraphCompiler* compiler) {
ASSERT(result->At(0).reg() != box);
__ LoadFieldFromOffset(kWord, result->At(0).reg(), box, ValueOffset());
__ LoadFieldFromOffset(kWord, result->At(1).reg(), box,
ValueOffset() + kWordSize);
ValueOffset() + compiler::target::kWordSize);
break;
}
@ -4380,7 +4443,8 @@ void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
const Register result = locs()->out(0).reg();
Label done;
__ SmiUntag(result, value, &done);
__ LoadFieldFromOffset(kWord, result, value, Mint::value_offset());
__ LoadFieldFromOffset(kWord, result, value,
compiler::target::Mint::value_offset());
__ Bind(&done);
}
@ -4435,14 +4499,16 @@ void BoxInteger32Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
temp);
if (from_representation() == kUnboxedInt32) {
__ Asr(temp, value, Operand(kBitsPerWord - 1));
__ Asr(temp, value, Operand(compiler::target::kBitsPerWord - 1));
} else {
ASSERT(from_representation() == kUnboxedUint32);
__ eor(temp, temp, Operand(temp));
}
__ StoreToOffset(kWord, value, out, Mint::value_offset() - kHeapObjectTag);
__ StoreToOffset(kWord, value, out,
compiler::target::Mint::value_offset() - kHeapObjectTag);
__ StoreToOffset(kWord, temp, out,
Mint::value_offset() - kHeapObjectTag + kWordSize);
compiler::target::Mint::value_offset() - kHeapObjectTag +
compiler::target::kWordSize);
__ Bind(&done);
}
}
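The two stores above write the 64-bit Mint payload as a low/high word pair: the high word is a sign extension of the 32-bit value (Asr by kBitsPerWord - 1), or zero in the unsigned case. A small sketch of the same computation, with an illustrative struct standing in for the raw Mint layout:

#include <cstdint>

// Illustrative stand-in for the boxed 64-bit payload on a 32-bit target.
struct Int64PairSketch {
  uint32_t lo;
  uint32_t hi;
};

inline Int64PairSketch FromInt32(int32_t value) {
  // High word replicates the sign bit, like `Asr(temp, value, 31)`.
  return {static_cast<uint32_t>(value), static_cast<uint32_t>(value >> 31)};
}

inline Int64PairSketch FromUint32(uint32_t value) {
  // High word is zero, like `eor(temp, temp, temp)`.
  return {value, 0u};
}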
@ -4488,9 +4554,10 @@ void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
out_reg, tmp);
__ StoreToOffset(kWord, value_lo, out_reg,
Mint::value_offset() - kHeapObjectTag);
compiler::target::Mint::value_offset() - kHeapObjectTag);
__ StoreToOffset(kWord, value_hi, out_reg,
Mint::value_offset() - kHeapObjectTag + kWordSize);
compiler::target::Mint::value_offset() - kHeapObjectTag +
compiler::target::kWordSize);
__ Bind(&done);
}
@ -4499,10 +4566,13 @@ static void LoadInt32FromMint(FlowGraphCompiler* compiler,
Register result,
Register temp,
Label* deopt) {
__ LoadFieldFromOffset(kWord, result, mint, Mint::value_offset());
__ LoadFieldFromOffset(kWord, result, mint,
compiler::target::Mint::value_offset());
if (deopt != NULL) {
__ LoadFieldFromOffset(kWord, temp, mint, Mint::value_offset() + kWordSize);
__ cmp(temp, Operand(result, ASR, kBitsPerWord - 1));
__ LoadFieldFromOffset(
kWord, temp, mint,
compiler::target::Mint::value_offset() + compiler::target::kWordSize);
__ cmp(temp, Operand(result, ASR, compiler::target::kBitsPerWord - 1));
__ b(deopt, NE);
}
}
@ -5489,7 +5559,8 @@ void DoubleToIntegerInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register value_obj = locs()->in(0).reg();
ASSERT(result == R0);
ASSERT(result != value_obj);
__ LoadDFromOffset(DTMP, value_obj, Double::value_offset() - kHeapObjectTag);
__ LoadDFromOffset(DTMP, value_obj,
compiler::target::Double::value_offset() - kHeapObjectTag);
Label done, do_call;
// First check for NaN. Checking for minint after the conversion doesn't work
@ -6066,11 +6137,11 @@ void CheckClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register value = locs()->in(0).reg();
Label* deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptCheckClass);
if (cids_.IsSingleCid()) {
__ CompareImmediate(value, Smi::RawValue(cids_.cid_start));
__ CompareImmediate(value, compiler::target::ToRawSmi(cids_.cid_start));
__ b(deopt, NE);
} else {
__ AddImmediate(value, -Smi::RawValue(cids_.cid_start));
__ CompareImmediate(value, Smi::RawValue(cids_.Extent()));
__ AddImmediate(value, -compiler::target::ToRawSmi(cids_.cid_start));
__ CompareImmediate(value, compiler::target::ToRawSmi(cids_.Extent()));
__ b(deopt, HI); // Unsigned higher.
}
}
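The else branch above is the standard single-compare range check: subtracting the lower bound makes any cid below the range wrap around as an unsigned value, so one unsigned comparison against the extent covers both bounds. The same idea on plain integers (the real code operates on tagged Smi values):

#include <cstdint>

// True iff cid_start <= cid <= cid_start + extent, using one unsigned compare.
inline bool CidInRangeSketch(uintptr_t cid, uintptr_t cid_start, uintptr_t extent) {
  return (cid - cid_start) <= extent;  // the HI branch deoptimizes when this fails
}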
@ -6096,9 +6167,11 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Location index_loc = locs()->in(kIndexPos);
if (length_loc.IsConstant() && index_loc.IsConstant()) {
ASSERT((Smi::Cast(length_loc.constant()).Value() <=
Smi::Cast(index_loc.constant()).Value()) ||
(Smi::Cast(index_loc.constant()).Value() < 0));
#ifdef DEBUG
const int32_t length = compiler::target::SmiValue(length_loc.constant());
const int32_t index = compiler::target::SmiValue(index_loc.constant());
ASSERT((length <= index) || (index < 0));
#endif
    // Unconditionally deoptimize for constant bounds checks because they
    // only occur when the index is out-of-bounds.
__ b(deopt);
@ -6108,20 +6181,21 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t index_cid = index()->Type()->ToCid();
if (index_loc.IsConstant()) {
const Register length = length_loc.reg();
const Smi& index = Smi::Cast(index_loc.constant());
__ CompareImmediate(length, reinterpret_cast<int32_t>(index.raw()));
__ CompareImmediate(length,
compiler::target::ToRawSmi(index_loc.constant()));
__ b(deopt, LS);
} else if (length_loc.IsConstant()) {
const Smi& length = Smi::Cast(length_loc.constant());
const Register index = index_loc.reg();
if (index_cid != kSmiCid) {
__ BranchIfNotSmi(index, deopt);
}
if (length.Value() == Smi::kMaxValue) {
if (compiler::target::SmiValue(length_loc.constant()) ==
compiler::target::kSmiMax) {
__ tst(index, Operand(index));
__ b(deopt, MI);
} else {
__ CompareImmediate(index, reinterpret_cast<int32_t>(length.raw()));
__ CompareImmediate(index,
compiler::target::ToRawSmi(length_loc.constant()));
__ b(deopt, CS);
}
} else {
@ -6353,7 +6427,8 @@ class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
switch (instruction()->AsShiftInt64Op()->op_kind()) {
case Token::kSHR:
__ Asr(out_hi, left_hi, Operand(kBitsPerWord - 1), GE);
__ Asr(out_hi, left_hi, Operand(compiler::target::kBitsPerWord - 1),
GE);
__ mov(out_lo, Operand(out_hi), GE);
break;
case Token::kSHL: {
@ -6372,10 +6447,13 @@ class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
// The unboxed int64 argument is passed through a dedicated slot in Thread.
    // TODO(dartbug.com/33549): Clean this up when unboxed values
    // can be passed as arguments.
__ StoreToOffset(kWord, right_lo, THR,
Thread::unboxed_int64_runtime_arg_offset());
__ StoreToOffset(kWord, right_hi, THR,
Thread::unboxed_int64_runtime_arg_offset() + kWordSize);
__ StoreToOffset(
kWord, right_lo, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset());
__ StoreToOffset(
kWord, right_hi, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
compiler::target::kWordSize);
}
};
@ -6513,10 +6591,13 @@ class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
// The unboxed int64 argument is passed through a dedicated slot in Thread.
    // TODO(dartbug.com/33549): Clean this up when unboxed values
    // can be passed as arguments.
__ StoreToOffset(kWord, right_lo, THR,
Thread::unboxed_int64_runtime_arg_offset());
__ StoreToOffset(kWord, right_hi, THR,
Thread::unboxed_int64_runtime_arg_offset() + kWordSize);
__ StoreToOffset(
kWord, right_lo, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset());
__ StoreToOffset(
kWord, right_hi, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
compiler::target::kWordSize);
}
};
@ -6809,7 +6890,7 @@ void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label* deopt =
compiler->AddDeoptStub(deopt_id(), ICData::kDeoptUnboxInteger);
ASSERT(to() == kUnboxedInt32);
__ cmp(in_hi, Operand(in_lo, ASR, kBitsPerWord - 1));
__ cmp(in_hi, Operand(in_lo, ASR, compiler::target::kBitsPerWord - 1));
__ b(deopt, NE);
}
} else if (from() == kUnboxedUint32 || from() == kUnboxedInt32) {
@ -6824,7 +6905,7 @@ void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ eor(out_hi, out_hi, Operand(out_hi));
} else {
ASSERT(from() == kUnboxedInt32);
__ mov(out_hi, Operand(in, ASR, kBitsPerWord - 1));
__ mov(out_hi, Operand(in, ASR, compiler::target::kBitsPerWord - 1));
}
} else {
UNREACHABLE();
@ -6847,7 +6928,8 @@ void UnboxedWidthExtenderInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register reg = locs()->in(0).reg();
// There are no builtin sign- or zero-extension instructions, so we'll have to
// use shifts instead.
const intptr_t shift_length = (kWordSize - from_width_bytes()) * kBitsPerByte;
const intptr_t shift_length =
(compiler::target::kWordSize - from_width_bytes()) * kBitsPerByte;
__ Lsl(reg, reg, Operand(shift_length));
switch (representation_) {
case kUnboxedInt32: // Sign extend operand.
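As the comment says, the extender is two shifts: move the narrow value to the top of the word, then shift it back down arithmetically (sign extend) or logically (zero extend). A sketch assuming a 32-bit word:

#include <cstdint>

// Sign-extend the low `from_bits` bits of x to a full 32-bit value (Lsl then Asr).
inline int32_t SignExtendSketch(uint32_t x, int from_bits) {
  const int shift = 32 - from_bits;  // (kWordSize - from_width_bytes()) * kBitsPerByte
  return static_cast<int32_t>(x << shift) >> shift;
}

// Zero-extend the low `from_bits` bits of x (Lsl then Lsr).
inline uint32_t ZeroExtendSketch(uint32_t x, int from_bits) {
  const int shift = 32 - from_bits;
  return (x << shift) >> shift;
}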


@ -1079,9 +1079,8 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
CallingConventions::ArgumentRegisters[0] != TMP2 &&
CallingConventions::ArgumentRegisters[0] != R1);
__ LoadImmediate(CallingConventions::ArgumentRegisters[0], callback_id_);
__ LoadFromOffset(
R1, THR,
compiler::target::Thread::verify_callback_isolate_entry_point_offset());
__ LoadFromOffset(R1, THR,
compiler::target::Thread::verify_callback_entry_offset());
__ blr(R1);
// Load the code object.


@ -1049,8 +1049,9 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Now that the safepoint has ended, we can hold Dart objects with bare hands.
// TODO(35934): fix linking issue
__ pushl(Immediate(callback_id_));
__ movl(EAX, Address(THR, compiler::target::Thread::
verify_callback_isolate_entry_point_offset()));
__ movl(
EAX,
Address(THR, compiler::target::Thread::verify_callback_entry_offset()));
__ call(EAX);
__ popl(EAX);


@ -1072,8 +1072,9 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Otherwise we'll clobber the argument sent from the caller.
COMPILE_ASSERT(RAX != CallingConventions::kArg1Reg);
__ movq(CallingConventions::kArg1Reg, Immediate(callback_id_));
__ movq(RAX, Address(THR, compiler::target::Thread::
verify_callback_isolate_entry_point_offset()));
__ movq(
RAX,
Address(THR, compiler::target::Thread::verify_callback_entry_offset()));
__ call(RAX);
// Load the code object.


@ -2391,7 +2391,8 @@ static bool ShouldInlineInt64ArrayOps() {
static bool CanUnboxInt32() {
// Int32/Uint32 can be unboxed if it fits into a smi or the platform
// supports unboxed mints.
return (kSmiBits >= 32) || FlowGraphCompiler::SupportsUnboxedInt64();
return (compiler::target::kSmiBits >= 32) ||
FlowGraphCompiler::SupportsUnboxedInt64();
}
// Quick access to the current one.
@ -2423,8 +2424,9 @@ static intptr_t PrepareInlineIndexedOp(FlowGraph* flow_graph,
*array = elements;
array_cid = kArrayCid;
} else if (RawObject::IsExternalTypedDataClassId(array_cid)) {
LoadUntaggedInstr* elements = new (Z) LoadUntaggedInstr(
new (Z) Value(*array), ExternalTypedData::data_offset());
LoadUntaggedInstr* elements = new (Z)
LoadUntaggedInstr(new (Z) Value(*array),
compiler::target::TypedDataBase::data_field_offset());
*cursor = flow_graph->AppendTo(*cursor, elements, NULL, FlowGraph::kValue);
*array = elements;
}
@ -2456,11 +2458,12 @@ static bool InlineGetIndexed(FlowGraph* flow_graph,
if ((array_cid == kTypedDataInt32ArrayCid) ||
(array_cid == kTypedDataUint32ArrayCid)) {
// Deoptimization may be needed if result does not always fit in a Smi.
deopt_id = (kSmiBits >= 32) ? DeoptId::kNone : call->deopt_id();
deopt_id =
(compiler::target::kSmiBits >= 32) ? DeoptId::kNone : call->deopt_id();
}
// Array load and return.
intptr_t index_scale = Instance::ElementSizeFor(array_cid);
intptr_t index_scale = compiler::target::Instance::ElementSizeFor(array_cid);
LoadIndexedInstr* load = new (Z)
LoadIndexedInstr(new (Z) Value(array), new (Z) Value(index), index_scale,
array_cid, kAlignedAccess, deopt_id, call->token_pos());
@ -2679,7 +2682,8 @@ static bool InlineSetIndexed(FlowGraph* flow_graph,
FlowGraph::kValue);
}
const intptr_t index_scale = Instance::ElementSizeFor(array_cid);
const intptr_t index_scale =
compiler::target::Instance::ElementSizeFor(array_cid);
*last = new (Z) StoreIndexedInstr(
new (Z) Value(array), new (Z) Value(index), new (Z) Value(stored_value),
needs_store_barrier, index_scale, array_cid, kAlignedAccess,
@ -2835,7 +2839,7 @@ static void PrepareInlineTypedArrayBoundsCheck(FlowGraph* flow_graph,
call->token_pos());
*cursor = flow_graph->AppendTo(*cursor, length, NULL, FlowGraph::kValue);
intptr_t element_size = Instance::ElementSizeFor(array_cid);
intptr_t element_size = compiler::target::Instance::ElementSizeFor(array_cid);
ConstantInstr* bytes_per_element =
flow_graph->GetConstant(Smi::Handle(Z, Smi::New(element_size)));
BinarySmiOpInstr* len_in_bytes = new (Z)
@ -2846,7 +2850,8 @@ static void PrepareInlineTypedArrayBoundsCheck(FlowGraph* flow_graph,
// adjusted_length = len_in_bytes - (element_size - 1).
Definition* adjusted_length = len_in_bytes;
intptr_t adjustment = Instance::ElementSizeFor(view_cid) - 1;
intptr_t adjustment =
compiler::target::Instance::ElementSizeFor(view_cid) - 1;
if (adjustment > 0) {
ConstantInstr* length_adjustment =
flow_graph->GetConstant(Smi::Handle(Z, Smi::New(adjustment)));
@ -2887,8 +2892,9 @@ static void PrepareInlineByteArrayBaseOp(FlowGraph* flow_graph,
if (array_cid == kDynamicCid ||
RawObject::IsExternalTypedDataClassId(array_cid)) {
// Internal or External typed data: load untagged.
auto elements = new (Z) LoadUntaggedInstr(
new (Z) Value(*array), TypedDataBase::data_field_offset());
auto elements = new (Z)
LoadUntaggedInstr(new (Z) Value(*array),
compiler::target::TypedDataBase::data_field_offset());
*cursor = flow_graph->AppendTo(*cursor, elements, NULL, FlowGraph::kValue);
*array = elements;
} else {
@ -3045,7 +3051,7 @@ static bool InlineByteArrayBaseStore(FlowGraph* flow_graph,
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
// On 64-bit platforms assume that stored value is always a smi.
if (kSmiBits >= 32) {
if (compiler::target::kSmiBits >= 32) {
value_check = Cids::CreateMonomorphic(Z, kSmiCid);
}
break;
@ -3209,18 +3215,21 @@ static Definition* PrepareInlineStringIndexOp(FlowGraph* flow_graph,
// For external strings: Load backing store.
if (cid == kExternalOneByteStringCid) {
str = new LoadUntaggedInstr(new Value(str),
ExternalOneByteString::external_data_offset());
str = new LoadUntaggedInstr(
new Value(str),
compiler::target::ExternalOneByteString::external_data_offset());
cursor = flow_graph->AppendTo(cursor, str, NULL, FlowGraph::kValue);
} else if (cid == kExternalTwoByteStringCid) {
str = new LoadUntaggedInstr(new Value(str),
ExternalTwoByteString::external_data_offset());
str = new LoadUntaggedInstr(
new Value(str),
compiler::target::ExternalTwoByteString::external_data_offset());
cursor = flow_graph->AppendTo(cursor, str, NULL, FlowGraph::kValue);
}
LoadIndexedInstr* load_indexed = new (Z) LoadIndexedInstr(
new (Z) Value(str), new (Z) Value(index), Instance::ElementSizeFor(cid),
cid, kAlignedAccess, DeoptId::kNone, call->token_pos());
LoadIndexedInstr* load_indexed = new (Z)
LoadIndexedInstr(new (Z) Value(str), new (Z) Value(index),
compiler::target::Instance::ElementSizeFor(cid), cid,
kAlignedAccess, DeoptId::kNone, call->token_pos());
cursor = flow_graph->AppendTo(cursor, load_indexed, NULL, FlowGraph::kValue);
auto box = BoxInstr::Create(kUnboxedIntPtr, new Value(load_indexed));


@ -52,7 +52,8 @@ struct ExtraLoopInfo;
class FlowGraphAllocator : public ValueObject {
public:
// Number of stack slots needed for a fpu register spill slot.
static const intptr_t kDoubleSpillFactor = kDoubleSize / kWordSize;
static const intptr_t kDoubleSpillFactor =
kDoubleSize / compiler::target::kWordSize;
explicit FlowGraphAllocator(const FlowGraph& flow_graph,
bool intrinsic_mode = false);
@ -525,16 +526,16 @@ class LiveRange : public ZoneAllocated {
Location spill_slot() const { return spill_slot_; }
bool HasOnlyUnconstrainedUsesInLoop(intptr_t loop_id) const {
if (loop_id < kBitsPerWord) {
const intptr_t mask = static_cast<intptr_t>(1) << loop_id;
if (loop_id < kMaxLoops) {
const uint64_t mask = static_cast<uint64_t>(1) << loop_id;
return (has_only_any_uses_in_loops_ & mask) != 0;
}
return false;
}
void MarkHasOnlyUnconstrainedUsesInLoop(intptr_t loop_id) {
if (loop_id < kBitsPerWord) {
has_only_any_uses_in_loops_ |= static_cast<intptr_t>(1) << loop_id;
if (loop_id < kMaxLoops) {
has_only_any_uses_in_loops_ |= static_cast<uint64_t>(1) << loop_id;
}
}
@ -576,7 +577,8 @@ class LiveRange : public ZoneAllocated {
LiveRange* next_sibling_;
intptr_t has_only_any_uses_in_loops_;
static constexpr intptr_t kMaxLoops = sizeof(uint64_t) * kBitsPerByte;
uint64_t has_only_any_uses_in_loops_;
bool is_loop_phi_;
AllocationFinger finger_;


@ -145,7 +145,7 @@ Address LocationToStackSlotAddress(Location loc) {
template <class Register, class FpuRegister>
intptr_t TemplateLocation<Register, FpuRegister>::ToStackSlotOffset() const {
return stack_index() * kWordSize;
return stack_index() * compiler::target::kWordSize;
}
template <class Register, class FpuRegister>


@ -816,8 +816,8 @@ class BoundsCheckGeneralizer {
// AOT should only see non-deopting GenericCheckBound.
ASSERT(!FLAG_precompiled_mode);
ConstantInstr* max_smi =
flow_graph_->GetConstant(Smi::Handle(Smi::New(Smi::kMaxValue)));
ConstantInstr* max_smi = flow_graph_->GetConstant(
Smi::Handle(Smi::New(compiler::target::kSmiMax)));
for (intptr_t i = 0; i < non_positive_symbols.length(); i++) {
CheckArrayBoundInstr* precondition = new CheckArrayBoundInstr(
new Value(max_smi), new Value(non_positive_symbols[i]),
@ -1075,7 +1075,7 @@ class BoundsCheckGeneralizer {
c = left_const + right_const;
if (Utils::WillAddOverflow(left_const, right_const) ||
!Smi::IsValid(c)) {
!compiler::target::IsSmi(c)) {
return false; // Abort.
}
@ -1120,7 +1120,7 @@ class BoundsCheckGeneralizer {
c = (left_const - right_const);
if (Utils::WillSubOverflow(left_const, right_const) ||
!Smi::IsValid(c)) {
!compiler::target::IsSmi(c)) {
return false; // Abort.
}
@ -1180,7 +1180,7 @@ class BoundsCheckGeneralizer {
}
} else if ((*defn)->IsConstant()) {
ConstantInstr* constant_defn = (*defn)->AsConstant();
if ((constant != NULL) && constant_defn->value().IsSmi()) {
if ((constant != NULL) && constant_defn->IsSmi()) {
*defn = NULL;
*constant = Smi::Cast(constant_defn->value()).Value();
}
@ -1195,7 +1195,7 @@ class BoundsCheckGeneralizer {
Definition* defn) {
if (defn->IsConstant()) {
const Object& value = defn->AsConstant()->value();
return value.IsSmi() && (Smi::Cast(value).Value() >= 0);
return compiler::target::IsSmi(value) && (Smi::Cast(value).Value() >= 0);
} else if (defn->HasSSATemp()) {
if (!RangeUtils::IsPositive(defn->range())) {
symbols->Add(defn);
@ -1683,7 +1683,7 @@ void IntegerInstructionSelector::ReplaceInstructions() {
}
RangeBoundary RangeBoundary::FromDefinition(Definition* defn, int64_t offs) {
if (defn->IsConstant() && defn->AsConstant()->value().IsSmi()) {
if (defn->IsConstant() && defn->AsConstant()->IsSmi()) {
return FromConstant(Smi::Cast(defn->AsConstant()->value()).Value() + offs);
}
ASSERT(IsValidOffsetForSymbolicRangeBoundary(offs));
@ -2289,7 +2289,8 @@ void Range::Mul(const Range* left_range,
const int64_t left_max = ConstantAbsMax(left_range);
const int64_t right_max = ConstantAbsMax(right_range);
if ((left_max <= -kSmiMin) && (right_max <= -kSmiMin) &&
if ((left_max <= -compiler::target::kSmiMin) &&
(right_max <= -compiler::target::kSmiMin) &&
((left_max == 0) || (right_max <= kMaxInt64 / left_max))) {
// Product of left and right max values stays in 64 bit range.
const int64_t mul_max = left_max * right_max;
@ -2625,8 +2626,9 @@ void LoadFieldInstr::InferRange(RangeAnalysis* analysis, Range* range) {
switch (slot().kind()) {
case Slot::Kind::kArray_length:
case Slot::Kind::kGrowableObjectArray_length:
*range = Range(RangeBoundary::FromConstant(0),
RangeBoundary::FromConstant(Array::kMaxElements));
*range = Range(
RangeBoundary::FromConstant(0),
RangeBoundary::FromConstant(compiler::target::Array::kMaxElements));
break;
case Slot::Kind::kTypedDataBase_length:
@ -2635,8 +2637,9 @@ void LoadFieldInstr::InferRange(RangeAnalysis* analysis, Range* range) {
break;
case Slot::Kind::kString_length:
*range = Range(RangeBoundary::FromConstant(0),
RangeBoundary::FromConstant(String::kMaxElements));
*range = Range(
RangeBoundary::FromConstant(0),
RangeBoundary::FromConstant(compiler::target::String::kMaxElements));
break;
case Slot::Kind::kDartField:
@ -2929,7 +2932,8 @@ bool CheckArrayBoundInstr::IsRedundant(const RangeBoundary& length) {
  // Range of the index is unknown, so we can't decide if the check is redundant.
if (index_range == NULL) {
if (!(index()->BindsToConstant() && index()->BoundConstant().IsSmi())) {
if (!(index()->BindsToConstant() &&
compiler::target::IsSmi(index()->BoundConstant()))) {
return false;
}


@ -64,7 +64,8 @@ class RangeBoundary : public ValueObject {
static RangeBoundary FromDefinition(Definition* defn, int64_t offs = 0);
static bool IsValidOffsetForSymbolicRangeBoundary(int64_t offset) {
if ((offset > (kMaxInt64 - kSmiMax)) || (offset < (kMinInt64 - kSmiMin))) {
if ((offset > (kMaxInt64 - compiler::target::kSmiMax)) ||
(offset < (kMinInt64 - compiler::target::kSmiMin))) {
// Avoid creating symbolic range boundaries which can wrap around.
return false;
}
@ -72,16 +73,20 @@ class RangeBoundary : public ValueObject {
}
// Construct a RangeBoundary for the constant MinSmi value.
static RangeBoundary MinSmi() { return FromConstant(Smi::kMinValue); }
static RangeBoundary MinSmi() {
return FromConstant(compiler::target::kSmiMin);
}
// Construct a RangeBoundary for the constant MaxSmi value.
static RangeBoundary MaxSmi() { return FromConstant(Smi::kMaxValue); }
static RangeBoundary MaxSmi() {
return FromConstant(compiler::target::kSmiMax);
}
// Construct a RangeBoundary for the constant kMin value.
static RangeBoundary MinConstant(RangeSize size) {
switch (size) {
case kRangeBoundarySmi:
return FromConstant(Smi::kMinValue);
return FromConstant(compiler::target::kSmiMin);
case kRangeBoundaryInt32:
return FromConstant(kMinInt32);
case kRangeBoundaryInt64:
@ -94,7 +99,7 @@ class RangeBoundary : public ValueObject {
static RangeBoundary MaxConstant(RangeSize size) {
switch (size) {
case kRangeBoundarySmi:
return FromConstant(Smi::kMaxValue);
return FromConstant(compiler::target::kSmiMax);
case kRangeBoundaryInt32:
return FromConstant(kMaxInt32);
case kRangeBoundaryInt64:
@ -138,7 +143,8 @@ class RangeBoundary : public ValueObject {
// Returns true when this is a constant that is outside of Smi range.
bool OverflowedSmi() const {
return (IsConstant() && !Smi::IsValid(ConstantValue())) || IsInfinity();
return (IsConstant() && !compiler::target::IsSmi(ConstantValue())) ||
IsInfinity();
}
bool Overflowed(RangeBoundary::RangeSize size) const {


@ -67,15 +67,18 @@ TEST_CASE(RangeTests) {
TEST_RANGE_OP(Range::Shl, -1, 1, 63, 63, RangeBoundary(kMinInt64),
RangeBoundary::PositiveInfinity());
if (kBitsPerWord == 64) {
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62,
RangeBoundary(compiler::target::kSmiMin),
RangeBoundary(compiler::target::kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(-(1 << 30)),
RangeBoundary(1 << 30));
} else {
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62, RangeBoundary(kSmiMin),
RangeBoundary(kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 30, 30,
RangeBoundary(compiler::target::kSmiMin),
RangeBoundary(compiler::target::kSmiMax));
TEST_RANGE_OP_SMI(Range::Shl, -1, 1, 62, 62,
RangeBoundary(compiler::target::kSmiMin),
RangeBoundary(compiler::target::kSmiMax));
}
TEST_RANGE_OP(Range::Shl, 0, 100, 0, 64, RangeBoundary(0),
RangeBoundary::PositiveInfinity());


@ -1725,10 +1725,9 @@ class LoadOptimizer : public ValueObject {
Definition* forward_def = graph_->constant_null();
if (alloc->ArgumentCount() > 0) {
ASSERT(alloc->ArgumentCount() == 1);
intptr_t type_args_offset =
alloc->cls().type_arguments_field_offset();
if (load->slot().IsTypeArguments() &&
load->slot().offset_in_bytes() == type_args_offset) {
const Slot& type_args_slot = Slot::GetTypeArgumentsSlotFor(
graph_->thread(), alloc->cls());
if (load->slot().IsIdentical(type_args_slot)) {
forward_def = alloc->PushArgumentAt(0)->value()->definition();
}
}


@ -54,7 +54,8 @@ const Slot& Slot::GetNativeSlot(Kind kind) {
#define FIELD_VAR (0)
#define DEFINE_NATIVE_FIELD(ClassName, FieldName, cid, mutability) \
Slot(Kind::k##ClassName##_##FieldName, FIELD_##mutability, k##cid##Cid, \
ClassName::FieldName##_offset(), #ClassName "." #FieldName, nullptr),
compiler::target::ClassName::FieldName##_offset(), \
#ClassName "." #FieldName, nullptr),
NATIVE_SLOTS_LIST(DEFINE_NATIVE_FIELD)
@ -102,7 +103,8 @@ const Slot& Slot::GetTypeArgumentsSlotAt(Thread* thread, intptr_t offset) {
}
const Slot& Slot::GetTypeArgumentsSlotFor(Thread* thread, const Class& cls) {
return GetTypeArgumentsSlotAt(thread, cls.type_arguments_field_offset());
return GetTypeArgumentsSlotAt(
thread, compiler::target::Class::TypeArgumentsFieldOffset(cls));
}
const Slot& Slot::GetContextVariableSlotFor(Thread* thread,
@ -115,7 +117,8 @@ const Slot& Slot::GetContextVariableSlotFor(Thread* thread,
return SlotCache::Instance(thread).Canonicalize(Slot(
Kind::kCapturedVariable,
IsImmutableBit::encode(variable.is_final()) | IsNullableBit::encode(true),
kDynamicCid, Context::variable_offset(variable.index().value()),
kDynamicCid,
compiler::target::Context::variable_offset(variable.index().value()),
&variable.name(), /*static_type=*/nullptr));
}
@ -156,7 +159,7 @@ const Slot& Slot::Get(const Field& field,
IsImmutableBit::encode(field.is_final() || field.is_const()) |
IsNullableBit::encode(is_nullable) |
IsGuardedBit::encode(used_guarded_state),
nullable_cid, field.Offset(), &field,
nullable_cid, compiler::target::Field::OffsetOf(field), &field,
&AbstractType::ZoneHandle(zone, field.type())));
// If properties of this slot were based on the guarded state make sure


@ -168,6 +168,8 @@ class Slot : public ZoneAllocated {
bool Equals(const Slot* other) const;
intptr_t Hashcode() const;
bool IsIdentical(const Slot& other) const { return this == &other; }
private:
Slot(Kind kind,
int8_t bits,


@ -510,7 +510,7 @@ bool CallSpecializer::TryStringLengthOneEquality(InstanceCallInstr* call,
}
static bool SmiFitsInDouble() {
return kSmiBits < 53;
return compiler::target::kSmiBits < 53;
}
bool CallSpecializer::TryReplaceWithEqualityOp(InstanceCallInstr* call,
@ -1836,8 +1836,9 @@ Definition* TypedDataSpecializer::AppendLoadIndexed(TemplateDartCall<0>* call,
const intptr_t element_size = TypedDataBase::ElementSizeFor(cid);
const intptr_t index_scale = element_size;
auto data = new (Z) LoadUntaggedInstr(new (Z) Value(array),
TypedDataBase::data_field_offset());
auto data = new (Z)
LoadUntaggedInstr(new (Z) Value(array),
compiler::target::TypedDataBase::data_field_offset());
flow_graph_->InsertBefore(call, data, call->env(), FlowGraph::kValue);
Definition* load = new (Z)
@ -1913,8 +1914,9 @@ void TypedDataSpecializer::AppendStoreIndexed(TemplateDartCall<0>* call,
break;
}
auto data = new (Z) LoadUntaggedInstr(new (Z) Value(array),
TypedDataBase::data_field_offset());
auto data = new (Z)
LoadUntaggedInstr(new (Z) Value(array),
compiler::target::TypedDataBase::data_field_offset());
flow_graph_->InsertBefore(call, data, call->env(), FlowGraph::kValue);
auto store = new (Z) StoreIndexedInstr(


@ -539,8 +539,9 @@ Fragment BaseFlowGraphBuilder::StoreIndexed(intptr_t class_id) {
value->BindsToConstant() ? kNoStoreBarrier : kEmitStoreBarrier;
StoreIndexedInstr* store = new (Z) StoreIndexedInstr(
Pop(), // Array.
index, value, emit_store_barrier, Instance::ElementSizeFor(class_id),
class_id, kAlignedAccess, DeoptId::kNone, TokenPosition::kNoSource);
index, value, emit_store_barrier,
compiler::target::Instance::ElementSizeFor(class_id), class_id,
kAlignedAccess, DeoptId::kNone, TokenPosition::kNoSource);
return Fragment(store);
}


@ -674,7 +674,8 @@ void BytecodeFlowGraphBuilder::BuildCheckFunctionTypeArgs() {
store_type_args += B->LoadArgDescriptor();
store_type_args += B->LoadNativeField(Slot::ArgumentsDescriptor_count());
store_type_args += B->LoadFpRelativeSlot(
kWordSize * (1 + compiler::target::frame_layout.param_end_from_fp),
compiler::target::kWordSize *
(1 + compiler::target::frame_layout.param_end_from_fp),
CompileType::CreateNullable(/*is_nullable=*/true, kTypeArgumentsCid));
store_type_args +=
B->StoreLocalRaw(TokenPosition::kNoSource, type_args_var);


@ -1033,13 +1033,13 @@ Fragment FlowGraphBuilder::BuildTypedDataViewFactoryConstructor(
// instructions!
body += LoadLocal(view_object);
body += LoadLocal(typed_data);
body += LoadUntagged(TypedDataBase::data_field_offset());
body += LoadUntagged(compiler::target::TypedDataBase::data_field_offset());
body += ConvertUntaggedToIntptr();
body += LoadLocal(offset_in_bytes);
body += UnboxSmiToIntptr();
body += AddIntptrIntegers();
body += ConvertIntptrToUntagged();
body += StoreUntagged(TypedDataView::data_field_offset());
body += StoreUntagged(compiler::target::TypedDataBase::data_field_offset());
return body;
}
@ -1772,9 +1772,10 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfNoSuchMethodForwarder(
loop_body += LoadLocal(argument_count);
loop_body += LoadLocal(index);
loop_body += SmiBinaryOp(Token::kSUB, /*truncate=*/true);
loop_body += LoadFpRelativeSlot(
kWordSize * compiler::target::frame_layout.param_end_from_fp,
CompileType::Dynamic());
loop_body +=
LoadFpRelativeSlot(compiler::target::kWordSize *
compiler::target::frame_layout.param_end_from_fp,
CompileType::Dynamic());
loop_body += StoreIndexed(kArrayCid);
// ++i


@ -190,8 +190,9 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
for (; param < num_fixed_params; ++param) {
copy_args_prologue += LoadLocal(optional_count_var);
copy_args_prologue += LoadFpRelativeSlot(
kWordSize * (compiler::target::frame_layout.param_end_from_fp +
num_fixed_params - param),
compiler::target::kWordSize *
(compiler::target::frame_layout.param_end_from_fp +
num_fixed_params - param),
ParameterType(ParameterVariable(param)));
copy_args_prologue +=
StoreLocalRaw(TokenPosition::kNoSource, ParameterVariable(param));
@ -211,8 +212,9 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
Fragment good(supplied);
good += LoadLocal(optional_count_var);
good += LoadFpRelativeSlot(
kWordSize * (compiler::target::frame_layout.param_end_from_fp +
num_fixed_params - param),
compiler::target::kWordSize *
(compiler::target::frame_layout.param_end_from_fp +
num_fixed_params - param),
ParameterType(ParameterVariable(param)));
good += StoreLocalRaw(TokenPosition::kNoSource, ParameterVariable(param));
good += Drop();
@ -249,7 +251,8 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
ASSERT(num_opt_named_params > 0);
const intptr_t first_name_offset =
ArgumentsDescriptor::first_named_entry_offset() - Array::data_offset();
compiler::target::ArgumentsDescriptor::first_named_entry_offset() -
compiler::target::Array::data_offset();
// Start by alphabetically sorting the names of the optional parameters.
int* opt_param_position = Z->Alloc<int>(num_opt_named_params);
@ -266,19 +269,23 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
for (intptr_t i = 0; param < num_params; ++param, ++i) {
JoinEntryInstr* join = BuildJoinEntry();
copy_args_prologue +=
IntConstant(ArgumentsDescriptor::named_entry_size() / kWordSize);
copy_args_prologue += IntConstant(
compiler::target::ArgumentsDescriptor::named_entry_size() /
compiler::target::kWordSize);
copy_args_prologue += LoadLocal(optional_count_vars_processed);
copy_args_prologue += SmiBinaryOp(Token::kMUL, /* truncate= */ true);
LocalVariable* tuple_diff = MakeTemporary();
// name = arg_desc[names_offset + arg_desc_name_index + nameOffset]
copy_args_prologue += LoadArgDescriptor();
copy_args_prologue += IntConstant(
(first_name_offset + ArgumentsDescriptor::name_offset()) / kWordSize);
copy_args_prologue +=
IntConstant((first_name_offset +
compiler::target::ArgumentsDescriptor::name_offset()) /
compiler::target::kWordSize);
copy_args_prologue += LoadLocal(tuple_diff);
copy_args_prologue += SmiBinaryOp(Token::kADD, /* truncate= */ true);
copy_args_prologue += LoadIndexed(/* index_scale = */ kWordSize);
copy_args_prologue +=
LoadIndexed(/* index_scale = */ compiler::target::kWordSize);
// first name in sorted list of all names
const String& param_name = String::ZoneHandle(
@ -303,15 +310,17 @@ Fragment PrologueBuilder::BuildOptionalParameterHandling(
// pos = arg_desc[names_offset + arg_desc_name_index + positionOffset]
good += LoadArgDescriptor();
good += IntConstant(
(first_name_offset + ArgumentsDescriptor::position_offset()) /
kWordSize);
(first_name_offset +
compiler::target::ArgumentsDescriptor::position_offset()) /
compiler::target::kWordSize);
good += LoadLocal(tuple_diff);
good += SmiBinaryOp(Token::kADD, /* truncate= */ true);
good += LoadIndexed(/* index_scale = */ kWordSize);
good += LoadIndexed(/* index_scale = */ compiler::target::kWordSize);
}
good += SmiBinaryOp(Token::kSUB, /* truncate= */ true);
good += LoadFpRelativeSlot(
kWordSize * compiler::target::frame_layout.param_end_from_fp,
compiler::target::kWordSize *
compiler::target::frame_layout.param_end_from_fp,
ParameterType(ParameterVariable(opt_param_position[i])));
// Copy down.
@ -417,7 +426,8 @@ Fragment PrologueBuilder::BuildTypeArgumentsHandling(JoinEntryInstr* nsm) {
store_type_args += LoadArgDescriptor();
store_type_args += LoadNativeField(Slot::ArgumentsDescriptor_count());
store_type_args += LoadFpRelativeSlot(
kWordSize * (1 + compiler::target::frame_layout.param_end_from_fp),
compiler::target::kWordSize *
(1 + compiler::target::frame_layout.param_end_from_fp),
CompileType::CreateNullable(/*is_nullable=*/true, kTypeArgumentsCid));
store_type_args += StoreLocal(TokenPosition::kNoSource, type_args_var);
store_type_args += Drop();


@ -169,12 +169,12 @@ static bool IntrinsifyArrayGetIndexed(FlowGraph* flow_graph,
if (RawObject::IsExternalTypedDataClassId(array_cid)) {
array = builder.AddDefinition(new LoadUntaggedInstr(
new Value(array), ExternalTypedData::data_offset()));
new Value(array), target::TypedDataBase::data_field_offset()));
}
Definition* result = builder.AddDefinition(new LoadIndexedInstr(
new Value(array), new Value(index),
Instance::ElementSizeFor(array_cid), // index scale
target::Instance::ElementSizeFor(array_cid), // index scale
array_cid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
// We don't perform [RangeAnalysis] for graph intrinsics. To inform the
@ -347,14 +347,14 @@ static bool IntrinsifyArraySetIndexed(FlowGraph* flow_graph,
if (RawObject::IsExternalTypedDataClassId(array_cid)) {
array = builder.AddDefinition(new LoadUntaggedInstr(
new Value(array), ExternalTypedData::data_offset()));
new Value(array), target::TypedDataBase::data_field_offset()));
}
// No store barrier.
ASSERT(RawObject::IsExternalTypedDataClassId(array_cid) ||
RawObject::IsTypedDataClassId(array_cid));
builder.AddInstruction(new StoreIndexedInstr(
new Value(array), new Value(index), new Value(value), kNoStoreBarrier,
Instance::ElementSizeFor(array_cid), // index scale
target::Instance::ElementSizeFor(array_cid), // index scale
array_cid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
// Return null.
Definition* null_def = builder.AddNullDefinition();
@ -482,15 +482,15 @@ static bool BuildCodeUnitAt(FlowGraph* flow_graph, intptr_t cid) {
// For external strings: Load external data.
if (cid == kExternalOneByteStringCid) {
str = builder.AddDefinition(new LoadUntaggedInstr(
new Value(str), ExternalOneByteString::external_data_offset()));
new Value(str), target::ExternalOneByteString::external_data_offset()));
} else if (cid == kExternalTwoByteStringCid) {
str = builder.AddDefinition(new LoadUntaggedInstr(
new Value(str), ExternalTwoByteString::external_data_offset()));
new Value(str), target::ExternalTwoByteString::external_data_offset()));
}
Definition* load = builder.AddDefinition(new LoadIndexedInstr(
new Value(str), new Value(index), Instance::ElementSizeFor(cid), cid,
kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
new Value(str), new Value(index), target::Instance::ElementSizeFor(cid),
cid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
// We don't perform [RangeAnalysis] for graph intrinsics. To inform the
// following boxing instruction about a more precise range we attach it here
@ -687,7 +687,7 @@ bool GraphIntrinsifier::Build_GrowableArrayGetIndexed(FlowGraph* flow_graph) {
Slot::GrowableObjectArray_data(), builder.TokenPos()));
Definition* result = builder.AddDefinition(new LoadIndexedInstr(
new Value(backing_store), new Value(index),
Instance::ElementSizeFor(kArrayCid), // index scale
target::Instance::ElementSizeFor(kArrayCid), // index scale
kArrayCid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
builder.AddReturn(new Value(result));
return true;
@ -716,7 +716,7 @@ bool GraphIntrinsifier::Build_ObjectArraySetIndexedUnchecked(
builder.AddInstruction(new StoreIndexedInstr(
new Value(array), new Value(index), new Value(value), kEmitStoreBarrier,
Instance::ElementSizeFor(kArrayCid), // index scale
target::Instance::ElementSizeFor(kArrayCid), // index scale
kArrayCid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
// Return null.
Definition* null_def = builder.AddNullDefinition();
@ -751,7 +751,7 @@ bool GraphIntrinsifier::Build_GrowableArraySetIndexedUnchecked(
builder.AddInstruction(new StoreIndexedInstr(
new Value(backing_store), new Value(index), new Value(value),
kEmitStoreBarrier,
Instance::ElementSizeFor(kArrayCid), // index scale
target::Instance::ElementSizeFor(kArrayCid), // index scale
kArrayCid, kAlignedAccess, DeoptId::kNone, builder.TokenPos()));
// Return null.
Definition* null_def = builder.AddNullDefinition();


@ -59,7 +59,8 @@ bool Intrinsifier::CanIntrinsify(const Function& function) {
case MethodRecognizer::kUint64ArrayGetIndexed:
case MethodRecognizer::kUint64ArraySetIndexed:
// TODO(ajcbik): consider 32-bit as well.
if (kBitsPerWord == 64 && FlowGraphCompiler::SupportsUnboxedInt64()) {
if (target::kBitsPerWord == 64 &&
FlowGraphCompiler::SupportsUnboxedInt64()) {
break;
}
if (FLAG_trace_intrinsifier) {
@ -186,7 +187,7 @@ bool Intrinsifier::Intrinsify(const ParsedFunction& parsed_function,
return compiler->intrinsic_slow_path_label()->IsUnused();
}
#if !defined(HASH_IN_OBJECT_HEADER)
#if !defined(TARGET_HASH_IN_OBJECT_HEADER)
// These two are more complicated on 32 bit platforms, where the
// identity hash is not stored in the header of the object. We
// therefore don't intrinsify them, falling back on the native C++


@ -42,28 +42,32 @@ class OffsetsExtractor : public AllStatic {
public:
static void DumpOffsets() {
#define PRINT_FIELD_OFFSET(Class, Name) \
std::cout << "static constexpr dart::word " #Class "_" #Name " = " \
std::cout << "static constexpr dart::compiler::target::word " #Class \
"_" #Name " = " \
<< Class::Name() << ";\n";
#define PRINT_ARRAY_LAYOUT(Class, Name) \
std::cout << "static constexpr dart::word " #Class \
std::cout << "static constexpr dart::compiler::target::word " #Class \
"_elements_start_offset = " \
<< Class::ArrayLayout::elements_start_offset() << ";\n"; \
std::cout << "static constexpr dart::word " #Class "_element_size = " \
std::cout << "static constexpr dart::compiler::target::word " #Class \
"_element_size = " \
<< Class::ArrayLayout::kElementSize << ";\n";
#define PRINT_ARRAY_STRUCTFIELD_OFFSET(Class, Name, ElementOffsetName, \
FieldOffset)
#define PRINT_SIZEOF(Class, Name, What) \
std::cout << "static constexpr dart::word " #Class "_" #Name " = " \
std::cout << "static constexpr dart::compiler::target::word " #Class \
"_" #Name " = " \
<< sizeof(What) << ";\n";
#define PRINT_RANGE(Class, Name, Type, First, Last, Filter) \
{ \
auto filter = Filter; \
bool comma = false; \
std::cout << "static dart::word " #Class "_" #Name "[] = {"; \
std::cout << "static dart::compiler::target::word " #Class "_" #Name \
"[] = {"; \
for (intptr_t i = static_cast<intptr_t>(First); \
i <= static_cast<intptr_t>(Last); i++) { \
auto v = static_cast<Type>(i); \
@ -74,7 +78,8 @@ class OffsetsExtractor : public AllStatic {
}
#define PRINT_CONSTANT(Class, Name) \
std::cout << "static constexpr dart::word " #Class "_" #Name " = " \
std::cout << "static constexpr dart::compiler::target::word " #Class \
"_" #Name " = " \
<< Class::Name << ";\n";
#define PRECOMP_NO_CHECK(Code) Code
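Each PRINT_* macro above emits one declaration into the generated runtime_offsets_extracted.h header. A hypothetical line of that output would look like the following; the entry name is just an example from the offsets list and the numeric value is made up:

static constexpr dart::compiler::target::word Thread_stack_limit_offset = 36;  // value illustrative only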


@ -107,7 +107,8 @@ void CodeRelocator::FindInstructionAndCallLimits() {
for (intptr_t i = 0; i < code_objects_->length(); ++i) {
current_caller = (*code_objects_)[i];
const intptr_t size = current_caller.instructions()->HeapSize();
const intptr_t size =
ImageWriter::SizeInSnapshot(current_caller.instructions());
if (size > max_instructions_size_) {
max_instructions_size_ = size;
}
@ -182,7 +183,7 @@ bool CodeRelocator::AddInstructionsToText(RawCode* code) {
}
text_offsets_.Insert({instructions, next_text_offset_});
commands_->Add(ImageWriterCommand(next_text_offset_, code));
next_text_offset_ += instructions->HeapSize();
next_text_offset_ += ImageWriter::SizeInSnapshot(instructions);
return true;
}


@ -4,8 +4,19 @@
#include "vm/compiler/runtime_api.h"
namespace dart {
namespace compiler {
namespace target {
#include "vm/compiler/runtime_offsets_extracted.h"
} // namespace target
} // namespace compiler
} // namespace dart
#if !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/compiler/runtime_offsets_list.h"
#include "vm/dart_entry.h"
#include "vm/longjump.h"
#include "vm/native_arguments.h"
@ -20,6 +31,11 @@ namespace dart {
namespace compiler {
bool IsSameObject(const Object& a, const Object& b) {
if (a.IsMint() && b.IsMint()) {
return Mint::Cast(a).value() == Mint::Cast(b).value();
} else if (a.IsDouble() && b.IsDouble()) {
return Double::Cast(a).value() == Double::Cast(b).value();
}
return a.raw() == b.raw();
}
@ -49,7 +65,7 @@ CLASS_LIST_FOR_HANDLES(DO)
#undef DO
bool IsInOldSpace(const Object& obj) {
return obj.IsOld();
return obj.IsSmi() || obj.IsOld();
}
intptr_t ObjectHash(const Object& obj) {
@ -162,7 +178,8 @@ word TypedDataElementSizeInBytes(classid_t cid) {
}
word TypedDataMaxNewSpaceElements(classid_t cid) {
return dart::TypedData::MaxNewSpaceElements(cid);
return (dart::Heap::kNewAllocatableSize - target::TypedData::InstanceSize()) /
TypedDataElementSizeInBytes(cid);
}
const Field& LookupMathRandomStateFieldOffset() {
@ -215,23 +232,29 @@ void BailoutWithBranchOffsetError() {
}
word RuntimeEntry::OffsetFromThread() const {
return dart::Thread::OffsetFromThread(runtime_entry_);
return target::Thread::OffsetFromThread(runtime_entry_);
}
namespace target {
const word kPageSize = dart::kPageSize;
const word kPageSizeInWords = dart::kPageSizeInWords;
const word kPageSizeInWords = dart::kPageSize / kWordSize;
const word kPageMask = dart::kPageMask;
static word TranslateOffsetInWordsToHost(word offset) {
RELEASE_ASSERT((offset % kWordSize) == 0);
return (offset / kWordSize) * dart::kWordSize;
}
uint32_t MakeTagWordForNewSpaceObject(classid_t cid, uword instance_size) {
return dart::RawObject::SizeTag::encode(instance_size) |
return dart::RawObject::SizeTag::encode(
TranslateOffsetInWordsToHost(instance_size)) |
dart::RawObject::ClassIdTag::encode(cid) |
dart::RawObject::NewBit::encode(true);
}
word Object::tags_offset() {
return dart::Object::tags_offset();
return 0;
}
const word RawObject::kCardRememberedBit = dart::RawObject::kCardRememberedBit;
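TranslateOffsetInWordsToHost above rescales a size measured in target words into host words, so that host-side helpers such as SizeTag::encode see it in their own units; the opposite direction appears further down as TranslateOffsetInWords. A hedged sketch of the pair, with illustrative word sizes for a 64-bit host targeting a 32-bit architecture:

#include <cassert>
#include <cstdint>

constexpr intptr_t kHostWordSizeSketch = 8;    // e.g. x64 host
constexpr intptr_t kTargetWordSizeSketch = 4;  // e.g. ARM32 target

inline intptr_t TargetToHostOffsetSketch(intptr_t target_offset) {
  assert(target_offset % kTargetWordSizeSketch == 0);
  return (target_offset / kTargetWordSizeSketch) * kHostWordSizeSketch;
}

inline intptr_t HostToTargetOffsetSketch(intptr_t host_offset) {
  assert(host_offset % kHostWordSizeSketch == 0);
  return (host_offset / kHostWordSizeSketch) * kTargetWordSizeSketch;
}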
@ -247,7 +270,8 @@ const word RawObject::kClassIdTagPos = dart::RawObject::kClassIdTagPos;
const word RawObject::kClassIdTagSize = dart::RawObject::kClassIdTagSize;
const word RawObject::kSizeTagMaxSizeTag =
dart::RawObject::SizeTag::kMaxSizeTag;
dart::RawObject::SizeTag::kMaxSizeTagInUnitsOfAlignment *
ObjectAlignment::kObjectAlignment;
const word RawObject::kTagBitsSizeTagPos =
dart::RawObject::TagBits::kSizeTagPos;
@ -262,21 +286,6 @@ bool RawObject::IsTypedDataClassId(intptr_t cid) {
return dart::RawObject::IsTypedDataClassId(cid);
}
intptr_t ObjectPool::element_offset(intptr_t index) {
return dart::ObjectPool::element_offset(index);
}
word Class::type_arguments_field_offset_in_words_offset() {
return dart::Class::type_arguments_field_offset_in_words_offset();
}
word Class::declaration_type_offset() {
return dart::Class::declaration_type_offset();
}
word Class::num_type_arguments_offset_in_bytes() {
return dart::Class::num_type_arguments_offset();
}
const word Class::kNoTypeArguments = dart::Class::kNoTypeArguments;
@ -284,8 +293,59 @@ classid_t Class::GetId(const dart::Class& handle) {
return handle.id();
}
static word TranslateOffsetInWords(word offset) {
RELEASE_ASSERT((offset % dart::kWordSize) == 0);
return (offset / dart::kWordSize) * kWordSize;
}
static uword GetInstanceSizeImpl(const dart::Class& handle) {
switch (handle.id()) {
case kMintCid:
return Mint::InstanceSize();
case kDoubleCid:
return Double::InstanceSize();
case kInt32x4Cid:
return Int32x4::InstanceSize();
case kFloat32x4Cid:
return Float32x4::InstanceSize();
case kFloat64x2Cid:
return Float64x2::InstanceSize();
case kObjectCid:
return Object::InstanceSize();
case kInstanceCid:
return Instance::InstanceSize();
case kGrowableObjectArrayCid:
return GrowableObjectArray::InstanceSize();
case kClosureCid:
return Closure::InstanceSize();
case kTypedDataBaseCid:
return TypedDataBase::InstanceSize();
case kLinkedHashMapCid:
return LinkedHashMap::InstanceSize();
case kByteBufferCid:
case kByteDataViewCid:
case kFfiPointerCid:
#define HANDLE_CASE(clazz) \
case kTypedData##clazz##Cid: \
case kTypedData##clazz##ViewCid: \
case kExternalTypedData##clazz##Cid:
CLASS_LIST_TYPED_DATA(HANDLE_CASE)
#undef HANDLE_CASE
return TranslateOffsetInWords(handle.instance_size());
default:
if (handle.id() >= kNumPredefinedCids) {
return TranslateOffsetInWords(handle.instance_size());
}
}
FATAL3("Unsupported class for size translation: %s (id=%" Pd
", kNumPredefinedCids=%d)\n",
handle.ToCString(), handle.id(), kNumPredefinedCids);
return -1;
}
uword Class::GetInstanceSize(const dart::Class& handle) {
return handle.instance_size();
return Utils::RoundUp(GetInstanceSizeImpl(handle),
ObjectAlignment::kObjectAlignment);
}
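GetInstanceSize now rounds the translated per-class size up to the target's object alignment instead of reusing the host's instance_size(). The rounding itself is the usual power-of-two round-up; a sketch with an illustrative alignment value:

#include <cstdint>

constexpr intptr_t kObjectAlignmentSketch = 8;  // illustrative; the real value is ObjectAlignment::kObjectAlignment

inline intptr_t RoundUpToObjectAlignmentSketch(intptr_t size) {
  return (size + kObjectAlignmentSketch - 1) & ~(kObjectAlignmentSketch - 1);
}
// Example: a 12-byte instance occupies 16 bytes in the target heap.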
intptr_t Class::NumTypeArguments(const dart::Class& klass) {
@ -297,11 +357,7 @@ bool Class::HasTypeArgumentsField(const dart::Class& klass) {
}
intptr_t Class::TypeArgumentsFieldOffset(const dart::Class& klass) {
return klass.type_arguments_field_offset();
}
intptr_t Class::InstanceSize(const dart::Class& klass) {
return klass.instance_size();
return TranslateOffsetInWords(klass.type_arguments_field_offset());
}
bool Class::TraceAllocation(const dart::Class& klass) {
@ -309,57 +365,60 @@ bool Class::TraceAllocation(const dart::Class& klass) {
}
word Instance::first_field_offset() {
return dart::Instance::NextFieldOffset();
return TranslateOffsetInWords(dart::Instance::NextFieldOffset());
}
word Instance::DataOffsetFor(intptr_t cid) {
return dart::Instance::DataOffsetFor(cid);
if (dart::RawObject::IsExternalTypedDataClassId(cid) ||
dart::RawObject::IsExternalStringClassId(cid)) {
// Elements start at offset 0 of the external data.
return 0;
}
if (dart::RawObject::IsTypedDataClassId(cid)) {
return TypedData::data_offset();
}
switch (cid) {
case kArrayCid:
case kImmutableArrayCid:
return Array::data_offset();
case kOneByteStringCid:
return OneByteString::data_offset();
case kTwoByteStringCid:
return TwoByteString::data_offset();
default:
UNIMPLEMENTED();
return Array::data_offset();
}
}
word Instance::ElementSizeFor(intptr_t cid) {
return dart::Instance::ElementSizeFor(cid);
}
word Function::code_offset() {
return dart::Function::code_offset();
}
word Function::entry_point_offset() {
return dart::Function::entry_point_offset();
}
word Function::usage_counter_offset() {
return dart::Function::usage_counter_offset();
}
word Function::unchecked_entry_point_offset() {
return dart::Function::unchecked_entry_point_offset();
if (dart::RawObject::IsExternalTypedDataClassId(cid) ||
dart::RawObject::IsTypedDataClassId(cid) ||
dart::RawObject::IsTypedDataViewClassId(cid)) {
return dart::TypedDataBase::ElementSizeInBytes(cid);
}
switch (cid) {
case kArrayCid:
case kImmutableArrayCid:
return kWordSize;
case kOneByteStringCid:
return dart::OneByteString::kBytesPerElement;
case kTwoByteStringCid:
return dart::TwoByteString::kBytesPerElement;
case kExternalOneByteStringCid:
return dart::ExternalOneByteString::kBytesPerElement;
case kExternalTwoByteStringCid:
return dart::ExternalTwoByteString::kBytesPerElement;
default:
UNIMPLEMENTED();
return 0;
}
}
word ICData::CodeIndexFor(word num_args) {
return dart::ICData::CodeIndexFor(num_args);
}
word ICData::owner_offset() {
return dart::ICData::owner_offset();
}
word ICData::arguments_descriptor_offset() {
return dart::ICData::arguments_descriptor_offset();
}
word ICData::entries_offset() {
return dart::ICData::entries_offset();
}
word ICData::receivers_static_type_offset() {
return dart::ICData::receivers_static_type_offset();
}
word ICData::state_bits_offset() {
return dart::ICData::state_bits_offset();
}
word ICData::CountIndexFor(word num_args) {
return dart::ICData::CountIndexFor(num_args);
}
@ -380,144 +439,77 @@ word ICData::EntryPointIndexFor(word num_args) {
return dart::ICData::EntryPointIndexFor(num_args);
}
word ICData::NumArgsTestedShift() {
return dart::ICData::NumArgsTestedShift();
}
word ICData::NumArgsTestedMask() {
return dart::ICData::NumArgsTestedMask();
}
const word MegamorphicCache::kSpreadFactor =
dart::MegamorphicCache::kSpreadFactor;
word MegamorphicCache::mask_offset() {
return dart::MegamorphicCache::mask_offset();
}
word MegamorphicCache::buckets_offset() {
return dart::MegamorphicCache::buckets_offset();
}
word MegamorphicCache::arguments_descriptor_offset() {
return dart::MegamorphicCache::arguments_descriptor_offset();
}
word SingleTargetCache::lower_limit_offset() {
return dart::SingleTargetCache::lower_limit_offset();
}
word SingleTargetCache::upper_limit_offset() {
return dart::SingleTargetCache::upper_limit_offset();
}
word SingleTargetCache::entry_point_offset() {
return dart::SingleTargetCache::entry_point_offset();
}
word SingleTargetCache::target_offset() {
return dart::SingleTargetCache::target_offset();
}
const word Array::kMaxNewSpaceElements = dart::Array::kMaxNewSpaceElements;
word Context::InstanceSize(word n) {
return dart::Context::InstanceSize(n);
return TranslateOffsetInWords(dart::Context::InstanceSize(n));
}
word Context::variable_offset(word n) {
return dart::Context::variable_offset(n);
return TranslateOffsetInWords(dart::Context::variable_offset(n));
}
word TypedData::InstanceSize() {
return sizeof(RawTypedData);
#define DEFINE_FIELD(clazz, name) \
word clazz::name() { return clazz##_##name; }
#define DEFINE_ARRAY(clazz, name) \
word clazz::name(intptr_t index) { \
return clazz##_elements_start_offset + index * clazz##_element_size; \
}
#define DEFINE_ARRAY_STRUCTFIELD(clazz, name, element_offset, field_offset) \
word clazz::name(intptr_t index) { \
return element_offset(index) + field_offset; \
}
#define DEFINE_SIZEOF(clazz, name, what) \
word clazz::name() { return clazz##_##name; }
#define DEFINE_RANGE(Class, Getter, Type, First, Last, Filter) \
word Class::Getter(Type index) { \
return Class##_##Getter[static_cast<intptr_t>(index) - \
static_cast<intptr_t>(First)]; \
}
#define DEFINE_CONSTANT(Class, Name) const word Class::Name = Class##_##Name;
#define PRECOMP_NO_CHECK(Code) Code
OFFSETS_LIST(DEFINE_FIELD,
DEFINE_ARRAY,
DEFINE_ARRAY_STRUCTFIELD,
DEFINE_SIZEOF,
DEFINE_RANGE,
DEFINE_CONSTANT,
PRECOMP_NO_CHECK)
#undef DEFINE_FIELD
#undef DEFINE_ARRAY
#undef DEFINE_ARRAY_STRUCTFIELD
#undef DEFINE_SIZEOF
#undef DEFINE_RANGE
#undef DEFINE_CONSTANT
#undef PRECOMP_NO_CHECK
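The block above is an X-macro expansion: OFFSETS_LIST supplies one entry per constant pulled from the generated offsets header, and each DEFINE_* macro turns its entries into getters that simply return those precomputed values. A minimal, self-contained sketch of the same pattern, using made-up names rather than the real OFFSETS_LIST entries:

#include <cstdint>

// Pretend these constants came from a generated offsets header.
static constexpr int64_t Array_length_offset = 8;
static constexpr int64_t Array_InstanceSize = 16;

// One entry per constant; the caller decides how each entry expands.
#define MY_OFFSETS_LIST(FIELD, SIZEOF)                                         \
  FIELD(Array, length_offset)                                                  \
  SIZEOF(Array, InstanceSize)

// Expand every entry into a trivial getter backed by the constant.
#define MY_DEFINE_FIELD(clazz, name)                                           \
  int64_t clazz##_##name##_getter() { return clazz##_##name; }
#define MY_DEFINE_SIZEOF(clazz, name)                                          \
  int64_t clazz##_##name##_getter() { return clazz##_##name; }

MY_OFFSETS_LIST(MY_DEFINE_FIELD, MY_DEFINE_SIZEOF)

#undef MY_DEFINE_FIELD
#undef MY_DEFINE_SIZEOF
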
const word StoreBufferBlock::kSize = dart::StoreBufferBlock::kSize;
const word MarkingStackBlock::kSize = dart::MarkingStackBlock::kSize;
word Instructions::HeaderSize() {
intptr_t alignment = OS::PreferredCodeAlignment();
intptr_t aligned_size =
Utils::RoundUp(Instructions::UnalignedHeaderSize(), alignment);
ASSERT(aligned_size == alignment);
return aligned_size;
}
word Array::header_size() {
return sizeof(dart::RawArray);
#if !defined(TARGET_ARCH_DBC)
word Thread::stack_overflow_shared_stub_entry_point_offset(bool fpu_regs) {
return fpu_regs ? stack_overflow_shared_with_fpu_regs_entry_point_offset()
: stack_overflow_shared_without_fpu_regs_entry_point_offset();
}
#define CLASS_NAME_LIST(V) \
V(AbstractType, type_test_stub_entry_point_offset) \
V(ArgumentsDescriptor, count_offset) \
V(ArgumentsDescriptor, type_args_len_offset) \
V(Array, data_offset) \
V(Array, length_offset) \
V(Array, tags_offset) \
V(Array, type_arguments_offset) \
V(ClassTable, table_offset) \
V(Closure, context_offset) \
V(Closure, delayed_type_arguments_offset) \
V(Closure, function_offset) \
V(Closure, function_type_arguments_offset) \
V(Closure, instantiator_type_arguments_offset) \
V(Code, object_pool_offset) \
V(Code, saved_instructions_offset) \
V(Context, num_variables_offset) \
V(Context, parent_offset) \
V(Double, value_offset) \
V(Float32x4, value_offset) \
V(Float64x2, value_offset) \
V(GrowableObjectArray, data_offset) \
V(GrowableObjectArray, length_offset) \
V(GrowableObjectArray, type_arguments_offset) \
V(HeapPage, card_table_offset) \
V(Isolate, class_table_offset) \
V(Isolate, current_tag_offset) \
V(Isolate, default_tag_offset) \
V(Isolate, ic_miss_code_offset) \
V(Isolate, object_store_offset) \
V(Isolate, user_tag_offset) \
V(MarkingStackBlock, pointers_offset) \
V(MarkingStackBlock, top_offset) \
V(Mint, value_offset) \
V(NativeArguments, argc_tag_offset) \
V(NativeArguments, argv_offset) \
V(NativeArguments, retval_offset) \
V(NativeArguments, thread_offset) \
V(ObjectStore, double_type_offset) \
V(ObjectStore, int_type_offset) \
V(ObjectStore, string_type_offset) \
V(OneByteString, data_offset) \
V(StoreBufferBlock, pointers_offset) \
V(StoreBufferBlock, top_offset) \
V(String, hash_offset) \
V(String, length_offset) \
V(SubtypeTestCache, cache_offset) \
V(Thread, active_exception_offset) \
V(Thread, active_stacktrace_offset) \
V(Thread, async_stack_trace_offset) \
V(Thread, auto_scope_native_wrapper_entry_point_offset) \
V(Thread, bool_false_offset) \
V(Thread, bool_true_offset) \
V(Thread, dart_stream_offset) \
V(Thread, end_offset) \
V(Thread, global_object_pool_offset) \
V(Thread, isolate_offset) \
V(Thread, marking_stack_block_offset) \
V(Thread, no_scope_native_wrapper_entry_point_offset) \
V(Thread, object_null_offset) \
V(Thread, predefined_symbols_address_offset) \
V(Thread, resume_pc_offset) \
V(Thread, store_buffer_block_offset) \
V(Thread, top_exit_frame_info_offset) \
V(Thread, top_offset) \
V(Thread, top_resource_offset) \
V(Thread, vm_tag_offset) \
V(Thread, safepoint_state_offset) \
V(Thread, callback_code_offset) \
V(TimelineStream, enabled_offset) \
V(TwoByteString, data_offset) \
V(Type, arguments_offset) \
V(TypedDataBase, data_field_offset) \
V(TypedDataBase, length_offset) \
V(TypedData, data_offset) \
V(Type, hash_offset) \
V(TypeRef, type_offset) \
V(Type, signature_offset) \
V(Type, type_state_offset) \
V(UserTag, tag_offset)
#define DEFINE_FORWARDER(clazz, name) \
word clazz::name() { return dart::clazz::name(); }
CLASS_NAME_LIST(DEFINE_FORWARDER)
#undef DEFINE_FORWARDER
#endif // !defined(TARGET_ARCH_DBC)
uword Thread::safepoint_state_unacquired() {
return dart::Thread::safepoint_state_unacquired();
@ -527,285 +519,67 @@ uword Thread::safepoint_state_acquired() {
return dart::Thread::safepoint_state_acquired();
}
const word HeapPage::kBytesPerCardLog2 = dart::HeapPage::kBytesPerCardLog2;
const word String::kHashBits = dart::String::kHashBits;
word String::InstanceSize() {
return sizeof(dart::RawString);
}
bool Heap::IsAllocatableInNewSpace(intptr_t instance_size) {
return dart::Heap::IsAllocatableInNewSpace(instance_size);
}
#if !defined(TARGET_ARCH_DBC)
word Thread::write_barrier_code_offset() {
return dart::Thread::write_barrier_code_offset();
}
word Thread::array_write_barrier_code_offset() {
return dart::Thread::array_write_barrier_code_offset();
}
word Thread::fix_callers_target_code_offset() {
return dart::Thread::fix_callers_target_code_offset();
}
word Thread::fix_allocation_stub_code_offset() {
return dart::Thread::fix_allocation_stub_code_offset();
}
word Thread::call_to_runtime_entry_point_offset() {
return dart::Thread::call_to_runtime_entry_point_offset();
}
word Thread::null_error_shared_with_fpu_regs_entry_point_offset() {
return dart::Thread::null_error_shared_with_fpu_regs_entry_point_offset();
}
word Thread::null_error_shared_without_fpu_regs_entry_point_offset() {
return dart::Thread::null_error_shared_without_fpu_regs_entry_point_offset();
}
word Thread::monomorphic_miss_entry_offset() {
return dart::Thread::monomorphic_miss_entry_offset();
}
word Thread::write_barrier_mask_offset() {
return dart::Thread::write_barrier_mask_offset();
}
word Thread::write_barrier_entry_point_offset() {
return dart::Thread::write_barrier_entry_point_offset();
}
word Thread::array_write_barrier_entry_point_offset() {
return dart::Thread::array_write_barrier_entry_point_offset();
}
word Thread::verify_callback_isolate_entry_point_offset() {
return dart::Thread::verify_callback_entry_offset();
}
#endif // !defined(TARGET_ARCH_DBC)
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \
defined(TARGET_ARCH_X64)
word Thread::write_barrier_wrappers_thread_offset(intptr_t regno) {
return dart::Thread::write_barrier_wrappers_thread_offset(
static_cast<Register>(regno));
}
#endif
#if !defined(TARGET_ARCH_DBC)
word Thread::monomorphic_miss_stub_offset() {
return dart::Thread::monomorphic_miss_stub_offset();
}
word Thread::ic_lookup_through_code_stub_offset() {
return dart::Thread::ic_lookup_through_code_stub_offset();
}
word Thread::lazy_specialize_type_test_stub_offset() {
return dart::Thread::lazy_specialize_type_test_stub_offset();
}
word Thread::slow_type_test_stub_offset() {
return dart::Thread::slow_type_test_stub_offset();
}
word Thread::call_to_runtime_stub_offset() {
return dart::Thread::call_to_runtime_stub_offset();
}
word Thread::invoke_dart_code_stub_offset() {
return dart::Thread::invoke_dart_code_stub_offset();
}
word Thread::interpret_call_entry_point_offset() {
return dart::Thread::interpret_call_entry_point_offset();
}
word Thread::invoke_dart_code_from_bytecode_stub_offset() {
return dart::Thread::invoke_dart_code_from_bytecode_stub_offset();
}
word Thread::null_error_shared_without_fpu_regs_stub_offset() {
return dart::Thread::null_error_shared_without_fpu_regs_stub_offset();
}
word Thread::null_error_shared_with_fpu_regs_stub_offset() {
return dart::Thread::null_error_shared_with_fpu_regs_stub_offset();
}
word Thread::stack_overflow_shared_without_fpu_regs_stub_offset() {
return dart::Thread::stack_overflow_shared_without_fpu_regs_stub_offset();
}
word Thread::stack_overflow_shared_with_fpu_regs_stub_offset() {
return dart::Thread::stack_overflow_shared_with_fpu_regs_stub_offset();
}
word Thread::lazy_deopt_from_return_stub_offset() {
return dart::Thread::lazy_deopt_from_return_stub_offset();
}
word Thread::lazy_deopt_from_throw_stub_offset() {
return dart::Thread::lazy_deopt_from_throw_stub_offset();
}
word Thread::deoptimize_stub_offset() {
return dart::Thread::deoptimize_stub_offset();
}
word Thread::enter_safepoint_stub_offset() {
return dart::Thread::enter_safepoint_stub_offset();
}
word Thread::exit_safepoint_stub_offset() {
return dart::Thread::exit_safepoint_stub_offset();
}
word Thread::execution_state_offset() {
return dart::Thread::execution_state_offset();
uword Thread::generated_execution_state() {
return dart::Thread::ExecutionState::kThreadInGenerated;
}
uword Thread::native_execution_state() {
return dart::Thread::ExecutionState::kThreadInNative;
}
uword Thread::generated_execution_state() {
return dart::Thread::ExecutionState::kThreadInGenerated;
}
uword Thread::vm_tag_compiled_id() {
return dart::VMTag::kDartCompiledTagId;
}
#endif // !defined(TARGET_ARCH_DBC)
#define DECLARE_CONSTANT_OFFSET_GETTER(name) \
word Thread::name##_address_offset() { \
return dart::Thread::name##_address_offset(); \
}
THREAD_XMM_CONSTANT_LIST(DECLARE_CONSTANT_OFFSET_GETTER)
#undef DECLARE_CONSTANT_OFFSET_GETTER
word Thread::OffsetFromThread(const dart::Object& object) {
return dart::Thread::OffsetFromThread(object);
auto host_offset = dart::Thread::OffsetFromThread(object);
return object_null_offset() +
TranslateOffsetInWords(host_offset -
dart::Thread::object_null_offset());
}
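Once target and host word sizes can differ, the host offset can no longer be returned directly; instead the distance from a fixed reference slot (object_null_) is measured on the host and rescaled into target words. A hedged sketch of that rescaling arithmetic, assuming a 64-bit host and a 32-bit target (all numbers hypothetical):

#include <cassert>
#include <cstdint>

constexpr int64_t kHostWordSize = 8;    // assumed host
constexpr int64_t kTargetWordSize = 4;  // assumed target

// Rescale a byte distance measured in host words into target words.
int64_t TranslateOffsetInWordsSketch(int64_t host_offset) {
  return host_offset / kHostWordSize * kTargetWordSize;
}

// Given the host offsets of a thread slot and of the reference slot
// (object_null_), compute the slot's offset in the target layout.
int64_t TargetOffsetFromThreadSketch(int64_t host_slot_offset,
                                     int64_t host_object_null_offset,
                                     int64_t target_object_null_offset) {
  return target_object_null_offset +
         TranslateOffsetInWordsSketch(host_slot_offset -
                                      host_object_null_offset);
}

int main() {
  // Hypothetical numbers: the slot sits two host words past object_null_.
  assert(TargetOffsetFromThreadSketch(/*host_slot_offset=*/96,
                                      /*host_object_null_offset=*/80,
                                      /*target_object_null_offset=*/40) == 48);
  return 0;
}
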
const word StoreBufferBlock::kSize = dart::StoreBufferBlock::kSize;
const word MarkingStackBlock::kSize = dart::MarkingStackBlock::kSize;
#if !defined(PRODUCT)
word Isolate::single_step_offset() {
return dart::Isolate::single_step_offset();
}
#endif // !defined(PRODUCT)
#if !defined(PRODUCT)
word ClassTable::ClassOffsetFor(intptr_t cid) {
return dart::ClassTable::ClassOffsetFor(cid);
intptr_t Thread::OffsetFromThread(const dart::RuntimeEntry* runtime_entry) {
auto host_offset = dart::Thread::OffsetFromThread(runtime_entry);
return AllocateArray_entry_point_offset() +
TranslateOffsetInWords(
host_offset - dart::Thread::AllocateArray_entry_point_offset());
}
word ClassTable::StateOffsetFor(intptr_t cid) {
return dart::ClassTable::StateOffsetFor(cid);
bool CanLoadFromThread(const dart::Object& object,
intptr_t* offset /* = nullptr */) {
if (dart::Thread::CanLoadFromThread(object)) {
if (offset != nullptr) {
*offset = Thread::OffsetFromThread(object);
}
return true;
}
return false;
}
word ClassTable::TableOffsetFor(intptr_t cid) {
return dart::ClassTable::TableOffsetFor(cid);
}
static_assert(
kSmiBits <= dart::kSmiBits,
"Expected that size of Smi on HOST is at least as large as on target.");
word ClassTable::CounterOffsetFor(intptr_t cid, bool is_new) {
return dart::ClassTable::CounterOffsetFor(cid, is_new);
}
word ClassTable::SizeOffsetFor(intptr_t cid, bool is_new) {
return dart::ClassTable::SizeOffsetFor(cid, is_new);
}
#endif // !defined(PRODUCT)
const word ClassTable::kSizeOfClassPairLog2 =
dart::ClassTable::kSizeOfClassPairLog2;
const intptr_t Instructions::kPolymorphicEntryOffset =
dart::Instructions::kPolymorphicEntryOffset;
const intptr_t Instructions::kMonomorphicEntryOffset =
dart::Instructions::kMonomorphicEntryOffset;
intptr_t Instructions::HeaderSize() {
return dart::Instructions::HeaderSize();
}
intptr_t Code::entry_point_offset(CodeEntryKind kind) {
return dart::Code::entry_point_offset(kind);
}
const word SubtypeTestCache::kTestEntryLength =
dart::SubtypeTestCache::kTestEntryLength;
const word SubtypeTestCache::kInstanceClassIdOrFunction =
dart::SubtypeTestCache::kInstanceClassIdOrFunction;
const word SubtypeTestCache::kInstanceTypeArguments =
dart::SubtypeTestCache::kInstanceTypeArguments;
const word SubtypeTestCache::kInstantiatorTypeArguments =
dart::SubtypeTestCache::kInstantiatorTypeArguments;
const word SubtypeTestCache::kFunctionTypeArguments =
dart::SubtypeTestCache::kFunctionTypeArguments;
const word SubtypeTestCache::kInstanceParentFunctionTypeArguments =
dart::SubtypeTestCache::kInstanceParentFunctionTypeArguments;
const word SubtypeTestCache::kInstanceDelayedFunctionTypeArguments =
dart::SubtypeTestCache::kInstanceDelayedFunctionTypeArguments;
const word SubtypeTestCache::kTestResult = dart::SubtypeTestCache::kTestResult;
word Context::header_size() {
return sizeof(dart::RawContext);
}
#if !defined(PRODUCT)
word ClassHeapStats::TraceAllocationMask() {
return dart::ClassHeapStats::TraceAllocationMask();
}
word ClassHeapStats::state_offset() {
return dart::ClassHeapStats::state_offset();
}
word ClassHeapStats::allocated_since_gc_new_space_offset() {
return dart::ClassHeapStats::allocated_since_gc_new_space_offset();
}
word ClassHeapStats::allocated_size_since_gc_new_space_offset() {
return dart::ClassHeapStats::allocated_size_since_gc_new_space_offset();
}
#endif // !defined(PRODUCT)
const word Smi::kBits = dart::Smi::kBits;
bool IsSmi(const dart::Object& a) {
return a.IsSmi();
return a.IsSmi() && Utils::IsInt(kSmiBits + 1, dart::Smi::Cast(a).Value());
}
bool IsSmi(int64_t v) {
return Utils::IsInt(kSmiBits + 1, v);
}
word ToRawSmi(const dart::Object& a) {
ASSERT(a.IsSmi());
return reinterpret_cast<word>(a.raw());
RELEASE_ASSERT(IsSmi(a));
return static_cast<word>(reinterpret_cast<intptr_t>(a.raw()));
}
word ToRawSmi(intptr_t value) {
return dart::Smi::RawValue(value);
}
bool CanLoadFromThread(const dart::Object& object,
word* offset /* = nullptr */) {
if (dart::Thread::CanLoadFromThread(object)) {
if (offset != nullptr) {
*offset = dart::Thread::OffsetFromThread(object);
}
return true;
}
return false;
word SmiValue(const dart::Object& a) {
RELEASE_ASSERT(IsSmi(a));
return static_cast<word>(dart::Smi::Cast(a).Value());
}
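The new IsSmi(int64_t) overload is a plain signed-range test: a target Smi has kSmiBits payload bits plus a sign bit, so the value must fit in kSmiBits + 1 signed bits. A standalone sketch of that test for an assumed 32-bit target, where kSmiBits would be 30:

#include <cassert>
#include <cstdint>

// Assumed 32-bit target: 30 payload bits, range [-2^30, 2^30 - 1].
constexpr int kTargetSmiBits = 30;

bool FitsTargetSmi(int64_t value) {
  const int64_t limit = int64_t{1} << kTargetSmiBits;
  return value >= -limit && value < limit;
}

int main() {
  assert(FitsTargetSmi(0x3FFFFFFF));    // 2^30 - 1: largest Smi on that target
  assert(!FitsTargetSmi(0x40000000));   // 2^30: needs a Mint on that target
  assert(FitsTargetSmi(-0x40000000));   // -2^30: smallest Smi on that target
  return 0;
}
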
#if defined(TARGET_ARCH_IA32)
@ -828,15 +602,8 @@ word ToRawPointer(const dart::Object& a) {
}
#endif // defined(TARGET_ARCH_IA32)
const word NativeEntry::kNumCallWrapperArguments =
dart::NativeEntry::kNumCallWrapperArguments;
word NativeArguments::StructSize() {
return sizeof(dart::NativeArguments);
}
word RegExp::function_offset(classid_t cid, bool sticky) {
return dart::RegExp::function_offset(cid, sticky);
return TranslateOffsetInWords(dart::RegExp::function_offset(cid, sticky));
}
const word Symbols::kNumberOfOneCharCodeSymbols =
@ -844,6 +611,28 @@ const word Symbols::kNumberOfOneCharCodeSymbols =
const word Symbols::kNullCharCodeSymbolOffset =
dart::Symbols::kNullCharCodeSymbolOffset;
const word String::kHashBits = dart::String::kHashBits;
bool Heap::IsAllocatableInNewSpace(intptr_t instance_size) {
return dart::Heap::IsAllocatableInNewSpace(instance_size);
}
word Field::OffsetOf(const dart::Field& field) {
return TranslateOffsetInWords(field.Offset());
}
} // namespace target
} // namespace compiler
} // namespace dart
#else
namespace dart {
namespace compiler {
namespace target {
const word Array::kMaxElements = Array_kMaxElements;
} // namespace target
} // namespace compiler
} // namespace dart


@ -23,6 +23,7 @@
#include "vm/bitfield.h"
#include "vm/class_id.h"
#include "vm/code_entry_kind.h"
#include "vm/constants.h"
#include "vm/frame_layout.h"
#include "vm/pointer_tagging.h"
#include "vm/runtime_entry_list.h"
@ -64,12 +65,16 @@ namespace compiler {
class InvalidClass {};
extern InvalidClass kWordSize;
extern InvalidClass kWordSizeLog2;
extern InvalidClass kBitsPerWord;
extern InvalidClass kNewObjectAlignmentOffset;
extern InvalidClass kOldObjectAlignmentOffset;
extern InvalidClass kNewObjectBitPosition;
extern InvalidClass kObjectAlignment;
extern InvalidClass kObjectAlignmentLog2;
extern InvalidClass kObjectAlignmentMask;
extern InvalidClass kSmiBits;
extern InvalidClass kSmiMin;
extern InvalidClass kSmiMax;
static constexpr intptr_t kHostWordSize = dart::kWordSize;
static constexpr intptr_t kHostWordSizeLog2 = dart::kWordSizeLog2;
@ -252,21 +257,30 @@ DART_NORETURN void BailoutWithBranchOffsetError();
// - sizes of structures
namespace target {
// Currently we define target::word to match dart::word which represents
// host word.
//
// Once refactoring of the compiler is complete we will switch target::word
// to be independent from host word.
typedef dart::word word;
typedef dart::uword uword;
#if defined(TARGET_ARCH_IS_32_BIT)
typedef int32_t word;
typedef uint32_t uword;
static constexpr word kWordSize = 4;
static constexpr word kWordSizeLog2 = 2;
#elif defined(TARGET_ARCH_IS_64_BIT)
typedef int64_t word;
typedef uint64_t uword;
static constexpr word kWordSize = 8;
static constexpr word kWordSizeLog2 = 3;
#else
#error "Unsupported architecture"
#endif
static constexpr word kWordSize = dart::kWordSize;
static constexpr word kWordSizeLog2 = dart::kWordSizeLog2;
static constexpr word kBitsPerWord = 8 * kWordSize;
static_assert((1 << kWordSizeLog2) == kWordSize,
"kWordSizeLog2 should match kWordSize");
using ObjectAlignment = dart::ObjectAlignment<kWordSize, kWordSizeLog2>;
const intptr_t kSmiBits = kBitsPerWord - 2;
const intptr_t kSmiMax = (static_cast<intptr_t>(1) << kSmiBits) - 1;
const intptr_t kSmiMin = -(static_cast<intptr_t>(1) << kSmiBits);
// Information about heap pages.
extern const word kPageSize;
extern const word kPageSizeInWords;
@ -291,10 +305,14 @@ uint32_t MakeTagWordForNewSpaceObject(classid_t cid, uword instance_size);
// Target specific information about objects.
//
// Returns true if the given object can be represented as a Smi on the
// target platform.
// Returns true if the given object can be represented as a Smi on the target
// platform.
bool IsSmi(const dart::Object& a);
// Returns true if the given value can be represented as a Smi on the target
// platform.
bool IsSmi(int64_t value);
// Return raw Smi representation of the given object for the target platform.
word ToRawSmi(const dart::Object& a);
@ -305,10 +323,12 @@ word ToRawSmi(const dart::Object& a);
// as a Smi.
word ToRawSmi(intptr_t value);
word SmiValue(const dart::Object& a);
// If the given object can be loaded from the thread on the target then
// return true and set offset (if provided) to the offset from the
// thread pointer to a field that contains the object.
bool CanLoadFromThread(const dart::Object& object, word* offset = nullptr);
bool CanLoadFromThread(const dart::Object& object, intptr_t* offset = nullptr);
// On IA32 we can embed raw pointers into generated code.
#if defined(TARGET_ARCH_IA32)
@ -350,12 +370,13 @@ class Object : public AllStatic {
public:
// Offset of the tags word.
static word tags_offset();
static word InstanceSize();
};
class ObjectPool : public AllStatic {
public:
// Return offset to the element with the given [index] in the object pool.
static intptr_t element_offset(intptr_t index);
static word element_offset(intptr_t index);
};
class Class : public AllStatic {
@ -364,8 +385,10 @@ class Class : public AllStatic {
static word declaration_type_offset();
static word super_type_offset();
// The offset of the RawClass::num_type_arguments_ field in bytes.
static word num_type_arguments_offset_in_bytes();
static word num_type_arguments_offset();
// The value used if no type arguments vector is present.
static const word kNoTypeArguments;
@ -385,9 +408,6 @@ class Class : public AllStatic {
// Returns the offset (in bytes) of the type arguments vector.
static intptr_t TypeArgumentsFieldOffset(const dart::Class& klass);
// Returns the instance size (in bytes).
static intptr_t InstanceSize(const dart::Class& klass);
// Whether to trace allocation for this klass.
static bool TraceAllocation(const dart::Class& klass);
};
@ -398,6 +418,7 @@ class Instance : public AllStatic {
static word first_field_offset();
static word DataOffsetFor(intptr_t cid);
static word ElementSizeFor(intptr_t cid);
static word InstanceSize();
};
class Function : public AllStatic {
@ -449,7 +470,9 @@ class Array : public AllStatic {
static word data_offset();
static word type_arguments_offset();
static word length_offset();
static word element_offset(intptr_t index);
static const word kMaxElements;
static const word kMaxNewSpaceElements;
};
@ -458,12 +481,14 @@ class GrowableObjectArray : public AllStatic {
static word data_offset();
static word type_arguments_offset();
static word length_offset();
static word InstanceSize();
};
class TypedDataBase : public AllStatic {
public:
static word data_field_offset();
static word length_offset();
static word InstanceSize();
};
class TypedData : public AllStatic {
@ -472,10 +497,41 @@ class TypedData : public AllStatic {
static word InstanceSize();
};
class ExternalTypedData : public AllStatic {
public:
static word data_offset();
};
class TypedDataView : public AllStatic {
public:
static word offset_in_bytes_offset();
static word data_offset();
};
class LinkedHashMap : public AllStatic {
public:
static word index_offset();
static word data_offset();
static word hash_mask_offset();
static word used_data_offset();
static word deleted_keys_offset();
static word InstanceSize();
};
class ArgumentsDescriptor : public AllStatic {
public:
static word first_named_entry_offset();
static word named_entry_size();
static word position_offset();
static word name_offset();
static word count_offset();
static word type_args_len_offset();
static word positional_count_offset();
};
class Pointer : public AllStatic {
public:
static word c_memory_address_offset();
};
class AbstractType : public AllStatic {
@ -489,6 +545,7 @@ class Type : public AllStatic {
static word type_state_offset();
static word arguments_offset();
static word signature_offset();
static word type_class_id_offset();
};
class TypeRef : public AllStatic {
@ -499,21 +556,19 @@ class TypeRef : public AllStatic {
class Double : public AllStatic {
public:
static word value_offset();
};
class Smi : public AllStatic {
public:
static const word kBits;
static word InstanceSize();
};
class Mint : public AllStatic {
public:
static word value_offset();
static word InstanceSize();
};
class String : public AllStatic {
public:
static const word kHashBits;
static const word kMaxElements;
static word hash_offset();
static word length_offset();
static word InstanceSize();
@ -529,14 +584,31 @@ class TwoByteString : public AllStatic {
static word data_offset();
};
class ExternalOneByteString : public AllStatic {
public:
static word external_data_offset();
};
class ExternalTwoByteString : public AllStatic {
public:
static word external_data_offset();
};
class Int32x4 : public AllStatic {
public:
static word InstanceSize();
};
class Float32x4 : public AllStatic {
public:
static word value_offset();
static word InstanceSize();
};
class Float64x2 : public AllStatic {
public:
static word value_offset();
static word InstanceSize();
};
class TimelineStream : public AllStatic {
@ -544,12 +616,18 @@ class TimelineStream : public AllStatic {
static word enabled_offset();
};
class VMHandles : public AllStatic {
public:
static constexpr intptr_t kOffsetOfRawPtrInHandle = kWordSize;
};
class Thread : public AllStatic {
public:
static word dart_stream_offset();
static word async_stack_trace_offset();
static word predefined_symbols_address_offset();
static word deoptimize_entry_offset();
static word megamorphic_call_checked_entry_offset();
static word active_exception_offset();
static word active_stacktrace_offset();
static word resume_pc_offset();
@ -569,10 +647,10 @@ class Thread : public AllStatic {
static word null_error_shared_without_fpu_regs_entry_point_offset();
static word write_barrier_mask_offset();
static word monomorphic_miss_entry_offset();
static word write_barrier_wrappers_thread_offset(intptr_t regno);
static word write_barrier_wrappers_thread_offset(Register regno);
static word array_write_barrier_entry_point_offset();
static word write_barrier_entry_point_offset();
static word verify_callback_isolate_entry_point_offset();
static word verify_callback_entry_offset();
static word vm_tag_offset();
static uword vm_tag_compiled_id();
@ -583,9 +661,14 @@ class Thread : public AllStatic {
static word execution_state_offset();
static uword native_execution_state();
static uword generated_execution_state();
static word stack_overflow_flags_offset();
static word stack_overflow_shared_stub_entry_point_offset(bool fpu_regs);
static word stack_limit_offset();
static word unboxed_int64_runtime_arg_offset();
static word callback_code_offset();
static word AllocateArray_entry_point_offset();
#if !defined(TARGET_ARCH_DBC)
static word write_barrier_code_offset();
static word array_write_barrier_code_offset();
@ -602,7 +685,9 @@ class Thread : public AllStatic {
static word invoke_dart_code_from_bytecode_stub_offset();
static word null_error_shared_without_fpu_regs_stub_offset();
static word null_error_shared_with_fpu_regs_stub_offset();
static word stack_overflow_shared_without_fpu_regs_entry_point_offset();
static word stack_overflow_shared_without_fpu_regs_stub_offset();
static word stack_overflow_shared_with_fpu_regs_entry_point_offset();
static word stack_overflow_shared_with_fpu_regs_stub_offset();
static word lazy_deopt_from_return_stub_offset();
static word lazy_deopt_from_throw_stub_offset();
@ -628,6 +713,7 @@ class Thread : public AllStatic {
#undef DECLARE_CONSTANT_OFFSET_GETTER
static word OffsetFromThread(const dart::Object& object);
static intptr_t OffsetFromThread(const dart::RuntimeEntry* runtime_entry);
};
class StoreBufferBlock : public AllStatic {
@ -670,9 +756,9 @@ class ClassTable : public AllStatic {
#if !defined(PRODUCT)
static word ClassOffsetFor(intptr_t cid);
static word StateOffsetFor(intptr_t cid);
static word TableOffsetFor(intptr_t cid);
static word CounterOffsetFor(intptr_t cid, bool is_new);
static word SizeOffsetFor(intptr_t cid, bool is_new);
static word class_heap_stats_table_offset();
static word NewSpaceCounterOffsetFor(intptr_t cid);
static word NewSpaceSizeOffsetFor(intptr_t cid);
#endif // !defined(PRODUCT)
static const word kSizeOfClassPairLog2;
};
@ -689,9 +775,10 @@ class ClassHeapStats : public AllStatic {
class Instructions : public AllStatic {
public:
static const intptr_t kPolymorphicEntryOffset;
static const intptr_t kMonomorphicEntryOffset;
static intptr_t HeaderSize();
static const word kPolymorphicEntryOffset;
static const word kMonomorphicEntryOffset;
static word HeaderSize();
static word UnalignedHeaderSize();
};
class Code : public AllStatic {
@ -700,10 +787,11 @@ class Code : public AllStatic {
static uword EntryPointOf(const dart::Code& code);
#endif // defined(TARGET_ARCH_IA32)
static intptr_t object_pool_offset();
static intptr_t entry_point_offset(
CodeEntryKind kind = CodeEntryKind::kNormal);
static intptr_t saved_instructions_offset();
static word object_pool_offset();
static word entry_point_offset(CodeEntryKind kind = CodeEntryKind::kNormal);
static word function_entry_point_offset(CodeEntryKind kind);
static word saved_instructions_offset();
static word owner_offset();
};
class SubtypeTestCache : public AllStatic {
@ -736,6 +824,8 @@ class Closure : public AllStatic {
static word function_offset();
static word function_type_arguments_offset();
static word instantiator_type_arguments_offset();
static word hash_offset();
static word InstanceSize();
};
class HeapPage : public AllStatic {
@ -783,6 +873,24 @@ class Symbols : public AllStatic {
static const word kNullCharCodeSymbolOffset;
};
class Field : public AllStatic {
public:
static word OffsetOf(const dart::Field& field);
static word guarded_cid_offset();
static word guarded_list_length_in_object_offset_offset();
static word guarded_list_length_offset();
static word is_nullable_offset();
static word static_value_offset();
static word kind_bits_offset();
};
class TypeArguments : public AllStatic {
public:
static word instantiations_offset();
static word type_at_offset(intptr_t i);
};
} // namespace target
} // namespace compiler
} // namespace dart

File diff suppressed because it is too large


@ -262,6 +262,12 @@
SIZEOF(Mint, InstanceSize, RawMint) \
SIZEOF(NativeArguments, StructSize, NativeArguments) \
SIZEOF(String, InstanceSize, RawString) \
SIZEOF(TypedData, InstanceSize, RawTypedData)
SIZEOF(TypedData, InstanceSize, RawTypedData) \
SIZEOF(Object, InstanceSize, RawObject) \
SIZEOF(TypedDataBase, InstanceSize, RawTypedDataBase) \
SIZEOF(Closure, InstanceSize, RawClosure) \
SIZEOF(GrowableObjectArray, InstanceSize, RawGrowableObjectArray) \
SIZEOF(Instance, InstanceSize, RawInstance) \
SIZEOF(LinkedHashMap, InstanceSize, RawLinkedHashMap)
#endif // RUNTIME_VM_COMPILER_RUNTIME_OFFSETS_LIST_H_


@ -1098,7 +1098,7 @@ void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ StoreToOffset(kWord, R9, THR, target::Thread::vm_tag_offset());
// Load arguments descriptor array into R4, which is passed to Dart code.
__ ldr(R4, Address(R1, VMHandles::kOffsetOfRawPtrInHandle));
__ ldr(R4, Address(R1, target::VMHandles::kOffsetOfRawPtrInHandle));
// Load number of arguments into R9 and adjust count for type arguments.
__ ldr(R3,
@ -1110,7 +1110,7 @@ void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ SmiUntag(R9);
// Compute address of 'arguments array' data area into R2.
__ ldr(R2, Address(R2, VMHandles::kOffsetOfRawPtrInHandle));
__ ldr(R2, Address(R2, target::VMHandles::kOffsetOfRawPtrInHandle));
__ AddImmediate(R2, target::Array::data_offset() - kHeapObjectTag);
// Set up arguments for the Dart call.
@ -1134,7 +1134,7 @@ void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
} else {
__ LoadImmediate(PP, 0); // GC safe value into PP.
}
__ ldr(CODE_REG, Address(R0, VMHandles::kOffsetOfRawPtrInHandle));
__ ldr(CODE_REG, Address(R0, target::VMHandles::kOffsetOfRawPtrInHandle));
__ ldr(R0, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ blx(R0); // R4 is the arguments descriptor array.
@ -1668,7 +1668,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
// when the object initialization should be done as a loop or as
// straight line code.
const int kInlineInstanceSize = 12;
const intptr_t instance_size = target::Class::InstanceSize(cls);
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
ASSERT(instance_size > 0);
ASSERT(instance_size % target::ObjectAlignment::kObjectAlignment == 0);
if (is_cls_parameterized) {


@ -1732,7 +1732,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
// when the object initialization should be done as a loop or as
// straight line code.
const int kInlineInstanceSize = 12;
const intptr_t instance_size = target::Class::InstanceSize(cls);
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
ASSERT(instance_size > 0);
if (is_cls_parameterized) {
__ ldr(kTypeArgumentsReg, Address(SP));


@ -1384,7 +1384,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
// when the object initialization should be done as a loop or as
// straight line code.
const int kInlineInstanceSize = 12; // In words.
const intptr_t instance_size = target::Class::InstanceSize(cls);
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
ASSERT(instance_size > 0);
if (is_cls_parameterized) {
__ movl(EDX, Address(ESP, kObjectTypeArgumentsOffset));


@ -1682,7 +1682,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
// when the object initialization should be done as a loop or as
// straight line code.
const int kInlineInstanceSize = 12; // In words.
const intptr_t instance_size = target::Class::InstanceSize(cls);
const intptr_t instance_size = target::Class::GetInstanceSize(cls);
ASSERT(instance_size > 0);
__ LoadObject(R9, NullObject());
if (is_cls_parameterized) {


@ -371,15 +371,15 @@ class ExceptionHandlerFinder : public StackResource {
};
CatchEntryMove CatchEntryMove::ReadFrom(ReadStream* stream) {
using Reader = ReadStream::Raw<sizeof(intptr_t), intptr_t>;
const intptr_t src = Reader::Read(stream);
const intptr_t dest_and_kind = Reader::Read(stream);
using Reader = ReadStream::Raw<sizeof(int32_t), int32_t>;
const int32_t src = Reader::Read(stream);
const int32_t dest_and_kind = Reader::Read(stream);
return CatchEntryMove(src, dest_and_kind);
}
#if !defined(DART_PRECOMPILED_RUNTIME)
void CatchEntryMove::WriteTo(WriteStream* stream) {
using Writer = WriteStream::Raw<sizeof(intptr_t), intptr_t>;
using Writer = WriteStream::Raw<sizeof(int32_t), int32_t>;
Writer::Write(stream, src_);
Writer::Write(stream, dest_and_kind_);
}


@ -203,21 +203,21 @@ class CatchEntryMove {
#endif
private:
CatchEntryMove(intptr_t src, intptr_t dest_and_kind)
CatchEntryMove(int32_t src, int32_t dest_and_kind)
: src_(src), dest_and_kind_(dest_and_kind) {}
// Note: the BitField helper does not work with signed values whose size does
// not match the destination size, so we don't use BitField for declaring
// DestinationField and instead encode and decode it manually.
using SourceKindField = BitField<intptr_t, SourceKind, 0, 4>;
using SourceKindField = BitField<int32_t, SourceKind, 0, 4>;
static constexpr intptr_t kHalfSourceBits = kBitsPerWord / 2;
using LoSourceSlot = BitField<intptr_t, intptr_t, 0, kHalfSourceBits>;
static constexpr intptr_t kHalfSourceBits = 16;
using LoSourceSlot = BitField<int32_t, int32_t, 0, kHalfSourceBits>;
using HiSourceSlot =
BitField<intptr_t, intptr_t, kHalfSourceBits, kHalfSourceBits>;
BitField<int32_t, int32_t, kHalfSourceBits, kHalfSourceBits>;
intptr_t src_;
intptr_t dest_and_kind_;
int32_t src_;
int32_t dest_and_kind_;
};
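The source is two signed 16-bit slot indices packed into one int32_t (low and high halves), and the note above is why BitField is avoided: decoding signed halves needs explicit sign handling. A self-contained sketch of that manual pack/unpack under the same 16-bit split (names are illustrative):

#include <cassert>
#include <cstdint>

constexpr int kHalfBits = 16;

// Pack two signed 16-bit slot indices into one 32-bit word.
int32_t PackSlots(int32_t lo, int32_t hi) {
  return static_cast<int32_t>((static_cast<uint32_t>(hi) << kHalfBits) |
                              (static_cast<uint32_t>(lo) & 0xFFFFu));
}

// Re-sign-extend the low half explicitly.
int32_t UnpackLo(int32_t packed) {
  return static_cast<int16_t>(packed & 0xFFFF);
}

// Arithmetic right shift preserves the sign of the high half.
int32_t UnpackHi(int32_t packed) {
  return packed >> kHalfBits;
}

int main() {
  const int32_t packed = PackSlots(-3, 7);
  assert(UnpackLo(packed) == -3);
  assert(UnpackHi(packed) == 7);
  return 0;
}
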
// A sequence of moves that needs to be executed to create a state expected


@ -107,7 +107,7 @@ void ImageWriter::PrepareForSerialization(
RawInstructions* instructions = Code::InstructionsOf(code);
const intptr_t offset = next_text_offset_;
instructions_.Add(InstructionsData(instructions, code, offset));
next_text_offset_ += instructions->HeapSize();
next_text_offset_ += SizeInSnapshot(instructions);
ASSERT(heap_->GetObjectId(instructions) == 0);
heap_->SetObjectId(instructions, offset);
break;
@ -142,7 +142,7 @@ void ImageWriter::SetupShared(ObjectOffsetMap* map, const void* shared_image) {
pair.object = raw_obj;
pair.offset = offset;
map->Insert(pair);
obj_addr += raw_obj->HeapSize();
obj_addr += SizeInSnapshot(raw_obj);
}
ASSERT(obj_addr == end_addr);
}
@ -175,13 +175,17 @@ int32_t ImageWriter::GetTextOffsetFor(RawInstructions* instructions,
offset = next_text_offset_;
heap_->SetObjectId(instructions, offset);
next_text_offset_ += instructions->HeapSize();
next_text_offset_ += SizeInSnapshot(instructions);
instructions_.Add(InstructionsData(instructions, code, offset));
ASSERT(offset != 0);
return offset;
}
intptr_t ImageWriter::SizeInSnapshot(RawObject* raw_object) {
return raw_object->HeapSize();
}
bool ImageWriter::GetSharedDataOffsetFor(RawObject* raw_object,
uint32_t* offset) {
ObjectOffsetPair* pair = shared_objects_.Lookup(raw_object);
@ -193,9 +197,9 @@ bool ImageWriter::GetSharedDataOffsetFor(RawObject* raw_object,
}
uint32_t ImageWriter::GetDataOffsetFor(RawObject* raw_object) {
intptr_t heap_size = raw_object->HeapSize();
intptr_t snap_size = SizeInSnapshot(raw_object);
intptr_t offset = next_data_offset_;
next_data_offset_ += heap_size;
next_data_offset_ += snap_size;
objects_.Add(ObjectData(raw_object));
return offset;
}
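GetDataOffsetFor is a bump allocator over the data section: each object is placed at the current cursor and the cursor advances by the object's size in the snapshot, which after this change may differ from its size in the host heap. A generic sketch of that offset assignment:

#include <cassert>
#include <cstdint>
#include <vector>

// Assign consecutive offsets to objects given their serialized sizes.
std::vector<int64_t> AssignDataOffsets(const std::vector<int64_t>& sizes) {
  std::vector<int64_t> offsets;
  int64_t cursor = 0;
  for (int64_t size : sizes) {
    offsets.push_back(cursor);
    cursor += size;  // advance by the size the object occupies in the image
  }
  return offsets;
}

int main() {
  const std::vector<int64_t> offsets = AssignDataOffsets({32, 16, 48});
  assert(offsets[0] == 0 && offsets[1] == 32 && offsets[2] == 48);
  return 0;
}
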
@ -238,7 +242,7 @@ void ImageWriter::DumpInstructionsSizes() {
js.PrintPropertyStr("c", name);
}
js.PrintProperty("n", data.code_->QualifiedName());
js.PrintProperty("s", data.insns_->raw()->HeapSize());
js.PrintProperty("s", SizeInSnapshot(data.insns_->raw()));
js.CloseObject();
}
js.CloseArray();
@ -473,7 +477,7 @@ void AssemblyImageWriter::WriteText(WriteStream* clustered_stream, bool vm) {
"Instructions",
/*name=*/nullptr);
profile_writer_->AttributeBytesTo({offset_space_, offset},
insns.raw()->HeapSize());
SizeInSnapshot(insns.raw()));
}
ASSERT(insns.raw()->HeapSize() % sizeof(uint64_t) == 0);
@ -775,7 +779,8 @@ void BlobImageWriter::WriteText(WriteStream* clustered_stream, bool vm) {
beginning += sizeof(uword);
text_offset += WriteByteSequence(beginning, end);
ASSERT((text_offset - instr_start) == insns.raw()->HeapSize());
ASSERT((text_offset - instr_start) ==
ImageWriter::SizeInSnapshot(insns.raw()));
}
#ifdef DART_PRECOMPILER


@ -181,6 +181,8 @@ class ImageWriter : public ValueObject {
void TraceInstructions(const Instructions& instructions);
static intptr_t SizeInSnapshot(RawObject* object);
protected:
void WriteROData(WriteStream* stream);
virtual void WriteText(WriteStream* clustered_stream, bool vm) = 0;


@ -1351,7 +1351,8 @@ bool Isolate::VerifyTerminateCapability(const Object& capability) const {
bool Isolate::AddResumeCapability(const Capability& capability) {
// Ensure a limit for the number of resume capabilities remembered.
static const intptr_t kMaxResumeCapabilities = kSmiMax / (6 * kWordSize);
static const intptr_t kMaxResumeCapabilities =
compiler::target::kSmiMax / (6 * kWordSize);
const GrowableObjectArray& caps = GrowableObjectArray::Handle(
current_zone(), object_store()->resume_capabilities());
@ -1402,7 +1403,8 @@ bool Isolate::RemoveResumeCapability(const Capability& capability) {
void Isolate::AddExitListener(const SendPort& listener,
const Instance& response) {
// Ensure a limit for the number of listeners remembered.
static const intptr_t kMaxListeners = kSmiMax / (12 * kWordSize);
static const intptr_t kMaxListeners =
compiler::target::kSmiMax / (12 * kWordSize);
const GrowableObjectArray& listeners = GrowableObjectArray::Handle(
current_zone(), object_store()->exit_listeners());
@ -1469,7 +1471,8 @@ void Isolate::NotifyExitListeners() {
void Isolate::AddErrorListener(const SendPort& listener) {
// Ensure a limit for the number of listeners remembered.
static const intptr_t kMaxListeners = kSmiMax / (6 * kWordSize);
static const intptr_t kMaxListeners =
compiler::target::kSmiMax / (6 * kWordSize);
const GrowableObjectArray& listeners = GrowableObjectArray::Handle(
current_zone(), object_store()->error_listeners());


@ -5634,7 +5634,7 @@ class Context : public Object {
}
static bool IsValidLength(intptr_t len) {
return 0 <= len && len <= kMaxElements;
return 0 <= len && len <= compiler::target::Array::kMaxElements;
}
static intptr_t InstanceSize() {


@ -13,6 +13,7 @@
#include "platform/atomic.h"
#include "vm/class_id.h"
#include "vm/compiler/method_recognizer.h"
#include "vm/compiler/runtime_api.h"
#include "vm/exceptions.h"
#include "vm/globals.h"
#include "vm/object_graph.h"
@ -150,8 +151,10 @@ class RawObject {
// Encodes the object size in the tag in units of object alignment.
class SizeTag {
public:
static const intptr_t kMaxSizeTag = ((1 << RawObject::kSizeTagSize) - 1)
<< kObjectAlignmentLog2;
static constexpr intptr_t kMaxSizeTagInUnitsOfAlignment =
((1 << RawObject::kSizeTagSize) - 1);
static constexpr intptr_t kMaxSizeTag =
kMaxSizeTagInUnitsOfAlignment * kObjectAlignment;
static uword encode(intptr_t size) {
return SizeBits::encode(SizeToTagValue(size));
@ -171,11 +174,15 @@ class RawObject {
: public BitField<uint32_t, intptr_t, kSizeTagPos, kSizeTagSize> {};
static intptr_t SizeToTagValue(intptr_t size) {
ASSERT(Utils::IsAligned(size, kObjectAlignment));
return (size > kMaxSizeTag) ? 0 : (size >> kObjectAlignmentLog2);
ASSERT(Utils::IsAligned(
size, compiler::target::ObjectAlignment::kObjectAlignment));
return (size > kMaxSizeTag)
? 0
: (size >>
compiler::target::ObjectAlignment::kObjectAlignmentLog2);
}
static intptr_t TagValueToSize(intptr_t value) {
return value << kObjectAlignmentLog2;
return value << compiler::target::ObjectAlignment::kObjectAlignmentLog2;
}
};
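SizeTag stores the size in units of the target's object alignment, so the largest encodable size is (2^kSizeTagSize - 1) alignment units; anything bigger encodes as 0 and the real size is recovered from the class. A small sketch of that encode/decode arithmetic with assumed values (8 tag bits, 16-byte target alignment; not the real constants):

#include <cassert>
#include <cstdint>

constexpr intptr_t kSizeTagSizeAssumed = 8;         // assumed tag width
constexpr intptr_t kObjectAlignmentAssumed = 16;    // assumed target alignment
constexpr intptr_t kObjectAlignmentLog2Assumed = 4;
constexpr intptr_t kMaxSizeTagAssumed =
    ((intptr_t{1} << kSizeTagSizeAssumed) - 1) * kObjectAlignmentAssumed;

intptr_t SizeToTagValueSketch(intptr_t size) {
  // Over-large sizes get tag value 0; the real size lives elsewhere.
  return (size > kMaxSizeTagAssumed) ? 0
                                     : (size >> kObjectAlignmentLog2Assumed);
}

intptr_t TagValueToSizeSketch(intptr_t value) {
  return value << kObjectAlignmentLog2Assumed;
}

int main() {
  assert(kMaxSizeTagAssumed == 4080);
  assert(TagValueToSizeSketch(SizeToTagValueSketch(64)) == 64);  // round-trips
  assert(SizeToTagValueSketch(8192) == 0);  // too large for the tag
  return 0;
}
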
@ -1464,19 +1471,19 @@ class RawPcDescriptors : public RawObject {
public:
// Most of the time try_index will be small and merged field will fit into
// one byte.
static intptr_t Encode(intptr_t kind, intptr_t try_index) {
static int32_t Encode(intptr_t kind, intptr_t try_index) {
intptr_t kind_shift = Utils::ShiftForPowerOfTwo(kind);
ASSERT(Utils::IsUint(kKindShiftSize, kind_shift));
ASSERT(Utils::IsInt(kTryIndexSize, try_index));
return (try_index << kTryIndexPos) | (kind_shift << kKindShiftPos);
}
static intptr_t DecodeKind(intptr_t merged_kind_try) {
static intptr_t DecodeKind(int32_t merged_kind_try) {
const intptr_t kKindShiftMask = (1 << kKindShiftSize) - 1;
return 1 << (merged_kind_try & kKindShiftMask);
}
static intptr_t DecodeTryIndex(intptr_t merged_kind_try) {
static intptr_t DecodeTryIndex(int32_t merged_kind_try) {
// Arithmetic shift.
return merged_kind_try >> kTryIndexPos;
}
@ -1488,7 +1495,7 @@ class RawPcDescriptors : public RawObject {
COMPILE_ASSERT(kLastKind <= 1 << ((1 << kKindShiftSize) - 1));
static const intptr_t kTryIndexPos = kKindShiftSize;
static const intptr_t kTryIndexSize = kBitsPerWord - kKindShiftSize;
static const intptr_t kTryIndexSize = 32 - kKindShiftSize;
};
private:

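MergedKindTry packs two values into one int32_t: the kind is always a power of two, so only its shift is stored in the low bits, and the (possibly negative) try_index fills the upper bits. A standalone sketch of that encode/decode, with an assumed 3-bit kind-shift field (illustrative constant, not the real one):

#include <cassert>
#include <cstdint>

// Assumed layout: 3 bits hold the kind's power-of-two shift, the rest the
// try_index (signed, so -1 for "no try block" survives the round trip).
constexpr int32_t kKindShiftSizeAssumed = 3;
constexpr int32_t kTryIndexPosAssumed = kKindShiftSizeAssumed;

int32_t EncodeSketch(int32_t kind_shift, int32_t try_index) {
  return static_cast<int32_t>(
      (static_cast<uint32_t>(try_index) << kTryIndexPosAssumed) |
      static_cast<uint32_t>(kind_shift));
}

int32_t DecodeKindSketch(int32_t merged) {
  const int32_t mask = (1 << kKindShiftSizeAssumed) - 1;
  return 1 << (merged & mask);
}

int32_t DecodeTryIndexSketch(int32_t merged) {
  return merged >> kTryIndexPosAssumed;  // arithmetic shift keeps the sign
}

int main() {
  const int32_t merged = EncodeSketch(/*kind_shift=*/2, /*try_index=*/-1);
  assert(DecodeKindSketch(merged) == 4);       // kind == 1 << 2
  assert(DecodeTryIndexSketch(merged) == -1);  // sign preserved
  return 0;
}
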

@ -47,17 +47,21 @@ void RuntimeEntry::CallInternal(const RuntimeEntry* runtime_entry,
intptr_t argument_count) {
if (runtime_entry->is_leaf()) {
ASSERT(argument_count == runtime_entry->argument_count());
__ LoadFromOffset(kWord, TMP, THR, Thread::OffsetFromThread(runtime_entry));
__ str(TMP, Address(THR, Thread::vm_tag_offset()));
__ LoadFromOffset(
kWord, TMP, THR,
compiler::target::Thread::OffsetFromThread(runtime_entry));
__ str(TMP, Address(THR, compiler::target::Thread::vm_tag_offset()));
__ blx(TMP);
__ LoadImmediate(TMP, VMTag::kDartCompiledTagId);
__ str(TMP, Address(THR, Thread::vm_tag_offset()));
__ str(TMP, Address(THR, compiler::target::Thread::vm_tag_offset()));
ASSERT((kAbiPreservedCpuRegs & (1 << THR)) != 0);
ASSERT((kAbiPreservedCpuRegs & (1 << PP)) != 0);
} else {
// Argument count is not checked here, but in the runtime entry for a more
// informative error message.
__ LoadFromOffset(kWord, R9, THR, Thread::OffsetFromThread(runtime_entry));
__ LoadFromOffset(
kWord, R9, THR,
compiler::target::Thread::OffsetFromThread(runtime_entry));
__ LoadImmediate(R4, argument_count);
__ BranchLinkToRuntime();
}


@ -322,9 +322,10 @@ void TypeTestingStubGenerator::
// fall through to continue
// b) Then we'll load the values for the type parameters.
__ LoadField(
instance_type_args_reg,
FieldAddress(instance_reg, type_class.type_arguments_field_offset()));
__ LoadField(instance_type_args_reg,
FieldAddress(instance_reg,
compiler::target::Class::TypeArgumentsFieldOffset(
type_class)));
// The kernel frontend should fill in any non-assigned type parameters on
// construction with dynamic/Object, so we should never get the null type
@ -391,24 +392,27 @@ void TypeTestingStubGenerator::BuildOptimizedTypeArgumentValueCheck(
// TODO(kustermann): Even though it should be safe to use TMP here, we
// should avoid using TMP outside the assembler. Try to find a free
// register to use here!
__ LoadField(TMP,
FieldAddress(instance_type_args_reg,
compiler::target::TypeArguments::type_at_offset(
type_param_value_offset_i)));
__ LoadField(
TMP,
FieldAddress(instance_type_args_reg,
TypeArguments::type_at_offset(type_param_value_offset_i)));
__ LoadField(class_id_reg, FieldAddress(TMP, Type::type_class_id_offset()));
class_id_reg,
FieldAddress(TMP, compiler::target::Type::type_class_id_offset()));
if (type_arg.IsTypeParameter()) {
const TypeParameter& type_param = TypeParameter::Cast(type_arg);
const Register kTypeArgumentsReg = type_param.IsClassTypeParameter()
? instantiator_type_args_reg
: function_type_args_reg;
__ LoadField(
own_type_arg_reg,
FieldAddress(kTypeArgumentsReg,
TypeArguments::type_at_offset(type_param.index())));
__ LoadField(own_type_arg_reg,
FieldAddress(kTypeArgumentsReg,
compiler::target::TypeArguments::type_at_offset(
type_param.index())));
__ CompareWithFieldValue(
class_id_reg,
FieldAddress(own_type_arg_reg, Type::type_class_id_offset()));
FieldAddress(own_type_arg_reg,
compiler::target::Type::type_class_id_offset()));
__ BranchIf(NOT_EQUAL, check_failed);
} else {
const Class& type_class = Class::Handle(type_arg.type_class());
@ -483,7 +487,7 @@ void RegisterTypeArgumentsUse(const Function& function,
const Class& instance_klass =
Class::Handle(Isolate::Current()->class_table()->At(cid));
if (load_field->slot().IsTypeArguments() && instance_klass.IsGeneric() &&
instance_klass.type_arguments_field_offset() ==
compiler::target::Class::TypeArgumentsFieldOffset(instance_klass) ==
load_field->slot().offset_in_bytes()) {
// This is a subset of Case c) above, namely forwarding the type
// argument vector.


@ -23,8 +23,10 @@ void TypeTestingStubGenerator::BuildOptimizedTypeTestStub(
BuildOptimizedTypeTestStubFastCases(assembler, hi, type, type_class,
kInstanceReg, kClassIdReg);
__ ldr(CODE_REG, Address(THR, Thread::slow_type_test_stub_offset()));
__ Branch(FieldAddress(CODE_REG, Code::entry_point_offset()));
__ ldr(CODE_REG,
Address(THR, compiler::target::Thread::slow_type_test_stub_offset()));
__ Branch(
FieldAddress(CODE_REG, compiler::target::Code::entry_point_offset()));
}
void TypeTestingStubGenerator::