Mirror of https://github.com/dart-lang/sdk
synced 2024-11-02 10:49:00 +00:00
[vm] Make Address/FieldAddress independent of operand size on arm64
The operand size is dropped from the Address/FieldAddress constructors on arm64. This makes code using Address/FieldAddress less fragile, avoids repeating the size in both the Address/FieldAddress and the load/store instruction, and better aligns arm64 Address/FieldAddress with the other architectures.

TEST=ci
Change-Id: I92d7c5c8f0239333f022deebc0472136018bb0fa
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/260072
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Alexander Markov <alexmarkov@google.com>
This commit is contained in:
parent d0d509d4fc
commit f9355b1bf2
6 changed files with 177 additions and 232 deletions
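The changed hunks follow. For orientation, here is a representative call site before and after (taken from the StoreBarrier hunk below; the same pattern repeats throughout):

  // Before: the operand size is named twice, once in the FieldAddress and
  // once in the load itself.
  ldr(TMP, FieldAddress(object, target::Object::tags_offset(), kByte),
      kUnsignedByte);

  // After: only the load/store instruction carries the operand size.
  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);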
@@ -1274,10 +1274,8 @@ void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
 
   __ Bind(&use_declaration_type);
   __ LoadClassById(R2, R1);
-  __ ldr(
-      R3,
-      FieldAddress(R2, target::Class::num_type_arguments_offset(), kTwoBytes),
-      kTwoBytes);
+  __ ldr(R3, FieldAddress(R2, target::Class::num_type_arguments_offset()),
+         kTwoBytes);
   __ cbnz(normal_ir_body, R3);
 
   __ LoadCompressed(R0,
@@ -1465,9 +1463,9 @@ void AsmIntrinsifier::Type_equality(Assembler* assembler,
 
   // Check nullability.
   __ Bind(&equiv_cids);
-  __ ldr(R1, FieldAddress(R1, target::Type::nullability_offset(), kByte),
+  __ ldr(R1, FieldAddress(R1, target::Type::nullability_offset()),
          kUnsignedByte);
-  __ ldr(R2, FieldAddress(R2, target::Type::nullability_offset(), kByte),
+  __ ldr(R2, FieldAddress(R2, target::Type::nullability_offset()),
          kUnsignedByte);
   __ cmp(R1, Operand(R2));
   __ b(&check_legacy, NE);
@@ -1525,12 +1523,11 @@ void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                      Label* normal_ir_body) {
   Label not_yet_computed;
   __ ldr(R0, Address(SP, 0 * target::kWordSize));  // Object.
-  __ ldr(R0,
-         FieldAddress(R0,
-                      target::Object::tags_offset() +
-                          target::UntaggedObject::kHashTagPos / kBitsPerByte,
-                      kFourBytes),
-         kUnsignedFourBytes);
+  __ ldr(
+      R0,
+      FieldAddress(R0, target::Object::tags_offset() +
+                           target::UntaggedObject::kHashTagPos / kBitsPerByte),
+      kUnsignedFourBytes);
   __ cbz(&not_yet_computed, R0);
   __ SmiTag(R0);
   __ ret();
@@ -1935,7 +1932,7 @@ void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
   __ AddImmediate(R6, 1);
   __ sub(R2, R2, Operand(1));
   __ cmp(R2, Operand(0));
-  __ str(R1, FieldAddress(R7, target::OneByteString::data_offset(), kByte),
+  __ str(R1, FieldAddress(R7, target::OneByteString::data_offset()),
          kUnsignedByte);
   __ AddImmediate(R7, 1);
   __ b(&loop, GT);
@@ -860,7 +860,7 @@ Address Assembler::PrepareLargeOffset(Register base,
                                       int32_t offset,
                                       OperandSize sz) {
   if (Address::CanHoldOffset(offset, Address::Offset, sz)) {
-    return Address(base, offset, Address::Offset, sz);
+    return Address(base, offset);
   }
   ASSERT(base != TMP2);
   Operand op;
@@ -870,7 +870,7 @@ Address Assembler::PrepareLargeOffset(Register base,
       (Operand::CanHold(upper20, kXRegSizeInBits, &op) == Operand::Immediate) &&
       Address::CanHoldOffset(lower12, Address::Offset, sz)) {
     add(TMP2, base, op);
-    return Address(TMP2, lower12, Address::Offset, sz);
+    return Address(TMP2, lower12);
   }
   LoadImmediate(TMP2, offset);
   return Address(base, TMP2);
@@ -1103,10 +1103,8 @@ void Assembler::StoreBarrier(Register object,
   if (can_be_smi == kValueCanBeSmi) {
     BranchIfSmi(value, &done);
   }
-  ldr(TMP, FieldAddress(object, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
-  ldr(TMP2, FieldAddress(value, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
+  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
+  ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
   and_(TMP, TMP2,
        Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
   tst(TMP, Operand(HEAP_BITS, LSR, 32));
@@ -1158,7 +1156,7 @@ void Assembler::StoreCompressedIntoArray(Register object,
                                          Register slot,
                                          Register value,
                                          CanBeSmi can_be_smi) {
-  str(value, Address(slot, 0, Address::Offset, kObjectBytes), kObjectBytes);
+  str(value, Address(slot, 0), kObjectBytes);
   StoreIntoArrayBarrier(object, slot, value, can_be_smi);
 }
 
@@ -1185,10 +1183,8 @@ void Assembler::StoreIntoArrayBarrier(Register object,
   if (can_be_smi == kValueCanBeSmi) {
     BranchIfSmi(value, &done);
   }
-  ldr(TMP, FieldAddress(object, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
-  ldr(TMP2, FieldAddress(value, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
+  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
+  ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
   and_(TMP, TMP2,
        Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
   tst(TMP, Operand(HEAP_BITS, LSR, 32));
@@ -1226,8 +1222,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
   Label done;
   StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);
 
-  ldr(TMP, FieldAddress(object, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
+  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
   tsti(TMP, Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
   b(&done, ZERO);
 
@@ -1253,8 +1248,7 @@ void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
   Label done;
   StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);
 
-  ldr(TMP, FieldAddress(object, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
+  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
   tsti(TMP, Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
   b(&done, ZERO);
 
@@ -1905,13 +1899,13 @@ void Assembler::MonomorphicCheckedEntryJIT() {
   const intptr_t count_offset = target::Array::element_offset(1);
 
   // Sadly this cannot use ldp because ldp requires aligned offsets.
-  ldr(R1, FieldAddress(R5, cid_offset, kObjectBytes), kObjectBytes);
-  ldr(R2, FieldAddress(R5, count_offset, kObjectBytes), kObjectBytes);
+  ldr(R1, FieldAddress(R5, cid_offset), kObjectBytes);
+  ldr(R2, FieldAddress(R5, count_offset), kObjectBytes);
   LoadClassIdMayBeSmi(IP0, R0);
   add(R2, R2, Operand(target::ToRawSmi(1)), kObjectBytes);
   cmp(R1, Operand(IP0, LSL, 1), kObjectBytes);
   b(&miss, NE);
-  str(R2, FieldAddress(R5, count_offset, kObjectBytes), kObjectBytes);
+  str(R2, FieldAddress(R5, count_offset), kObjectBytes);
   LoadImmediate(R4, 0);  // GC-safe for OptimizeInvokedFunction
 
   // Fall through to unchecked entry.
@@ -2108,7 +2102,7 @@ Address Assembler::ElementAddressForIntIndex(bool is_external,
   ASSERT(Utils::IsInt(32, offset));
   const OperandSize size = Address::OperandSizeFor(cid);
   ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
-  return Address(array, static_cast<int32_t>(offset), Address::Offset, size);
+  return Address(array, static_cast<int32_t>(offset));
 }
 
 void Assembler::ComputeElementAddressForIntIndex(Register address,
@@ -2174,7 +2168,7 @@ Address Assembler::ElementAddressForRegIndexWithSize(bool is_external,
     }
   }
   ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
-  return Address(temp, offset, Address::Offset, size);
+  return Address(temp, offset);
 }
 
 void Assembler::ComputeElementAddressForRegIndex(Register address,
@@ -136,16 +136,14 @@ class Address : public ValueObject {
  public:
   Address(const Address& other)
       : ValueObject(),
-        encoding_(other.encoding_),
         type_(other.type_),
         base_(other.base_),
-        log2sz_(other.log2sz_) {}
+        offset_(other.offset_) {}
 
   Address& operator=(const Address& other) {
-    encoding_ = other.encoding_;
     type_ = other.type_;
     base_ = other.base_;
-    log2sz_ = other.log2sz_;
+    offset_ = other.offset_;
     return *this;
   }
 
@@ -167,74 +165,21 @@ class Address : public ValueObject {
   bool can_writeback_to(Register r) const {
     if (type() == PreIndex || type() == PostIndex || type() == PairPreIndex ||
         type() == PairPostIndex) {
-      return base() != r;
+      return ConcreteRegister(base()) != ConcreteRegister(r);
     }
     return true;
   }
 
-  // Offset is in bytes. For the unsigned imm12 case, we unscale based on the
-  // operand size, and assert that offset is aligned accordingly.
-  // For the smaller signed imm9 case, the offset is the number of bytes, but
-  // is unscaled.
-  Address(Register rn,
-          int32_t offset = 0,
-          AddressType at = Offset,
-          OperandSize sz = kEightBytes) {
+  // Offset is in bytes.
+  explicit Address(Register rn, int32_t offset = 0, AddressType at = Offset) {
     ASSERT((rn != kNoRegister) && (rn != R31) && (rn != ZR));
-    ASSERT(CanHoldOffset(offset, at, sz));
-    log2sz_ = -1;
-    const int32_t scale = Log2OperandSizeBytes(sz);
-    if ((at == Offset) && Utils::IsUint(12 + scale, offset) &&
-        (offset == ((offset >> scale) << scale))) {
-      encoding_ =
-          B24 | ((offset >> scale) << kImm12Shift) | Arm64Encode::Rn(rn);
-      if (offset != 0) {
-        log2sz_ = scale;
-      }
-    } else if ((at == Offset) && Utils::IsInt(9, offset)) {
-      encoding_ = ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(rn);
-    } else if ((at == PreIndex) || (at == PostIndex)) {
-      ASSERT(Utils::IsInt(9, offset));
-      int32_t idx = (at == PostIndex) ? B10 : (B11 | B10);
-      encoding_ = idx | ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(rn);
-    } else {
-      ASSERT((at == PairOffset) || (at == PairPreIndex) ||
-             (at == PairPostIndex));
-      ASSERT(Utils::IsInt(7 + scale, offset) &&
-             (static_cast<uint32_t>(offset) ==
-              ((static_cast<uint32_t>(offset) >> scale) << scale)));
-      int32_t idx = 0;
-      switch (at) {
-        case PairPostIndex:
-          idx = B23;
-          break;
-        case PairPreIndex:
-          idx = B24 | B23;
-          break;
-        case PairOffset:
-          idx = B24;
-          break;
-        default:
-          UNREACHABLE();
-          break;
-      }
-      encoding_ =
-          idx |
-          ((static_cast<uint32_t>(offset >> scale) << kImm7Shift) & kImm7Mask) |
-          Arm64Encode::Rn(rn);
-      if (offset != 0) {
-        log2sz_ = scale;
-      }
-    }
     type_ = at;
-    base_ = ConcreteRegister(rn);
+    base_ = rn;
+    offset_ = offset;
   }
 
   // This addressing mode does not exist.
-  Address(Register rn,
-          Register offset,
-          AddressType at,
-          OperandSize sz = kEightBytes);
+  Address(Register rn, Register offset, AddressType at) = delete;
 
   static bool CanHoldOffset(int32_t offset,
                             AddressType at = Offset,
@@ -264,22 +209,20 @@ class Address : public ValueObject {
   static Address PC(int32_t pc_off) {
     ASSERT(CanHoldOffset(pc_off, PCOffset));
     Address addr;
-    addr.encoding_ = (((pc_off >> 2) << kImm19Shift) & kImm19Mask);
     addr.base_ = kNoRegister;
     addr.type_ = PCOffset;
-    addr.log2sz_ = -1;
+    addr.offset_ = pc_off;
     return addr;
   }
 
   static Address Pair(Register rn,
                       int32_t offset = 0,
-                      AddressType at = PairOffset,
-                      OperandSize sz = kEightBytes) {
-    return Address(rn, offset, at, sz);
+                      AddressType at = PairOffset) {
+    return Address(rn, offset, at);
   }
 
   // This addressing mode does not exist.
-  static Address PC(Register r);
+  static Address PC(Register r) = delete;
 
   enum Scaling {
     Unscaled,
@@ -298,12 +241,11 @@ class Address : public ValueObject {
     // Can only scale when ext = UXTX.
     ASSERT((scale != Scaled) || (ext == UXTX));
     ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
-    const int32_t s = (scale == Scaled) ? B12 : 0;
-    encoding_ = B21 | B11 | s | Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm) |
-                (static_cast<int32_t>(ext) << kExtendTypeShift);
     type_ = Reg;
-    base_ = ConcreteRegister(rn);
-    log2sz_ = -1;  // Any.
+    base_ = rn;
+    // Use offset_ to store pre-encoded scale, extend and rm.
+    offset_ = ((scale == Scaled) ? B12 : 0) | Arm64Encode::Rm(rm) |
+              (static_cast<int32_t>(ext) << kExtendTypeShift);
   }
 
   static OperandSize OperandSizeFor(intptr_t cid) {
@@ -354,16 +296,71 @@ class Address : public ValueObject {
   }
 
  private:
-  uint32_t encoding() const { return encoding_; }
+  uint32_t encoding(OperandSize sz) const {
+    const int32_t offset = offset_;
+    const int32_t scale = Log2OperandSizeBytes(sz);
+    ASSERT((type_ == Reg) || CanHoldOffset(offset, type_, sz));
+    switch (type_) {
+      case Offset:
+        if (Utils::IsUint(12 + scale, offset) &&
+            (offset == ((offset >> scale) << scale))) {
+          return B24 | ((offset >> scale) << kImm12Shift) |
+                 Arm64Encode::Rn(base_);
+        } else if (Utils::IsInt(9, offset)) {
+          return ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(base_);
+        } else {
+          FATAL("Offset %d is out of range\n", offset);
+        }
+      case PreIndex:
+      case PostIndex: {
+        ASSERT(Utils::IsInt(9, offset));
+        int32_t idx = (type_ == PostIndex) ? B10 : (B11 | B10);
+        return idx | ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(base_);
+      }
+      case PairOffset:
+      case PairPreIndex:
+      case PairPostIndex: {
+        ASSERT(Utils::IsInt(7 + scale, offset) &&
+               (static_cast<uint32_t>(offset) ==
+                ((static_cast<uint32_t>(offset) >> scale) << scale)));
+        int32_t idx = 0;
+        switch (type_) {
+          case PairPostIndex:
+            idx = B23;
+            break;
+          case PairPreIndex:
+            idx = B24 | B23;
+            break;
+          case PairOffset:
+            idx = B24;
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+        return idx |
+               ((static_cast<uint32_t>(offset >> scale) << kImm7Shift) &
+                kImm7Mask) |
+               Arm64Encode::Rn(base_);
+      }
+      case PCOffset:
+        return (((offset >> 2) << kImm19Shift) & kImm19Mask);
+      case Reg:
+        // Offset contains pre-encoded scale, extend and rm.
+        return B21 | B11 | Arm64Encode::Rn(base_) | offset;
+      case Unknown:
+        UNREACHABLE();
+    }
+  }
 
   AddressType type() const { return type_; }
   Register base() const { return base_; }
 
-  Address() : encoding_(0), type_(Unknown), base_(kNoRegister) {}
+  Address() : type_(Unknown), base_(kNoRegister), offset_(0) {}
 
-  uint32_t encoding_;
   AddressType type_;
   Register base_;
-  int32_t log2sz_;  // Required log2 of operand size (-1 means any).
+  int32_t offset_;
 
   friend class Assembler;
 };
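The hunk above is the heart of the change: Address no longer bakes the size-dependent bits into an eagerly computed encoding_ field. It just stores base_, type_, and offset_, and encoding(OperandSize sz) derives the instruction bits (and validates offset range and alignment) only when a load/store supplies its operand size, so one Address value works for any width. A minimal, self-contained sketch of that idea; the names and bit layout here are invented for illustration and are not the SDK's actual code:

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  enum OperandSize { kOneByte = 0, kTwoByte = 1, kFourByte = 2, kEightByte = 3 };

  class Address {
   public:
    explicit Address(int base, int32_t offset = 0)
        : base_(base), offset_(offset) {}

    // Size-dependent encoding is computed here, at the load/store site,
    // instead of in the constructor.
    uint32_t encoding(OperandSize sz) const {
      const int scale = static_cast<int>(sz);
      assert((offset_ & ((1 << scale) - 1)) == 0);  // offset must be aligned
      return (static_cast<uint32_t>(offset_ >> scale) << 10) |
             static_cast<uint32_t>(base_ & 0x1f);
    }

   private:
    int base_;
    int32_t offset_;
  };

  int main() {
    Address a(/*base=*/5, /*offset=*/8);
    // The same Address can back loads of different widths.
    std::printf("%08x %08x\n", a.encoding(kFourByte), a.encoding(kEightByte));
    return 0;
  }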
@@ -376,11 +373,11 @@ class FieldAddress : public Address {
     return Address::CanHoldOffset(offset - kHeapObjectTag, at, sz);
   }
 
-  FieldAddress(Register base, int32_t disp, OperandSize sz = kEightBytes)
-      : Address(base, disp - kHeapObjectTag, Offset, sz) {}
+  FieldAddress(Register base, int32_t disp)
+      : Address(base, disp - kHeapObjectTag) {}
 
   // This addressing mode does not exist.
-  FieldAddress(Register base, Register disp, OperandSize sz = kEightBytes);
+  FieldAddress(Register base, Register disp) = delete;
 
   FieldAddress(const FieldAddress& other) : Address(other) {}
 
@@ -653,15 +650,14 @@ class Assembler : public AssemblerBase {
                                         int8_t value) override {
     EnsureHasClassIdInDEBUG(kFunctionTypeCid, type, TMP);
     ldr(TMP,
-        FieldAddress(type, compiler::target::FunctionType::nullability_offset(),
-                     kByte),
+        FieldAddress(type,
+                     compiler::target::FunctionType::nullability_offset()),
         kUnsignedByte);
     cmp(TMP, Operand(value));
   }
   void CompareTypeNullabilityWith(Register type, int8_t value) override {
     EnsureHasClassIdInDEBUG(kTypeCid, type, TMP);
-    ldr(TMP,
-        FieldAddress(type, compiler::target::Type::nullability_offset(), kByte),
+    ldr(TMP, FieldAddress(type, compiler::target::Type::nullability_offset()),
         kUnsignedByte);
     cmp(TMP, Operand(value));
   }
@@ -1417,11 +1413,11 @@ class Assembler : public AssemblerBase {
   void fcvtds(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FCVTDS, vd, vn); }
   void fldrq(VRegister vt, Address a) {
     ASSERT(a.type() != Address::PCOffset);
-    EmitLoadStoreReg(FLDRQ, static_cast<Register>(vt), a, kByte);
+    EmitLoadStoreReg(FLDRQ, static_cast<Register>(vt), a, kQWord);
   }
   void fstrq(VRegister vt, Address a) {
     ASSERT(a.type() != Address::PCOffset);
-    EmitLoadStoreReg(FSTRQ, static_cast<Register>(vt), a, kByte);
+    EmitLoadStoreReg(FSTRQ, static_cast<Register>(vt), a, kQWord);
   }
   void fldrd(VRegister vt, Address a) {
     ASSERT(a.type() != Address::PCOffset);
@@ -1650,20 +1646,18 @@ class Assembler : public AssemblerBase {
     fldrq(reg, Address(SP, 1 * kQuadSize, Address::PostIndex));
   }
   void PushDoublePair(VRegister low, VRegister high) {
-    fstp(low, high,
-         Address(SP, -2 * kDoubleSize, Address::PairPreIndex, kDWord), kDWord);
+    fstp(low, high, Address(SP, -2 * kDoubleSize, Address::PairPreIndex),
+         kDWord);
   }
   void PopDoublePair(VRegister low, VRegister high) {
-    fldp(low, high,
-         Address(SP, 2 * kDoubleSize, Address::PairPostIndex, kDWord), kDWord);
+    fldp(low, high, Address(SP, 2 * kDoubleSize, Address::PairPostIndex),
+         kDWord);
  }
   void PushQuadPair(VRegister low, VRegister high) {
-    fstp(low, high, Address(SP, -2 * kQuadSize, Address::PairPreIndex, kQWord),
-         kQWord);
+    fstp(low, high, Address(SP, -2 * kQuadSize, Address::PairPreIndex), kQWord);
   }
   void PopQuadPair(VRegister low, VRegister high) {
-    fldp(low, high, Address(SP, 2 * kQuadSize, Address::PairPostIndex, kQWord),
-         kQWord);
+    fldp(low, high, Address(SP, 2 * kQuadSize, Address::PairPostIndex), kQWord);
   }
   void TagAndPushPP() {
     // Add the heap object tag back to PP before putting it on the stack.
@@ -2762,9 +2756,8 @@ class Assembler : public AssemblerBase {
     ASSERT((op != LDR && op != STR && op != LDRS) || a.can_writeback_to(rt));
 
     const int32_t size = Log2OperandSizeBytes(sz);
-    ASSERT(a.log2sz_ == -1 || a.log2sz_ == size);
     const int32_t encoding =
-        op | ((size & 0x3) << kSzShift) | Arm64Encode::Rt(rt) | a.encoding();
+        op | ((size & 0x3) << kSzShift) | Arm64Encode::Rt(rt) | a.encoding(sz);
     Emit(encoding);
   }
 
@@ -2774,10 +2767,9 @@ class Assembler : public AssemblerBase {
                     OperandSize sz) {
     ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
            (sz == kUnsignedFourBytes));
-    ASSERT(a.log2sz_ == -1 || a.log2sz_ == Log2OperandSizeBytes(sz));
     ASSERT((rt != CSP) && (rt != R31));
     const int32_t size = (sz == kEightBytes) ? B30 : 0;
-    const int32_t encoding = op | size | Arm64Encode::Rt(rt) | a.encoding();
+    const int32_t encoding = op | size | Arm64Encode::Rt(rt) | a.encoding(sz);
     Emit(encoding);
   }
 
@@ -2792,7 +2784,6 @@ class Assembler : public AssemblerBase {
 
     ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
            (sz == kUnsignedFourBytes));
-    ASSERT(a.log2sz_ == -1 || a.log2sz_ == Log2OperandSizeBytes(sz));
     ASSERT((rt != CSP) && (rt != R31));
     ASSERT((rt2 != CSP) && (rt2 != R31));
     int32_t opc = 0;
@@ -2811,7 +2802,7 @@ class Assembler : public AssemblerBase {
         break;
     }
     const int32_t encoding =
-        opc | op | Arm64Encode::Rt(rt) | Arm64Encode::Rt2(rt2) | a.encoding();
+        opc | op | Arm64Encode::Rt(rt) | Arm64Encode::Rt2(rt2) | a.encoding(sz);
     Emit(encoding);
   }
 
@@ -2822,7 +2813,6 @@ class Assembler : public AssemblerBase {
                     OperandSize sz) {
     ASSERT(op != FLDP || rt != rt2);
     ASSERT((sz == kSWord) || (sz == kDWord) || (sz == kQWord));
-    ASSERT(a.log2sz_ == -1 || a.log2sz_ == Log2OperandSizeBytes(sz));
     int32_t opc = 0;
     switch (sz) {
       case kSWord:
@@ -2840,7 +2830,7 @@ class Assembler : public AssemblerBase {
     }
     const int32_t encoding =
         opc | op | Arm64Encode::Rt(static_cast<Register>(rt)) |
-        Arm64Encode::Rt2(static_cast<Register>(rt2)) | a.encoding();
+        Arm64Encode::Rt2(static_cast<Register>(rt2)) | a.encoding(sz);
     Emit(encoding);
   }
 
@@ -666,9 +666,9 @@ ASSEMBLER_TEST_GENERATE(LoadSigned32Bit, assembler) {
          Operand(2 * target::kWordSize));  // Must not access beyond CSP.
 
   __ LoadImmediate(R1, 0xffffffff);
-  __ str(R1, Address(SP, -4, Address::PreIndex, kFourBytes), kFourBytes);
+  __ str(R1, Address(SP, -4, Address::PreIndex), kFourBytes);
   __ ldr(R0, Address(SP), kFourBytes);
-  __ ldr(R1, Address(SP, 4, Address::PostIndex, kFourBytes), kFourBytes);
+  __ ldr(R1, Address(SP, 4, Address::PostIndex), kFourBytes);
   __ RestoreCSP();
   __ ret();
 }
@@ -757,13 +757,9 @@ ASSEMBLER_TEST_GENERATE(LoadStorePairUnsigned32, assembler) {
   __ LoadImmediate(R3, 0xBBCCDDEEFF998877);
   __ sub(SP, SP, Operand(4 * target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
-  __ stp(R2, R3,
-         Address(SP, 2 * sizeof(uint32_t), Address::PairOffset,
-                 compiler::kUnsignedFourBytes),
+  __ stp(R2, R3, Address(SP, 2 * sizeof(uint32_t), Address::PairOffset),
          kUnsignedFourBytes);
-  __ ldp(R0, R1,
-         Address(SP, 2 * sizeof(uint32_t), Address::PairOffset,
-                 kUnsignedFourBytes),
+  __ ldp(R0, R1, Address(SP, 2 * sizeof(uint32_t), Address::PairOffset),
          kUnsignedFourBytes);
   __ add(SP, SP, Operand(4 * target::kWordSize));
   __ sub(R0, R0, Operand(R1));
@@ -801,11 +797,9 @@ ASSEMBLER_TEST_GENERATE(LoadStorePairSigned32, assembler) {
   __ LoadImmediate(R3, 0xBBCCDDEEFF998877);
   __ sub(SP, SP, Operand(4 * target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
-  __ stp(R2, R3,
-         Address(SP, 2 * sizeof(int32_t), Address::PairOffset, kFourBytes),
+  __ stp(R2, R3, Address(SP, 2 * sizeof(int32_t), Address::PairOffset),
          kFourBytes);
-  __ ldp(R0, R1,
-         Address(SP, 2 * sizeof(int32_t), Address::PairOffset, kFourBytes),
+  __ ldp(R0, R1, Address(SP, 2 * sizeof(int32_t), Address::PairOffset),
          kFourBytes);
   __ add(SP, SP, Operand(4 * target::kWordSize));
   __ sub(R0, R0, Operand(R1));
@@ -3645,7 +3639,7 @@ ASSEMBLER_TEST_RUN(LoadImmediateMedNeg4, test) {
 }
 
 ASSEMBLER_TEST_GENERATE(LoadHalfWordUnaligned, assembler) {
-  __ ldr(R1, R0, kTwoBytes);
+  __ ldr(R1, Address(R0), kTwoBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -3672,7 +3666,7 @@ ASSEMBLER_TEST_RUN(LoadHalfWordUnaligned, test) {
 }
 
 ASSEMBLER_TEST_GENERATE(LoadHalfWordUnsignedUnaligned, assembler) {
-  __ ldr(R1, R0, kUnsignedTwoBytes);
+  __ ldr(R1, Address(R0), kUnsignedTwoBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -3698,7 +3692,7 @@ ASSEMBLER_TEST_RUN(LoadHalfWordUnsignedUnaligned, test) {
 
 ASSEMBLER_TEST_GENERATE(StoreHalfWordUnaligned, assembler) {
   __ LoadImmediate(R1, 0xABCD);
-  __ str(R1, R0, kTwoBytes);
+  __ str(R1, Address(R0), kTwoBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -3731,7 +3725,7 @@ ASSEMBLER_TEST_RUN(StoreHalfWordUnaligned, test) {
 }
 
 ASSEMBLER_TEST_GENERATE(LoadWordUnaligned, assembler) {
-  __ ldr(R1, R0, kUnsignedFourBytes);
+  __ ldr(R1, Address(R0), kUnsignedFourBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -3765,7 +3759,7 @@ ASSEMBLER_TEST_RUN(LoadWordUnaligned, test) {
 
 ASSEMBLER_TEST_GENERATE(StoreWordUnaligned, assembler) {
   __ LoadImmediate(R1, 0x12345678);
-  __ str(R1, R0, kUnsignedFourBytes);
+  __ str(R1, Address(R0), kUnsignedFourBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -5192,7 +5186,7 @@ ASSEMBLER_TEST_GENERATE(FldrdFstrdLargeOffset, assembler) {
   __ LoadDImmediate(V1, 42.0);
   __ sub(SP, SP, Operand(512 * target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
-  __ fstrd(V1, Address(SP, 512 * target::kWordSize, Address::Offset));
+  __ fstrd(V1, Address(SP, 512 * target::kWordSize));
   __ add(SP, SP, Operand(512 * target::kWordSize));
   __ fldrd(V0, Address(SP));
   __ RestoreCSP();
@@ -1652,9 +1652,7 @@ void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const Register result = locs()->out(0).reg();
   __ LoadCompressedSmi(result,
                        compiler::FieldAddress(str, String::length_offset()));
-  __ ldr(TMP,
-         compiler::FieldAddress(str, OneByteString::data_offset(),
-                                compiler::kByte),
+  __ ldr(TMP, compiler::FieldAddress(str, OneByteString::data_offset()),
          compiler::kUnsignedByte);
   __ CompareImmediate(result, Smi::RawValue(1));
   __ LoadImmediate(result, -1);
@@ -2275,10 +2273,10 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   if (emit_full_guard) {
     __ LoadObject(field_reg, Field::ZoneHandle((field().Original())));
 
-    compiler::FieldAddress field_cid_operand(
-        field_reg, Field::guarded_cid_offset(), compiler::kUnsignedTwoBytes);
+    compiler::FieldAddress field_cid_operand(field_reg,
+                                             Field::guarded_cid_offset());
     compiler::FieldAddress field_nullability_operand(
-        field_reg, Field::is_nullable_offset(), compiler::kUnsignedTwoBytes);
+        field_reg, Field::is_nullable_offset());
 
     if (value_cid == kDynamicCid) {
       LoadValueCid(compiler, value_cid_reg, value_reg);
|
||||||
__ StoreCompressedIntoObjectNoBarrier(
|
__ StoreCompressedIntoObjectNoBarrier(
|
||||||
AllocateArrayABI::kResultReg,
|
AllocateArrayABI::kResultReg,
|
||||||
compiler::FieldAddress(AllocateArrayABI::kResultReg,
|
compiler::FieldAddress(AllocateArrayABI::kResultReg,
|
||||||
Array::type_arguments_offset(),
|
Array::type_arguments_offset()),
|
||||||
compiler::kObjectBytes),
|
|
||||||
AllocateArrayABI::kTypeArgumentsReg);
|
AllocateArrayABI::kTypeArgumentsReg);
|
||||||
|
|
||||||
// Set the length field.
|
// Set the length field.
|
||||||
__ StoreCompressedIntoObjectNoBarrier(
|
__ StoreCompressedIntoObjectNoBarrier(
|
||||||
AllocateArrayABI::kResultReg,
|
AllocateArrayABI::kResultReg,
|
||||||
compiler::FieldAddress(AllocateArrayABI::kResultReg,
|
compiler::FieldAddress(AllocateArrayABI::kResultReg,
|
||||||
Array::length_offset(), compiler::kObjectBytes),
|
Array::length_offset()),
|
||||||
AllocateArrayABI::kLengthReg);
|
AllocateArrayABI::kLengthReg);
|
||||||
|
|
||||||
// TODO(zra): Use stp once added.
|
// TODO(zra): Use stp once added.
|
||||||
|
@@ -2566,9 +2563,7 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
     intptr_t current_offset = 0;
     while (current_offset < array_size) {
       __ StoreCompressedIntoObjectNoBarrier(
-          AllocateArrayABI::kResultReg,
-          compiler::Address(R8, current_offset, compiler::Address::Offset,
-                            compiler::kObjectBytes),
+          AllocateArrayABI::kResultReg, compiler::Address(R8, current_offset),
           NULL_REG);
       current_offset += kCompressedWordSize;
     }
@@ -2577,11 +2572,8 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
     __ Bind(&init_loop);
     __ CompareRegisters(R8, R3);
     __ b(&end_loop, CS);
-    __ StoreCompressedIntoObjectNoBarrier(
-        AllocateArrayABI::kResultReg,
-        compiler::Address(R8, 0, compiler::Address::Offset,
-                          compiler::kObjectBytes),
-        NULL_REG);
+    __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+                                          compiler::Address(R8, 0), NULL_REG);
     __ AddImmediate(R8, kCompressedWordSize);
     __ b(&init_loop);
     __ Bind(&end_loop);
@@ -5168,8 +5160,7 @@ void CheckWritableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   compiler->AddSlowPathCode(slow_path);
   __ ldr(TMP,
          compiler::FieldAddress(locs()->in(0).reg(),
-                                compiler::target::Object::tags_offset(),
-                                compiler::kUnsignedByte),
+                                compiler::target::Object::tags_offset()),
          compiler::kUnsignedByte);
   // In the first byte.
   ASSERT(compiler::target::UntaggedObject::kImmutableBit < 8);
@@ -1807,10 +1807,8 @@ void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
   Label slow_case;
 
   // Load num. variable (int32) in the existing context.
-  __ ldr(
-      R1,
-      FieldAddress(R5, target::Context::num_variables_offset(), kFourBytes),
-      kFourBytes);
+  __ ldr(R1, FieldAddress(R5, target::Context::num_variables_offset()),
+         kFourBytes);
 
   GenerateAllocateContextSpaceStub(assembler, &slow_case);
 
@@ -3055,31 +3053,26 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
   // Closure handling.
   {
     __ Comment("Closure");
-    __ LoadCompressed(
-        STCInternalRegs::kInstanceCidOrSignatureReg,
-        FieldAddress(TypeTestABI::kInstanceReg,
-                     target::Closure::function_offset(), kObjectBytes));
-    __ LoadCompressed(
-        STCInternalRegs::kInstanceCidOrSignatureReg,
-        FieldAddress(STCInternalRegs::kInstanceCidOrSignatureReg,
-                     target::Function::signature_offset(), kObjectBytes));
+    __ LoadCompressed(STCInternalRegs::kInstanceCidOrSignatureReg,
+                      FieldAddress(TypeTestABI::kInstanceReg,
+                                   target::Closure::function_offset()));
+    __ LoadCompressed(STCInternalRegs::kInstanceCidOrSignatureReg,
+                      FieldAddress(STCInternalRegs::kInstanceCidOrSignatureReg,
+                                   target::Function::signature_offset()));
     if (n >= 3) {
       __ LoadCompressed(
           STCInternalRegs::kInstanceInstantiatorTypeArgumentsReg,
           FieldAddress(TypeTestABI::kInstanceReg,
-                       target::Closure::instantiator_type_arguments_offset(),
-                       kObjectBytes));
+                       target::Closure::instantiator_type_arguments_offset()));
      if (n >= 7) {
        __ LoadCompressed(
            STCInternalRegs::kInstanceParentFunctionTypeArgumentsReg,
            FieldAddress(TypeTestABI::kInstanceReg,
-                        target::Closure::function_type_arguments_offset(),
-                        kObjectBytes));
+                        target::Closure::function_type_arguments_offset()));
        __ LoadCompressed(
            STCInternalRegs::kInstanceDelayedFunctionTypeArgumentsReg,
            FieldAddress(TypeTestABI::kInstanceReg,
-                        target::Closure::delayed_type_arguments_offset(),
-                        kObjectBytes));
+                        target::Closure::delayed_type_arguments_offset()));
      }
    }
    __ b(&loop);
@@ -3103,7 +3096,7 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
     __ add(kScratchReg, TypeTestABI::kInstanceReg,
            Operand(kScratchReg, LSL, kCompressedWordSizeLog2));
     __ LoadCompressed(STCInternalRegs::kInstanceInstantiatorTypeArgumentsReg,
-                      FieldAddress(kScratchReg, 0, kObjectBytes));
+                      FieldAddress(kScratchReg, 0));
     __ Bind(&has_no_type_arguments);
     __ Comment("No type arguments");
 
@@ -3126,8 +3119,7 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
       kScratchReg,
       Address(kCacheArrayReg,
               target::kCompressedWordSize *
-                  target::SubtypeTestCache::kInstanceCidOrSignature,
-              Address::Offset, kObjectBytes));
+                  target::SubtypeTestCache::kInstanceCidOrSignature));
   __ CompareObjectRegisters(kScratchReg, kNullReg);
   __ b(&done, EQ);
   __ CompareObjectRegisters(kScratchReg,
|
||||||
__ LoadCompressed(kScratchReg,
|
__ LoadCompressed(kScratchReg,
|
||||||
Address(kCacheArrayReg,
|
Address(kCacheArrayReg,
|
||||||
target::kCompressedWordSize *
|
target::kCompressedWordSize *
|
||||||
target::SubtypeTestCache::kDestinationType,
|
target::SubtypeTestCache::kDestinationType));
|
||||||
Address::Offset, kObjectBytes));
|
|
||||||
__ cmp(kScratchReg, Operand(TypeTestABI::kDstTypeReg));
|
__ cmp(kScratchReg, Operand(TypeTestABI::kDstTypeReg));
|
||||||
__ b(&next_iteration, NE);
|
__ b(&next_iteration, NE);
|
||||||
__ LoadCompressed(
|
__ LoadCompressed(
|
||||||
kScratchReg,
|
kScratchReg,
|
||||||
Address(kCacheArrayReg,
|
Address(kCacheArrayReg,
|
||||||
target::kCompressedWordSize *
|
target::kCompressedWordSize *
|
||||||
target::SubtypeTestCache::kInstanceTypeArguments,
|
target::SubtypeTestCache::kInstanceTypeArguments));
|
||||||
Address::Offset, kObjectBytes));
|
|
||||||
__ cmp(kScratchReg,
|
__ cmp(kScratchReg,
|
||||||
Operand(STCInternalRegs::kInstanceInstantiatorTypeArgumentsReg));
|
Operand(STCInternalRegs::kInstanceInstantiatorTypeArgumentsReg));
|
||||||
if (n == 3) {
|
if (n == 3) {
|
||||||
|
@ -3159,16 +3149,14 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
|
||||||
kScratchReg,
|
kScratchReg,
|
||||||
Address(kCacheArrayReg,
|
Address(kCacheArrayReg,
|
||||||
target::kCompressedWordSize *
|
target::kCompressedWordSize *
|
||||||
target::SubtypeTestCache::kInstantiatorTypeArguments,
|
target::SubtypeTestCache::kInstantiatorTypeArguments));
|
||||||
Address::Offset, kObjectBytes));
|
|
||||||
__ cmp(kScratchReg, Operand(TypeTestABI::kInstantiatorTypeArgumentsReg));
|
__ cmp(kScratchReg, Operand(TypeTestABI::kInstantiatorTypeArgumentsReg));
|
||||||
__ b(&next_iteration, NE);
|
__ b(&next_iteration, NE);
|
||||||
__ LoadCompressed(
|
__ LoadCompressed(
|
||||||
kScratchReg,
|
kScratchReg,
|
||||||
Address(kCacheArrayReg,
|
Address(kCacheArrayReg,
|
||||||
target::kCompressedWordSize *
|
target::kCompressedWordSize *
|
||||||
target::SubtypeTestCache::kFunctionTypeArguments,
|
target::SubtypeTestCache::kFunctionTypeArguments));
|
||||||
Address::Offset, kObjectBytes));
|
|
||||||
__ cmp(kScratchReg, Operand(TypeTestABI::kFunctionTypeArgumentsReg));
|
__ cmp(kScratchReg, Operand(TypeTestABI::kFunctionTypeArgumentsReg));
|
||||||
if (n == 5) {
|
if (n == 5) {
|
||||||
__ b(&found, EQ);
|
__ b(&found, EQ);
|
||||||
|
@ -3176,23 +3164,22 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
|
||||||
ASSERT(n == 7);
|
ASSERT(n == 7);
|
||||||
__ b(&next_iteration, NE);
|
__ b(&next_iteration, NE);
|
||||||
|
|
||||||
__ LoadCompressed(kScratchReg,
|
__ LoadCompressed(
|
||||||
Address(kCacheArrayReg,
|
kScratchReg, Address(kCacheArrayReg,
|
||||||
target::kCompressedWordSize *
|
target::kCompressedWordSize *
|
||||||
target::SubtypeTestCache::
|
target::SubtypeTestCache::
|
||||||
kInstanceParentFunctionTypeArguments,
|
kInstanceParentFunctionTypeArguments));
|
||||||
Address::Offset, kObjectBytes));
|
|
||||||
__ cmp(
|
__ cmp(
|
||||||
kScratchReg,
|
kScratchReg,
|
||||||
Operand(STCInternalRegs::kInstanceParentFunctionTypeArgumentsReg));
|
Operand(STCInternalRegs::kInstanceParentFunctionTypeArgumentsReg));
|
||||||
__ b(&next_iteration, NE);
|
__ b(&next_iteration, NE);
|
||||||
|
|
||||||
__ LoadCompressed(kScratchReg,
|
__ LoadCompressed(
|
||||||
Address(kCacheArrayReg,
|
kScratchReg,
|
||||||
target::kCompressedWordSize *
|
Address(kCacheArrayReg,
|
||||||
target::SubtypeTestCache::
|
target::kCompressedWordSize *
|
||||||
kInstanceDelayedFunctionTypeArguments,
|
target::SubtypeTestCache::
|
||||||
Address::Offset, kObjectBytes));
|
kInstanceDelayedFunctionTypeArguments));
|
||||||
__ cmp(
|
__ cmp(
|
||||||
kScratchReg,
|
kScratchReg,
|
||||||
Operand(STCInternalRegs::kInstanceDelayedFunctionTypeArgumentsReg));
|
Operand(STCInternalRegs::kInstanceDelayedFunctionTypeArgumentsReg));
|
||||||
|
@ -3209,11 +3196,10 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
|
||||||
|
|
||||||
__ Bind(&found);
|
__ Bind(&found);
|
||||||
__ Comment("Found");
|
__ Comment("Found");
|
||||||
__ LoadCompressed(TypeTestABI::kSubtypeTestCacheResultReg,
|
__ LoadCompressed(
|
||||||
Address(kCacheArrayReg,
|
TypeTestABI::kSubtypeTestCacheResultReg,
|
||||||
target::kCompressedWordSize *
|
Address(kCacheArrayReg, target::kCompressedWordSize *
|
||||||
target::SubtypeTestCache::kTestResult,
|
target::SubtypeTestCache::kTestResult));
|
||||||
Address::Offset, kObjectBytes));
|
|
||||||
__ Bind(&done);
|
__ Bind(&done);
|
||||||
__ Comment("Done");
|
__ Comment("Done");
|
||||||
__ ret();
|
__ ret();
|
||||||
|
@@ -3512,9 +3498,8 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
   // proper target for the given name and arguments descriptor. If the
   // illegal class id was found, the target is a cache miss handler that can
   // be invoked as a normal Dart function.
-  __ LoadCompressed(
-      FUNCTION_REG,
-      FieldAddress(TMP, base + target::kCompressedWordSize, kObjectBytes));
+  __ LoadCompressed(FUNCTION_REG,
+                    FieldAddress(TMP, base + target::kCompressedWordSize));
   __ ldr(R1,
          FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
   __ ldr(ARGS_DESC_REG,
|
||||||
if (FLAG_precompiled_mode) {
|
if (FLAG_precompiled_mode) {
|
||||||
const intptr_t entry_offset =
|
const intptr_t entry_offset =
|
||||||
target::ICData::EntryPointIndexFor(1) * target::kCompressedWordSize;
|
target::ICData::EntryPointIndexFor(1) * target::kCompressedWordSize;
|
||||||
__ LoadCompressed(R1,
|
__ LoadCompressed(R1, Address(R8, entry_offset));
|
||||||
Address(R8, entry_offset, Address::Offset, kObjectBytes));
|
|
||||||
__ ldr(R1, FieldAddress(R1, target::Function::entry_point_offset()));
|
__ ldr(R1, FieldAddress(R1, target::Function::entry_point_offset()));
|
||||||
} else {
|
} else {
|
||||||
const intptr_t code_offset =
|
const intptr_t code_offset =
|
||||||
target::ICData::CodeIndexFor(1) * target::kCompressedWordSize;
|
target::ICData::CodeIndexFor(1) * target::kCompressedWordSize;
|
||||||
__ LoadCompressed(CODE_REG,
|
__ LoadCompressed(CODE_REG, Address(R8, code_offset));
|
||||||
Address(R8, code_offset, Address::Offset, kObjectBytes));
|
|
||||||
__ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
|
__ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
|
||||||
}
|
}
|
||||||
__ br(R1);
|
__ br(R1);
|
||||||
|
@ -3657,13 +3640,9 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
|
||||||
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
|
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
|
||||||
Label miss;
|
Label miss;
|
||||||
__ LoadClassIdMayBeSmi(R1, R0);
|
__ LoadClassIdMayBeSmi(R1, R0);
|
||||||
__ ldr(R2,
|
__ ldr(R2, FieldAddress(R5, target::SingleTargetCache::lower_limit_offset()),
|
||||||
FieldAddress(R5, target::SingleTargetCache::lower_limit_offset(),
|
|
||||||
kTwoBytes),
|
|
||||||
kUnsignedTwoBytes);
|
kUnsignedTwoBytes);
|
||||||
__ ldr(R3,
|
__ ldr(R3, FieldAddress(R5, target::SingleTargetCache::upper_limit_offset()),
|
||||||
FieldAddress(R5, target::SingleTargetCache::upper_limit_offset(),
|
|
||||||
kTwoBytes),
|
|
||||||
kUnsignedTwoBytes);
|
kUnsignedTwoBytes);
|
||||||
|
|
||||||
__ cmp(R1, Operand(R2));
|
__ cmp(R1, Operand(R2));
|
||||||
|
|