[vm] Make Address/FieldAddress independent of operand size on arm64
The operand size is dropped from the Address/FieldAddress constructors on
arm64. This makes code using Address/FieldAddress less fragile, avoids
repeating the size in both the Address/FieldAddress and the load/store
instruction, and better aligns the arm64 Address/FieldAddress with the other
architectures.

TEST=ci
Change-Id: I92d7c5c8f0239333f022deebc0472136018bb0fa
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/260072
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Alexander Markov <alexmarkov@google.com>
Parent: d0d509d4fc
Commit: f9355b1bf2
6 changed files with 177 additions and 232 deletions
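A minimal before/after sketch of the call-site pattern this change produces, using the load from the first hunk below (the registers and offset are just the ones that hunk happens to use):

// Before: the operand size appears twice, once inside the FieldAddress and
// once on the load itself, and the two mentions must agree.
__ ldr(
    R3,
    FieldAddress(R2, target::Class::num_type_arguments_offset(), kTwoBytes),
    kTwoBytes);

// After: the address is size-agnostic; only the load names the size.
__ ldr(R3, FieldAddress(R2, target::Class::num_type_arguments_offset()),
       kTwoBytes);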
@@ -1274,10 +1274,8 @@ void AsmIntrinsifier::ObjectRuntimeType(Assembler* assembler,
   __ Bind(&use_declaration_type);
   __ LoadClassById(R2, R1);
-  __ ldr(
-      R3,
-      FieldAddress(R2, target::Class::num_type_arguments_offset(), kTwoBytes),
-      kTwoBytes);
+  __ ldr(R3, FieldAddress(R2, target::Class::num_type_arguments_offset()),
+         kTwoBytes);
   __ cbnz(normal_ir_body, R3);

   __ LoadCompressed(R0,
@@ -1465,9 +1463,9 @@ void AsmIntrinsifier::Type_equality(Assembler* assembler,
   // Check nullability.
   __ Bind(&equiv_cids);
-  __ ldr(R1, FieldAddress(R1, target::Type::nullability_offset(), kByte),
+  __ ldr(R1, FieldAddress(R1, target::Type::nullability_offset()),
          kUnsignedByte);
-  __ ldr(R2, FieldAddress(R2, target::Type::nullability_offset(), kByte),
+  __ ldr(R2, FieldAddress(R2, target::Type::nullability_offset()),
          kUnsignedByte);
   __ cmp(R1, Operand(R2));
   __ b(&check_legacy, NE);
@@ -1525,12 +1523,11 @@ void AsmIntrinsifier::Object_getHash(Assembler* assembler,
                                      Label* normal_ir_body) {
   Label not_yet_computed;
   __ ldr(R0, Address(SP, 0 * target::kWordSize));  // Object.
-  __ ldr(R0,
-         FieldAddress(R0,
-                      target::Object::tags_offset() +
-                          target::UntaggedObject::kHashTagPos / kBitsPerByte,
-                      kFourBytes),
-         kUnsignedFourBytes);
+  __ ldr(
+      R0,
+      FieldAddress(R0, target::Object::tags_offset() +
+                           target::UntaggedObject::kHashTagPos / kBitsPerByte),
+      kUnsignedFourBytes);
   __ cbz(&not_yet_computed, R0);
   __ SmiTag(R0);
   __ ret();
@@ -1935,7 +1932,7 @@ void AsmIntrinsifier::OneByteString_substringUnchecked(Assembler* assembler,
   __ AddImmediate(R6, 1);
   __ sub(R2, R2, Operand(1));
   __ cmp(R2, Operand(0));
-  __ str(R1, FieldAddress(R7, target::OneByteString::data_offset(), kByte),
+  __ str(R1, FieldAddress(R7, target::OneByteString::data_offset()),
          kUnsignedByte);
   __ AddImmediate(R7, 1);
   __ b(&loop, GT);
@@ -860,7 +860,7 @@ Address Assembler::PrepareLargeOffset(Register base,
                                       int32_t offset,
                                       OperandSize sz) {
   if (Address::CanHoldOffset(offset, Address::Offset, sz)) {
-    return Address(base, offset, Address::Offset, sz);
+    return Address(base, offset);
   }
   ASSERT(base != TMP2);
   Operand op;
@@ -870,7 +870,7 @@ Address Assembler::PrepareLargeOffset(Register base,
       (Operand::CanHold(upper20, kXRegSizeInBits, &op) == Operand::Immediate) &&
       Address::CanHoldOffset(lower12, Address::Offset, sz)) {
     add(TMP2, base, op);
-    return Address(TMP2, lower12, Address::Offset, sz);
+    return Address(TMP2, lower12);
   }
   LoadImmediate(TMP2, offset);
   return Address(base, TMP2);
@@ -1103,10 +1103,8 @@ void Assembler::StoreBarrier(Register object,
   if (can_be_smi == kValueCanBeSmi) {
     BranchIfSmi(value, &done);
   }
-  ldr(TMP, FieldAddress(object, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
-  ldr(TMP2, FieldAddress(value, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
+  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
+  ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
   and_(TMP, TMP2,
        Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
   tst(TMP, Operand(HEAP_BITS, LSR, 32));
@@ -1158,7 +1156,7 @@ void Assembler::StoreCompressedIntoArray(Register object,
                                          Register slot,
                                          Register value,
                                          CanBeSmi can_be_smi) {
-  str(value, Address(slot, 0, Address::Offset, kObjectBytes), kObjectBytes);
+  str(value, Address(slot, 0), kObjectBytes);
   StoreIntoArrayBarrier(object, slot, value, can_be_smi);
 }

@@ -1185,10 +1183,8 @@ void Assembler::StoreIntoArrayBarrier(Register object,
   if (can_be_smi == kValueCanBeSmi) {
     BranchIfSmi(value, &done);
   }
-  ldr(TMP, FieldAddress(object, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
-  ldr(TMP2, FieldAddress(value, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
+  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
+  ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
   and_(TMP, TMP2,
        Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
   tst(TMP, Operand(HEAP_BITS, LSR, 32));
@@ -1226,8 +1222,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
   Label done;
   StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);

-  ldr(TMP, FieldAddress(object, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
+  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
   tsti(TMP, Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
   b(&done, ZERO);

@@ -1253,8 +1248,7 @@ void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
   Label done;
   StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);

-  ldr(TMP, FieldAddress(object, target::Object::tags_offset(), kByte),
-      kUnsignedByte);
+  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
   tsti(TMP, Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
   b(&done, ZERO);

@@ -1905,13 +1899,13 @@ void Assembler::MonomorphicCheckedEntryJIT() {
   const intptr_t count_offset = target::Array::element_offset(1);

   // Sadly this cannot use ldp because ldp requires aligned offsets.
-  ldr(R1, FieldAddress(R5, cid_offset, kObjectBytes), kObjectBytes);
-  ldr(R2, FieldAddress(R5, count_offset, kObjectBytes), kObjectBytes);
+  ldr(R1, FieldAddress(R5, cid_offset), kObjectBytes);
+  ldr(R2, FieldAddress(R5, count_offset), kObjectBytes);
   LoadClassIdMayBeSmi(IP0, R0);
   add(R2, R2, Operand(target::ToRawSmi(1)), kObjectBytes);
   cmp(R1, Operand(IP0, LSL, 1), kObjectBytes);
   b(&miss, NE);
-  str(R2, FieldAddress(R5, count_offset, kObjectBytes), kObjectBytes);
+  str(R2, FieldAddress(R5, count_offset), kObjectBytes);
   LoadImmediate(R4, 0);  // GC-safe for OptimizeInvokedFunction

   // Fall through to unchecked entry.
@@ -2108,7 +2102,7 @@ Address Assembler::ElementAddressForIntIndex(bool is_external,
   ASSERT(Utils::IsInt(32, offset));
   const OperandSize size = Address::OperandSizeFor(cid);
   ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
-  return Address(array, static_cast<int32_t>(offset), Address::Offset, size);
+  return Address(array, static_cast<int32_t>(offset));
 }

 void Assembler::ComputeElementAddressForIntIndex(Register address,
@@ -2174,7 +2168,7 @@ Address Assembler::ElementAddressForRegIndexWithSize(bool is_external,
     }
   }
   ASSERT(Address::CanHoldOffset(offset, Address::Offset, size));
-  return Address(temp, offset, Address::Offset, size);
+  return Address(temp, offset);
 }

 void Assembler::ComputeElementAddressForRegIndex(Register address,
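PrepareLargeOffset above falls back to splitting an out-of-range offset into an upper part that is folded into the base register with a single add and a lower part that still fits the unsigned scaled imm12 addressing mode. The split itself is not shown in this diff; a plausible sketch, with the upper20/lower12 names taken from the hunk:

// Hypothetical reconstruction of the split used above: the low 12 bits stay
// in the Address, the rest becomes an add-immediate onto TMP2 (subject to
// the Operand::CanHold and Address::CanHoldOffset checks in the hunk).
const int32_t lower12 = offset & 0xfff;
const int32_t upper20 = offset & ~0xfff;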
@@ -136,16 +136,14 @@ class Address : public ValueObject {
  public:
   Address(const Address& other)
       : ValueObject(),
-        encoding_(other.encoding_),
         type_(other.type_),
         base_(other.base_),
-        log2sz_(other.log2sz_) {}
+        offset_(other.offset_) {}

   Address& operator=(const Address& other) {
-    encoding_ = other.encoding_;
     type_ = other.type_;
     base_ = other.base_;
-    log2sz_ = other.log2sz_;
+    offset_ = other.offset_;
     return *this;
   }

@@ -167,74 +165,21 @@ class Address : public ValueObject {
   bool can_writeback_to(Register r) const {
     if (type() == PreIndex || type() == PostIndex || type() == PairPreIndex ||
         type() == PairPostIndex) {
-      return base() != r;
+      return ConcreteRegister(base()) != ConcreteRegister(r);
     }
     return true;
   }

-  // Offset is in bytes. For the unsigned imm12 case, we unscale based on the
-  // operand size, and assert that offset is aligned accordingly.
-  // For the smaller signed imm9 case, the offset is the number of bytes, but
-  // is unscaled.
-  Address(Register rn,
-          int32_t offset = 0,
-          AddressType at = Offset,
-          OperandSize sz = kEightBytes) {
+  // Offset is in bytes.
+  explicit Address(Register rn, int32_t offset = 0, AddressType at = Offset) {
     ASSERT((rn != kNoRegister) && (rn != R31) && (rn != ZR));
-    ASSERT(CanHoldOffset(offset, at, sz));
-    log2sz_ = -1;
-    const int32_t scale = Log2OperandSizeBytes(sz);
-    if ((at == Offset) && Utils::IsUint(12 + scale, offset) &&
-        (offset == ((offset >> scale) << scale))) {
-      encoding_ =
-          B24 | ((offset >> scale) << kImm12Shift) | Arm64Encode::Rn(rn);
-      if (offset != 0) {
-        log2sz_ = scale;
-      }
-    } else if ((at == Offset) && Utils::IsInt(9, offset)) {
-      encoding_ = ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(rn);
-    } else if ((at == PreIndex) || (at == PostIndex)) {
-      ASSERT(Utils::IsInt(9, offset));
-      int32_t idx = (at == PostIndex) ? B10 : (B11 | B10);
-      encoding_ = idx | ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(rn);
-    } else {
-      ASSERT((at == PairOffset) || (at == PairPreIndex) ||
-             (at == PairPostIndex));
-      ASSERT(Utils::IsInt(7 + scale, offset) &&
-             (static_cast<uint32_t>(offset) ==
-              ((static_cast<uint32_t>(offset) >> scale) << scale)));
-      int32_t idx = 0;
-      switch (at) {
-        case PairPostIndex:
-          idx = B23;
-          break;
-        case PairPreIndex:
-          idx = B24 | B23;
-          break;
-        case PairOffset:
-          idx = B24;
-          break;
-        default:
-          UNREACHABLE();
-          break;
-      }
-      encoding_ =
-          idx |
-          ((static_cast<uint32_t>(offset >> scale) << kImm7Shift) & kImm7Mask) |
-          Arm64Encode::Rn(rn);
-      if (offset != 0) {
-        log2sz_ = scale;
-      }
-    }
     type_ = at;
-    base_ = ConcreteRegister(rn);
+    base_ = rn;
+    offset_ = offset;
   }

   // This addressing mode does not exist.
-  Address(Register rn,
-          Register offset,
-          AddressType at,
-          OperandSize sz = kEightBytes);
+  Address(Register rn, Register offset, AddressType at) = delete;

   static bool CanHoldOffset(int32_t offset,
                             AddressType at = Offset,
@@ -264,22 +209,20 @@ class Address : public ValueObject {
   static Address PC(int32_t pc_off) {
     ASSERT(CanHoldOffset(pc_off, PCOffset));
     Address addr;
-    addr.encoding_ = (((pc_off >> 2) << kImm19Shift) & kImm19Mask);
     addr.base_ = kNoRegister;
     addr.type_ = PCOffset;
-    addr.log2sz_ = -1;
+    addr.offset_ = pc_off;
     return addr;
   }

   static Address Pair(Register rn,
                       int32_t offset = 0,
-                      AddressType at = PairOffset,
-                      OperandSize sz = kEightBytes) {
-    return Address(rn, offset, at, sz);
+                      AddressType at = PairOffset) {
+    return Address(rn, offset, at);
   }

   // This addressing mode does not exist.
-  static Address PC(Register r);
+  static Address PC(Register r) = delete;

   enum Scaling {
     Unscaled,
@@ -298,12 +241,11 @@ class Address : public ValueObject {
     // Can only scale when ext = UXTX.
     ASSERT((scale != Scaled) || (ext == UXTX));
     ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
-    const int32_t s = (scale == Scaled) ? B12 : 0;
-    encoding_ = B21 | B11 | s | Arm64Encode::Rn(rn) | Arm64Encode::Rm(rm) |
-                (static_cast<int32_t>(ext) << kExtendTypeShift);
     type_ = Reg;
-    base_ = ConcreteRegister(rn);
-    log2sz_ = -1;  // Any.
+    base_ = rn;
+    // Use offset_ to store pre-encoded scale, extend and rm.
+    offset_ = ((scale == Scaled) ? B12 : 0) | Arm64Encode::Rm(rm) |
+              (static_cast<int32_t>(ext) << kExtendTypeShift);
   }

   static OperandSize OperandSizeFor(intptr_t cid) {
@@ -354,16 +296,71 @@ class Address : public ValueObject {
   }

  private:
-  uint32_t encoding() const { return encoding_; }
+  uint32_t encoding(OperandSize sz) const {
+    const int32_t offset = offset_;
+    const int32_t scale = Log2OperandSizeBytes(sz);
+    ASSERT((type_ == Reg) || CanHoldOffset(offset, type_, sz));
+    switch (type_) {
+      case Offset:
+        if (Utils::IsUint(12 + scale, offset) &&
+            (offset == ((offset >> scale) << scale))) {
+          return B24 | ((offset >> scale) << kImm12Shift) |
+                 Arm64Encode::Rn(base_);
+        } else if (Utils::IsInt(9, offset)) {
+          return ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(base_);
+        } else {
+          FATAL("Offset %d is out of range\n", offset);
+        }
+      case PreIndex:
+      case PostIndex: {
+        ASSERT(Utils::IsInt(9, offset));
+        int32_t idx = (type_ == PostIndex) ? B10 : (B11 | B10);
+        return idx | ((offset & 0x1ff) << kImm9Shift) | Arm64Encode::Rn(base_);
+      }
+      case PairOffset:
+      case PairPreIndex:
+      case PairPostIndex: {
+        ASSERT(Utils::IsInt(7 + scale, offset) &&
+               (static_cast<uint32_t>(offset) ==
+                ((static_cast<uint32_t>(offset) >> scale) << scale)));
+        int32_t idx = 0;
+        switch (type_) {
+          case PairPostIndex:
+            idx = B23;
+            break;
+          case PairPreIndex:
+            idx = B24 | B23;
+            break;
+          case PairOffset:
+            idx = B24;
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+        return idx |
+               ((static_cast<uint32_t>(offset >> scale) << kImm7Shift) &
+                kImm7Mask) |
+               Arm64Encode::Rn(base_);
+      }
+      case PCOffset:
+        return (((offset >> 2) << kImm19Shift) & kImm19Mask);
+      case Reg:
+        // Offset contains pre-encoded scale, extend and rm.
+        return B21 | B11 | Arm64Encode::Rn(base_) | offset;
+      case Unknown:
+        UNREACHABLE();
+    }
+  }

   AddressType type() const { return type_; }
   Register base() const { return base_; }

-  Address() : encoding_(0), type_(Unknown), base_(kNoRegister) {}
+  Address() : type_(Unknown), base_(kNoRegister), offset_(0) {}

-  uint32_t encoding_;
   AddressType type_;
   Register base_;
-  int32_t log2sz_;  // Required log2 of operand size (-1 means any).
+  int32_t offset_;

   friend class Assembler;
 };
@@ -376,11 +373,11 @@ class FieldAddress : public Address {
     return Address::CanHoldOffset(offset - kHeapObjectTag, at, sz);
   }

-  FieldAddress(Register base, int32_t disp, OperandSize sz = kEightBytes)
-      : Address(base, disp - kHeapObjectTag, Offset, sz) {}
+  FieldAddress(Register base, int32_t disp)
+      : Address(base, disp - kHeapObjectTag) {}

   // This addressing mode does not exist.
-  FieldAddress(Register base, Register disp, OperandSize sz = kEightBytes);
+  FieldAddress(Register base, Register disp) = delete;

   FieldAddress(const FieldAddress& other) : Address(other) {}

@@ -653,15 +650,14 @@ class Assembler : public AssemblerBase {
                                           int8_t value) override {
     EnsureHasClassIdInDEBUG(kFunctionTypeCid, type, TMP);
     ldr(TMP,
-        FieldAddress(type, compiler::target::FunctionType::nullability_offset(),
-                     kByte),
+        FieldAddress(type,
+                     compiler::target::FunctionType::nullability_offset()),
         kUnsignedByte);
     cmp(TMP, Operand(value));
   }
   void CompareTypeNullabilityWith(Register type, int8_t value) override {
     EnsureHasClassIdInDEBUG(kTypeCid, type, TMP);
-    ldr(TMP,
-        FieldAddress(type, compiler::target::Type::nullability_offset(), kByte),
+    ldr(TMP, FieldAddress(type, compiler::target::Type::nullability_offset()),
         kUnsignedByte);
     cmp(TMP, Operand(value));
   }
@@ -1417,11 +1413,11 @@ class Assembler : public AssemblerBase {
   void fcvtds(VRegister vd, VRegister vn) { EmitFPOneSourceOp(FCVTDS, vd, vn); }
   void fldrq(VRegister vt, Address a) {
     ASSERT(a.type() != Address::PCOffset);
-    EmitLoadStoreReg(FLDRQ, static_cast<Register>(vt), a, kByte);
+    EmitLoadStoreReg(FLDRQ, static_cast<Register>(vt), a, kQWord);
   }
   void fstrq(VRegister vt, Address a) {
     ASSERT(a.type() != Address::PCOffset);
-    EmitLoadStoreReg(FSTRQ, static_cast<Register>(vt), a, kByte);
+    EmitLoadStoreReg(FSTRQ, static_cast<Register>(vt), a, kQWord);
   }
   void fldrd(VRegister vt, Address a) {
     ASSERT(a.type() != Address::PCOffset);
@@ -1650,20 +1646,18 @@ class Assembler : public AssemblerBase {
     fldrq(reg, Address(SP, 1 * kQuadSize, Address::PostIndex));
   }
   void PushDoublePair(VRegister low, VRegister high) {
-    fstp(low, high,
-         Address(SP, -2 * kDoubleSize, Address::PairPreIndex, kDWord), kDWord);
+    fstp(low, high, Address(SP, -2 * kDoubleSize, Address::PairPreIndex),
+         kDWord);
   }
   void PopDoublePair(VRegister low, VRegister high) {
-    fldp(low, high,
-         Address(SP, 2 * kDoubleSize, Address::PairPostIndex, kDWord), kDWord);
+    fldp(low, high, Address(SP, 2 * kDoubleSize, Address::PairPostIndex),
+         kDWord);
   }
   void PushQuadPair(VRegister low, VRegister high) {
-    fstp(low, high, Address(SP, -2 * kQuadSize, Address::PairPreIndex, kQWord),
-         kQWord);
+    fstp(low, high, Address(SP, -2 * kQuadSize, Address::PairPreIndex), kQWord);
   }
   void PopQuadPair(VRegister low, VRegister high) {
-    fldp(low, high, Address(SP, 2 * kQuadSize, Address::PairPostIndex, kQWord),
-         kQWord);
+    fldp(low, high, Address(SP, 2 * kQuadSize, Address::PairPostIndex), kQWord);
   }
   void TagAndPushPP() {
     // Add the heap object tag back to PP before putting it on the stack.
@@ -2762,9 +2756,8 @@ class Assembler : public AssemblerBase {
     ASSERT((op != LDR && op != STR && op != LDRS) || a.can_writeback_to(rt));

     const int32_t size = Log2OperandSizeBytes(sz);
-    ASSERT(a.log2sz_ == -1 || a.log2sz_ == size);
     const int32_t encoding =
-        op | ((size & 0x3) << kSzShift) | Arm64Encode::Rt(rt) | a.encoding();
+        op | ((size & 0x3) << kSzShift) | Arm64Encode::Rt(rt) | a.encoding(sz);
     Emit(encoding);
   }

@@ -2774,10 +2767,9 @@ class Assembler : public AssemblerBase {
                            OperandSize sz) {
     ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
            (sz == kUnsignedFourBytes));
-    ASSERT(a.log2sz_ == -1 || a.log2sz_ == Log2OperandSizeBytes(sz));
     ASSERT((rt != CSP) && (rt != R31));
     const int32_t size = (sz == kEightBytes) ? B30 : 0;
-    const int32_t encoding = op | size | Arm64Encode::Rt(rt) | a.encoding();
+    const int32_t encoding = op | size | Arm64Encode::Rt(rt) | a.encoding(sz);
     Emit(encoding);
   }

@@ -2792,7 +2784,6 @@ class Assembler : public AssemblerBase {

     ASSERT((sz == kEightBytes) || (sz == kFourBytes) ||
            (sz == kUnsignedFourBytes));
-    ASSERT(a.log2sz_ == -1 || a.log2sz_ == Log2OperandSizeBytes(sz));
     ASSERT((rt != CSP) && (rt != R31));
     ASSERT((rt2 != CSP) && (rt2 != R31));
     int32_t opc = 0;
@@ -2811,7 +2802,7 @@ class Assembler : public AssemblerBase {
         break;
     }
     const int32_t encoding =
-        opc | op | Arm64Encode::Rt(rt) | Arm64Encode::Rt2(rt2) | a.encoding();
+        opc | op | Arm64Encode::Rt(rt) | Arm64Encode::Rt2(rt2) | a.encoding(sz);
     Emit(encoding);
   }

@@ -2822,7 +2813,6 @@ class Assembler : public AssemblerBase {
                              OperandSize sz) {
     ASSERT(op != FLDP || rt != rt2);
     ASSERT((sz == kSWord) || (sz == kDWord) || (sz == kQWord));
-    ASSERT(a.log2sz_ == -1 || a.log2sz_ == Log2OperandSizeBytes(sz));
     int32_t opc = 0;
     switch (sz) {
       case kSWord:
@@ -2840,7 +2830,7 @@ class Assembler : public AssemblerBase {
     }
     const int32_t encoding =
         opc | op | Arm64Encode::Rt(static_cast<Register>(rt)) |
-        Arm64Encode::Rt2(static_cast<Register>(rt2)) | a.encoding();
+        Arm64Encode::Rt2(static_cast<Register>(rt2)) | a.encoding(sz);
     Emit(encoding);
   }
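These header hunks are the heart of the change: Address no longer computes encoding_ eagerly in its constructor and carries a log2sz_ for every Emit helper to assert against; it now stores only type_, base_ and offset_, and each Emit* helper calls a.encoding(sz) with the size taken from the instruction being emitted. A small sketch of what the size-agnostic form permits (illustrative registers and offset, not taken from the diff):

// One Address can now back loads of different widths; encoding(sz) scales
// and range-checks the offset at emission time for each instruction.
const Address slot(SP, 8);
__ ldr(R0, slot, kEightBytes);  // 64-bit load, imm12 offset scaled by 8
__ ldr(R1, slot, kFourBytes);   // 32-bit load of the same slot, scaled by 4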
@@ -666,9 +666,9 @@ ASSEMBLER_TEST_GENERATE(LoadSigned32Bit, assembler) {
          Operand(2 * target::kWordSize));  // Must not access beyond CSP.

   __ LoadImmediate(R1, 0xffffffff);
-  __ str(R1, Address(SP, -4, Address::PreIndex, kFourBytes), kFourBytes);
+  __ str(R1, Address(SP, -4, Address::PreIndex), kFourBytes);
   __ ldr(R0, Address(SP), kFourBytes);
-  __ ldr(R1, Address(SP, 4, Address::PostIndex, kFourBytes), kFourBytes);
+  __ ldr(R1, Address(SP, 4, Address::PostIndex), kFourBytes);
   __ RestoreCSP();
   __ ret();
 }
@@ -757,13 +757,9 @@ ASSEMBLER_TEST_GENERATE(LoadStorePairUnsigned32, assembler) {
   __ LoadImmediate(R3, 0xBBCCDDEEFF998877);
   __ sub(SP, SP, Operand(4 * target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
-  __ stp(R2, R3,
-         Address(SP, 2 * sizeof(uint32_t), Address::PairOffset,
-                 compiler::kUnsignedFourBytes),
+  __ stp(R2, R3, Address(SP, 2 * sizeof(uint32_t), Address::PairOffset),
          kUnsignedFourBytes);
-  __ ldp(R0, R1,
-         Address(SP, 2 * sizeof(uint32_t), Address::PairOffset,
-                 kUnsignedFourBytes),
+  __ ldp(R0, R1, Address(SP, 2 * sizeof(uint32_t), Address::PairOffset),
          kUnsignedFourBytes);
   __ add(SP, SP, Operand(4 * target::kWordSize));
   __ sub(R0, R0, Operand(R1));
@@ -801,11 +797,9 @@ ASSEMBLER_TEST_GENERATE(LoadStorePairSigned32, assembler) {
   __ LoadImmediate(R3, 0xBBCCDDEEFF998877);
   __ sub(SP, SP, Operand(4 * target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
-  __ stp(R2, R3,
-         Address(SP, 2 * sizeof(int32_t), Address::PairOffset, kFourBytes),
+  __ stp(R2, R3, Address(SP, 2 * sizeof(int32_t), Address::PairOffset),
          kFourBytes);
-  __ ldp(R0, R1,
-         Address(SP, 2 * sizeof(int32_t), Address::PairOffset, kFourBytes),
+  __ ldp(R0, R1, Address(SP, 2 * sizeof(int32_t), Address::PairOffset),
          kFourBytes);
   __ add(SP, SP, Operand(4 * target::kWordSize));
   __ sub(R0, R0, Operand(R1));
@@ -3645,7 +3639,7 @@ ASSEMBLER_TEST_RUN(LoadImmediateMedNeg4, test) {
 }

 ASSEMBLER_TEST_GENERATE(LoadHalfWordUnaligned, assembler) {
-  __ ldr(R1, R0, kTwoBytes);
+  __ ldr(R1, Address(R0), kTwoBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -3672,7 +3666,7 @@ ASSEMBLER_TEST_RUN(LoadHalfWordUnaligned, test) {
 }

 ASSEMBLER_TEST_GENERATE(LoadHalfWordUnsignedUnaligned, assembler) {
-  __ ldr(R1, R0, kUnsignedTwoBytes);
+  __ ldr(R1, Address(R0), kUnsignedTwoBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -3698,7 +3692,7 @@ ASSEMBLER_TEST_RUN(LoadHalfWordUnsignedUnaligned, test) {

 ASSEMBLER_TEST_GENERATE(StoreHalfWordUnaligned, assembler) {
   __ LoadImmediate(R1, 0xABCD);
-  __ str(R1, R0, kTwoBytes);
+  __ str(R1, Address(R0), kTwoBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -3731,7 +3725,7 @@ ASSEMBLER_TEST_RUN(StoreHalfWordUnaligned, test) {
 }

 ASSEMBLER_TEST_GENERATE(LoadWordUnaligned, assembler) {
-  __ ldr(R1, R0, kUnsignedFourBytes);
+  __ ldr(R1, Address(R0), kUnsignedFourBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -3765,7 +3759,7 @@ ASSEMBLER_TEST_RUN(LoadWordUnaligned, test) {

 ASSEMBLER_TEST_GENERATE(StoreWordUnaligned, assembler) {
   __ LoadImmediate(R1, 0x12345678);
-  __ str(R1, R0, kUnsignedFourBytes);
+  __ str(R1, Address(R0), kUnsignedFourBytes);
   __ mov(R0, R1);
   __ ret();
 }
@@ -5192,7 +5186,7 @@ ASSEMBLER_TEST_GENERATE(FldrdFstrdLargeOffset, assembler) {
   __ LoadDImmediate(V1, 42.0);
   __ sub(SP, SP, Operand(512 * target::kWordSize));
   __ andi(CSP, SP, Immediate(~15));  // Must not access beyond CSP.
-  __ fstrd(V1, Address(SP, 512 * target::kWordSize, Address::Offset));
+  __ fstrd(V1, Address(SP, 512 * target::kWordSize));
   __ add(SP, SP, Operand(512 * target::kWordSize));
   __ fldrd(V0, Address(SP));
   __ RestoreCSP();
@@ -1652,9 +1652,7 @@ void StringToCharCodeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const Register result = locs()->out(0).reg();
   __ LoadCompressedSmi(result,
                        compiler::FieldAddress(str, String::length_offset()));
-  __ ldr(TMP,
-         compiler::FieldAddress(str, OneByteString::data_offset(),
-                                compiler::kByte),
+  __ ldr(TMP, compiler::FieldAddress(str, OneByteString::data_offset()),
          compiler::kUnsignedByte);
   __ CompareImmediate(result, Smi::RawValue(1));
   __ LoadImmediate(result, -1);
@@ -2275,10 +2273,10 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   if (emit_full_guard) {
     __ LoadObject(field_reg, Field::ZoneHandle((field().Original())));

-    compiler::FieldAddress field_cid_operand(
-        field_reg, Field::guarded_cid_offset(), compiler::kUnsignedTwoBytes);
+    compiler::FieldAddress field_cid_operand(field_reg,
+                                             Field::guarded_cid_offset());
     compiler::FieldAddress field_nullability_operand(
-        field_reg, Field::is_nullable_offset(), compiler::kUnsignedTwoBytes);
+        field_reg, Field::is_nullable_offset());

     if (value_cid == kDynamicCid) {
       LoadValueCid(compiler, value_cid_reg, value_reg);
@@ -2541,15 +2539,14 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
   __ StoreCompressedIntoObjectNoBarrier(
       AllocateArrayABI::kResultReg,
       compiler::FieldAddress(AllocateArrayABI::kResultReg,
-                             Array::type_arguments_offset(),
-                             compiler::kObjectBytes),
+                             Array::type_arguments_offset()),
       AllocateArrayABI::kTypeArgumentsReg);

   // Set the length field.
   __ StoreCompressedIntoObjectNoBarrier(
       AllocateArrayABI::kResultReg,
       compiler::FieldAddress(AllocateArrayABI::kResultReg,
-                             Array::length_offset(), compiler::kObjectBytes),
+                             Array::length_offset()),
       AllocateArrayABI::kLengthReg);

   // TODO(zra): Use stp once added.
@@ -2566,9 +2563,7 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
     intptr_t current_offset = 0;
     while (current_offset < array_size) {
       __ StoreCompressedIntoObjectNoBarrier(
-          AllocateArrayABI::kResultReg,
-          compiler::Address(R8, current_offset, compiler::Address::Offset,
-                            compiler::kObjectBytes),
+          AllocateArrayABI::kResultReg, compiler::Address(R8, current_offset),
           NULL_REG);
       current_offset += kCompressedWordSize;
     }
@@ -2577,11 +2572,8 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
     __ Bind(&init_loop);
     __ CompareRegisters(R8, R3);
     __ b(&end_loop, CS);
-    __ StoreCompressedIntoObjectNoBarrier(
-        AllocateArrayABI::kResultReg,
-        compiler::Address(R8, 0, compiler::Address::Offset,
-                          compiler::kObjectBytes),
-        NULL_REG);
+    __ StoreCompressedIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+                                          compiler::Address(R8, 0), NULL_REG);
     __ AddImmediate(R8, kCompressedWordSize);
     __ b(&init_loop);
     __ Bind(&end_loop);
@@ -5168,8 +5160,7 @@ void CheckWritableInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   compiler->AddSlowPathCode(slow_path);
   __ ldr(TMP,
          compiler::FieldAddress(locs()->in(0).reg(),
-                                compiler::target::Object::tags_offset(),
-                                compiler::kUnsignedByte),
+                                compiler::target::Object::tags_offset()),
          compiler::kUnsignedByte);
   // In the first byte.
   ASSERT(compiler::target::UntaggedObject::kImmutableBit < 8);
@@ -1807,10 +1807,8 @@ void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
   Label slow_case;

   // Load num. variable (int32) in the existing context.
-  __ ldr(
-      R1,
-      FieldAddress(R5, target::Context::num_variables_offset(), kFourBytes),
-      kFourBytes);
+  __ ldr(R1, FieldAddress(R5, target::Context::num_variables_offset()),
+         kFourBytes);

   GenerateAllocateContextSpaceStub(assembler, &slow_case);

@@ -3055,31 +3053,26 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
   // Closure handling.
   {
     __ Comment("Closure");
-    __ LoadCompressed(
-        STCInternalRegs::kInstanceCidOrSignatureReg,
-        FieldAddress(TypeTestABI::kInstanceReg,
-                     target::Closure::function_offset(), kObjectBytes));
-    __ LoadCompressed(
-        STCInternalRegs::kInstanceCidOrSignatureReg,
-        FieldAddress(STCInternalRegs::kInstanceCidOrSignatureReg,
-                     target::Function::signature_offset(), kObjectBytes));
+    __ LoadCompressed(STCInternalRegs::kInstanceCidOrSignatureReg,
+                      FieldAddress(TypeTestABI::kInstanceReg,
+                                   target::Closure::function_offset()));
+    __ LoadCompressed(STCInternalRegs::kInstanceCidOrSignatureReg,
+                      FieldAddress(STCInternalRegs::kInstanceCidOrSignatureReg,
+                                   target::Function::signature_offset()));
     if (n >= 3) {
       __ LoadCompressed(
           STCInternalRegs::kInstanceInstantiatorTypeArgumentsReg,
           FieldAddress(TypeTestABI::kInstanceReg,
-                       target::Closure::instantiator_type_arguments_offset(),
-                       kObjectBytes));
+                       target::Closure::instantiator_type_arguments_offset()));
       if (n >= 7) {
         __ LoadCompressed(
             STCInternalRegs::kInstanceParentFunctionTypeArgumentsReg,
             FieldAddress(TypeTestABI::kInstanceReg,
-                         target::Closure::function_type_arguments_offset(),
-                         kObjectBytes));
+                         target::Closure::function_type_arguments_offset()));
         __ LoadCompressed(
             STCInternalRegs::kInstanceDelayedFunctionTypeArgumentsReg,
             FieldAddress(TypeTestABI::kInstanceReg,
-                         target::Closure::delayed_type_arguments_offset(),
-                         kObjectBytes));
+                         target::Closure::delayed_type_arguments_offset()));
       }
     }
     __ b(&loop);
@@ -3103,7 +3096,7 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
   __ add(kScratchReg, TypeTestABI::kInstanceReg,
          Operand(kScratchReg, LSL, kCompressedWordSizeLog2));
   __ LoadCompressed(STCInternalRegs::kInstanceInstantiatorTypeArgumentsReg,
-                    FieldAddress(kScratchReg, 0, kObjectBytes));
+                    FieldAddress(kScratchReg, 0));
   __ Bind(&has_no_type_arguments);
   __ Comment("No type arguments");

@@ -3126,8 +3119,7 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
       kScratchReg,
       Address(kCacheArrayReg,
               target::kCompressedWordSize *
-                  target::SubtypeTestCache::kInstanceCidOrSignature,
-              Address::Offset, kObjectBytes));
+                  target::SubtypeTestCache::kInstanceCidOrSignature));
   __ CompareObjectRegisters(kScratchReg, kNullReg);
   __ b(&done, EQ);
   __ CompareObjectRegisters(kScratchReg,
@@ -3139,16 +3131,14 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
   __ LoadCompressed(kScratchReg,
                     Address(kCacheArrayReg,
                             target::kCompressedWordSize *
-                                target::SubtypeTestCache::kDestinationType,
-                            Address::Offset, kObjectBytes));
+                                target::SubtypeTestCache::kDestinationType));
   __ cmp(kScratchReg, Operand(TypeTestABI::kDstTypeReg));
   __ b(&next_iteration, NE);
   __ LoadCompressed(
       kScratchReg,
       Address(kCacheArrayReg,
               target::kCompressedWordSize *
-                  target::SubtypeTestCache::kInstanceTypeArguments,
-              Address::Offset, kObjectBytes));
+                  target::SubtypeTestCache::kInstanceTypeArguments));
   __ cmp(kScratchReg,
          Operand(STCInternalRegs::kInstanceInstantiatorTypeArgumentsReg));
   if (n == 3) {
@@ -3159,16 +3149,14 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
        kScratchReg,
        Address(kCacheArrayReg,
                target::kCompressedWordSize *
-                   target::SubtypeTestCache::kInstantiatorTypeArguments,
-               Address::Offset, kObjectBytes));
+                   target::SubtypeTestCache::kInstantiatorTypeArguments));
    __ cmp(kScratchReg, Operand(TypeTestABI::kInstantiatorTypeArgumentsReg));
    __ b(&next_iteration, NE);
    __ LoadCompressed(
        kScratchReg,
        Address(kCacheArrayReg,
                target::kCompressedWordSize *
-                   target::SubtypeTestCache::kFunctionTypeArguments,
-               Address::Offset, kObjectBytes));
+                   target::SubtypeTestCache::kFunctionTypeArguments));
    __ cmp(kScratchReg, Operand(TypeTestABI::kFunctionTypeArgumentsReg));
    if (n == 5) {
      __ b(&found, EQ);
@@ -3176,23 +3164,22 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
      ASSERT(n == 7);
      __ b(&next_iteration, NE);

-      __ LoadCompressed(kScratchReg,
-                        Address(kCacheArrayReg,
-                                target::kCompressedWordSize *
-                                    target::SubtypeTestCache::
-                                        kInstanceParentFunctionTypeArguments,
-                                Address::Offset, kObjectBytes));
+      __ LoadCompressed(
+          kScratchReg, Address(kCacheArrayReg,
+                               target::kCompressedWordSize *
+                                   target::SubtypeTestCache::
+                                       kInstanceParentFunctionTypeArguments));
      __ cmp(
          kScratchReg,
          Operand(STCInternalRegs::kInstanceParentFunctionTypeArgumentsReg));
      __ b(&next_iteration, NE);

-      __ LoadCompressed(kScratchReg,
-                        Address(kCacheArrayReg,
-                                target::kCompressedWordSize *
-                                    target::SubtypeTestCache::
-                                        kInstanceDelayedFunctionTypeArguments,
-                                Address::Offset, kObjectBytes));
+      __ LoadCompressed(
+          kScratchReg,
+          Address(kCacheArrayReg,
+                  target::kCompressedWordSize *
+                      target::SubtypeTestCache::
+                          kInstanceDelayedFunctionTypeArguments));
      __ cmp(
          kScratchReg,
          Operand(STCInternalRegs::kInstanceDelayedFunctionTypeArgumentsReg));
@@ -3209,11 +3196,10 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {

   __ Bind(&found);
   __ Comment("Found");
-  __ LoadCompressed(TypeTestABI::kSubtypeTestCacheResultReg,
-                    Address(kCacheArrayReg,
-                            target::kCompressedWordSize *
-                                target::SubtypeTestCache::kTestResult,
-                            Address::Offset, kObjectBytes));
+  __ LoadCompressed(
+      TypeTestABI::kSubtypeTestCacheResultReg,
+      Address(kCacheArrayReg, target::kCompressedWordSize *
+                                  target::SubtypeTestCache::kTestResult));
   __ Bind(&done);
   __ Comment("Done");
   __ ret();
@@ -3512,9 +3498,8 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
   // proper target for the given name and arguments descriptor. If the
   // illegal class id was found, the target is a cache miss handler that can
   // be invoked as a normal Dart function.
-  __ LoadCompressed(
-      FUNCTION_REG,
-      FieldAddress(TMP, base + target::kCompressedWordSize, kObjectBytes));
+  __ LoadCompressed(FUNCTION_REG,
+                    FieldAddress(TMP, base + target::kCompressedWordSize));
   __ ldr(R1,
          FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
   __ ldr(ARGS_DESC_REG,
@@ -3577,14 +3562,12 @@ void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
   if (FLAG_precompiled_mode) {
     const intptr_t entry_offset =
         target::ICData::EntryPointIndexFor(1) * target::kCompressedWordSize;
-    __ LoadCompressed(R1,
-                      Address(R8, entry_offset, Address::Offset, kObjectBytes));
+    __ LoadCompressed(R1, Address(R8, entry_offset));
     __ ldr(R1, FieldAddress(R1, target::Function::entry_point_offset()));
   } else {
     const intptr_t code_offset =
         target::ICData::CodeIndexFor(1) * target::kCompressedWordSize;
-    __ LoadCompressed(CODE_REG,
-                      Address(R8, code_offset, Address::Offset, kObjectBytes));
+    __ LoadCompressed(CODE_REG, Address(R8, code_offset));
     __ ldr(R1, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
   }
   __ br(R1);
@@ -3657,13 +3640,9 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
 void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
   Label miss;
   __ LoadClassIdMayBeSmi(R1, R0);
-  __ ldr(R2,
-         FieldAddress(R5, target::SingleTargetCache::lower_limit_offset(),
-                      kTwoBytes),
+  __ ldr(R2, FieldAddress(R5, target::SingleTargetCache::lower_limit_offset()),
          kUnsignedTwoBytes);
-  __ ldr(R3,
-         FieldAddress(R5, target::SingleTargetCache::upper_limit_offset(),
-                      kTwoBytes),
+  __ ldr(R3, FieldAddress(R5, target::SingleTargetCache::upper_limit_offset()),
          kUnsignedTwoBytes);

   __ cmp(R1, Operand(R2));