Mirror of https://github.com/dart-lang/sdk (synced 2024-10-14 15:57:17 +00:00)
[vm/compiler] More consolidation of Store assembler methods.
Add OperandSize arguments to the non-compressed Store methods so that
the compressed Store methods can be implemented as calls to the
corresponding non-compressed method using kObjectBytes. This makes it
so that all compressed Store methods are now non-virtual.

Add a scratch register argument, defaulting to TMP, to all Store
methods that involve a write barrier. Originally only the IA32
implementation took a scratch register, with the others using TMP or
TMP2 internally.

Since all architectures can represent a register/offset pair as an
Address without losing information, make all the offset-based load and
store methods have an implementation that creates an Address or
FieldAddress and calls the corresponding address-based store method.
This makes all the offset-based load and store methods non-virtual.

After this, most of the Store methods are non-virtual, with only the
base methods used to implement them as virtual:

* Store
* StoreRelease
* StoreObjectIntoObjectNoBarrier
* StoreBarrier
* ArrayStoreBarrier
* VerifyStoreNeedsNoWriteBarrier (for DEBUG use only)

The Load methods can't be consolidated quite so much, as the base
methods for handling non-Smi compressed pointers must be defined
per-architecture, since each compressed pointer architecture handles
adding the upper bits for the heap separately.

Have LoadFieldInstr::EmitNativeCode use Assembler::LoadFromSlot when
the result location is a single integer register, and have
StoreFieldInstr::EmitNativeCode use Assembler::StoreToSlot and
Assembler::StoreToSlotNoBarrier when the value location is a single
integer register, to avoid code duplication between those methods.
Handle both compressed and uncompressed Smi fields in LoadFromSlot.

TEST=ci (refactoring)

Cq-Include-Trybots: luci.dart.try:vm-aot-android-release-arm64c-try,vm-aot-linux-debug-x64-try,vm-aot-linux-debug-x64c-try,vm-aot-mac-release-arm64-try,vm-aot-mac-release-x64-try,vm-aot-obfuscate-linux-release-x64-try,vm-aot-optimization-level-linux-release-x64-try,vm-aot-win-debug-arm64-try,vm-appjit-linux-debug-x64-try,vm-asan-linux-release-x64-try,vm-checked-mac-release-arm64-try,vm-eager-optimization-linux-release-ia32-try,vm-eager-optimization-linux-release-x64-try,vm-ffi-android-debug-arm-try,vm-ffi-android-debug-arm64c-try,vm-ffi-qemu-linux-release-arm-try,vm-ffi-qemu-linux-release-riscv64-try,vm-linux-debug-ia32-try,vm-linux-debug-x64c-try,vm-mac-debug-arm64-try,vm-mac-debug-x64-try,vm-msan-linux-release-x64-try,vm-reload-linux-debug-x64-try,vm-reload-rollback-linux-debug-x64-try,vm-ubsan-linux-release-x64-try,vm-win-debug-x64-try,vm-win-release-ia32-try
Change-Id: I60cc03776af220ed87918664bb4b9abafff2788a
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/360641
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Commit-Queue: Tess Strickland <sstrickl@google.com>
Reviewed-by: Daco Harkes <dacoharkes@google.com>
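A minimal C++ sketch of the consolidation pattern the message describes, with simplified signatures. The class name here mirrors AssemblerBase, but the reduced parameter lists and the StoreCompressedFieldToOffset helper are illustrative only, not the SDK's actual declarations (the real methods also take memory order, CanBeSmi, and scratch arguments, and some offset-based methods remain virtual so ARM and IA32 can override them):

```cpp
#include <cstdint>

enum Register { R0, R1, TMP };
enum OperandSize { kWordBytes, kObjectBytes };

struct Address {
  Register base;
  int32_t offset;
};

// Dart heap pointers are tagged; a field address is base + offset - tag.
constexpr int32_t kHeapObjectTag = 1;
inline Address FieldAddress(Register base, int32_t offset) {
  return Address{base, offset - kHeapObjectTag};
}

class AssemblerBase {
 public:
  virtual ~AssemblerBase() = default;

  // Virtual base method: each architecture implements the address-based
  // store, and the variants below delegate to it.
  virtual void Store(Register src, const Address& address,
                     OperandSize size = kWordBytes) = 0;

  // Offset-based variant: builds a FieldAddress and calls the address-based
  // method, so it needs no per-architecture implementation.
  void StoreFieldToOffset(Register src, Register base, int32_t offset,
                          OperandSize size = kWordBytes) {
    Store(src, FieldAddress(base, offset), size);
  }

#if defined(DART_COMPRESSED_POINTERS)
  // Compressed variant (hypothetical name): calls the non-compressed method
  // with kObjectBytes, the compressed pointer width, instead of a full word.
  void StoreCompressedFieldToOffset(Register src, Register base,
                                    int32_t offset) {
    StoreFieldToOffset(src, base, offset, kObjectBytes);
  }
#endif
};
```

This is why the compressed and offset-based variants can all be non-virtual: they only choose an operand size or build an Address, then defer to the small set of virtual base methods.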
This commit is contained in:
parent 1074cda324
commit 671f271f58
@@ -1758,23 +1758,17 @@ Register AllocateRegister(RegList* used) {
                           used);
 }
 
-void Assembler::StoreIntoObject(Register object,
-                                const Address& dest,
-                                Register value,
-                                CanBeSmi can_be_smi,
-                                MemoryOrder memory_order) {
+void Assembler::StoreBarrier(Register object,
+                             Register value,
+                             CanBeSmi can_be_smi,
+                             Register scratch) {
   // x.slot = x. Barrier should have be removed at the IL level.
   ASSERT(object != value);
   ASSERT(object != LINK_REGISTER);
   ASSERT(value != LINK_REGISTER);
-  ASSERT(object != TMP);
-  ASSERT(value != TMP);
-
-  if (memory_order == kRelease) {
-    StoreRelease(value, dest);
-  } else {
-    Store(value, dest);
-  }
+  ASSERT(object != scratch);
+  ASSERT(value != scratch);
+  ASSERT(scratch != kNoRegister);
 
   // In parallel, test whether
   //  - object is old and not remembered and value is new, or
@@ -1786,18 +1780,25 @@ void Assembler::StoreIntoObject(Register object,
   Label done;
   if (can_be_smi == kValueCanBeSmi) {
     BranchIfSmi(value, &done, kNearJump);
-  }
+  } else {
+#if defined(DEBUG)
+    Label passed_check;
+    BranchIfNotSmi(value, &passed_check, kNearJump);
+    Breakpoint();
+    Bind(&passed_check);
+#endif
+  }
   const bool preserve_lr = lr_state().LRContainsReturnAddress();
   if (preserve_lr) {
     SPILLS_LR_TO_FRAME(Push(LR));
   }
   CLOBBERS_LR({
-    ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
+    ldrb(scratch, FieldAddress(object, target::Object::tags_offset()));
     ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
-    and_(TMP, LR,
-         Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
+    and_(scratch, LR,
+         Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
     ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
-    tst(TMP, Operand(LR));
+    tst(scratch, Operand(LR));
   });
   if (value != kWriteBarrierValueReg) {
     // Unlikely. Only non-graph intrinsics.
@@ -1832,20 +1833,18 @@ void Assembler::StoreIntoObject(Register object,
   Bind(&done);
 }
 
-void Assembler::StoreIntoArray(Register object,
-                               Register slot,
-                               Register value,
-                               CanBeSmi can_be_smi) {
-  // x.slot = x. Barrier should have be removed at the IL level.
-  ASSERT(object != value);
+void Assembler::ArrayStoreBarrier(Register object,
+                                  Register slot,
+                                  Register value,
+                                  CanBeSmi can_be_smi,
+                                  Register scratch) {
   ASSERT(object != LINK_REGISTER);
   ASSERT(value != LINK_REGISTER);
   ASSERT(slot != LINK_REGISTER);
-  ASSERT(object != TMP);
-  ASSERT(value != TMP);
-  ASSERT(slot != TMP);
-
-  str(value, Address(slot, 0));
+  ASSERT(object != scratch);
+  ASSERT(value != scratch);
+  ASSERT(slot != scratch);
+  ASSERT(scratch != kNoRegister);
 
   // In parallel, test whether
   //  - object is old and not remembered and value is new, or
@@ -1857,6 +1856,13 @@ void Assembler::StoreIntoArray(Register object,
   Label done;
   if (can_be_smi == kValueCanBeSmi) {
     BranchIfSmi(value, &done, kNearJump);
-  }
+  } else {
+#if defined(DEBUG)
+    Label passed_check;
+    BranchIfNotSmi(value, &passed_check, kNearJump);
+    Breakpoint();
+    Bind(&passed_check);
+#endif
+  }
   const bool preserve_lr = lr_state().LRContainsReturnAddress();
   if (preserve_lr) {
@@ -1864,12 +1870,12 @@ void Assembler::StoreIntoArray(Register object,
   }
 
   CLOBBERS_LR({
-    ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
+    ldrb(scratch, FieldAddress(object, target::Object::tags_offset()));
     ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
-    and_(TMP, LR,
-         Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
+    and_(scratch, LR,
+         Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
     ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
-    tst(TMP, Operand(LR));
+    tst(scratch, Operand(LR));
   });
 
   if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||
@@ -1886,32 +1892,39 @@ void Assembler::StoreIntoArray(Register object,
   Bind(&done);
 }
 
-void Assembler::StoreIntoObjectOffset(Register object,
-                                      int32_t offset,
-                                      Register value,
-                                      CanBeSmi can_value_be_smi,
-                                      MemoryOrder memory_order) {
+void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
+                                               const Address& dest,
+                                               const Object& value,
+                                               MemoryOrder memory_order,
+                                               OperandSize size) {
+  ASSERT_EQUAL(size, kFourBytes);
+  ASSERT_EQUAL(dest.mode(), Address::Mode::Offset);
+  ASSERT_EQUAL(dest.kind(), Address::OffsetKind::Immediate);
   int32_t ignored = 0;
-  if (Address::CanHoldStoreOffset(kFourBytes, offset - kHeapObjectTag,
-                                  &ignored)) {
-    StoreIntoObject(object, FieldAddress(object, offset), value,
-                    can_value_be_smi, memory_order);
+  Register scratch = TMP;
+  if (!Address::CanHoldStoreOffset(size, dest.offset(), &ignored)) {
+    // As there is no TMP2 on ARM7, Store uses TMP when the instruction cannot
+    // contain the offset, so we need to use a different scratch register
+    // for loading the object.
+    scratch = dest.base() == R9 ? R8 : R9;
+    Push(scratch);
+  }
+  ASSERT(IsOriginalObject(value));
+  DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
+  // No store buffer update.
+  LoadObject(scratch, value);
+  if (memory_order == kRelease) {
+    StoreRelease(scratch, dest);
   } else {
-    AddImmediate(IP, object, offset - kHeapObjectTag);
-    StoreIntoObject(object, Address(IP), value, can_value_be_smi, memory_order);
+    Store(scratch, dest);
+  }
+  if (scratch != TMP) {
+    Pop(scratch);
   }
 }
 
-void Assembler::StoreIntoObjectNoBarrier(Register object,
-                                         const Address& dest,
-                                         Register value,
-                                         MemoryOrder memory_order) {
-  if (memory_order == kRelease) {
-    StoreRelease(value, dest);
-  } else {
-    Store(value, dest);
-  }
+#if defined(DEBUG)
+void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
+                                               Register value) {
   // We can't assert the incremental barrier is not needed here, only the
   // generational barrier. We sometimes omit the write barrier when 'value' is
   // a constant, but we don't eagerly mark 'value' and instead assume it is also
@@ -1927,60 +1940,6 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
   b(&done, ZERO);
   Stop("Write barrier is required");
   Bind(&done);
-#endif  // defined(DEBUG)
-  // No store buffer update.
 }
+#endif  // defined(DEBUG)
 
-void Assembler::StoreIntoObjectNoBarrier(Register object,
-                                         const Address& dest,
-                                         const Object& value,
-                                         MemoryOrder memory_order) {
-  ASSERT(IsOriginalObject(value));
-  DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
-  // No store buffer update.
-  LoadObject(IP, value);
-  if (memory_order == kRelease) {
-    StoreRelease(IP, dest);
-  } else {
-    str(IP, dest);
-  }
-}
-
-void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
-                                               int32_t offset,
-                                               Register value,
-                                               MemoryOrder memory_order) {
-  int32_t ignored = 0;
-  if (Address::CanHoldStoreOffset(kFourBytes, offset - kHeapObjectTag,
-                                  &ignored)) {
-    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
-                             memory_order);
-  } else {
-    Register base = object == R9 ? R8 : R9;
-    Push(base);
-    AddImmediate(base, object, offset - kHeapObjectTag);
-    StoreIntoObjectNoBarrier(object, Address(base), value, memory_order);
-    Pop(base);
-  }
-}
-
-void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
-                                               int32_t offset,
-                                               const Object& value,
-                                               MemoryOrder memory_order) {
-  ASSERT(IsOriginalObject(value));
-  int32_t ignored = 0;
-  if (Address::CanHoldStoreOffset(kFourBytes, offset - kHeapObjectTag,
-                                  &ignored)) {
-    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
-                             memory_order);
-  } else {
-    Register base = object == R9 ? R8 : R9;
-    Push(base);
-    AddImmediate(base, object, offset - kHeapObjectTag);
-    StoreIntoObjectNoBarrier(object, Address(base), value, memory_order);
-    Pop(base);
-  }
-}
-
 void Assembler::StoreInternalPointer(Register object,
@@ -430,22 +430,16 @@ class Assembler : public AssemblerBase {
     StoreToOffset(src, base, offset);
   }
   void LoadAcquire(Register dst,
-                   Register address,
-                   int32_t offset = 0,
+                   const Address& address,
                    OperandSize size = kFourBytes) override {
-    Load(dst, Address(address, offset), size);
+    Load(dst, address, size);
     dmb();
   }
   void StoreRelease(Register src,
-                    Register address,
-                    int32_t offset = 0) override {
-    StoreRelease(src, Address(address, offset));
-  }
-  void StoreRelease(Register src, Address dest) {
+                    const Address& address,
+                    OperandSize size = kFourBytes) override {
     dmb();
-    Store(src, dest);
 
     // We don't run TSAN bots on 32 bit.
+    Store(src, address, size);
   }
 
   void CompareWithMemoryValue(Register value,
@@ -995,47 +989,23 @@ class Assembler : public AssemblerBase {
   }
   void CompareObject(Register rn, const Object& object);
 
-  // Store into a heap object and apply the generational and incremental write
-  // barriers. All stores into heap objects must pass through this function or,
-  // if the value can be proven either Smi or old-and-premarked, its NoBarrier
-  // variants.
-  // Preserves object and value registers.
-  void StoreIntoObject(Register object,      // Object we are storing into.
-                       const Address& dest,  // Where we are storing into.
-                       Register value,       // Value we are storing.
-                       CanBeSmi can_value_be_smi = kValueCanBeSmi,
-                       MemoryOrder memory_order = kRelaxedNonAtomic) override;
-  void StoreIntoArray(Register object,
-                      Register slot,
-                      Register value,
-                      CanBeSmi can_value_be_smi = kValueCanBeSmi) override;
-  void StoreIntoObjectOffset(
-      Register object,
-      int32_t offset,
-      Register value,
-      CanBeSmi can_value_be_smi = kValueCanBeSmi,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-
-  void StoreIntoObjectNoBarrier(
-      Register object,
-      const Address& dest,
-      Register value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-  void StoreIntoObjectNoBarrier(
-      Register object,
-      const Address& dest,
-      const Object& value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-  void StoreIntoObjectOffsetNoBarrier(
-      Register object,
-      int32_t offset,
-      Register value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-  void StoreIntoObjectOffsetNoBarrier(
-      Register object,
-      int32_t offset,
-      const Object& value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
+  void StoreObjectIntoObjectNoBarrier(
+      Register object,
+      const Address& dest,
+      const Object& value,
+      MemoryOrder memory_order = kRelaxedNonAtomic,
+      OperandSize size = kWordBytes) override;
+
+  void StoreBarrier(Register object,
+                    Register value,
+                    CanBeSmi can_be_smi,
+                    Register scratch) override;
+  void ArrayStoreBarrier(Register object,
+                         Register slot,
+                         Register value,
+                         CanBeSmi can_be_smi,
+                         Register scratch) override;
+  void VerifyStoreNeedsNoWriteBarrier(Register object, Register value) override;
 
   // Stores a non-tagged value into a heap object.
   void StoreInternalPointer(Register object,
@@ -1035,82 +1035,27 @@ void Assembler::VRSqrts(VRegister vd, VRegister vn) {
 
 #if defined(DART_COMPRESSED_POINTERS)
 void Assembler::LoadCompressed(Register dest, const Address& slot) {
-  ldr(dest, slot, kUnsignedFourBytes);  // Zero-extension.
+  Load(dest, slot, kUnsignedFourBytes);  // Zero-extension.
   add(dest, dest, Operand(HEAP_BITS, LSL, 32));
 }
-
-void Assembler::LoadCompressedFromOffset(Register dest,
-                                         Register base,
-                                         int32_t offset) {
-  LoadFromOffset(dest, base, offset, kUnsignedFourBytes);  // Zero-extension.
-  add(dest, dest, Operand(HEAP_BITS, LSL, 32));
-}
 #endif
 
-void Assembler::StoreIntoObjectOffset(Register object,
-                                      int32_t offset,
-                                      Register value,
-                                      CanBeSmi value_can_be_smi,
-                                      MemoryOrder memory_order) {
-  if (memory_order == kRelease) {
-    StoreRelease(value, object, offset);
-  } else {
-    StoreToOffset(value, object, offset - kHeapObjectTag);
-  }
-  StoreBarrier(object, value, value_can_be_smi);
-}
-
-#if defined(DART_COMPRESSED_POINTERS)
-void Assembler::StoreCompressedIntoObjectOffset(Register object,
-                                                int32_t offset,
-                                                Register value,
-                                                CanBeSmi value_can_be_smi,
-                                                MemoryOrder memory_order) {
-  if (memory_order == kRelease) {
-    StoreReleaseCompressed(value, object, offset);
-  } else {
-    StoreToOffset(value, object, offset - kHeapObjectTag, kObjectBytes);
-  }
-  StoreBarrier(object, value, value_can_be_smi);
-}
-#endif
-
-void Assembler::StoreIntoObject(Register object,
-                                const Address& dest,
-                                Register value,
-                                CanBeSmi can_be_smi,
-                                MemoryOrder memory_order) {
-  // stlr does not feature an address operand.
-  ASSERT(memory_order == kRelaxedNonAtomic);
-  str(value, dest);
-  StoreBarrier(object, value, can_be_smi);
-}
-
-#if defined(DART_COMPRESSED_POINTERS)
-void Assembler::StoreCompressedIntoObject(Register object,
-                                          const Address& dest,
-                                          Register value,
-                                          CanBeSmi can_be_smi,
-                                          MemoryOrder memory_order) {
-  // stlr does not feature an address operand.
-  ASSERT(memory_order == kRelaxedNonAtomic);
-  str(value, dest, kObjectBytes);
-  StoreBarrier(object, value, can_be_smi);
-}
-#endif
-
 void Assembler::StoreBarrier(Register object,
                              Register value,
-                             CanBeSmi can_be_smi) {
+                             CanBeSmi can_be_smi,
+                             Register scratch) {
   const bool spill_lr = lr_state().LRContainsReturnAddress();
   // x.slot = x. Barrier should have be removed at the IL level.
   ASSERT(object != value);
+  ASSERT(object != scratch);
+  ASSERT(value != scratch);
   ASSERT(object != LINK_REGISTER);
   ASSERT(value != LINK_REGISTER);
-  ASSERT(object != TMP);
+  ASSERT(scratch != LINK_REGISTER);
   ASSERT(object != TMP2);
-  ASSERT(value != TMP);
   ASSERT(value != TMP2);
+  ASSERT(scratch != TMP2);
+  ASSERT(scratch != kNoRegister);
 
   // In parallel, test whether
   //  - object is old and not remembered and value is new, or
@@ -1122,12 +1067,20 @@ void Assembler::StoreBarrier(Register object,
   Label done;
   if (can_be_smi == kValueCanBeSmi) {
     BranchIfSmi(value, &done);
-  }
+  } else {
+#if defined(DEBUG)
+    Label passed_check;
+    BranchIfNotSmi(value, &passed_check, kNearJump);
+    Breakpoint();
+    Bind(&passed_check);
+#endif
+  }
-  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
+  ldr(scratch, FieldAddress(object, target::Object::tags_offset()),
+      kUnsignedByte);
   ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
-  and_(TMP, TMP2,
-       Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
-  tst(TMP, Operand(HEAP_BITS, LSR, 32));
+  and_(scratch, TMP2,
+       Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
+  tst(scratch, Operand(HEAP_BITS, LSR, 32));
   b(&done, ZERO);
 
   if (spill_lr) {
@@ -1164,35 +1117,27 @@ void Assembler::StoreBarrier(Register object,
   Bind(&done);
 }
 
-void Assembler::StoreIntoArray(Register object,
-                               Register slot,
-                               Register value,
-                               CanBeSmi can_be_smi) {
-  str(value, Address(slot, 0));
-  StoreIntoArrayBarrier(object, slot, value, can_be_smi);
-}
-
-#if defined(DART_COMPRESSED_POINTERS)
-void Assembler::StoreCompressedIntoArray(Register object,
-                                         Register slot,
-                                         Register value,
-                                         CanBeSmi can_be_smi) {
-  str(value, Address(slot, 0), kObjectBytes);
-  StoreIntoArrayBarrier(object, slot, value, can_be_smi);
-}
-#endif
-
-void Assembler::StoreIntoArrayBarrier(Register object,
-                                      Register slot,
-                                      Register value,
-                                      CanBeSmi can_be_smi) {
+void Assembler::ArrayStoreBarrier(Register object,
+                                  Register slot,
+                                  Register value,
+                                  CanBeSmi can_be_smi,
+                                  Register scratch) {
   const bool spill_lr = lr_state().LRContainsReturnAddress();
-  ASSERT(object != TMP);
+  ASSERT(object != slot);
+  ASSERT(object != value);
+  ASSERT(object != scratch);
+  ASSERT(slot != value);
+  ASSERT(slot != scratch);
+  ASSERT(value != scratch);
+  ASSERT(object != LINK_REGISTER);
+  ASSERT(slot != LINK_REGISTER);
+  ASSERT(value != LINK_REGISTER);
+  ASSERT(scratch != LINK_REGISTER);
   ASSERT(object != TMP2);
-  ASSERT(value != TMP);
-  ASSERT(value != TMP2);
-  ASSERT(slot != TMP);
   ASSERT(slot != TMP2);
+  ASSERT(value != TMP2);
+  ASSERT(scratch != TMP2);
+  ASSERT(scratch != kNoRegister);
 
   // In parallel, test whether
   //  - object is old and not remembered and value is new, or
@@ -1204,12 +1149,20 @@ void Assembler::StoreIntoArrayBarrier(Register object,
   Label done;
   if (can_be_smi == kValueCanBeSmi) {
     BranchIfSmi(value, &done);
-  }
+  } else {
+#if defined(DEBUG)
+    Label passed_check;
+    BranchIfNotSmi(value, &passed_check, kNearJump);
+    Breakpoint();
+    Bind(&passed_check);
+#endif
+  }
-  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
+  ldr(scratch, FieldAddress(object, target::Object::tags_offset()),
+      kUnsignedByte);
   ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
-  and_(TMP, TMP2,
-       Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
-  tst(TMP, Operand(HEAP_BITS, LSR, 32));
+  and_(scratch, TMP2,
+       Operand(scratch, LSR, target::UntaggedObject::kBarrierOverlapShift));
+  tst(scratch, Operand(HEAP_BITS, LSR, 32));
   b(&done, ZERO);
   if (spill_lr) {
     SPILLS_LR_TO_FRAME(Push(LR));
@@ -1228,14 +1181,34 @@ void Assembler::StoreIntoArrayBarrier(Register object,
   Bind(&done);
 }
 
-void Assembler::StoreIntoObjectNoBarrier(Register object,
-                                         const Address& dest,
-                                         Register value,
-                                         MemoryOrder memory_order) {
-  // stlr does not feature an address operand.
-  ASSERT(memory_order == kRelaxedNonAtomic);
-  str(value, dest);
-#if defined(DEBUG)
+void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
+                                               const Address& address,
+                                               const Object& value,
+                                               MemoryOrder memory_order,
+                                               OperandSize size) {
+  ASSERT(IsOriginalObject(value));
+  DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
+  Register src = kNoRegister;
+  if (IsSameObject(compiler::NullObject(), value)) {
+    src = NULL_REG;
+  } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
+    src = ZR;
+  } else {
+    // Store uses TMP2 when the address cannot be fully contained in the
+    // instruction, so TMP is safe to use as a scratch register here.
+    src = TMP;
+    ASSERT(object != src);
+    LoadObject(src, value);
+  }
+  if (memory_order == kRelease) {
+    StoreRelease(src, address, size);
+  } else {
+    Store(src, address, size);
+  }
+}
+
+#if defined(DEBUG)
+void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
+                                               Register value) {
   // We can't assert the incremental barrier is not needed here, only the
   // generational barrier. We sometimes omit the write barrier when 'value' is
   // a constant, but we don't eagerly mark 'value' and instead assume it is also
@@ -1249,156 +1222,8 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
   tbz(&done, TMP, target::UntaggedObject::kOldAndNotRememberedBit);
   Stop("Write barrier is required");
   Bind(&done);
-#endif  // defined(DEBUG)
-  // No store buffer update.
 }
+#endif  // defined(DEBUG)
 
-#if defined(DART_COMPRESSED_POINTERS)
-void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
-                                                   const Address& dest,
-                                                   Register value,
-                                                   MemoryOrder memory_order) {
-  // stlr does not feature an address operand.
-  ASSERT(memory_order == kRelaxedNonAtomic);
-  str(value, dest, kObjectBytes);
-#if defined(DEBUG)
-  // We can't assert the incremental barrier is not needed here, only the
-  // generational barrier. We sometimes omit the write barrier when 'value' is
-  // a constant, but we don't eagerly mark 'value' and instead assume it is also
-  // reachable via a constant pool, so it doesn't matter if it is not traced via
-  // 'object'.
-  Label done;
-  BranchIfSmi(value, &done, kNearJump);
-  ldr(TMP, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
-  tbz(&done, TMP, target::UntaggedObject::kNewBit);
-  ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
-  tbz(&done, TMP, target::UntaggedObject::kOldAndNotRememberedBit);
-  Stop("Write barrier is required");
-  Bind(&done);
-#endif  // defined(DEBUG)
-  // No store buffer update.
-}
-#endif
-
-void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
-                                               int32_t offset,
-                                               Register value,
-                                               MemoryOrder memory_order) {
-  if (memory_order == kRelease) {
-    StoreRelease(value, object, offset);
-  } else if (FieldAddress::CanHoldOffset(offset)) {
-    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
-  } else {
-    AddImmediate(TMP, object, offset - kHeapObjectTag);
-    StoreIntoObjectNoBarrier(object, Address(TMP), value);
-  }
-}
-
-#if defined(DART_COMPRESSED_POINTERS)
-void Assembler::StoreCompressedIntoObjectOffsetNoBarrier(
-    Register object,
-    int32_t offset,
-    Register value,
-    MemoryOrder memory_order) {
-  if (memory_order == kRelease) {
-    StoreReleaseCompressed(value, object, offset);
-  } else if (FieldAddress::CanHoldOffset(offset)) {
-    StoreCompressedIntoObjectNoBarrier(object, FieldAddress(object, offset),
-                                       value);
-  } else {
-    AddImmediate(TMP, object, offset - kHeapObjectTag);
-    StoreCompressedIntoObjectNoBarrier(object, Address(TMP), value);
-  }
-}
-#endif
-
-void Assembler::StoreIntoObjectNoBarrier(Register object,
-                                         const Address& dest,
-                                         const Object& value,
-                                         MemoryOrder memory_order) {
-  RELEASE_ASSERT(memory_order == kRelaxedNonAtomic);
-  ASSERT(IsOriginalObject(value));
-  DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
-  if (IsSameObject(compiler::NullObject(), value)) {
-    str(NULL_REG, dest);
-  } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
-    str(ZR, dest);
-  } else {
-    LoadObject(TMP2, value);
-    str(TMP2, dest);
-  }
-}
-
-#if defined(DART_COMPRESSED_POINTERS)
-void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
-                                                   const Address& dest,
-                                                   const Object& value,
-                                                   MemoryOrder memory_order) {
-  // stlr does not feature an address operand.
-  RELEASE_ASSERT(memory_order == kRelaxedNonAtomic);
-  ASSERT(IsOriginalObject(value));
-  DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
-  // No store buffer update.
-  if (IsSameObject(compiler::NullObject(), value)) {
-    str(NULL_REG, dest, kObjectBytes);
-  } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
-    str(ZR, dest, kObjectBytes);
-  } else {
-    LoadObject(TMP2, value);
-    str(TMP2, dest, kObjectBytes);
-  }
-}
-#endif
-
-void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
-                                               int32_t offset,
-                                               const Object& value,
-                                               MemoryOrder memory_order) {
-  if (memory_order == kRelease) {
-    Register value_reg = TMP2;
-    if (IsSameObject(compiler::NullObject(), value)) {
-      value_reg = NULL_REG;
-    } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
-      value_reg = ZR;
-    } else {
-      LoadObject(value_reg, value);
-    }
-    StoreIntoObjectOffsetNoBarrier(object, offset, value_reg, memory_order);
-  } else if (FieldAddress::CanHoldOffset(offset)) {
-    StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
-  } else {
-    AddImmediate(TMP, object, offset - kHeapObjectTag);
-    StoreIntoObjectNoBarrier(object, Address(TMP), value);
-  }
-}
-
-#if defined(DART_COMPRESSED_POINTERS)
-void Assembler::StoreCompressedIntoObjectOffsetNoBarrier(
-    Register object,
-    int32_t offset,
-    const Object& value,
-    MemoryOrder memory_order) {
-  Register value_reg = TMP2;
-  if (memory_order == kRelease) {
-    if (IsSameObject(compiler::NullObject(), value)) {
-      value_reg = NULL_REG;
-    } else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
-      value_reg = ZR;
-    } else {
-      LoadObject(value_reg, value);
-    }
-    StoreCompressedIntoObjectOffsetNoBarrier(object, offset, value_reg,
-                                             memory_order);
-  } else if (FieldAddress::CanHoldOffset(offset)) {
-    StoreCompressedIntoObjectNoBarrier(object, FieldAddress(object, offset),
-                                       value);
-  } else {
-    AddImmediate(TMP, object, offset - kHeapObjectTag);
-    StoreCompressedIntoObjectNoBarrier(object, Address(TMP), value);
-  }
-}
-#endif
-
 void Assembler::StoreInternalPointer(Register object,
                                      const Address& dest,
                                      Register value) {
@@ -524,12 +524,13 @@ class Assembler : public AssemblerBase {
 #endif
 
   void LoadAcquire(Register dst,
-                   Register address,
-                   int32_t offset = 0,
+                   const Address& address,
                    OperandSize size = kEightBytes) override {
-    Register src = address;
-    if (offset != 0) {
-      AddImmediate(TMP2, address, offset);
+    // ldar does not feature an address operand.
+    ASSERT(address.type() == Address::AddressType::Offset);
+    Register src = address.base();
+    if (address.offset() != 0) {
+      AddImmediate(TMP2, src, address.offset());
       src = TMP2;
     }
     ldar(dst, src, size);
@@ -539,44 +540,28 @@ class Assembler : public AssemblerBase {
   }
 
 #if defined(DART_COMPRESSED_POINTERS)
-  void LoadAcquireCompressed(Register dst,
-                             Register address,
-                             int32_t offset = 0) override {
-    LoadAcquire(dst, address, offset, kObjectBytes);
+  void LoadAcquireCompressed(Register dst, const Address& address) override {
+    LoadAcquire(dst, address, kObjectBytes);
     add(dst, dst, Operand(HEAP_BITS, LSL, 32));
   }
 #endif
 
   void StoreRelease(Register src,
-                    Register address,
-                    int32_t offset = 0) override {
-    Register kDestReg = address;
-    if (offset != 0) {
-      kDestReg = TMP;
-      AddImmediate(kDestReg, address, offset);
+                    const Address& address,
+                    OperandSize size = kEightBytes) override {
+    // stlr does not feature an address operand.
+    ASSERT(address.type() == Address::AddressType::Offset);
+    Register dst = address.base();
+    if (address.offset() != 0) {
+      AddImmediate(TMP2, dst, address.offset());
+      dst = TMP2;
     }
-    stlr(src, kDestReg);
+    stlr(src, dst, size);
 #if defined(TARGET_USES_THREAD_SANITIZER)
-    TsanStoreRelease(kDestReg);
+    TsanStoreRelease(dst);
 #endif
   }
 
-#if defined(DART_COMPRESSED_POINTERS)
-  void StoreReleaseCompressed(Register src,
-                              Register address,
-                              int32_t offset = 0) override {
-    Register kResultReg = address;
-    if (offset != 0) {
-      kResultReg = TMP;
-      AddImmediate(kResultReg, address, offset);
-    }
-    stlr(src, kResultReg, kObjectBytes);
-#if defined(TARGET_USES_THREAD_SANITIZER)
-    TsanStoreRelease(kResultReg);
-#endif
-  }
-#endif
-
   void CompareWithMemoryValue(Register value,
                               Address address,
                               OperandSize sz = kEightBytes) override {
@@ -1956,108 +1941,25 @@ class Assembler : public AssemblerBase {
 
 #if defined(DART_COMPRESSED_POINTERS)
   void LoadCompressed(Register dest, const Address& slot) override;
-  void LoadCompressedFromOffset(Register dest,
-                                Register base,
-                                int32_t offset) override;
 #endif
 
-  // Store into a heap object and apply the generational and incremental write
-  // barriers. All stores into heap objects must pass through this function or,
-  // if the value can be proven either Smi or old-and-premarked, its NoBarrier
-  // variants.
-  // Preserves object and value registers.
-  void StoreIntoObject(Register object,
-                       const Address& dest,
-                       Register value,
-                       CanBeSmi can_value_be_smi = kValueCanBeSmi,
-                       MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#if defined(DART_COMPRESSED_POINTERS)
-  void StoreCompressedIntoObject(
-      Register object,
-      const Address& dest,
-      Register value,
-      CanBeSmi can_value_be_smi = kValueCanBeSmi,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#endif
-  void StoreBarrier(Register object, Register value, CanBeSmi can_value_be_smi);
-  void StoreIntoArray(Register object,
-                      Register slot,
-                      Register value,
-                      CanBeSmi can_value_be_smi = kValueCanBeSmi) override;
-#if defined(DART_COMPRESSED_POINTERS)
-  void StoreCompressedIntoArray(
-      Register object,
-      Register slot,
-      Register value,
-      CanBeSmi can_value_be_smi = kValueCanBeSmi) override;
-#endif
-  void StoreIntoArrayBarrier(Register object,
-                             Register slot,
-                             Register value,
-                             CanBeSmi can_value_be_smi);
+  void StoreBarrier(Register object,
+                    Register value,
+                    CanBeSmi can_value_be_smi,
+                    Register scratch) override;
+  void ArrayStoreBarrier(Register object,
+                         Register slot,
+                         Register value,
+                         CanBeSmi can_value_be_smi,
+                         Register scratch) override;
+  void VerifyStoreNeedsNoWriteBarrier(Register object, Register value) override;
 
-  void StoreIntoObjectOffset(
-      Register object,
-      int32_t offset,
-      Register value,
-      CanBeSmi can_value_be_smi = kValueCanBeSmi,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#if defined(DART_COMPRESSED_POINTERS)
-  void StoreCompressedIntoObjectOffset(
-      Register object,
-      int32_t offset,
-      Register value,
-      CanBeSmi can_value_be_smi = kValueCanBeSmi,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#endif
-  void StoreIntoObjectNoBarrier(
-      Register object,
-      const Address& dest,
-      Register value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#if defined(DART_COMPRESSED_POINTERS)
-  void StoreCompressedIntoObjectNoBarrier(
-      Register object,
-      const Address& dest,
-      Register value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#endif
-  void StoreIntoObjectOffsetNoBarrier(
-      Register object,
-      int32_t offset,
-      Register value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#if defined(DART_COMPRESSED_POINTERS)
-  void StoreCompressedIntoObjectOffsetNoBarrier(
-      Register object,
-      int32_t offset,
-      Register value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#endif
-  void StoreIntoObjectNoBarrier(
-      Register object,
-      const Address& dest,
-      const Object& value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#if defined(DART_COMPRESSED_POINTERS)
-  void StoreCompressedIntoObjectNoBarrier(
-      Register object,
-      const Address& dest,
-      const Object& value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#endif
-  void StoreIntoObjectOffsetNoBarrier(
-      Register object,
-      int32_t offset,
-      const Object& value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#if defined(DART_COMPRESSED_POINTERS)
-  void StoreCompressedIntoObjectOffsetNoBarrier(
-      Register object,
-      int32_t offset,
-      const Object& value,
-      MemoryOrder memory_order = kRelaxedNonAtomic) override;
-#endif
+  void StoreObjectIntoObjectNoBarrier(
+      Register object,
+      const Address& address,
+      const Object& value,
+      MemoryOrder memory_order = kRelaxedNonAtomic,
+      OperandSize size = kWordBytes) override;
 
   // Stores a non-tagged value into a heap object.
   void StoreInternalPointer(Register object,
@@ -7568,8 +7568,8 @@ ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire, assembler) {
   __ Push(R1);
   __ mov(R1, R0);
   __ LoadImmediate(R0, 0);
-  __ StoreRelease(R1, SP, 0);
-  __ LoadAcquire(R0, SP, 0);
+  __ StoreReleaseToOffset(R1, SP, 0);
+  __ LoadAcquireFromOffset(R0, SP, 0);
   __ Pop(R1);
   __ Pop(R1);
   __ RestoreCSP();
@@ -7604,8 +7604,8 @@ ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire1024, assembler) {
   __ mov(R1, R0);
   __ LoadImmediate(R0, 0);
   __ sub(SP, SP, Operand(1024 * target::kWordSize));
-  __ StoreRelease(R1, SP, 1024);
-  __ LoadAcquire(R0, SP, 1024);
+  __ StoreReleaseToOffset(R1, SP, 1024);
+  __ LoadAcquireFromOffset(R0, SP, 1024);
   __ add(SP, SP, Operand(1024 * target::kWordSize));
   __ Pop(R1);
   __ Pop(R1);
@@ -7626,8 +7626,8 @@ ASSEMBLER_TEST_RUN(StoreReleaseLoadAcquire1024, test) {
       "mov r1, r0\n"
       "movz r0, #0x0\n"
       "sub sp, sp, #0x2000\n"
-      "add tmp, sp, #0x400\n"
-      "stlr r1, tmp\n"
+      "add tmp2, sp, #0x400\n"
+      "stlr r1, tmp2\n"
       "add tmp2, sp, #0x400\n"
       "ldar r0, tmp2\n"
       "add sp, sp, #0x2000\n"
@@ -40,15 +40,20 @@ void AssemblerBase::LoadFromSlot(Register dst,
     // fit into a register.
     ASSERT(RepresentationUtils::ValueSize(slot.representation()) <=
            compiler::target::kWordSize);
-    const intptr_t offset = slot.offset_in_bytes() - kHeapObjectTag;
     auto const sz = RepresentationUtils::OperandSize(slot.representation());
-    LoadFromOffset(dst, base, offset, sz);
-  } else if (!slot.is_compressed()) {
-    LoadFieldFromOffset(dst, base, slot.offset_in_bytes());
-  } else if (slot.type().ToCid() == kSmiCid) {
-    LoadCompressedSmiFieldFromOffset(dst, base, slot.offset_in_bytes());
+    LoadFieldFromOffset(dst, base, slot.offset_in_bytes(), sz);
+  } else if (!slot.is_guarded_field() && slot.type().ToCid() == kSmiCid) {
+    if (slot.is_compressed()) {
+      LoadCompressedSmiFieldFromOffset(dst, base, slot.offset_in_bytes());
+    } else {
+      LoadSmiFieldFromOffset(dst, base, slot.offset_in_bytes());
+    }
   } else {
-    LoadCompressedFieldFromOffset(dst, base, slot.offset_in_bytes());
+    if (slot.is_compressed()) {
+      LoadCompressedFieldFromOffset(dst, base, slot.offset_in_bytes());
+    } else {
+      LoadFieldFromOffset(dst, base, slot.offset_in_bytes());
+    }
   }
 }
@@ -65,16 +70,17 @@ void AssemblerBase::StoreToSlot(Register src,
                                 Register base,
                                 const Slot& slot,
                                 CanBeSmi can_be_smi,
-                                MemoryOrder memory_order) {
+                                MemoryOrder memory_order,
+                                Register scratch) {
   if (slot.is_unboxed()) {
     // Same as the no barrier case.
     StoreToSlotNoBarrier(src, base, slot, memory_order);
   } else if (slot.is_compressed()) {
     StoreCompressedIntoObjectOffset(base, slot.offset_in_bytes(), src,
-                                    can_be_smi, memory_order);
+                                    can_be_smi, memory_order, scratch);
   } else {
     StoreIntoObjectOffset(base, slot.offset_in_bytes(), src, can_be_smi,
-                          memory_order);
+                          memory_order, scratch);
   }
 }
@@ -90,9 +96,8 @@ void AssemblerBase::StoreToSlotNoBarrier(Register src,
     // fit into a register.
     ASSERT(RepresentationUtils::ValueSize(slot.representation()) <=
            compiler::target::kWordSize);
-    const intptr_t offset = slot.offset_in_bytes() - kHeapObjectTag;
     auto const sz = RepresentationUtils::OperandSize(slot.representation());
-    StoreToOffset(src, base, offset, sz);
+    StoreFieldToOffset(src, base, slot.offset_in_bytes(), sz);
   } else if (slot.is_compressed()) {
     StoreCompressedIntoObjectOffsetNoBarrier(base, slot.offset_in_bytes(), src,
                                              memory_order);
@@ -121,66 +126,158 @@ void AssemblerBase::LoadField(Register dst,
                               OperandSize sz) {
   Load(dst, address, sz);
 }
+void AssemblerBase::LoadFieldFromOffset(Register dst,
+                                        Register base,
+                                        int32_t offset,
+                                        OperandSize sz) {
+  Load(dst, FieldAddress(base, offset), sz);
+}
+
+void AssemblerBase::StoreFieldToOffset(Register src,
+                                       Register base,
+                                       int32_t offset,
+                                       OperandSize sz) {
+  Store(src, FieldAddress(base, offset), sz);
+}
+
+void AssemblerBase::LoadSmiField(Register dst, const FieldAddress& address) {
+  LoadSmi(dst, address);
+}
+void AssemblerBase::LoadSmiFromOffset(Register dst,
+                                      Register base,
+                                      int32_t offset) {
+  LoadSmi(dst, Address(base, offset));
+}
+void AssemblerBase::LoadSmiFieldFromOffset(Register dst,
+                                           Register base,
+                                           int32_t offset) {
+  LoadSmi(dst, FieldAddress(base, offset));
+}
+
+#if defined(DART_COMPRESSED_POINTERS)
+void AssemblerBase::LoadAcquireCompressedFromOffset(Register dst,
+                                                    Register base,
+                                                    int32_t offset) {
+  LoadAcquireCompressed(dst, Address(base, offset));
+}
+void AssemblerBase::LoadCompressedField(Register dst,
+                                        const FieldAddress& address) {
+  LoadCompressed(dst, address);
+}
+void AssemblerBase::LoadCompressedFromOffset(Register dst,
+                                             Register base,
+                                             int32_t offset) {
+  LoadCompressed(dst, Address(base, offset));
+}
+void AssemblerBase::LoadCompressedFieldFromOffset(Register dst,
+                                                  Register base,
+                                                  int32_t offset) {
+  LoadCompressed(dst, FieldAddress(base, offset));
+}
+void AssemblerBase::LoadCompressedSmiField(Register dst,
+                                           const FieldAddress& address) {
+  LoadCompressedSmi(dst, address);
+}
+void AssemblerBase::LoadCompressedSmiFromOffset(Register dst,
+                                                Register base,
+                                                int32_t offset) {
+  LoadCompressedSmi(dst, Address(base, offset));
+}
+void AssemblerBase::LoadCompressedSmiFieldFromOffset(Register dst,
+                                                     Register base,
+                                                     int32_t offset) {
+  LoadCompressedSmi(dst, FieldAddress(base, offset));
+}
+#endif
+
+void AssemblerBase::LoadAcquireFromOffset(Register dst,
+                                          Register base,
+                                          int32_t offset,
+                                          OperandSize size) {
+  LoadAcquire(dst, Address(base, offset), size);
+}
+void AssemblerBase::StoreReleaseToOffset(Register src,
+                                         Register base,
+                                         int32_t offset,
+                                         OperandSize size) {
+  StoreRelease(src, Address(base, offset), size);
+}
+
+void AssemblerBase::StoreIntoObject(Register object,
+                                    const Address& address,
+                                    Register value,
+                                    CanBeSmi can_be_smi,
+                                    MemoryOrder memory_order,
+                                    Register scratch,
+                                    OperandSize size) {
+  // A write barrier should never be applied when writing a reference to an
+  // object into itself.
+  ASSERT(object != value);
+  ASSERT(object != scratch);
+  ASSERT(value != scratch);
+  if (memory_order == kRelease) {
+    StoreRelease(value, address, size);
+  } else {
+    Store(value, address, size);
+  }
+  StoreBarrier(object, value, can_be_smi, scratch);
+}
+
+void AssemblerBase::StoreIntoObjectNoBarrier(Register object,
+                                             const Address& address,
+                                             Register value,
+                                             MemoryOrder memory_order,
+                                             OperandSize size) {
+  if (memory_order == kRelease) {
+    StoreRelease(value, address, size);
+  } else {
+    Store(value, address, size);
+  }
+  DEBUG_ONLY(VerifyStoreNeedsNoWriteBarrier(object, value));
+}
+
 void AssemblerBase::StoreIntoObjectOffset(Register object,
                                           int32_t offset,
                                           Register value,
                                           CanBeSmi can_be_smi,
-                                          MemoryOrder memory_order) {
+                                          MemoryOrder memory_order,
+                                          Register scratch,
+                                          OperandSize size) {
   StoreIntoObject(object, FieldAddress(object, offset), value, can_be_smi,
-                  memory_order);
-}
-void AssemblerBase::StoreIntoObjectOffsetNoBarrier(Register object,
-                                                   int32_t offset,
-                                                   Register value,
-                                                   MemoryOrder memory_order) {
-  StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
-                           memory_order);
-}
-void AssemblerBase::StoreIntoObjectOffsetNoBarrier(Register object,
-                                                   int32_t offset,
-                                                   const Object& value,
-                                                   MemoryOrder memory_order) {
-  StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
-                           memory_order);
+                  memory_order, scratch, size);
 }
 
-#if defined(DART_COMPRESSED_POINTERS)
-void AssemblerBase::LoadCompressedFromOffset(Register dst,
-                                             Register base,
-                                             int32_t offset) {
-  LoadCompressed(dst, Address(base, offset));
-}
-void AssemblerBase::StoreCompressedIntoObjectOffset(Register object,
-                                                    int32_t offset,
-                                                    Register value,
-                                                    CanBeSmi can_be_smi,
-                                                    MemoryOrder memory_order) {
-  StoreCompressedIntoObject(object, FieldAddress(object, offset), value,
-                            can_be_smi, memory_order);
+void AssemblerBase::StoreIntoObjectOffsetNoBarrier(Register object,
+                                                   int32_t offset,
+                                                   Register value,
+                                                   MemoryOrder memory_order,
+                                                   OperandSize size) {
+  StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
+                           memory_order, size);
 }
-void AssemblerBase::StoreCompressedIntoObjectOffsetNoBarrier(
-    Register object,
-    int32_t offset,
-    Register value,
-    MemoryOrder memory_order) {
-  StoreCompressedIntoObjectNoBarrier(object, FieldAddress(object, offset),
-                                     value, memory_order);
-}
-void AssemblerBase::StoreCompressedIntoObjectOffsetNoBarrier(
+void AssemblerBase::StoreObjectIntoObjectOffsetNoBarrier(
     Register object,
     int32_t offset,
     const Object& value,
-    MemoryOrder memory_order) {
-  StoreCompressedIntoObjectNoBarrier(object, FieldAddress(object, offset),
-                                     value, memory_order);
+    MemoryOrder memory_order,
+    OperandSize size) {
+  StoreObjectIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
+                                 memory_order, size);
 }
-#endif
+
+void AssemblerBase::StoreIntoArray(Register object,
+                                   Register slot,
+                                   Register value,
+                                   CanBeSmi can_be_smi,
+                                   Register scratch,
+                                   OperandSize size) {
+  ASSERT(object != scratch);
+  ASSERT(value != object);
+  ASSERT(value != scratch);
+  ASSERT(slot != object);
+  ASSERT(slot != value);
+  ASSERT(slot != scratch);
+  Store(value, Address(slot, 0), size);
+  ArrayStoreBarrier(object, slot, value, can_be_smi, scratch);
+}
 
 void AssemblerBase::UnrolledMemCopy(Register dst_base,
                                     intptr_t dst_offset,
@@ -747,13 +747,12 @@ class AssemblerBase : public StackResource {
       Register instance,
       Register offset_in_words_as_smi) = 0;
 
-  virtual void LoadAcquire(Register reg,
-                           Register address,
-                           int32_t offset = 0,
+  virtual void LoadAcquire(Register dst,
+                           const Address& address,
                            OperandSize size = kWordBytes) = 0;
   virtual void StoreRelease(Register src,
-                            Register address,
-                            int32_t offset = 0) = 0;
+                            const Address& address,
+                            OperandSize size = kWordBytes) = 0;
 
   virtual void Load(Register dst,
                     const Address& address,
@@ -762,22 +761,40 @@ class AssemblerBase : public StackResource {
   virtual void Store(Register src,
                      const Address& address,
                      OperandSize sz = kWordBytes) = 0;
-  virtual void StoreIntoObject(
-      Register object,  // Object being stored into.
-      const Address& address,  // Offset into object.
-      Register value,  // Value being stored.
-      CanBeSmi can_be_smi = kValueCanBeSmi,
-      MemoryOrder memory_order = kRelaxedNonAtomic) = 0;
-  virtual void StoreIntoObjectNoBarrier(
-      Register object,  // Object being stored into.
-      const Address& address,  // Offset into object.
-      Register value,  // Value being stored.
-      MemoryOrder memory_order = kRelaxedNonAtomic) = 0;
+
+  // When emitting the write barrier code on IA32, either the caller must
+  // allocate a scratch register or the implementation chooses a register to
+  // save and restore and uses that as a scratch register internally.
+  // Thus, the scratch register is an additional optional argument to
+  // StoreIntoObject, StoreIntoArray, StoreIntoObjectOffset, and StoreBarrier
+  // that defaults to TMP on other architectures. (TMP is kNoRegister on IA32,
+  // so the default value invokes the correct behavior.)
+
+  // Store into a heap object and applies the appropriate write barriers.
+  // (See StoreBarrier for which are applied on a given architecture.)
+  //
+  // All stores into heap objects must pass through this function or,
+  // if the value can be proven either Smi or old-and-premarked, its NoBarrier
+  // variant. Preserves the [object] and [value] registers.
+  void StoreIntoObject(Register object,           // Object being stored into.
+                       const Address& address,    // Offset into object.
+                       Register value,            // Value being stored.
+                       CanBeSmi can_be_smi = kValueCanBeSmi,
+                       MemoryOrder memory_order = kRelaxedNonAtomic,
+                       Register scratch = TMP,
+                       OperandSize size = kWordBytes);
+
+  void StoreIntoObjectNoBarrier(Register object,  // Object being stored into.
+                                const Address& address,  // Offset into object.
+                                Register value,   // Value being stored.
+                                MemoryOrder memory_order = kRelaxedNonAtomic,
+                                OperandSize size = kWordBytes);
+  virtual void StoreObjectIntoObjectNoBarrier(
       Register object,  // Object being stored into.
       const Address& address,  // Offset into object.
       const Object& value,  // Value being stored.
-      MemoryOrder memory_order = kRelaxedNonAtomic) = 0;
+      MemoryOrder memory_order = kRelaxedNonAtomic,
+      OperandSize size = kWordBytes) = 0;
 
   virtual void LoadIndexedPayload(Register dst,
                                   Register base,
@@ -785,17 +802,24 @@ class AssemblerBase : public StackResource {
                                   Register index,
                                   ScaleFactor scale,
                                   OperandSize sz = kWordBytes) = 0;
-  virtual void StoreIntoArray(Register object,
-                              Register slot,
-                              Register value,
-                              CanBeSmi can_value_be_smi = kValueCanBeSmi) = 0;
 
-  // For virtual XFromOffset methods, the base method implementation creates an
+  // For virtual XOffset methods, the base method implementation creates an
   // appropriate address from the base register and offset and calls the
   // corresponding address-taking method. These should be overridden for
   // architectures where offsets should not be converted to addresses without
-  // additional precautions, or for when the ARM-specific Assembler needs
-  // to override with an overloaded version for the Condition argument.
+  // additional precautions, for when the ARM-specific Assembler needs
+  // to override with an overloaded version for the Condition argument,
+  // or for when the IA32-specific Assembler needs to override with an
+  // overloaded version for adding a scratch register argument.
+
+  void LoadAcquireFromOffset(Register dst,
+                             Register base,
+                             int32_t offset = 0,
+                             OperandSize size = kWordBytes);
+  void StoreReleaseToOffset(Register src,
+                            Register base,
+                            int32_t offset = 0,
+                            OperandSize size = kWordBytes);
 
   virtual void LoadFromOffset(Register dst,
                               Register base,
@@ -812,17 +836,21 @@ class AssemblerBase : public StackResource {
       int32_t offset,       // Offset into object.
      Register value,       // Value being stored.
       CanBeSmi can_be_smi = kValueCanBeSmi,
-      MemoryOrder memory_order = kRelaxedNonAtomic);
+      MemoryOrder memory_order = kRelaxedNonAtomic,
+      Register scratch = TMP,
+      OperandSize size = kWordBytes);
   virtual void StoreIntoObjectOffsetNoBarrier(
       Register object,      // Object being stored into.
       int32_t offset,       // Offset into object.
       Register value,       // Value being stored.
-      MemoryOrder memory_order = kRelaxedNonAtomic);
-  virtual void StoreIntoObjectOffsetNoBarrier(
+      MemoryOrder memory_order = kRelaxedNonAtomic,
+      OperandSize size = kWordBytes);
+  void StoreObjectIntoObjectOffsetNoBarrier(
       Register object,      // Object being stored into.
       int32_t offset,       // Offset into object.
       const Object& value,  // Value being stored.
-      MemoryOrder memory_order = kRelaxedNonAtomic);
+      MemoryOrder memory_order = kRelaxedNonAtomic,
+      OperandSize size = kWordBytes);
 
   void LoadField(Register dst,
                  const FieldAddress& address,
@@ -830,17 +858,14 @@ class AssemblerBase : public StackResource {
   virtual void LoadFieldFromOffset(Register dst,
                                    Register base,
                                    int32_t offset,
-                                   OperandSize sz = kWordBytes) {
-    LoadFromOffset(dst, base, offset - kHeapObjectTag, sz);
-  }
+                                   OperandSize sz = kWordBytes);
 
   // Does not use write barriers, use StoreIntoObjectOffset instead for
   // boxed fields.
   virtual void StoreFieldToOffset(Register src,
                                   Register base,
                                   int32_t offset,
-                                  OperandSize sz = kWordBytes) {
-    StoreToOffset(src, base, offset - kHeapObjectTag, sz);
-  }
+                                  OperandSize sz = kWordBytes);
 
   // Loads a Smi. In DEBUG mode, also checks that the loaded value is a Smi and
   // halts if not.
@@ -849,112 +874,47 @@ class AssemblerBase : public StackResource {
     DEBUG_ONLY(VerifySmi(dst));
   }
 
+  // Loads a Smi field from a Dart object. In DEBUG mode, also checks that the
+  // loaded value is a Smi and halts if not.
+  void LoadSmiField(Register dst, const FieldAddress& address);
+
   // Loads a Smi. In DEBUG mode, also checks that the loaded value is a Smi and
   // halts if not.
-  void LoadSmiFromOffset(Register dst, Register base, int32_t offset) {
-    LoadFromOffset(dst, base, offset);
-    DEBUG_ONLY(VerifySmi(dst));
-  }
+  void LoadSmiFromOffset(Register dst, Register base, int32_t offset);
+
+  // Loads a Smi field from a Dart object. In DEBUG mode, also checks that the
+  // loaded value is a Smi and halts if not.
+  void LoadSmiFieldFromOffset(Register dst, Register base, int32_t offset);
 
 #if defined(DART_COMPRESSED_POINTERS)
-  // Add pure virtual methods for methods that take addresses.
+  // These are the base methods that all other compressed methods delegate to.
   //
-  // Since the methods are only virtual when using compressed pointers, the
-  // overriding definitions must be guarded by the appropriate #ifdef.
+  // For the virtual methods, they are only virtual when using compressed
+  // pointers, so the overriding definitions must be guarded with an #ifdef.
 
   virtual void LoadCompressedFieldAddressForRegOffset(
       Register address,
       Register instance,
       Register offset_in_words_as_smi) = 0;
 
-  virtual void LoadAcquireCompressed(Register dst,
-                                     Register address,
-                                     int32_t offset = 0) = 0;
-  virtual void StoreReleaseCompressed(Register src,
-                                      Register address,
-                                      int32_t offset = 0) = 0;
+  virtual void LoadAcquireCompressed(Register dst, const Address& address) = 0;
 
   virtual void LoadCompressed(Register dst, const Address& address) = 0;
 
   // There is no StoreCompressed because only Dart objects contain compressed
   // pointers and compressed pointers may require write barriers, so
   // StoreCompressedIntoObject should be used instead.
 
-  virtual void StoreCompressedIntoObject(
-      Register object,  // Object being stored into.
-      const Address& address,  // Address to store the value at.
-      Register value,  // Value being stored.
-      CanBeSmi can_be_smi = kValueCanBeSmi,
-      MemoryOrder memory_order = kRelaxedNonAtomic) = 0;
-  virtual void StoreCompressedIntoObjectNoBarrier(
-      Register object,  // Object being stored into.
-      const Address& address,  // Address to store the value at.
-      Register value,  // Value being stored.
-      MemoryOrder memory_order = kRelaxedNonAtomic) = 0;
-  virtual void StoreCompressedIntoObjectNoBarrier(
-      Register object,  // Object being stored into.
-      const Address& address,  // Address to store the value at.
-      const Object& value,  // Value being stored.
-      MemoryOrder memory_order = kRelaxedNonAtomic) = 0;
-
   virtual void LoadIndexedCompressed(Register dst,
                                      Register base,
                                      int32_t offset,
                                      Register index) = 0;
-  virtual void StoreCompressedIntoArray(
-      Register object,
-      Register slot,
-      Register value,
-      CanBeSmi can_value_be_smi = kValueCanBeSmi) = 0;
-
-  // Add a base virtual method for methods that take offsets which convert
-  // the base register and offset into an address appropriately.
-  //
-  // The latter should be overridden for architectures where offsets should not
-  // be converted to addresses without additional precautions.
-  //
-  // Since the methods are only virtual when using compressed pointers, the
-  // overriding definitions must be guarded by the appropriate #ifdef.
-
-  virtual void LoadCompressedFromOffset(Register dst,
-                                        Register base,
-                                        int32_t offset);
-  virtual void StoreCompressedIntoObjectOffset(
-      Register object,      // Object being stored into.
-      int32_t offset,       // Offset into object.
-      Register value,       // Value being stored.
-      CanBeSmi can_be_smi = kValueCanBeSmi,
-      MemoryOrder memory_order = kRelaxedNonAtomic);
-  virtual void StoreCompressedIntoObjectOffsetNoBarrier(
-      Register object,      // Object being stored into.
-      int32_t offset,       // Offset into object.
-      Register value,       // Value being stored.
-      MemoryOrder memory_order = kRelaxedNonAtomic);
-  virtual void StoreCompressedIntoObjectOffsetNoBarrier(
-      Register object,      // Object being stored into.
-      int32_t offset,       // Offset into object.
-      const Object& value,  // Value being stored.
-      MemoryOrder memory_order = kRelaxedNonAtomic);
-
-  // Since loading Smis just involves zero extension instead of adjusting the
-  // high bits to be heap bits, these are non-virtual.
-
-  // Loads a Smi, handling sign extension appropriately when compressed.
-  // In DEBUG mode, also checks that the loaded value is a Smi and halts if not.
+  // Loads a compressed Smi. In DEBUG mode, also checks that the loaded value is
+  // a Smi and halts if not.
   void LoadCompressedSmi(Register dst, const Address& address) {
     Load(dst, address, kUnsignedFourBytes);  // Zero extension.
     DEBUG_ONLY(VerifySmi(dst);)
   }
-
-  // Loads a Smi, handling sign extension appropriately when compressed.
-  // In DEBUG mode, also checks that the loaded value is a Smi and halts if not.
-  void LoadCompressedSmiFromOffset(Register dst,
-                                   Register base,
-                                   int32_t offset) {
-    LoadFromOffset(dst, base, offset, kUnsignedFourBytes);  // Zero extension.
-    DEBUG_ONLY(VerifySmi(dst);)
-  }
 #else
   // These are the base methods that all other compressed methods delegate to.
   //
   // The methods are non-virtual and forward to the uncompressed versions.
|
||||
|
||||
void LoadCompressedFieldAddressForRegOffset(Register address,
|
||||
|
@ -963,115 +923,125 @@ class AssemblerBase : public StackResource {
|
|||
LoadFieldAddressForRegOffset(address, instance, offset_in_words_as_smi);
|
||||
}
|
||||
|
||||
void LoadAcquireCompressed(Register dst,
|
||||
Register address,
|
||||
int32_t offset = 0) {
|
||||
LoadAcquire(dst, address, offset);
|
||||
}
|
||||
void StoreReleaseCompressed(Register src,
|
||||
Register address,
|
||||
int32_t offset = 0) {
|
||||
StoreRelease(src, address, offset);
|
||||
void LoadAcquireCompressed(Register dst, const Address& address) {
|
||||
LoadAcquire(dst, address);
|
||||
}
|
||||
|
||||
void LoadCompressed(Register dst, const Address& address) {
|
||||
Load(dst, address);
|
||||
}
|
||||
|
||||
// There is no StoreCompressed because only Dart objects contain compressed
|
||||
// pointers, so StoreCompressedIntoObject should be used instead.
|
||||
|
||||
void StoreCompressedIntoObject(
|
||||
Register object, // Object being stored into.
|
||||
const Address& address, // Address to store the value at.
|
||||
Register value, // Value being stored.
|
||||
CanBeSmi can_be_smi = kValueCanBeSmi,
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic) {
|
||||
StoreIntoObject(object, address, value, can_be_smi, memory_order);
|
||||
}
|
||||
void StoreCompressedIntoObjectNoBarrier(
|
||||
Register object, // Object being stored into.
|
||||
const Address& address, // Address to store the value at.
|
||||
Register value, // Value being stored.
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic) {
|
||||
StoreIntoObjectNoBarrier(object, address, value, memory_order);
|
||||
}
|
||||
void StoreCompressedIntoObjectNoBarrier(
|
||||
Register object, // Object being stored into.
|
||||
const Address& address, // Address to store the value at.
|
||||
const Object& value, // Value being stored.
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic) {
|
||||
StoreIntoObjectNoBarrier(object, address, value, memory_order);
|
||||
}
|
||||
|
||||
void LoadIndexedCompressed(Register dst,
|
||||
Register base,
|
||||
int32_t offset,
|
||||
Register index) {
|
||||
LoadIndexedPayload(dst, base, offset, index, TIMES_WORD_SIZE, kWordBytes);
|
||||
}
|
||||
void StoreCompressedIntoArray(Register object,
|
||||
Register slot,
|
||||
Register value,
|
||||
CanBeSmi can_value_be_smi = kValueCanBeSmi) {
|
||||
StoreIntoArray(object, slot, value, can_value_be_smi);
|
||||
|
||||
// Loads a compressed Smi. In DEBUG mode, also checks that the loaded value is
|
||||
// a Smi and halts if not.
|
||||
void LoadCompressedSmi(Register dst, const Address& address) {
|
||||
LoadSmi(dst, address);
|
||||
}
|
||||
#endif // defined(DART_COMPRESSED_POINTERS)
|
||||
|
||||
// Compressed store methods are implemented in AssemblerBase, as the only
|
||||
// difference is whether the entire word is stored or just the low bits.
|
||||
|
||||
void StoreReleaseCompressed(Register src, const Address& address) {
|
||||
StoreRelease(src, address, kObjectBytes);
|
||||
}
|
||||
void StoreReleaseCompressedToOffset(Register src,
|
||||
Register base,
|
||||
int32_t offset = 0) {
|
||||
StoreReleaseToOffset(src, base, offset, kObjectBytes);
|
||||
}
|
||||
|
||||
void LoadCompressedFromOffset(Register dst, Register base, int32_t offset) {
|
||||
LoadFromOffset(dst, base, offset);
|
||||
void StoreCompressedIntoObject(
|
||||
Register object, // Object being stored into.
|
||||
const Address& address, // Address to store the value at.
|
||||
Register value, // Value being stored.
|
||||
CanBeSmi can_be_smi = kValueCanBeSmi,
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic,
|
||||
Register scratch = TMP) {
|
||||
StoreIntoObject(object, address, value, can_be_smi, memory_order, TMP,
|
||||
kObjectBytes);
|
||||
}
|
||||
void StoreCompressedIntoObjectNoBarrier(
|
||||
Register object, // Object being stored into.
|
||||
const Address& address, // Address to store the value at.
|
||||
Register value, // Value being stored.
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic) {
|
||||
StoreIntoObjectNoBarrier(object, address, value, memory_order,
|
||||
kObjectBytes);
|
||||
}
|
||||
virtual void StoreCompressedObjectIntoObjectNoBarrier(
|
||||
Register object, // Object being stored into.
|
||||
const Address& address, // Address to store the value at.
|
||||
const Object& value, // Value being stored.
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic) {
|
||||
StoreObjectIntoObjectNoBarrier(object, address, value, memory_order,
|
||||
kObjectBytes);
|
||||
}
|
||||
|
||||
void StoreCompressedIntoObjectOffset(
|
||||
Register object, // Object being stored into.
|
||||
int32_t offset, // Offset into object.
|
||||
Register value, // Value being stored.
|
||||
CanBeSmi can_be_smi = kValueCanBeSmi,
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic) {
|
||||
StoreIntoObjectOffset(object, offset, value, can_be_smi, memory_order);
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic,
|
||||
Register scratch = TMP) {
|
||||
StoreIntoObjectOffset(object, offset, value, can_be_smi, memory_order, TMP,
|
||||
kObjectBytes);
|
||||
}
|
||||
void StoreCompressedIntoObjectOffsetNoBarrier(
|
||||
Register object, // Object being stored into.
|
||||
int32_t offset, // Offset into object.
|
||||
Register value, // Value being stored.
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic) {
|
||||
StoreIntoObjectOffsetNoBarrier(object, offset, value, memory_order);
|
||||
StoreIntoObjectOffsetNoBarrier(object, offset, value, memory_order,
|
||||
kObjectBytes);
|
||||
}
|
||||
void StoreCompressedIntoObjectOffsetNoBarrier(
|
||||
void StoreCompressedObjectIntoObjectOffsetNoBarrier(
|
||||
Register object, // Object being stored into.
|
||||
int32_t offset, // Offset into object.
|
||||
const Object& value, // Value being stored.
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic) {
|
||||
StoreIntoObjectOffsetNoBarrier(object, offset, value, memory_order);
|
||||
StoreObjectIntoObjectOffsetNoBarrier(object, offset, value, memory_order,
|
||||
kObjectBytes);
|
||||
}
|
||||
|
||||
// Loads a Smi, handling sign extension appropriately when compressed.
|
||||
// In DEBUG mode, also checks that the loaded value is a Smi and halts if not.
|
||||
void LoadCompressedSmi(Register dst, const Address& address) {
|
||||
LoadSmi(dst, address);
|
||||
void StoreIntoArray(Register object,
|
||||
Register slot,
|
||||
Register value,
|
||||
CanBeSmi can_value_be_smi = kValueCanBeSmi,
|
||||
Register scratch = TMP,
|
||||
OperandSize size = kWordBytes);
|
||||
void StoreCompressedIntoArray(Register object,
|
||||
Register slot,
|
||||
Register value,
|
||||
CanBeSmi can_value_be_smi = kValueCanBeSmi,
|
||||
Register scratch = TMP) {
|
||||
StoreIntoArray(object, slot, value, can_value_be_smi, scratch,
|
||||
kObjectBytes);
|
||||
}
|
||||
|
||||
// Loads a Smi, handling sign extension appropriately when compressed.
|
||||
// In DEBUG mode, also checks that the loaded value is a Smi and halts if not.
|
||||
void LoadCompressedSmiFromOffset(Register dst,
|
||||
Register base,
|
||||
int32_t offset) {
|
||||
LoadSmiFromOffset(dst, base, offset);
|
||||
}
|
||||
#endif // defined(DART_COMPRESSED_POINTERS)
|
||||
|
||||
// These methods just delegate to the non-Field classes, either passing
|
||||
// along a FieldAddress as the Address or adjusting the offset appropriately.
|
||||
|
||||
void LoadAcquireCompressedFromOffset(Register dst,
|
||||
Register base,
|
||||
int32_t offset);
|
||||
void LoadCompressedField(Register dst, const FieldAddress& address);
|
||||
void LoadCompressedFromOffset(Register dst, Register base, int32_t offset);
|
||||
void LoadCompressedFieldFromOffset(Register dst,
|
||||
Register base,
|
||||
int32_t offset) {
|
||||
LoadCompressedFromOffset(dst, base, offset - kHeapObjectTag);
|
||||
}
|
||||
int32_t offset);
|
||||
void LoadCompressedSmiField(Register dst, const FieldAddress& address);
|
||||
void LoadCompressedSmiFromOffset(Register dst, Register base, int32_t offset);
|
||||
void LoadCompressedSmiFieldFromOffset(Register dst,
|
||||
Register base,
|
||||
int32_t offset) {
|
||||
LoadCompressedSmiFromOffset(dst, base, offset - kHeapObjectTag);
|
||||
}
|
||||
int32_t offset);
|
||||
|
||||
// There are no StoreCompressedField methods because only Dart objects contain
|
||||
// compressed pointers and compressed pointers may require write barriers, so
|
||||
|
@ -1082,7 +1052,8 @@ class AssemblerBase : public StackResource {
|
|||
Register base,
|
||||
const Slot& slot,
|
||||
CanBeSmi can_be_smi,
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic);
|
||||
MemoryOrder memory_order = kRelaxedNonAtomic,
|
||||
Register scratch = TMP);
|
||||
void StoreToSlotNoBarrier(Register src,
|
||||
Register base,
|
||||
const Slot& slot,
|
||||
|
@ -1257,6 +1228,31 @@ class AssemblerBase : public StackResource {
|
|||
intptr_t unchecked_entry_offset_ = 0;
|
||||
|
||||
private:
|
||||
// Apply the generational write barrier on all architectures and incremental
|
||||
// write barrier on non-IA32 architectures.
|
||||
//
|
||||
// On IA32, since the incremental write barrier is not applied,
|
||||
// concurrent marking cannot be enabled.
|
||||
virtual void StoreBarrier(Register object, // Object being stored into.
|
||||
Register value, // Value being stored.
|
||||
CanBeSmi can_be_smi,
|
||||
Register scratch) = 0;
|
||||
|
||||
// Apply the generational write barrier on all architectures and incremental
|
||||
// write barrier on non-IA32 architectures when storing into an array.
|
||||
//
|
||||
// On IA32, since the incremental write barrier is not applied,
|
||||
// concurrent marking cannot be enabled.
|
||||
virtual void ArrayStoreBarrier(Register object, // Object being stored into.
|
||||
Register slot, // Slot being stored into.
|
||||
Register value, // Value being stored.
|
||||
CanBeSmi can_be_smi,
|
||||
Register scratch) = 0;
|
||||
|
||||
// Checks that storing [value] into [object] does not require a write barrier.
|
||||
virtual void VerifyStoreNeedsNoWriteBarrier(Register object,
|
||||
Register value) = 0;
|
||||
|
||||
GrowableArray<CodeComment*> comments_;
|
||||
ObjectPoolBuilder* object_pool_builder_;
|
||||
};
|
||||
|
|
|
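The consolidation above hinges on one idea: a compressed store is the same raw store, just narrower. A minimal, self-contained C++ sketch of that width selection (not SDK code; the enum values and the little-endian assumption are illustrative):

#include <cstdint>
#include <cstring>

// Hypothetical stand-ins for the assembler's operand sizes on a 64-bit
// compressed-pointer build: kObjectBytes would be four bytes while
// kWordBytes is eight.
enum OperandSize { kObjectBytes = 4, kWordBytes = 8 };

// Writes only the low `size` bytes of `value` into the field, which is all
// that distinguishes a compressed store from a full-word store here
// (assumes a little-endian target, as all current Dart targets are).
void StoreLowBytes(uint8_t* field, uint64_t value, OperandSize size) {
  std::memcpy(field, &value, static_cast<std::size_t>(size));
}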
@ -2076,21 +2076,13 @@ void Assembler::CompareObject(Register reg, const Object& object) {
}
}

void Assembler::StoreIntoObject(Register object,
const Address& dest,
void Assembler::StoreBarrier(Register object,
Register value,
CanBeSmi can_be_smi,
MemoryOrder memory_order,
Register scratch) {
// x.slot = x. Barrier should have been removed at the IL level.
ASSERT(object != value);

if (memory_order == kRelease) {
StoreRelease(value, dest.base(), dest.disp32());
} else {
movl(dest, value);
}

bool spill_scratch = false;
if (scratch == kNoRegister) {
spill_scratch = true;

@ -2115,6 +2107,13 @@ void Assembler::StoreIntoObject(Register object,
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done, kNearJump);
} else {
#if defined(DEBUG)
Label passed_check;
BranchIfNotSmi(value, &passed_check, kNearJump);
Breakpoint();
Bind(&passed_check);
#endif
}
if (spill_scratch) {
pushl(scratch);

@ -2153,43 +2152,12 @@ void Assembler::StoreIntoObject(Register object,
Bind(&done);
}

void Assembler::StoreIntoObjectNoBarrier(Register object,
const Address& dest,
Register value,
MemoryOrder memory_order) {
if (memory_order == kRelease) {
StoreRelease(value, dest.base(), dest.disp32());
} else {
movl(dest, value);
}
#if defined(DEBUG)
// We can't assert the incremental barrier is not needed here, only the
// generational barrier. We sometimes omit the write barrier when 'value' is
// a constant, but we don't eagerly mark 'value' and instead assume it is also
// reachable via a constant pool, so it doesn't matter if it is not traced via
// 'object'.
Label done;
BranchIfSmi(value, &done, kNearJump);
testb(FieldAddress(value, target::Object::tags_offset()),
Immediate(1 << target::UntaggedObject::kNewBit));
j(ZERO, &done, Assembler::kNearJump);
testb(FieldAddress(object, target::Object::tags_offset()),
Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
j(ZERO, &done, Assembler::kNearJump);
Stop("Write barrier is required");
Bind(&done);
#endif // defined(DEBUG)
}

void Assembler::StoreIntoArray(Register object,
void Assembler::ArrayStoreBarrier(Register object,
Register slot,
Register value,
CanBeSmi can_be_smi,
Register scratch) {
ASSERT(object != value);
movl(Address(slot, 0), value);

ASSERT(scratch != kNoRegister);
ASSERT(scratch != object);
ASSERT(scratch != value);
ASSERT(scratch != slot);

@ -2204,6 +2172,13 @@ void Assembler::StoreIntoArray(Register object,
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done, kNearJump);
} else {
#if defined(DEBUG)
Label passed_check;
BranchIfNotSmi(value, &passed_check, kNearJump);
Breakpoint();
Bind(&passed_check);
#endif
}
movl(scratch, FieldAddress(object, target::Object::tags_offset()));
shrl(scratch, Immediate(target::UntaggedObject::kBarrierOverlapShift));

@ -2222,10 +2197,31 @@ void Assembler::StoreIntoArray(Register object,
Bind(&done);
}

void Assembler::StoreIntoObjectNoBarrier(Register object,
void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
Register value) {
// We can't assert the incremental barrier is not needed here, only the
// generational barrier. We sometimes omit the write barrier when 'value' is
// a constant, but we don't eagerly mark 'value' and instead assume it is also
// reachable via a constant pool, so it doesn't matter if it is not traced via
// 'object'.
Label done;
BranchIfSmi(value, &done, kNearJump);
testb(FieldAddress(value, target::Object::tags_offset()),
Immediate(1 << target::UntaggedObject::kNewBit));
j(ZERO, &done, Assembler::kNearJump);
testb(FieldAddress(object, target::Object::tags_offset()),
Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
j(ZERO, &done, Assembler::kNearJump);
Stop("Write barrier is required");
Bind(&done);
}

void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value,
MemoryOrder memory_order) {
MemoryOrder memory_order,
OperandSize size) {
ASSERT_EQUAL(size, kFourBytes);
ASSERT(IsOriginalObject(value));
// Ignoring memory_order.
// On intel stores have store-release behavior (i.e. stores are not
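For readers following the barrier fast path above: the shrl/andl/testb sequence folds two questions into a single test. A self-contained sketch of the predicate it computes (not SDK code; the bit layout is abstracted behind the parameters):

#include <cstdint>

// True when the write barrier stub must run: the object's tag bits, shifted
// by the barrier overlap amount, line up against the value's tag bits so a
// single AND catches both "old-and-not-remembered -> new" stores and stores
// into objects that the concurrent marker still has to visit.
bool StoreNeedsBarrier(uint64_t object_tags,
                       uint64_t value_tags,
                       uint64_t barrier_mask,
                       unsigned barrier_overlap_shift) {
  const uint64_t combined = (object_tags >> barrier_overlap_shift) & value_tags;
  return (combined & barrier_mask) != 0;  // Nonzero: take the slow path.
}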
@ -671,21 +671,24 @@ class Assembler : public AssemblerBase {
}

void LoadAcquire(Register dst,
Register address,
int32_t offset = 0,
const Address& address,
OperandSize size = kFourBytes) override {
// On intel loads have load-acquire behavior (i.e. loads are not re-ordered
// with other loads).
Load(dst, Address(address, offset), size);
Load(dst, address, size);
#if defined(TARGET_USES_THREAD_SANITIZER)
#error No support for TSAN on IA32.
#endif
}
void StoreRelease(Register src,
Register address,
int32_t offset = 0) override {
const Address& address,
OperandSize size = kFourBytes) override {
// On intel stores have store-release behavior (i.e. stores are not
// re-ordered with other stores).
movl(Address(address, offset), src);

// We don't run TSAN on 32 bit systems.
Store(src, address, size);
#if defined(TARGET_USES_THREAD_SANITIZER)
#error No support for TSAN on IA32.
#endif
}

void CompareWithMemoryValue(Register value,

@ -806,82 +809,23 @@ class Assembler : public AssemblerBase {
void PushObject(const Object& object);
void CompareObject(Register reg, const Object& object);

// Store into a heap object and apply the generational write barrier. (Unlike
// the other architectures, this does not apply the incremental write barrier,
// and so concurrent marking is not enabled for now on IA32.) All stores into
// heap objects must pass through this function or, if the value can be proven
// either Smi or old-and-premarked, its NoBarrier variants.
// Destroys the value register.
void StoreIntoObject(Register object, // Object we are storing into.
const Address& dest, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_value_be_smi = kValueCanBeSmi,
MemoryOrder memory_order = kRelaxedNonAtomic) override {
StoreIntoObject(object, dest, value, can_value_be_smi, memory_order,
kNoRegister);
}
void StoreIntoObject(Register object, // Object we are storing into.
const Address& dest, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_value_be_smi,
MemoryOrder memory_order,
Register scratch);
void StoreIntoArray(Register object, // Object we are storing into.
Register slot, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_value_be_smi = kValueCanBeSmi) override {
StoreIntoArray(object, slot, value, can_value_be_smi, kNoRegister);
}
void StoreIntoArray(Register object, // Object we are storing into.
Register slot, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_value_be_smi,
Register scratch);
void StoreIntoObjectNoBarrier(
Register object,
const Address& dest,
Register value,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
void StoreIntoObjectNoBarrier(
void StoreObjectIntoObjectNoBarrier(
Register object,
const Address& dest,
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
MemoryOrder memory_order = kRelaxedNonAtomic,
OperandSize size = kFourBytes) override;

void StoreIntoObjectOffset(
Register object, // Object we are storing into.
int32_t offset, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_value_be_smi = kValueCanBeSmi,
MemoryOrder memory_order = kRelaxedNonAtomic) override {
StoreIntoObjectOffset(object, offset, value, can_value_be_smi, memory_order,
kNoRegister);
}
void StoreIntoObjectOffset(Register object, // Object we are storing into.
int32_t offset, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_value_be_smi,
MemoryOrder memory_order,
Register scratch) {
StoreIntoObject(object, FieldAddress(object, offset), value,
can_value_be_smi, memory_order, scratch);
}
void StoreIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
void StoreBarrier(Register object,
Register value,
MemoryOrder memory_order = kRelaxedNonAtomic) override {
StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
memory_order);
}
void StoreIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic) override {
StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
memory_order);
}
CanBeSmi can_be_smi,
Register scratch) override;
void ArrayStoreBarrier(Register object,
Register slot,
Register value,
CanBeSmi can_be_smi,
Register scratch) override;
void VerifyStoreNeedsNoWriteBarrier(Register object, Register value) override;

// Stores a non-tagged value into a heap object.
void StoreInternalPointer(Register object,
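The IA32 (and X64) LoadAcquire/StoreRelease bodies above are plain moves. A small analogy in portable C++ showing why that is sound (an illustration, not SDK code): under x86's TSO memory model, acquire loads and release stores need no extra fence instructions, only compiler-level ordering.

#include <atomic>
#include <cstdint>

uint32_t LoadAcquireSketch(const std::atomic<uint32_t>& slot) {
  // On x86 this compiles to an ordinary mov; the hardware ordering is free.
  return slot.load(std::memory_order_acquire);
}

void StoreReleaseSketch(std::atomic<uint32_t>& slot, uint32_t value) {
  // Likewise a plain mov on x86; no mfence or locked instruction is needed
  // for release semantics.
  slot.store(value, std::memory_order_release);
}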
@ -2581,26 +2581,27 @@ void Assembler::TsanStoreRelease(Register addr) {
#endif

void Assembler::LoadAcquire(Register dst,
Register address,
int32_t offset,
const Address& address,
OperandSize size) {
ASSERT(dst != address);
LoadFromOffset(dst, address, offset, size);
ASSERT(dst != address.base());
Load(dst, address, size);
fence(HartEffects::kRead, HartEffects::kMemory);

#if defined(TARGET_USES_THREAD_SANITIZER)
if (offset == 0) {
TsanLoadAcquire(address);
if (address.offset() == 0) {
TsanLoadAcquire(address.base());
} else {
AddImmediate(TMP2, address, offset);
AddImmediate(TMP2, address.base(), address.offset());
TsanLoadAcquire(TMP2);
}
#endif
}

void Assembler::StoreRelease(Register src, Register address, int32_t offset) {
void Assembler::StoreRelease(Register src,
const Address& address,
OperandSize size) {
fence(HartEffects::kMemory, HartEffects::kWrite);
StoreToOffset(src, address, offset);
Store(src, address, size);
}

void Assembler::CompareWithMemoryValue(Register value,

@ -3252,32 +3253,21 @@ void Assembler::StoreDToOffset(FRegister src, Register base, int32_t offset) {
fsd(src, PrepareLargeOffset(base, offset));
}

// Store into a heap object and apply the generational and incremental write
// barriers. All stores into heap objects must pass through this function or,
// if the value can be proven either Smi or old-and-premarked, its NoBarrier
// variants.
// Preserves object and value registers.
void Assembler::StoreIntoObject(Register object,
const Address& dest,
Register value,
CanBeSmi can_value_be_smi,
MemoryOrder memory_order) {
// stlr does not feature an address operand.
ASSERT(memory_order == kRelaxedNonAtomic);
Store(value, dest);
StoreBarrier(object, value, can_value_be_smi);
}
void Assembler::StoreBarrier(Register object,
Register value,
CanBeSmi can_value_be_smi) {
CanBeSmi can_value_be_smi,
Register scratch) {
// x.slot = x. Barrier should have been removed at the IL level.
ASSERT(object != value);
ASSERT(object != scratch);
ASSERT(value != scratch);
ASSERT(object != RA);
ASSERT(value != RA);
ASSERT(object != TMP);
ASSERT(scratch != RA);
ASSERT(object != TMP2);
ASSERT(value != TMP);
ASSERT(value != TMP2);
ASSERT(scratch != TMP2);
ASSERT(scratch != kNoRegister);

// In parallel, test whether
// - object is old and not remembered and value is new, or

@ -3290,12 +3280,19 @@ void Assembler::StoreBarrier(Register object,
Label done;
if (can_value_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done, kNearJump);
} else {
#if defined(DEBUG)
Label passed_check;
BranchIfNotSmi(value, &passed_check, kNearJump);
Breakpoint();
Bind(&passed_check);
#endif
}
lbu(TMP, FieldAddress(object, target::Object::tags_offset()));
lbu(scratch, FieldAddress(object, target::Object::tags_offset()));
lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
and_(TMP, TMP, TMP2);
ble(TMP, WRITE_BARRIER_STATE, &done, kNearJump);
srli(scratch, scratch, target::UntaggedObject::kBarrierOverlapShift);
and_(scratch, scratch, TMP2);
ble(scratch, WRITE_BARRIER_STATE, &done, kNearJump);

Register objectForCall = object;
if (value != kWriteBarrierValueReg) {

@ -3325,25 +3322,29 @@ void Assembler::StoreBarrier(Register object,
}
Bind(&done);
}
void Assembler::StoreIntoArray(Register object,

void Assembler::ArrayStoreBarrier(Register object,
Register slot,
Register value,
CanBeSmi can_value_be_smi) {
sx(value, Address(slot, 0));
StoreIntoArrayBarrier(object, slot, value, can_value_be_smi);
}
void Assembler::StoreIntoArrayBarrier(Register object,
Register slot,
Register value,
CanBeSmi can_value_be_smi) {
CanBeSmi can_value_be_smi,
Register scratch) {
// TODO(riscv): Use RA2 to avoid spilling RA inline?
const bool spill_lr = true;
ASSERT(object != TMP);
ASSERT(object != slot);
ASSERT(object != value);
ASSERT(object != scratch);
ASSERT(slot != value);
ASSERT(slot != scratch);
ASSERT(value != scratch);
ASSERT(object != RA);
ASSERT(slot != RA);
ASSERT(value != RA);
ASSERT(scratch != RA);
ASSERT(object != TMP2);
ASSERT(value != TMP);
ASSERT(value != TMP2);
ASSERT(slot != TMP);
ASSERT(slot != TMP2);
ASSERT(value != TMP2);
ASSERT(scratch != TMP2);
ASSERT(scratch != kNoRegister);

// In parallel, test whether
// - object is old and not remembered and value is new, or

@ -3356,12 +3357,19 @@ void Assembler::StoreIntoArrayBarrier(Register object,
Label done;
if (can_value_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done, kNearJump);
} else {
#if defined(DEBUG)
Label passed_check;
BranchIfNotSmi(value, &passed_check, kNearJump);
Breakpoint();
Bind(&passed_check);
#endif
}
lbu(TMP, FieldAddress(object, target::Object::tags_offset()));
lbu(scratch, FieldAddress(object, target::Object::tags_offset()));
lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
srli(TMP, TMP, target::UntaggedObject::kBarrierOverlapShift);
and_(TMP, TMP, TMP2);
ble(TMP, WRITE_BARRIER_STATE, &done, kNearJump);
srli(scratch, scratch, target::UntaggedObject::kBarrierOverlapShift);
and_(scratch, scratch, TMP2);
ble(scratch, WRITE_BARRIER_STATE, &done, kNearJump);
if (spill_lr) {
PushRegister(RA);
}

@ -3379,25 +3387,8 @@ void Assembler::StoreIntoArrayBarrier(Register object,
Bind(&done);
}

void Assembler::StoreIntoObjectOffset(Register object,
int32_t offset,
Register value,
CanBeSmi can_value_be_smi,
MemoryOrder memory_order) {
if (memory_order == kRelease) {
StoreRelease(value, object, offset - kHeapObjectTag);
} else {
StoreToOffset(value, object, offset - kHeapObjectTag);
}
StoreBarrier(object, value, can_value_be_smi);
}
void Assembler::StoreIntoObjectNoBarrier(Register object,
const Address& dest,
Register value,
MemoryOrder memory_order) {
ASSERT(memory_order == kRelaxedNonAtomic);
Store(value, dest);
#if defined(DEBUG)
void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
Register value) {
// We can't assert the incremental barrier is not needed here, only the
// generational barrier. We sometimes omit the write barrier when 'value' is
// a constant, but we don't eagerly mark 'value' and instead assume it is also

@ -3413,39 +3404,13 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
beqz(TMP2, &done, kNearJump);
Stop("Write barrier is required");
Bind(&done);
#endif
}
void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
int32_t offset,
Register value,
MemoryOrder memory_order) {
if (memory_order == kRelease) {
StoreRelease(value, object, offset - kHeapObjectTag);
} else {
StoreToOffset(value, object, offset - kHeapObjectTag);
}
#if defined(DEBUG)
// We can't assert the incremental barrier is not needed here, only the
// generational barrier. We sometimes omit the write barrier when 'value' is
// a constant, but we don't eagerly mark 'value' and instead assume it is also
// reachable via a constant pool, so it doesn't matter if it is not traced via
// 'object'.
Label done;
BranchIfSmi(value, &done, kNearJump);
lbu(TMP2, FieldAddress(value, target::Object::tags_offset()));
andi(TMP2, TMP2, 1 << target::UntaggedObject::kNewBit);
beqz(TMP2, &done, kNearJump);
lbu(TMP2, FieldAddress(object, target::Object::tags_offset()));
andi(TMP2, TMP2, 1 << target::UntaggedObject::kOldAndNotRememberedBit);
beqz(TMP2, &done, kNearJump);
Stop("Write barrier is required");
Bind(&done);
#endif
}
void Assembler::StoreIntoObjectNoBarrier(Register object,

void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value,
MemoryOrder memory_order) {
MemoryOrder memory_order,
OperandSize size) {
ASSERT(IsOriginalObject(value));
DEBUG_ASSERT(IsNotTemporaryScopedHandle(value));
// No store buffer update.

@ -3455,34 +3420,14 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
} else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
value_reg = ZR;
} else {
LoadObject(TMP2, value);
value_reg = TMP2;
ASSERT(object != TMP);
LoadObject(TMP, value);
value_reg = TMP;
}
if (memory_order == kRelease) {
fence(HartEffects::kMemory, HartEffects::kWrite);
}
sx(value_reg, dest);
}
void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
int32_t offset,
const Object& value,
MemoryOrder memory_order) {
if (memory_order == kRelease) {
Register value_reg = TMP2;
if (IsSameObject(compiler::NullObject(), value)) {
value_reg = NULL_REG;
} else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
value_reg = ZR;
} else {
LoadObject(value_reg, value);
}
StoreIntoObjectOffsetNoBarrier(object, offset, value_reg, memory_order);
} else if (IsITypeImm(offset - kHeapObjectTag)) {
StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value);
} else {
AddImmediate(TMP, object, offset - kHeapObjectTag);
StoreIntoObjectNoBarrier(object, Address(TMP), value);
}
Store(value_reg, dest, size);
}

// Stores a non-tagged value into a heap object.
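RISC-V, unlike x86, needs explicit fences; the sequences above pair a fence with a plain access. A portable C++ analogy of that pairing (an illustration, not SDK code):

#include <atomic>
#include <cstdint>

void StoreReleaseViaFence(std::atomic<uintptr_t>& slot, uintptr_t value) {
  // Mirrors fence(kMemory, kWrite) followed by a plain store.
  std::atomic_thread_fence(std::memory_order_release);
  slot.store(value, std::memory_order_relaxed);
}

uintptr_t LoadAcquireViaFence(const std::atomic<uintptr_t>& slot) {
  // Mirrors a plain load followed by fence(kRead, kMemory).
  uintptr_t value = slot.load(std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_acquire);
  return value;
}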
@ -879,13 +879,12 @@ class Assembler : public MicroAssembler {
#endif

void LoadAcquire(Register dst,
Register address,
int32_t offset = 0,
const Address& address,
OperandSize size = kWordBytes) override;

void StoreRelease(Register src,
Register address,
int32_t offset = 0) override;
const Address& address,
OperandSize size = kWordBytes) override;

void CompareWithMemoryValue(Register value,
Address address,

@ -1166,52 +1165,23 @@ class Assembler : public MicroAssembler {
UNREACHABLE();
}

// Store into a heap object and apply the generational and incremental write
// barriers. All stores into heap objects must pass through this function or,
// if the value can be proven either Smi or old-and-premarked, its NoBarrier
// variants.
// Preserves object and value registers.
void StoreIntoObject(Register object,
const Address& dest,
void StoreBarrier(Register object,
Register value,
CanBeSmi can_value_be_smi = kValueCanBeSmi,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
void StoreBarrier(Register object, Register value, CanBeSmi can_value_be_smi);
void StoreIntoArray(Register object,
CanBeSmi can_value_be_smi,
Register scratch) override;
void ArrayStoreBarrier(Register object,
Register slot,
Register value,
CanBeSmi can_value_be_smi = kValueCanBeSmi) override;
void StoreIntoArrayBarrier(Register object,
Register slot,
Register value,
CanBeSmi can_value_be_smi);
CanBeSmi can_value_be_smi,
Register scratch) override;
void VerifyStoreNeedsNoWriteBarrier(Register object, Register value) override;

void StoreIntoObjectOffset(
Register object,
int32_t offset,
Register value,
CanBeSmi can_value_be_smi = kValueCanBeSmi,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
void StoreIntoObjectNoBarrier(
Register object,
const Address& dest,
Register value,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
void StoreIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
Register value,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
void StoreIntoObjectNoBarrier(
void StoreObjectIntoObjectNoBarrier(
Register object,
const Address& dest,
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
void StoreIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
MemoryOrder memory_order = kRelaxedNonAtomic,
OperandSize size = kWordBytes) override;

// Stores a non-tagged value into a heap object.
void StoreInternalPointer(Register object,
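One RISC-V detail worth calling out from the .cc above: StoreObjectIntoObjectNoBarrier avoids materializing common constants, reusing NULL_REG for null and the architectural zero register for Smi zero. A self-contained sketch of that selection (not SDK code; the enum is hypothetical):

enum class ValueReg { kNullReg, kZeroReg, kScratch };

// Picks the cheapest register that already holds (or trivially holds)
// the constant being stored.
ValueReg PickValueReg(bool is_null, bool is_smi_zero) {
  if (is_null) return ValueReg::kNullReg;      // NULL_REG already holds null.
  if (is_smi_zero) return ValueReg::kZeroReg;  // ZR is architecturally zero.
  return ValueReg::kScratch;                   // LoadObject into TMP first.
}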
@ -1451,18 +1451,21 @@ void Assembler::LoadUniqueObject(
LoadObjectHelper(dst, object, true, snapshot_behavior);
}

void Assembler::StoreObject(const Address& dst, const Object& object) {
void Assembler::StoreObject(const Address& dst,
const Object& object,
OperandSize size) {
ASSERT(IsOriginalObject(object));
ASSERT(size == kWordBytes || size == kObjectBytes);

intptr_t offset_from_thread;
if (target::CanLoadFromThread(object, &offset_from_thread)) {
movq(TMP, Address(THR, offset_from_thread));
movq(dst, TMP);
Store(TMP, dst, size);
} else if (target::IsSmi(object)) {
MoveImmediate(dst, Immediate(target::ToRawSmi(object)));
MoveImmediate(dst, Immediate(target::ToRawSmi(object)), size);
} else {
LoadObject(TMP, object);
movq(dst, TMP);
Store(TMP, dst, size);
}
}

@ -1508,14 +1511,26 @@ void Assembler::LoadImmediate(Register reg, const Immediate& imm) {
}
}

void Assembler::MoveImmediate(const Address& dst, const Immediate& imm) {
void Assembler::MoveImmediate(const Address& dst,
const Immediate& imm,
OperandSize size) {
if (imm.is_int32()) {
if (size == kFourBytes) {
movl(dst, imm);
} else {
ASSERT(size == kEightBytes);
movq(dst, imm);
}
} else {
LoadImmediate(TMP, imm);
if (size == kFourBytes) {
movl(dst, TMP);
} else {
ASSERT(size == kEightBytes);
movq(dst, TMP);
}
}
}

void Assembler::LoadSImmediate(FpuRegister dst, float immediate) {
int32_t bits = bit_cast<int32_t>(immediate);

@ -1552,41 +1567,15 @@ void Assembler::LoadCompressed(Register dest, const Address& slot) {
}
#endif

void Assembler::StoreIntoObject(Register object,
const Address& dest,
Register value,
CanBeSmi can_be_smi,
MemoryOrder memory_order) {
if (memory_order == kRelease) {
StoreRelease(value, dest.base(), dest.disp32());
} else {
movq(dest, value);
}
StoreBarrier(object, value, can_be_smi);
}

#if defined(DART_COMPRESSED_POINTERS)
void Assembler::StoreCompressedIntoObject(Register object,
const Address& dest,
Register value,
CanBeSmi can_be_smi,
MemoryOrder memory_order) {
if (memory_order == kRelease) {
StoreReleaseCompressed(value, dest.base(), dest.disp8());
} else {
OBJ(mov)(dest, value);
}
StoreBarrier(object, value, can_be_smi);
}
#endif

void Assembler::StoreBarrier(Register object,
Register value,
CanBeSmi can_be_smi) {
CanBeSmi can_be_smi,
Register scratch) {
// x.slot = x. Barrier should have been removed at the IL level.
ASSERT(object != value);
ASSERT(object != TMP);
ASSERT(value != TMP);
ASSERT(object != scratch);
ASSERT(value != scratch);
ASSERT(scratch != kNoRegister);

// In parallel, test whether
// - object is old and not remembered and value is new, or

@ -1598,12 +1587,19 @@ void Assembler::StoreBarrier(Register object,
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done, kNearJump);
} else {
#if defined(DEBUG)
Label passed_check;
BranchIfNotSmi(value, &passed_check, kNearJump);
Breakpoint();
Bind(&passed_check);
#endif
}
movb(ByteRegisterOf(TMP),
movb(ByteRegisterOf(scratch),
FieldAddress(object, target::Object::tags_offset()));
shrl(TMP, Immediate(target::UntaggedObject::kBarrierOverlapShift));
andl(TMP, Address(THR, target::Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, target::Object::tags_offset()), TMP);
shrl(scratch, Immediate(target::UntaggedObject::kBarrierOverlapShift));
andl(scratch, Address(THR, target::Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, target::Object::tags_offset()), scratch);
j(ZERO, &done, kNearJump);

Register object_for_call = object;

@ -1630,31 +1626,15 @@ void Assembler::StoreBarrier(Register object,
Bind(&done);
}

void Assembler::StoreIntoArray(Register object,
void Assembler::ArrayStoreBarrier(Register object,
Register slot,
Register value,
CanBeSmi can_be_smi) {
movq(Address(slot, 0), value);
StoreIntoArrayBarrier(object, slot, value, can_be_smi);
}

#if defined(DART_COMPRESSED_POINTERS)
void Assembler::StoreCompressedIntoArray(Register object,
Register slot,
Register value,
CanBeSmi can_be_smi) {
OBJ(mov)(Address(slot, 0), value);
StoreIntoArrayBarrier(object, slot, value, can_be_smi);
}
#endif

void Assembler::StoreIntoArrayBarrier(Register object,
Register slot,
Register value,
CanBeSmi can_be_smi) {
ASSERT(object != TMP);
ASSERT(value != TMP);
ASSERT(slot != TMP);
CanBeSmi can_be_smi,
Register scratch) {
ASSERT(object != scratch);
ASSERT(value != scratch);
ASSERT(slot != scratch);
ASSERT(scratch != kNoRegister);

// In parallel, test whether
// - object is old and not remembered and value is new, or

@ -1666,12 +1646,19 @@ void Assembler::StoreIntoArrayBarrier(Register object,
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done, kNearJump);
} else {
#if defined(DEBUG)
Label passed_check;
BranchIfNotSmi(value, &passed_check, kNearJump);
Breakpoint();
Bind(&passed_check);
#endif
}
movb(ByteRegisterOf(TMP),
movb(ByteRegisterOf(scratch),
FieldAddress(object, target::Object::tags_offset()));
shrl(TMP, Immediate(target::UntaggedObject::kBarrierOverlapShift));
andl(TMP, Address(THR, target::Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, target::Object::tags_offset()), TMP);
shrl(scratch, Immediate(target::UntaggedObject::kBarrierOverlapShift));
andl(scratch, Address(THR, target::Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, target::Object::tags_offset()), scratch);
j(ZERO, &done, kNearJump);

if ((object != kWriteBarrierObjectReg) || (value != kWriteBarrierValueReg) ||

@ -1687,16 +1674,8 @@ void Assembler::StoreIntoArrayBarrier(Register object,
Bind(&done);
}

void Assembler::StoreIntoObjectNoBarrier(Register object,
const Address& dest,
Register value,
MemoryOrder memory_order) {
if (memory_order == kRelease) {
StoreRelease(value, dest.base(), dest.disp32());
} else {
movq(dest, value);
}
#if defined(DEBUG)
void Assembler::VerifyStoreNeedsNoWriteBarrier(Register object,
Register value) {
// We can't assert the incremental barrier is not needed here, only the
// generational barrier. We sometimes omit the write barrier when 'value' is
// a constant, but we don't eagerly mark 'value' and instead assume it is also

@ -1712,63 +1691,21 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
j(ZERO, &done, Assembler::kNearJump);
Stop("Write barrier is required");
Bind(&done);
#endif // defined(DEBUG)
// No store buffer update.
}

#if defined(DART_COMPRESSED_POINTERS)
void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
const Address& dest,
Register value,
MemoryOrder memory_order) {
if (memory_order == kRelease) {
StoreReleaseCompressed(value, dest.base(), dest.disp8());
} else {
OBJ(mov)(dest, value);
}
#if defined(DEBUG)
// We can't assert the incremental barrier is not needed here, only the
// generational barrier. We sometimes omit the write barrier when 'value' is
// a constant, but we don't eagerly mark 'value' and instead assume it is also
// reachable via a constant pool, so it doesn't matter if it is not traced via
// 'object'.
Label done;
BranchIfSmi(value, &done, kNearJump);
testb(FieldAddress(value, target::Object::tags_offset()),
Immediate(1 << target::UntaggedObject::kNewBit));
j(ZERO, &done, Assembler::kNearJump);
testb(FieldAddress(object, target::Object::tags_offset()),
Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
j(ZERO, &done, Assembler::kNearJump);
Stop("Write barrier is required");
Bind(&done);
#endif // defined(DEBUG)
// No store buffer update.
}
#endif

void Assembler::StoreIntoObjectNoBarrier(Register object,
void Assembler::StoreObjectIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value,
MemoryOrder memory_order) {
MemoryOrder memory_order,
OperandSize size) {
if (memory_order == kRelease) {
LoadObject(TMP, value);
StoreIntoObjectNoBarrier(object, dest, TMP, memory_order);
StoreIntoObjectNoBarrier(object, dest, TMP, memory_order, size);
} else {
StoreObject(dest, value);
StoreObject(dest, value, size);
}
}

#if defined(DART_COMPRESSED_POINTERS)
void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value,
MemoryOrder memory_order) {
LoadObject(TMP, value);
StoreCompressedIntoObjectNoBarrier(object, dest, TMP, memory_order);
}
#endif

void Assembler::StoreInternalPointer(Register object,
const Address& dest,
Register value) {
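The X64 StoreObject above picks among three ways to store a constant object. A self-contained sketch of the decision (not SDK code; names are illustrative):

#include <cstdint>
#include <optional>

enum class ConstStore { kFromThreadCache, kSmiImmediate, kViaScratchReg };

// Thread-cached objects are loaded from THR at a fixed offset; Smis can be
// stored as immediates; everything else is materialized through a scratch
// register first.
ConstStore ClassifyConstStore(std::optional<int32_t> thread_offset,
                              bool is_smi) {
  if (thread_offset.has_value()) return ConstStore::kFromThreadCache;
  if (is_smi) return ConstStore::kSmiImmediate;
  return ConstStore::kViaScratchReg;
}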
@ -852,69 +852,32 @@ class Assembler : public AssemblerBase {

// Unaware of write barrier (use StoreInto* methods for storing to objects).
// TODO(koda): Add StackAddress/HeapAddress types to prevent misuse.
void StoreObject(const Address& dst, const Object& obj);
void StoreObject(const Address& dst,
const Object& obj,
OperandSize size = kWordBytes);
void PushObject(const Object& object);
void CompareObject(Register reg, const Object& object);

#if defined(DART_COMPRESSED_POINTERS)
void LoadCompressed(Register dest, const Address& slot) override;
#endif
// Store into a heap object and apply the generational and incremental write
// barriers. All stores into heap objects must pass through this function or,
// if the value can be proven either Smi or old-and-premarked, its NoBarrier
// variants.
// Preserves object and value registers.
void StoreIntoObject(Register object, // Object we are storing into.
const Address& dest, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_be_smi = kValueCanBeSmi,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
#if defined(DART_COMPRESSED_POINTERS)
void StoreCompressedIntoObject(
Register object, // Object we are storing into.
const Address& dest, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_be_smi = kValueCanBeSmi,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
#endif
void StoreBarrier(Register object, // Object we are storing into.
Register value, // Value we are storing.
CanBeSmi can_be_smi);
void StoreIntoArray(Register object, // Object we are storing into.
Register slot, // Where we are storing into.
CanBeSmi can_be_smi,
Register scratch) override;
void ArrayStoreBarrier(Register object, // Object we are storing into.
Register slot, // Slot into which we are storing.
Register value, // Value we are storing.
CanBeSmi can_be_smi = kValueCanBeSmi) override;
#if defined(DART_COMPRESSED_POINTERS)
void StoreCompressedIntoArray(Register object, // Object we are storing into.
Register slot, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_be_smi = kValueCanBeSmi) override;
#endif
CanBeSmi can_be_smi,
Register scratch) override;
void VerifyStoreNeedsNoWriteBarrier(Register object, Register value) override;

void StoreIntoObjectNoBarrier(
Register object,
const Address& dest,
Register value,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
#if defined(DART_COMPRESSED_POINTERS)
void StoreCompressedIntoObjectNoBarrier(
Register object,
const Address& dest,
Register value,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
#endif
void StoreIntoObjectNoBarrier(
void StoreObjectIntoObjectNoBarrier(
Register object,
const Address& dest,
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
#if defined(DART_COMPRESSED_POINTERS)
void StoreCompressedIntoObjectNoBarrier(
Register object,
const Address& dest,
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
#endif
MemoryOrder memory_order = kRelaxedNonAtomic,
OperandSize size = kWordBytes) override;

// Stores a non-tagged value into a heap object.
void StoreInternalPointer(Register object,

@ -1153,50 +1116,35 @@ class Assembler : public AssemblerBase {
#endif

void LoadAcquire(Register dst,
Register address,
int32_t offset = 0,
const Address& address,
OperandSize size = kEightBytes) override {
// On intel loads have load-acquire behavior (i.e. loads are not re-ordered
// with other loads).
Load(dst, Address(address, offset), size);
Load(dst, address, size);
#if defined(TARGET_USES_THREAD_SANITIZER)
TsanLoadAcquire(Address(address, offset));
TsanLoadAcquire(address);
#endif
}
#if defined(DART_COMPRESSED_POINTERS)
void LoadAcquireCompressed(Register dst,
Register address,
int32_t offset = 0) override {
void LoadAcquireCompressed(Register dst, const Address& address) override {
// On intel loads have load-acquire behavior (i.e. loads are not re-ordered
// with other loads).
LoadCompressed(dst, Address(address, offset));
LoadCompressed(dst, address);
#if defined(TARGET_USES_THREAD_SANITIZER)
TsanLoadAcquire(Address(address, offset));
TsanLoadAcquire(address);
#endif
}
#endif
void StoreRelease(Register src,
Register address,
int32_t offset = 0) override {
const Address& address,
OperandSize size = kWordBytes) override {
// On intel stores have store-release behavior (i.e. stores are not
// re-ordered with other stores).
movq(Address(address, offset), src);
Store(src, address, size);
#if defined(TARGET_USES_THREAD_SANITIZER)
TsanStoreRelease(Address(address, offset));
TsanStoreRelease(address);
#endif
}
#if defined(DART_COMPRESSED_POINTERS)
void StoreReleaseCompressed(Register src,
Register address,
int32_t offset = 0) override {
// On intel stores have store-release behavior (i.e. stores are not
// re-ordered with other stores).
OBJ(mov)(Address(address, offset), src);
#if defined(TARGET_USES_THREAD_SANITIZER)
TsanStoreRelease(Address(address, offset));
#endif
}
#endif

void CompareWithMemoryValue(Register value,
Address address,

@ -1513,7 +1461,9 @@ class Assembler : public AssemblerBase {
CanBeSmi can_be_smi = kValueCanBeSmi);

// Unaware of write barrier (use StoreInto* methods for storing to objects).
void MoveImmediate(const Address& dst, const Immediate& imm);
void MoveImmediate(const Address& dst,
const Immediate& imm,
OperandSize size = kWordBytes);

friend class dart::FlowGraphCompiler;
std::function<void(Register reg)> generate_invoke_write_barrier_wrapper_;

@ -6254,7 +6254,7 @@ ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire, assembler) {
__ movq(reg, Immediate(0xAABBCCDD + i));
}
}
__ StoreRelease(CallingConventions::kArg3Reg, RSP, 0);
__ StoreReleaseToOffset(CallingConventions::kArg3Reg, RSP, 0);

__ pushq(TMP);

@ -6294,7 +6294,7 @@ ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire, assembler) {
__ Bind(&ok);
}
}
__ LoadAcquire(CallingConventions::kReturnReg, RSP, 0);
__ LoadAcquireFromOffset(CallingConventions::kReturnReg, RSP, 0);
__ popq(RCX);
__ popq(RCX);
__ ret();

@ -6316,8 +6316,8 @@ ASSEMBLER_TEST_GENERATE(StoreReleaseLoadAcquire1024, assembler) {
__ xorq(RCX, RCX);
__ pushq(RCX);
__ subq(RSP, Immediate(1024));
__ StoreRelease(CallingConventions::kArg3Reg, RSP, 1024);
__ LoadAcquire(CallingConventions::kReturnReg, RSP, 1024);
__ StoreReleaseToOffset(CallingConventions::kArg3Reg, RSP, 1024);
__ LoadAcquireFromOffset(CallingConventions::kReturnReg, RSP, 1024);
__ addq(RSP, Immediate(1024));
__ popq(RCX);
__ popq(RCX);

@ -4629,28 +4629,7 @@ LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
   const intptr_t kNumInputs = 1;
   LocationSummary* locs = nullptr;
-  auto const rep = slot().representation();
-  if (rep != kTagged) {
-    ASSERT(!calls_initializer());
-
-    const intptr_t kNumTemps = 0;
-    locs = new (zone)
-        LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
-    locs->set_in(0, Location::RequiresRegister());
-    if (rep == kUntagged) {
-      locs->set_out(0, Location::RequiresRegister());
-    } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
-      const size_t value_size = RepresentationUtils::ValueSize(rep);
-      if (value_size <= compiler::target::kWordSize) {
-        locs->set_out(0, Location::RequiresRegister());
-      } else {
-        ASSERT(value_size == 2 * compiler::target::kWordSize);
-        locs->set_out(0, Location::Pair(Location::RequiresRegister(),
-                                        Location::RequiresRegister()));
-      }
-    } else {
-      locs->set_out(0, Location::RequiresFpuRegister());
-    }
-  } else if (calls_initializer()) {
+  if (calls_initializer()) {
     if (throw_exception_on_initialization()) {
       const bool using_shared_stub = UseSharedSlowPathStub(opt);
       const intptr_t kNumTemps = using_shared_stub ? 1 : 0;
@ -4678,25 +4657,37 @@ LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
     locs = new (zone)
         LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
     locs->set_in(0, Location::RequiresRegister());
+    if (rep == kTagged || rep == kUntagged) {
+      locs->set_out(0, Location::RequiresRegister());
+    } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
+      const size_t value_size = RepresentationUtils::ValueSize(rep);
+      if (value_size <= compiler::target::kWordSize) {
+        locs->set_out(0, Location::RequiresRegister());
+      } else {
+        ASSERT(value_size == 2 * compiler::target::kWordSize);
+        locs->set_out(0, Location::Pair(Location::RequiresRegister(),
+                                        Location::RequiresRegister()));
+      }
+    } else {
+      locs->set_out(0, Location::RequiresFpuRegister());
+    }
   }
   return locs;
 }

 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const Register instance_reg = locs()->in(0).reg();

   auto const rep = slot().representation();
-  if (rep != kTagged) {
-    if (rep == kUntagged) {
-      const Register result = locs()->out(0).reg();
-      __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
-                             RepresentationUtils::OperandSize(rep));
+  if (calls_initializer()) {
+    __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
+    EmitNativeCodeForInitializerCall(compiler);
+  } else if (rep == kTagged || rep == kUntagged) {
+    __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
   } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
     const size_t value_size = RepresentationUtils::ValueSize(rep);
     if (value_size <= compiler::target::kWordSize) {
-      const Register result = locs()->out(0).reg();
-      __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
-                             RepresentationUtils::OperandSize(rep));
+      __ LoadFromSlot(locs()->out(0).reg(), instance_reg, slot());
     } else {
       auto const result_pair = locs()->out(0).AsPairLocation();
       const Register result_lo = result_pair->At(0).reg();
@ -4706,8 +4697,9 @@ void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
                          OffsetInBytes() + compiler::target::kWordSize);
     }
   } else {
-    const FpuRegister result = locs()->out(0).fpu_reg();
     ASSERT(slot().IsDartField());
     const intptr_t cid = slot().field().guarded_cid();
+    const FpuRegister result = locs()->out(0).fpu_reg();
     switch (cid) {
       case kDoubleCid:
         __ LoadUnboxedDouble(result, instance_reg,
@ -4722,20 +4714,6 @@ void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
         UNREACHABLE();
     }
   }
-    return;
-  }
-
-  // Tagged load.
-  const Register result = locs()->out(0).reg();
-  if (slot().is_compressed()) {
-    __ LoadCompressedFieldFromOffset(result, instance_reg, OffsetInBytes());
-  } else {
-    __ LoadFieldFromOffset(result, instance_reg, OffsetInBytes());
-  }
-
-  if (calls_initializer()) {
-    EmitNativeCodeForInitializerCall(compiler);
-  }
 }
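After this change, every single-register result goes through Assembler::LoadFromSlot, which derives the width and compression handling from the slot instead of each caller repeating that logic (per the commit message, LoadFromSlot also handles both compressed and uncompressed Smi fields). A simplified standalone model of that dispatch, with illustrative types that only approximate the real Slot:

#include <cstdint>
#include <iostream>

enum class Representation { kTagged, kUntagged, kUnboxedInt32 };

// Illustrative stand-in for the compiler's Slot.
struct SlotModel {
  int32_t offset_in_bytes;
  Representation rep;
  bool is_compressed;  // only meaningful for kTagged slots
};

void LoadFromSlotModel(const SlotModel& slot) {
  switch (slot.rep) {
    case Representation::kTagged:
      // Tagged loads must honor compression; compressed non-Smi values
      // additionally need their upper heap bits restored.
      std::cout << (slot.is_compressed ? "compressed" : "full-width")
                << " tagged load at offset " << slot.offset_in_bytes << "\n";
      break;
    case Representation::kUntagged:
      std::cout << "word-sized untagged load at offset "
                << slot.offset_in_bytes << "\n";
      break;
    case Representation::kUnboxedInt32:
      std::cout << "sized integer load at offset " << slot.offset_in_bytes
                << "\n";
      break;
  }
}

int main() {
  LoadFromSlotModel({8, Representation::kTagged, true});
  LoadFromSlotModel({16, Representation::kUnboxedInt32, false});
  return 0;
}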

 void LoadFieldInstr::EmitNativeCodeForInitializerCall(
@ -7726,7 +7704,6 @@ LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,

   summary->set_in(kInstancePos, Location::RequiresRegister());
   const Representation rep = slot().representation();
-  if (rep != kTagged) {
   if (rep == kUntagged) {
     summary->set_in(kValuePos, Location::RequiresRegister());
   } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
@ -7735,16 +7712,12 @@ LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
       summary->set_in(kValuePos, Location::RequiresRegister());
     } else {
       ASSERT(value_size == 2 * compiler::target::kWordSize);
-      summary->set_in(kValuePos,
-                      Location::Pair(Location::RequiresRegister(),
+      summary->set_in(kValuePos, Location::Pair(Location::RequiresRegister(),
                                                 Location::RequiresRegister()));
     }
-  } else {
+  } else if (RepresentationUtils::IsUnboxed(rep)) {
     summary->set_in(kValuePos, Location::RequiresFpuRegister());
   }
-  } else {
-    Location value_loc;
-    if (ShouldEmitStoreBarrier()) {
+  } else if (ShouldEmitStoreBarrier()) {
     summary->set_in(kValuePos,
                     Location::RegisterLocation(kWriteBarrierValueReg));
   } else {
@ -7763,8 +7736,7 @@ LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
     Location value_loc = Location::RequiresRegister();
     if (auto constant = value()->definition()->AsConstant()) {
       const auto& value = constant->value();
-      if (value.IsNull() ||
-          (value.IsSmi() && Smi::Cast(value).Value() == 0)) {
+      if (value.IsNull() || (value.IsSmi() && Smi::Cast(value).Value() == 0)) {
         value_loc = Location::Constant(constant);
       }
     }
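The condition being reflowed above is worth spelling out: only null and the zero Smi qualify for a Constant location, presumably because both can be stored directly on every backend without materializing the value in a register first, and neither can ever be a new-space object. A standalone restatement of the predicate, with illustrative types:

#include <iostream>

// Illustrative stand-in for a constant value in the IL.
struct ValueModel {
  bool is_null;
  bool is_smi;
  long smi_value;
};

// Mirrors the check in MakeLocationSummary above: only null and the
// zero Smi may be stored directly from a Constant location.
bool QualifiesForConstantLocation(const ValueModel& v) {
  return v.is_null || (v.is_smi && v.smi_value == 0);
}

int main() {
  std::cout << QualifiesForConstantLocation({true, false, 0}) << "\n";  // 1
  std::cout << QualifiesForConstantLocation({false, true, 0}) << "\n";  // 1
  std::cout << QualifiesForConstantLocation({false, true, 7}) << "\n";  // 0
  return 0;
}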
@ -7774,7 +7746,6 @@ LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
     summary->set_in(kValuePos, Location::RequiresRegister());
 #endif
   }
-  }
   if (kNumTemps == 1) {
     summary->set_temp(0, Location::RequiresRegister());
   } else {
@ -7788,21 +7759,18 @@ void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
   const intptr_t offset_in_bytes = OffsetInBytes();
   ASSERT(offset_in_bytes > 0);  // Field is finalized and points after header.

-  if (slot().representation() != kTagged) {
-    // Unboxed field.
-    ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
-    const Representation rep = slot().representation();
-    if (rep == kUntagged) {
-      const Register value = locs()->in(kValuePos).reg();
-      __ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
-                            RepresentationUtils::OperandSize(rep));
+  auto const rep = slot().representation();
+  if (rep == kUntagged) {
+    __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
+                            memory_order_);
   } else if (RepresentationUtils::IsUnboxedInteger(rep)) {
     const size_t value_size = RepresentationUtils::ValueSize(rep);
     if (value_size <= compiler::target::kWordSize) {
-      const Register value = locs()->in(kValuePos).reg();
-      __ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
-                            RepresentationUtils::OperandSize(rep));
+      __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
+                              memory_order_);
     } else {
       ASSERT(slot().representation() == kUnboxedInt64);
       ASSERT_EQUAL(compiler::target::kWordSize, kInt32Size);
       auto const value_pair = locs()->in(kValuePos).AsPairLocation();
       const Register value_lo = value_pair->At(0).reg();
       const Register value_hi = value_pair->At(1).reg();
@ -7810,8 +7778,8 @@ void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       __ StoreFieldToOffset(value_hi, instance_reg,
                             offset_in_bytes + compiler::target::kWordSize);
     }
-  } else {
-    // This is an FPU store.
+  } else if (RepresentationUtils::IsUnboxed(rep)) {
     ASSERT(slot().IsDartField());
     const intptr_t cid = slot().field().guarded_cid();
     const FpuRegister value = locs()->in(kValuePos).fpu_reg();
     switch (cid) {
@ -7827,67 +7795,20 @@ void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
       default:
         UNREACHABLE();
     }
-    return;
-  }
-
-  // Store of a tagged pointer.
-  const bool compressed = slot().is_compressed();
-  if (ShouldEmitStoreBarrier()) {
-    Register value_reg = locs()->in(kValuePos).reg();
-    if (!compressed) {
-#if defined(TARGET_ARCH_IA32)
-      __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
-                               CanValueBeSmi(), memory_order_,
-                               locs()->temp(0).reg());
-#else
-      __ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
-                               CanValueBeSmi(), memory_order_);
-#endif
-    } else {
-#if defined(DART_COMPRESSED_POINTERS)
-      __ StoreCompressedIntoObjectOffset(instance_reg, offset_in_bytes,
-                                         value_reg, CanValueBeSmi(),
-                                         memory_order_);
-#else
-      UNREACHABLE();
-#endif
-    }
-  } else {
-    if (locs()->in(kValuePos).IsConstant()) {
-#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32) ||                  \
-    defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) ||             \
-    defined(TARGET_ARCH_RISCV64)
+  } else if (ShouldEmitStoreBarrier()) {
+    const Register scratch_reg =
+        locs()->temp_count() > 0 ? locs()->temp(0).reg() : TMP;
+    __ StoreToSlot(locs()->in(kValuePos).reg(), instance_reg, slot(),
+                   CanValueBeSmi(), memory_order_, scratch_reg);
+  } else if (locs()->in(kValuePos).IsConstant()) {
     const auto& value = locs()->in(kValuePos).constant();
-      if (!compressed) {
-        __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes, value,
+    auto const size =
+        slot().is_compressed() ? compiler::kObjectBytes : compiler::kWordBytes;
+    __ StoreObjectIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
+                                            value, memory_order_, size);
   } else {
-      } else {
-#if defined(DART_COMPRESSED_POINTERS)
-        __ StoreCompressedIntoObjectOffsetNoBarrier(
-            instance_reg, offset_in_bytes, value, memory_order_);
-#else
-        UNREACHABLE();
-#endif
-      }
-      return;
-#else
-      UNREACHABLE();
-#endif
-    }
-
-    Register value_reg = locs()->in(kValuePos).reg();
-    if (!compressed) {
-      __ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
-                                        value_reg, memory_order_);
-    } else {
-#if defined(DART_COMPRESSED_POINTERS)
-      __ StoreCompressedIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
-                                                  value_reg, memory_order_);
-#else
-      UNREACHABLE();
-#endif
-    }
-  }
+    __ StoreToSlotNoBarrier(locs()->in(kValuePos).reg(), instance_reg, slot(),
+                            memory_order_);
   }
 }
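The net effect of this hunk is that the tagged-pointer store collapses from pages of per-architecture and per-compression special cases into a three-way split: StoreToSlot when a write barrier is needed, StoreObjectIntoObjectOffsetNoBarrier for barrier-free constants, and StoreToSlotNoBarrier otherwise. A standalone sketch of that split, with illustrative names and types:

#include <iostream>

// Illustrative stand-ins for the IL-level inputs to the store.
struct StoreModel {
  bool needs_barrier;  // ShouldEmitStoreBarrier() in the IL
  bool value_is_constant;
};

void EmitStoreModel(const StoreModel& s) {
  if (s.needs_barrier) {
    // Barrier path: store, then possibly record the container in the
    // GC's remembered set. Needs a scratch register (TMP by default).
    std::cout << "StoreToSlot (with write barrier)\n";
  } else if (s.value_is_constant) {
    // Constants proven barrier-free are stored directly.
    std::cout << "StoreObjectIntoObjectOffsetNoBarrier\n";
  } else {
    std::cout << "StoreToSlotNoBarrier\n";
  }
}

int main() {
  EmitStoreModel({true, false});
  EmitStoreModel({false, true});
  EmitStoreModel({false, false});
  return 0;
}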
@ -2605,7 +2605,8 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ StoreIntoArray(array, temp, value, CanValueBeSmi());
   } else if (locs()->in(2).IsConstant()) {
     const Object& constant = locs()->in(2).constant();
-    __ StoreIntoObjectNoBarrier(array, compiler::Address(temp), constant);
+    __ StoreObjectIntoObjectNoBarrier(array, compiler::Address(temp),
+                                      constant);
   } else {
     const Register value = locs()->in(2).reg();
     __ StoreIntoObjectNoBarrier(array, compiler::Address(temp), value);
@ -2266,7 +2266,8 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     ASSERT(!ShouldEmitStoreBarrier());  // Specially treated above.
     if (locs()->in(2).IsConstant()) {
       const Object& constant = locs()->in(2).constant();
-      __ StoreCompressedIntoObjectNoBarrier(array, element_address, constant);
+      __ StoreCompressedObjectIntoObjectNoBarrier(array, element_address,
+                                                  constant);
     } else {
       const Register value = locs()->in(2).reg();
       __ StoreCompressedIntoObjectNoBarrier(array, element_address, value);
@ -1944,7 +1944,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ StoreIntoArray(array, slot, value, CanValueBeSmi(), scratch);
   } else if (locs()->in(2).IsConstant()) {
     const Object& constant = locs()->in(2).constant();
-    __ StoreIntoObjectNoBarrier(array, element_address, constant);
+    __ StoreObjectIntoObjectNoBarrier(array, element_address, constant);
   } else {
     Register value = locs()->in(2).reg();
     __ StoreIntoObjectNoBarrier(array, element_address, value);
@ -2336,7 +2336,7 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
   } else {
     compiler::Label init_loop;
     __ Bind(&init_loop);
-    __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+    __ StoreObjectIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
                                 compiler::Address(EDI, 0),
                                 Object::null_object());
     __ addl(EDI, compiler::Immediate(kWordSize));
@ -2529,7 +2529,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     ASSERT(!ShouldEmitStoreBarrier());  // Specially treated above.
     if (locs()->in(2).IsConstant()) {
       const Object& constant = locs()->in(2).constant();
-      __ StoreIntoObjectNoBarrier(array, element_address, constant);
+      __ StoreObjectIntoObjectNoBarrier(array, element_address, constant);
     } else {
       const Register value = locs()->in(2).reg();
       __ StoreIntoObjectNoBarrier(array, element_address, value);
@ -2236,7 +2236,8 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
     __ StoreCompressedIntoArray(array, slot, value, CanValueBeSmi());
   } else if (locs()->in(2).IsConstant()) {
     const Object& constant = locs()->in(2).constant();
-    __ StoreCompressedIntoObjectNoBarrier(array, element_address, constant);
+    __ StoreCompressedObjectIntoObjectNoBarrier(array, element_address,
+                                                constant);
   } else {
     Register value = locs()->in(2).reg();
     __ StoreCompressedIntoObjectNoBarrier(array, element_address, value);
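All of the No-Barrier renames in these hunks are safe for the reason the surrounding comments give: a generational write barrier only matters when a store could create a pointer from an old-space object to a new-space object, and values like null (or other VM constants) never live in new space. A sketch of that filter, as an illustrative model rather than the VM's actual barrier:

#include <iostream>

struct HeapObjModel {
  bool in_new_space;
};

// An old-space container pointing at a new-space value must be added
// to the remembered set so the scavenger can find the reference.
bool StoreNeedsGenerationalBarrier(const HeapObjModel& container,
                                   const HeapObjModel& value) {
  return !container.in_new_space && value.in_new_space;
}

int main() {
  HeapObjModel old_array{false}, young_value{true}, vm_null{false};
  std::cout << StoreNeedsGenerationalBarrier(old_array, young_value) << "\n";  // 1
  std::cout << StoreNeedsGenerationalBarrier(old_array, vm_null) << "\n";      // 0
  return 0;
}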
@ -299,7 +299,8 @@ void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub() {
     static_assert(TypeArguments::Cache::kSentinelIndex ==
                       TypeArguments::Cache::kInstantiatorTypeArgsIndex,
                   "sentinel is not same index as instantiator type args");
-    __ LoadAcquireCompressed(InstantiationABI::kScratchReg, kEntryReg,
+    __ LoadAcquireCompressedFromOffset(
+        InstantiationABI::kScratchReg, kEntryReg,
         TypeArguments::Cache::kInstantiatorTypeArgsIndex *
             target::kCompressedWordSize);
     // Test for an unoccupied entry by checking for the Smi sentinel.
@ -321,7 +322,7 @@ void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub() {
   };

   // Lookup cache before calling runtime.
-  __ LoadAcquireCompressed(
+  __ LoadAcquireCompressedFromOffset(
       InstantiationABI::kScratchReg,
       InstantiationABI::kUninstantiatedTypeArgumentsReg,
       target::TypeArguments::instantiations_offset() - kHeapObjectTag);
@ -374,7 +375,7 @@ void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub() {
       TypeArguments::Cache::kHeaderSize * target::kCompressedWordSize);

   __ Comment("Calculate probe mask");
-  __ LoadAcquireCompressed(
+  __ LoadAcquireCompressedFromOffset(
       InstantiationABI::kScratchReg, kEntryReg,
       TypeArguments::Cache::kMetadataIndex * target::kCompressedWordSize);
   __ LsrImmediate(
@ -2649,7 +2650,8 @@ static void GenerateSubtypeTestCacheLoopBody(Assembler* assembler,
   //
   // Instead, just use LoadAcquire to load the lower bits when compressed and
   // only compare the low bits of the loaded value using CompareObjectRegisters.
-  __ LoadAcquire(TypeTestABI::kScratchReg, cache_entry_reg,
+  __ LoadAcquireFromOffset(
+      TypeTestABI::kScratchReg, cache_entry_reg,
       target::kCompressedWordSize *
           target::SubtypeTestCache::kInstanceCidOrSignature,
       kObjectBytes);
@ -3153,7 +3155,7 @@ void StubCodeCompiler::GenerateSubtypeTestCacheSearch(
   __ Bind(&search_stc);
 #endif

-  __ LoadAcquireCompressed(
+  __ LoadAcquireCompressedFromOffset(
       cache_entry_reg, TypeTestABI::kSubtypeTestCacheReg,
       target::SubtypeTestCache::cache_offset() - kHeapObjectTag);
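These stubs use the compressed acquire-load entry points, which per the commit message remain per-architecture base methods: a compressed store is just a truncating store, but a compressed load of a non-Smi must restore the upper bits for the heap, and each compressed-pointer architecture does that differently. A standalone sketch of the asymmetry, assuming a simple fixed-heap-base scheme purely for illustration:

#include <cstdint>
#include <iostream>

// Assumed, simplified scheme: Smis carry tag bit 0, heap pointers tag
// bit 1, and all heap objects share one 64-bit heap base. Illustrative
// only; the VM's actual compression scheme differs per architecture.
constexpr uint64_t kHeapBase = 0x0000100000000000;

uint32_t CompressedStore(uint64_t tagged) {
  return static_cast<uint32_t>(tagged);  // plain truncation, no special cases
}

uint64_t CompressedLoad(uint32_t compressed) {
  if ((compressed & 1) == 0) {
    // Smi: sign-extend; the value is complete in the low 32 bits.
    return static_cast<uint64_t>(
        static_cast<int64_t>(static_cast<int32_t>(compressed)));
  }
  // Heap pointer: re-attach the upper heap bits.
  return kHeapBase | compressed;
}

int main() {
  const uint64_t tagged_ptr = kHeapBase | 0x12345679;  // tag bit set
  std::cout << std::hex << CompressedLoad(CompressedStore(tagged_ptr)) << "\n";
  return 0;
}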
@ -992,7 +992,7 @@ void StubCodeCompiler::GenerateAllocateArrayStub() {
   for (intptr_t offset = 0; offset < target::kObjectAlignment;
        offset += target::kWordSize) {
     // No generational barrier needed, since we are storing null.
-    __ StoreIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
+    __ StoreObjectIntoObjectNoBarrier(AllocateArrayABI::kResultReg,
                                 Address(EDI, offset), NullObject());
   }
   // Safe to only check every kObjectAlignment bytes instead of each word.
@ -1253,7 +1253,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
   // EAX: new object.
   // EDX: number of context variables.
   // No generational barrier needed, since we are storing null.
-  __ StoreIntoObjectNoBarrier(
+  __ StoreObjectIntoObjectNoBarrier(
       EAX, FieldAddress(EAX, target::Context::parent_offset()), NullObject());

   // Initialize the context variables.
@ -1267,7 +1267,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
   __ Bind(&loop);
   __ decl(EDX);
   // No generational barrier needed, since we are storing null.
-  __ StoreIntoObjectNoBarrier(EAX, Address(EBX, EDX, TIMES_4, 0),
+  __ StoreObjectIntoObjectNoBarrier(EAX, Address(EBX, EDX, TIMES_4, 0),
                               NullObject());
   __ Bind(&entry);
   __ cmpl(EDX, Immediate(0));
@ -1662,7 +1662,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
     for (intptr_t current_offset = target::Instance::first_field_offset();
          current_offset < instance_size;
          current_offset += target::kWordSize) {
-      __ StoreIntoObjectNoBarrier(
+      __ StoreObjectIntoObjectNoBarrier(
           AllocateObjectABI::kResultReg,
           FieldAddress(AllocateObjectABI::kResultReg, current_offset),
           NullObject());
@ -1680,7 +1680,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
       __ Bind(&loop);
       for (intptr_t offset = 0; offset < target::kObjectAlignment;
            offset += target::kWordSize) {
-        __ StoreIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
+        __ StoreObjectIntoObjectNoBarrier(AllocateObjectABI::kResultReg,
                                     Address(ECX, offset), NullObject());
       }
       // Safe to only check every kObjectAlignment bytes instead of each word.
@ -2467,7 +2467,7 @@ static void GenerateSubtypeTestCacheLoop(
     __ CompareToStack(src, original_tos_offset + depth);
   };

-  __ LoadAcquireCompressed(
+  __ LoadAcquireCompressedFromOffset(
       STCInternal::kScratchReg, STCInternal::kCacheArrayReg,
       target::kCompressedWordSize *
          target::SubtypeTestCache::kInstanceCidOrSignature);