[vm] Remove dynamic field unboxing in JIT

Instead, apply the same approach as in AOT: unbox based on the
static type information. There are no TFA results available in JIT,
but we can still unbox fields when running with sound null safety.
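
For illustration, a minimal standalone sketch of the kind of static-type-based
decision this switches the JIT to, instead of the old unboxing driven by
runtime field guards. This is not the VM's actual code: the names (FieldInfo,
StaticTypeKind, CanUnboxField) and the exact eligibility rules are assumptions
made for the example.

    // Hypothetical illustration only: simplified eligibility check for
    // storing a field unboxed, based purely on static type information.
    #include <iostream>

    enum class StaticTypeKind { kDynamic, kDouble, kFloat32x4, kFloat64x2 };

    struct FieldInfo {
      StaticTypeKind static_type;  // declared static type of the field
      bool is_nullable;            // nullability of that static type
      bool is_instance_field;
    };

    // With sound null safety, a non-nullable static type guarantees the field
    // can never hold null, so the value can live unboxed in the instance.
    bool CanUnboxField(const FieldInfo& field, bool sound_null_safety) {
      if (!field.is_instance_field) return false;
      if (!sound_null_safety || field.is_nullable) return false;
      switch (field.static_type) {
        case StaticTypeKind::kDouble:
        case StaticTypeKind::kFloat32x4:
        case StaticTypeKind::kFloat64x2:
          return true;
        default:
          return false;
      }
    }

    int main() {
      const FieldInfo non_nullable_double{StaticTypeKind::kDouble, false, true};
      const FieldInfo nullable_double{StaticTypeKind::kDouble, true, true};
      std::cout << CanUnboxField(non_nullable_double, true) << "\n";  // 1: unboxed
      std::cout << CanUnboxField(nullable_double, true) << "\n";      // 0: stays boxed
    }

The real eligibility rules in the VM involve more conditions than shown here;
the sketch only captures the dependence on sound null safety described above.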

TEST=ci

Cq-Include-Trybots: luci.dart.try:vm-kernel-reload-linux-release-x64-try,vm-kernel-reload-linux-debug-x64-try,vm-kernel-reload-rollback-linux-debug-x64-try,vm-kernel-reload-rollback-linux-release-x64-try,vm-kernel-precomp-linux-debug-x64-try,vm-kernel-precomp-linux-product-x64-try,vm-kernel-precomp-linux-release-x64-try,vm-kernel-precomp-nnbd-linux-release-simarm64-try,vm-kernel-linux-debug-simriscv64-try,vm-kernel-precomp-linux-debug-simriscv64-try,vm-kernel-nnbd-linux-release-ia32-try,vm-kernel-nnbd-linux-debug-x64-try,vm-kernel-nnbd-linux-debug-ia32-try,vm-kernel-nnbd-linux-release-simarm-try,vm-kernel-nnbd-linux-release-simarm64-try
Change-Id: Ide2e78c6659261ef8d245a4586cf699ea0fbb459
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/256211
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Slava Egorov <vegorov@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Vyacheslav Egorov 2022-09-20 13:42:45 +00:00 committed by Commit Bot
parent 50ac31f286
commit 83ab5d5ca3
53 changed files with 2112 additions and 4269 deletions

View file

@ -145,6 +145,7 @@ namespace dart {
struct simd128_value_t {
union {
int32_t int_storage[4];
int64_t int64_storage[2];
float float_storage[4];
double double_storage[2];
};
@ -410,10 +411,6 @@ struct simd128_value_t {
#define DUAL_MAPPING_SUPPORTED 1
#endif
#if defined(DART_PRECOMPILED_RUNTIME) || defined(DART_PRECOMPILER)
#define SUPPORT_UNBOXED_INSTANCE_FIELDS
#endif
// Short form printf format specifiers
#define Pd PRIdPTR
#define Pu PRIuPTR

View file

@ -334,10 +334,10 @@ class ClassSerializationCluster : public SerializationCluster {
}
s->Write<uint32_t>(cls->untag()->state_bits_);
// In AOT, the bitmap of unboxed fields should also be serialized
if (FLAG_precompiled_mode && !ClassTable::IsTopLevelCid(class_id)) {
s->WriteUnsigned64(
CalculateTargetUnboxedFieldsBitmap(s, class_id).Value());
if (!ClassTable::IsTopLevelCid(class_id)) {
const auto unboxed_fields_map =
CalculateTargetUnboxedFieldsBitmap(s, class_id);
s->WriteUnsigned64(unboxed_fields_map.Value());
}
}
@ -430,16 +430,10 @@ class ClassDeserializationCluster : public DeserializationCluster {
cls->untag()->implementor_cid_ = d.ReadCid();
#endif // !defined(DART_PRECOMPILED_RUNTIME)
cls->untag()->state_bits_ = d.Read<uint32_t>();
#if defined(DART_PRECOMPILED_RUNTIME)
d.ReadUnsigned64(); // Skip unboxed fields bitmap.
#endif
}
ClassTable* table = d_->isolate_group()->class_table();
#if defined(DART_PRECOMPILED_RUNTIME)
auto class_table = d_->isolate_group()->class_table();
#endif
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
ClassPtr cls = static_cast<ClassPtr>(d.Ref(id));
Deserializer::InitializeHeader(cls, kClassCid, Class::InstanceSize());
@ -478,12 +472,10 @@ class ClassDeserializationCluster : public DeserializationCluster {
table->AllocateIndex(class_id);
table->SetAt(class_id, cls);
#if defined(DART_PRECOMPILED_RUNTIME)
if (!ClassTable::IsTopLevelCid(class_id)) {
const UnboxedFieldBitmap unboxed_fields_map(d.ReadUnsigned64());
class_table->SetUnboxedFieldsMapAt(class_id, unboxed_fields_map);
table->SetUnboxedFieldsMapAt(class_id, unboxed_fields_map);
}
#endif
}
}

View file

@ -53,13 +53,13 @@ static void Finish(Thread* thread) {
Class& cls = Class::Handle(zone, object_store->closure_class());
cls.EnsureIsFinalized(thread);
// Make sure _Closure fields are not marked as unboxing candidates
// as they are accessed with plain loads.
// Make sure _Closure fields are not marked as unboxed as they are accessed
// with plain loads.
const Array& fields = Array::Handle(zone, cls.fields());
Field& field = Field::Handle(zone);
for (intptr_t i = 0; i < fields.Length(); ++i) {
field ^= fields.At(i);
field.set_is_unboxing_candidate(false);
field.set_is_unboxed(false);
}
// _Closure._hash field should be explicitly marked as nullable because
// VM creates instances of _Closure without compiling its constructors,

View file

@ -55,6 +55,10 @@ class UnboxedFieldBitmap {
ASSERT(position < Length());
bitmap_ |= Utils::Bit<decltype(bitmap_)>(position);
}
DART_FORCE_INLINE void Clear(intptr_t position) {
ASSERT(position < Length());
bitmap_ &= ~Utils::Bit<decltype(bitmap_)>(position);
}
DART_FORCE_INLINE uint64_t Value() const { return bitmap_; }
DART_FORCE_INLINE bool IsEmpty() const { return bitmap_ == 0; }
DART_FORCE_INLINE void Reset() { bitmap_ = 0; }
@ -373,13 +377,11 @@ class ClassTable : public MallocAllocated {
UnboxedFieldBitmap GetUnboxedFieldsMapAt(intptr_t cid) const {
ASSERT(IsValidIndex(cid));
return FLAG_precompiled_mode ? classes_.At<kUnboxedFieldBitmapIndex>(cid)
: UnboxedFieldBitmap();
return classes_.At<kUnboxedFieldBitmapIndex>(cid);
}
void SetUnboxedFieldsMapAt(intptr_t cid, UnboxedFieldBitmap map) {
ASSERT(IsValidIndex(cid));
ASSERT(classes_.At<kUnboxedFieldBitmapIndex>(cid).IsEmpty());
classes_.At<kUnboxedFieldBitmapIndex>(cid) = map;
}

View file

@ -1942,7 +1942,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
}
}
void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
int32_t offset,
Register value,
MemoryOrder memory_order) {
@ -1960,7 +1960,7 @@ void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
}
}
void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
int32_t offset,
const Object& value,
MemoryOrder memory_order) {
@ -3035,77 +3035,6 @@ void Assembler::StoreMultipleDToOffset(DRegister first,
vstmd(IA, IP, first, count);
}
void Assembler::CopyDoubleField(Register dst,
Register src,
Register tmp1,
Register tmp2,
DRegister dtmp) {
LoadDFromOffset(dtmp, src, target::Double::value_offset() - kHeapObjectTag);
StoreDToOffset(dtmp, dst, target::Double::value_offset() - kHeapObjectTag);
}
void Assembler::CopyFloat32x4Field(Register dst,
Register src,
Register tmp1,
Register tmp2,
DRegister dtmp) {
if (TargetCPUFeatures::neon_supported()) {
LoadMultipleDFromOffset(dtmp, 2, src,
target::Float32x4::value_offset() - kHeapObjectTag);
StoreMultipleDToOffset(dtmp, 2, dst,
target::Float32x4::value_offset() - kHeapObjectTag);
} else {
LoadFieldFromOffset(
tmp1, src, target::Float32x4::value_offset() + 0 * target::kWordSize);
LoadFieldFromOffset(
tmp2, src, target::Float32x4::value_offset() + 1 * target::kWordSize);
StoreFieldToOffset(
tmp1, dst, target::Float32x4::value_offset() + 0 * target::kWordSize);
StoreFieldToOffset(
tmp2, dst, target::Float32x4::value_offset() + 1 * target::kWordSize);
LoadFieldFromOffset(
tmp1, src, target::Float32x4::value_offset() + 2 * target::kWordSize);
LoadFieldFromOffset(
tmp2, src, target::Float32x4::value_offset() + 3 * target::kWordSize);
StoreFieldToOffset(
tmp1, dst, target::Float32x4::value_offset() + 2 * target::kWordSize);
StoreFieldToOffset(
tmp2, dst, target::Float32x4::value_offset() + 3 * target::kWordSize);
}
}
void Assembler::CopyFloat64x2Field(Register dst,
Register src,
Register tmp1,
Register tmp2,
DRegister dtmp) {
if (TargetCPUFeatures::neon_supported()) {
LoadMultipleDFromOffset(dtmp, 2, src,
target::Float64x2::value_offset() - kHeapObjectTag);
StoreMultipleDToOffset(dtmp, 2, dst,
target::Float64x2::value_offset() - kHeapObjectTag);
} else {
LoadFieldFromOffset(
tmp1, src, target::Float64x2::value_offset() + 0 * target::kWordSize);
LoadFieldFromOffset(
tmp2, src, target::Float64x2::value_offset() + 1 * target::kWordSize);
StoreFieldToOffset(
tmp1, dst, target::Float64x2::value_offset() + 0 * target::kWordSize);
StoreFieldToOffset(
tmp2, dst, target::Float64x2::value_offset() + 1 * target::kWordSize);
LoadFieldFromOffset(
tmp1, src, target::Float64x2::value_offset() + 2 * target::kWordSize);
LoadFieldFromOffset(
tmp2, src, target::Float64x2::value_offset() + 3 * target::kWordSize);
StoreFieldToOffset(
tmp1, dst, target::Float64x2::value_offset() + 2 * target::kWordSize);
StoreFieldToOffset(
tmp2, dst, target::Float64x2::value_offset() + 3 * target::kWordSize);
}
}
void Assembler::AddImmediate(Register rd,
Register rn,
int32_t value,

View file

@ -392,6 +392,8 @@ class Assembler : public AssemblerBase {
}
}
void PushValueAtOffset(Register base, int32_t offset) { UNIMPLEMENTED(); }
void Bind(Label* label);
// Unconditional jump to a given label. [distance] is ignored on ARM.
void Jump(Label* label, JumpDistance distance = kFarJump) { b(label); }
@ -958,12 +960,12 @@ class Assembler : public AssemblerBase {
const Address& dest,
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic);
void StoreIntoObjectNoBarrierOffset(
void StoreIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
Register value,
MemoryOrder memory_order = kRelaxedNonAtomic);
void StoreIntoObjectNoBarrierOffset(
void StoreIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
const Object& value,
@ -1134,21 +1136,17 @@ class Assembler : public AssemblerBase {
Register base,
int32_t offset);
void CopyDoubleField(Register dst,
Register src,
Register tmp1,
Register tmp2,
DRegister dtmp);
void CopyFloat32x4Field(Register dst,
Register src,
Register tmp1,
Register tmp2,
DRegister dtmp);
void CopyFloat64x2Field(Register dst,
Register src,
Register tmp1,
Register tmp2,
DRegister dtmp);
void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset) {
LoadMultipleDFromOffset(EvenDRegisterOf(dst), 2, base, offset);
}
void StoreUnboxedSimd128(FpuRegister src, Register base, int32_t offset) {
StoreMultipleDToOffset(EvenDRegisterOf(src), 2, base, offset);
}
void MoveUnboxedSimd128(FpuRegister dst, FpuRegister src) {
if (src != dst) {
vmovq(dst, src);
}
}
void Push(Register rd, Condition cond = AL);
void Pop(Register rd, Condition cond = AL);

View file

@ -1296,10 +1296,11 @@ void Assembler::StoreCompressedIntoObjectOffsetNoBarrier(
void Assembler::StoreIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value) {
const Object& value,
MemoryOrder memory_order) {
RELEASE_ASSERT(memory_order == kRelaxedNonAtomic);
ASSERT(IsOriginalObject(value));
ASSERT(IsNotTemporaryScopedHandle(value));
// No store buffer update.
if (IsSameObject(compiler::NullObject(), value)) {
str(NULL_REG, dest);
} else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
@ -1315,7 +1316,7 @@ void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
const Object& value,
MemoryOrder memory_order) {
// stlr does not feature an address operand.
ASSERT(memory_order == kRelaxedNonAtomic);
RELEASE_ASSERT(memory_order == kRelaxedNonAtomic);
ASSERT(IsOriginalObject(value));
ASSERT(IsNotTemporaryScopedHandle(value));
// No store buffer update.

View file

@ -518,6 +518,8 @@ class Assembler : public AssemblerBase {
void PushRegister(Register r) { Push(r); }
void PopRegister(Register r) { Pop(r); }
void PushValueAtOffset(Register base, int32_t offset) { UNIMPLEMENTED(); }
void PushRegisterPair(Register r0, Register r1) { PushPair(r0, r1); }
void PopRegisterPair(Register r0, Register r1) { PopPair(r0, r1); }
@ -1955,6 +1957,18 @@ class Assembler : public AssemblerBase {
}
}
void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset) {
LoadQFromOffset(dst, base, offset);
}
void StoreUnboxedSimd128(FpuRegister src, Register base, int32_t offset) {
StoreQToOffset(src, base, offset);
}
void MoveUnboxedSimd128(FpuRegister dst, FpuRegister src) {
if (src != dst) {
vmov(dst, src);
}
}
void LoadCompressed(Register dest, const Address& slot);
void LoadCompressedFromOffset(Register dest, Register base, int32_t offset);
void LoadCompressedSmi(Register dest, const Address& slot);
@ -2025,7 +2039,8 @@ class Assembler : public AssemblerBase {
MemoryOrder memory_order = kRelaxedNonAtomic);
void StoreIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value);
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic);
void StoreCompressedIntoObjectNoBarrier(
Register object,
const Address& dest,

View file

@ -580,6 +580,10 @@ class Assembler : public AssemblerBase {
PushRegister(value);
}
void PushValueAtOffset(Register base, int32_t offset) {
pushl(Address(base, offset));
}
void CompareRegisters(Register a, Register b);
void CompareObjectRegisters(Register a, Register b) {
CompareRegisters(a, b);
@ -686,6 +690,18 @@ class Assembler : public AssemblerBase {
}
}
void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset) {
movups(dst, Address(base, offset));
}
void StoreUnboxedSimd128(FpuRegister dst, Register base, int32_t offset) {
movups(Address(base, offset), dst);
}
void MoveUnboxedSimd128(FpuRegister dst, FpuRegister src) {
if (src != dst) {
movaps(dst, src);
}
}
void LoadAcquire(Register dst, Register address, int32_t offset = 0) {
// On intel loads have load-acquire behavior (i.e. loads are not re-ordered
// with other loads).
@ -814,6 +830,31 @@ class Assembler : public AssemblerBase {
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic);
void StoreIntoObjectOffset(Register object, // Object we are storing into.
int32_t offset, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_value_be_smi = kValueCanBeSmi,
MemoryOrder memory_order = kRelaxedNonAtomic) {
StoreIntoObject(object, FieldAddress(object, offset), value,
can_value_be_smi, memory_order);
}
void StoreIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
Register value,
MemoryOrder memory_order = kRelaxedNonAtomic) {
StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
memory_order);
}
void StoreIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic) {
StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
memory_order);
}
// Stores a non-tagged value into a heap object.
void StoreInternalPointer(Register object,
const Address& dest,

View file

@ -2368,7 +2368,7 @@ void Assembler::LoadAcquireCompressed(Register dst,
}
void Assembler::StoreRelease(Register src, Register address, int32_t offset) {
fence(HartEffects::kMemory, HartEffects::kRead);
fence(HartEffects::kMemory, HartEffects::kWrite);
StoreToOffset(src, address, offset);
}
@ -3168,7 +3168,7 @@ void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
Register value,
MemoryOrder memory_order) {
if (memory_order == kRelease) {
StoreRelease(value, object, offset);
StoreRelease(value, object, offset - kHeapObjectTag);
} else {
StoreToOffset(value, object, offset - kHeapObjectTag);
}
@ -3200,18 +3200,24 @@ void Assembler::StoreCompressedIntoObjectOffsetNoBarrier(
}
void Assembler::StoreIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value) {
const Object& value,
MemoryOrder memory_order) {
ASSERT(IsOriginalObject(value));
ASSERT(IsNotTemporaryScopedHandle(value));
// No store buffer update.
Register value_reg;
if (IsSameObject(compiler::NullObject(), value)) {
sx(NULL_REG, dest);
value_reg = NULL_REG;
} else if (target::IsSmi(value) && (target::ToRawSmi(value) == 0)) {
sx(ZR, dest);
value_reg = ZR;
} else {
LoadObject(TMP2, value);
sx(TMP2, dest);
value_reg = TMP2;
}
if (memory_order == kRelease) {
fence(HartEffects::kMemory, HartEffects::kWrite);
}
sx(value_reg, dest);
}
void Assembler::StoreCompressedIntoObjectNoBarrier(Register object,
const Address& dest,

View file

@ -780,6 +780,8 @@ class Assembler : public MicroAssembler {
void PushRegistersInOrder(std::initializer_list<Register> regs);
void PushValueAtOffset(Register base, int32_t offset) { UNIMPLEMENTED(); }
// Push all registers which are callee-saved according to the ARM64 ABI.
void PushNativeCalleeSavedRegisters();
@ -1065,6 +1067,19 @@ class Assembler : public MicroAssembler {
fmvd(dst, src);
}
void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset) {
// No single register SIMD on RISC-V.
UNREACHABLE();
}
void StoreUnboxedSimd128(FpuRegister src, Register base, int32_t offset) {
// No single register SIMD on RISC-V.
UNREACHABLE();
}
void MoveUnboxedSimd128(FpuRegister dst, FpuRegister src) {
// No single register SIMD on RISC-V.
UNREACHABLE();
}
void LoadCompressed(Register dest, const Address& slot) {
lx(dest, slot);
}
@ -1143,7 +1158,8 @@ class Assembler : public MicroAssembler {
MemoryOrder memory_order = kRelaxedNonAtomic);
void StoreIntoObjectNoBarrier(Register object,
const Address& dest,
const Object& value);
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic);
void StoreCompressedIntoObjectNoBarrier(
Register object,
const Address& dest,

View file

@ -753,6 +753,10 @@ class Assembler : public AssemblerBase {
PopRegister(r1);
}
void PushValueAtOffset(Register base, int32_t offset) {
pushq(Address(base, offset));
}
// Methods for adding/subtracting an immediate value that may be loaded from
// the constant pool.
// TODO(koda): Assert that these are not used for heap objects.
@ -838,12 +842,29 @@ class Assembler : public AssemblerBase {
Register value, // Value we are storing.
CanBeSmi can_be_smi = kValueCanBeSmi,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
void StoreIntoObjectOffset(Register object, // Object we are storing into.
int32_t offset, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_be_smi = kValueCanBeSmi,
MemoryOrder memory_order = kRelaxedNonAtomic) {
StoreIntoObject(object, FieldAddress(object, offset), value, can_be_smi,
memory_order);
}
void StoreCompressedIntoObject(
Register object, // Object we are storing into.
const Address& dest, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_be_smi = kValueCanBeSmi,
MemoryOrder memory_order = kRelaxedNonAtomic) override;
void StoreCompressedIntoObjectOffset(
Register object, // Object we are storing into.
int32_t offset, // Where we are storing into.
Register value, // Value we are storing.
CanBeSmi can_be_smi = kValueCanBeSmi,
MemoryOrder memory_order = kRelaxedNonAtomic) {
StoreCompressedIntoObject(object, FieldAddress(object, offset), value,
can_be_smi, memory_order);
}
void StoreBarrier(Register object, // Object we are storing into.
Register value, // Value we are storing.
CanBeSmi can_be_smi);
@ -876,6 +897,39 @@ class Assembler : public AssemblerBase {
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic);
void StoreIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
Register value,
MemoryOrder memory_order = kRelaxedNonAtomic) {
StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
memory_order);
}
void StoreCompressedIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
Register value,
MemoryOrder memory_order = kRelaxedNonAtomic) {
StoreCompressedIntoObjectNoBarrier(object, FieldAddress(object, offset),
value, memory_order);
}
void StoreIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic) {
StoreIntoObjectNoBarrier(object, FieldAddress(object, offset), value,
memory_order);
}
void StoreCompressedIntoObjectOffsetNoBarrier(
Register object,
int32_t offset,
const Object& value,
MemoryOrder memory_order = kRelaxedNonAtomic) {
StoreCompressedIntoObjectNoBarrier(object, FieldAddress(object, offset),
value, memory_order);
}
// Stores a non-tagged value into a heap object.
void StoreInternalPointer(Register object,
const Address& dest,
@ -1088,6 +1142,18 @@ class Assembler : public AssemblerBase {
movq(Address(base, offset), src);
}
void LoadUnboxedSimd128(FpuRegister dst, Register base, int32_t offset) {
movups(dst, Address(base, offset));
}
void StoreUnboxedSimd128(FpuRegister dst, Register base, int32_t offset) {
movups(Address(base, offset), dst);
}
void MoveUnboxedSimd128(FpuRegister dst, FpuRegister src) {
if (src != dst) {
movaps(dst, src);
}
}
void LoadUnboxedDouble(FpuRegister dst, Register base, int32_t offset) {
movsd(dst, Address(base, offset));
}

View file

@ -517,6 +517,57 @@ void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
}
}
#define __ assembler()->
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) {
return;
}
Definition* defn = instr->AsDefinition();
if (defn != nullptr && defn->HasTemp()) {
Location value = defn->locs()->out(0);
if (value.IsRegister()) {
__ PushRegister(value.reg());
} else if (value.IsFpuRegister()) {
const Code* stub;
switch (instr->representation()) {
case kUnboxedDouble:
stub = &StubCode::BoxDouble();
break;
case kUnboxedFloat32x4:
stub = &StubCode::BoxFloat32x4();
break;
case kUnboxedFloat64x2:
stub = &StubCode::BoxFloat64x2();
break;
default:
UNREACHABLE();
break;
}
// In unoptimized code at instruction epilogue the only
// live register is an output register.
instr->locs()->live_registers()->Clear();
if (instr->representation() == kUnboxedDouble) {
__ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
} else {
__ MoveUnboxedSimd128(BoxDoubleStubABI::kValueReg, value.fpu_reg());
}
GenerateNonLazyDeoptableStubCall(
InstructionSource(), // No token position.
*stub, UntaggedPcDescriptors::kOther, instr->locs());
__ PushRegister(BoxDoubleStubABI::kResultReg);
} else if (value.IsConstant()) {
__ PushObject(value.constant());
} else {
ASSERT(value.IsStackSlot());
__ PushValueAtOffset(value.base_reg(), value.ToStackSlotOffset());
}
}
}
#undef __
void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
if (!instr->token_pos().IsReal()) {
return;
@ -1750,6 +1801,15 @@ void FlowGraphCompiler::AllocateRegistersLocally(Instruction* instr) {
fpu_reg, reg,
compiler::target::Double::value_offset() - kHeapObjectTag);
break;
case kUnboxedFloat32x4:
case kUnboxedFloat64x2:
ASSERT(fpu_reg != kNoFpuRegister);
ASSERT(instr->SpeculativeModeOfInput(i) ==
Instruction::kNotSpeculative);
assembler()->LoadUnboxedSimd128(
fpu_reg, reg,
compiler::target::Float32x4::value_offset() - kHeapObjectTag);
break;
default:
// No automatic unboxing for other representations.
ASSERT(fpu_reg == kNoFpuRegister);
@ -3265,7 +3325,9 @@ void FlowGraphCompiler::FrameStateUpdateWith(Instruction* instr) {
void FlowGraphCompiler::FrameStatePush(Definition* defn) {
Representation rep = defn->representation();
ASSERT(!is_optimizing());
if ((rep == kUnboxedDouble) && defn->locs()->out(0).IsFpuRegister()) {
if ((rep == kUnboxedDouble || rep == kUnboxedFloat32x4 ||
rep == kUnboxedFloat64x2) &&
defn->locs()->out(0).IsFpuRegister()) {
// Output value is boxed in the instruction epilogue.
rep = kTagged;
}

View file

@ -231,31 +231,6 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
__ Bind(&fall_through);
}
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) {
return;
}
Definition* defn = instr->AsDefinition();
if ((defn != NULL) && defn->HasTemp()) {
const Location value = defn->locs()->out(0);
if (value.IsRegister()) {
__ PushRegister(value.reg());
} else if (value.IsFpuRegister()) {
ASSERT(instr->representation() == kUnboxedDouble);
// In unoptimized code at instruction epilogue the only
// live register is an output register.
instr->locs()->live_registers()->Clear();
__ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
GenerateNonLazyDeoptableStubCall(
InstructionSource(), // No token position.
StubCode::BoxDouble(), UntaggedPcDescriptors::kOther, instr->locs());
__ PushRegister(BoxDoubleStubABI::kResultReg);
} else {
UNREACHABLE();
}
}
}
void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
const Function& extracted_method,
intptr_t type_arguments_field_offset) {
@ -498,7 +473,7 @@ void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
__ LoadFieldFromOffset(R1, R0,
compiler::target::Array::element_offset(edge_id));
__ add(R1, R1, compiler::Operand(Smi::RawValue(1)));
__ StoreIntoObjectNoBarrierOffset(
__ StoreIntoObjectOffsetNoBarrier(
R0, compiler::target::Array::element_offset(edge_id), R1);
#if defined(DEBUG)
assembler_->set_use_far_branches(old_use_far_branches);

View file

@ -222,31 +222,6 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
__ Bind(&fall_through);
}
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) {
return;
}
Definition* defn = instr->AsDefinition();
if ((defn != NULL) && defn->HasTemp()) {
const Location value = defn->locs()->out(0);
if (value.IsRegister()) {
__ PushRegister(value.reg());
} else if (value.IsFpuRegister()) {
ASSERT(instr->representation() == kUnboxedDouble);
// In unoptimized code at instruction epilogue the only
// live register is an output register.
instr->locs()->live_registers()->Clear();
__ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
GenerateNonLazyDeoptableStubCall(
InstructionSource(), // No token position.
StubCode::BoxDouble(), UntaggedPcDescriptors::kOther, instr->locs());
__ PushRegister(BoxDoubleStubABI::kResultReg);
} else {
UNREACHABLE();
}
}
}
void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
const Function& extracted_method,
intptr_t type_arguments_field_offset) {

View file

@ -351,34 +351,6 @@ void FlowGraphCompiler::GenerateAssertAssignable(
__ Bind(&is_assignable);
}
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) {
return;
}
Definition* defn = instr->AsDefinition();
if ((defn != NULL) && defn->HasTemp()) {
Location value = defn->locs()->out(0);
if (value.IsRegister()) {
__ PushRegister(value.reg());
} else if (value.IsFpuRegister()) {
ASSERT(instr->representation() == kUnboxedDouble);
// In unoptimized code at instruction epilogue the only
// live register is an output register.
instr->locs()->live_registers()->Clear();
__ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
GenerateNonLazyDeoptableStubCall(
InstructionSource(), // No token position.
StubCode::BoxDouble(), UntaggedPcDescriptors::kOther, instr->locs());
__ PushRegister(BoxDoubleStubABI::kResultReg);
} else if (value.IsConstant()) {
__ PushObject(value.constant());
} else {
ASSERT(value.IsStackSlot());
__ pushl(LocationToStackSlotAddress(value));
}
}
}
// NOTE: If the entry code shape changes, ReturnAddressLocator in profiler.cc
// needs to be updated to match.
void FlowGraphCompiler::EmitFrameEntry() {

View file

@ -211,31 +211,6 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
__ Bind(&fall_through);
}
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) {
return;
}
Definition* defn = instr->AsDefinition();
if ((defn != NULL) && defn->HasTemp()) {
const Location value = defn->locs()->out(0);
if (value.IsRegister()) {
__ PushRegister(value.reg());
} else if (value.IsFpuRegister()) {
ASSERT(instr->representation() == kUnboxedDouble);
// In unoptimized code at instruction epilogue the only
// live register is an output register.
instr->locs()->live_registers()->Clear();
__ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
GenerateNonLazyDeoptableStubCall(
InstructionSource(), // No token position.
StubCode::BoxDouble(), UntaggedPcDescriptors::kOther, instr->locs());
__ PushRegister(BoxDoubleStubABI::kResultReg);
} else {
UNREACHABLE();
}
}
}
void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
const Function& extracted_method,
intptr_t type_arguments_field_offset) {

View file

@ -223,34 +223,6 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
__ Bind(&fall_through);
}
void FlowGraphCompiler::EmitInstructionEpilogue(Instruction* instr) {
if (is_optimizing()) {
return;
}
Definition* defn = instr->AsDefinition();
if ((defn != NULL) && defn->HasTemp()) {
Location value = defn->locs()->out(0);
if (value.IsRegister()) {
__ PushRegister(value.reg());
} else if (value.IsFpuRegister()) {
ASSERT(instr->representation() == kUnboxedDouble);
// In unoptimized code at instruction epilogue the only
// live register is an output register.
instr->locs()->live_registers()->Clear();
__ MoveUnboxedDouble(BoxDoubleStubABI::kValueReg, value.fpu_reg());
GenerateNonLazyDeoptableStubCall(
InstructionSource(), // No token position.
StubCode::BoxDouble(), UntaggedPcDescriptors::kOther, instr->locs());
__ PushRegister(BoxDoubleStubABI::kResultReg);
} else if (value.IsConstant()) {
__ PushObject(value.constant());
} else {
ASSERT(value.IsStackSlot());
__ pushq(LocationToStackSlotAddress(value));
}
}
}
void FlowGraphCompiler::GenerateMethodExtractorIntrinsic(
const Function& extracted_method,
intptr_t type_arguments_field_offset) {

View file

@ -898,20 +898,7 @@ intptr_t CheckClassInstr::ComputeCidMask() const {
return mask;
}
bool LoadFieldInstr::IsUnboxedDartFieldLoad() const {
return slot().representation() == kTagged && slot().IsDartField() &&
slot().IsUnboxed();
}
bool LoadFieldInstr::IsPotentialUnboxedDartFieldLoad() const {
return slot().representation() == kTagged && slot().IsDartField() &&
slot().IsPotentialUnboxed();
}
Representation LoadFieldInstr::representation() const {
if (IsUnboxedDartFieldLoad() && CompilerState::Current().is_optimizing()) {
return slot().UnboxedRepresentation();
}
return slot().representation();
}
@ -967,16 +954,6 @@ void AllocateTypedDataInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
locs(), deopt_id(), env());
}
bool StoreFieldInstr::IsUnboxedDartFieldStore() const {
return slot().representation() == kTagged && slot().IsDartField() &&
slot().IsUnboxed();
}
bool StoreFieldInstr::IsPotentialUnboxedDartFieldStore() const {
return slot().representation() == kTagged && slot().IsDartField() &&
slot().IsPotentialUnboxed();
}
Representation StoreFieldInstr::RequiredInputRepresentation(
intptr_t index) const {
ASSERT((index == 0) || (index == 1));
@ -984,9 +961,6 @@ Representation StoreFieldInstr::RequiredInputRepresentation(
// The instance is always tagged.
return kTagged;
}
if (IsUnboxedDartFieldStore() && CompilerState::Current().is_optimizing()) {
return slot().UnboxedRepresentation();
}
return slot().representation();
}
@ -4278,6 +4252,115 @@ void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
LocationSummary* locs = nullptr;
if (slot().representation() != kTagged) {
ASSERT(!calls_initializer());
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
if (RepresentationUtils::IsUnboxedInteger(slot().representation())) {
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
if (value_size <= compiler::target::kWordSize) {
locs->set_out(0, Location::RequiresRegister());
} else {
ASSERT(value_size == 2 * compiler::target::kWordSize);
locs->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
}
} else {
locs->set_out(0, Location::RequiresFpuRegister());
}
} else if (calls_initializer()) {
if (throw_exception_on_initialization()) {
const bool using_shared_stub = UseSharedSlowPathStub(opt);
const intptr_t kNumTemps = using_shared_stub ? 1 : 0;
locs = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
: LocationSummary::kCallOnSlowPath);
if (using_shared_stub) {
locs->set_temp(0, Location::RegisterLocation(
LateInitializationErrorABI::kFieldReg));
}
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(
0, Location::RegisterLocation(InitInstanceFieldABI::kInstanceReg));
locs->set_out(
0, Location::RegisterLocation(InitInstanceFieldABI::kResultReg));
}
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
}
return locs;
}
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register instance_reg = locs()->in(0).reg();
if (representation() != kTagged) {
if (RepresentationUtils::IsUnboxedInteger(representation())) {
const size_t value_size =
RepresentationUtils::ValueSize(representation());
if (value_size <= compiler::target::kWordSize) {
const Register result = locs()->out(0).reg();
__ LoadFieldFromOffset(
result, instance_reg, OffsetInBytes(),
RepresentationUtils::OperandSize(representation()));
} else {
auto const result_pair = locs()->out(0).AsPairLocation();
const Register result_lo = result_pair->At(0).reg();
const Register result_hi = result_pair->At(1).reg();
__ LoadFieldFromOffset(result_lo, instance_reg, OffsetInBytes());
__ LoadFieldFromOffset(result_hi, instance_reg,
OffsetInBytes() + compiler::target::kWordSize);
}
} else {
const FpuRegister result = locs()->out(0).fpu_reg();
const intptr_t cid = slot().field().guarded_cid();
switch (cid) {
case kDoubleCid:
__ LoadUnboxedDouble(result, instance_reg,
OffsetInBytes() - kHeapObjectTag);
break;
case kFloat32x4Cid:
case kFloat64x2Cid:
__ LoadUnboxedSimd128(result, instance_reg,
OffsetInBytes() - kHeapObjectTag);
break;
default:
UNREACHABLE();
}
}
return;
}
// Tagged load.
const Register result = locs()->out(0).reg();
if (slot().is_compressed()) {
__ LoadCompressedFieldFromOffset(result, instance_reg, OffsetInBytes());
} else {
__ LoadFieldFromOffset(result, instance_reg, OffsetInBytes());
}
if (calls_initializer()) {
EmitNativeCodeForInitializerCall(compiler);
}
}
void LoadFieldInstr::EmitNativeCodeForInitializerCall(
FlowGraphCompiler* compiler) {
ASSERT(calls_initializer());
@ -6379,7 +6462,7 @@ Representation StoreIndexedInstr::RequiredInputRepresentation(
}
bool Utf8ScanInstr::IsScanFlagsUnboxed() const {
return scan_flags_field_.IsUnboxed();
return scan_flags_field_.is_unboxed();
}
InvokeMathCFunctionInstr::InvokeMathCFunctionInstr(
@ -6862,6 +6945,166 @@ void RawStoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler->assembler()->StoreMemoryValue(value_reg, base_reg, offset_);
}
LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(kInstancePos, Location::RequiresRegister());
if (slot().representation() != kTagged) {
if (RepresentationUtils::IsUnboxedInteger(slot().representation())) {
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
if (value_size <= compiler::target::kWordSize) {
summary->set_in(kValuePos, Location::RequiresRegister());
} else {
ASSERT(value_size == 2 * compiler::target::kWordSize);
summary->set_in(kValuePos,
Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
}
} else {
summary->set_in(kValuePos, Location::RequiresFpuRegister());
}
} else {
Location value_loc;
if (ShouldEmitStoreBarrier()) {
summary->set_in(kValuePos,
kWriteBarrierValueReg != kNoRegister
? Location::RegisterLocation(kWriteBarrierValueReg)
: Location::WritableRegister());
} else {
#if defined(TARGET_ARCH_IA32)
// IA32 supports emitting `mov mem, Imm32` even for heap
// pointer immediates.
summary->set_in(kValuePos, LocationRegisterOrConstant(value()));
#elif defined(TARGET_ARCH_X64)
// X64 supports emitting `mov mem, Imm32` only with non-pointer
// immediate.
summary->set_in(kValuePos, LocationRegisterOrSmiConstant(value()));
#elif defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) || \
defined(TARGET_ARCH_RISCV64)
// ARM64 and RISC-V have dedicated zero and null registers which can be
// used in store instructions.
Location value_loc = Location::RequiresRegister();
if (auto constant = value()->definition()->AsConstant()) {
const auto& value = constant->value();
if (value.IsNull() ||
(value.IsSmi() && Smi::Cast(value).Value() == 0)) {
value_loc = Location::Constant(constant);
}
}
summary->set_in(kValuePos, value_loc);
#else
// No support for moving immediate to memory directly.
summary->set_in(kValuePos, Location::RequiresRegister());
#endif
}
}
return summary;
}
void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register instance_reg = locs()->in(kInstancePos).reg();
const intptr_t offset_in_bytes = OffsetInBytes();
ASSERT(offset_in_bytes > 0); // Field is finalized and points after header.
if (slot().representation() != kTagged) {
// Unboxed field.
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
if (RepresentationUtils::IsUnboxedInteger(slot().representation())) {
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
if (value_size <= compiler::target::kWordSize) {
const Register value = locs()->in(kValuePos).reg();
__ StoreFieldToOffset(
value, instance_reg, offset_in_bytes,
RepresentationUtils::OperandSize(slot().representation()));
} else {
auto const value_pair = locs()->in(kValuePos).AsPairLocation();
const Register value_lo = value_pair->At(0).reg();
const Register value_hi = value_pair->At(1).reg();
__ StoreFieldToOffset(value_lo, instance_reg, offset_in_bytes);
__ StoreFieldToOffset(value_hi, instance_reg,
offset_in_bytes + compiler::target::kWordSize);
}
} else {
// This is an FPU store.
const intptr_t cid = slot().field().guarded_cid();
const FpuRegister value = locs()->in(kValuePos).fpu_reg();
switch (cid) {
case kDoubleCid:
__ StoreUnboxedDouble(value, instance_reg,
offset_in_bytes - kHeapObjectTag);
return;
case kFloat32x4Cid:
case kFloat64x2Cid:
__ StoreUnboxedSimd128(value, instance_reg,
offset_in_bytes - kHeapObjectTag);
return;
default:
UNREACHABLE();
}
}
return;
}
// Store of a tagged pointer.
const bool compressed = slot().is_compressed();
if (ShouldEmitStoreBarrier()) {
Register value_reg = locs()->in(kValuePos).reg();
if (!compressed) {
__ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
CanValueBeSmi(), memory_order_);
} else {
#if defined(DART_COMPRESSED_POINTERS)
__ StoreCompressedIntoObjectOffset(instance_reg, offset_in_bytes,
value_reg, CanValueBeSmi(),
memory_order_);
#else
UNREACHABLE();
#endif
}
} else {
if (locs()->in(kValuePos).IsConstant()) {
#if defined(TARGET_ARCH_X64) || defined(TARGET_ARCH_IA32) || \
defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) || \
defined(TARGET_ARCH_RISCV64)
const auto& value = locs()->in(kValuePos).constant();
if (!compressed) {
__ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes, value,
memory_order_);
} else {
#if defined(DART_COMPRESSED_POINTERS)
__ StoreCompressedIntoObjectOffsetNoBarrier(
instance_reg, offset_in_bytes, value, memory_order_);
#else
UNREACHABLE();
#endif
}
return;
#else
UNREACHABLE();
#endif
}
Register value_reg = locs()->in(kValuePos).reg();
if (!compressed) {
__ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
value_reg, memory_order_);
} else {
#if defined(DART_COMPRESSED_POINTERS)
__ StoreCompressedIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
value_reg, memory_order_);
#else
UNREACHABLE();
#endif
}
}
}
const Code& ReturnInstr::GetReturnStub(FlowGraphCompiler* compiler) const {
const Function& function = compiler->parsed_function().function();
ASSERT(function.IsSuspendableFunction());

View file

@ -5896,12 +5896,10 @@ class StoreFieldInstr : public TemplateInstruction<2, NoThrow> {
kind) {}
virtual SpeculativeMode SpeculativeModeOfInput(intptr_t index) const {
// In AOT unbox is done based on TFA, therefore it was proven to be correct
// and it can never deoptimize.
return (slot().representation() != kTagged ||
(IsUnboxedDartFieldStore() && CompilerState::Current().is_aot()))
? kNotSpeculative
: kGuardInputs;
// Slots are unboxed based on statically inferrable type information.
// Either sound non-nullable static types (JIT) or global type flow analysis
// results (AOT).
return slot().representation() != kTagged ? kNotSpeculative : kGuardInputs;
}
DECLARE_INSTRUCTION(StoreField)
@ -5937,9 +5935,7 @@ class StoreFieldInstr : public TemplateInstruction<2, NoThrow> {
emit_store_barrier_ = value;
}
virtual bool CanTriggerGC() const {
return IsUnboxedDartFieldStore() || IsPotentialUnboxedDartFieldStore();
}
virtual bool CanTriggerGC() const { return false; }
virtual bool ComputeCanDeoptimize() const { return false; }
@ -5951,15 +5947,6 @@ class StoreFieldInstr : public TemplateInstruction<2, NoThrow> {
// are marked as having no side-effects.
virtual bool HasUnknownSideEffects() const { return false; }
// Returns whether this instruction is an unboxed store into a _boxed_ Dart
// field. Unboxed Dart fields are handled similar to unboxed native fields.
bool IsUnboxedDartFieldStore() const;
// Returns whether this instruction is an potential unboxed store into a
// _boxed_ Dart field. Unboxed Dart fields are handled similar to unboxed
// native fields.
bool IsPotentialUnboxedDartFieldStore() const;
virtual Representation RequiredInputRepresentation(intptr_t index) const;
virtual Instruction* Canonicalize(FlowGraph* flow_graph);
@ -7361,15 +7348,6 @@ class LoadFieldInstr : public TemplateLoadField<1> {
virtual Representation representation() const;
// Returns whether this instruction is an unboxed load from a _boxed_ Dart
// field. Unboxed Dart fields are handled similar to unboxed native fields.
bool IsUnboxedDartFieldLoad() const;
// Returns whether this instruction is an potential unboxed load from a
// _boxed_ Dart field. Unboxed Dart fields are handled similar to unboxed
// native fields.
bool IsPotentialUnboxedDartFieldLoad() const;
DECLARE_INSTRUCTION(LoadField)
DECLARE_ATTRIBUTES(&slot())
@ -7402,9 +7380,7 @@ class LoadFieldInstr : public TemplateLoadField<1> {
virtual bool AllowsCSE() const { return slot_.is_immutable(); }
virtual bool CanTriggerGC() const {
return calls_initializer() || IsPotentialUnboxedDartFieldLoad();
}
virtual bool CanTriggerGC() const { return calls_initializer(); }
virtual bool AttributesEqual(const Instruction& other) const;

View file

@ -2829,286 +2829,6 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps =
((IsUnboxedDartFieldStore() && opt)
? (FLAG_precompiled_mode ? 0 : 2)
: (IsPotentialUnboxedDartFieldStore() ? 3 : 0));
LocationSummary* summary = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
(!FLAG_precompiled_mode &&
((IsUnboxedDartFieldStore() && opt && is_initialization()) ||
IsPotentialUnboxedDartFieldStore()))
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall);
summary->set_in(kInstancePos, Location::RequiresRegister());
if (slot().representation() != kTagged) {
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
if (value_size <= compiler::target::kWordSize) {
summary->set_in(kValuePos, Location::RequiresRegister());
} else {
ASSERT(value_size <= 2 * compiler::target::kWordSize);
summary->set_in(kValuePos, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
}
} else if (IsUnboxedDartFieldStore() && opt) {
summary->set_in(kValuePos, Location::RequiresFpuRegister());
if (!FLAG_precompiled_mode) {
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
}
} else if (IsPotentialUnboxedDartFieldStore()) {
summary->set_in(kValuePos, ShouldEmitStoreBarrier()
? Location::WritableRegister()
: Location::RequiresRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
summary->set_temp(2, opt ? Location::RequiresFpuRegister()
: Location::FpuRegisterLocation(Q1));
} else {
summary->set_in(kValuePos,
ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
}
return summary;
}
static void EnsureMutableBox(FlowGraphCompiler* compiler,
StoreFieldInstr* instruction,
Register box_reg,
const Class& cls,
Register instance_reg,
intptr_t offset,
Register temp) {
compiler::Label done;
__ ldr(box_reg, compiler::FieldAddress(instance_reg, offset));
__ CompareObject(box_reg, Object::null_object());
__ b(&done, NE);
BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp);
__ MoveRegister(temp, box_reg);
__ StoreIntoObjectOffset(instance_reg, offset, temp,
compiler::Assembler::kValueIsNotSmi);
__ Bind(&done);
}
void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
compiler::Label skip_store;
const Register instance_reg = locs()->in(kInstancePos).reg();
const intptr_t offset_in_bytes = OffsetInBytes();
ASSERT(offset_in_bytes > 0); // Field is finalized and points after header.
if (slot().representation() != kTagged) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
auto const rep = slot().representation();
ASSERT(RepresentationUtils::IsUnboxedInteger(rep));
const size_t value_size = RepresentationUtils::ValueSize(rep);
__ Comment("NativeUnboxedStoreFieldInstr");
if (value_size <= compiler::target::kWordSize) {
const Register value = locs()->in(kValuePos).reg();
__ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
RepresentationUtils::OperandSize(rep));
} else {
auto const in_pair = locs()->in(kValuePos).AsPairLocation();
const Register in_lo = in_pair->At(0).reg();
const Register in_hi = in_pair->At(1).reg();
const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
__ StoreToOffset(in_lo, instance_reg, offset_lo);
__ StoreToOffset(in_hi, instance_reg, offset_hi);
}
return;
}
if (IsUnboxedDartFieldStore() && compiler->is_optimizing()) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
const intptr_t cid = slot().field().UnboxedFieldCid();
const DRegister value = EvenDRegisterOf(locs()->in(kValuePos).fpu_reg());
if (FLAG_precompiled_mode) {
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleStoreFieldInstr");
__ StoreDToOffset(value, instance_reg,
offset_in_bytes - kHeapObjectTag);
return;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4StoreFieldInstr");
__ StoreMultipleDToOffset(value, 2, instance_reg,
offset_in_bytes - kHeapObjectTag);
return;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2StoreFieldInstr");
__ StoreMultipleDToOffset(value, 2, instance_reg,
offset_in_bytes - kHeapObjectTag);
return;
default:
UNREACHABLE();
}
}
const Register temp = locs()->temp(0).reg();
const Register temp2 = locs()->temp(1).reg();
if (is_initialization()) {
const Class* cls = NULL;
switch (cid) {
case kDoubleCid:
cls = &compiler->double_class();
break;
case kFloat32x4Cid:
cls = &compiler->float32x4_class();
break;
case kFloat64x2Cid:
cls = &compiler->float64x2_class();
break;
default:
UNREACHABLE();
}
BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
__ MoveRegister(temp2, temp);
__ StoreIntoObjectOffset(instance_reg, offset_in_bytes, temp2,
compiler::Assembler::kValueIsNotSmi);
} else {
__ ldr(temp, compiler::FieldAddress(instance_reg, offset_in_bytes));
}
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleStoreFieldInstr");
__ StoreDToOffset(
value, temp,
compiler::target::Double::value_offset() - kHeapObjectTag);
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4StoreFieldInstr");
__ StoreMultipleDToOffset(
value, 2, temp,
compiler::target::Float32x4::value_offset() - kHeapObjectTag);
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2StoreFieldInstr");
__ StoreMultipleDToOffset(
value, 2, temp,
compiler::target::Float64x2::value_offset() - kHeapObjectTag);
break;
default:
UNREACHABLE();
}
return;
}
if (IsPotentialUnboxedDartFieldStore()) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
const Register value_reg = locs()->in(kValuePos).reg();
const Register temp = locs()->temp(0).reg();
const Register temp2 = locs()->temp(1).reg();
const DRegister fpu_temp = EvenDRegisterOf(locs()->temp(2).fpu_reg());
if (ShouldEmitStoreBarrier()) {
// Value input is a writable register and should be manually preserved
// across allocation slow-path.
locs()->live_registers()->Add(locs()->in(kValuePos), kTagged);
}
compiler::Label store_pointer;
compiler::Label store_double;
compiler::Label store_float32x4;
compiler::Label store_float64x2;
__ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
__ ldrh(temp2, compiler::FieldAddress(
temp, compiler::target::Field::is_nullable_offset()));
__ CompareImmediate(temp2, kNullCid);
__ b(&store_pointer, EQ);
__ ldrb(temp2, compiler::FieldAddress(
temp, compiler::target::Field::kind_bits_offset()));
__ tst(temp2, compiler::Operand(1 << Field::kUnboxingCandidateBit));
__ b(&store_pointer, EQ);
__ ldrh(temp2, compiler::FieldAddress(
temp, compiler::target::Field::guarded_cid_offset()));
__ CompareImmediate(temp2, kDoubleCid);
__ b(&store_double, EQ);
__ ldrh(temp2, compiler::FieldAddress(
temp, compiler::target::Field::guarded_cid_offset()));
__ CompareImmediate(temp2, kFloat32x4Cid);
__ b(&store_float32x4, EQ);
__ ldrh(temp2, compiler::FieldAddress(
temp, compiler::target::Field::guarded_cid_offset()));
__ CompareImmediate(temp2, kFloat64x2Cid);
__ b(&store_float64x2, EQ);
// Fall through.
__ b(&store_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(kInstancePos));
locs()->live_registers()->Add(locs()->in(kValuePos));
}
{
__ Bind(&store_double);
EnsureMutableBox(compiler, this, temp, compiler->double_class(),
instance_reg, offset_in_bytes, temp2);
__ CopyDoubleField(temp, value_reg, TMP, temp2, fpu_temp);
__ b(&skip_store);
}
{
__ Bind(&store_float32x4);
EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(),
instance_reg, offset_in_bytes, temp2);
__ CopyFloat32x4Field(temp, value_reg, TMP, temp2, fpu_temp);
__ b(&skip_store);
}
{
__ Bind(&store_float64x2);
EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(),
instance_reg, offset_in_bytes, temp2);
__ CopyFloat64x2Field(temp, value_reg, TMP, temp2, fpu_temp);
__ b(&skip_store);
}
__ Bind(&store_pointer);
}
if (ShouldEmitStoreBarrier()) {
const Register value_reg = locs()->in(kValuePos).reg();
__ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
CanValueBeSmi(), memory_order_);
} else {
if (locs()->in(kValuePos).IsConstant()) {
__ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes,
locs()->in(kValuePos).constant(),
memory_order_);
} else {
const Register value_reg = locs()->in(kValuePos).reg();
__ StoreIntoObjectNoBarrierOffset(instance_reg, offset_in_bytes,
value_reg, memory_order_);
}
}
__ Bind(&skip_store);
}
LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -3263,252 +2983,6 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&done);
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
LocationSummary* locs = nullptr;
if (slot().representation() != kTagged) {
ASSERT(!calls_initializer());
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
if (value_size <= compiler::target::kWordSize) {
locs->set_out(0, Location::RequiresRegister());
} else {
ASSERT(value_size <= 2 * compiler::target::kWordSize);
locs->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
}
} else if (IsUnboxedDartFieldLoad() && opt) {
ASSERT(!calls_initializer());
ASSERT(!slot().field().is_non_nullable_integer());
const intptr_t kNumTemps = FLAG_precompiled_mode ? 0 : 1;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
if (!FLAG_precompiled_mode) {
locs->set_temp(0, Location::RequiresRegister());
}
locs->set_out(0, Location::RequiresFpuRegister());
} else if (IsPotentialUnboxedDartFieldLoad()) {
ASSERT(!calls_initializer());
const intptr_t kNumTemps = 3;
locs = new (zone) LocationSummary(zone, kNumInputs, kNumTemps,
LocationSummary::kCallOnSlowPath);
locs->set_in(0, Location::RequiresRegister());
locs->set_temp(0, opt ? Location::RequiresFpuRegister()
: Location::FpuRegisterLocation(Q1));
locs->set_temp(1, Location::RequiresRegister());
locs->set_temp(2, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else if (calls_initializer()) {
if (throw_exception_on_initialization()) {
const bool using_shared_stub = UseSharedSlowPathStub(opt);
const intptr_t kNumTemps = using_shared_stub ? 1 : 0;
locs = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
: LocationSummary::kCallOnSlowPath);
if (using_shared_stub) {
locs->set_temp(0, Location::RegisterLocation(
LateInitializationErrorABI::kFieldReg));
}
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(
0, Location::RegisterLocation(InitInstanceFieldABI::kInstanceReg));
locs->set_out(
0, Location::RegisterLocation(InitInstanceFieldABI::kResultReg));
}
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
}
return locs;
}
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const Register instance_reg = locs()->in(0).reg();
if (slot().representation() != kTagged) {
ASSERT(!calls_initializer());
auto const rep = slot().representation();
const size_t value_size = RepresentationUtils::ValueSize(rep);
__ Comment("NativeUnboxedLoadFieldInstr");
if (value_size <= compiler::target::kWordSize) {
auto const result = locs()->out(0).reg();
__ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
RepresentationUtils::OperandSize(rep));
} else {
auto const out_pair = locs()->out(0).AsPairLocation();
const Register out_lo = out_pair->At(0).reg();
const Register out_hi = out_pair->At(1).reg();
const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
__ LoadFromOffset(out_lo, instance_reg, offset_lo);
__ LoadFromOffset(out_hi, instance_reg, offset_hi);
}
return;
}
if (IsUnboxedDartFieldLoad() && compiler->is_optimizing()) {
ASSERT_EQUAL(slot().representation(), kTagged);
ASSERT(!calls_initializer());
ASSERT(!slot().field().is_non_nullable_integer());
const intptr_t cid = slot().field().UnboxedFieldCid();
const DRegister result = EvenDRegisterOf(locs()->out(0).fpu_reg());
if (FLAG_precompiled_mode) {
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleLoadFieldInstr");
__ LoadDFromOffset(result, instance_reg,
OffsetInBytes() - kHeapObjectTag);
return;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4LoadFieldInstr");
__ LoadMultipleDFromOffset(result, 2, instance_reg,
OffsetInBytes() - kHeapObjectTag);
return;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2LoadFieldInstr");
__ LoadMultipleDFromOffset(result, 2, instance_reg,
OffsetInBytes() - kHeapObjectTag);
return;
default:
UNREACHABLE();
}
}
const Register temp = locs()->temp(0).reg();
__ LoadFieldFromOffset(temp, instance_reg, OffsetInBytes());
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleLoadFieldInstr");
__ LoadDFromOffset(
result, temp,
compiler::target::Double::value_offset() - kHeapObjectTag);
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4LoadFieldInstr");
__ LoadMultipleDFromOffset(
result, 2, temp,
compiler::target::Float32x4::value_offset() - kHeapObjectTag);
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2LoadFieldInstr");
__ LoadMultipleDFromOffset(
result, 2, temp,
compiler::target::Float64x2::value_offset() - kHeapObjectTag);
break;
default:
UNREACHABLE();
}
return;
}
compiler::Label done;
const Register result_reg = locs()->out(0).reg();
if (IsPotentialUnboxedDartFieldLoad()) {
ASSERT_EQUAL(slot().representation(), kTagged);
ASSERT(!calls_initializer());
const DRegister value = EvenDRegisterOf(locs()->temp(0).fpu_reg());
const Register temp = locs()->temp(1).reg();
const Register temp2 = locs()->temp(2).reg();
compiler::Label load_pointer;
compiler::Label load_double;
compiler::Label load_float32x4;
compiler::Label load_float64x2;
__ LoadObject(result_reg, Field::ZoneHandle(slot().field().Original()));
compiler::FieldAddress field_cid_operand(
result_reg, compiler::target::Field::guarded_cid_offset());
compiler::FieldAddress field_nullability_operand(
result_reg, compiler::target::Field::is_nullable_offset());
__ ldrh(temp, field_nullability_operand);
__ CompareImmediate(temp, kNullCid);
__ b(&load_pointer, EQ);
__ ldrh(temp, field_cid_operand);
__ CompareImmediate(temp, kDoubleCid);
__ b(&load_double, EQ);
__ ldrh(temp, field_cid_operand);
__ CompareImmediate(temp, kFloat32x4Cid);
__ b(&load_float32x4, EQ);
__ ldrh(temp, field_cid_operand);
__ CompareImmediate(temp, kFloat64x2Cid);
__ b(&load_float64x2, EQ);
// Fall through.
__ b(&load_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(0));
}
{
__ Bind(&load_double);
BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
result_reg, temp);
__ ldr(temp, compiler::FieldAddress(instance_reg, OffsetInBytes()));
__ CopyDoubleField(result_reg, temp, TMP, temp2, value);
__ b(&done);
}
{
__ Bind(&load_float32x4);
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->float32x4_class(), result_reg, temp);
__ ldr(temp, compiler::FieldAddress(instance_reg, OffsetInBytes()));
__ CopyFloat32x4Field(result_reg, temp, TMP, temp2, value);
__ b(&done);
}
{
__ Bind(&load_float64x2);
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->float64x2_class(), result_reg, temp);
__ ldr(temp, compiler::FieldAddress(instance_reg, OffsetInBytes()));
__ CopyFloat64x2Field(result_reg, temp, TMP, temp2, value);
__ b(&done);
}
__ Bind(&load_pointer);
}
__ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes());
if (calls_initializer()) {
EmitNativeCodeForInitializerCall(compiler);
}
__ Bind(&done);
}
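The precompiled branch above loads the unboxed payload straight out of the instance, while the non-precompiled branch first loads the field's box and then the payload stored inside it. A minimal sketch of the two layouts this distinction assumes (names and types are placeholders, not VM declarations):

#include <cstdint>

// Dynamic unboxing (the non-precompiled path above): the instance holds a
// pointer to a heap-allocated box, so reading the value takes two loads.
struct BoxedDoubleField {
  uintptr_t header;
  void* double_box;  // load #1: the box; load #2: the payload inside it
};

// Static unboxing (the precompiled path above): the eight payload bytes live
// inline in the instance, so a single load at OffsetInBytes() suffices.
struct InlineDoubleField {
  uintptr_t header;
  double value;
};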
LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
@ -6894,13 +6368,11 @@ class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
// The unboxed int64 argument is passed through a dedicated slot in Thread.
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ StoreToOffset(
right_lo, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset());
__ StoreToOffset(
right_hi, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
compiler::target::kWordSize);
__ StoreToOffset(right_lo, THR,
compiler::target::Thread::unboxed_runtime_arg_offset());
__ StoreToOffset(right_hi, THR,
compiler::target::Thread::unboxed_runtime_arg_offset() +
compiler::target::kWordSize);
}
};
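The slow path above hands the unboxed int64 to the runtime as two word-sized stores into a dedicated Thread slot. A standalone sketch of that hand-off for a 32-bit word size (the slot layout and helper names are illustrative, not the VM's actual declarations):

#include <cstdint>
#include <cstring>

// Mirrors the right_lo/right_hi stores emitted above: the 64-bit value is
// split into two word-sized writes into the runtime-argument slot.
void PassUnboxedInt64(uint8_t* thread_slot, int64_t value) {
  const uint32_t lo = static_cast<uint32_t>(value);
  const uint32_t hi = static_cast<uint32_t>(static_cast<uint64_t>(value) >> 32);
  std::memcpy(thread_slot, &lo, sizeof(lo));
  std::memcpy(thread_slot + sizeof(lo), &hi, sizeof(hi));
}

// The runtime entry can reassemble the argument from the same slot.
int64_t ReadUnboxedInt64(const uint8_t* thread_slot) {
  uint32_t lo = 0, hi = 0;
  std::memcpy(&lo, thread_slot, sizeof(lo));
  std::memcpy(&hi, thread_slot + sizeof(lo), sizeof(hi));
  return static_cast<int64_t>((static_cast<uint64_t>(hi) << 32) | lo);
}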
@ -7033,13 +6505,11 @@ class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
// The unboxed int64 argument is passed through a dedicated slot in Thread.
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ StoreToOffset(
right_lo, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset());
__ StoreToOffset(
right_hi, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
compiler::target::kWordSize);
__ StoreToOffset(right_lo, THR,
compiler::target::Thread::unboxed_runtime_arg_offset());
__ StoreToOffset(right_hi, THR,
compiler::target::Thread::unboxed_runtime_arg_offset() +
compiler::target::kWordSize);
}
};


@ -2460,292 +2460,6 @@ void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
static void EnsureMutableBox(FlowGraphCompiler* compiler,
StoreFieldInstr* instruction,
Register box_reg,
const Class& cls,
Register instance_reg,
intptr_t offset,
Register temp) {
compiler::Label done;
__ LoadCompressedFieldFromOffset(box_reg, instance_reg, offset);
__ CompareObject(box_reg, Object::null_object());
__ b(&done, NE);
BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp);
__ MoveRegister(temp, box_reg);
__ StoreCompressedIntoObjectOffset(instance_reg, offset, temp,
compiler::Assembler::kValueIsNotSmi);
__ Bind(&done);
}
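EnsureMutableBox allocates the box lazily on the first store and installs it into the field slot; later stores simply write through the box that is already there. A conceptual sketch of the pattern (simplified; Box and the plain new-expression are stand-ins for the VM's allocation slow path):

struct Box {
  double value;
};

// Install a mutable box in the slot the first time around, reuse it after.
Box* EnsureMutableBox(Box** field_slot) {
  if (*field_slot == nullptr) {
    *field_slot = new Box{0.0};  // slow path, taken at most once per field
  }
  return *field_slot;
}

void StoreUnboxedDouble(Box** field_slot, double v) {
  EnsureMutableBox(field_slot)->value = v;  // write into the box payload
}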
LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = (IsUnboxedDartFieldStore() && opt)
? (FLAG_precompiled_mode ? 0 : 2)
: (IsPotentialUnboxedDartFieldStore() ? 2 : 0);
LocationSummary* summary = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
(!FLAG_precompiled_mode &&
((IsUnboxedDartFieldStore() && opt && is_initialization()) ||
IsPotentialUnboxedDartFieldStore()))
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall);
summary->set_in(kInstancePos, Location::RequiresRegister());
if (slot().representation() != kTagged) {
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
ASSERT(RepresentationUtils::ValueSize(slot().representation()) <=
compiler::target::kWordSize);
summary->set_in(kValuePos, Location::RequiresRegister());
} else if (IsUnboxedDartFieldStore() && opt) {
summary->set_in(kValuePos, Location::RequiresFpuRegister());
if (FLAG_precompiled_mode) {
ConstantInstr* constant = value()->definition()->AsConstant();
if (constant != nullptr && constant->HasZeroRepresentation()) {
summary->set_in(kValuePos, Location::Constant(constant));
}
} else {
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
}
} else if (IsPotentialUnboxedDartFieldStore()) {
summary->set_in(kValuePos, ShouldEmitStoreBarrier()
? Location::WritableRegister()
: Location::RequiresRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
} else {
summary->set_in(kValuePos,
ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
}
return summary;
}
void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
compiler::Label skip_store;
const Register instance_reg = locs()->in(kInstancePos).reg();
const intptr_t offset_in_bytes = OffsetInBytes();
ASSERT(offset_in_bytes > 0); // Field is finalized and points after header.
if (slot().representation() != kTagged) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
const Register value = locs()->in(kValuePos).reg();
__ Comment("NativeUnboxedStoreFieldInstr");
__ StoreFieldToOffset(
value, instance_reg, offset_in_bytes,
RepresentationUtils::OperandSize(slot().representation()));
return;
}
if (IsUnboxedDartFieldStore() && compiler->is_optimizing()) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
const intptr_t cid = slot().field().UnboxedFieldCid();
if (FLAG_precompiled_mode) {
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleStoreFieldInstr");
if (locs()->in(kValuePos).IsConstant()) {
ASSERT(locs()
->in(kValuePos)
.constant_instruction()
->HasZeroRepresentation());
__ StoreFieldToOffset(ZR, instance_reg, offset_in_bytes,
compiler::kEightBytes);
} else {
__ StoreDFieldToOffset(locs()->in(kValuePos).fpu_reg(),
instance_reg, offset_in_bytes);
}
return;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4StoreFieldInstr");
__ StoreQFieldToOffset(locs()->in(kValuePos).fpu_reg(), instance_reg,
offset_in_bytes);
return;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2StoreFieldInstr");
__ StoreQFieldToOffset(locs()->in(kValuePos).fpu_reg(), instance_reg,
offset_in_bytes);
return;
default:
UNREACHABLE();
}
}
const Register temp = locs()->temp(0).reg();
const Register temp2 = locs()->temp(1).reg();
if (is_initialization()) {
const Class* cls = NULL;
switch (cid) {
case kDoubleCid:
cls = &compiler->double_class();
break;
case kFloat32x4Cid:
cls = &compiler->float32x4_class();
break;
case kFloat64x2Cid:
cls = &compiler->float64x2_class();
break;
default:
UNREACHABLE();
}
BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
__ MoveRegister(temp2, temp);
__ StoreCompressedIntoObjectOffset(instance_reg, offset_in_bytes, temp2,
compiler::Assembler::kValueIsNotSmi);
} else {
__ LoadCompressedFieldFromOffset(temp, instance_reg, offset_in_bytes);
}
const VRegister value = locs()->in(kValuePos).fpu_reg();
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleStoreFieldInstr");
__ StoreDFieldToOffset(value, temp, Double::value_offset());
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4StoreFieldInstr");
__ StoreQFieldToOffset(value, temp, Float32x4::value_offset());
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2StoreFieldInstr");
__ StoreQFieldToOffset(value, temp, Float64x2::value_offset());
break;
default:
UNREACHABLE();
}
return;
}
if (IsPotentialUnboxedDartFieldStore()) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
const Register value_reg = locs()->in(kValuePos).reg();
const Register temp = locs()->temp(0).reg();
const Register temp2 = locs()->temp(1).reg();
if (ShouldEmitStoreBarrier()) {
// Value input is a writable register and should be manually preserved
// across allocation slow-path.
locs()->live_registers()->Add(locs()->in(kValuePos), kTagged);
}
compiler::Label store_pointer;
compiler::Label store_double;
compiler::Label store_float32x4;
compiler::Label store_float64x2;
__ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
__ LoadFieldFromOffset(temp2, temp, Field::is_nullable_offset(),
compiler::kUnsignedTwoBytes);
__ CompareImmediate(temp2, kNullCid);
__ b(&store_pointer, EQ);
__ LoadFromOffset(temp2, temp, Field::kind_bits_offset() - kHeapObjectTag,
compiler::kUnsignedByte);
__ tsti(temp2, compiler::Immediate(1 << Field::kUnboxingCandidateBit));
__ b(&store_pointer, EQ);
__ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(),
compiler::kUnsignedTwoBytes);
__ CompareImmediate(temp2, kDoubleCid);
__ b(&store_double, EQ);
__ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(),
compiler::kUnsignedTwoBytes);
__ CompareImmediate(temp2, kFloat32x4Cid);
__ b(&store_float32x4, EQ);
__ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(),
compiler::kUnsignedTwoBytes);
__ CompareImmediate(temp2, kFloat64x2Cid);
__ b(&store_float64x2, EQ);
// Fall through.
__ b(&store_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(kInstancePos));
locs()->live_registers()->Add(locs()->in(kValuePos));
}
{
__ Bind(&store_double);
EnsureMutableBox(compiler, this, temp, compiler->double_class(),
instance_reg, offset_in_bytes, temp2);
__ LoadDFieldFromOffset(VTMP, value_reg, Double::value_offset());
__ StoreDFieldToOffset(VTMP, temp, Double::value_offset());
__ b(&skip_store);
}
{
__ Bind(&store_float32x4);
EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(),
instance_reg, offset_in_bytes, temp2);
__ LoadQFieldFromOffset(VTMP, value_reg, Float32x4::value_offset());
__ StoreQFieldToOffset(VTMP, temp, Float32x4::value_offset());
__ b(&skip_store);
}
{
__ Bind(&store_float64x2);
EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(),
instance_reg, offset_in_bytes, temp2);
__ LoadQFieldFromOffset(VTMP, value_reg, Float64x2::value_offset());
__ StoreQFieldToOffset(VTMP, temp, Float64x2::value_offset());
__ b(&skip_store);
}
__ Bind(&store_pointer);
}
const bool compressed = slot().is_compressed();
if (ShouldEmitStoreBarrier()) {
const Register value_reg = locs()->in(kValuePos).reg();
if (!compressed) {
__ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
CanValueBeSmi(), memory_order_);
} else {
__ StoreCompressedIntoObjectOffset(instance_reg, offset_in_bytes,
value_reg, CanValueBeSmi(),
memory_order_);
}
} else {
if (locs()->in(kValuePos).IsConstant()) {
const auto& value = locs()->in(kValuePos).constant();
if (!compressed) {
__ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes, value,
memory_order_);
} else {
__ StoreCompressedIntoObjectOffsetNoBarrier(
instance_reg, offset_in_bytes, value, memory_order_);
}
} else {
const Register value_reg = locs()->in(kValuePos).reg();
if (!compressed) {
__ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
value_reg, memory_order_);
} else {
__ StoreCompressedIntoObjectOffsetNoBarrier(
instance_reg, offset_in_bytes, value_reg, memory_order_);
}
}
}
__ Bind(&skip_store);
}
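ShouldEmitStoreBarrier() picks between the barrier and no-barrier stores at the end of the method: a barrier is only needed when a heap pointer the GC has to track is written into the object, which is why constant and Smi stores skip it. A very rough sketch of the idea behind a generational barrier (an assumption for illustration; the VM's real barrier works on object header bits and a per-thread store buffer):

struct Obj {
  bool is_old;           // allocated in, or promoted to, old space
  bool in_store_buffer;  // already remembered for the next scavenge
};

void RememberObject(Obj* obj) {
  obj->in_store_buffer = true;  // stand-in for pushing onto a store buffer
}

void StoreWithBarrier(Obj* container, Obj** slot, Obj* value) {
  *slot = value;
  // Only an old-to-new pointer store creates work for the GC.
  if (value != nullptr && container->is_old && !value->is_old &&
      !container->in_store_buffer) {
    RememberObject(container);
  }
}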
LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -2908,221 +2622,6 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&done);
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
LocationSummary* locs = nullptr;
if (slot().representation() != kTagged) {
ASSERT(!calls_initializer());
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
ASSERT(RepresentationUtils::ValueSize(slot().representation()) <=
compiler::target::kWordSize);
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else if (IsUnboxedDartFieldLoad() && opt) {
ASSERT(!calls_initializer());
ASSERT(!slot().field().is_non_nullable_integer());
const intptr_t kNumTemps = FLAG_precompiled_mode ? 0 : 1;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
if (!FLAG_precompiled_mode) {
locs->set_temp(0, Location::RequiresRegister());
}
locs->set_out(0, Location::RequiresFpuRegister());
} else if (IsPotentialUnboxedDartFieldLoad()) {
ASSERT(!calls_initializer());
const intptr_t kNumTemps = 1;
locs = new (zone) LocationSummary(zone, kNumInputs, kNumTemps,
LocationSummary::kCallOnSlowPath);
locs->set_in(0, Location::RequiresRegister());
locs->set_temp(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else if (calls_initializer()) {
if (throw_exception_on_initialization()) {
const bool using_shared_stub = UseSharedSlowPathStub(opt);
const intptr_t kNumTemps = using_shared_stub ? 1 : 0;
locs = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
: LocationSummary::kCallOnSlowPath);
if (using_shared_stub) {
locs->set_temp(0, Location::RegisterLocation(
LateInitializationErrorABI::kFieldReg));
}
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(
0, Location::RegisterLocation(InitInstanceFieldABI::kInstanceReg));
locs->set_out(
0, Location::RegisterLocation(InitInstanceFieldABI::kResultReg));
}
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
}
return locs;
}
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const Register instance_reg = locs()->in(0).reg();
if (slot().representation() != kTagged) {
const Register result = locs()->out(0).reg();
__ Comment("NativeUnboxedLoadFieldInstr");
__ LoadFieldFromOffset(
result, instance_reg, OffsetInBytes(),
RepresentationUtils::OperandSize(slot().representation()));
return;
}
if (IsUnboxedDartFieldLoad() && compiler->is_optimizing()) {
const VRegister result = locs()->out(0).fpu_reg();
const intptr_t cid = slot().field().UnboxedFieldCid();
if (FLAG_precompiled_mode) {
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleLoadFieldInstr");
__ LoadDFieldFromOffset(result, instance_reg, OffsetInBytes());
return;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4LoadFieldInstr");
__ LoadQFieldFromOffset(result, instance_reg, OffsetInBytes());
return;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2LoadFieldInstr");
__ LoadQFieldFromOffset(result, instance_reg, OffsetInBytes());
return;
default:
UNREACHABLE();
}
}
const Register temp = locs()->temp(0).reg();
__ LoadCompressedFieldFromOffset(temp, instance_reg, OffsetInBytes());
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleLoadFieldInstr");
__ LoadDFieldFromOffset(result, temp, Double::value_offset());
break;
case kFloat32x4Cid:
__ LoadQFieldFromOffset(result, temp, Float32x4::value_offset());
break;
case kFloat64x2Cid:
__ LoadQFieldFromOffset(result, temp, Float64x2::value_offset());
break;
default:
UNREACHABLE();
}
return;
}
compiler::Label done;
const Register result_reg = locs()->out(0).reg();
if (IsPotentialUnboxedDartFieldLoad()) {
const Register temp = locs()->temp(0).reg();
compiler::Label load_pointer;
compiler::Label load_double;
compiler::Label load_float32x4;
compiler::Label load_float64x2;
__ LoadObject(result_reg, Field::ZoneHandle(slot().field().Original()));
compiler::FieldAddress field_cid_operand(
result_reg, Field::guarded_cid_offset(), compiler::kUnsignedTwoBytes);
compiler::FieldAddress field_nullability_operand(
result_reg, Field::is_nullable_offset(), compiler::kUnsignedTwoBytes);
__ ldr(temp, field_nullability_operand, compiler::kUnsignedTwoBytes);
__ CompareImmediate(temp, kNullCid);
__ b(&load_pointer, EQ);
__ ldr(temp, field_cid_operand, compiler::kUnsignedTwoBytes);
__ CompareImmediate(temp, kDoubleCid);
__ b(&load_double, EQ);
__ ldr(temp, field_cid_operand, compiler::kUnsignedTwoBytes);
__ CompareImmediate(temp, kFloat32x4Cid);
__ b(&load_float32x4, EQ);
__ ldr(temp, field_cid_operand, compiler::kUnsignedTwoBytes);
__ CompareImmediate(temp, kFloat64x2Cid);
__ b(&load_float64x2, EQ);
// Fall through.
__ b(&load_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(0));
}
{
__ Bind(&load_double);
BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
result_reg, temp);
__ LoadCompressedFieldFromOffset(temp, instance_reg, OffsetInBytes());
__ LoadDFieldFromOffset(VTMP, temp, Double::value_offset());
__ StoreDFieldToOffset(VTMP, result_reg, Double::value_offset());
__ b(&done);
}
{
__ Bind(&load_float32x4);
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->float32x4_class(), result_reg, temp);
__ LoadCompressedFieldFromOffset(temp, instance_reg, OffsetInBytes());
__ LoadQFieldFromOffset(VTMP, temp, Float32x4::value_offset());
__ StoreQFieldToOffset(VTMP, result_reg, Float32x4::value_offset());
__ b(&done);
}
{
__ Bind(&load_float64x2);
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->float64x2_class(), result_reg, temp);
__ LoadCompressedFieldFromOffset(temp, instance_reg, OffsetInBytes());
__ LoadQFieldFromOffset(VTMP, temp, Float64x2::value_offset());
__ StoreQFieldToOffset(VTMP, result_reg, Float64x2::value_offset());
__ b(&done);
}
__ Bind(&load_pointer);
}
if (slot().is_compressed()) {
__ LoadCompressedFieldFromOffset(result_reg, instance_reg, OffsetInBytes());
} else {
__ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes());
}
if (calls_initializer()) {
EmitNativeCodeForInitializerCall(compiler);
}
__ Bind(&done);
}
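The tagged path above uses LoadCompressedFieldFromOffset when the slot is compressed. A small sketch of what a compressed-pointer load boils down to, assuming a 32-bit word decompressed against a heap base (illustrative only, not the VM's exact encoding):

#include <cstddef>
#include <cstdint>
#include <cstring>

using CompressedPtr = uint32_t;

// Read the 32-bit compressed word out of the instance and widen it against
// the heap base before it can be dereferenced as a normal pointer.
void* LoadCompressedField(const uint8_t* instance, size_t offset,
                          uintptr_t heap_base) {
  CompressedPtr raw;
  std::memcpy(&raw, instance + offset, sizeof(raw));
  return reinterpret_cast<void*>(heap_base + raw);
}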
LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
@ -6075,7 +5574,8 @@ class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ str(right,
compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()));
compiler::Address(
THR, compiler::target::Thread::unboxed_runtime_arg_offset()));
}
};
@ -6182,7 +5682,8 @@ class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ str(right,
compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()));
compiler::Address(
THR, compiler::target::Thread::unboxed_runtime_arg_offset()));
}
};


@ -2122,263 +2122,6 @@ void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps =
(IsUnboxedDartFieldStore() && opt)
? 2
: ((IsPotentialUnboxedDartFieldStore()) ? 3 : 0);
LocationSummary* summary = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
((IsUnboxedDartFieldStore() && opt && is_initialization()) ||
IsPotentialUnboxedDartFieldStore())
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall);
summary->set_in(kInstancePos, Location::RequiresRegister());
if (slot().representation() != kTagged) {
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
if (value_size <= compiler::target::kWordSize) {
summary->set_in(kValuePos, Location::RequiresRegister());
} else {
ASSERT(value_size <= 2 * compiler::target::kWordSize);
summary->set_in(kValuePos, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
}
} else if (IsUnboxedDartFieldStore() && opt) {
summary->set_in(kValuePos, Location::RequiresFpuRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
} else if (IsPotentialUnboxedDartFieldStore()) {
summary->set_in(kValuePos, ShouldEmitStoreBarrier()
? Location::WritableRegister()
: Location::RequiresRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
summary->set_temp(2, opt ? Location::RequiresFpuRegister()
: Location::FpuRegisterLocation(XMM1));
} else {
summary->set_in(kValuePos, ShouldEmitStoreBarrier()
? Location::WritableRegister()
: LocationRegisterOrConstant(value()));
}
return summary;
}
static void EnsureMutableBox(FlowGraphCompiler* compiler,
StoreFieldInstr* instruction,
Register box_reg,
const Class& cls,
Register instance_reg,
intptr_t offset,
Register temp) {
compiler::Label done;
const compiler::Immediate& raw_null =
compiler::Immediate(static_cast<intptr_t>(Object::null()));
__ movl(box_reg, compiler::FieldAddress(instance_reg, offset));
__ cmpl(box_reg, raw_null);
__ j(NOT_EQUAL, &done);
BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp);
__ movl(temp, box_reg);
__ StoreIntoObject(instance_reg, compiler::FieldAddress(instance_reg, offset),
temp, compiler::Assembler::kValueIsNotSmi);
__ Bind(&done);
}
void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
compiler::Label skip_store;
const Register instance_reg = locs()->in(kInstancePos).reg();
const intptr_t offset_in_bytes = OffsetInBytes();
ASSERT(offset_in_bytes > 0); // Field is finalized and points after header.
if (slot().representation() != kTagged) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
auto const rep = slot().representation();
ASSERT(RepresentationUtils::IsUnboxedInteger(rep));
const size_t value_size = RepresentationUtils::ValueSize(rep);
__ Comment("NativeUnboxedStoreFieldInstr");
if (value_size <= compiler::target::kWordSize) {
const Register value = locs()->in(kValuePos).reg();
__ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
RepresentationUtils::OperandSize(rep));
} else {
auto const in_pair = locs()->in(kValuePos).AsPairLocation();
const Register in_lo = in_pair->At(0).reg();
const Register in_hi = in_pair->At(1).reg();
const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
__ StoreToOffset(in_lo, instance_reg, offset_lo);
__ StoreToOffset(in_hi, instance_reg, offset_hi);
}
return;
}
if (IsUnboxedDartFieldStore() && compiler->is_optimizing()) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
XmmRegister value = locs()->in(kValuePos).fpu_reg();
Register temp = locs()->temp(0).reg();
Register temp2 = locs()->temp(1).reg();
const intptr_t cid = slot().field().UnboxedFieldCid();
if (is_initialization()) {
const Class* cls = NULL;
switch (cid) {
case kDoubleCid:
cls = &compiler->double_class();
break;
case kFloat32x4Cid:
cls = &compiler->float32x4_class();
break;
case kFloat64x2Cid:
cls = &compiler->float64x2_class();
break;
default:
UNREACHABLE();
}
BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
__ movl(temp2, temp);
__ StoreIntoObject(instance_reg,
compiler::FieldAddress(instance_reg, offset_in_bytes),
temp2, compiler::Assembler::kValueIsNotSmi);
} else {
__ movl(temp, compiler::FieldAddress(instance_reg, offset_in_bytes));
}
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleStoreFieldInstr");
__ movsd(compiler::FieldAddress(temp, Double::value_offset()), value);
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4StoreFieldInstr");
__ movups(compiler::FieldAddress(temp, Float32x4::value_offset()),
value);
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2StoreFieldInstr");
__ movups(compiler::FieldAddress(temp, Float64x2::value_offset()),
value);
break;
default:
UNREACHABLE();
}
return;
}
if (IsPotentialUnboxedDartFieldStore()) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
__ Comment("PotentialUnboxedStore");
Register value_reg = locs()->in(kValuePos).reg();
Register temp = locs()->temp(0).reg();
Register temp2 = locs()->temp(1).reg();
FpuRegister fpu_temp = locs()->temp(2).fpu_reg();
if (ShouldEmitStoreBarrier()) {
// Value input is a writable register and should be manually preserved
// across allocation slow-path. Add it to live_registers set which
// determines which registers to preserve.
locs()->live_registers()->Add(locs()->in(kValuePos), kTagged);
}
compiler::Label store_pointer;
compiler::Label store_double;
compiler::Label store_float32x4;
compiler::Label store_float64x2;
__ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
__ cmpw(compiler::FieldAddress(temp, Field::is_nullable_offset()),
compiler::Immediate(kNullCid));
__ j(EQUAL, &store_pointer);
__ movzxb(temp2, compiler::FieldAddress(temp, Field::kind_bits_offset()));
__ testl(temp2, compiler::Immediate(1 << Field::kUnboxingCandidateBit));
__ j(ZERO, &store_pointer);
__ cmpw(compiler::FieldAddress(temp, Field::guarded_cid_offset()),
compiler::Immediate(kDoubleCid));
__ j(EQUAL, &store_double);
__ cmpw(compiler::FieldAddress(temp, Field::guarded_cid_offset()),
compiler::Immediate(kFloat32x4Cid));
__ j(EQUAL, &store_float32x4);
__ cmpw(compiler::FieldAddress(temp, Field::guarded_cid_offset()),
compiler::Immediate(kFloat64x2Cid));
__ j(EQUAL, &store_float64x2);
// Fall through.
__ jmp(&store_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(kInstancePos));
locs()->live_registers()->Add(locs()->in(kValuePos));
}
{
__ Bind(&store_double);
EnsureMutableBox(compiler, this, temp, compiler->double_class(),
instance_reg, offset_in_bytes, temp2);
__ movsd(fpu_temp,
compiler::FieldAddress(value_reg, Double::value_offset()));
__ movsd(compiler::FieldAddress(temp, Double::value_offset()), fpu_temp);
__ jmp(&skip_store);
}
{
__ Bind(&store_float32x4);
EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(),
instance_reg, offset_in_bytes, temp2);
__ movups(fpu_temp,
compiler::FieldAddress(value_reg, Float32x4::value_offset()));
__ movups(compiler::FieldAddress(temp, Float32x4::value_offset()),
fpu_temp);
__ jmp(&skip_store);
}
{
__ Bind(&store_float64x2);
EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(),
instance_reg, offset_in_bytes, temp2);
__ movups(fpu_temp,
compiler::FieldAddress(value_reg, Float64x2::value_offset()));
__ movups(compiler::FieldAddress(temp, Float64x2::value_offset()),
fpu_temp);
__ jmp(&skip_store);
}
__ Bind(&store_pointer);
}
if (ShouldEmitStoreBarrier()) {
Register value_reg = locs()->in(kValuePos).reg();
__ StoreIntoObject(instance_reg,
compiler::FieldAddress(instance_reg, offset_in_bytes),
value_reg, CanValueBeSmi(), memory_order_);
} else {
if (locs()->in(kValuePos).IsConstant()) {
__ StoreIntoObjectNoBarrier(
instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
locs()->in(kValuePos).constant(), memory_order_);
} else {
Register value_reg = locs()->in(kValuePos).reg();
__ StoreIntoObjectNoBarrier(
instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
value_reg, memory_order_);
}
}
__ Bind(&skip_store);
}
LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
LocationSummary* locs =
@ -2532,209 +2275,6 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&done);
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
LocationSummary* locs = nullptr;
if (slot().representation() != kTagged) {
ASSERT(!calls_initializer());
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
if (value_size <= compiler::target::kWordSize) {
locs->set_out(0, Location::RequiresRegister());
} else {
ASSERT(value_size <= 2 * compiler::target::kWordSize);
locs->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
}
} else if (IsUnboxedDartFieldLoad() && opt) {
ASSERT(!calls_initializer());
const intptr_t kNumTemps = 1;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_temp(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresFpuRegister());
} else if (IsPotentialUnboxedDartFieldLoad()) {
ASSERT(!calls_initializer());
const intptr_t kNumTemps = 2;
locs = new (zone) LocationSummary(zone, kNumInputs, kNumTemps,
LocationSummary::kCallOnSlowPath);
locs->set_in(0, Location::RequiresRegister());
locs->set_temp(0, opt ? Location::RequiresFpuRegister()
: Location::FpuRegisterLocation(XMM1));
locs->set_temp(1, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else if (calls_initializer()) {
if (throw_exception_on_initialization()) {
ASSERT(!UseSharedSlowPathStub(opt));
const intptr_t kNumTemps = 0;
locs = new (zone) LocationSummary(zone, kNumInputs, kNumTemps,
LocationSummary::kCallOnSlowPath);
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(
0, Location::RegisterLocation(InitInstanceFieldABI::kInstanceReg));
locs->set_out(
0, Location::RegisterLocation(InitInstanceFieldABI::kResultReg));
}
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
}
return locs;
}
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const Register instance_reg = locs()->in(0).reg();
if (slot().representation() != kTagged) {
ASSERT(!calls_initializer());
auto const rep = slot().representation();
const size_t value_size = RepresentationUtils::ValueSize(rep);
__ Comment("NativeUnboxedLoadFieldInstr");
if (value_size <= compiler::target::kWordSize) {
auto const result = locs()->out(0).reg();
__ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
RepresentationUtils::OperandSize(rep));
} else {
auto const out_pair = locs()->out(0).AsPairLocation();
const Register out_lo = out_pair->At(0).reg();
const Register out_hi = out_pair->At(1).reg();
const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
__ LoadFromOffset(out_lo, instance_reg, offset_lo);
__ LoadFromOffset(out_hi, instance_reg, offset_hi);
}
return;
}
if (IsUnboxedDartFieldLoad() && compiler->is_optimizing()) {
XmmRegister result = locs()->out(0).fpu_reg();
Register temp = locs()->temp(0).reg();
__ movl(temp, compiler::FieldAddress(instance_reg, OffsetInBytes()));
const intptr_t cid = slot().field().UnboxedFieldCid();
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleLoadFieldInstr");
__ movsd(result, compiler::FieldAddress(temp, Double::value_offset()));
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4LoadFieldInstr");
__ movups(result,
compiler::FieldAddress(temp, Float32x4::value_offset()));
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2LoadFieldInstr");
__ movups(result,
compiler::FieldAddress(temp, Float64x2::value_offset()));
break;
default:
UNREACHABLE();
}
return;
}
compiler::Label done;
const Register result = locs()->out(0).reg();
if (IsPotentialUnboxedDartFieldLoad()) {
Register temp = locs()->temp(1).reg();
XmmRegister value = locs()->temp(0).fpu_reg();
compiler::Label load_pointer;
compiler::Label load_double;
compiler::Label load_float32x4;
compiler::Label load_float64x2;
__ LoadObject(result, Field::ZoneHandle(slot().field().Original()));
compiler::FieldAddress field_cid_operand(result,
Field::guarded_cid_offset());
compiler::FieldAddress field_nullability_operand(
result, Field::is_nullable_offset());
__ cmpw(field_nullability_operand, compiler::Immediate(kNullCid));
__ j(EQUAL, &load_pointer);
__ cmpw(field_cid_operand, compiler::Immediate(kDoubleCid));
__ j(EQUAL, &load_double);
__ cmpw(field_cid_operand, compiler::Immediate(kFloat32x4Cid));
__ j(EQUAL, &load_float32x4);
__ cmpw(field_cid_operand, compiler::Immediate(kFloat64x2Cid));
__ j(EQUAL, &load_float64x2);
// Fall through.
__ jmp(&load_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(0));
}
{
__ Bind(&load_double);
BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
result, temp);
__ movl(temp, compiler::FieldAddress(instance_reg, OffsetInBytes()));
__ movsd(value, compiler::FieldAddress(temp, Double::value_offset()));
__ movsd(compiler::FieldAddress(result, Double::value_offset()), value);
__ jmp(&done);
}
{
__ Bind(&load_float32x4);
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->float32x4_class(), result, temp);
__ movl(temp, compiler::FieldAddress(instance_reg, OffsetInBytes()));
__ movups(value, compiler::FieldAddress(temp, Float32x4::value_offset()));
__ movups(compiler::FieldAddress(result, Float32x4::value_offset()),
value);
__ jmp(&done);
}
{
__ Bind(&load_float64x2);
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->float64x2_class(), result, temp);
__ movl(temp, compiler::FieldAddress(instance_reg, OffsetInBytes()));
__ movups(value, compiler::FieldAddress(temp, Float64x2::value_offset()));
__ movups(compiler::FieldAddress(result, Float64x2::value_offset()),
value);
__ jmp(&done);
}
__ Bind(&load_pointer);
}
__ movl(result, compiler::FieldAddress(instance_reg, OffsetInBytes()));
if (calls_initializer()) {
EmitNativeCodeForInitializerCall(compiler);
}
__ Bind(&done);
}
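The potential-unboxed path above dispatches on the field's guard metadata: a nullable field is always loaded as a tagged pointer, otherwise the guarded class id decides whether the raw payload has to be reboxed. A condensed sketch of that dispatch (the class ids and struct are placeholders, not VM declarations):

#include <cstdint>

enum : uint16_t { kNullCid = 1, kDoubleCid = 2, kFloat32x4Cid = 3, kFloat64x2Cid = 4 };

struct FieldGuard {
  uint16_t guarded_cid;  // the single class id observed for the field
  uint16_t is_nullable;  // kNullCid once null has been stored into the field
};

// Returns true when the load must go through the boxing slow path.
bool LoadNeedsRebox(const FieldGuard& guard) {
  if (guard.is_nullable == kNullCid) return false;  // stays a tagged load
  return guard.guarded_cid == kDoubleCid ||
         guard.guarded_cid == kFloat32x4Cid ||
         guard.guarded_cid == kFloat64x2Cid;
}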
LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
@ -6041,10 +5581,12 @@ class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
// The unboxed int64 argument is passed through a dedicated slot in Thread.
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ movl(compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()),
__ movl(compiler::Address(
THR, compiler::target::Thread::unboxed_runtime_arg_offset()),
right_lo);
__ movl(compiler::Address(
THR, Thread::unboxed_int64_runtime_arg_offset() + kWordSize),
THR, compiler::target::Thread::unboxed_runtime_arg_offset() +
kWordSize),
right_hi);
}
};
@ -6181,10 +5723,12 @@ class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
// The unboxed int64 argument is passed through a dedicated slot in Thread.
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ movl(compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()),
__ movl(compiler::Address(
THR, compiler::target::Thread::unboxed_runtime_arg_offset()),
right_lo);
__ movl(compiler::Address(
THR, Thread::unboxed_int64_runtime_arg_offset() + kWordSize),
THR, compiler::target::Thread::unboxed_runtime_arg_offset() +
kWordSize),
right_hi);
}
};


@ -29,6 +29,42 @@ DEFINE_FLAG(bool,
DECLARE_FLAG(bool, trace_inlining_intervals);
const char* RepresentationToCString(Representation rep) {
switch (rep) {
case kTagged:
return "tagged";
case kUntagged:
return "untagged";
case kUnboxedDouble:
return "double";
case kUnboxedFloat:
return "float";
case kUnboxedUint8:
return "uint8";
case kUnboxedUint16:
return "uint16";
case kUnboxedInt32:
return "int32";
case kUnboxedUint32:
return "uint32";
case kUnboxedInt64:
return "int64";
case kUnboxedFloat32x4:
return "float32x4";
case kUnboxedInt32x4:
return "int32x4";
case kUnboxedFloat64x2:
return "float64x2";
case kPairOfTagged:
return "tagged-pair";
case kNoRepresentation:
return "none";
case kNumRepresentations:
UNREACHABLE();
}
return "?";
}
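RepresentationToCString is now defined earlier in the file so that StoreFieldInstr::PrintOperandsTo (below) can use it to tag non-tagged slots in IL dumps. A tiny standalone example of the resulting suffix (the enum subset and output format are approximations, not copied from the real dump):

#include <cstdio>

enum Representation { kTagged, kUnboxedInt64, kUnboxedDouble };

static const char* RepresentationToCString(Representation rep) {
  switch (rep) {
    case kTagged:        return "tagged";
    case kUnboxedInt64:  return "int64";
    case kUnboxedDouble: return "double";
  }
  return "?";
}

int main() {
  // Mirrors the new " <%s>" suffix printed after the stored value, e.g.
  // "v1 . _field = v2 <int64>".
  std::printf("v1 . _field = v2 <%s>\n", RepresentationToCString(kUnboxedInt64));
  return 0;
}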
class IlTestPrinter : public AllStatic {
public:
static void PrintGraph(const char* phase, FlowGraph* flow_graph) {
@ -788,6 +824,9 @@ void StoreFieldInstr::PrintOperandsTo(BaseTextBuffer* f) const {
instance()->PrintTo(f);
f->Printf(" . %s = ", slot().Name());
value()->PrintTo(f);
if (slot().representation() != kTagged) {
f->Printf(" <%s>", RepresentationToCString(slot().representation()));
}
// Here, we just print the value of the enum field. We would prefer to get
// the final decision on whether a store barrier will be emitted by calling
@ -1100,42 +1139,6 @@ void IndirectEntryInstr::PrintTo(BaseTextBuffer* f) const {
}
}
const char* RepresentationToCString(Representation rep) {
switch (rep) {
case kTagged:
return "tagged";
case kUntagged:
return "untagged";
case kUnboxedDouble:
return "double";
case kUnboxedFloat:
return "float";
case kUnboxedUint8:
return "uint8";
case kUnboxedUint16:
return "uint16";
case kUnboxedInt32:
return "int32";
case kUnboxedUint32:
return "uint32";
case kUnboxedInt64:
return "int64";
case kUnboxedFloat32x4:
return "float32x4";
case kUnboxedInt32x4:
return "int32x4";
case kUnboxedFloat64x2:
return "float64x2";
case kPairOfTagged:
return "tagged-pair";
case kNoRepresentation:
return "none";
case kNumRepresentations:
UNREACHABLE();
}
return "?";
}
void PhiInstr::PrintTo(BaseTextBuffer* f) const {
f->Printf("v%" Pd " <- phi(", ssa_temp_index());
for (intptr_t i = 0; i < inputs_.length(); ++i) {


@ -2785,290 +2785,6 @@ void GuardFieldLengthInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
static void EnsureMutableBox(FlowGraphCompiler* compiler,
StoreFieldInstr* instruction,
Register box_reg,
const Class& cls,
Register instance_reg,
intptr_t offset,
Register temp) {
compiler::Label done;
__ LoadCompressedFieldFromOffset(box_reg, instance_reg, offset);
__ CompareObject(box_reg, Object::null_object());
__ BranchIf(NE, &done);
BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp);
__ MoveRegister(temp, box_reg);
__ StoreCompressedIntoObjectOffset(instance_reg, offset, temp,
compiler::Assembler::kValueIsNotSmi);
__ Bind(&done);
}
LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = (IsUnboxedDartFieldStore() && opt)
? (FLAG_precompiled_mode ? 0 : 2)
: (IsPotentialUnboxedDartFieldStore() ? 2 : 0);
LocationSummary* summary = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
(!FLAG_precompiled_mode &&
((IsUnboxedDartFieldStore() && opt && is_initialization()) ||
IsPotentialUnboxedDartFieldStore()))
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall);
summary->set_in(kInstancePos, Location::RequiresRegister());
if (slot().representation() != kTagged) {
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
if (value_size <= compiler::target::kWordSize) {
summary->set_in(kValuePos, Location::RequiresRegister());
} else {
#if XLEN == 32
ASSERT(value_size <= 2 * compiler::target::kWordSize);
summary->set_in(kValuePos, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
#else
UNREACHABLE();
#endif
}
} else if (IsUnboxedDartFieldStore() && opt) {
summary->set_in(kValuePos, Location::RequiresFpuRegister());
if (FLAG_precompiled_mode) {
#if XLEN >= 64
ConstantInstr* constant = value()->definition()->AsConstant();
if (constant != nullptr && constant->HasZeroRepresentation()) {
summary->set_in(kValuePos, Location::Constant(constant));
}
#endif
} else {
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
}
} else if (IsPotentialUnboxedDartFieldStore()) {
summary->set_in(kValuePos, ShouldEmitStoreBarrier()
? Location::WritableRegister()
: Location::RequiresRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
} else {
summary->set_in(kValuePos,
ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
}
return summary;
}
void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
compiler::Label skip_store;
const Register instance_reg = locs()->in(kInstancePos).reg();
const intptr_t offset_in_bytes = OffsetInBytes();
ASSERT(offset_in_bytes > 0); // Field is finalized and points after header.
if (slot().representation() != kTagged) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
auto const rep = slot().representation();
ASSERT(RepresentationUtils::IsUnboxedInteger(rep));
const size_t value_size = RepresentationUtils::ValueSize(rep);
__ Comment("NativeUnboxedStoreFieldInstr");
if (value_size <= compiler::target::kWordSize) {
const Register value = locs()->in(kValuePos).reg();
__ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
RepresentationUtils::OperandSize(rep));
} else {
#if XLEN == 32
auto const in_pair = locs()->in(kValuePos).AsPairLocation();
const Register in_lo = in_pair->At(0).reg();
const Register in_hi = in_pair->At(1).reg();
const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
__ StoreToOffset(in_lo, instance_reg, offset_lo);
__ StoreToOffset(in_hi, instance_reg, offset_hi);
#else
UNREACHABLE();
#endif
}
return;
}
if (IsUnboxedDartFieldStore() && compiler->is_optimizing()) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
const intptr_t cid = slot().field().UnboxedFieldCid();
if (FLAG_precompiled_mode) {
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleStoreFieldInstr");
#if XLEN >= 64
if (locs()->in(kValuePos).IsConstant()) {
ASSERT(locs()
->in(kValuePos)
.constant_instruction()
->HasZeroRepresentation());
__ StoreFieldToOffset(ZR, instance_reg, offset_in_bytes,
compiler::kEightBytes);
return;
}
#endif
__ StoreDFieldToOffset(locs()->in(kValuePos).fpu_reg(), instance_reg,
offset_in_bytes);
return;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4StoreFieldInstr");
UNIMPLEMENTED();
return;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2StoreFieldInstr");
UNIMPLEMENTED();
return;
default:
UNREACHABLE();
}
}
const Register temp = locs()->temp(0).reg();
const Register temp2 = locs()->temp(1).reg();
if (is_initialization()) {
const Class* cls = NULL;
switch (cid) {
case kDoubleCid:
cls = &compiler->double_class();
break;
case kFloat32x4Cid:
cls = &compiler->float32x4_class();
break;
case kFloat64x2Cid:
cls = &compiler->float64x2_class();
break;
default:
UNREACHABLE();
}
BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
__ MoveRegister(temp2, temp);
__ StoreCompressedIntoObjectOffset(instance_reg, offset_in_bytes, temp2,
compiler::Assembler::kValueIsNotSmi);
} else {
__ LoadCompressedFieldFromOffset(temp, instance_reg, offset_in_bytes);
}
const FRegister value = locs()->in(kValuePos).fpu_reg();
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleStoreFieldInstr");
__ StoreDFieldToOffset(value, temp, Double::value_offset());
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4StoreFieldInstr");
UNIMPLEMENTED();
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2StoreFieldInstr");
UNIMPLEMENTED();
break;
default:
UNREACHABLE();
}
return;
}
if (IsPotentialUnboxedDartFieldStore()) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
const Register value_reg = locs()->in(kValuePos).reg();
const Register temp = locs()->temp(0).reg();
const Register temp2 = locs()->temp(1).reg();
if (ShouldEmitStoreBarrier()) {
// Value input is a writable register and should be manually preserved
// across allocation slow-path.
locs()->live_registers()->Add(locs()->in(kValuePos), kTagged);
}
compiler::Label store_pointer;
compiler::Label store_double;
compiler::Label store_float32x4;
compiler::Label store_float64x2;
__ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
__ LoadFieldFromOffset(temp2, temp, Field::is_nullable_offset(),
compiler::kUnsignedTwoBytes);
__ CompareImmediate(temp2, kNullCid);
__ BranchIf(EQ, &store_pointer);
__ LoadFromOffset(temp2, temp, Field::kind_bits_offset() - kHeapObjectTag,
compiler::kUnsignedByte);
__ TestImmediate(temp2, 1 << Field::kUnboxingCandidateBit);
__ BranchIf(EQ, &store_pointer);
__ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset(),
compiler::kUnsignedTwoBytes);
__ CompareImmediate(temp2, kDoubleCid);
__ BranchIf(EQ, &store_double);
// Fall through.
__ j(&store_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(kInstancePos));
locs()->live_registers()->Add(locs()->in(kValuePos));
}
{
__ Bind(&store_double);
EnsureMutableBox(compiler, this, temp, compiler->double_class(),
instance_reg, offset_in_bytes, temp2);
__ LoadDFieldFromOffset(FTMP, value_reg, Double::value_offset());
__ StoreDFieldToOffset(FTMP, temp, Double::value_offset());
__ j(&skip_store);
}
__ Bind(&store_pointer);
}
const bool compressed = slot().is_compressed();
if (ShouldEmitStoreBarrier()) {
const Register value_reg = locs()->in(kValuePos).reg();
if (!compressed) {
__ StoreIntoObjectOffset(instance_reg, offset_in_bytes, value_reg,
CanValueBeSmi(), memory_order_);
} else {
__ StoreCompressedIntoObjectOffset(instance_reg, offset_in_bytes,
value_reg, CanValueBeSmi(),
memory_order_);
}
} else {
if (locs()->in(kValuePos).IsConstant()) {
const auto& value = locs()->in(kValuePos).constant();
if (!compressed) {
__ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes, value,
memory_order_);
} else {
__ StoreCompressedIntoObjectOffsetNoBarrier(
instance_reg, offset_in_bytes, value, memory_order_);
}
} else {
const Register value_reg = locs()->in(kValuePos).reg();
if (!compressed) {
__ StoreIntoObjectOffsetNoBarrier(instance_reg, offset_in_bytes,
value_reg, memory_order_);
} else {
__ StoreCompressedIntoObjectOffsetNoBarrier(
instance_reg, offset_in_bytes, value_reg, memory_order_);
}
}
}
__ Bind(&skip_store);
}
LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -3222,238 +2938,6 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&done);
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
LocationSummary* locs = nullptr;
if (slot().representation() != kTagged) {
ASSERT(!calls_initializer());
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
if (value_size <= compiler::target::kWordSize) {
locs->set_out(0, Location::RequiresRegister());
} else {
#if XLEN == 32
ASSERT(value_size <= 2 * compiler::target::kWordSize);
locs->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
#else
UNREACHABLE();
#endif
}
} else if (IsUnboxedDartFieldLoad() && opt) {
ASSERT(!calls_initializer());
ASSERT(!slot().field().is_non_nullable_integer());
const intptr_t kNumTemps = FLAG_precompiled_mode ? 0 : 1;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
if (!FLAG_precompiled_mode) {
locs->set_temp(0, Location::RequiresRegister());
}
locs->set_out(0, Location::RequiresFpuRegister());
} else if (IsPotentialUnboxedDartFieldLoad()) {
ASSERT(!calls_initializer());
const intptr_t kNumTemps = 1;
locs = new (zone) LocationSummary(zone, kNumInputs, kNumTemps,
LocationSummary::kCallOnSlowPath);
locs->set_in(0, Location::RequiresRegister());
locs->set_temp(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else if (calls_initializer()) {
if (throw_exception_on_initialization()) {
const bool using_shared_stub = UseSharedSlowPathStub(opt);
const intptr_t kNumTemps = using_shared_stub ? 1 : 0;
locs = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
: LocationSummary::kCallOnSlowPath);
if (using_shared_stub) {
locs->set_temp(0, Location::RegisterLocation(
LateInitializationErrorABI::kFieldReg));
}
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(
0, Location::RegisterLocation(InitInstanceFieldABI::kInstanceReg));
locs->set_out(
0, Location::RegisterLocation(InitInstanceFieldABI::kResultReg));
}
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
}
return locs;
}
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const Register instance_reg = locs()->in(0).reg();
if (slot().representation() != kTagged) {
auto const rep = slot().representation();
const size_t value_size = RepresentationUtils::ValueSize(rep);
__ Comment("NativeUnboxedLoadFieldInstr");
if (value_size <= compiler::target::kWordSize) {
auto const result = locs()->out(0).reg();
__ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
RepresentationUtils::OperandSize(rep));
} else {
#if XLEN == 32
auto const out_pair = locs()->out(0).AsPairLocation();
const Register out_lo = out_pair->At(0).reg();
const Register out_hi = out_pair->At(1).reg();
const intptr_t offset_lo = OffsetInBytes() - kHeapObjectTag;
const intptr_t offset_hi = offset_lo + compiler::target::kWordSize;
__ LoadFromOffset(out_lo, instance_reg, offset_lo);
__ LoadFromOffset(out_hi, instance_reg, offset_hi);
#else
UNREACHABLE();
#endif
}
return;
}
if (IsUnboxedDartFieldLoad() && compiler->is_optimizing()) {
const FRegister result = locs()->out(0).fpu_reg();
const intptr_t cid = slot().field().UnboxedFieldCid();
if (FLAG_precompiled_mode) {
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleLoadFieldInstr");
__ LoadDFieldFromOffset(result, instance_reg, OffsetInBytes());
return;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4LoadFieldInstr");
UNIMPLEMENTED();
return;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2LoadFieldInstr");
UNIMPLEMENTED();
return;
default:
UNREACHABLE();
}
}
const Register temp = locs()->temp(0).reg();
__ LoadCompressedFieldFromOffset(temp, instance_reg, OffsetInBytes());
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleLoadFieldInstr");
__ LoadDFieldFromOffset(result, temp, Double::value_offset());
break;
case kFloat32x4Cid:
UNIMPLEMENTED();
break;
case kFloat64x2Cid:
UNIMPLEMENTED();
break;
default:
UNREACHABLE();
}
return;
}
compiler::Label done;
const Register result_reg = locs()->out(0).reg();
if (IsPotentialUnboxedDartFieldLoad()) {
const Register temp = locs()->temp(0).reg();
compiler::Label load_pointer;
compiler::Label load_double;
compiler::Label load_float32x4;
compiler::Label load_float64x2;
__ LoadObject(result_reg, Field::ZoneHandle(slot().field().Original()));
compiler::FieldAddress field_cid_operand(result_reg,
Field::guarded_cid_offset());
compiler::FieldAddress field_nullability_operand(
result_reg, Field::is_nullable_offset());
__ lhu(temp, field_nullability_operand);
__ CompareImmediate(temp, kNullCid);
__ BranchIf(EQ, &load_pointer, compiler::Assembler::kNearJump);
__ lhu(temp, field_cid_operand);
__ CompareImmediate(temp, kDoubleCid);
__ BranchIf(EQ, &load_double, compiler::Assembler::kNearJump);
__ lhu(temp, field_cid_operand);
__ CompareImmediate(temp, kFloat32x4Cid);
__ BranchIf(EQ, &load_float32x4, compiler::Assembler::kNearJump);
__ lhu(temp, field_cid_operand);
__ CompareImmediate(temp, kFloat64x2Cid);
__ BranchIf(EQ, &load_float64x2, compiler::Assembler::kNearJump);
// Fall through.
__ j(&load_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(0));
}
{
__ Bind(&load_double);
BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
result_reg, temp);
__ LoadCompressedFieldFromOffset(temp, instance_reg, OffsetInBytes());
__ LoadDFieldFromOffset(FTMP, temp, Double::value_offset());
__ StoreDFieldToOffset(FTMP, result_reg, Double::value_offset());
__ j(&done);
}
{
__ Bind(&load_float32x4);
__ ebreak(); // Unimplemented
__ j(&done);
}
{
__ Bind(&load_float64x2);
__ ebreak(); // Unimplemented
__ j(&done);
}
__ Bind(&load_pointer);
}
if (slot().is_compressed()) {
__ LoadCompressedFieldFromOffset(result_reg, instance_reg, OffsetInBytes());
} else {
__ LoadFieldFromOffset(result_reg, instance_reg, OffsetInBytes());
}
if (calls_initializer()) {
EmitNativeCodeForInitializerCall(compiler);
}
__ Bind(&done);
}
LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
@ -6431,13 +5915,11 @@ class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
// The unboxed int64 argument is passed through a dedicated slot in Thread.
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ StoreToOffset(
right_lo, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset());
__ StoreToOffset(
right_hi, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
compiler::target::kWordSize);
__ StoreToOffset(right_lo, THR,
compiler::target::Thread::unboxed_runtime_arg_offset());
__ StoreToOffset(right_hi, THR,
compiler::target::Thread::unboxed_runtime_arg_offset() +
compiler::target::kWordSize);
#else
const Register left = instruction()->locs()->in(0).reg();
const Register right = instruction()->locs()->in(1).reg();
@ -6468,7 +5950,8 @@ class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ sx(right,
compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()));
compiler::Address(
THR, compiler::target::Thread::unboxed_runtime_arg_offset()));
#endif
}
};
@ -6680,13 +6163,11 @@ class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
// The unboxed int64 argument is passed through a dedicated slot in Thread.
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ StoreToOffset(
right_lo, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset());
__ StoreToOffset(
right_hi, THR,
compiler::target::Thread::unboxed_int64_runtime_arg_offset() +
compiler::target::kWordSize);
__ StoreToOffset(right_lo, THR,
compiler::target::Thread::unboxed_runtime_arg_offset());
__ StoreToOffset(right_hi, THR,
compiler::target::Thread::unboxed_runtime_arg_offset() +
compiler::target::kWordSize);
#else
const Register right = instruction()->locs()->in(1).reg();
@ -6696,7 +6177,8 @@ class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ sx(right,
compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()));
compiler::Address(
THR, compiler::target::Thread::unboxed_runtime_arg_offset()));
#endif
}
};
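The slow paths above hand an unboxed value to the runtime through a single dedicated per-thread scratch slot (unboxed_runtime_arg), because unboxed values cannot yet be passed as ordinary Dart arguments. A minimal standalone sketch of that convention, using a hypothetical FakeThread instead of the VM's Thread and runtime-entry machinery:

#include <cstdint>
#include <cstdio>
#include <cstring>

struct FakeThread {
  // One slot wide enough for the widest unboxed value (int64/double/simd128).
  alignas(16) unsigned char unboxed_runtime_arg[16];
};

// "Runtime entry": reads its extra argument out of the thread slot instead of
// receiving it as a tagged parameter.
int64_t ShiftRuntime(FakeThread* thread, int64_t value) {
  int64_t shift;
  std::memcpy(&shift, thread->unboxed_runtime_arg, sizeof(shift));
  return value << (shift & 63);
}

int main() {
  FakeThread thread;
  const int64_t shift = 5;
  // Caller: store the unboxed argument into the slot, then call the runtime.
  std::memcpy(thread.unboxed_runtime_arg, &shift, sizeof(shift));
  std::printf("%lld\n", static_cast<long long>(ShiftRuntime(&thread, 3)));
  return 0;
}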


@ -2463,291 +2463,6 @@ void GuardFieldTypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&ok);
}
LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = (IsUnboxedDartFieldStore() && opt)
? (FLAG_precompiled_mode ? 0 : 2)
: (IsPotentialUnboxedDartFieldStore() ? 3 : 0);
LocationSummary* summary = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
(!FLAG_precompiled_mode &&
((IsUnboxedDartFieldStore() && opt && is_initialization()) ||
IsPotentialUnboxedDartFieldStore()))
? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall);
summary->set_in(kInstancePos, Location::RequiresRegister());
if (slot().representation() != kTagged) {
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
ASSERT(RepresentationUtils::ValueSize(slot().representation()) <=
compiler::target::kWordSize);
summary->set_in(kValuePos, Location::RequiresRegister());
} else if (IsUnboxedDartFieldStore() && opt) {
summary->set_in(kValuePos, Location::RequiresFpuRegister());
if (!FLAG_precompiled_mode) {
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
}
} else if (IsPotentialUnboxedDartFieldStore()) {
summary->set_in(kValuePos, ShouldEmitStoreBarrier()
? Location::WritableRegister()
: Location::RequiresRegister());
summary->set_temp(0, Location::RequiresRegister());
summary->set_temp(1, Location::RequiresRegister());
summary->set_temp(2, opt ? Location::RequiresFpuRegister()
: Location::FpuRegisterLocation(XMM1));
} else {
summary->set_in(kValuePos,
ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
}
return summary;
}
static void EnsureMutableBox(FlowGraphCompiler* compiler,
StoreFieldInstr* instruction,
Register box_reg,
const Class& cls,
Register instance_reg,
intptr_t offset,
Register temp) {
compiler::Label done;
__ LoadCompressed(box_reg, compiler::FieldAddress(instance_reg, offset));
__ CompareObject(box_reg, Object::null_object());
__ j(NOT_EQUAL, &done);
BoxAllocationSlowPath::Allocate(compiler, instruction, cls, box_reg, temp);
__ movq(temp, box_reg);
__ StoreCompressedIntoObject(instance_reg,
compiler::FieldAddress(instance_reg, offset),
temp, compiler::Assembler::kValueIsNotSmi);
__ Bind(&done);
}
void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
compiler::Label skip_store;
const Register instance_reg = locs()->in(kInstancePos).reg();
const intptr_t offset_in_bytes = OffsetInBytes();
ASSERT(offset_in_bytes > 0); // Field is finalized and points after header.
if (slot().representation() != kTagged) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
const Register value = locs()->in(kValuePos).reg();
__ Comment("NativeUnboxedStoreFieldInstr");
__ StoreFieldToOffset(
value, instance_reg, offset_in_bytes,
RepresentationUtils::OperandSize(slot().representation()));
return;
}
if (IsUnboxedDartFieldStore() && compiler->is_optimizing()) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
XmmRegister value = locs()->in(kValuePos).fpu_reg();
const intptr_t cid = slot().field().UnboxedFieldCid();
// Real unboxed field
if (FLAG_precompiled_mode) {
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleStoreFieldInstr");
__ movsd(compiler::FieldAddress(instance_reg, offset_in_bytes),
value);
return;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4StoreFieldInstr");
__ movups(compiler::FieldAddress(instance_reg, offset_in_bytes),
value);
return;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2StoreFieldInstr");
__ movups(compiler::FieldAddress(instance_reg, offset_in_bytes),
value);
return;
default:
UNREACHABLE();
}
}
Register temp = locs()->temp(0).reg();
Register temp2 = locs()->temp(1).reg();
if (is_initialization()) {
const Class* cls = NULL;
switch (cid) {
case kDoubleCid:
cls = &compiler->double_class();
break;
case kFloat32x4Cid:
cls = &compiler->float32x4_class();
break;
case kFloat64x2Cid:
cls = &compiler->float64x2_class();
break;
default:
UNREACHABLE();
}
BoxAllocationSlowPath::Allocate(compiler, this, *cls, temp, temp2);
__ movq(temp2, temp);
__ StoreCompressedIntoObject(
instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
temp2, compiler::Assembler::kValueIsNotSmi);
} else {
__ LoadCompressed(temp,
compiler::FieldAddress(instance_reg, offset_in_bytes));
}
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleStoreFieldInstr");
__ movsd(compiler::FieldAddress(temp, Double::value_offset()), value);
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4StoreFieldInstr");
__ movups(compiler::FieldAddress(temp, Float32x4::value_offset()),
value);
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2StoreFieldInstr");
__ movups(compiler::FieldAddress(temp, Float64x2::value_offset()),
value);
break;
default:
UNREACHABLE();
}
return;
}
if (IsPotentialUnboxedDartFieldStore()) {
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
Register value_reg = locs()->in(kValuePos).reg();
Register temp = locs()->temp(0).reg();
Register temp2 = locs()->temp(1).reg();
FpuRegister fpu_temp = locs()->temp(2).fpu_reg();
if (ShouldEmitStoreBarrier()) {
// Value input is a writable register and should be manually preserved
// across allocation slow-path.
locs()->live_registers()->Add(locs()->in(kValuePos), kTagged);
}
compiler::Label store_pointer;
compiler::Label store_double;
compiler::Label store_float32x4;
compiler::Label store_float64x2;
__ LoadObject(temp, Field::ZoneHandle(Z, slot().field().Original()));
__ cmpw(compiler::FieldAddress(temp, Field::is_nullable_offset()),
compiler::Immediate(kNullCid));
__ j(EQUAL, &store_pointer);
__ movzxb(temp2, compiler::FieldAddress(temp, Field::kind_bits_offset()));
__ testq(temp2, compiler::Immediate(1 << Field::kUnboxingCandidateBit));
__ j(ZERO, &store_pointer);
__ cmpw(compiler::FieldAddress(temp, Field::guarded_cid_offset()),
compiler::Immediate(kDoubleCid));
__ j(EQUAL, &store_double);
__ cmpw(compiler::FieldAddress(temp, Field::guarded_cid_offset()),
compiler::Immediate(kFloat32x4Cid));
__ j(EQUAL, &store_float32x4);
__ cmpw(compiler::FieldAddress(temp, Field::guarded_cid_offset()),
compiler::Immediate(kFloat64x2Cid));
__ j(EQUAL, &store_float64x2);
// Fall through.
__ jmp(&store_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(kInstancePos));
locs()->live_registers()->Add(locs()->in(kValuePos));
}
{
__ Bind(&store_double);
EnsureMutableBox(compiler, this, temp, compiler->double_class(),
instance_reg, offset_in_bytes, temp2);
__ movsd(fpu_temp,
compiler::FieldAddress(value_reg, Double::value_offset()));
__ movsd(compiler::FieldAddress(temp, Double::value_offset()), fpu_temp);
__ jmp(&skip_store);
}
{
__ Bind(&store_float32x4);
EnsureMutableBox(compiler, this, temp, compiler->float32x4_class(),
instance_reg, offset_in_bytes, temp2);
__ movups(fpu_temp,
compiler::FieldAddress(value_reg, Float32x4::value_offset()));
__ movups(compiler::FieldAddress(temp, Float32x4::value_offset()),
fpu_temp);
__ jmp(&skip_store);
}
{
__ Bind(&store_float64x2);
EnsureMutableBox(compiler, this, temp, compiler->float64x2_class(),
instance_reg, offset_in_bytes, temp2);
__ movups(fpu_temp,
compiler::FieldAddress(value_reg, Float64x2::value_offset()));
__ movups(compiler::FieldAddress(temp, Float64x2::value_offset()),
fpu_temp);
__ jmp(&skip_store);
}
__ Bind(&store_pointer);
}
const bool compressed = slot().is_compressed();
if (ShouldEmitStoreBarrier()) {
Register value_reg = locs()->in(kValuePos).reg();
if (!compressed) {
__ StoreIntoObject(instance_reg,
compiler::FieldAddress(instance_reg, offset_in_bytes),
value_reg, CanValueBeSmi(), memory_order_);
} else {
__ StoreCompressedIntoObject(
instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
value_reg, CanValueBeSmi(), memory_order_);
}
} else {
if (locs()->in(kValuePos).IsConstant()) {
const auto& value = locs()->in(kValuePos).constant();
if (!compressed) {
__ StoreIntoObjectNoBarrier(
instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
value, memory_order_);
} else {
__ StoreCompressedIntoObjectNoBarrier(
instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
value, memory_order_);
}
} else {
Register value_reg = locs()->in(kValuePos).reg();
if (!compressed) {
__ StoreIntoObjectNoBarrier(
instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
value_reg, memory_order_);
} else {
__ StoreCompressedIntoObjectNoBarrier(
instance_reg, compiler::FieldAddress(instance_reg, offset_in_bytes),
value_reg, memory_order_);
}
}
}
__ Bind(&skip_store);
}
LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@ -2906,235 +2621,6 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&done);
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
LocationSummary* locs = nullptr;
if (slot().representation() != kTagged) {
ASSERT(!calls_initializer());
ASSERT(RepresentationUtils::IsUnboxedInteger(slot().representation()));
ASSERT(RepresentationUtils::ValueSize(slot().representation()) <=
compiler::target::kWordSize);
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else if (IsUnboxedDartFieldLoad() && opt) {
ASSERT(!calls_initializer());
ASSERT(!slot().field().is_non_nullable_integer());
const intptr_t kNumTemps = FLAG_precompiled_mode ? 0 : 1;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
if (!FLAG_precompiled_mode) {
locs->set_temp(0, Location::RequiresRegister());
}
locs->set_out(0, Location::RequiresFpuRegister());
} else if (IsPotentialUnboxedDartFieldLoad()) {
ASSERT(!calls_initializer());
const intptr_t kNumTemps = 2;
locs = new (zone) LocationSummary(zone, kNumInputs, kNumTemps,
LocationSummary::kCallOnSlowPath);
locs->set_in(0, Location::RequiresRegister());
locs->set_temp(0, opt ? Location::RequiresFpuRegister()
: Location::FpuRegisterLocation(XMM1));
locs->set_temp(1, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else if (calls_initializer()) {
if (throw_exception_on_initialization()) {
const bool using_shared_stub = UseSharedSlowPathStub(opt);
const intptr_t kNumTemps = using_shared_stub ? 1 : 0;
locs = new (zone) LocationSummary(
zone, kNumInputs, kNumTemps,
using_shared_stub ? LocationSummary::kCallOnSharedSlowPath
: LocationSummary::kCallOnSlowPath);
if (using_shared_stub) {
locs->set_temp(0, Location::RegisterLocation(
LateInitializationErrorABI::kFieldReg));
}
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(
0, Location::RegisterLocation(InitInstanceFieldABI::kInstanceReg));
locs->set_out(
0, Location::RegisterLocation(InitInstanceFieldABI::kResultReg));
}
} else {
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_out(0, Location::RequiresRegister());
}
return locs;
}
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const Register instance_reg = locs()->in(0).reg();
if (slot().representation() != kTagged) {
const Register result = locs()->out(0).reg();
__ Comment("NativeUnboxedLoadFieldInstr");
__ LoadFieldFromOffset(
result, instance_reg, OffsetInBytes(),
RepresentationUtils::OperandSize(slot().representation()));
return;
}
if (IsUnboxedDartFieldLoad() && compiler->is_optimizing()) {
XmmRegister result = locs()->out(0).fpu_reg();
const intptr_t cid = slot().field().UnboxedFieldCid();
// Real unboxed field
if (FLAG_precompiled_mode) {
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleLoadFieldInstr");
__ movsd(result,
compiler::FieldAddress(instance_reg, OffsetInBytes()));
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4LoadFieldInstr");
__ movups(result,
compiler::FieldAddress(instance_reg, OffsetInBytes()));
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2LoadFieldInstr");
__ movups(result,
compiler::FieldAddress(instance_reg, OffsetInBytes()));
break;
default:
UNREACHABLE();
}
return;
}
Register temp = locs()->temp(0).reg();
__ LoadCompressed(temp,
compiler::FieldAddress(instance_reg, OffsetInBytes()));
switch (cid) {
case kDoubleCid:
__ Comment("UnboxedDoubleLoadFieldInstr");
__ movsd(result, compiler::FieldAddress(temp, Double::value_offset()));
break;
case kFloat32x4Cid:
__ Comment("UnboxedFloat32x4LoadFieldInstr");
__ movups(result,
compiler::FieldAddress(temp, Float32x4::value_offset()));
break;
case kFloat64x2Cid:
__ Comment("UnboxedFloat64x2LoadFieldInstr");
__ movups(result,
compiler::FieldAddress(temp, Float64x2::value_offset()));
break;
default:
UNREACHABLE();
}
return;
}
compiler::Label done;
const Register result = locs()->out(0).reg();
if (IsPotentialUnboxedDartFieldLoad()) {
Register temp = locs()->temp(1).reg();
XmmRegister value = locs()->temp(0).fpu_reg();
compiler::Label load_pointer;
compiler::Label load_double;
compiler::Label load_float32x4;
compiler::Label load_float64x2;
__ LoadObject(result, Field::ZoneHandle(slot().field().Original()));
compiler::FieldAddress field_cid_operand(result,
Field::guarded_cid_offset());
compiler::FieldAddress field_nullability_operand(
result, Field::is_nullable_offset());
__ cmpw(field_nullability_operand, compiler::Immediate(kNullCid));
__ j(EQUAL, &load_pointer);
__ cmpw(field_cid_operand, compiler::Immediate(kDoubleCid));
__ j(EQUAL, &load_double);
__ cmpw(field_cid_operand, compiler::Immediate(kFloat32x4Cid));
__ j(EQUAL, &load_float32x4);
__ cmpw(field_cid_operand, compiler::Immediate(kFloat64x2Cid));
__ j(EQUAL, &load_float64x2);
// Fall through.
__ jmp(&load_pointer);
if (!compiler->is_optimizing()) {
locs()->live_registers()->Add(locs()->in(0));
}
{
__ Bind(&load_double);
BoxAllocationSlowPath::Allocate(compiler, this, compiler->double_class(),
result, temp);
__ LoadCompressed(temp,
compiler::FieldAddress(instance_reg, OffsetInBytes()));
__ movsd(value, compiler::FieldAddress(temp, Double::value_offset()));
__ movsd(compiler::FieldAddress(result, Double::value_offset()), value);
__ jmp(&done);
}
{
__ Bind(&load_float32x4);
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->float32x4_class(), result, temp);
__ LoadCompressed(temp,
compiler::FieldAddress(instance_reg, OffsetInBytes()));
__ movups(value, compiler::FieldAddress(temp, Float32x4::value_offset()));
__ movups(compiler::FieldAddress(result, Float32x4::value_offset()),
value);
__ jmp(&done);
}
{
__ Bind(&load_float64x2);
BoxAllocationSlowPath::Allocate(
compiler, this, compiler->float64x2_class(), result, temp);
__ LoadCompressed(temp,
compiler::FieldAddress(instance_reg, OffsetInBytes()));
__ movups(value, compiler::FieldAddress(temp, Float64x2::value_offset()));
__ movups(compiler::FieldAddress(result, Float64x2::value_offset()),
value);
__ jmp(&done);
}
__ Bind(&load_pointer);
}
if (slot().is_compressed()) {
__ LoadCompressed(result,
compiler::FieldAddress(instance_reg, OffsetInBytes()));
} else {
__ movq(result, compiler::FieldAddress(instance_reg, OffsetInBytes()));
}
if (calls_initializer()) {
EmitNativeCodeForInitializerCall(compiler);
}
__ Bind(&done);
}
LocationSummary* AllocateUninitializedContextInstr::MakeLocationSummary(
Zone* zone,
bool opt) const {
@ -6479,7 +5965,8 @@ class ShiftInt64OpSlowPath : public ThrowErrorSlowPathCode {
// The unboxed int64 argument is passed through a dedicated slot in Thread.
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ movq(compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()),
__ movq(compiler::Address(
THR, compiler::target::Thread::unboxed_runtime_arg_offset()),
RCX);
}
};
@ -6595,7 +6082,8 @@ class ShiftUint32OpSlowPath : public ThrowErrorSlowPathCode {
// The unboxed int64 argument is passed through a dedicated slot in Thread.
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
__ movq(compiler::Address(THR, Thread::unboxed_int64_runtime_arg_offset()),
__ movq(compiler::Address(
THR, compiler::target::Thread::unboxed_runtime_arg_offset()),
RCX);
}
};


@ -316,42 +316,8 @@ const Slot& Slot::GetCanonicalSlot(Thread* thread,
FieldGuardState::FieldGuardState(const Field& field)
: state_(GuardedCidBits::encode(field.guarded_cid()) |
IsNonNullableIntegerBit::encode(field.is_non_nullable_integer()) |
IsUnboxingCandidateBit::encode(field.is_unboxing_candidate()) |
IsNullableBit::encode(field.is_nullable())) {}
bool FieldGuardState::IsUnboxed() const {
ASSERT(!is_non_nullable_integer() || FLAG_precompiled_mode);
const bool valid_class = ((FlowGraphCompiler::SupportsUnboxedDoubles() &&
(guarded_cid() == kDoubleCid)) ||
(FlowGraphCompiler::SupportsUnboxedSimd128() &&
(guarded_cid() == kFloat32x4Cid)) ||
(FlowGraphCompiler::SupportsUnboxedSimd128() &&
(guarded_cid() == kFloat64x2Cid)) ||
is_non_nullable_integer());
return is_unboxing_candidate() && !is_nullable() && valid_class;
}
bool FieldGuardState::IsPotentialUnboxed() const {
if (FLAG_precompiled_mode) {
// kernel_loader.cc:ReadInferredType sets the guarded cid for fields based
// on inferred types from TFA (if available). The guarded cid is therefore
// proven to be correct.
return IsUnboxed();
}
return is_unboxing_candidate() &&
(IsUnboxed() || (guarded_cid() == kIllegalCid));
}
bool Slot::IsUnboxed() const {
return field_guard_state().IsUnboxed();
}
bool Slot::IsPotentialUnboxed() const {
return field_guard_state().IsPotentialUnboxed();
}
Representation Slot::UnboxedRepresentation() const {
switch (field_guard_state().guarded_cid()) {
case kDoubleCid:
@ -361,7 +327,6 @@ Representation Slot::UnboxedRepresentation() const {
case kFloat64x2Cid:
return kUnboxedFloat64x2;
default:
RELEASE_ASSERT(field_guard_state().is_non_nullable_integer());
return kUnboxedInt64;
}
}
@ -414,11 +379,22 @@ const Slot& Slot::Get(const Field& field,
used_guarded_state = false;
}
if (field_guard_state.is_non_nullable_integer()) {
ASSERT(FLAG_precompiled_mode);
const bool is_unboxed = field.is_unboxed();
if (is_unboxed) {
is_nullable = false;
if (field_guard_state.IsUnboxed()) {
rep = kUnboxedInt64;
switch (field_guard_state.guarded_cid()) {
case kDoubleCid:
rep = kUnboxedDouble;
break;
case kFloat32x4Cid:
rep = kUnboxedFloat32x4;
break;
case kFloat64x2Cid:
rep = kUnboxedFloat64x2;
break;
default:
rep = kUnboxedInt64;
break;
}
}
@ -432,7 +408,8 @@ const Slot& Slot::Get(const Field& field,
IsCompressedBit::encode(
compiler::target::Class::HasCompressedPointers(owner)) |
IsSentinelVisibleBit::encode(field.is_late() && field.is_final() &&
!field.has_initializer()),
!field.has_initializer()) |
IsUnboxedBit::encode(is_unboxed),
nullable_cid, compiler::target::Field::OffsetOf(field), &field, &type,
rep, field_guard_state);


@ -198,25 +198,11 @@ class FieldGuardState {
explicit FieldGuardState(const Field& field);
intptr_t guarded_cid() const { return GuardedCidBits::decode(state_); }
bool is_non_nullable_integer() const {
return IsNonNullableIntegerBit::decode(state_);
}
bool is_unboxing_candidate() const {
return IsUnboxingCandidateBit::decode(state_);
}
bool is_nullable() const { return IsNullableBit::decode(state_); }
bool IsUnboxed() const;
bool IsPotentialUnboxed() const;
private:
using GuardedCidBits = BitField<int32_t, ClassIdTagType, 0, 16>;
using IsNonNullableIntegerBit =
BitField<int32_t, bool, GuardedCidBits::kNextBit, 1>;
using IsUnboxingCandidateBit =
BitField<int32_t, bool, IsNonNullableIntegerBit::kNextBit, 1>;
using IsNullableBit =
BitField<int32_t, bool, IsUnboxingCandidateBit::kNextBit, 1>;
using IsNullableBit = BitField<int32_t, bool, GuardedCidBits::kNextBit, 1>;
const int32_t state_;
};
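For illustration only, a standalone sketch of the bit packing FieldGuardState now performs with the VM's BitField template: the guarded cid sits in the low 16 bits and the nullability flag in the next bit, with the unboxing-candidate and non-nullable-integer bits removed. The cid value used here is illustrative, not the VM's real class id.

#include <cassert>
#include <cstdint>

constexpr uint32_t kGuardedCidBits = 16;
constexpr uint32_t kGuardedCidMask = (1u << kGuardedCidBits) - 1;
constexpr uint32_t kIsNullableBit = 1u << kGuardedCidBits;

constexpr uint32_t Encode(uint32_t guarded_cid, bool is_nullable) {
  return (guarded_cid & kGuardedCidMask) | (is_nullable ? kIsNullableBit : 0u);
}

constexpr uint32_t DecodeCid(uint32_t state) { return state & kGuardedCidMask; }
constexpr bool DecodeNullable(uint32_t state) {
  return (state & kIsNullableBit) != 0;
}

int main() {
  const uint32_t kDoubleCid = 62;  // illustrative value only
  const uint32_t state = Encode(kDoubleCid, /*is_nullable=*/false);
  assert(DecodeCid(state) == kDoubleCid);
  assert(!DecodeNullable(state));
  return 0;
}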
@ -362,8 +348,9 @@ class Slot : public ZoneAllocated {
return kind() == Kind::kCapturedVariable || kind() == Kind::kContext_parent;
}
bool IsUnboxed() const;
bool IsPotentialUnboxed() const;
bool is_unboxed() const {
return IsUnboxedBit::decode(flags_);
}
Representation UnboxedRepresentation() const;
void Write(FlowGraphSerializer* s) const;
@ -403,6 +390,8 @@ class Slot : public ZoneAllocated {
using IsCompressedBit = BitField<int8_t, bool, IsGuardedBit::kNextBit, 1>;
using IsSentinelVisibleBit =
BitField<int8_t, bool, IsCompressedBit::kNextBit, 1>;
using IsUnboxedBit =
BitField<int8_t, bool, IsSentinelVisibleBit::kNextBit, 1>;
template <typename T>
const T* DataAs() const {


@ -547,8 +547,14 @@ Fragment BaseFlowGraphBuilder::StoreFieldGuarded(
const Field& field_clone = MayCloneField(Z, field);
if (IG->use_field_guards()) {
LocalVariable* store_expression = MakeTemporary();
instructions += LoadLocal(store_expression);
instructions += GuardFieldClass(field_clone, GetNextDeoptId());
// Note: the unboxing decision can only change due to hot reload, at which
// point all code will be cleared, so there is no need to worry about the
// stability of deopt id numbering.
if (!field_clone.is_unboxed()) {
instructions += LoadLocal(store_expression);
instructions += GuardFieldClass(field_clone, GetNextDeoptId());
}
// Field length guard can be omitted if it is not needed.
// However, it is possible that we were tracking list length previously,


@ -1054,10 +1054,10 @@ bool GraphIntrinsifier::Build_ImplicitGetter(FlowGraph* flow_graph) {
// We only support cases where we do not have to create a box (whose
// allocation could fail).
ASSERT(function.HasUnboxedReturnValue() || !slot.IsUnboxed());
ASSERT(function.HasUnboxedReturnValue() || !slot.is_unboxed());
// We might need to unbox the field value before returning.
if (function.HasUnboxedReturnValue() && !slot.IsUnboxed()) {
if (function.HasUnboxedReturnValue() && !slot.is_unboxed()) {
ASSERT(FLAG_precompiled_mode);
field_value = builder.AddUnboxInstr(
FlowGraph::ReturnRepresentationOf(flow_graph->function()),
@ -1082,10 +1082,10 @@ bool GraphIntrinsifier::Build_ImplicitSetter(FlowGraph* flow_graph) {
}
ASSERT(field.is_instance() && !field.is_final());
const auto& slot = Slot::Get(field, &flow_graph->parsed_function());
ASSERT(!function.HasUnboxedParameters() || slot.IsUnboxed());
ASSERT(!function.HasUnboxedParameters() || slot.is_unboxed());
const auto barrier_mode =
slot.IsUnboxed() ? kNoStoreBarrier : kEmitStoreBarrier;
slot.is_unboxed() ? kNoStoreBarrier : kEmitStoreBarrier;
flow_graph->CreateCommonConstants();
GraphEntryInstr* graph_entry = flow_graph->graph_entry();
@ -1096,7 +1096,7 @@ bool GraphIntrinsifier::Build_ImplicitSetter(FlowGraph* flow_graph) {
auto value = builder.AddParameter(1, /*with_frame=*/false);
VerifyParameterIsBoxed(&builder, 0);
if (!function.HasUnboxedParameters() && slot.IsUnboxed()) {
if (!function.HasUnboxedParameters() && slot.is_unboxed()) {
// We do not support storing to possibly guarded fields in JIT in graph
// intrinsics.
ASSERT(FLAG_precompiled_mode);


@ -109,14 +109,13 @@ bool Intrinsifier::CanIntrinsifyFieldAccessor(
// We don't support complex getter cases.
if (field.is_late() || field.needs_load_guard()) return false;
if (slot.IsPotentialUnboxed()) {
if (slot.is_unboxed()) {
if (function.HasUnboxedReturnValue()) {
// In AOT mode: Unboxed fields contain the unboxed value and can be
// returned in unboxed form.
ASSERT(FLAG_precompiled_mode);
} else {
// In JIT mode: Unboxed fields contain a mutable box which we cannot
// return.
// In JIT mode: Can't return unboxed value directly.
return false;
}
} else {
@ -144,7 +143,7 @@ bool Intrinsifier::CanIntrinsifyFieldAccessor(
// avoid the need for boxing (which we cannot do in the intrinsic).
if (function.HasUnboxedParameters()) {
ASSERT(FLAG_precompiled_mode);
if (!slot.IsUnboxed()) {
if (!slot.is_unboxed()) {
return false;
}
}


@ -9,6 +9,7 @@
#include "vm/object.h"
#if !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/compiler/backend/flow_graph_compiler.h"
#include "vm/compiler/runtime_offsets_list.h"
#include "vm/dart_api_state.h"
#include "vm/dart_entry.h"
@ -1046,6 +1047,49 @@ word Number::NextFieldOffset() {
return TranslateOffsetInWords(dart::Number::NextFieldOffset());
}
void UnboxFieldIfSupported(const dart::Field& field,
const dart::AbstractType& type) {
if (field.is_static() || field.is_late()) {
return;
}
if (type.IsNullable()) {
return;
}
// In JIT mode we can unbox fields which are guaranteed to be non-nullable
// based on their static type. We can only rely on this information
// when running in sound null safety. AOT instead uses TFA results, see
// |KernelLoader::ReadInferredType|.
if (!dart::Thread::Current()->isolate_group()->null_safety()) {
return;
}
classid_t cid = kIllegalCid;
if (type.IsDoubleType()) {
if (FlowGraphCompiler::SupportsUnboxedDoubles()) {
cid = kDoubleCid;
}
} else if (type.IsFloat32x4Type()) {
if (FlowGraphCompiler::SupportsUnboxedSimd128()) {
cid = kFloat32x4Cid;
}
} else if (type.IsFloat64x2Type()) {
if (FlowGraphCompiler::SupportsUnboxedSimd128()) {
cid = kFloat64x2Cid;
}
}
if (cid != kIllegalCid) {
field.set_guarded_cid(cid);
field.set_is_nullable(false);
field.set_is_unboxed(true);
field.set_guarded_list_length(dart::Field::kNoFixedLength);
field.set_guarded_list_length_in_object_offset(
dart::Field::kUnknownLengthOffset);
}
}
} // namespace target
} // namespace compiler
} // namespace dart
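A standalone sketch of the decision UnboxFieldIfSupported makes above, with plain enums standing in for the VM's Field and AbstractType (the names here are illustrative, not VM API): a non-static, non-late, non-nullable field whose static type is double, Float32x4 or Float64x2 is unboxed when the target supports it; everything else stays boxed.

#include <cstdio>

enum class StaticType { kDouble, kFloat32x4, kFloat64x2, kInt, kOther };

struct FieldInfo {
  bool is_static;
  bool is_late;
  bool is_nullable;
  StaticType type;
};

// Illustrative constants, not the VM's real class ids.
enum UnboxCid { kNoUnbox = 0, kDoubleCid, kFloat32x4Cid, kFloat64x2Cid };

UnboxCid ChooseUnboxedCid(const FieldInfo& f,
                          bool supports_unboxed_doubles,
                          bool supports_unboxed_simd128) {
  // Static, late and nullable fields always stay boxed.
  if (f.is_static || f.is_late || f.is_nullable) return kNoUnbox;
  switch (f.type) {
    case StaticType::kDouble:
      return supports_unboxed_doubles ? kDoubleCid : kNoUnbox;
    case StaticType::kFloat32x4:
      return supports_unboxed_simd128 ? kFloat32x4Cid : kNoUnbox;
    case StaticType::kFloat64x2:
      return supports_unboxed_simd128 ? kFloat64x2Cid : kNoUnbox;
    default:
      // Non-nullable integers are only unboxed on the AOT path
      // (KernelLoader::ReadInferredType), not by this static-type check.
      return kNoUnbox;
  }
}

int main() {
  const FieldInfo field{/*is_static=*/false, /*is_late=*/false,
                        /*is_nullable=*/false, StaticType::kDouble};
  std::printf("%d\n", ChooseUnboxedCid(field, true, true));  // prints 1
  return 0;
}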


@ -1196,8 +1196,7 @@ class Thread : public AllStatic {
static word stack_overflow_shared_stub_entry_point_offset(bool fpu_regs);
static word stack_limit_offset();
static word saved_stack_limit_offset();
static word unboxed_int64_runtime_arg_offset();
static word unboxed_double_runtime_arg_offset();
static word unboxed_runtime_arg_offset();
static word callback_code_offset();
static word callback_stack_return_offset();
@ -1578,6 +1577,9 @@ class FieldTable : public AllStatic {
static word OffsetOf(const dart::Field& field);
};
void UnboxFieldIfSupported(const dart::Field& field,
const dart::AbstractType& type);
} // namespace target
} // namespace compiler
} // namespace dart

File diff suppressed because it is too large.


@ -316,8 +316,7 @@
FIELD(Thread, top_exit_frame_info_offset) \
FIELD(Thread, top_offset) \
FIELD(Thread, top_resource_offset) \
FIELD(Thread, unboxed_int64_runtime_arg_offset) \
FIELD(Thread, unboxed_double_runtime_arg_offset) \
FIELD(Thread, unboxed_runtime_arg_offset) \
FIELD(Thread, vm_tag_offset) \
FIELD(Thread, write_barrier_entry_point_offset) \
FIELD(Thread, write_barrier_mask_offset) \


@ -1358,13 +1358,17 @@ EMIT_BOX_ALLOCATION(Int32x4)
#undef EMIT_BOX_ALLOCATION
void StubCodeCompiler::GenerateBoxDoubleStub(Assembler* assembler) {
static void GenerateBoxFpuValueStub(Assembler* assembler,
const dart::Class& cls,
const RuntimeEntry& runtime_entry,
void (Assembler::*store_value)(FpuRegister,
Register,
int32_t)) {
Label call_runtime;
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
__ TryAllocate(compiler::DoubleClass(), &call_runtime,
compiler::Assembler::kFarJump, BoxDoubleStubABI::kResultReg,
BoxDoubleStubABI::kTempReg);
__ StoreUnboxedDouble(
__ TryAllocate(cls, &call_runtime, compiler::Assembler::kFarJump,
BoxDoubleStubABI::kResultReg, BoxDoubleStubABI::kTempReg);
(assembler->*store_value)(
BoxDoubleStubABI::kValueReg, BoxDoubleStubABI::kResultReg,
compiler::target::Double::value_offset() - kHeapObjectTag);
__ Ret();
@ -1372,18 +1376,44 @@ void StubCodeCompiler::GenerateBoxDoubleStub(Assembler* assembler) {
__ Bind(&call_runtime);
__ EnterStubFrame();
__ PushObject(NullObject()); /* Make room for result. */
__ StoreUnboxedDouble(BoxDoubleStubABI::kValueReg, THR,
target::Thread::unboxed_double_runtime_arg_offset());
__ CallRuntime(kBoxDoubleRuntimeEntry, 0);
(assembler->*store_value)(BoxDoubleStubABI::kValueReg, THR,
target::Thread::unboxed_runtime_arg_offset());
__ CallRuntime(runtime_entry, 0);
__ PopRegister(BoxDoubleStubABI::kResultReg);
__ LeaveStubFrame();
__ Ret();
}
void StubCodeCompiler::GenerateBoxDoubleStub(Assembler* assembler) {
GenerateBoxFpuValueStub(assembler, compiler::DoubleClass(),
kBoxDoubleRuntimeEntry,
&Assembler::StoreUnboxedDouble);
}
void StubCodeCompiler::GenerateBoxFloat32x4Stub(Assembler* assembler) {
#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
GenerateBoxFpuValueStub(assembler, compiler::Float32x4Class(),
kBoxFloat32x4RuntimeEntry,
&Assembler::StoreUnboxedSimd128);
#else
__ Stop("Not supported on RISC-V.");
#endif
}
void StubCodeCompiler::GenerateBoxFloat64x2Stub(Assembler* assembler) {
#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
GenerateBoxFpuValueStub(assembler, compiler::Float64x2Class(),
kBoxFloat64x2RuntimeEntry,
&Assembler::StoreUnboxedSimd128);
#else
__ Stop("Not supported on RISC-V.");
#endif
}
void StubCodeCompiler::GenerateDoubleToIntegerStub(Assembler* assembler) {
__ EnterStubFrame();
__ StoreUnboxedDouble(DoubleToIntegerStubABI::kInputReg, THR,
target::Thread::unboxed_double_runtime_arg_offset());
target::Thread::unboxed_runtime_arg_offset());
__ PushObject(NullObject()); /* Make room for result. */
__ PushRegister(DoubleToIntegerStubABI::kRecognizedKindReg);
__ CallRuntime(kDoubleToIntegerRuntimeEntry, 1);
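A standalone sketch (hypothetical FakeAssembler, not the VM assembler API) of the dispatch pattern GenerateBoxFpuValueStub uses above: one generator parameterized by a pointer-to-member store routine, so the double and simd128 box stubs share a single skeleton.

#include <cstdio>

class FakeAssembler {
 public:
  void StoreUnboxedDouble(int value_reg, int base_reg, int offset) {
    std::printf("store double r%d -> [r%d + %d]\n", value_reg, base_reg, offset);
  }
  void StoreUnboxedSimd128(int value_reg, int base_reg, int offset) {
    std::printf("store simd128 r%d -> [r%d + %d]\n", value_reg, base_reg, offset);
  }
};

using StoreFn = void (FakeAssembler::*)(int, int, int);

// One "generator" shared by all box stubs; only the store routine differs.
void GenerateBoxStub(FakeAssembler* assembler, StoreFn store_value) {
  // ... allocation fast path and runtime fallback elided ...
  (assembler->*store_value)(/*value_reg=*/0, /*base_reg=*/1, /*offset=*/8);
}

int main() {
  FakeAssembler assembler;
  GenerateBoxStub(&assembler, &FakeAssembler::StoreUnboxedDouble);
  GenerateBoxStub(&assembler, &FakeAssembler::StoreUnboxedSimd128);
  return 0;
}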


@ -100,8 +100,8 @@ InstanceMorpher* InstanceMorpher::CreateFromClassDescriptors(
ClassTable* class_table,
const Class& from,
const Class& to) {
auto mapping = new (zone) ZoneGrowableArray<intptr_t>();
auto new_fields_offsets = new (zone) ZoneGrowableArray<intptr_t>();
auto mapping = new (zone) FieldMappingArray();
auto new_fields_offsets = new (zone) FieldOffsetArray();
if (from.NumTypeArguments() > 0) {
// Add copying of the optional type argument field.
@ -109,8 +109,8 @@ InstanceMorpher* InstanceMorpher::CreateFromClassDescriptors(
ASSERT(from_offset != Class::kNoTypeArguments);
intptr_t to_offset = to.host_type_arguments_field_offset();
ASSERT(to_offset != Class::kNoTypeArguments);
mapping->Add(from_offset);
mapping->Add(to_offset);
mapping->Add({from_offset, kIllegalCid});
mapping->Add({to_offset, kIllegalCid});
}
// Add copying of the instance fields if matching by name.
@ -123,6 +123,13 @@ InstanceMorpher* InstanceMorpher::CreateFromClassDescriptors(
String& from_name = String::Handle();
String& to_name = String::Handle();
auto ensure_boxed_and_guarded = [&](const Field& field) {
field.set_needs_load_guard(true);
if (field.is_unboxed()) {
to.MarkFieldBoxedDuringReload(class_table, field);
}
};
// Scan across all the fields in the new class definition.
for (intptr_t i = 0; i < to_fields.Length(); i++) {
if (to_fields.At(i) == Field::null()) {
@ -146,19 +153,61 @@ InstanceMorpher* InstanceMorpher::CreateFromClassDescriptors(
ASSERT(from_field.is_instance());
from_name = from_field.name();
if (from_name.Equals(to_name)) {
intptr_t from_box_cid = kIllegalCid;
intptr_t to_box_cid = kIllegalCid;
// Check if either of the fields is unboxed.
if ((from_field.is_unboxed() && from_field.type() != to_field.type()) ||
(from_field.is_unboxed() != to_field.is_unboxed())) {
// For simplicity we just migrate to boxed fields if such a
// situation occurs.
ensure_boxed_and_guarded(to_field);
}
if (from_field.is_unboxed()) {
const auto field_cid = from_field.guarded_cid();
switch (field_cid) {
case kDoubleCid:
case kFloat32x4Cid:
case kFloat64x2Cid:
from_box_cid = field_cid;
break;
default:
from_box_cid = kIntegerCid;
break;
}
}
if (to_field.is_unboxed()) {
const auto field_cid = to_field.guarded_cid();
switch (field_cid) {
case kDoubleCid:
case kFloat32x4Cid:
case kFloat64x2Cid:
to_box_cid = field_cid;
break;
default:
to_box_cid = kIntegerCid;
break;
}
}
// Field can't become unboxed if it was boxed.
ASSERT(from_box_cid != kIllegalCid || to_box_cid == kIllegalCid);
// Success
mapping->Add(from_field.HostOffset());
mapping->Add(to_field.HostOffset());
mapping->Add({from_field.HostOffset(), from_box_cid});
mapping->Add({to_field.HostOffset(), to_box_cid});
// Field did exist in old class definition.
new_field = false;
break;
}
}
if (new_field) {
const Field& field = Field::Handle(to_field.ptr());
field.set_needs_load_guard(true);
field.set_is_unboxing_candidate_unsafe(false);
new_fields_offsets->Add(field.HostOffset());
ensure_boxed_and_guarded(to_field);
new_fields_offsets->Add(to_field.HostOffset());
}
}
@ -167,12 +216,11 @@ InstanceMorpher* InstanceMorpher::CreateFromClassDescriptors(
InstanceMorpher(zone, to.id(), class_table, mapping, new_fields_offsets);
}
InstanceMorpher::InstanceMorpher(
Zone* zone,
classid_t cid,
ClassTable* class_table,
ZoneGrowableArray<intptr_t>* mapping,
ZoneGrowableArray<intptr_t>* new_fields_offsets)
InstanceMorpher::InstanceMorpher(Zone* zone,
classid_t cid,
ClassTable* class_table,
FieldMappingArray* mapping,
FieldOffsetArray* new_fields_offsets)
: zone_(zone),
cid_(cid),
class_table_(class_table),
@ -226,16 +274,77 @@ void InstanceMorpher::CreateMorphedCopies(Become* become) {
// Morph the context from [before] to [after] using mapping_.
for (intptr_t i = 0; i < mapping_->length(); i += 2) {
intptr_t from_offset = mapping_->At(i);
intptr_t to_offset = mapping_->At(i + 1);
ASSERT(from_offset > 0);
ASSERT(to_offset > 0);
value = before.RawGetFieldAtOffset(from_offset);
after.RawSetFieldAtOffset(to_offset, value);
const auto& from = mapping_->At(i);
const auto& to = mapping_->At(i + 1);
ASSERT(from.offset > 0);
ASSERT(to.offset > 0);
if (from.box_cid == kIllegalCid) {
// Boxed to boxed field migration.
ASSERT(to.box_cid == kIllegalCid);
value = before.RawGetFieldAtOffset(from.offset);
after.RawSetFieldAtOffset(to.offset, value);
} else if (to.box_cid == kIllegalCid) {
// Unboxed to boxed field migration.
switch (from.box_cid) {
case kDoubleCid: {
const auto unboxed_value =
before.RawGetUnboxedFieldAtOffset<double>(from.offset);
value = Double::New(unboxed_value);
break;
}
case kFloat32x4Cid: {
const auto unboxed_value =
before.RawGetUnboxedFieldAtOffset<simd128_value_t>(from.offset);
value = Float32x4::New(unboxed_value);
break;
}
case kFloat64x2Cid: {
const auto unboxed_value =
before.RawGetUnboxedFieldAtOffset<simd128_value_t>(from.offset);
value = Float64x2::New(unboxed_value);
break;
}
case kIntegerCid: {
const auto unboxed_value =
before.RawGetUnboxedFieldAtOffset<int64_t>(from.offset);
value = Integer::New(unboxed_value);
break;
}
}
if (is_canonical) {
value = Instance::Cast(value).Canonicalize(Thread::Current());
}
after.RawSetFieldAtOffset(to.offset, value);
} else {
// Unboxed to unboxed field migration.
ASSERT(to.box_cid == from.box_cid);
switch (from.box_cid) {
case kDoubleCid: {
const auto unboxed_value =
before.RawGetUnboxedFieldAtOffset<double>(from.offset);
after.RawSetUnboxedFieldAtOffset<double>(to.offset, unboxed_value);
break;
}
case kFloat32x4Cid:
case kFloat64x2Cid: {
const auto unboxed_value =
before.RawGetUnboxedFieldAtOffset<simd128_value_t>(from.offset);
after.RawSetUnboxedFieldAtOffset<simd128_value_t>(to.offset,
unboxed_value);
break;
}
case kIntegerCid: {
const auto unboxed_value =
before.RawGetUnboxedFieldAtOffset<int64_t>(from.offset);
after.RawSetUnboxedFieldAtOffset<int64_t>(to.offset, unboxed_value);
break;
}
}
}
}
for (intptr_t i = 0; i < new_fields_offsets_->length(); i++) {
const intptr_t field_offset = new_fields_offsets_->At(i);
const auto& field_offset = new_fields_offsets_->At(i);
after.RawSetFieldAtOffset(field_offset, Object::sentinel());
}
@ -248,11 +357,33 @@ void InstanceMorpher::CreateMorphedCopies(Become* become) {
}
}
static const char* BoxCidToCString(intptr_t box_cid) {
switch (box_cid) {
case kDoubleCid:
return "double";
case kFloat32x4Cid:
return "float32x4";
case kFloat64x2Cid:
return "float64x2";
case kIntegerCid:
return "int64";
}
return "?";
}
void InstanceMorpher::Dump() const {
LogBlock blocker;
THR_Print("Morphing objects with cid: %d via this mapping: ", cid_);
for (int i = 0; i < mapping_->length(); i += 2) {
THR_Print(" %" Pd "->%" Pd, mapping_->At(i), mapping_->At(i + 1));
const auto& from = mapping_->At(i);
const auto& to = mapping_->At(i + 1);
THR_Print(" %" Pd "->%" Pd "", from.offset, to.offset);
THR_Print(" (%" Pd " -> %" Pd ")", from.box_cid, to.box_cid);
if (to.box_cid == kIllegalCid && from.box_cid != kIllegalCid) {
THR_Print("[box %s]", BoxCidToCString(from.box_cid));
} else if (to.box_cid != kIllegalCid) {
THR_Print("[%s]", BoxCidToCString(from.box_cid));
}
}
THR_Print("\n");
}
@ -264,9 +395,17 @@ void InstanceMorpher::AppendTo(JSONArray* array) {
jsobj.AddProperty("instanceCount", before_.length());
JSONArray map(&jsobj, "fieldOffsetMappings");
for (int i = 0; i < mapping_->length(); i += 2) {
const auto& from = mapping_->At(i);
const auto& to = mapping_->At(i + 1);
JSONArray pair(&map);
pair.AddValue(mapping_->At(i));
pair.AddValue(mapping_->At(i + 1));
pair.AddValue(from.offset);
pair.AddValue(to.offset);
if (to.box_cid == kIllegalCid && from.box_cid != kIllegalCid) {
pair.AddValueF("box %s", BoxCidToCString(from.box_cid));
} else if (to.box_cid != kIllegalCid) {
pair.AddValueF("%s", BoxCidToCString(from.box_cid));
}
}
}
@ -724,7 +863,8 @@ bool IsolateGroupReloadContext::Reload(bool force_reload,
isolate_group_->program_reload_context()->ReloadPhase4CommitPrepare();
bool discard_class_tables = true;
if (HasInstanceMorphers()) {
// Find all objects that need to be morphed (reallocated to a new size).
// Find all objects that need to be morphed (reallocated to a new
// layout).
ObjectLocator locator(this);
{
HeapIterationScope iteration(Thread::Current());
@ -741,11 +881,11 @@ bool IsolateGroupReloadContext::Reload(bool force_reload,
if (count > 0) {
TIMELINE_SCOPE(MorphInstances);
// While we are reallocating instances to their new size, the heap
// will contain a mix of instances with the old and new sizes that
// While we are reallocating instances to their new layout, the heap
// will contain a mix of instances with the old and new layouts that
// have the same cid. This makes the heap unwalkable until the
// "become" operation below replaces all the instances of the old
// size with forwarding corpses. Force heap growth to prevent size
// layout with forwarding corpses. Force heap growth to prevent layout
// confusion during this period.
ForceGrowthScope force_growth(thread);
// The HeapIterationScope above ensures no other GC tasks can be
@ -755,10 +895,10 @@ bool IsolateGroupReloadContext::Reload(bool force_reload,
MorphInstancesPhase1Allocate(&locator, IG->become());
{
// Apply the new class table before "become". Become will replace
// all the instances of the old size with forwarding corpses, then
// all the instances of the old layout with forwarding corpses, then
// perform a heap walk to fix references to the forwarding corpses.
// During this heap walk, it will encounter instances of the new
// size, so it requires the new class table.
// layout, so it requires the new class table.
ASSERT(HasNoTasks(heap));
// We accepted the hot-reload and morphed instances. So now we can
@ -1664,8 +1804,8 @@ void IsolateGroupReloadContext::MorphInstancesPhase2Become(Become* become) {
ASSERT(HasInstanceMorphers());
become->Forward();
// The heap now contains only instances with the new size. Ordinary GC is safe
// again.
// The heap now contains only instances with the new layout.
// Ordinary GC is safe again.
}
void IsolateGroupReloadContext::ForEachIsolate(
@ -2069,6 +2209,10 @@ class FieldInvalidator {
if (field.needs_load_guard()) {
return; // Already guarding.
}
if (field.is_unboxed()) {
// Unboxed fields are guaranteed to match.
return;
}
value_ = instance.GetField(field);
if (value_.ptr() == Object::sentinel().ptr()) {
if (field.is_late()) {
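A standalone sketch of the unboxed-to-boxed migration performed by InstanceMorpher::CreateMorphedCopies above, using raw byte buffers in place of VM instances (offsets and types are illustrative): the old layout holds the double inline at the field offset, the new layout holds a pointer to a freshly allocated box.

#include <cstdio>
#include <cstring>
#include <memory>
#include <vector>

struct BoxedDouble { double value; };

int main() {
  // "Old" layout: 16 raw bytes with an unboxed double stored at offset 8.
  std::vector<unsigned char> before(16, 0);
  const double unboxed = 42.0;
  std::memcpy(before.data() + 8, &unboxed, sizeof(unboxed));

  // "New" layout: pointer-sized slots; slot 1 now holds a boxed value.
  std::vector<BoxedDouble*> after(2, nullptr);
  double raw;
  std::memcpy(&raw, before.data() + 8, sizeof(raw));           // read raw bits
  auto box = std::make_unique<BoxedDouble>(BoxedDouble{raw});  // allocate box
  after[1] = box.get();                                        // store pointer

  std::printf("%f\n", after[1]->value);  // 42.000000
  return 0;
}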


@ -52,6 +52,14 @@ class ObjectStore;
class Script;
class UpdateClassesVisitor;
struct FieldMapping {
intptr_t offset;
intptr_t box_cid; // kIllegalCid if field is boxed
};
using FieldMappingArray = ZoneGrowableArray<FieldMapping>;
using FieldOffsetArray = ZoneGrowableArray<intptr_t>;
class InstanceMorpher : public ZoneAllocated {
public:
// Creates a new [InstanceMorpher] based on the [from]/[to] class
@ -64,8 +72,8 @@ class InstanceMorpher : public ZoneAllocated {
InstanceMorpher(Zone* zone,
classid_t cid,
ClassTable* class_table,
ZoneGrowableArray<intptr_t>* mapping,
ZoneGrowableArray<intptr_t>* new_fields_offsets);
FieldMappingArray* mapping,
FieldOffsetArray* new_fields_offsets);
virtual ~InstanceMorpher() {}
// Adds an object to be morphed.
@ -87,8 +95,8 @@ class InstanceMorpher : public ZoneAllocated {
Zone* zone_;
classid_t cid_;
ClassTable* class_table_;
ZoneGrowableArray<intptr_t>* mapping_;
ZoneGrowableArray<intptr_t>* new_fields_offsets_;
FieldMappingArray* mapping_;
FieldOffsetArray* new_fields_offsets_;
GrowableArray<const Instance*> before_;
};


@ -4205,21 +4205,45 @@ TEST_CASE(IsolateReload_DeleteStaticField) {
}
}
TEST_CASE(IsolateReload_ExistingFieldChangesType) {
static void TestReloadWithFieldChange(const char* prefix,
const char* suffix,
const char* verify,
const char* from_type,
const char* from_init,
const char* to_type,
const char* to_init) {
const char* late_tag = TestCase::LateTag();
// clang-format off
auto kScript = Utils::CStringUniquePtr(OS::SCreate(nullptr,
R"(
import 'dart:typed_data';
void doubleEq(double got, double expected) {
if (got != expected) throw 'expected $expected got $got';
}
void float32x4Eq(Float32x4 got, Float32x4 expected) {
if (got.equal(expected).signMask != 0xf) throw 'expected $expected got $got';
}
class Foo {
int x = 42;
%s
%s x = %s;
%s
}
%s Foo value;
main() {
value = Foo();
%s
return 'Okay';
}
)",
late_tag), std::free);
prefix,
from_type,
from_init,
suffix,
late_tag,
verify), std::free);
// clang-format on
Dart_Handle lib = TestCase::LoadTestScript(kScript.get(), NULL);
@ -4228,28 +4252,92 @@ TEST_CASE(IsolateReload_ExistingFieldChangesType) {
// clang-format off
auto kReloadScript = Utils::CStringUniquePtr(OS::SCreate(nullptr, R"(
import 'dart:typed_data';
void doubleEq(double got, double expected) {
if (got != expected) throw 'expected $expected got $got';
}
void float32x4Eq(Float32x4 got, Float32x4 expected) {
if (got.equal(expected).signMask != 0xf) throw 'expected $expected got $got';
}
class Foo {
double x = 42.0;
%s
%s x = %s;
%s
}
%s Foo value;
main() {
try {
%s
return value.x.toString();
} catch (e) {
return e.toString();
}
}
)",
late_tag), std::free);
)", prefix, to_type, to_init, suffix,
late_tag, verify), std::free);
// clang-format on
lib = TestCase::ReloadTestScript(kReloadScript.get());
EXPECT_VALID(lib);
EXPECT_STREQ(
"type 'int' is not a subtype of type 'double' of 'function result'",
OS::SCreate(
Thread::Current()->zone(),
"type '%s' is not a subtype of type '%s' of 'function result'",
from_type, to_type),
SimpleInvokeStr(lib, "main"));
}
TEST_CASE(IsolateReload_ExistingFieldChangesType) {
TestReloadWithFieldChange(/*prefix=*/"", /*suffix=*/"", /*verify=*/"",
/*from_type=*/"int", /*from_init=*/"42",
/*to_type=*/"double", /*to_init=*/"42.0");
}
TEST_CASE(IsolateReload_ExistingFieldChangesTypeWithOtherUnboxedFields) {
TestReloadWithFieldChange(
/*prefix=*/"double a = 1.5;",
/*suffix=*/"Float32x4 b = Float32x4(1.0, 2.0, 3.0, 4.0);", /*verify=*/
"doubleEq(value.a, 1.5); float32x4Eq(value.b, Float32x4(1.0, 2.0, 3.0, "
"4.0));",
/*from_type=*/"int", /*from_init=*/"42", /*to_type=*/"double",
/*to_init=*/"42.0");
}
TEST_CASE(IsolateReload_ExistingFieldUnboxedToBoxed) {
TestReloadWithFieldChange(
/*prefix=*/"double a = 1.5;",
/*suffix=*/"Float32x4 b = Float32x4(1.0, 2.0, 3.0, 4.0);", /*verify=*/
"doubleEq(value.a, 1.5); float32x4Eq(value.b, Float32x4(1.0, 2.0, 3.0, "
"4.0));",
/*from_type=*/"double", /*from_init=*/"42.0", /*to_type=*/"String",
/*to_init=*/"'42'");
}
TEST_CASE(IsolateReload_ExistingFieldBoxedToUnboxed) {
// Note: underlying field will not actually be unboxed.
TestReloadWithFieldChange(
/*prefix=*/"double a = 1.5;",
/*suffix=*/"Float32x4 b = Float32x4(1.0, 2.0, 3.0, 4.0);", /*verify=*/
"doubleEq(value.a, 1.5); float32x4Eq(value.b, Float32x4(1.0, 2.0, 3.0, "
"4.0));",
/*from_type=*/"String", /*from_init=*/"'42.0'", /*to_type=*/"double",
/*to_init=*/"42.0");
}
TEST_CASE(IsolateReload_ExistingFieldUnboxedToUnboxed) {
// Note: underlying field will not actually be unboxed.
TestReloadWithFieldChange(
/*prefix=*/"double a = 1.5;",
/*suffix=*/"Float32x4 b = Float32x4(1.0, 2.0, 3.0, 4.0);", /*verify=*/
"doubleEq(value.a, 1.5); float32x4Eq(value.b, Float32x4(1.0, 2.0, 3.0, "
"4.0));",
/*from_type=*/"double", /*from_init=*/"42.0", /*to_type=*/"Float32x4",
/*to_init=*/"Float32x4(1.0, 2.0, 3.0, 4.0)");
}
TEST_CASE(IsolateReload_ExistingStaticFieldChangesType) {
const char* kScript = R"(
int value = init();


@ -869,17 +869,15 @@ void KernelLoader::ReadInferredType(const Field& field,
field.set_is_nullable(type.IsNullable());
field.set_guarded_list_length(Field::kNoFixedLength);
if (FLAG_precompiled_mode) {
field.set_is_unboxing_candidate(
!field.is_late() && !field.is_static() &&
((field.guarded_cid() == kDoubleCid &&
FlowGraphCompiler::SupportsUnboxedDoubles()) ||
(field.guarded_cid() == kFloat32x4Cid &&
FlowGraphCompiler::SupportsUnboxedSimd128()) ||
(field.guarded_cid() == kFloat64x2Cid &&
FlowGraphCompiler::SupportsUnboxedSimd128()) ||
type.IsInt()) &&
!field.is_nullable());
field.set_is_non_nullable_integer(!field.is_nullable() && type.IsInt());
field.set_is_unboxed(!field.is_late() && !field.is_static() &&
!field.is_nullable() &&
((field.guarded_cid() == kDoubleCid &&
FlowGraphCompiler::SupportsUnboxedDoubles()) ||
(field.guarded_cid() == kFloat32x4Cid &&
FlowGraphCompiler::SupportsUnboxedSimd128()) ||
(field.guarded_cid() == kFloat64x2Cid &&
FlowGraphCompiler::SupportsUnboxedSimd128()) ||
type.IsInt()));
}
}
@ -2190,14 +2188,7 @@ ObjectPtr KernelLoader::ReadInitialFieldValue(const Field& field,
if (field_helper->IsStatic()) {
return converter.SimpleValue().ptr();
} else {
// Note: optimizer relies on DoubleInitialized bit in its field-unboxing
// heuristics. See JitCallSpecializer::VisitStoreField for more
// details.
field.RecordStore(converter.SimpleValue());
if (!converter.SimpleValue().IsNull() &&
converter.SimpleValue().IsDouble()) {
field.set_is_double_initialized(true);
}
}
}
}


@ -822,17 +822,13 @@ class InstanceMessageSerializationCluster : public MessageSerializationCluster {
objects_.Add(instance);
const intptr_t next_field_offset = next_field_offset_;
#if defined(DART_PRECOMPILED_RUNTIME)
const auto unboxed_fields_bitmap =
s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(cid_);
#endif
for (intptr_t offset = Instance::NextFieldOffset();
offset < next_field_offset; offset += kCompressedWordSize) {
#if defined(DART_PRECOMPILED_RUNTIME)
if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
continue;
}
#endif
s->Push(reinterpret_cast<CompressedObjectPtr*>(
reinterpret_cast<uword>(instance->untag()) + offset)
->Decompress(instance->untag()->heap_base()));
@ -856,13 +852,10 @@ class InstanceMessageSerializationCluster : public MessageSerializationCluster {
Instance* instance = objects_[i];
const intptr_t next_field_offset = next_field_offset_;
#if defined(DART_PRECOMPILED_RUNTIME)
const auto unboxed_fields_bitmap =
s->isolate_group()->class_table()->GetUnboxedFieldsMapAt(cid_);
#endif
for (intptr_t offset = Instance::NextFieldOffset();
offset < next_field_offset; offset += kCompressedWordSize) {
#if defined(DART_PRECOMPILED_RUNTIME)
if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
// Writes 32 bits of the unboxed value at a time
const uword value = *reinterpret_cast<compressed_uword*>(
@ -870,7 +863,6 @@ class InstanceMessageSerializationCluster : public MessageSerializationCluster {
s->WriteWordWith32BitWrites(value);
continue;
}
#endif
s->WriteRef(reinterpret_cast<CompressedObjectPtr*>(
reinterpret_cast<uword>(instance->untag()) + offset)
->Decompress(instance->untag()->heap_base()));
@ -905,10 +897,9 @@ class InstanceMessageDeserializationCluster
void ReadEdges(MessageDeserializer* d) {
const intptr_t next_field_offset = cls_.host_next_field_offset();
#if defined(DART_PRECOMPILED_RUNTIME)
const auto unboxed_fields_bitmap =
d->isolate_group()->class_table()->GetUnboxedFieldsMapAt(cls_.id());
#else
#if !defined(DART_PRECOMPILED_RUNTIME)
const intptr_t type_argument_field_offset =
cls_.host_type_arguments_field_offset();
const bool use_field_guards = d->isolate_group()->use_field_guards();
@ -922,7 +913,6 @@ class InstanceMessageDeserializationCluster
instance ^= d->Ref(id);
for (intptr_t offset = Instance::NextFieldOffset();
offset < next_field_offset; offset += kCompressedWordSize) {
#if defined(DART_PRECOMPILED_RUNTIME)
if (unboxed_fields_bitmap.Get(offset / kCompressedWordSize)) {
compressed_uword* p = reinterpret_cast<compressed_uword*>(
reinterpret_cast<uword>(instance.untag()) + offset);
@ -930,7 +920,6 @@ class InstanceMessageDeserializationCluster
*p = d->ReadWordWith32BitReads();
continue;
}
#endif
value = d->ReadRef();
instance.SetFieldAtOffset(offset, value);
#if !defined(DART_PRECOMPILED_RUNTIME)
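A standalone sketch of the per-class unboxed-fields bitmap the message (de)serializer consults above: one bit per instance word says whether that word holds raw (unboxed) data or an object reference, so the serializer copies bits for the former and writes a reference for the latter. The class and sizes here are illustrative.

#include <cstdint>
#include <cstdio>

class FakeUnboxedFieldBitmap {
 public:
  explicit FakeUnboxedFieldBitmap(uint64_t bits) : bits_(bits) {}
  bool Get(intptr_t word_index) const { return (bits_ >> word_index) & 1; }
  void Set(intptr_t word_index) { bits_ |= uint64_t{1} << word_index; }

 private:
  uint64_t bits_;
};

int main() {
  FakeUnboxedFieldBitmap bitmap(0);
  bitmap.Set(2);  // pretend the field at word 2 holds an unboxed double
  for (intptr_t word = 1; word < 4; word++) {
    std::printf("word %ld: %s\n", static_cast<long>(word),
                bitmap.Get(word) ? "copy raw bits" : "write object reference");
  }
  return 0;
}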


@ -3560,6 +3560,19 @@ TypeParameterPtr Class::TypeParameterAt(intptr_t index,
return type_param.ptr();
}
intptr_t Class::UnboxedFieldSizeInBytesByCid(intptr_t cid) {
switch (cid) {
case kDoubleCid:
return sizeof(UntaggedDouble::value_);
case kFloat32x4Cid:
return sizeof(UntaggedFloat32x4::value_);
case kFloat64x2Cid:
return sizeof(UntaggedFloat64x2::value_);
default:
return sizeof(UntaggedMint::value_);
}
}
UnboxedFieldBitmap Class::CalculateFieldOffsets() const {
Array& flds = Array::Handle(fields());
const Class& super = Class::Handle(SuperClass());
@ -3587,11 +3600,8 @@ UnboxedFieldBitmap Class::CalculateFieldOffsets() const {
ASSERT(num_native_fields() == 0);
set_num_native_fields(super.num_native_fields());
if (FLAG_precompiled_mode) {
host_bitmap =
IsolateGroup::Current()->class_table()->GetUnboxedFieldsMapAt(
super.id());
}
host_bitmap = IsolateGroup::Current()->class_table()->GetUnboxedFieldsMapAt(
super.id());
}
// If the super class is parameterized, use the same type_arguments field,
// otherwise, if this class is the first in the super chain to be
@ -3623,27 +3633,9 @@ UnboxedFieldBitmap Class::CalculateFieldOffsets() const {
ASSERT(field.TargetOffset() == 0);
field.SetOffset(host_offset, target_offset);
if (FLAG_precompiled_mode && field.is_unboxing_candidate()) {
intptr_t field_size;
switch (field.guarded_cid()) {
case kDoubleCid:
field_size = sizeof(UntaggedDouble::value_);
break;
case kFloat32x4Cid:
field_size = sizeof(UntaggedFloat32x4::value_);
break;
case kFloat64x2Cid:
field_size = sizeof(UntaggedFloat64x2::value_);
break;
default:
if (field.is_non_nullable_integer()) {
field_size = sizeof(UntaggedMint::value_);
} else {
UNREACHABLE();
field_size = 0;
}
break;
}
if (field.is_unboxed()) {
const intptr_t field_size =
UnboxedFieldSizeInBytesByCid(field.guarded_cid());
const intptr_t host_num_words = field_size / kCompressedWordSize;
const intptr_t host_next_offset = host_offset + field_size;
@ -3669,7 +3661,7 @@ UnboxedFieldBitmap Class::CalculateFieldOffsets() const {
target_offset = target_next_offset;
} else {
// Make the field boxed
field.set_is_unboxing_candidate(false);
field.set_is_unboxed(false);
host_offset += kCompressedWordSize;
target_offset += compiler::target::kCompressedWordSize;
}
@ -3682,7 +3674,6 @@ UnboxedFieldBitmap Class::CalculateFieldOffsets() const {
set_instance_size(RoundedAllocationSize(host_offset),
compiler::target::RoundedAllocationSize(target_offset));
set_next_field_offset(host_offset, target_offset);
return host_bitmap;
}
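A standalone sketch of the layout rule CalculateFieldOffsets applies above: an unboxed field occupies its raw size in the instance and marks the corresponding words in the class's unboxed-fields bitmap, while a boxed field takes a single word. Word size and header size here are illustrative, not the VM's compressed-pointer layout.

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr intptr_t kWordSize = 8;  // illustrative word size

struct FieldDesc {
  bool is_unboxed;
  intptr_t unboxed_size;  // e.g. 8 for a double, 16 for a simd128 value
};

int main() {
  const std::vector<FieldDesc> fields = {
      {false, 0}, {true, 8}, {true, 16}, {false, 0}};
  uint64_t unboxed_words_bitmap = 0;
  intptr_t offset = kWordSize;  // fields start after the object header
  for (const FieldDesc& field : fields) {
    if (field.is_unboxed) {
      for (intptr_t w = 0; w < field.unboxed_size / kWordSize; w++) {
        unboxed_words_bitmap |= uint64_t{1} << (offset / kWordSize + w);
      }
      offset += field.unboxed_size;  // raw value stored inline
    } else {
      offset += kWordSize;  // ordinary tagged pointer slot
    }
  }
  std::printf("instance size %ld, bitmap %#llx\n", static_cast<long>(offset),
              static_cast<unsigned long long>(unboxed_words_bitmap));
  return 0;
}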
@ -4155,8 +4146,6 @@ void Class::Finalize() const {
// Unless class is top-level, which don't get instantiated,
// sets the new size in the class table.
isolate_group->class_table()->UpdateClassSize(id(), ptr());
}
if (FLAG_precompiled_mode && !ClassTable::IsTopLevelCid(id())) {
isolate_group->class_table()->SetUnboxedFieldsMapAt(id(), host_bitmap);
}
}
@ -10671,43 +10660,6 @@ FieldPtr Field::Original() const {
}
}
const Object* Field::CloneForUnboxed(const Object& value) const {
if (is_unboxing_candidate() && !is_nullable()) {
switch (guarded_cid()) {
case kDoubleCid:
case kFloat32x4Cid:
case kFloat64x2Cid:
return &Object::Handle(Object::Clone(value, Heap::kNew));
default:
// Not a supported unboxed field type.
return &value;
}
}
return &value;
}
void Field::DisableFieldUnboxing() const {
ASSERT(!IsOriginal());
const Field& original = Field::Handle(Original());
if (!original.is_unboxing_candidate()) {
return;
}
auto thread = Thread::Current();
SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
if (!original.is_unboxing_candidate()) {
return;
}
// Ensures that to-be-disabled existing code won't continue running as we
// update field properties as it might write into now boxed field thinking
// it still holds unboxed(reusable box) value.
thread->isolate_group()->RunWithStoppedMutators([&]() {
original.set_is_unboxing_candidate(false);
set_is_unboxing_candidate(false);
original.DeoptimizeDependentCode();
});
}
intptr_t Field::guarded_cid() const {
#if defined(DEBUG)
// This assertion ensures that the cid seen by the background compiler is
@ -10935,19 +10887,14 @@ void Field::InitializeNew(const Field& result,
result.set_is_const(is_const);
result.set_is_reflectable(is_reflectable);
result.set_is_late(is_late);
result.set_is_double_initialized_unsafe(false);
result.set_owner(owner);
result.set_token_pos(token_pos);
result.set_end_token_pos(end_token_pos);
result.set_has_nontrivial_initializer_unsafe(false);
result.set_has_initializer_unsafe(false);
if (FLAG_precompiled_mode) {
// May be updated by KernelLoader::ReadInferredType
result.set_is_unboxing_candidate_unsafe(false);
} else {
result.set_is_unboxing_candidate_unsafe(!is_final && !is_late &&
!is_static);
}
// We will make unboxing decision once we read static type or
// in KernelLoader::ReadInferredType.
result.set_is_unboxed_unsafe(false);
result.set_initializer_changed_after_initialization(false);
NOT_IN_PRECOMPILED(result.set_kernel_offset(0));
result.set_has_pragma(false);
@ -10993,6 +10940,9 @@ FieldPtr Field::New(const String& name,
InitializeNew(result, name, is_static, is_final, is_const, is_reflectable,
is_late, owner, token_pos, end_token_pos);
result.SetFieldTypeSafe(type);
#if !defined(DART_PRECOMPILED_RUNTIME)
compiler::target::UnboxFieldIfSupported(result, type);
#endif
return result.ptr();
}
@ -11249,7 +11199,7 @@ bool Field::IsConsistentWith(const Field& other) const {
(untag()->is_nullable_ == other.untag()->is_nullable_) &&
(untag()->guarded_list_length() ==
other.untag()->guarded_list_length()) &&
(is_unboxing_candidate() == other.is_unboxing_candidate()) &&
(is_unboxed() == other.is_unboxed()) &&
(static_type_exactness_state().Encode() ==
other.static_type_exactness_state().Encode());
}
@ -12019,10 +11969,10 @@ void Field::RecordStore(const Object& value) const {
}
void Field::ForceDynamicGuardedCidAndLength() const {
// Assume nothing about this field.
set_is_unboxing_candidate(false);
set_guarded_cid(kDynamicCid);
set_is_nullable(true);
if (!is_unboxed()) {
set_guarded_cid(kDynamicCid);
set_is_nullable(true);
}
set_guarded_list_length(Field::kNoFixedLength);
set_guarded_list_length_in_object_offset(Field::kUnknownLengthOffset);
if (static_type_exactness_state().IsTracking()) {
@ -19562,7 +19512,7 @@ bool Instance::CheckIsCanonical(Thread* thread) const {
#endif // DEBUG
ObjectPtr Instance::GetField(const Field& field) const {
if (FLAG_precompiled_mode && field.is_unboxing_candidate()) {
if (field.is_unboxed()) {
switch (field.guarded_cid()) {
case kDoubleCid:
return Double::New(*reinterpret_cast<double_t*>(FieldAddr(field)));
@ -19573,12 +19523,7 @@ ObjectPtr Instance::GetField(const Field& field) const {
return Float64x2::New(
*reinterpret_cast<simd128_value_t*>(FieldAddr(field)));
default:
if (field.is_non_nullable_integer()) {
return Integer::New(*reinterpret_cast<int64_t*>(FieldAddr(field)));
} else {
UNREACHABLE();
return nullptr;
}
return Integer::New(*reinterpret_cast<int64_t*>(FieldAddr(field)));
}
} else {
return FieldAddr(field)->Decompress(untag()->heap_base());
@ -19586,7 +19531,7 @@ ObjectPtr Instance::GetField(const Field& field) const {
}
void Instance::SetField(const Field& field, const Object& value) const {
if (FLAG_precompiled_mode && field.is_unboxing_candidate()) {
if (field.is_unboxed()) {
switch (field.guarded_cid()) {
case kDoubleCid:
StoreNonPointer(reinterpret_cast<double_t*>(FieldAddr(field)),
@ -19601,18 +19546,13 @@ void Instance::SetField(const Field& field, const Object& value) const {
Float64x2::Cast(value).value());
break;
default:
if (field.is_non_nullable_integer()) {
StoreNonPointer(reinterpret_cast<int64_t*>(FieldAddr(field)),
Integer::Cast(value).AsInt64Value());
} else {
UNREACHABLE();
}
StoreNonPointer(reinterpret_cast<int64_t*>(FieldAddr(field)),
Integer::Cast(value).AsInt64Value());
break;
}
} else {
field.RecordStore(value);
const Object* stored_value = field.CloneForUnboxed(value);
StoreCompressedPointer(FieldAddr(field), stored_value->ptr());
StoreCompressedPointer(FieldAddr(field), value.ptr());
}
}
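For context, the reworked GetField/SetField above dispatch purely on Field::is_unboxed() and the guarded cid instead of a precompiled-mode flag. Below is a minimal standalone sketch of the same idea, assuming a hypothetical instance layout where an unboxed double lives inline at a fixed byte offset; the names and layout are illustrative, not the VM's actual API.

#include <cstdint>
#include <cstring>
#include <iostream>

// Hypothetical instance: a header word followed by raw field storage.
struct FakeInstance {
  uint64_t header;
  unsigned char payload[32];

  // Unboxed read: reinterpret the bytes at 'offset' as a double.
  double GetUnboxedDouble(int offset) const {
    double value;
    std::memcpy(&value, payload + offset, sizeof(value));
    return value;
  }

  // Unboxed write: store raw bits; no box object is allocated.
  void SetUnboxedDouble(int offset, double value) {
    std::memcpy(payload + offset, &value, sizeof(value));
  }
};

int main() {
  FakeInstance obj{};
  obj.SetUnboxedDouble(8, 3.25);
  std::cout << obj.GetUnboxedDouble(8) << "\n";  // prints 3.25
}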

View file

@ -1831,6 +1831,10 @@ class Class : public Object {
#endif // defined(DART_PRECOMPILER)
}
static intptr_t UnboxedFieldSizeInBytesByCid(intptr_t cid);
void MarkFieldBoxedDuringReload(ClassTable* class_table,
const Field& field) const;
private:
TypePtr declaration_type() const {
return untag()->declaration_type<std::memory_order_acquire>();
@ -1845,7 +1849,8 @@ class Class : public Object {
ProgramReloadContext* context) const;
// Tells whether instances need morphing for reload.
bool RequiresInstanceMorphing(const Class& replacement) const;
bool RequiresInstanceMorphing(ClassTable* class_table,
const Class& replacement) const;
template <class FakeInstance, class TargetFakeInstance>
static ClassPtr NewCommon(intptr_t index);
@ -4054,9 +4059,6 @@ class Field : public Object {
return !untag()->owner()->IsField();
}
// Mark previously unboxed field boxed. Only operates on clones, updates
// original as well as this clone.
void DisableFieldUnboxing() const;
// Returns a field cloned from 'this'. 'this' is set as the
// original field of result.
FieldPtr CloneFromOriginal() const;
@ -4088,22 +4090,6 @@ class Field : public Object {
// TODO(36097): Once concurrent access is possible ensure updates are safe.
set_kind_bits(ReflectableBit::update(value, untag()->kind_bits_));
}
bool is_double_initialized() const {
return DoubleInitializedBit::decode(kind_bits());
}
// Called in parser after allocating field, immutable property otherwise.
// Marks fields that are initialized with a simple double constant.
void set_is_double_initialized_unsafe(bool value) const {
ASSERT(IsOriginal());
// TODO(36097): Once concurrent access is possible ensure updates are safe.
set_kind_bits(DoubleInitializedBit::update(value, untag()->kind_bits_));
}
void set_is_double_initialized(bool value) const {
DEBUG_ASSERT(
IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter());
set_is_double_initialized_unsafe(value);
}
bool initializer_changed_after_initialization() const {
return InitializerChangedAfterInitializatonBit::decode(kind_bits());
@ -4268,17 +4254,6 @@ class Field : public Object {
return has_initializer() && !has_nontrivial_initializer();
}
bool is_non_nullable_integer() const {
return IsNonNullableIntBit::decode(kind_bits());
}
void set_is_non_nullable_integer(bool is_non_nullable_integer) const {
ASSERT(Thread::Current()->IsMutatorThread());
// TODO(36097): Once concurrent access is possible ensure updates are safe.
set_kind_bits(IsNonNullableIntBit::update(is_non_nullable_integer,
untag()->kind_bits_));
}
StaticTypeExactnessState static_type_exactness_state() const {
return StaticTypeExactnessState::Decode(
LoadNonPointer<int8_t, std::memory_order_relaxed>(
@ -4358,22 +4333,20 @@ class Field : public Object {
const char* GuardedPropertiesAsCString() const;
intptr_t UnboxedFieldCid() const { return guarded_cid(); }
bool is_unboxing_candidate() const {
return UnboxingCandidateBit::decode(kind_bits());
bool is_unboxed() const {
return UnboxedBit::decode(kind_bits());
}
// Default 'true', set to false once optimizing compiler determines it should
// be boxed.
void set_is_unboxing_candidate_unsafe(bool b) const {
set_kind_bits(UnboxingCandidateBit::update(b, untag()->kind_bits_));
// Field unboxing decisions are based either on static types (JIT) or
// inferred types (AOT). See the callers of this function.
void set_is_unboxed_unsafe(bool b) const {
set_kind_bits(UnboxedBit::update(b, untag()->kind_bits_));
}
void set_is_unboxing_candidate(bool b) const {
void set_is_unboxed(bool b) const {
DEBUG_ASSERT(
IsolateGroup::Current()->program_lock()->IsCurrentThreadWriter());
set_is_unboxing_candidate_unsafe(b);
set_is_unboxed_unsafe(b);
}
enum {
@ -4488,10 +4461,6 @@ class Field : public Object {
void set_type_test_cache(const SubtypeTestCache& cache) const;
#endif
// Unboxed fields require exclusive ownership of the box.
// Ensure this by cloning the box if necessary.
const Object* CloneForUnboxed(const Object& value) const;
private:
static void InitializeNew(const Field& result,
const String& name,
@ -4510,9 +4479,8 @@ class Field : public Object {
kStaticBit,
kFinalBit,
kHasNontrivialInitializerBit,
kUnboxingCandidateBit,
kUnboxedBit,
kReflectableBit,
kDoubleInitializedBit,
kInitializerChangedAfterInitializatonBit,
kHasPragmaBit,
kCovariantBit,
@ -4521,18 +4489,14 @@ class Field : public Object {
kIsExtensionMemberBit,
kNeedsLoadGuardBit,
kHasInitializerBit,
kIsNonNullableIntBit,
};
class ConstBit : public BitField<uint16_t, bool, kConstBit, 1> {};
class StaticBit : public BitField<uint16_t, bool, kStaticBit, 1> {};
class FinalBit : public BitField<uint16_t, bool, kFinalBit, 1> {};
class HasNontrivialInitializerBit
: public BitField<uint16_t, bool, kHasNontrivialInitializerBit, 1> {};
class UnboxingCandidateBit
: public BitField<uint16_t, bool, kUnboxingCandidateBit, 1> {};
class UnboxedBit : public BitField<uint16_t, bool, kUnboxedBit, 1> {};
class ReflectableBit : public BitField<uint16_t, bool, kReflectableBit, 1> {};
class DoubleInitializedBit
: public BitField<uint16_t, bool, kDoubleInitializedBit, 1> {};
class InitializerChangedAfterInitializatonBit
: public BitField<uint16_t,
bool,
@ -4549,8 +4513,6 @@ class Field : public Object {
: public BitField<uint16_t, bool, kNeedsLoadGuardBit, 1> {};
class HasInitializerBit
: public BitField<uint16_t, bool, kHasInitializerBit, 1> {};
class IsNonNullableIntBit
: public BitField<uint16_t, bool, kIsNonNullableIntBit, 1> {};
// Force this field's guard to be dynamic and deoptimize dependent code.
void ForceDynamicGuardedCidAndLength() const;
@ -7770,6 +7732,19 @@ class Instance : public Object {
StoreCompressedPointer(RawFieldAddrAtOffset(offset), value.ptr());
}
template <typename T>
T* RawUnboxedFieldAddrAtOffset(intptr_t offset) const {
return reinterpret_cast<T*>(raw_value() - kHeapObjectTag + offset);
}
template <typename T>
T RawGetUnboxedFieldAtOffset(intptr_t offset) const {
return *RawUnboxedFieldAddrAtOffset<T>(offset);
}
template <typename T>
void RawSetUnboxedFieldAtOffset(intptr_t offset, const T& value) const {
*RawUnboxedFieldAddrAtOffset<T>(offset) = value;
}
static InstancePtr NewFromCidAndSize(ClassTable* class_table,
classid_t cid,
Heap::Space heap = Heap::kNew);
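The kind_bits_ changes above drop UnboxingCandidateBit, DoubleInitializedBit and IsNonNullableIntBit in favour of a single UnboxedBit. A minimal sketch of this BitField-style packing follows, with illustrative flag positions rather than the VM's actual enum layout.

#include <cassert>
#include <cstdint>

// Simplified BitField: packs a bool at a fixed position in a uint16_t.
template <int kPosition>
struct BoolBit {
  static constexpr uint16_t mask() { return uint16_t(1) << kPosition; }
  static bool decode(uint16_t bits) { return (bits & mask()) != 0; }
  static uint16_t update(bool value, uint16_t bits) {
    return value ? (bits | mask()) : (bits & ~mask());
  }
};

// Illustrative positions; the real enum in object.h differs.
using ConstBit = BoolBit<0>;
using StaticBit = BoolBit<1>;
using UnboxedBit = BoolBit<2>;

int main() {
  uint16_t kind_bits = 0;
  kind_bits = UnboxedBit::update(true, kind_bits);
  assert(UnboxedBit::decode(kind_bits));
  assert(!StaticBit::decode(kind_bits));
  kind_bits = UnboxedBit::update(false, kind_bits);
  assert(!UnboxedBit::decode(kind_bits));
  return 0;
}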

View file

@ -100,12 +100,10 @@ class ObjectSlots {
// If the field is unboxed, we don't know the size of it (may be
// multiple words) - but that doesn't matter because
// a) we will process instances using the slots we collect
// (instead of regular GC visitor
// (instead of regular GC visitor);
// b) we will not write the value of the field and instead treat
// it like a dummy reference to 0 (like we do with Smis).
const bool kIsUnboxedField =
FLAG_precompiled_mode && field.is_unboxing_candidate();
slots->Add(ObjectSlot(field.HostOffset(), !kIsUnboxedField,
slots->Add(ObjectSlot(field.HostOffset(), !field.is_unboxed(),
name.ToCString()));
}
}
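The slot collection above now keys off field.is_unboxed() directly rather than a precompiled-mode check. As a rough standalone sketch, assuming a hypothetical slot record that marks whether a given offset holds a compressed pointer or raw (unboxed) bits:

#include <cstdio>
#include <vector>

// Hypothetical description of one instance field.
struct FieldDesc {
  int host_offset;
  bool is_unboxed;
  const char* name;
};

// Slot: offset plus whether reload machinery should treat it as a pointer
// (unboxed fields are skipped / treated as dummy references).
struct ObjectSlot {
  int offset;
  bool is_compressed_pointer;
  const char* name;
};

int main() {
  const FieldDesc fields[] = {
      {8, false, "name"}, {16, true, "balance"}, {24, false, "next"}};
  std::vector<ObjectSlot> slots;
  for (const FieldDesc& field : fields) {
    // Mirrors slots->Add(ObjectSlot(offset, !field.is_unboxed(), name)).
    slots.push_back({field.host_offset, !field.is_unboxed, field.name});
  }
  for (const ObjectSlot& slot : slots) {
    std::printf("%s @ %d pointer=%d\n", slot.name, slot.offset,
                slot.is_compressed_pointer ? 1 : 0);
  }
}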

View file

@ -1304,7 +1304,7 @@ class ObjectCopy : public Base {
typename Types::Object to,
intptr_t cid) {
if (IsImplicitFieldClassId(cid)) {
CopyUserdefinedInstance(from, to);
CopyUserdefinedInstanceWithoutUnboxedFields(from, to);
return;
}
switch (cid) {
@ -1367,21 +1367,18 @@ class ObjectCopy : public Base {
FATAL1("Unexpected object: %s\n", obj.ToCString());
}
#if defined(DART_PRECOMPILED_RUNTIME)
void CopyUserdefinedInstanceAOT(typename Types::Object from,
typename Types::Object to,
UnboxedFieldBitmap bitmap) {
void CopyUserdefinedInstance(typename Types::Object from,
typename Types::Object to,
UnboxedFieldBitmap bitmap) {
const intptr_t instance_size = UntagObject(from)->HeapSize();
Base::ForwardCompressedPointers(from, to, kWordSize, instance_size, bitmap);
}
#endif
void CopyUserdefinedInstance(typename Types::Object from,
typename Types::Object to) {
void CopyUserdefinedInstanceWithoutUnboxedFields(typename Types::Object from,
typename Types::Object to) {
const intptr_t instance_size = UntagObject(from)->HeapSize();
Base::ForwardCompressedPointers(from, to, kWordSize, instance_size);
}
void CopyClosure(typename Types::Closure from, typename Types::Closure to) {
Base::StoreCompressedPointers(
from, to, OFFSET_OF(UntaggedClosure, instantiator_type_arguments_),
@ -1870,13 +1867,9 @@ class FastObjectCopy : public ObjectCopy<FastObjectCopyBase> {
CopyPredefinedInstance(from, to, cid);
return;
}
#if defined(DART_PRECOMPILED_RUNTIME)
const auto bitmap = class_table_->GetUnboxedFieldsMapAt(cid);
CopyUserdefinedInstanceAOT(Instance::RawCast(from), Instance::RawCast(to),
bitmap);
#else
CopyUserdefinedInstance(Instance::RawCast(from), Instance::RawCast(to));
#endif
CopyUserdefinedInstance(Instance::RawCast(from), Instance::RawCast(to),
bitmap);
if (cid == expando_cid_) {
EnqueueExpandoToRehash(to);
}
@ -2004,12 +1997,8 @@ class SlowObjectCopy : public ObjectCopy<SlowObjectCopyBase> {
CopyPredefinedInstance(from, to, cid);
return;
}
#if defined(DART_PRECOMPILED_RUNTIME)
const auto bitmap = class_table_->GetUnboxedFieldsMapAt(cid);
CopyUserdefinedInstanceAOT(from, to, bitmap);
#else
CopyUserdefinedInstance(from, to);
#endif
CopyUserdefinedInstance(from, to, bitmap);
if (cid == expando_cid_) {
EnqueueExpandoToRehash(to);
}
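CopyUserdefinedInstance now always takes the per-class unboxed-fields bitmap (previously this was AOT-only). The following is a rough standalone sketch of how such a bitmap can steer a word-by-word copy, forwarding pointer slots and copying unboxed words verbatim; the bitmap type and the forwarding helper are simplified stand-ins, not the VM's.

#include <cstdint>
#include <cstdio>

// Simplified 64-bit bitmap: bit i set => word i holds unboxed (raw) data.
struct UnboxedFieldBitmap {
  uint64_t value = 0;
  bool Get(int i) const { return (value >> i) & 1; }
  void Set(int i) { value |= (uint64_t(1) << i); }
};

// Stand-in for forwarding a pointer slot during a copy.
uint64_t ForwardPointer(uint64_t old_pointer) {
  return old_pointer + 0x1000;  // pretend the target object moved
}

void CopyInstanceWords(const uint64_t* from, uint64_t* to, int num_words,
                       const UnboxedFieldBitmap& bitmap) {
  for (int i = 0; i < num_words; i++) {
    if (bitmap.Get(i)) {
      to[i] = from[i];                  // raw bits: copy verbatim
    } else {
      to[i] = ForwardPointer(from[i]);  // pointer: forward to the copy
    }
  }
}

int main() {
  uint64_t from[4] = {0x10, 0x20, 0x3ff0000000000000ull, 0x40};
  uint64_t to[4] = {};
  UnboxedFieldBitmap bitmap;
  bitmap.Set(2);  // word 2 is an unboxed double (1.0)
  CopyInstanceWords(from, to, 4, bitmap);
  std::printf("%llx %llx\n", (unsigned long long)to[1],
              (unsigned long long)to[2]);
}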

View file

@ -220,12 +220,6 @@ void Class::CopyStaticFieldValues(ProgramReloadContext* reload_context,
field.set_field_id_unsafe(old_field.field_id());
}
reload_context->AddStaticFieldMapping(old_field, field);
} else {
if (old_field.needs_load_guard()) {
ASSERT(!old_field.is_unboxing_candidate());
field.set_needs_load_guard(true);
field.set_is_unboxing_candidate_unsafe(false);
}
}
}
}
@ -745,7 +739,29 @@ void Class::CheckReload(const Class& replacement,
id(), replacement.id());
}
bool Class::RequiresInstanceMorphing(const Class& replacement) const {
void Class::MarkFieldBoxedDuringReload(ClassTable* class_table,
const Field& field) const {
if (!field.is_unboxed()) {
return;
}
field.set_is_unboxed_unsafe(false);
// Make sure to update the bitmap used for scanning.
auto unboxed_fields_map = class_table->GetUnboxedFieldsMapAt(id());
const auto start_index = field.HostOffset() >> kCompressedWordSizeLog2;
const auto end_index =
start_index + (Class::UnboxedFieldSizeInBytesByCid(field.guarded_cid()) >>
kCompressedWordSizeLog2);
ASSERT(unboxed_fields_map.Get(start_index));
for (intptr_t i = start_index; i < end_index; i++) {
unboxed_fields_map.Clear(i);
}
class_table->SetUnboxedFieldsMapAt(id(), unboxed_fields_map);
}
bool Class::RequiresInstanceMorphing(ClassTable* class_table,
const Class& replacement) const {
// Get the field maps for both classes. These field maps walk the class
// hierarchy.
auto isolate_group = IsolateGroup::Current();
@ -783,6 +799,22 @@ bool Class::RequiresInstanceMorphing(const Class& replacement) const {
field_name = field.name();
replacement_field_name = replacement_field.name();
if (!field_name.Equals(replacement_field_name)) return true;
if (field.is_unboxed() && !replacement_field.is_unboxed()) {
return true;
}
if (field.is_unboxed() && (field.type() != replacement_field.type())) {
return true;
}
if (!field.is_unboxed() && replacement_field.is_unboxed()) {
// No actual morphing is required in this case but we need to mark
// the field boxed.
replacement.MarkFieldBoxedDuringReload(class_table, replacement_field);
}
if (field.needs_load_guard()) {
ASSERT(!field.is_unboxed());
ASSERT(!replacement_field.is_unboxed());
replacement_field.set_needs_load_guard(true);
}
}
return false;
}
@ -799,11 +831,11 @@ bool Class::CanReloadFinalized(const Class& replacement,
TypeParametersChanged(context->zone(), *this, replacement));
return false;
}
if (RequiresInstanceMorphing(replacement)) {
if (RequiresInstanceMorphing(class_table, replacement)) {
ASSERT(id() == replacement.id());
const classid_t cid = id();
// We unconditionally create an instance morpher. As a side effect of
// building the morpher, we will mark all new fields as late.
// building the morpher, we will mark all new fields as guarded on load.
auto instance_morpher = InstanceMorpher::CreateFromClassDescriptors(
context->zone(), class_table, *this, replacement);
group_context->EnsureHasInstanceMorpherFor(cid, instance_morpher);
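MarkFieldBoxedDuringReload above clears the field's words out of the class's unboxed-fields bitmap so the GC goes back to scanning them as pointers. A standalone sketch of that bit-range clearing follows, using a simplified bitmap and made-up offsets and sizes.

#include <cassert>
#include <cstdint>

struct UnboxedFieldBitmap {
  uint64_t value = 0;
  bool Get(int i) const { return (value >> i) & 1; }
  void Set(int i) { value |= (uint64_t(1) << i); }
  void Clear(int i) { value &= ~(uint64_t(1) << i); }
};

// Clear all words covered by a field that is being boxed again.
void MarkFieldBoxed(UnboxedFieldBitmap* bitmap, int field_offset_in_bytes,
                    int field_size_in_bytes, int word_size_in_bytes) {
  const int start_index = field_offset_in_bytes / word_size_in_bytes;
  const int end_index =
      start_index + field_size_in_bytes / word_size_in_bytes;
  assert(bitmap->Get(start_index));  // field was unboxed before
  for (int i = start_index; i < end_index; i++) {
    bitmap->Clear(i);
  }
}

int main() {
  UnboxedFieldBitmap bitmap;
  bitmap.Set(2);
  bitmap.Set(3);  // e.g. an unboxed double on a 32-bit-word layout
  MarkFieldBoxed(&bitmap, /*field_offset_in_bytes=*/8,
                 /*field_size_in_bytes=*/8, /*word_size_in_bytes=*/4);
  assert(!bitmap.Get(2) && !bitmap.Get(3));
  return 0;
}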

View file

@ -395,7 +395,6 @@ void UntaggedObject::VisitPointersPrecise(IsolateGroup* isolate_group,
const auto first = reinterpret_cast<CompressedObjectPtr*>(from);
const auto last = reinterpret_cast<CompressedObjectPtr*>(to);
#if defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
const auto unboxed_fields_bitmap =
visitor->class_table()->GetUnboxedFieldsMapAt(class_id);
@ -409,9 +408,6 @@ void UntaggedObject::VisitPointersPrecise(IsolateGroup* isolate_group,
} else {
visitor->VisitCompressedPointers(heap_base(), first, last);
}
#else
visitor->VisitCompressedPointers(heap_base(), first, last);
#endif // defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
}
bool UntaggedObject::FindObject(FindObjectVisitor* visitor) {

View file

@ -416,7 +416,6 @@ class UntaggedObject {
const auto first = reinterpret_cast<CompressedObjectPtr*>(from);
const auto last = reinterpret_cast<CompressedObjectPtr*>(to);
#if defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
const auto unboxed_fields_bitmap =
visitor->class_table()->GetUnboxedFieldsMapAt(class_id);
@ -430,10 +429,6 @@ class UntaggedObject {
} else {
visitor->VisitCompressedPointers(heap_base(), first, last);
}
#else
// Call visitor function virtually
visitor->VisitCompressedPointers(heap_base(), first, last);
#endif // defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
return instance_size;
}
@ -454,7 +449,6 @@ class UntaggedObject {
const auto first = reinterpret_cast<CompressedObjectPtr*>(from);
const auto last = reinterpret_cast<CompressedObjectPtr*>(to);
#if defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
const auto unboxed_fields_bitmap =
visitor->class_table()->GetUnboxedFieldsMapAt(class_id);
@ -468,10 +462,6 @@ class UntaggedObject {
} else {
visitor->V::VisitCompressedPointers(heap_base(), first, last);
}
#else
// Call visitor function non-virtually
visitor->V::VisitCompressedPointers(heap_base(), first, last);
#endif // defined(SUPPORT_UNBOXED_INSTANCE_FIELDS)
return instance_size;
}
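With the SUPPORT_UNBOXED_INSTANCE_FIELDS fallback gone, the visitors above always consult the class's unboxed-fields bitmap. As a rough standalone sketch of that visiting pattern, with a simplified bitmap and a callback standing in for the VM's pointer visitor:

#include <cstdint>
#include <cstdio>

struct UnboxedFieldBitmap {
  uint64_t value = 0;
  bool Get(int i) const { return (value >> i) & 1; }
  void Set(int i) { value |= (uint64_t(1) << i); }
};

// Visit only the words that actually hold (compressed) pointers; words
// marked in the bitmap contain raw unboxed data and must be skipped.
template <typename Visitor>
void VisitInstanceWords(const uint64_t* words, int num_words,
                        const UnboxedFieldBitmap& bitmap, Visitor&& visit) {
  for (int i = 0; i < num_words; i++) {
    if (!bitmap.Get(i)) {
      visit(words[i]);
    }
  }
}

int main() {
  const uint64_t words[4] = {0x10, 0x4000000000000000ull, 0x20, 0x30};
  UnboxedFieldBitmap bitmap;
  bitmap.Set(1);  // word 1 is an unboxed double, not a pointer
  VisitInstanceWords(words, 4, bitmap, [](uint64_t pointer) {
    std::printf("pointer word: %llx\n", (unsigned long long)pointer);
  });
}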

View file

@ -385,6 +385,16 @@ DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(BoxDouble, 0) {
arguments.SetReturn(Object::Handle(zone, Double::New(val)));
}
DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(BoxFloat32x4, 0) {
const auto val = thread->unboxed_simd128_runtime_arg();
arguments.SetReturn(Object::Handle(zone, Float32x4::New(val)));
}
DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(BoxFloat64x2, 0) {
const auto val = thread->unboxed_simd128_runtime_arg();
arguments.SetReturn(Object::Handle(zone, Float64x2::New(val)));
}
DEFINE_RUNTIME_ENTRY_NO_LAZY_DEOPT(AllocateMint, 0) {
if (FLAG_shared_slow_path_triggers_gc) {
isolate->group()->heap()->CollectAllGarbage(GCReason::kDebugging);

View file

@ -21,6 +21,8 @@ namespace dart {
V(AllocateRecord) \
V(AllocateSuspendState) \
V(BoxDouble) \
V(BoxFloat32x4) \
V(BoxFloat64x2) \
V(BreakpointRuntimeHandler) \
V(SingleStepHandler) \
V(CloneContext) \

View file

@ -59,6 +59,8 @@ namespace dart {
V(AllocateRecord) \
V(AllocateUnhandledException) \
V(BoxDouble) \
V(BoxFloat32x4) \
V(BoxFloat64x2) \
V(CloneContext) \
V(CallToRuntime) \
V(LazyCompile) \

View file

@ -77,8 +77,6 @@ Thread::Thread(bool is_vm_isolate)
store_buffer_block_(nullptr),
marking_stack_block_(nullptr),
vm_tag_(0),
unboxed_int64_runtime_arg_(0),
unboxed_double_runtime_arg_(0.0),
active_exception_(Object::null()),
active_stacktrace_(Object::null()),
global_object_pool_(ObjectPool::null()),
@ -149,6 +147,8 @@ Thread::Thread(bool is_vm_isolate)
#else
next_task_id_ = Random::GlobalNextUInt64();
#endif
memset(&unboxed_runtime_arg_, 0, sizeof(simd128_value_t));
}
static const double double_nan_constant = NAN;
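The Thread changes above collapse the separate int64 and double runtime-argument slots into a single 16-byte simd128 slot, so Float32x4/Float64x2 values can also be handed to runtime entries such as BoxFloat32x4/BoxFloat64x2. A minimal sketch of that union layout; the type mirrors the simd128_value_t union but is a local stand-in:

#include <cstdint>
#include <cstdio>

// 16-byte union that can carry an int64, a double, or a full SIMD value.
union RuntimeArgSlot {
  int32_t int_storage[4];
  int64_t int64_storage[2];
  float float_storage[4];
  double double_storage[2];
};
static_assert(sizeof(RuntimeArgSlot) == 16, "slot must span 16 bytes");

int main() {
  RuntimeArgSlot slot = {};

  // Pass an unboxed double through the shared slot...
  slot.double_storage[0] = 2.5;
  std::printf("double arg: %f\n", slot.double_storage[0]);

  // ...or an int64 (the same bytes are reused between calls)...
  slot.int64_storage[0] = 42;
  std::printf("int64 arg: %lld\n", (long long)slot.int64_storage[0]);

  // ...or a full 4 x float32 SIMD payload.
  for (int i = 0; i < 4; i++) slot.float_storage[i] = 1.0f * i;
  std::printf("float32x4 arg: %f %f %f %f\n", slot.float_storage[0],
              slot.float_storage[1], slot.float_storage[2],
              slot.float_storage[3]);
}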

View file

@ -776,22 +776,25 @@ class Thread : public ThreadState {
static intptr_t vm_tag_offset() { return OFFSET_OF(Thread, vm_tag_); }
int64_t unboxed_int64_runtime_arg() const {
return unboxed_int64_runtime_arg_;
return unboxed_runtime_arg_.int64_storage[0];
}
void set_unboxed_int64_runtime_arg(int64_t value) {
unboxed_int64_runtime_arg_ = value;
}
static intptr_t unboxed_int64_runtime_arg_offset() {
return OFFSET_OF(Thread, unboxed_int64_runtime_arg_);
unboxed_runtime_arg_.int64_storage[0] = value;
}
double unboxed_double_runtime_arg() const {
return unboxed_double_runtime_arg_;
return unboxed_runtime_arg_.double_storage[0];
}
void set_unboxed_double_runtime_arg(double value) {
unboxed_double_runtime_arg_ = value;
unboxed_runtime_arg_.double_storage[0] = value;
}
static intptr_t unboxed_double_runtime_arg_offset() {
return OFFSET_OF(Thread, unboxed_double_runtime_arg_);
simd128_value_t unboxed_simd128_runtime_arg() const {
return unboxed_runtime_arg_;
}
void set_unboxed_simd128_runtime_arg(simd128_value_t value) {
unboxed_runtime_arg_ = value;
}
static intptr_t unboxed_runtime_arg_offset() {
return OFFSET_OF(Thread, unboxed_runtime_arg_);
}
static intptr_t global_object_pool_offset() {
@ -1176,8 +1179,7 @@ class Thread : public ThreadState {
// values from generated code to runtime.
// TODO(dartbug.com/33549): Clean this up when unboxed values
// could be passed as arguments.
ALIGN8 int64_t unboxed_int64_runtime_arg_;
ALIGN8 double unboxed_double_runtime_arg_;
ALIGN8 simd128_value_t unboxed_runtime_arg_;
// State that is cached in the TLS for fast access in generated code.
#define DECLARE_MEMBERS(type_name, member_name, expr, default_init_value) \