[vm/compiler] Limit exposure of untagged pointers to managed memory.

After https://dart-review.googlesource.com/c/sdk/+/330600, there were
more opportunities for the optimizing compiler to introduce or move
GC-triggering instructions, such as allocations or boxings, between the
retrieval of an untagged pointer into GC-movable memory and its use.

To limit the chance of this happening, this CL removes the explicit
loading of the untagged payload address when building the initial
flow graph in most cases where the array is not known to be an external
array (an external string, an external typed data object, or an FFI
Pointer).

The remaining case is view allocation, which extracts the payload
address of the base typed data object underlying the view (an object
that may be GC-movable) in order to calculate the payload address that
should be stored in the data field of the view object. See
https://github.com/dart-lang/sdk/issues/54884.

During canonicalization of LoadIndexed, StoreIndexed, and MemoryCopy
instructions, if the cid of an array input is an external array
(external string, external typed data object, or Pointer), then a
LoadField instruction that extracts the untagged payload address
is inserted before the instruction and the corresponding input is
rebound to that LoadField instruction.
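
The shape of that canonicalization-time rewrite, condensed from the
FlowGraph::ExtractExternalUntaggedPayload helper added later in this CL
(simplified; the full version also consults the input's compile-time
type when choosing the cid):

    // Simplified sketch of the canonicalization-time rewrite.
    bool FlowGraph::ExtractExternalUntaggedPayload(Instruction* instr,
                                                   Value* array,
                                                   classid_t cid) {
      // Nothing to do if the input already carries an untagged address.
      if (array->definition()->representation() != kTagged) return false;
      // Only external payloads (Pointer, external typed data) are handled here.
      if (!IsExternalPayloadClassId(cid)) return false;
      auto* const untag_payload = new (Z)
          LoadFieldInstr(array->CopyWithType(Z), Slot::PointerBase_data(),
                         InnerPointerAccess::kCannotBeInnerPointer,
                         instr->source());
      InsertBefore(instr, untag_payload, instr->env(), FlowGraph::kValue);
      array->BindTo(untag_payload);  // Rebind the array input to the new load.
      return true;
    }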

Once all compiler passes that involve code motion have run, a new pass
looks for LoadIndexed, StoreIndexed, or MemoryCopy instructions where
the array cid stored in the instruction is a typed data cid. In these
cases, if the array is not an internal typed data object, the payload
address is extracted. Waiting until this point ensures that no
GC-triggering instructions can be introduced between the extraction of
the payload address and its use. (Internal typed data objects are left
as-is because the payload address is inside the object itself and does
not require indirection through the object's data field.)
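
Condensed from the FlowGraph::ExtractNonInternalTypedDataPayload helper
added in this CL, the per-instruction check in that late pass looks
roughly like this:

    // Runs only after all code-motion passes have finished.
    void FlowGraph::ExtractNonInternalTypedDataPayload(Instruction* instr,
                                                       Value* array,
                                                       classid_t cid) {
      // Skip if the payload address has already been extracted.
      if (array->definition()->representation() == kUntagged) return;
      if (!IsTypedDataBaseClassId(cid)) return;
      // Internal typed data keeps its tagged receiver: the payload is inside
      // the object, so indexed accesses only need a fixed offset.
      if (IsTypedDataClassId(array->Type()->ToCid())) return;
      ExtractUntaggedPayload(instr, array, Slot::PointerBase_data(),
                             InnerPointerAccess::kMayBeInnerPointer);
    }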

This CL also replaces code conditional on the array cid with code that
is instead conditional on the array element representation in cases
where it makes sense to do so, since this is a less brittle check than
matching on the array cid (e.g., checking for kUnboxedInt8 to load,
store, or copy a signed byte from an array instead of listing all
possible array cids that store signed bytes).
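
For example, StrengthenAlignment in this CL now switches on the element
representation instead of enumerating cids:

    switch (RepresentationUtils::RepresentationOfArrayElement(cid)) {
      case kUnboxedInt8:
      case kUnboxedUint8:
        // Byte accesses never need alignment fixups, regardless of which
        // array cid (typed data, external typed data, string, ...) holds them.
        return kAlignedAccess;
      case kUnboxedFloat32x4:
      case kUnboxedInt32x4:
      case kUnboxedFloat64x2:
        return kAlignedAccess;
      default:
        return alignment;
    }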

This CL also fixes an issue in the ARM64 assembler where calling
LoadFromOffset with an Address whose type is not Offset would silently
generate bad code instead of triggering the ASSERT in
PrepareLargeOffset.
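
Concretely, PrepareLargeOffset no longer defaults its address-type
parameter, and callers such as LoadFromOffset now forward the Address's
actual type so the ASSERT can see it:

    void Assembler::LoadFromOffset(Register dest,
                                   const Address& addr,
                                   OperandSize sz) {
      // Pass addr.type() through instead of implicitly assuming Offset.
      ldr(dest, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()),
          sz);
    }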

TEST=vm/dart/typed_list_index_checkbound_il_test

Issue: https://github.com/dart-lang/sdk/issues/54710
Cq-Include-Trybots: luci.dart.try:vm-aot-android-release-arm64c-try,vm-aot-android-release-arm_x64-try,vm-aot-linux-debug-x64-try,vm-aot-linux-debug-x64c-try,vm-aot-mac-release-arm64-try,vm-aot-mac-release-x64-try,vm-aot-obfuscate-linux-release-x64-try,vm-aot-optimization-level-linux-release-x64-try,vm-aot-win-debug-arm64-try,vm-appjit-linux-debug-x64-try,vm-asan-linux-release-x64-try,vm-checked-mac-release-arm64-try,vm-eager-optimization-linux-release-ia32-try,vm-eager-optimization-linux-release-x64-try,vm-ffi-android-debug-arm-try,vm-ffi-android-debug-arm64c-try,vm-ffi-qemu-linux-release-arm-try,vm-ffi-qemu-linux-release-riscv64-try,vm-fuchsia-release-x64-try,vm-linux-debug-ia32-try,vm-linux-debug-x64-try,vm-linux-debug-x64c-try,vm-mac-debug-arm64-try,vm-mac-debug-x64-try,vm-msan-linux-release-x64-try,vm-reload-linux-debug-x64-try,vm-reload-rollback-linux-debug-x64-try,vm-ubsan-linux-release-x64-try,vm-win-debug-arm64-try,vm-win-debug-x64-try,vm-win-release-ia32-try
Change-Id: I25b5f314943e9254d3d28986d720a5d47f12feeb
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/352363
Reviewed-by: Daco Harkes <dacoharkes@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Tess Strickland <sstrickl@google.com>
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
Tess Strickland 2024-03-22 10:12:39 +00:00 committed by Commit Queue
parent 495d08eca8
commit 0d3ade255b
33 changed files with 1422 additions and 2024 deletions


@ -34,26 +34,13 @@ DEFINE_NATIVE_ENTRY(TypedDataView_typedData, 0, 1) {
return TypedDataView::Cast(instance).typed_data();
}
static bool IsClamped(intptr_t cid) {
COMPILE_ASSERT((kTypedDataUint8ClampedArrayCid + 1 ==
kTypedDataUint8ClampedArrayViewCid) &&
(kTypedDataUint8ClampedArrayCid + 2 ==
kExternalTypedDataUint8ClampedArrayCid) &&
(kTypedDataUint8ClampedArrayCid + 3 ==
kUnmodifiableTypedDataUint8ClampedArrayViewCid));
return cid >= kTypedDataUint8ClampedArrayCid &&
cid <= kUnmodifiableTypedDataUint8ClampedArrayViewCid;
}
static bool IsUint8(intptr_t cid) {
COMPILE_ASSERT(
(kTypedDataUint8ArrayCid + 1 == kTypedDataUint8ArrayViewCid) &&
(kTypedDataUint8ArrayCid + 2 == kExternalTypedDataUint8ArrayCid) &&
(kTypedDataUint8ArrayCid + 3 ==
kUnmodifiableTypedDataUint8ArrayViewCid) &&
(kTypedDataUint8ArrayCid + 4 == kTypedDataUint8ClampedArrayCid));
return cid >= kTypedDataUint8ArrayCid &&
cid <= kUnmodifiableTypedDataUint8ClampedArrayViewCid;
static bool IsTypedDataUint8ArrayClassId(intptr_t cid) {
if (!IsTypedDataBaseClassId(cid)) return false;
const intptr_t internal_cid =
cid - ((cid - kFirstTypedDataCid) % kNumTypedDataCidRemainders) +
kTypedDataCidRemainderInternal;
return internal_cid == kTypedDataUint8ArrayCid ||
internal_cid == kTypedDataUint8ClampedArrayCid;
}
DEFINE_NATIVE_ENTRY(TypedDataBase_setClampedRange, 0, 5) {
@ -96,9 +83,13 @@ DEFINE_NATIVE_ENTRY(TypedDataBase_setClampedRange, 0, 5) {
ASSERT(length_in_bytes <= src_length_in_bytes - src_start_in_bytes);
#endif
ASSERT(IsClampedTypedDataBaseClassId(dst.ptr()->GetClassId()));
// The algorithm below assumes the clamped destination has uint8 elements.
ASSERT_EQUAL(element_size_in_bytes, 1);
ASSERT(IsClamped(dst.ptr()->GetClassId()));
ASSERT(!IsUint8(src.ptr()->GetClassId()));
ASSERT(IsTypedDataUint8ArrayClassId(dst.ptr()->GetClassId()));
// The native entry should only be called when clamping is needed. When the
// source has uint8 elements, a direct memory move should be used instead.
ASSERT(!IsTypedDataUint8ArrayClassId(src.ptr()->GetClassId()));
NoSafepointScope no_safepoint;
uint8_t* dst_data =


@ -63,7 +63,7 @@ class Utils {
}
template <typename T>
static inline int ShiftForPowerOfTwo(T x) {
static constexpr int ShiftForPowerOfTwo(T x) {
ASSERT(IsPowerOfTwo(x));
int num_shifts = 0;
while (x > 1) {


@ -42,9 +42,11 @@ void matchIL$retrieveFromView(FlowGraph graph) {
match.LoadField('src', slot: 'TypedDataView.offset_in_bytes'),
'offset' << match.UnboxInt64('boxed_offset'),
'index' << match.BinaryInt64Op('offset', 'n', op_kind: '+'),
'data' << match.LoadField('typed_data', slot: 'PointerBase.data'),
if (is32BitConfiguration) ...[
'boxed_index' << match.BoxInt64('index'),
],
'data' << match.LoadField('typed_data', slot: 'PointerBase.data'),
if (is32BitConfiguration) ...[
'retval32' << match.LoadIndexed('data', 'boxed_index'),
'retval' << match.IntConverter('retval32', from: 'int32', to: 'int64'),
] else ...[


@ -458,6 +458,21 @@ inline bool IsUnmodifiableTypedDataViewClassId(intptr_t index) {
kTypedDataCidRemainderUnmodifiable);
}
inline bool IsClampedTypedDataBaseClassId(intptr_t index) {
if (!IsTypedDataBaseClassId(index)) return false;
const intptr_t internal_cid =
index - ((index - kFirstTypedDataCid) % kNumTypedDataCidRemainders) +
kTypedDataCidRemainderInternal;
// Currently, the only clamped typed data arrays are Uint8.
return internal_cid == kTypedDataUint8ClampedArrayCid;
}
// Whether the given cid is an external array cid, that is, an array where
// the payload is not in GC-managed memory.
inline bool IsExternalPayloadClassId(classid_t cid) {
return cid == kPointerCid || IsExternalTypedDataClassId(cid);
}
// For predefined cids only. Refer to Class::is_deeply_immutable for
// instances of non-predefined classes.
//


@ -3732,9 +3732,9 @@ bool Assembler::AddressCanHoldConstantIndex(const Object& constant,
intptr_t index_scale,
bool* needs_base) {
ASSERT(needs_base != nullptr);
if ((cid == kTypedDataInt32x4ArrayCid) ||
(cid == kTypedDataFloat32x4ArrayCid) ||
(cid == kTypedDataFloat64x2ArrayCid)) {
auto const rep = RepresentationUtils::RepresentationOfArrayElement(cid);
if ((rep == kUnboxedInt32x4) || (rep == kUnboxedFloat32x4) ||
(rep == kUnboxedFloat64x2)) {
// We are using vldmd/vstmd which do not support offset.
return false;
}


@ -949,25 +949,29 @@ Address Assembler::PrepareLargeOffset(Register base,
void Assembler::LoadFromOffset(Register dest,
const Address& addr,
OperandSize sz) {
ldr(dest, PrepareLargeOffset(addr.base(), addr.offset(), sz), sz);
ldr(dest, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()),
sz);
}
void Assembler::LoadSFromOffset(VRegister dest, Register base, int32_t offset) {
fldrs(dest, PrepareLargeOffset(base, offset, kSWord));
auto const type = Address::AddressType::Offset;
fldrs(dest, PrepareLargeOffset(base, offset, kSWord, type));
}
void Assembler::LoadDFromOffset(VRegister dest, Register base, int32_t offset) {
fldrd(dest, PrepareLargeOffset(base, offset, kDWord));
auto const type = Address::AddressType::Offset;
fldrd(dest, PrepareLargeOffset(base, offset, kDWord, type));
}
void Assembler::LoadQFromOffset(VRegister dest, Register base, int32_t offset) {
fldrq(dest, PrepareLargeOffset(base, offset, kQWord));
auto const type = Address::AddressType::Offset;
fldrq(dest, PrepareLargeOffset(base, offset, kQWord, type));
}
void Assembler::StoreToOffset(Register src,
const Address& addr,
OperandSize sz) {
str(src, PrepareLargeOffset(addr.base(), addr.offset(), sz), sz);
str(src, PrepareLargeOffset(addr.base(), addr.offset(), sz, addr.type()), sz);
}
void Assembler::StorePairToOffset(Register low,
@ -975,21 +979,23 @@ void Assembler::StorePairToOffset(Register low,
Register base,
int32_t offset,
OperandSize sz) {
stp(low, high,
PrepareLargeOffset(base, offset, sz, Address::AddressType::PairOffset),
sz);
auto const type = Address::AddressType::PairOffset;
stp(low, high, PrepareLargeOffset(base, offset, sz, type), sz);
}
void Assembler::StoreSToOffset(VRegister src, Register base, int32_t offset) {
fstrs(src, PrepareLargeOffset(base, offset, kSWord));
auto const type = Address::AddressType::Offset;
fstrs(src, PrepareLargeOffset(base, offset, kSWord, type));
}
void Assembler::StoreDToOffset(VRegister src, Register base, int32_t offset) {
fstrd(src, PrepareLargeOffset(base, offset, kDWord));
auto const type = Address::AddressType::Offset;
fstrd(src, PrepareLargeOffset(base, offset, kDWord, type));
}
void Assembler::StoreQToOffset(VRegister src, Register base, int32_t offset) {
fstrq(src, PrepareLargeOffset(base, offset, kQWord));
auto const type = Address::AddressType::Offset;
fstrq(src, PrepareLargeOffset(base, offset, kQWord, type));
}
void Assembler::VRecps(VRegister vd, VRegister vn) {


@ -1879,7 +1879,7 @@ class Assembler : public AssemblerBase {
Address PrepareLargeOffset(Register base,
int32_t offset,
OperandSize sz,
Address::AddressType addr_type = Address::Offset);
Address::AddressType addr_type);
void LoadFromOffset(Register dest,
const Address& address,
OperandSize sz = kEightBytes) override;


@ -97,14 +97,28 @@ class BlockBuilder : public ValueObject {
}
Definition* AddUnboxInstr(Representation rep, Value* value, bool is_checked) {
// Unbox floats by first unboxing a double then converting it to a float.
auto const unbox_rep = rep == kUnboxedFloat
? kUnboxedDouble
: Boxing::NativeRepresentation(rep);
Definition* unboxed_value =
AddDefinition(UnboxInstr::Create(rep, value, DeoptId::kNone));
AddDefinition(UnboxInstr::Create(unbox_rep, value, DeoptId::kNone));
if (rep != unbox_rep && unboxed_value->IsUnboxInteger()) {
ASSERT(RepresentationUtils::ValueSize(rep) <
RepresentationUtils::ValueSize(unbox_rep));
// Mark unboxing of small unboxed integer representations as truncating.
unboxed_value->AsUnboxInteger()->mark_truncating();
}
if (is_checked) {
// The type of |value| has already been checked and it is safe to
// adjust reaching type. This is done manually because there is no type
// propagation when building intrinsics.
unboxed_value->AsUnbox()->value()->SetReachingType(
TypeForRepresentation(rep));
new CompileType(CompileType::FromUnboxedRepresentation(rep)));
}
if (rep == kUnboxedFloat) {
unboxed_value = AddDefinition(
new DoubleToFloatInstr(new Value(unboxed_value), DeoptId::kNone));
}
return unboxed_value;
}
@ -140,25 +154,6 @@ class BlockBuilder : public ValueObject {
Instruction* last() const { return current_; }
private:
static CompileType* TypeForRepresentation(Representation rep) {
switch (rep) {
case kUnboxedDouble:
return new CompileType(CompileType::FromCid(kDoubleCid));
case kUnboxedFloat32x4:
return new CompileType(CompileType::FromCid(kFloat32x4Cid));
case kUnboxedInt32x4:
return new CompileType(CompileType::FromCid(kInt32x4Cid));
case kUnboxedFloat64x2:
return new CompileType(CompileType::FromCid(kFloat64x2Cid));
case kUnboxedUint32:
case kUnboxedInt64:
return new CompileType(CompileType::Int());
default:
UNREACHABLE();
return nullptr;
}
}
FlowGraph* const flow_graph_;
const InstructionSource source_;
BlockEntryInstr* entry_;


@ -2600,6 +2600,83 @@ void FlowGraph::EliminateEnvironments() {
}
}
void FlowGraph::ExtractUntaggedPayload(Instruction* instr,
Value* array,
const Slot& slot,
InnerPointerAccess access) {
auto* const untag_payload = new (Z)
LoadFieldInstr(array->CopyWithType(Z), slot, access, instr->source());
InsertBefore(instr, untag_payload, instr->env(), FlowGraph::kValue);
array->BindTo(untag_payload);
ASSERT_EQUAL(array->definition()->representation(), kUntagged);
}
bool FlowGraph::ExtractExternalUntaggedPayload(Instruction* instr,
Value* array,
classid_t cid) {
ASSERT(array->instruction() == instr);
// Nothing to do if already untagged.
if (array->definition()->representation() != kTagged) return false;
// If we've determined at compile time that this is an object that has an
// external payload, use the cid of the compile type instead.
if (IsExternalPayloadClassId(array->Type()->ToCid())) {
cid = array->Type()->ToCid();
} else if (!IsExternalPayloadClassId(cid)) {
// Can't extract the payload address if it points to GC-managed memory.
return false;
}
const Slot* slot = nullptr;
if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
slot = &Slot::PointerBase_data();
} else {
UNREACHABLE();
}
ExtractUntaggedPayload(instr, array, *slot,
InnerPointerAccess::kCannotBeInnerPointer);
return true;
}
void FlowGraph::ExtractNonInternalTypedDataPayload(Instruction* instr,
Value* array,
classid_t cid) {
ASSERT(array->instruction() == instr);
// Skip if the array payload has already been extracted.
if (array->definition()->representation() == kUntagged) return;
if (!IsTypedDataBaseClassId(cid)) return;
auto const type_cid = array->Type()->ToCid();
// For external PointerBase objects, the payload should have already been
// extracted during canonicalization.
ASSERT(!IsExternalPayloadClassId(cid) || !IsExternalPayloadClassId(type_cid));
// Don't extract if the array is an internal typed data object.
if (IsTypedDataClassId(type_cid)) return;
ExtractUntaggedPayload(instr, array, Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
}
void FlowGraph::ExtractNonInternalTypedDataPayloads() {
for (BlockIterator block_it = reverse_postorder_iterator(); !block_it.Done();
block_it.Advance()) {
BlockEntryInstr* block = block_it.Current();
for (ForwardInstructionIterator it(block); !it.Done(); it.Advance()) {
Instruction* current = it.Current();
if (auto* const load_indexed = current->AsLoadIndexed()) {
ExtractNonInternalTypedDataPayload(load_indexed, load_indexed->array(),
load_indexed->class_id());
} else if (auto* const store_indexed = current->AsStoreIndexed()) {
ExtractNonInternalTypedDataPayload(
store_indexed, store_indexed->array(), store_indexed->class_id());
} else if (auto* const memory_copy = current->AsMemoryCopy()) {
ExtractNonInternalTypedDataPayload(memory_copy, memory_copy->src(),
memory_copy->src_cid());
ExtractNonInternalTypedDataPayload(memory_copy, memory_copy->dest(),
memory_copy->dest_cid());
}
}
}
}
bool FlowGraph::Canonicalize() {
bool changed = false;


@ -478,6 +478,13 @@ class FlowGraph : public ZoneAllocated {
// Remove environments from the instructions which do not deoptimize.
void EliminateEnvironments();
// Extract typed data payloads prior to any LoadIndexed, StoreIndexed, or
// MemoryCopy instruction where the incoming typed data array(s) are not
// proven to be internal typed data objects at compile time.
//
// Once this is done, no intra-block code motion should be performed.
void ExtractNonInternalTypedDataPayloads();
bool IsReceiver(Definition* def) const;
// Optimize (a << b) & c pattern: if c is a positive Smi or zero, then the
@ -578,6 +585,18 @@ class FlowGraph : public ZoneAllocated {
: CompilationMode::kUnoptimized;
}
// If either IsExternalPayloadClassId([cid]) or
// IsExternalPayloadClassId(array()->Type()->ToCid()) is true and
// [array] (an input of [instr]) is tagged, inserts a load of the array
// payload as an untagged pointer and rebinds [array] to the new load.
//
// Otherwise does not change the flow graph.
//
// Returns whether any changes were made to the flow graph.
bool ExtractExternalUntaggedPayload(Instruction* instr,
Value* array,
classid_t cid);
private:
friend class FlowGraphCompiler; // TODO(ajcbik): restructure
friend class FlowGraphChecker;
@ -670,6 +689,15 @@ class FlowGraph : public ZoneAllocated {
Representation rep,
intptr_t cid);
void ExtractUntaggedPayload(Instruction* instr,
Value* array,
const Slot& slot,
InnerPointerAccess access);
void ExtractNonInternalTypedDataPayload(Instruction* instr,
Value* array,
classid_t cid);
Thread* thread_;
// DiscoverBlocks computes parent_ and assigned_vars_ which are then used


@ -2656,7 +2656,7 @@ Instruction* CheckStackOverflowInstr::Canonicalize(FlowGraph* flow_graph) {
}
bool LoadFieldInstr::IsFixedLengthArrayCid(intptr_t cid) {
if (IsTypedDataClassId(cid) || IsExternalTypedDataClassId(cid)) {
if (IsTypedDataBaseClassId(cid)) {
return true;
}
@ -6715,9 +6715,7 @@ Definition* CheckBoundBaseInstr::Canonicalize(FlowGraph* flow_graph) {
}
intptr_t CheckArrayBoundInstr::LengthOffsetFor(intptr_t class_id) {
if (IsTypedDataClassId(class_id) || IsTypedDataViewClassId(class_id) ||
IsUnmodifiableTypedDataViewClassId(class_id) ||
IsExternalTypedDataClassId(class_id)) {
if (IsTypedDataBaseClassId(class_id)) {
return compiler::target::TypedDataBase::length_offset();
}
@ -6747,24 +6745,20 @@ Definition* CheckWritableInstr::Canonicalize(FlowGraph* flow_graph) {
static AlignmentType StrengthenAlignment(intptr_t cid,
AlignmentType alignment) {
switch (cid) {
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kOneByteStringCid:
switch (RepresentationUtils::RepresentationOfArrayElement(cid)) {
case kUnboxedInt8:
case kUnboxedUint8:
// Don't need to worry about alignment for accessing bytes.
return kAlignedAccess;
case kTypedDataFloat64x2ArrayCid:
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
case kUnboxedFloat32x4:
case kUnboxedInt32x4:
case kUnboxedFloat64x2:
// TODO(rmacnak): Investigate alignment requirements of floating point
// loads.
return kAlignedAccess;
default:
return alignment;
}
return alignment;
}
LoadIndexedInstr::LoadIndexedInstr(Value* array,
@ -6783,25 +6777,22 @@ LoadIndexedInstr::LoadIndexedInstr(Value* array,
alignment_(StrengthenAlignment(class_id, alignment)),
token_pos_(source.token_pos),
result_type_(result_type) {
SetInputAt(0, array);
SetInputAt(1, index);
// In particular, notice that kPointerCid is _not_ supported because it gives
// no information about whether the elements are signed for elements with
// unboxed integer representations. The constructor must take that
// information separately to allow kPointerCid.
ASSERT(class_id != kPointerCid);
SetInputAt(kArrayPos, array);
SetInputAt(kIndexPos, index);
}
Definition* LoadIndexedInstr::Canonicalize(FlowGraph* flow_graph) {
auto Z = flow_graph->zone();
if (auto* const untag_payload = array()->definition()->AsLoadField()) {
// If loading from an internal typed data object, remove the load of
// PointerBase.data, as LoadIndexed knows how to load from a tagged
// internal typed data object directly and the LoadField may interfere with
// possible allocation sinking.
if (untag_payload->slot().IsIdentical(Slot::PointerBase_data()) &&
IsTypedDataClassId(untag_payload->instance()->Type()->ToCid())) {
array()->BindTo(untag_payload->instance()->definition());
}
}
flow_graph->ExtractExternalUntaggedPayload(this, array(), class_id());
if (auto box = index()->definition()->AsBoxInt64()) {
// TODO(dartbug.com/39432): Make LoadIndexed fully support unboxed indices.
if (!box->ComputeCanDeoptimize() && compiler::target::kWordSize == 8) {
auto Z = flow_graph->zone();
auto load = new (Z) LoadIndexedInstr(
array()->CopyWithType(Z), box->value()->CopyWithType(Z),
/*index_unboxed=*/true, index_scale(), class_id(), alignment_,
@ -6837,26 +6828,23 @@ StoreIndexedInstr::StoreIndexedInstr(Value* array,
alignment_(StrengthenAlignment(class_id, alignment)),
token_pos_(source.token_pos),
speculative_mode_(speculative_mode) {
// In particular, notice that kPointerCid is _not_ supported because it gives
// no information about whether the elements are signed for elements with
// unboxed integer representations. The constructor must take that information
// separately to allow kPointerCid.
ASSERT(class_id != kPointerCid);
SetInputAt(kArrayPos, array);
SetInputAt(kIndexPos, index);
SetInputAt(kValuePos, value);
}
Instruction* StoreIndexedInstr::Canonicalize(FlowGraph* flow_graph) {
auto Z = flow_graph->zone();
if (auto* const untag_payload = array()->definition()->AsLoadField()) {
// If loading from an internal typed data object, remove the load of
// PointerBase.data, as LoadIndexed knows how to load from a tagged
// internal typed data object directly and the LoadField may interfere with
// possible allocation sinking.
if (untag_payload->slot().IsIdentical(Slot::PointerBase_data()) &&
IsTypedDataClassId(untag_payload->instance()->Type()->ToCid())) {
array()->BindTo(untag_payload->instance()->definition());
}
}
flow_graph->ExtractExternalUntaggedPayload(this, array(), class_id());
if (auto box = index()->definition()->AsBoxInt64()) {
// TODO(dartbug.com/39432): Make StoreIndexed fully support unboxed indices.
if (!box->ComputeCanDeoptimize() && compiler::target::kWordSize == 8) {
auto Z = flow_graph->zone();
auto store = new (Z) StoreIndexedInstr(
array()->CopyWithType(Z), box->value()->CopyWithType(Z),
value()->CopyWithType(Z), emit_store_barrier_,
@ -6904,6 +6892,9 @@ static const intptr_t kMaxElementSizeForEfficientCopy =
#endif
Instruction* MemoryCopyInstr::Canonicalize(FlowGraph* flow_graph) {
flow_graph->ExtractExternalUntaggedPayload(this, src(), src_cid_);
flow_graph->ExtractExternalUntaggedPayload(this, dest(), dest_cid_);
if (!length()->BindsToSmiConstant()) {
return this;
} else if (length()->BoundSmiConstant() == 0) {
@ -6933,16 +6924,16 @@ Instruction* MemoryCopyInstr::Canonicalize(FlowGraph* flow_graph) {
return this;
}
Zone* const zone = flow_graph->zone();
// The new element size is larger than the original one, so it must be > 1.
// That means unboxed integers will always require a shift, but Smis
// may not if element_size == 2, so always use Smis.
auto* const length_instr = flow_graph->GetConstant(
Integer::ZoneHandle(zone, Integer::New(new_length, Heap::kOld)));
auto* const src_start_instr = flow_graph->GetConstant(
Integer::ZoneHandle(zone, Integer::New(new_src_start, Heap::kOld)));
auto* const dest_start_instr = flow_graph->GetConstant(
Integer::ZoneHandle(zone, Integer::New(new_dest_start, Heap::kOld)));
auto* const Z = flow_graph->zone();
auto* const length_instr =
flow_graph->GetConstant(Smi::ZoneHandle(Z, Smi::New(new_length)));
auto* const src_start_instr =
flow_graph->GetConstant(Smi::ZoneHandle(Z, Smi::New(new_src_start)));
auto* const dest_start_instr =
flow_graph->GetConstant(Smi::ZoneHandle(Z, Smi::New(new_dest_start)));
length()->BindTo(length_instr);
src_start()->BindTo(src_start_instr);
dest_start()->BindTo(dest_start_instr);
@ -6954,8 +6945,8 @@ Instruction* MemoryCopyInstr::Canonicalize(FlowGraph* flow_graph) {
void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register src_reg = locs()->in(kSrcPos).reg();
const Register dest_reg = locs()->in(kDestPos).reg();
const Representation src_rep = RequiredInputRepresentation(kSrcPos);
const Representation dest_rep = RequiredInputRepresentation(kDestPos);
const Representation src_rep = src()->definition()->representation();
const Representation dest_rep = dest()->definition()->representation();
const Location& src_start_loc = locs()->in(kSrcStartPos);
const Location& dest_start_loc = locs()->in(kDestStartPos);
const Location& length_loc = locs()->in(kLengthPos);


@ -3112,39 +3112,21 @@ class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
Value* length,
bool unboxed_inputs,
bool can_overlap = true)
: MemoryCopyInstr(Instance::ElementSizeFor(src_cid),
src,
kTagged,
src_cid,
dest,
kTagged,
dest_cid,
src_start,
dest_start,
length,
unboxed_inputs,
can_overlap) {}
MemoryCopyInstr(intptr_t element_size,
Value* src,
Value* dest,
Value* src_start,
Value* dest_start,
Value* length,
bool unboxed_inputs,
bool can_overlap = true)
: MemoryCopyInstr(element_size,
src,
kUntagged,
kIllegalCid,
dest,
kUntagged,
kIllegalCid,
src_start,
dest_start,
length,
unboxed_inputs,
can_overlap) {}
: src_cid_(src_cid),
dest_cid_(dest_cid),
element_size_(Instance::ElementSizeFor(src_cid)),
unboxed_inputs_(unboxed_inputs),
can_overlap_(can_overlap) {
ASSERT(IsArrayTypeSupported(src_cid));
ASSERT(IsArrayTypeSupported(dest_cid));
ASSERT_EQUAL(Instance::ElementSizeFor(src_cid),
Instance::ElementSizeFor(dest_cid));
SetInputAt(kSrcPos, src);
SetInputAt(kDestPos, dest);
SetInputAt(kSrcStartPos, src_start);
SetInputAt(kDestStartPos, dest_start);
SetInputAt(kLengthPos, length);
}
enum {
kSrcPos = 0,
@ -3157,12 +3139,11 @@ class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
DECLARE_INSTRUCTION(MemoryCopy)
virtual Representation RequiredInputRepresentation(intptr_t index) const {
if (index == kSrcPos) {
return src_representation_;
}
if (index == kDestPos) {
return dest_representation_;
if (index == kSrcPos || index == kDestPos) {
// Can be either tagged or untagged.
return kNoRepresentation;
}
ASSERT(index <= kLengthPos);
return unboxed_inputs() ? kUnboxedIntPtr : kTagged;
}
@ -3174,8 +3155,6 @@ class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
if (element_size_ != copy->element_size_) return false;
if (unboxed_inputs_ != copy->unboxed_inputs_) return false;
if (can_overlap_ != copy->can_overlap_) return false;
if (src_representation_ != copy->src_representation_) return false;
if (dest_representation_ != copy->dest_representation_) return false;
if (src_cid_ != copy->src_cid_) return false;
if (dest_cid_ != copy->dest_cid_) return false;
return true;
@ -3189,6 +3168,8 @@ class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
Value* dest_start() const { return inputs_[kDestStartPos]; }
Value* length() const { return inputs_[kLengthPos]; }
classid_t src_cid() const { return src_cid_; }
classid_t dest_cid() const { return dest_cid_; }
intptr_t element_size() const { return element_size_; }
bool unboxed_inputs() const { return unboxed_inputs_; }
bool can_overlap() const { return can_overlap_; }
@ -3205,9 +3186,7 @@ class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
F(const classid_t, dest_cid_) \
F(intptr_t, element_size_) \
F(bool, unboxed_inputs_) \
F(const bool, can_overlap_) \
F(const Representation, src_representation_) \
F(const Representation, dest_representation_)
F(const bool, can_overlap_)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MemoryCopyInstr,
TemplateInstruction,
@ -3215,46 +3194,6 @@ class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
#undef FIELD_LIST
private:
MemoryCopyInstr(intptr_t element_size,
Value* src,
Representation src_representation,
classid_t src_cid,
Value* dest,
Representation dest_representation,
classid_t dest_cid,
Value* src_start,
Value* dest_start,
Value* length,
bool unboxed_inputs,
bool can_overlap = true)
: src_cid_(src_cid),
dest_cid_(dest_cid),
element_size_(element_size),
unboxed_inputs_(unboxed_inputs),
can_overlap_(can_overlap),
src_representation_(src_representation),
dest_representation_(dest_representation) {
if (src_representation == kTagged) {
ASSERT(IsArrayTypeSupported(src_cid));
ASSERT_EQUAL(Instance::ElementSizeFor(src_cid), element_size);
} else {
ASSERT_EQUAL(src_representation, kUntagged);
ASSERT_EQUAL(src_cid, kIllegalCid);
}
if (dest_representation == kTagged) {
ASSERT(IsArrayTypeSupported(dest_cid));
ASSERT_EQUAL(Instance::ElementSizeFor(dest_cid), element_size);
} else {
ASSERT_EQUAL(dest_representation, kUntagged);
ASSERT_EQUAL(dest_cid, kIllegalCid);
}
SetInputAt(kSrcPos, src);
SetInputAt(kDestPos, dest);
SetInputAt(kSrcStartPos, src_start);
SetInputAt(kDestStartPos, dest_start);
SetInputAt(kLengthPos, length);
}
// Set array_reg to point to the index indicated by start (contained in
// start_loc) of the typed data or string in array (contained in array_reg).
// If array_rep is tagged, then the payload address is retrieved according
@ -3299,16 +3238,15 @@ class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
compiler::Label* copy_forwards = nullptr);
static bool IsArrayTypeSupported(classid_t array_cid) {
if (IsTypedDataBaseClassId(array_cid)) {
return true;
}
switch (array_cid) {
case kOneByteStringCid:
case kTwoByteStringCid:
return true;
default:
return false;
}
// We don't handle clamping negative values in this instruction, instead
// those are handled via a native call.
if (IsClampedTypedDataBaseClassId(array_cid)) return false;
// We don't support the following cids for the given reasons:
// * kStringCid: doesn't give element size information or information
// about how the payload address is calculated.
// * kPointerCid: doesn't give element size or signedness information.
if (array_cid == kPointerCid || array_cid == kStringCid) return false;
return IsTypedDataBaseClassId(array_cid) || IsStringClassId(array_cid);
}
DISALLOW_COPY_AND_ASSIGN(MemoryCopyInstr);
@ -6698,6 +6636,8 @@ class LoadIndexedInstr : public TemplateDefinition<2, NoThrow> {
const InstructionSource& source,
CompileType* result_type = nullptr);
enum { kArrayPos = 0, kIndexPos = 1 };
TokenPosition token_pos() const { return token_pos_; }
DECLARE_INSTRUCTION(LoadIndexed)
@ -6705,9 +6645,9 @@ class LoadIndexedInstr : public TemplateDefinition<2, NoThrow> {
virtual bool RecomputeType();
virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT(idx == 0 || idx == 1);
// The array may be tagged or untagged (for external arrays).
if (idx == 0) return kNoRepresentation;
if (idx == kArrayPos) return kNoRepresentation;
ASSERT_EQUAL(idx, kIndexPos);
if (index_unboxed_) {
#if defined(TARGET_ARCH_IS_64_BIT)
@ -6720,21 +6660,24 @@ class LoadIndexedInstr : public TemplateDefinition<2, NoThrow> {
}
}
bool IsExternal() const {
bool IsUntagged() const {
return array()->definition()->representation() == kUntagged;
}
Value* array() const { return inputs_[0]; }
Value* index() const { return inputs_[1]; }
Value* array() const { return inputs_[kArrayPos]; }
Value* index() const { return inputs_[kIndexPos]; }
intptr_t index_scale() const { return index_scale_; }
intptr_t class_id() const { return class_id_; }
bool aligned() const { return alignment_ == kAlignedAccess; }
virtual intptr_t DeoptimizationTarget() const { return GetDeoptId(); }
virtual bool ComputeCanDeoptimize() const {
return GetDeoptId() != DeoptId::kNone;
virtual intptr_t DeoptimizationTarget() const {
// Direct access since this instruction cannot deoptimize, and the deopt-id
// was inherited from another instruction that could deoptimize.
return GetDeoptId();
}
virtual bool ComputeCanDeoptimize() const { return false; }
// The representation returned by LoadIndexed for arrays with the given cid.
// May not match the representation for the element returned by
// RepresentationUtils::RepresentationOfArrayElement.
@ -6750,6 +6693,8 @@ class LoadIndexedInstr : public TemplateDefinition<2, NoThrow> {
virtual Definition* Canonicalize(FlowGraph* flow_graph);
PRINT_OPERANDS_TO_SUPPORT
#define FIELD_LIST(F) \
F(const bool, index_unboxed_) \
F(const intptr_t, index_scale_) \
@ -7039,7 +6984,7 @@ class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
virtual Representation RequiredInputRepresentation(intptr_t idx) const;
bool IsExternal() const {
bool IsUntagged() const {
return array()->definition()->representation() == kUntagged;
}
@ -7053,10 +6998,10 @@ class StoreIndexedInstr : public TemplateInstruction<3, NoThrow> {
virtual bool MayHaveVisibleEffect() const { return true; }
void PrintOperandsTo(BaseTextBuffer* f) const;
virtual Instruction* Canonicalize(FlowGraph* flow_graph);
PRINT_OPERANDS_TO_SUPPORT
#define FIELD_LIST(F) \
F(StoreBarrierType, emit_store_barrier_) \
F(const bool, index_unboxed_) \


@ -166,6 +166,12 @@ static constexpr intptr_t kMaxMemoryCopyElementSize =
LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a MemoryCopy
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
!IsTypedDataBaseClassId(dest_cid_)) ||
opt);
const intptr_t kNumInputs = 5;
const intptr_t kNumTemps = element_size_ >= kMaxMemoryCopyElementSize ? 1 : 0;
LocationSummary* locs = new (zone)
@ -397,8 +403,13 @@ void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
if (array_rep != kTagged) {
// Do nothing, array_reg already contains the payload address.
} else if (IsTypedDataBaseClassId(array_cid)) {
__ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
// The incoming array must have been proven to be an internal typed data
// object, where the payload is in the object and we can just offset.
ASSERT_EQUAL(array_rep, kTagged);
offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
} else {
ASSERT_EQUAL(array_rep, kTagged);
ASSERT(!IsExternalPayloadClassId(array_cid));
switch (array_cid) {
case kOneByteStringCid:
offset =
@ -2102,56 +2113,58 @@ void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const bool directly_addressable =
aligned() && representation() != kUnboxedInt64;
// The compiler must optimize any function that includes a LoadIndexed
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
const bool directly_addressable = aligned() && rep != kUnboxedInt64;
const intptr_t kNumInputs = 2;
intptr_t kNumTemps = 0;
if (!directly_addressable) {
kNumTemps += 1;
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble)) {
if (rep == kUnboxedFloat || rep == kUnboxedDouble) {
kNumTemps += 1;
}
}
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_in(kArrayPos, Location::RequiresRegister());
bool needs_base;
const bool can_be_constant =
index()->BindsToConstant() &&
compiler::Assembler::AddressCanHoldConstantIndex(
index()->BoundConstant(), /*load=*/true, IsExternal(), class_id(),
index()->BoundConstant(), /*load=*/true, IsUntagged(), class_id(),
index_scale(), &needs_base);
// We don't need to check if [needs_base] is true, since we use TMP as the
// temp register in this case and so don't need to allocate a temp register.
locs->set_in(1, can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: Location::RequiresRegister());
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
if (class_id() == kTypedDataFloat32ArrayCid) {
locs->set_in(kIndexPos,
can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: Location::RequiresRegister());
if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedInt64) {
locs->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
} else {
locs->set_out(0, Location::RequiresRegister());
}
} else if (RepresentationUtils::IsUnboxed(rep)) {
if (rep == kUnboxedFloat) {
// Need register < Q7 for float operations.
// TODO(30953): Support register range constraints in the regalloc.
locs->set_out(0, Location::FpuRegisterLocation(Q6));
} else {
locs->set_out(0, Location::RequiresFpuRegister());
}
} else if (representation() == kUnboxedInt64) {
ASSERT(class_id() == kTypedDataInt64ArrayCid ||
class_id() == kTypedDataUint64ArrayCid);
locs->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
} else {
locs->set_out(0, Location::RequiresRegister());
}
if (!directly_addressable) {
locs->set_temp(0, Location::RequiresRegister());
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble)) {
if (rep == kUnboxedFloat || rep == kUnboxedDouble) {
locs->set_temp(1, Location::RequiresRegister());
}
}
@ -2159,11 +2172,13 @@ LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
}
void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const bool directly_addressable =
aligned() && representation() != kUnboxedInt64;
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
ASSERT(representation() == Boxing::NativeRepresentation(rep));
const bool directly_addressable = aligned() && rep != kUnboxedInt64;
// The array register points to the backing store for external arrays.
const Register array = locs()->in(0).reg();
const Location index = locs()->in(1);
const Register array = locs()->in(kArrayPos).reg();
const Location index = locs()->in(kIndexPos);
const Register address =
directly_addressable ? kNoRegister : locs()->temp(0).reg();
@ -2172,12 +2187,12 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
element_address =
index.IsRegister()
? __ ElementAddressForRegIndex(true, // Load.
IsExternal(), class_id(),
IsUntagged(), class_id(),
index_scale(), index_unboxed_, array,
index.reg())
: __ ElementAddressForIntIndex(
true, // Load.
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
compiler::target::SmiValue(index.constant()),
IP); // Temp register.
// Warning: element_address may use register IP as base.
@ -2185,86 +2200,19 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (index.IsRegister()) {
__ LoadElementAddressForRegIndex(address,
true, // Load.
IsExternal(), class_id(), index_scale(),
IsUntagged(), class_id(), index_scale(),
index_unboxed_, array, index.reg());
} else {
__ LoadElementAddressForIntIndex(
address,
true, // Load.
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
compiler::target::SmiValue(index.constant()));
}
}
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
const QRegister result = locs()->out(0).fpu_reg();
const DRegister dresult0 = EvenDRegisterOf(result);
switch (class_id()) {
case kTypedDataFloat32ArrayCid:
// Load single precision float.
// vldrs does not support indexed addressing.
if (aligned()) {
__ vldrs(EvenSRegisterOf(dresult0), element_address);
} else {
const Register value = locs()->temp(1).reg();
__ LoadWordUnaligned(value, address, TMP);
__ vmovsr(EvenSRegisterOf(dresult0), value);
}
break;
case kTypedDataFloat64ArrayCid:
// vldrd does not support indexed addressing.
if (aligned()) {
__ vldrd(dresult0, element_address);
} else {
const Register value = locs()->temp(1).reg();
__ LoadWordUnaligned(value, address, TMP);
__ vmovdr(dresult0, 0, value);
__ AddImmediate(address, address, 4);
__ LoadWordUnaligned(value, address, TMP);
__ vmovdr(dresult0, 1, value);
}
break;
case kTypedDataFloat64x2ArrayCid:
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
ASSERT(element_address.Equals(compiler::Address(IP)));
ASSERT(aligned());
__ vldmd(IA, IP, dresult0, 2);
break;
default:
UNREACHABLE();
}
return;
}
switch (class_id()) {
case kTypedDataInt32ArrayCid: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedInt32);
if (aligned()) {
__ ldr(result, element_address);
} else {
__ LoadWordUnaligned(result, address, TMP);
}
break;
}
case kTypedDataUint32ArrayCid: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedUint32);
if (aligned()) {
__ ldr(result, element_address);
} else {
__ LoadWordUnaligned(result, address, TMP);
}
break;
}
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid: {
ASSERT(representation() == kUnboxedInt64);
if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedInt64) {
ASSERT(!directly_addressable); // need to add to register
ASSERT(locs()->out(0).IsPairLocation());
PairLocation* result_pair = locs()->out(0).AsPairLocation();
@ -2279,65 +2227,81 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ AddImmediate(address, address, compiler::target::kWordSize);
__ LoadWordUnaligned(result_hi, address, TMP);
}
break;
}
case kTypedDataInt8ArrayCid: {
} else {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedIntPtr);
ASSERT(index_scale() == 1);
ASSERT(aligned());
__ ldrsb(result, element_address);
break;
}
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kOneByteStringCid: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedIntPtr);
ASSERT(index_scale() == 1);
ASSERT(aligned());
__ ldrb(result, element_address);
break;
}
case kTypedDataInt16ArrayCid: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedIntPtr);
if (aligned()) {
__ ldrsh(result, element_address);
__ LoadFromOffset(result, element_address,
RepresentationUtils::OperandSize(rep));
} else {
__ LoadHalfWordUnaligned(result, address, TMP);
switch (rep) {
case kUnboxedUint32:
case kUnboxedInt32:
__ LoadWordUnaligned(result, address, TMP);
break;
case kUnboxedUint16:
__ LoadHalfWordUnsignedUnaligned(result, address, TMP);
break;
case kUnboxedInt16:
__ LoadHalfWordUnaligned(result, address, TMP);
break;
default:
UNREACHABLE();
break;
}
}
break;
}
case kTypedDataUint16ArrayCid:
case kTwoByteStringCid: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedIntPtr);
} else if (RepresentationUtils::IsUnboxed(rep)) {
const QRegister result = locs()->out(0).fpu_reg();
const DRegister dresult0 = EvenDRegisterOf(result);
if (rep == kUnboxedFloat) {
// Load single precision float.
// vldrs does not support indexed addressing.
if (aligned()) {
__ ldrh(result, element_address);
__ vldrs(EvenSRegisterOf(dresult0), element_address);
} else {
__ LoadHalfWordUnsignedUnaligned(result, address, TMP);
const Register value = locs()->temp(1).reg();
__ LoadWordUnaligned(value, address, TMP);
__ vmovsr(EvenSRegisterOf(dresult0), value);
}
break;
}
default: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kTagged);
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
(class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
__ ldr(result, element_address);
break;
} else if (rep == kUnboxedDouble) {
// vldrd does not support indexed addressing.
if (aligned()) {
__ vldrd(dresult0, element_address);
} else {
const Register value = locs()->temp(1).reg();
__ LoadWordUnaligned(value, address, TMP);
__ vmovdr(dresult0, 0, value);
__ AddImmediate(address, address, 4);
__ LoadWordUnaligned(value, address, TMP);
__ vmovdr(dresult0, 1, value);
}
} else {
ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
rep == kUnboxedFloat64x2);
ASSERT(element_address.Equals(compiler::Address(IP)));
ASSERT(aligned());
__ vldmd(IA, IP, dresult0, 2);
}
} else {
const Register result = locs()->out(0).reg();
ASSERT(rep == kTagged);
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
(class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
__ ldr(result, element_address);
}
}
LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a StoreIndexed
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
const bool directly_addressable =
aligned() && class_id() != kTypedDataInt64ArrayCid &&
class_id() != kTypedDataUint64ArrayCid && class_id() != kArrayCid;
aligned() && rep != kUnboxedInt64 && class_id() != kArrayCid;
const intptr_t kNumInputs = 3;
LocationSummary* locs;
@ -2346,7 +2310,7 @@ LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
const bool can_be_constant =
index()->BindsToConstant() &&
compiler::Assembler::AddressCanHoldConstantIndex(
index()->BoundConstant(), /*load=*/false, IsExternal(), class_id(),
index()->BoundConstant(), /*load=*/false, IsUntagged(), class_id(),
index_scale(), &needs_base);
if (can_be_constant) {
if (!directly_addressable) {
@ -2374,58 +2338,42 @@ LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
locs->set_temp(i, Location::RequiresRegister());
}
switch (class_id()) {
case kArrayCid:
locs->set_in(2, ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
if (ShouldEmitStoreBarrier()) {
locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
}
break;
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataUint8ClampedArrayCid:
locs->set_in(2, LocationRegisterOrConstant(value()));
break;
case kExternalTypedDataUint8ArrayCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
locs->set_in(2, Location::RequiresRegister());
break;
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedInt64) {
locs->set_in(2, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
break;
case kTypedDataFloat32ArrayCid:
} else if (rep == kUnboxedInt8 || rep == kUnboxedUint8) {
locs->set_in(2, LocationRegisterOrConstant(value()));
} else {
locs->set_in(2, Location::RequiresRegister());
}
} else if (RepresentationUtils::IsUnboxed(rep)) {
if (rep == kUnboxedFloat) {
// Need low register (< Q7).
locs->set_in(2, Location::FpuRegisterLocation(Q6));
break;
case kTypedDataFloat64ArrayCid: // TODO(srdjan): Support Float64 constants.
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
} else { // TODO(srdjan): Support Float64 constants.
locs->set_in(2, Location::RequiresFpuRegister());
break;
default:
UNREACHABLE();
return nullptr;
}
} else if (class_id() == kArrayCid) {
locs->set_in(2, ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
if (ShouldEmitStoreBarrier()) {
locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
}
}
return locs;
}
void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
ASSERT(RequiredInputRepresentation(2) == Boxing::NativeRepresentation(rep));
const bool directly_addressable =
aligned() && class_id() != kTypedDataInt64ArrayCid &&
class_id() != kTypedDataUint64ArrayCid && class_id() != kArrayCid;
aligned() && rep != kUnboxedInt64 && class_id() != kArrayCid;
// The array register points to the backing store for external arrays.
const Register array = locs()->in(0).reg();
const Location index = locs()->in(1);
@ -2439,106 +2387,54 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
element_address =
index.IsRegister()
? __ ElementAddressForRegIndex(false, // Store.
IsExternal(), class_id(),
IsUntagged(), class_id(),
index_scale(), index_unboxed_, array,
index.reg())
: __ ElementAddressForIntIndex(
false, // Store.
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
compiler::target::SmiValue(index.constant()), temp);
} else {
if (index.IsRegister()) {
__ LoadElementAddressForRegIndex(temp,
false, // Store.
IsExternal(), class_id(), index_scale(),
IsUntagged(), class_id(), index_scale(),
index_unboxed_, array, index.reg());
} else {
__ LoadElementAddressForIntIndex(
temp,
false, // Store.
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
compiler::target::SmiValue(index.constant()));
}
}
switch (class_id()) {
case kArrayCid:
if (ShouldEmitStoreBarrier()) {
const Register value = locs()->in(2).reg();
__ StoreIntoArray(array, temp, value, CanValueBeSmi());
} else if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreIntoObjectNoBarrier(array, compiler::Address(temp), constant);
} else {
const Register value = locs()->in(2).reg();
__ StoreIntoObjectNoBarrier(array, compiler::Address(temp), value);
if (IsClampedTypedDataBaseClassId(class_id())) {
ASSERT(rep == kUnboxedUint8);
if (locs()->in(2).IsConstant()) {
intptr_t value = compiler::target::SmiValue(locs()->in(2).constant());
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
break;
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kOneByteStringCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (locs()->in(2).IsConstant()) {
__ LoadImmediate(IP,
compiler::target::SmiValue(locs()->in(2).constant()));
__ strb(IP, element_address);
} else {
const Register value = locs()->in(2).reg();
__ strb(value, element_address);
}
break;
}
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ClampedArrayCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (locs()->in(2).IsConstant()) {
intptr_t value = compiler::target::SmiValue(locs()->in(2).constant());
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
__ LoadImmediate(IP, static_cast<int8_t>(value));
__ strb(IP, element_address);
} else {
const Register value = locs()->in(2).reg();
// Clamp to 0x00 or 0xFF respectively.
__ LoadImmediate(IP, 0xFF);
__ cmp(value,
compiler::Operand(IP)); // Compare Smi value and smi 0xFF.
__ mov(IP, compiler::Operand(0), LE); // IP = value <= 0xFF ? 0 : 0xFF.
__ mov(IP, compiler::Operand(value),
LS); // IP = value in range ? value : IP.
__ strb(IP, element_address);
}
break;
}
case kTwoByteStringCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
__ LoadImmediate(IP, static_cast<int8_t>(value));
__ strb(IP, element_address);
} else {
const Register value = locs()->in(2).reg();
if (aligned()) {
__ strh(value, element_address);
} else {
__ StoreHalfWordUnaligned(value, temp, temp2);
}
break;
// Clamp to 0x00 or 0xFF respectively.
__ LoadImmediate(IP, 0xFF);
// Compare Smi value and smi 0xFF.
__ cmp(value, compiler::Operand(IP));
// IP = value <= 0xFF ? 0 : 0xFF.
__ mov(IP, compiler::Operand(0), LE);
// IP = value in range ? value : IP.
__ mov(IP, compiler::Operand(value), LS);
__ strb(IP, element_address);
}
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid: {
const Register value = locs()->in(2).reg();
if (aligned()) {
__ str(value, element_address);
} else {
__ StoreWordUnaligned(value, temp, temp2);
}
break;
}
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid: {
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedInt64) {
ASSERT(!directly_addressable); // need to add to register
ASSERT(locs()->in(2).IsPairLocation());
PairLocation* value_pair = locs()->in(2).AsPairLocation();
@ -2552,9 +2448,38 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ AddImmediate(temp, temp, compiler::target::kWordSize);
__ StoreWordUnaligned(value_hi, temp, temp2);
}
break;
} else if (rep == kUnboxedInt8 || rep == kUnboxedUint8) {
if (locs()->in(2).IsConstant()) {
__ LoadImmediate(IP,
compiler::target::SmiValue(locs()->in(2).constant()));
__ strb(IP, element_address);
} else {
const Register value = locs()->in(2).reg();
__ strb(value, element_address);
}
} else {
const Register value = locs()->in(2).reg();
if (aligned()) {
__ StoreToOffset(value, element_address,
RepresentationUtils::OperandSize(rep));
} else {
switch (rep) {
case kUnboxedUint32:
case kUnboxedInt32:
__ StoreWordUnaligned(value, temp, temp2);
break;
case kUnboxedUint16:
case kUnboxedInt16:
__ StoreHalfWordUnaligned(value, temp, temp2);
break;
default:
UNREACHABLE();
break;
}
}
}
case kTypedDataFloat32ArrayCid: {
} else if (RepresentationUtils::IsUnboxed(rep)) {
if (rep == kUnboxedFloat) {
const SRegister value_reg =
EvenSRegisterOf(EvenDRegisterOf(locs()->in(2).fpu_reg()));
if (aligned()) {
@ -2565,9 +2490,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ vmovrs(value, value_reg);
__ StoreWordUnaligned(value, address, TMP);
}
break;
}
case kTypedDataFloat64ArrayCid: {
} else if (rep == kUnboxedDouble) {
const DRegister value_reg = EvenDRegisterOf(locs()->in(2).fpu_reg());
if (aligned()) {
__ vstrd(value_reg, element_address);
@ -2580,19 +2503,25 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ vmovrs(value, OddSRegisterOf(value_reg));
__ StoreWordUnaligned(value, address, TMP);
}
break;
}
case kTypedDataFloat64x2ArrayCid:
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid: {
} else {
ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
rep == kUnboxedFloat64x2);
ASSERT(element_address.Equals(compiler::Address(index.reg())));
ASSERT(aligned());
const DRegister value_reg = EvenDRegisterOf(locs()->in(2).fpu_reg());
__ vstmd(IA, index.reg(), value_reg, 2);
break;
}
default:
UNREACHABLE();
} else if (class_id() == kArrayCid) {
if (ShouldEmitStoreBarrier()) {
const Register value = locs()->in(2).reg();
__ StoreIntoArray(array, temp, value, CanValueBeSmi());
} else if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreIntoObjectNoBarrier(array, compiler::Address(temp), constant);
} else {
const Register value = locs()->in(2).reg();
__ StoreIntoObjectNoBarrier(array, compiler::Address(temp), value);
}
}
}


@ -156,6 +156,12 @@ DEFINE_BACKEND(TailCall,
LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a MemoryCopy
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
!IsTypedDataBaseClassId(dest_cid_)) ||
opt);
const intptr_t kNumInputs = 5;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
@ -312,8 +318,13 @@ void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
if (array_rep != kTagged) {
// Do nothing, array_reg already contains the payload address.
} else if (IsTypedDataBaseClassId(array_cid)) {
__ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
// The incoming array must have been proven to be an internal typed data
// object, where the payload is in the object and we can just offset.
ASSERT_EQUAL(array_rep, kTagged);
offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
} else {
ASSERT_EQUAL(array_rep, kTagged);
ASSERT(!IsExternalPayloadClassId(array_cid));
switch (array_cid) {
case kOneByteStringCid:
offset =
@ -1885,23 +1896,29 @@ void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a LoadIndexed
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_in(kArrayPos, Location::RequiresRegister());
const bool can_be_constant =
index()->BindsToConstant() &&
compiler::Assembler::AddressCanHoldConstantIndex(
index()->BoundConstant(), IsExternal(), class_id(), index_scale());
locs->set_in(1, can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: Location::RequiresRegister());
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
locs->set_in(kIndexPos,
can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: Location::RequiresRegister());
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
if (RepresentationUtils::IsUnboxedInteger(rep)) {
locs->set_out(0, Location::RequiresRegister());
} else if (RepresentationUtils::IsUnboxed(rep)) {
locs->set_out(0, Location::RequiresFpuRegister());
} else {
locs->set_out(0, Location::RequiresRegister());
@ -1911,87 +1928,42 @@ LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The array register points to the backing store for external arrays.
const Register array = locs()->in(0).reg();
const Location index = locs()->in(1);
const Register array = locs()->in(kArrayPos).reg();
const Location index = locs()->in(kIndexPos);
compiler::Address element_address(TMP); // Bad address.
element_address = index.IsRegister()
? __ ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale(),
IsUntagged(), class_id(), index_scale(),
index_unboxed_, array, index.reg(), TMP)
: __ ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
ASSERT(representation() == Boxing::NativeRepresentation(rep));
if (RepresentationUtils::IsUnboxedInteger(rep)) {
const Register result = locs()->out(0).reg();
__ ldr(result, element_address, RepresentationUtils::OperandSize(rep));
} else if (RepresentationUtils::IsUnboxed(rep)) {
const VRegister result = locs()->out(0).fpu_reg();
switch (class_id()) {
case kTypedDataFloat32ArrayCid:
// Load single precision float.
__ fldrs(result, element_address);
break;
case kTypedDataFloat64ArrayCid:
// Load double precision float.
__ fldrd(result, element_address);
break;
case kTypedDataFloat64x2ArrayCid:
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
__ fldrq(result, element_address);
break;
default:
UNREACHABLE();
if (rep == kUnboxedFloat) {
// Load single precision float.
__ fldrs(result, element_address);
} else if (rep == kUnboxedDouble) {
// Load double precision float.
__ fldrd(result, element_address);
} else {
ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
rep == kUnboxedFloat64x2);
__ fldrq(result, element_address);
}
return;
}
const Register result = locs()->out(0).reg();
switch (class_id()) {
case kTypedDataInt32ArrayCid:
ASSERT(representation() == kUnboxedInt32);
__ ldr(result, element_address, compiler::kFourBytes);
break;
case kTypedDataUint32ArrayCid:
ASSERT(representation() == kUnboxedUint32);
__ ldr(result, element_address, compiler::kUnsignedFourBytes);
break;
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
ASSERT(representation() == kUnboxedInt64);
__ ldr(result, element_address, compiler::kEightBytes);
break;
case kTypedDataInt8ArrayCid:
ASSERT(representation() == kUnboxedIntPtr);
ASSERT(index_scale() == 1);
__ ldr(result, element_address, compiler::kByte);
break;
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kOneByteStringCid:
ASSERT(representation() == kUnboxedIntPtr);
ASSERT(index_scale() == 1);
__ ldr(result, element_address, compiler::kUnsignedByte);
break;
case kTypedDataInt16ArrayCid:
ASSERT(representation() == kUnboxedIntPtr);
__ ldr(result, element_address, compiler::kTwoBytes);
break;
case kTypedDataUint16ArrayCid:
case kTwoByteStringCid:
ASSERT(representation() == kUnboxedIntPtr);
__ ldr(result, element_address, compiler::kUnsignedTwoBytes);
break;
default:
ASSERT(representation() == kTagged);
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
(class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
__ LoadCompressed(result, element_address);
break;
} else {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kTagged);
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
(class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
__ LoadCompressed(result, element_address);
}
}
@ -2058,6 +2030,11 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a StoreIndexed
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
const intptr_t kNumInputs = 3;
const intptr_t kNumTemps = 1;
LocationSummary* locs = new (zone)
@ -2066,63 +2043,44 @@ LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
const bool can_be_constant =
index()->BindsToConstant() &&
compiler::Assembler::AddressCanHoldConstantIndex(
index()->BoundConstant(), IsExternal(), class_id(), index_scale());
index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
locs->set_in(1, can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: Location::RequiresRegister());
locs->set_temp(0, Location::RequiresRegister());
switch (class_id()) {
case kArrayCid:
locs->set_in(2, ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
if (ShouldEmitStoreBarrier()) {
locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
}
break;
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataUint8ClampedArrayCid:
locs->set_in(2, LocationRegisterOrConstant(value()));
break;
case kExternalTypedDataUint8ArrayCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid: {
ConstantInstr* constant = value()->definition()->AsConstant();
if (constant != nullptr && constant->HasZeroRepresentation()) {
locs->set_in(2, Location::Constant(constant));
} else {
locs->set_in(2, Location::RequiresRegister());
}
break;
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
if (IsClampedTypedDataBaseClassId(class_id())) {
ASSERT(rep == kUnboxedUint8);
locs->set_in(2, LocationRegisterOrConstant(value()));
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
ConstantInstr* constant = value()->definition()->AsConstant();
if (constant != nullptr && constant->HasZeroRepresentation()) {
locs->set_in(2, Location::Constant(constant));
} else {
locs->set_in(2, Location::RequiresRegister());
}
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid: {
} else if (RepresentationUtils::IsUnboxed(rep)) {
if (rep == kUnboxedFloat || rep == kUnboxedDouble) {
ConstantInstr* constant = value()->definition()->AsConstant();
if (constant != nullptr && constant->HasZeroRepresentation()) {
locs->set_in(2, Location::Constant(constant));
} else {
locs->set_in(2, Location::RequiresFpuRegister());
}
break;
}
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
} else {
locs->set_in(2, Location::RequiresFpuRegister());
break;
default:
UNREACHABLE();
return nullptr;
}
} else if (class_id() == kArrayCid) {
locs->set_in(2, ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
if (ShouldEmitStoreBarrier()) {
locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
}
} else {
UNREACHABLE();
}
return locs;
}
@ -2134,14 +2092,18 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register temp = locs()->temp(0).reg();
compiler::Address element_address(TMP); // Bad address.
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
ASSERT(RequiredInputRepresentation(2) == Boxing::NativeRepresentation(rep));
// Deal with a special case separately.
if (class_id() == kArrayCid && ShouldEmitStoreBarrier()) {
if (index.IsRegister()) {
__ ComputeElementAddressForRegIndex(temp, IsExternal(), class_id(),
__ ComputeElementAddressForRegIndex(temp, IsUntagged(), class_id(),
index_scale(), index_unboxed_, array,
index.reg());
} else {
__ ComputeElementAddressForIntIndex(temp, IsExternal(), class_id(),
__ ComputeElementAddressForIntIndex(temp, IsUntagged(), class_id(),
index_scale(), array,
Smi::Cast(index.constant()).Value());
}
@ -2152,150 +2114,91 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
element_address = index.IsRegister()
? __ ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale(),
IsUntagged(), class_id(), index_scale(),
index_unboxed_, array, index.reg(), temp)
: __ ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
switch (class_id()) {
case kArrayCid:
ASSERT(!ShouldEmitStoreBarrier()); // Specially treated above.
if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreCompressedIntoObjectNoBarrier(array, element_address, constant);
} else {
const Register value = locs()->in(2).reg();
__ StoreCompressedIntoObjectNoBarrier(array, element_address, value);
if (IsClampedTypedDataBaseClassId(class_id())) {
ASSERT(rep == kUnboxedUint8);
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
intptr_t value = constant.Value();
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
break;
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kOneByteStringCid:
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
if (value == 0) {
__ str(ZR, element_address, compiler::kUnsignedByte);
} else {
__ str(locs()->in(2).reg(), element_address, compiler::kUnsignedByte);
}
break;
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ClampedArrayCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
intptr_t value = constant.Value();
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
if (value == 0) {
__ str(ZR, element_address, compiler::kUnsignedByte);
} else {
__ LoadImmediate(TMP, static_cast<int8_t>(value));
__ str(TMP, element_address, compiler::kUnsignedByte);
}
} else {
const Register value = locs()->in(2).reg();
// Clamp to 0x00 or 0xFF respectively.
__ CompareImmediate(value, 0xFF);
__ csetm(TMP, GT); // TMP = value > 0xFF ? -1 : 0.
__ csel(TMP, value, TMP, LS); // TMP = value in range ? value : TMP.
__ LoadImmediate(TMP, static_cast<int8_t>(value));
__ str(TMP, element_address, compiler::kUnsignedByte);
}
break;
} else {
const Register value = locs()->in(2).reg();
// Clamp to 0x00 or 0xFF respectively.
__ CompareImmediate(value, 0xFF);
__ csetm(TMP, GT); // TMP = value > 0xFF ? -1 : 0.
__ csel(TMP, value, TMP, LS); // TMP = value in range ? value : TMP.
__ str(TMP, element_address, compiler::kUnsignedByte);
}
case kTwoByteStringCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
__ str(ZR, element_address, compiler::kUnsignedTwoBytes);
} else {
__ str(locs()->in(2).reg(), element_address,
compiler::kUnsignedTwoBytes);
}
break;
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
__ str(ZR, element_address, compiler::kUnsignedFourBytes);
} else {
__ str(locs()->in(2).reg(), element_address,
compiler::kUnsignedFourBytes);
}
break;
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
__ str(ZR, element_address, compiler::kEightBytes);
} else {
__ str(locs()->in(2).reg(), element_address, compiler::kEightBytes);
}
break;
case kTypedDataFloat32ArrayCid:
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
__ str(ZR, element_address, RepresentationUtils::OperandSize(rep));
} else {
__ str(locs()->in(2).reg(), element_address,
RepresentationUtils::OperandSize(rep));
}
} else if (RepresentationUtils::IsUnboxed(rep)) {
if (rep == kUnboxedFloat) {
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
__ str(ZR, element_address, compiler::kFourBytes);
} else {
__ fstrs(locs()->in(2).fpu_reg(), element_address);
}
break;
case kTypedDataFloat64ArrayCid: {
} else if (rep == kUnboxedDouble) {
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
__ str(ZR, element_address, compiler::kEightBytes);
} else {
__ fstrd(locs()->in(2).fpu_reg(), element_address);
}
break;
}
case kTypedDataFloat64x2ArrayCid:
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid: {
} else {
ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
rep == kUnboxedFloat64x2);
const VRegister value_reg = locs()->in(2).fpu_reg();
__ fstrq(value_reg, element_address);
break;
}
default:
UNREACHABLE();
} else if (class_id() == kArrayCid) {
ASSERT(!ShouldEmitStoreBarrier()); // Specially treated above.
if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreCompressedIntoObjectNoBarrier(array, element_address, constant);
} else {
const Register value = locs()->in(2).reg();
__ StoreCompressedIntoObjectNoBarrier(array, element_address, value);
}
} else {
UNREACHABLE();
}
#if defined(USING_MEMORY_SANITIZER)
if (index.IsRegister()) {
__ ComputeElementAddressForRegIndex(TMP, IsExternal(), class_id(),
__ ComputeElementAddressForRegIndex(TMP, IsUntagged(), class_id(),
index_scale(), index_unboxed_, array,
index.reg());
} else {
__ ComputeElementAddressForIntIndex(TMP, IsExternal(), class_id(),
__ ComputeElementAddressForIntIndex(TMP, IsUntagged(), class_id(),
index_scale(), array,
Smi::Cast(index.constant()).Value());
}
intptr_t length_in_bytes;
if (IsTypedDataBaseClassId(class_id_)) {
length_in_bytes = compiler::TypedDataElementSizeInBytes(class_id_);
} else {
switch (class_id_) {
case kArrayCid:
length_in_bytes = compiler::target::kWordSize;
break;
case kOneByteStringCid:
length_in_bytes = 1;
break;
case kTwoByteStringCid:
length_in_bytes = 2;
break;
default:
FATAL("Unknown cid: %" Pd, class_id_);
}
}
const intptr_t length_in_bytes = RepresentationUtils::ValueSize(
RepresentationUtils::RepresentationOfArrayElement(class_id()));
__ MsanUnpoison(TMP, length_in_bytes);
#endif
}
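
For reference, the register path of the clamped-store case above (CompareImmediate, csetm, csel, then an unsigned byte store) implements the usual Uint8ClampedList saturation; the constant path performs the same clamp at compile time. A minimal scalar restatement, as a self-contained sketch rather than anything in the CL:

#include <cstdint>

// Scalar restatement of the clamp the csetm/csel pair computes (illustrative).
static inline uint8_t ClampToUint8(intptr_t value) {
  if (value < 0) return 0;        // csetm yields 0 and csel keeps it: store 0x00.
  if (value > 0xFF) return 0xFF;  // csetm yields all ones; the stored low byte is 0xFF.
  return static_cast<uint8_t>(value);
}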


@ -79,6 +79,12 @@ DEFINE_BACKEND(TailCall,
LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a MemoryCopy
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
!IsTypedDataBaseClassId(dest_cid_)) ||
opt);
const bool remove_loop =
length()->BindsToSmiConstant() && length()->BoundSmiConstant() <= 4;
const intptr_t kNumInputs = 5;
@ -189,8 +195,13 @@ void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
if (array_rep != kTagged) {
// Do nothing, array_reg already contains the payload address.
} else if (IsTypedDataBaseClassId(array_cid)) {
__ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
// The incoming array must have been proven to be an internal typed data
// object, where the payload is in the object and we can just offset.
ASSERT_EQUAL(array_rep, kTagged);
offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
} else {
ASSERT_EQUAL(array_rep, kTagged);
ASSERT(!IsExternalPayloadClassId(array_cid));
switch (array_cid) {
case kOneByteStringCid:
offset =
@ -1567,34 +1578,40 @@ void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a LoadIndexed
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_in(kArrayPos, Location::RequiresRegister());
// The index is either untagged (element size == 1) or a smi (for all
// element sizes > 1).
const bool need_writable_index_register = index_scale() == 1;
const bool can_be_constant =
index()->BindsToConstant() &&
compiler::Assembler::AddressCanHoldConstantIndex(
index()->BoundConstant(), IsExternal(), class_id(), index_scale());
index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
locs->set_in(
1, can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: (need_writable_index_register ? Location::WritableRegister()
: Location::RequiresRegister()));
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
kIndexPos,
can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: (need_writable_index_register ? Location::WritableRegister()
: Location::RequiresRegister()));
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedInt64) {
locs->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
} else {
locs->set_out(0, Location::RequiresRegister());
}
} else if (RepresentationUtils::IsUnboxed(rep)) {
locs->set_out(0, Location::RequiresFpuRegister());
} else if (representation() == kUnboxedInt64) {
ASSERT(class_id() == kTypedDataInt64ArrayCid ||
class_id() == kTypedDataUint64ArrayCid);
locs->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
} else {
locs->set_out(0, Location::RequiresRegister());
}
@ -1603,8 +1620,8 @@ LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The array register points to the backing store for external arrays.
const Register array = locs()->in(0).reg();
const Location index = locs()->in(1);
const Register array = locs()->in(kArrayPos).reg();
const Location index = locs()->in(kIndexPos);
bool index_unboxed = index_unboxed_;
if (index_scale() == 1 && !index_unboxed) {
@ -1618,114 +1635,63 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Address element_address =
index.IsRegister() ? compiler::Assembler::ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale(),
IsUntagged(), class_id(), index_scale(),
index_unboxed, array, index.reg())
: compiler::Assembler::ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
XmmRegister result = locs()->out(0).fpu_reg();
switch (class_id()) {
case kTypedDataFloat32ArrayCid:
__ movss(result, element_address);
break;
case kTypedDataFloat64ArrayCid:
__ movsd(result, element_address);
break;
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
__ movups(result, element_address);
break;
default:
UNREACHABLE();
}
return;
}
switch (class_id()) {
case kTypedDataInt32ArrayCid: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedInt32);
__ movl(result, element_address);
break;
}
case kTypedDataUint32ArrayCid: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedUint32);
__ movl(result, element_address);
break;
}
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid: {
ASSERT(representation() == kUnboxedInt64);
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
ASSERT(representation() == Boxing::NativeRepresentation(rep));
if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedInt64) {
ASSERT(locs()->out(0).IsPairLocation());
PairLocation* result_pair = locs()->out(0).AsPairLocation();
const Register result_lo = result_pair->At(0).reg();
const Register result_hi = result_pair->At(1).reg();
ASSERT(class_id() == kTypedDataInt64ArrayCid ||
class_id() == kTypedDataUint64ArrayCid);
__ movl(result_lo, element_address);
element_address =
index.IsRegister()
? compiler::Assembler::ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale(), index_unboxed,
IsUntagged(), class_id(), index_scale(), index_unboxed,
array, index.reg(), kWordSize)
: compiler::Assembler::ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value(), kWordSize);
__ movl(result_hi, element_address);
break;
} else {
Register result = locs()->out(0).reg();
__ LoadFromOffset(result, element_address,
RepresentationUtils::OperandSize(rep));
}
case kTypedDataInt8ArrayCid: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedIntPtr);
ASSERT(index_scale() == 1);
__ movsxb(result, element_address);
break;
}
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kOneByteStringCid: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedIntPtr);
ASSERT(index_scale() == 1);
__ movzxb(result, element_address);
break;
}
case kTypedDataInt16ArrayCid: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedIntPtr);
__ movsxw(result, element_address);
break;
}
case kTypedDataUint16ArrayCid:
case kTwoByteStringCid: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kUnboxedIntPtr);
__ movzxw(result, element_address);
break;
}
default: {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kTagged);
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
(class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
__ movl(result, element_address);
break;
} else if (RepresentationUtils::IsUnboxed(rep)) {
XmmRegister result = locs()->out(0).fpu_reg();
if (rep == kUnboxedFloat) {
__ movss(result, element_address);
} else if (rep == kUnboxedDouble) {
__ movsd(result, element_address);
} else {
ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
rep == kUnboxedFloat64x2);
__ movups(result, element_address);
}
} else {
const Register result = locs()->out(0).reg();
ASSERT(representation() == kTagged);
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
(class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
__ movl(result, element_address);
}
}
LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a StoreIndexed
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
const intptr_t kNumInputs = 3;
const intptr_t kNumTemps =
class_id() == kArrayCid && ShouldEmitStoreBarrier() ? 2 : 0;
@ -1738,60 +1704,38 @@ LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
const bool can_be_constant =
index()->BindsToConstant() &&
compiler::Assembler::AddressCanHoldConstantIndex(
index()->BoundConstant(), IsExternal(), class_id(), index_scale());
index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
locs->set_in(
1, can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: (need_writable_index_register ? Location::WritableRegister()
: Location::RequiresRegister()));
switch (class_id()) {
case kArrayCid:
locs->set_in(2, LocationRegisterOrConstant(value()));
if (ShouldEmitStoreBarrier()) {
locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
locs->set_in(2, Location::RegisterLocation(kWriteBarrierValueReg));
locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
locs->set_temp(1, Location::RequiresRegister());
}
break;
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
// TODO(fschneider): Add location constraint for byte registers (EAX,
// EBX, ECX, EDX) instead of using a fixed register.
locs->set_in(2, LocationFixedRegisterOrSmiConstant(value(), EAX));
break;
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
// Writable register because the value must be untagged before storing.
locs->set_in(2, Location::WritableRegister());
break;
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
locs->set_in(2, Location::RequiresRegister());
break;
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
} else if (rep == kUnboxedInt64) {
locs->set_in(2, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
break;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
// TODO(srdjan): Support Float64 constants.
locs->set_in(2, Location::RequiresFpuRegister());
break;
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
locs->set_in(2, Location::RequiresFpuRegister());
break;
default:
UNREACHABLE();
return nullptr;
} else {
locs->set_in(2, Location::RequiresRegister());
}
} else if (RepresentationUtils::IsUnboxed(rep)) {
// TODO(srdjan): Support Float64 constants.
locs->set_in(2, Location::RequiresFpuRegister());
} else if (class_id() == kArrayCid) {
locs->set_in(2, LocationRegisterOrConstant(value()));
if (ShouldEmitStoreBarrier()) {
locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
locs->set_in(2, Location::RegisterLocation(kWriteBarrierValueReg));
locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
locs->set_temp(1, Location::RequiresRegister());
}
} else {
UNREACHABLE();
}
return locs;
}
@ -1808,33 +1752,43 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
compiler::Address element_address =
index.IsRegister() ? compiler::Assembler::ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale(),
IsUntagged(), class_id(), index_scale(),
index_unboxed, array, index.reg())
: compiler::Assembler::ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
switch (class_id()) {
case kArrayCid:
if (ShouldEmitStoreBarrier()) {
Register value = locs()->in(2).reg();
Register slot = locs()->temp(0).reg();
Register scratch = locs()->temp(1).reg();
__ leal(slot, element_address);
__ StoreIntoArray(array, slot, value, CanValueBeSmi(), scratch);
} else if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreIntoObjectNoBarrier(array, element_address, constant);
} else {
Register value = locs()->in(2).reg();
__ StoreIntoObjectNoBarrier(array, element_address, value);
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
ASSERT(RequiredInputRepresentation(2) == Boxing::NativeRepresentation(rep));
if (IsClampedTypedDataBaseClassId(class_id())) {
ASSERT(rep == kUnboxedUint8);
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
intptr_t value = constant.Value();
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
break;
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kOneByteStringCid:
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
__ movb(element_address, compiler::Immediate(static_cast<int8_t>(value)));
} else {
ASSERT(locs()->in(2).reg() == EAX);
compiler::Label store_value, store_0xff;
__ cmpl(EAX, compiler::Immediate(0xFF));
__ j(BELOW_EQUAL, &store_value, compiler::Assembler::kNearJump);
// Clamp to 0x0 or 0xFF respectively.
__ j(GREATER, &store_0xff);
__ xorl(EAX, EAX);
__ jmp(&store_value, compiler::Assembler::kNearJump);
__ Bind(&store_0xff);
__ movl(EAX, compiler::Immediate(0xFF));
__ Bind(&store_value);
__ movb(element_address, AL);
}
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
__ movb(element_address,
@ -1843,51 +1797,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(locs()->in(2).reg() == EAX);
__ movb(element_address, AL);
}
break;
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ClampedArrayCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
intptr_t value = constant.Value();
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
__ movb(element_address,
compiler::Immediate(static_cast<int8_t>(value)));
} else {
ASSERT(locs()->in(2).reg() == EAX);
compiler::Label store_value, store_0xff;
__ cmpl(EAX, compiler::Immediate(0xFF));
__ j(BELOW_EQUAL, &store_value, compiler::Assembler::kNearJump);
// Clamp to 0x0 or 0xFF respectively.
__ j(GREATER, &store_0xff);
__ xorl(EAX, EAX);
__ jmp(&store_value, compiler::Assembler::kNearJump);
__ Bind(&store_0xff);
__ movl(EAX, compiler::Immediate(0xFF));
__ Bind(&store_value);
__ movb(element_address, AL);
}
break;
}
case kTwoByteStringCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
const Register value = locs()->in(2).reg();
__ movw(element_address, value);
break;
}
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
__ movl(element_address, locs()->in(2).reg());
break;
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid: {
} else if (rep == kUnboxedInt64) {
ASSERT(locs()->in(2).IsPairLocation());
PairLocation* value_pair = locs()->in(2).AsPairLocation();
const Register value_lo = value_pair->At(0).reg();
@ -1896,27 +1806,44 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
element_address =
index.IsRegister()
? compiler::Assembler::ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale(), index_unboxed,
IsUntagged(), class_id(), index_scale(), index_unboxed,
array, index.reg(), kWordSize)
: compiler::Assembler::ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value(), kWordSize);
__ movl(element_address, value_hi);
break;
} else {
Register value = locs()->in(2).reg();
__ StoreToOffset(value, element_address,
RepresentationUtils::OperandSize(rep));
}
case kTypedDataFloat32ArrayCid:
} else if (RepresentationUtils::IsUnboxed(rep)) {
if (rep == kUnboxedFloat) {
__ movss(element_address, locs()->in(2).fpu_reg());
break;
case kTypedDataFloat64ArrayCid:
} else if (rep == kUnboxedDouble) {
__ movsd(element_address, locs()->in(2).fpu_reg());
break;
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
} else {
ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
rep == kUnboxedFloat64x2);
__ movups(element_address, locs()->in(2).fpu_reg());
break;
default:
UNREACHABLE();
}
} else if (class_id() == kArrayCid) {
ASSERT(rep == kTagged);
if (ShouldEmitStoreBarrier()) {
Register value = locs()->in(2).reg();
Register slot = locs()->temp(0).reg();
Register scratch = locs()->temp(1).reg();
__ leal(slot, element_address);
__ StoreIntoArray(array, slot, value, CanValueBeSmi(), scratch);
} else if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreIntoObjectNoBarrier(array, element_address, constant);
} else {
Register value = locs()->in(2).reg();
__ StoreIntoObjectNoBarrier(array, element_address, value);
}
} else {
UNREACHABLE();
}
}
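
The EmitComputeStartPointer hunks in this and the other backend files drop the unconditional LoadFromSlot of PointerBase_data: by the time code is emitted, a tagged typed-data input must be an internal object, while views, external typed data, and FFI Pointer arrive with an untagged payload address (so array_rep is already not kTagged for them). A condensed, commented restatement of that case split follows; it is illustrative only and not code from the CL.

// Illustrative fragment (assumes the VM's compiler::target helpers and cid
// predicates) of the tagged-array case split shown in the hunks above.
if (IsTypedDataBaseClassId(array_cid)) {
  // Internal typed data only: the payload is inside the object itself.
  offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
} else {
  // Strings and Arrays keep their class-specific payload offsets.
  ASSERT(!IsExternalPayloadClassId(array_cid));
}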


@ -1463,7 +1463,25 @@ void StoreIndexedUnsafeInstr::PrintOperandsTo(BaseTextBuffer* f) const {
value()->PrintTo(f);
}
void LoadIndexedInstr::PrintOperandsTo(BaseTextBuffer* f) const {
auto& cls =
Class::Handle(IsolateGroup::Current()->class_table()->At(class_id()));
if (!cls.IsNull()) {
f->Printf("[%s] ", cls.ScrubbedNameCString());
} else {
f->Printf("[cid %" Pd "] ", class_id());
}
Instruction::PrintOperandsTo(f);
}
void StoreIndexedInstr::PrintOperandsTo(BaseTextBuffer* f) const {
auto& cls =
Class::Handle(IsolateGroup::Current()->class_table()->At(class_id()));
if (!cls.IsNull()) {
f->Printf("[%s] ", cls.ScrubbedNameCString());
} else {
f->Printf("[cid %" Pd "] ", class_id());
}
Instruction::PrintOperandsTo(f);
if (!ShouldEmitStoreBarrier()) {
f->AddString(", NoStoreBarrier");
@ -1472,46 +1490,24 @@ void StoreIndexedInstr::PrintOperandsTo(BaseTextBuffer* f) const {
void MemoryCopyInstr::PrintOperandsTo(BaseTextBuffer* f) const {
Instruction::PrintOperandsTo(f);
// kTypedDataUint8ArrayCid is used as the default cid for cases where
// the destination object is a subclass of PointerBase and the arguments
// are given in terms of bytes, so only print if the cid differs.
switch (dest_representation_) {
case kUntagged:
f->Printf(", dest untagged");
break;
case kTagged:
if (dest_cid_ != kTypedDataUint8ArrayCid) {
const Class& cls = Class::Handle(
IsolateGroup::Current()->class_table()->At(dest_cid_));
if (!cls.IsNull()) {
f->Printf(", dest_cid=%s (%d)", cls.ScrubbedNameCString(), dest_cid_);
} else {
f->Printf(", dest_cid=%d", dest_cid_);
}
}
break;
default:
UNREACHABLE();
auto& cls =
Class::Handle(IsolateGroup::Current()->class_table()->At(dest_cid_));
if (!cls.IsNull()) {
f->Printf(", dest_cid=%s (%d)", cls.ScrubbedNameCString(), dest_cid_);
} else {
f->Printf(", dest_cid=%d", dest_cid_);
}
switch (src_representation_) {
case kUntagged:
f->Printf(", src untagged");
break;
case kTagged:
if ((dest_representation_ == kTagged && dest_cid_ != src_cid_) ||
(dest_representation_ != kTagged &&
src_cid_ != kTypedDataUint8ArrayCid)) {
const Class& cls =
Class::Handle(IsolateGroup::Current()->class_table()->At(src_cid_));
if (!cls.IsNull()) {
f->Printf(", src_cid=%s (%d)", cls.ScrubbedNameCString(), src_cid_);
} else {
f->Printf(", src_cid=%d", src_cid_);
}
}
break;
default:
UNREACHABLE();
if (dest()->definition()->representation() == kUntagged) {
f->Printf(" [untagged]");
}
cls = IsolateGroup::Current()->class_table()->At(src_cid_);
if (!cls.IsNull()) {
f->Printf(", src_cid=%s (%d)", cls.ScrubbedNameCString(), src_cid_);
} else {
f->Printf(", src_cid=%d", src_cid_);
}
if (src()->definition()->representation() == kUntagged) {
f->Printf(" [untagged]");
}
if (element_size() != 1) {
f->Printf(", element_size=%" Pd "", element_size());


@ -175,6 +175,12 @@ DEFINE_BACKEND(TailCall,
LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a MemoryCopy
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
!IsTypedDataBaseClassId(dest_cid_)) ||
opt);
const intptr_t kNumInputs = 5;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
@ -393,8 +399,13 @@ void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
if (array_rep != kTagged) {
// Do nothing, array_reg already contains the payload address.
} else if (IsTypedDataBaseClassId(array_cid)) {
__ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
// The incoming array must have been proven to be an internal typed data
// object, where the payload is in the object and we can just offset.
ASSERT_EQUAL(array_rep, kTagged);
offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
} else {
ASSERT_EQUAL(array_rep, kTagged);
ASSERT(!IsExternalPayloadClassId(array_cid));
switch (array_cid) {
case kOneByteStringCid:
offset =
@ -2047,31 +2058,36 @@ void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a LoadIndexed
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_in(kArrayPos, Location::RequiresRegister());
const bool can_be_constant =
index()->BindsToConstant() &&
compiler::Assembler::AddressCanHoldConstantIndex(
index()->BoundConstant(), IsExternal(), class_id(), index_scale());
locs->set_in(1, can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: Location::RequiresRegister());
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
locs->set_out(0, Location::RequiresFpuRegister());
index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
locs->set_in(kIndexPos,
can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: Location::RequiresRegister());
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
if (RepresentationUtils::IsUnboxedInteger(rep)) {
locs->set_out(0, Location::RequiresRegister());
#if XLEN == 32
} else if (representation() == kUnboxedInt64) {
ASSERT(class_id() == kTypedDataInt64ArrayCid ||
class_id() == kTypedDataUint64ArrayCid);
locs->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
if (rep == kUnboxedInt64) {
locs->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
}
#endif
} else if (RepresentationUtils::IsUnboxed(rep)) {
locs->set_out(0, Location::RequiresFpuRegister());
} else {
locs->set_out(0, Location::RequiresRegister());
}
@ -2080,64 +2096,24 @@ LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The array register points to the backing store for external arrays.
const Register array = locs()->in(0).reg();
const Location index = locs()->in(1);
const Register array = locs()->in(kArrayPos).reg();
const Location index = locs()->in(kIndexPos);
compiler::Address element_address(TMP); // Bad address.
element_address = index.IsRegister()
? __ ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale(),
IsUntagged(), class_id(), index_scale(),
index_unboxed_, array, index.reg(), TMP)
: __ ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
const FRegister result = locs()->out(0).fpu_reg();
switch (class_id()) {
case kTypedDataFloat32ArrayCid:
// Load single precision float.
__ flw(result, element_address);
break;
case kTypedDataFloat64ArrayCid:
// Load double precision float.
__ fld(result, element_address);
break;
case kTypedDataFloat64x2ArrayCid:
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
UNIMPLEMENTED();
break;
default:
UNREACHABLE();
}
return;
}
switch (class_id()) {
case kTypedDataInt32ArrayCid: {
ASSERT(representation() == kUnboxedInt32);
const Register result = locs()->out(0).reg();
__ lw(result, element_address);
break;
}
case kTypedDataUint32ArrayCid: {
ASSERT(representation() == kUnboxedUint32);
const Register result = locs()->out(0).reg();
#if XLEN == 32
__ lw(result, element_address);
#else
__ lwu(result, element_address);
#endif
break;
}
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid: {
ASSERT(representation() == kUnboxedInt64);
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
ASSERT(representation() == Boxing::NativeRepresentation(rep));
if (RepresentationUtils::IsUnboxedInteger(rep)) {
#if XLEN == 32
if (rep == kUnboxedInt64) {
ASSERT(locs()->out(0).IsPairLocation());
PairLocation* result_pair = locs()->out(0).AsPairLocation();
const Register result_lo = result_pair->At(0).reg();
@ -2145,51 +2121,35 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ lw(result_lo, element_address);
__ lw(result_hi, compiler::Address(element_address.base(),
element_address.offset() + 4));
} else {
const Register result = locs()->out(0).reg();
__ LoadFromOffset(result, element_address,
RepresentationUtils::OperandSize(rep));
}
#else
const Register result = locs()->out(0).reg();
__ ld(result, element_address);
const Register result = locs()->out(0).reg();
__ LoadFromOffset(result, element_address,
RepresentationUtils::OperandSize(rep));
#endif
break;
}
case kTypedDataInt8ArrayCid: {
ASSERT(representation() == kUnboxedIntPtr);
ASSERT(index_scale() == 1);
const Register result = locs()->out(0).reg();
__ lb(result, element_address);
break;
}
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kOneByteStringCid: {
ASSERT(representation() == kUnboxedIntPtr);
ASSERT(index_scale() == 1);
const Register result = locs()->out(0).reg();
__ lbu(result, element_address);
break;
}
case kTypedDataInt16ArrayCid: {
ASSERT(representation() == kUnboxedIntPtr);
const Register result = locs()->out(0).reg();
__ lh(result, element_address);
break;
}
case kTypedDataUint16ArrayCid:
case kTwoByteStringCid: {
ASSERT(representation() == kUnboxedIntPtr);
const Register result = locs()->out(0).reg();
__ lhu(result, element_address);
break;
}
default: {
ASSERT(representation() == kTagged);
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
(class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
const Register result = locs()->out(0).reg();
__ lx(result, element_address);
break;
} else if (RepresentationUtils::IsUnboxed(rep)) {
const FRegister result = locs()->out(0).fpu_reg();
if (rep == kUnboxedFloat) {
// Load single precision float.
__ flw(result, element_address);
} else if (rep == kUnboxedDouble) {
// Load double precision float.
__ fld(result, element_address);
} else {
ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
rep == kUnboxedFloat64x2);
UNIMPLEMENTED();
}
} else {
ASSERT(rep == kTagged);
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
(class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
const Register result = locs()->out(0).reg();
__ lx(result, element_address);
}
}
@ -2290,6 +2250,11 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a StoreIndexed
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
const intptr_t kNumInputs = 3;
const intptr_t kNumTemps = 1;
LocationSummary* locs = new (zone)
@ -2298,45 +2263,26 @@ LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
const bool can_be_constant =
index()->BindsToConstant() &&
compiler::Assembler::AddressCanHoldConstantIndex(
index()->BoundConstant(), IsExternal(), class_id(), index_scale());
index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
locs->set_in(1, can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: Location::RequiresRegister());
locs->set_temp(0, Location::RequiresRegister());
switch (class_id()) {
case kArrayCid:
locs->set_in(2, ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
if (ShouldEmitStoreBarrier()) {
locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
}
break;
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataUint8ClampedArrayCid:
locs->set_in(2, LocationRegisterOrConstant(value()));
break;
case kExternalTypedDataUint8ArrayCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid: {
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
if (IsClampedTypedDataBaseClassId(class_id())) {
ASSERT(rep == kUnboxedUint8);
locs->set_in(2, LocationRegisterOrConstant(value()));
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
ConstantInstr* constant = value()->definition()->AsConstant();
if (constant != nullptr && constant->HasZeroRepresentation()) {
locs->set_in(2, Location::Constant(constant));
} else {
locs->set_in(2, Location::RequiresRegister());
}
break;
}
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid: {
} else if (rep == kUnboxedInt64) {
#if XLEN == 32
locs->set_in(2, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
@ -2348,18 +2294,23 @@ LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
locs->set_in(2, Location::RequiresRegister());
}
#endif
break;
} else {
ConstantInstr* constant = value()->definition()->AsConstant();
if (constant != nullptr && constant->HasZeroRepresentation()) {
locs->set_in(2, Location::Constant(constant));
} else {
locs->set_in(2, Location::RequiresRegister());
}
}
case kTypedDataFloat32ArrayCid: {
} else if (RepresentationUtils::IsUnboxed(rep)) {
if (rep == kUnboxedFloat) {
ConstantInstr* constant = value()->definition()->AsConstant();
if (constant != nullptr && constant->HasZeroRepresentation()) {
locs->set_in(2, Location::Constant(constant));
} else {
locs->set_in(2, Location::RequiresFpuRegister());
}
break;
}
case kTypedDataFloat64ArrayCid: {
} else if (rep == kUnboxedDouble) {
#if XLEN == 32
locs->set_in(2, Location::RequiresFpuRegister());
#else
@ -2370,16 +2321,19 @@ LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
locs->set_in(2, Location::RequiresFpuRegister());
}
#endif
break;
}
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
} else {
locs->set_in(2, Location::RequiresFpuRegister());
break;
default:
UNREACHABLE();
return nullptr;
}
} else if (class_id() == kArrayCid) {
locs->set_in(2, ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
if (ShouldEmitStoreBarrier()) {
locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
}
} else {
UNREACHABLE();
}
return locs;
}
@ -2394,11 +2348,11 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Deal with a special case separately.
if (class_id() == kArrayCid && ShouldEmitStoreBarrier()) {
if (index.IsRegister()) {
__ ComputeElementAddressForRegIndex(temp, IsExternal(), class_id(),
__ ComputeElementAddressForRegIndex(temp, IsUntagged(), class_id(),
index_scale(), index_unboxed_, array,
index.reg());
} else {
__ ComputeElementAddressForIntIndex(temp, IsExternal(), class_id(),
__ ComputeElementAddressForIntIndex(temp, IsUntagged(), class_id(),
index_scale(), array,
Smi::Cast(index.constant()).Value());
}
@ -2409,28 +2363,53 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
element_address = index.IsRegister()
? __ ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale(),
IsUntagged(), class_id(), index_scale(),
index_unboxed_, array, index.reg(), temp)
: __ ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale(), array,
IsUntagged(), class_id(), index_scale(), array,
Smi::Cast(index.constant()).Value());
switch (class_id()) {
case kArrayCid:
ASSERT(!ShouldEmitStoreBarrier()); // Specially treated above.
if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreIntoObjectNoBarrier(array, element_address, constant);
} else {
const Register value = locs()->in(2).reg();
__ StoreIntoObjectNoBarrier(array, element_address, value);
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
ASSERT(RequiredInputRepresentation(2) == Boxing::NativeRepresentation(rep));
if (IsClampedTypedDataBaseClassId(class_id())) {
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
intptr_t value = constant.Value();
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
break;
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kOneByteStringCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (value == 0) {
__ sb(ZR, element_address);
} else {
__ LoadImmediate(TMP, static_cast<int8_t>(value));
__ sb(TMP, element_address);
}
} else {
const Register value = locs()->in(2).reg();
compiler::Label store_zero, store_ff, done;
__ blt(value, ZR, &store_zero, compiler::Assembler::kNearJump);
__ li(TMP, 0xFF);
__ bgt(value, TMP, &store_ff, compiler::Assembler::kNearJump);
__ sb(value, element_address);
__ j(&done, compiler::Assembler::kNearJump);
__ Bind(&store_zero);
__ mv(TMP, ZR);
__ Bind(&store_ff);
__ sb(TMP, element_address);
__ Bind(&done);
}
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
__ sb(ZR, element_address);
@ -2438,72 +2417,7 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register value = locs()->in(2).reg();
__ sb(value, element_address);
}
break;
}
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ClampedArrayCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
intptr_t value = constant.Value();
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
if (value == 0) {
__ sb(ZR, element_address);
} else {
__ LoadImmediate(TMP, static_cast<int8_t>(value));
__ sb(TMP, element_address);
}
} else {
const Register value = locs()->in(2).reg();
compiler::Label store_zero, store_ff, done;
__ blt(value, ZR, &store_zero, compiler::Assembler::kNearJump);
__ li(TMP, 0xFF);
__ bgt(value, TMP, &store_ff, compiler::Assembler::kNearJump);
__ sb(value, element_address);
__ j(&done, compiler::Assembler::kNearJump);
__ Bind(&store_zero);
__ mv(TMP, ZR);
__ Bind(&store_ff);
__ sb(TMP, element_address);
__ Bind(&done);
}
break;
}
case kTwoByteStringCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
__ sh(ZR, element_address);
} else {
__ sh(locs()->in(2).reg(), element_address);
}
break;
}
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid: {
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
__ sw(ZR, element_address);
} else {
__ sw(locs()->in(2).reg(), element_address);
}
break;
}
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid: {
} else if (rep == kUnboxedInt64) {
#if XLEN >= 64
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
@ -2519,18 +2433,25 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ sw(value_hi, compiler::Address(element_address.base(),
element_address.offset() + 4));
#endif
break;
} else {
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
__ StoreToOffset(ZR, element_address,
RepresentationUtils::OperandSize(rep));
} else {
__ StoreToOffset(locs()->in(2).reg(), element_address,
RepresentationUtils::OperandSize(rep));
}
}
case kTypedDataFloat32ArrayCid: {
} else if (RepresentationUtils::IsUnboxed(rep)) {
if (rep == kUnboxedFloat) {
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
__ sw(ZR, element_address);
} else {
__ fsw(locs()->in(2).fpu_reg(), element_address);
}
break;
}
case kTypedDataFloat64ArrayCid: {
} else if (rep == kUnboxedDouble) {
#if XLEN >= 64
if (locs()->in(2).IsConstant()) {
ASSERT(locs()->in(2).constant_instruction()->HasZeroRepresentation());
@ -2541,16 +2462,23 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
#else
__ fsd(locs()->in(2).fpu_reg(), element_address);
#endif
break;
}
case kTypedDataFloat64x2ArrayCid:
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid: {
} else {
ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
rep == kUnboxedFloat64x2);
UNIMPLEMENTED();
break;
}
default:
UNREACHABLE();
} else if (class_id() == kArrayCid) {
ASSERT(rep == kTagged);
ASSERT(!ShouldEmitStoreBarrier()); // Specially treated above.
if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreIntoObjectNoBarrier(array, element_address, constant);
} else {
const Register value = locs()->in(2).reg();
__ StoreIntoObjectNoBarrier(array, element_address, value);
}
} else {
UNREACHABLE();
}
#if defined(USING_MEMORY_SANITIZER)

View file

@ -157,6 +157,12 @@ DEFINE_BACKEND(TailCall, (NoLocation, Fixed<Register, ARGS_DESC_REG>)) {
LocationSummary* MemoryCopyInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a MemoryCopy
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT((!IsTypedDataBaseClassId(src_cid_) &&
!IsTypedDataBaseClassId(dest_cid_)) ||
opt);
const intptr_t kNumInputs = 5;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
@ -278,10 +284,13 @@ void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
if (array_rep != kTagged) {
// Do nothing, array_reg already contains the payload address.
} else if (IsTypedDataBaseClassId(array_cid)) {
// The incoming array must have been proven to be an internal typed data
// object, where the payload is in the object and we can just offset.
ASSERT_EQUAL(array_rep, kTagged);
__ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
offset = compiler::target::TypedData::payload_offset() - kHeapObjectTag;
} else {
ASSERT_EQUAL(array_rep, kTagged);
ASSERT(!IsExternalPayloadClassId(array_cid));
switch (array_cid) {
case kOneByteStringCid:
offset =
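
The internal-typed-data branch in this hunk boils down to a fixed-offset address
computation; as a plain C++ sketch (not VM code, with the concrete offset being
whatever compiler::target::TypedData::payload_offset() returns):

    #include <cstdint>

    // Element address for an *internal* typed data object: the payload lives
    // inside the object itself, so no indirection through a data field.
    uintptr_t InternalElementAddress(uintptr_t tagged_array,
                                     intptr_t payload_offset,
                                     intptr_t index,
                                     intptr_t element_size) {
      const intptr_t kHeapObjectTag = 1;  // Dart heap pointers carry tag bit 1.
      return tagged_array - kHeapObjectTag + payload_offset +
             index * element_size;
    }
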
@ -1802,11 +1811,16 @@ void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a LoadIndexed
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
const intptr_t kNumInputs = 2;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
locs->set_in(kArrayPos, Location::RequiresRegister());
// For tagged index with index_scale=1 as well as untagged index with
// index_scale=16 we need a writable register due to addressing mode
// restrictions on X64.
@ -1816,17 +1830,18 @@ LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
const bool can_be_constant =
index()->BindsToConstant() &&
compiler::Assembler::AddressCanHoldConstantIndex(
index()->BoundConstant(), IsExternal(), class_id(), index_scale());
index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
locs->set_in(
1, can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: (need_writable_index_register ? Location::WritableRegister()
: Location::RequiresRegister()));
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
kIndexPos,
can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: (need_writable_index_register ? Location::WritableRegister()
: Location::RequiresRegister()));
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
if (RepresentationUtils::IsUnboxedInteger(rep)) {
locs->set_out(0, Location::RequiresRegister());
} else if (RepresentationUtils::IsUnboxed(rep)) {
locs->set_out(0, Location::RequiresFpuRegister());
} else {
locs->set_out(0, Location::RequiresRegister());
@ -1836,8 +1851,8 @@ LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The array register points to the backing store for external arrays.
const Register array = locs()->in(0).reg();
const Location index = locs()->in(1);
const Register array = locs()->in(kArrayPos).reg();
const Location index = locs()->in(kIndexPos);
bool index_unboxed = index_unboxed_;
if (index.IsRegister()) {
@ -1860,74 +1875,37 @@ void LoadIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Address element_address =
index.IsRegister() ? compiler::Assembler::ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale_,
IsUntagged(), class_id(), index_scale_,
index_unboxed, array, index.reg())
: compiler::Assembler::ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale_, array,
IsUntagged(), class_id(), index_scale_, array,
Smi::Cast(index.constant()).Value());
if ((representation() == kUnboxedFloat) ||
(representation() == kUnboxedDouble) ||
(representation() == kUnboxedFloat32x4) ||
(representation() == kUnboxedInt32x4) ||
(representation() == kUnboxedFloat64x2)) {
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
ASSERT(representation() == Boxing::NativeRepresentation(rep));
if (RepresentationUtils::IsUnboxedInteger(rep)) {
Register result = locs()->out(0).reg();
__ LoadFromOffset(result, element_address,
RepresentationUtils::OperandSize(rep));
} else if (RepresentationUtils::IsUnboxed(rep)) {
XmmRegister result = locs()->out(0).fpu_reg();
if (class_id() == kTypedDataFloat32ArrayCid) {
if (rep == kUnboxedFloat) {
// Load single precision float.
__ movss(result, element_address);
} else if (class_id() == kTypedDataFloat64ArrayCid) {
} else if (rep == kUnboxedDouble) {
__ movsd(result, element_address);
} else {
ASSERT((class_id() == kTypedDataInt32x4ArrayCid) ||
(class_id() == kTypedDataFloat32x4ArrayCid) ||
(class_id() == kTypedDataFloat64x2ArrayCid));
ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
rep == kUnboxedFloat64x2);
__ movups(result, element_address);
}
return;
}
Register result = locs()->out(0).reg();
switch (class_id()) {
case kTypedDataInt32ArrayCid:
ASSERT(representation() == kUnboxedInt32);
__ movsxd(result, element_address);
break;
case kTypedDataUint32ArrayCid:
ASSERT(representation() == kUnboxedUint32);
__ movl(result, element_address);
break;
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
ASSERT(representation() == kUnboxedInt64);
__ movq(result, element_address);
break;
case kTypedDataInt8ArrayCid:
ASSERT(representation() == kUnboxedIntPtr);
__ movsxb(result, element_address);
break;
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kOneByteStringCid:
ASSERT(representation() == kUnboxedIntPtr);
__ movzxb(result, element_address);
break;
case kTypedDataInt16ArrayCid:
ASSERT(representation() == kUnboxedIntPtr);
__ movsxw(result, element_address);
break;
case kTypedDataUint16ArrayCid:
case kTwoByteStringCid:
ASSERT(representation() == kUnboxedIntPtr);
__ movzxw(result, element_address);
break;
default:
ASSERT(representation() == kTagged);
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
(class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
__ LoadCompressed(result, element_address);
break;
} else {
ASSERT(rep == kTagged);
ASSERT((class_id() == kArrayCid) || (class_id() == kImmutableArrayCid) ||
(class_id() == kTypeArgumentsCid) || (class_id() == kRecordCid));
Register result = locs()->out(0).reg();
__ LoadCompressed(result, element_address);
}
}
@ -2003,6 +1981,11 @@ void LoadCodeUnitsInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
// The compiler must optimize any function that includes a StoreIndexed
// instruction that uses typed data cids, since extracting the payload address
// from views is done in a compiler pass after all code motion has happened.
ASSERT(!IsTypedDataBaseClassId(class_id()) || opt);
const intptr_t kNumInputs = 3;
const intptr_t kNumTemps =
class_id() == kArrayCid && ShouldEmitStoreBarrier() ? 1 : 0;
@ -2018,57 +2001,35 @@ LocationSummary* StoreIndexedInstr::MakeLocationSummary(Zone* zone,
const bool can_be_constant =
index()->BindsToConstant() &&
compiler::Assembler::AddressCanHoldConstantIndex(
index()->BoundConstant(), IsExternal(), class_id(), index_scale());
index()->BoundConstant(), IsUntagged(), class_id(), index_scale());
locs->set_in(
1, can_be_constant
? Location::Constant(index()->definition()->AsConstant())
: (need_writable_index_register ? Location::WritableRegister()
: Location::RequiresRegister()));
switch (class_id()) {
case kArrayCid:
locs->set_in(2, ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
if (ShouldEmitStoreBarrier()) {
locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
}
break;
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
// TODO(fschneider): Add location constraint for byte registers (RAX,
// RBX, RCX, RDX) instead of using a fixed register.
locs->set_in(2, LocationFixedRegisterOrSmiConstant(value(), RAX));
break;
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
// Writable register because the value must be untagged before storing.
locs->set_in(2, Location::WritableRegister());
break;
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
} else {
locs->set_in(2, Location::RequiresRegister());
break;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
// TODO(srdjan): Support Float64 constants.
locs->set_in(2, Location::RequiresFpuRegister());
break;
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
case kTypedDataFloat32x4ArrayCid:
locs->set_in(2, Location::RequiresFpuRegister());
break;
default:
UNREACHABLE();
return nullptr;
}
} else if (RepresentationUtils::IsUnboxed(rep)) {
// TODO(srdjan): Support Float64 constants.
locs->set_in(2, Location::RequiresFpuRegister());
} else if (class_id() == kArrayCid) {
locs->set_in(2, ShouldEmitStoreBarrier()
? Location::RegisterLocation(kWriteBarrierValueReg)
: LocationRegisterOrConstant(value()));
if (ShouldEmitStoreBarrier()) {
locs->set_in(0, Location::RegisterLocation(kWriteBarrierObjectReg));
locs->set_temp(0, Location::RegisterLocation(kWriteBarrierSlotReg));
}
} else {
UNREACHABLE();
}
return locs;
}
@ -2099,32 +2060,43 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Address element_address =
index.IsRegister() ? compiler::Assembler::ElementAddressForRegIndex(
IsExternal(), class_id(), index_scale_,
IsUntagged(), class_id(), index_scale_,
index_unboxed, array, index.reg())
: compiler::Assembler::ElementAddressForIntIndex(
IsExternal(), class_id(), index_scale_, array,
IsUntagged(), class_id(), index_scale_, array,
Smi::Cast(index.constant()).Value());
switch (class_id()) {
case kArrayCid:
if (ShouldEmitStoreBarrier()) {
Register value = locs()->in(2).reg();
Register slot = locs()->temp(0).reg();
__ leaq(slot, element_address);
__ StoreCompressedIntoArray(array, slot, value, CanValueBeSmi());
} else if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreCompressedIntoObjectNoBarrier(array, element_address, constant);
} else {
Register value = locs()->in(2).reg();
__ StoreCompressedIntoObjectNoBarrier(array, element_address, value);
auto const rep =
RepresentationUtils::RepresentationOfArrayElement(class_id());
ASSERT(RequiredInputRepresentation(2) == Boxing::NativeRepresentation(rep));
if (IsClampedTypedDataBaseClassId(class_id())) {
ASSERT(rep == kUnboxedUint8);
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
intptr_t value = constant.Value();
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
break;
case kOneByteStringCid:
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ArrayCid:
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
__ movb(element_address, compiler::Immediate(static_cast<int8_t>(value)));
} else {
const Register storedValueReg = locs()->in(2).reg();
compiler::Label store_value, store_0xff;
__ CompareImmediate(storedValueReg, compiler::Immediate(0xFF));
__ j(BELOW_EQUAL, &store_value, compiler::Assembler::kNearJump);
// Clamp to 0x0 or 0xFF respectively.
__ j(GREATER, &store_0xff);
__ xorq(storedValueReg, storedValueReg);
__ jmp(&store_value, compiler::Assembler::kNearJump);
__ Bind(&store_0xff);
__ LoadImmediate(storedValueReg, compiler::Immediate(0xFF));
__ Bind(&store_value);
__ movb(element_address, ByteRegisterOf(storedValueReg));
}
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
if (rep == kUnboxedUint8 || rep == kUnboxedInt8) {
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
__ movb(element_address,
@ -2132,92 +2104,43 @@ void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
} else {
__ movb(element_address, ByteRegisterOf(locs()->in(2).reg()));
}
break;
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ClampedArrayCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
if (locs()->in(2).IsConstant()) {
const Smi& constant = Smi::Cast(locs()->in(2).constant());
intptr_t value = constant.Value();
// Clamp to 0x0 or 0xFF respectively.
if (value > 0xFF) {
value = 0xFF;
} else if (value < 0) {
value = 0;
}
__ movb(element_address,
compiler::Immediate(static_cast<int8_t>(value)));
} else {
const Register storedValueReg = locs()->in(2).reg();
compiler::Label store_value, store_0xff;
__ CompareImmediate(storedValueReg, compiler::Immediate(0xFF));
__ j(BELOW_EQUAL, &store_value, compiler::Assembler::kNearJump);
// Clamp to 0x0 or 0xFF respectively.
__ j(GREATER, &store_0xff);
__ xorq(storedValueReg, storedValueReg);
__ jmp(&store_value, compiler::Assembler::kNearJump);
__ Bind(&store_0xff);
__ LoadImmediate(storedValueReg, compiler::Immediate(0xFF));
__ Bind(&store_value);
__ movb(element_address, ByteRegisterOf(storedValueReg));
}
break;
}
case kTwoByteStringCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid: {
ASSERT(RequiredInputRepresentation(2) == kUnboxedIntPtr);
} else {
Register value = locs()->in(2).reg();
__ movw(element_address, value);
break;
__ StoreToOffset(value, element_address,
RepresentationUtils::OperandSize(rep));
}
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid: {
Register value = locs()->in(2).reg();
__ movl(element_address, value);
break;
}
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid: {
Register value = locs()->in(2).reg();
__ movq(element_address, value);
break;
}
case kTypedDataFloat32ArrayCid:
} else if (RepresentationUtils::IsUnboxed(rep)) {
if (rep == kUnboxedFloat) {
__ movss(element_address, locs()->in(2).fpu_reg());
break;
case kTypedDataFloat64ArrayCid:
} else if (rep == kUnboxedDouble) {
__ movsd(element_address, locs()->in(2).fpu_reg());
break;
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
case kTypedDataFloat32x4ArrayCid:
} else {
ASSERT(rep == kUnboxedInt32x4 || rep == kUnboxedFloat32x4 ||
rep == kUnboxedFloat64x2);
__ movups(element_address, locs()->in(2).fpu_reg());
break;
default:
UNREACHABLE();
}
} else if (class_id() == kArrayCid) {
ASSERT(rep == kTagged);
if (ShouldEmitStoreBarrier()) {
Register value = locs()->in(2).reg();
Register slot = locs()->temp(0).reg();
__ leaq(slot, element_address);
__ StoreCompressedIntoArray(array, slot, value, CanValueBeSmi());
} else if (locs()->in(2).IsConstant()) {
const Object& constant = locs()->in(2).constant();
__ StoreCompressedIntoObjectNoBarrier(array, element_address, constant);
} else {
Register value = locs()->in(2).reg();
__ StoreCompressedIntoObjectNoBarrier(array, element_address, value);
}
} else {
UNREACHABLE();
}
#if defined(USING_MEMORY_SANITIZER)
__ leaq(TMP, element_address);
intptr_t length_in_bytes;
if (IsTypedDataBaseClassId(class_id_)) {
length_in_bytes = compiler::TypedDataElementSizeInBytes(class_id_);
} else {
switch (class_id_) {
case kArrayCid:
length_in_bytes = compiler::target::kWordSize;
break;
case kOneByteStringCid:
length_in_bytes = 1;
break;
case kTwoByteStringCid:
length_in_bytes = 2;
break;
default:
FATAL("Unknown cid: %" Pd, class_id_);
}
}
const intptr_t length_in_bytes = RepresentationUtils::ValueSize(
RepresentationUtils::RepresentationOfArrayElement(class_id()));
__ MsanUnpoison(TMP, length_in_bytes);
#endif
}
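
The Uint8Clamped store paths earlier in this function saturate rather than
truncate; as a standalone sketch (plain C++, not VM code):

    #include <cstdint>

    // Values below 0 become 0, values above 255 become 255, everything else
    // is stored unchanged.
    uint8_t ClampToByte(int64_t value) {
      if (value < 0) return 0;
      if (value > 0xFF) return 0xFF;
      return static_cast<uint8_t>(value);
    }
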

View file

@ -2832,50 +2832,26 @@ static bool InlineGetIndexed(FlowGraph* flow_graph,
new (Z) FunctionEntryInstr(graph_entry, flow_graph->allocate_block_id(),
call->GetBlock()->try_index(), DeoptId::kNone);
(*entry)->InheritDeoptTarget(Z, call);
Instruction* cursor = *entry;
*last = *entry;
array_cid = PrepareInlineIndexedOp(flow_graph, call, array_cid, &array,
&index, &cursor);
intptr_t deopt_id = DeoptId::kNone;
if ((array_cid == kTypedDataInt32ArrayCid) ||
(array_cid == kTypedDataUint32ArrayCid)) {
// Deoptimization may be needed if result does not always fit in a Smi.
deopt_id =
(compiler::target::kSmiBits >= 32) ? DeoptId::kNone : call->deopt_id();
}
array_cid =
PrepareInlineIndexedOp(flow_graph, call, array_cid, &array, &index, last);
// Array load and return.
intptr_t index_scale = compiler::target::Instance::ElementSizeFor(array_cid);
LoadIndexedInstr* load = new (Z) LoadIndexedInstr(
new (Z) Value(array), new (Z) Value(index), /*index_unboxed=*/false,
index_scale, array_cid, kAlignedAccess, deopt_id, call->source(),
ResultType(call));
*result = new (Z) LoadIndexedInstr(
new (Z) Value(array), new (Z) Value(index),
/*index_unboxed=*/false, index_scale, array_cid, kAlignedAccess,
call->deopt_id(), call->source(), ResultType(call));
*last = flow_graph->AppendTo(*last, *result, call->env(), FlowGraph::kValue);
*last = load;
cursor = flow_graph->AppendTo(
cursor, load, deopt_id != DeoptId::kNone ? call->env() : nullptr,
FlowGraph::kValue);
const bool value_needs_boxing =
array_cid == kTypedDataInt8ArrayCid ||
array_cid == kTypedDataInt16ArrayCid ||
array_cid == kTypedDataUint8ArrayCid ||
array_cid == kTypedDataUint8ClampedArrayCid ||
array_cid == kTypedDataUint16ArrayCid ||
array_cid == kExternalTypedDataUint8ArrayCid ||
array_cid == kExternalTypedDataUint8ClampedArrayCid;
if (array_cid == kTypedDataFloat32ArrayCid) {
*last = new (Z) FloatToDoubleInstr(new (Z) Value(load), deopt_id);
flow_graph->AppendTo(cursor, *last,
deopt_id != DeoptId::kNone ? call->env() : nullptr,
FlowGraph::kValue);
} else if (value_needs_boxing) {
*last = BoxInstr::Create(kUnboxedIntPtr, new Value(load));
flow_graph->AppendTo(cursor, *last, nullptr, FlowGraph::kValue);
if (LoadIndexedInstr::ReturnRepresentation(array_cid) == kUnboxedFloat) {
*result =
new (Z) FloatToDoubleInstr(new (Z) Value(*result), call->deopt_id());
*last =
flow_graph->AppendTo(*last, *result, call->env(), FlowGraph::kValue);
}
*result = (*last)->AsDefinition();
return true;
}
@ -2891,6 +2867,7 @@ static bool InlineSetIndexed(FlowGraph* flow_graph,
Instruction** last,
Definition** result) {
intptr_t array_cid = MethodRecognizer::MethodKindToReceiverCid(kind);
auto const rep = StoreIndexedInstr::ValueRepresentation(array_cid);
Definition* array = receiver;
Definition* index = call->ArgumentAt(1);
@ -2900,6 +2877,7 @@ static bool InlineSetIndexed(FlowGraph* flow_graph,
new (Z) FunctionEntryInstr(graph_entry, flow_graph->allocate_block_id(),
call->GetBlock()->try_index(), DeoptId::kNone);
(*entry)->InheritDeoptTarget(Z, call);
*last = *entry;
bool is_unchecked_call = false;
if (StaticCallInstr* static_call = call->AsStaticCall()) {
@ -2914,7 +2892,6 @@ static bool InlineSetIndexed(FlowGraph* flow_graph,
instance_call->entry_kind() == Code::EntryKind::kUnchecked;
}
Instruction* cursor = *entry;
if (!is_unchecked_call &&
(kind != MethodRecognizer::kObjectArraySetIndexedUnchecked &&
kind != MethodRecognizer::kGrowableArraySetIndexedUnchecked)) {
@ -2924,68 +2901,35 @@ static bool InlineSetIndexed(FlowGraph* flow_graph,
const AbstractType& value_type =
AbstractType::ZoneHandle(Z, target.ParameterTypeAt(2));
Definition* type_args = nullptr;
switch (array_cid) {
case kArrayCid:
case kGrowableObjectArrayCid: {
const Class& instantiator_class = Class::Handle(Z, target.Owner());
LoadFieldInstr* load_type_args = new (Z)
LoadFieldInstr(new (Z) Value(array),
Slot::GetTypeArgumentsSlotFor(flow_graph->thread(),
instantiator_class),
call->source());
cursor = flow_graph->AppendTo(cursor, load_type_args, nullptr,
FlowGraph::kValue);
type_args = load_type_args;
break;
}
case kTypedDataInt8ArrayCid:
FALL_THROUGH;
case kTypedDataUint8ArrayCid:
FALL_THROUGH;
case kTypedDataUint8ClampedArrayCid:
FALL_THROUGH;
case kExternalTypedDataUint8ArrayCid:
FALL_THROUGH;
case kExternalTypedDataUint8ClampedArrayCid:
FALL_THROUGH;
case kTypedDataInt16ArrayCid:
FALL_THROUGH;
case kTypedDataUint16ArrayCid:
FALL_THROUGH;
case kTypedDataInt32ArrayCid:
FALL_THROUGH;
case kTypedDataUint32ArrayCid:
FALL_THROUGH;
case kTypedDataInt64ArrayCid:
FALL_THROUGH;
case kTypedDataUint64ArrayCid:
ASSERT(value_type.IsIntType());
FALL_THROUGH;
case kTypedDataFloat32ArrayCid:
FALL_THROUGH;
case kTypedDataFloat64ArrayCid: {
type_args = flow_graph->constant_null();
ASSERT((array_cid != kTypedDataFloat32ArrayCid &&
array_cid != kTypedDataFloat64ArrayCid) ||
value_type.IsDoubleType());
ASSERT(value_type.IsInstantiated());
break;
}
case kTypedDataFloat32x4ArrayCid: {
type_args = flow_graph->constant_null();
if (rep == kTagged) {
const Class& instantiator_class = Class::Handle(Z, target.Owner());
LoadFieldInstr* load_type_args =
new (Z) LoadFieldInstr(new (Z) Value(array),
Slot::GetTypeArgumentsSlotFor(
flow_graph->thread(), instantiator_class),
call->source());
*last = flow_graph->AppendTo(*last, load_type_args, call->env(),
FlowGraph::kValue);
type_args = load_type_args;
} else if (!RepresentationUtils::IsUnboxed(rep)) {
UNREACHABLE();
} else {
type_args = flow_graph->constant_null();
ASSERT(value_type.IsInstantiated());
#if defined(DEBUG)
if (rep == kUnboxedFloat || rep == kUnboxedDouble) {
ASSERT(value_type.IsDoubleType());
} else if (rep == kUnboxedFloat32x4) {
ASSERT(value_type.IsFloat32x4Type());
ASSERT(value_type.IsInstantiated());
break;
}
case kTypedDataFloat64x2ArrayCid: {
type_args = flow_graph->constant_null();
} else if (rep == kUnboxedInt32x4) {
ASSERT(value_type.IsInt32x4Type());
} else if (rep == kUnboxedFloat64x2) {
ASSERT(value_type.IsFloat64x2Type());
ASSERT(value_type.IsInstantiated());
break;
} else {
ASSERT(RepresentationUtils::IsUnboxedInteger(rep));
ASSERT(value_type.IsIntType());
}
default:
// TODO(fschneider): Add support for other array types.
UNREACHABLE();
#endif
}
if (exactness != nullptr && exactness->is_exact) {
@ -2997,83 +2941,51 @@ static bool InlineSetIndexed(FlowGraph* flow_graph,
source, new (Z) Value(stored_value), new (Z) Value(dst_type),
new (Z) Value(type_args), new (Z) Value(function_type_args),
Symbols::Value(), call->deopt_id());
cursor = flow_graph->AppendSpeculativeTo(cursor, assert_value,
call->env(), FlowGraph::kValue);
*last = flow_graph->AppendSpeculativeTo(*last, assert_value, call->env(),
FlowGraph::kValue);
}
}
array_cid = PrepareInlineIndexedOp(flow_graph, call, array_cid, &array,
&index, &cursor);
array_cid =
PrepareInlineIndexedOp(flow_graph, call, array_cid, &array, &index, last);
const bool is_typed_data_store =
(IsTypedDataClassId(array_cid) || IsTypedDataViewClassId(array_cid) ||
IsExternalTypedDataClassId(array_cid));
const bool is_typed_data_store = IsTypedDataBaseClassId(array_cid);
// Check if store barrier is needed. Byte arrays don't need a store barrier.
StoreBarrierType needs_store_barrier =
is_typed_data_store ? kNoStoreBarrier : kEmitStoreBarrier;
const bool value_needs_unboxing =
array_cid == kTypedDataInt8ArrayCid ||
array_cid == kTypedDataInt16ArrayCid ||
array_cid == kTypedDataInt32ArrayCid ||
array_cid == kTypedDataUint8ArrayCid ||
array_cid == kTypedDataUint8ClampedArrayCid ||
array_cid == kTypedDataUint16ArrayCid ||
array_cid == kTypedDataUint32ArrayCid ||
array_cid == kExternalTypedDataUint8ArrayCid ||
array_cid == kExternalTypedDataUint8ClampedArrayCid;
// We know that the incoming type matches, but we still need to handle the
// null check.
if (is_typed_data_store && !IsCompilingForSoundNullSafety()) {
String& name = String::ZoneHandle(Z, target.name());
Instruction* check = new (Z) CheckNullInstr(
new (Z) Value(stored_value), name, call->deopt_id(), call->source());
cursor =
flow_graph->AppendTo(cursor, check, call->env(), FlowGraph::kEffect);
*last = flow_graph->AppendTo(*last, check, call->env(), FlowGraph::kEffect);
}
if (array_cid == kTypedDataFloat32ArrayCid) {
if (rep == kUnboxedFloat) {
stored_value = new (Z)
DoubleToFloatInstr(new (Z) Value(stored_value), call->deopt_id());
cursor =
flow_graph->AppendTo(cursor, stored_value, nullptr, FlowGraph::kValue);
} else if (value_needs_unboxing) {
Representation representation = kNoRepresentation;
switch (array_cid) {
case kTypedDataInt32ArrayCid:
representation = kUnboxedInt32;
break;
case kTypedDataUint32ArrayCid:
representation = kUnboxedUint32;
break;
case kTypedDataInt8ArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kTypedDataUint16ArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
representation = kUnboxedIntPtr;
break;
default:
UNREACHABLE();
}
stored_value = UnboxInstr::Create(
representation, new (Z) Value(stored_value), call->deopt_id());
stored_value->AsUnboxInteger()->mark_truncating();
cursor = flow_graph->AppendTo(cursor, stored_value, call->env(),
FlowGraph::kValue);
*last = flow_graph->AppendTo(*last, stored_value, call->env(),
FlowGraph::kValue);
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
// Insert explicit unboxing instructions with truncation to avoid relying
// on [SelectRepresentations] which doesn't mark them as truncating.
stored_value =
UnboxInstr::Create(rep, new (Z) Value(stored_value), call->deopt_id(),
Instruction::kNotSpeculative);
*last = flow_graph->AppendTo(*last, stored_value, call->env(),
FlowGraph::kValue);
}
const intptr_t index_scale =
compiler::target::Instance::ElementSizeFor(array_cid);
*last = new (Z) StoreIndexedInstr(
auto* const store = new (Z) StoreIndexedInstr(
new (Z) Value(array), new (Z) Value(index), new (Z) Value(stored_value),
needs_store_barrier, /*index_unboxed=*/false, index_scale, array_cid,
kAlignedAccess, call->deopt_id(), call->source());
flow_graph->AppendTo(cursor, *last, call->env(), FlowGraph::kEffect);
*last = flow_graph->AppendTo(*last, store, call->env(), FlowGraph::kEffect);
// We need a return value to replace uses of the original definition. However,
// the final instruction is a use of 'void operator[]=()', so we use null.
*result = flow_graph->constant_null();
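
The truncating unbox inserted above means an integer store into a narrow typed
data element simply keeps the low bits of the value; numerically (plain C++
illustration, not VM code):

    #include <cassert>
    #include <cstdint>

    int main() {
      // A store into a Uint8List element behaves like a cast to uint8_t.
      assert(static_cast<uint8_t>(257) == 1);   // only the low 8 bits survive
      assert(static_cast<uint8_t>(-1) == 255);  // negative values wrap, not clamp
      return 0;
    }
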
@ -3212,7 +3124,7 @@ static Definition* PrepareInlineStringIndexOp(FlowGraph* flow_graph,
LoadIndexedInstr* load_indexed = new (Z) LoadIndexedInstr(
new (Z) Value(str), new (Z) Value(index), /*index_unboxed=*/false,
compiler::target::Instance::ElementSizeFor(cid), cid, kAlignedAccess,
DeoptId::kNone, call->source());
call->deopt_id(), call->source());
cursor =
flow_graph->AppendTo(cursor, load_indexed, nullptr, FlowGraph::kValue);
@ -4564,24 +4476,22 @@ bool FlowGraphInliner::TryInlineRecognizedMethod(
Definition* index = call->ArgumentAt(1);
Definition* value = call->ArgumentAt(2);
auto env = call->deopt_id() != DeoptId::kNone ? call->env() : nullptr;
// Insert explicit unboxing instructions with truncation to avoid relying
// on [SelectRepresentations] which doesn't mark them as truncating.
value =
UnboxInstr::Create(kUnboxedIntPtr, new (Z) Value(value),
call->deopt_id(), Instruction::kNotSpeculative);
value->AsUnboxInteger()->mark_truncating();
flow_graph->AppendTo(*entry, value, env, FlowGraph::kValue);
const bool is_onebyte = kind == MethodRecognizer::kWriteIntoOneByteString;
const intptr_t index_scale = is_onebyte ? 1 : 2;
const intptr_t cid = is_onebyte ? kOneByteStringCid : kTwoByteStringCid;
// Insert explicit unboxing instructions with truncation to avoid relying
// on [SelectRepresentations] which doesn't mark them as truncating.
value = UnboxInstr::Create(StoreIndexedInstr::ValueRepresentation(cid),
new (Z) Value(value), call->deopt_id(),
Instruction::kNotSpeculative);
flow_graph->AppendTo(*entry, value, call->env(), FlowGraph::kValue);
*last = new (Z) StoreIndexedInstr(
new (Z) Value(str), new (Z) Value(index), new (Z) Value(value),
kNoStoreBarrier, /*index_unboxed=*/false, index_scale, cid,
kAlignedAccess, call->deopt_id(), call->source());
flow_graph->AppendTo(value, *last, env, FlowGraph::kEffect);
flow_graph->AppendTo(value, *last, call->env(), FlowGraph::kEffect);
// We need a return value to replace uses of the original definition.
// The final instruction is a use of 'void operator[]=()', so we use null.

View file

@ -5,6 +5,7 @@
#include "vm/compiler/backend/locations.h"
#include <limits>
#include "vm/class_id.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/il_printer.h"
#include "vm/log.h"
@ -83,10 +84,11 @@ Representation RepresentationUtils::RepresentationOfArrayElement(
kTypedDataCidRemainderInternal;
}
switch (cid) {
case kImmutableArrayCid:
#define ARRAY_CASE(Name) case k##Name##Cid:
CLASS_LIST_ARRAYS(ARRAY_CASE)
#undef ARRAY_CASE
case kRecordCid:
case kTypeArgumentsCid:
case kArrayCid:
return kTagged;
case kTypedDataInt8ArrayCid:
return kUnboxedInt8;

View file

@ -262,6 +262,8 @@ static void RunMemoryCopyInstrTest(intptr_t src_start,
kMoveGlob,
{kMatchAndMoveMemoryCopy, &memory_copy},
}));
EXPECT_EQ(kUntagged, memory_copy->src()->definition()->representation());
EXPECT_EQ(kUntagged, memory_copy->dest()->definition()->representation());
EXPECT(memory_copy->src_start()->BindsToConstant());
EXPECT(memory_copy->dest_start()->BindsToConstant());
EXPECT(memory_copy->length()->BindsToConstant());
@ -388,6 +390,8 @@ static void RunMemoryCopyInstrTest(intptr_t src_start,
kMoveGlob,
{kMatchAndMoveMemoryCopy, &memory_copy},
}));
EXPECT_EQ(kUntagged, memory_copy->src()->definition()->representation());
EXPECT_EQ(kUntagged, memory_copy->dest()->definition()->representation());
EXPECT(!memory_copy->src_start()->BindsToConstant());
EXPECT(!memory_copy->dest_start()->BindsToConstant());
EXPECT(!memory_copy->length()->BindsToConstant());

View file

@ -192,21 +192,21 @@ class Place : public ValueObject {
kNoSize,
// 1 byte (Int8List, Uint8List, Uint8ClampedList).
kInt8,
k1Byte,
// 2 bytes (Int16List, Uint16List).
kInt16,
k2Bytes,
// 4 bytes (Int32List, Uint32List, Float32List).
kInt32,
k4Bytes,
// 8 bytes (Int64List, Uint64List, Float64List).
kInt64,
k8Bytes,
// 16 bytes (Int32x4List, Float32x4List, Float64x2List).
kInt128,
k16Bytes,
kLargestElementSize = kInt128,
kLargestElementSize = k16Bytes,
};
Place(const Place& other)
@ -373,8 +373,8 @@ class Place : public ValueObject {
// typed array element that contains this typed array element.
// In other words this method computes the only possible place with the given
// size that can alias this place (due to alignment restrictions).
// For example for X[9|kInt8] and target size kInt32 we would return
// X[8|kInt32].
// For example for X[9|k1Byte] and target size k4Bytes we would return
// X[8|k4Bytes].
Place ToLargerElement(ElementSize to) const {
ASSERT(kind() == kConstantIndexed);
ASSERT(element_size() != kNoSize);
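
A worked instance of the X[9|k1Byte] example from the comment above (plain C++
sketch; RoundDownToElement stands in for the actual rounding helper):

    #include <cassert>
    #include <cstdint>

    // A 1-byte access at byte offset 9 aliases the 4-byte element starting at
    // offset 8: round the offset down to a multiple of the larger size.
    intptr_t RoundDownToElement(intptr_t byte_offset, intptr_t element_size) {
      return byte_offset & ~(element_size - 1);
    }

    int main() {
      assert(RoundDownToElement(9, 4) == 8);    // X[9|k1Byte] -> X[8|k4Bytes]
      assert(RoundDownToElement(10, 2) == 10);  // X[10|k2Bytes] is already aligned
      return 0;
    }
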
@ -387,8 +387,8 @@ class Place : public ValueObject {
// S/S' - 1 return alias X[ByteOffs + S'*index|S'] - this is the byte offset
// of a smaller typed array element which is contained within this typed
// array element.
// For example X[8|kInt32] contains inside X[8|kInt16] (index is 0) and
// X[10|kInt16] (index is 1).
// For example X[8|k4Bytes] contains inside X[8|k2Bytes] (index is 0) and
// X[10|k2Bytes] (index is 1).
Place ToSmallerElement(ElementSize to, intptr_t index) const {
ASSERT(kind() == kConstantIndexed);
ASSERT(element_size() != kNoSize);
@ -536,40 +536,42 @@ class Place : public ValueObject {
flags_ = ElementSizeBits::update(scale, flags_);
}
void SetIndex(Definition* index, intptr_t scale, intptr_t class_id) {
void SetIndex(Definition* index, intptr_t scale, classid_t class_id) {
ConstantInstr* index_constant = index->AsConstant();
if ((index_constant != nullptr) && index_constant->value().IsSmi()) {
const intptr_t index_value = Smi::Cast(index_constant->value()).Value();
const ElementSize size = ElementSizeFor(class_id);
const bool is_typed_access = (size != kNoSize);
// Indexing into [UntaggedTypedDataView]/[UntaggedExternalTypedData]
// happens via an untagged load of the `_data` field (which points to C
// memory).
//
// Indexing into dart:ffi's [UntaggedPointer] happens via loading of the
// `c_memory_address_`, converting it to an integer, doing some arithmetic
// and finally using IntConverterInstr to convert to an untagged
// representation.
//
// In both cases the array used for load/store has untagged
// representation.
const bool can_be_view = instance_->representation() == kUntagged;
// If we are writing into the typed data scale the index to
// get byte offset. Otherwise ignore the scale.
if (!is_typed_access) {
scale = 1;
// Places only need to scale the index for typed data objects, as other
// types of arrays (for which ElementSizeFor returns kNoSize) cannot be
// accessed at different scales.
const ElementSize size = ElementSizeFor(class_id);
if (size == kNoSize) {
set_kind(kConstantIndexed);
set_element_size(size);
index_constant_ = index_value;
return;
}
// Guard against potential multiplication overflow and negative indices.
if ((0 <= index_value) && (index_value < (kMaxInt32 / scale))) {
const intptr_t scaled_index = index_value * scale;
// If the indexed array is a subclass of PointerBase, then it must be
// treated as a view unless the class id of the array is known at
// compile time to be an internal typed data object.
//
// Indexes into untagged pointers only happen for external typed data
// objects or dart:ffi's Pointer, but those should also be treated like
// views, since anyone who has access to the underlying pointer can
// modify the corresponding memory.
auto const cid = instance_->Type()->ToCid();
const bool can_be_view = instance_->representation() == kUntagged ||
!IsTypedDataClassId(cid);
// Guard against unaligned byte offsets and access through raw
// memory pointer (which can be pointing into another typed data).
if (!is_typed_access ||
(!can_be_view &&
Utils::IsAligned(scaled_index, ElementSizeMultiplier(size)))) {
if (!can_be_view &&
Utils::IsAligned(scaled_index, ElementSizeMultiplier(size))) {
set_kind(kConstantIndexed);
set_element_size(size);
index_constant_ = scaled_index;
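
The conservative can_be_view handling above reflects that two differently typed
views can overlap the same bytes; a plain C++ analogue (not VM code):

    #include <cstdint>
    #include <cstring>

    int main() {
      alignas(4) uint8_t buffer[4] = {0, 0, 0, 0};
      buffer[1] = 0xFF;               // write through a byte-sized "view"
      uint32_t word;
      std::memcpy(&word, buffer, 4);  // read the same bytes as a 4-byte "view"
      return word == 0 ? 1 : 0;       // the wider read observes the byte write
    }
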
@ -590,50 +592,34 @@ class Place : public ValueObject {
ElementSizeBits::encode(scale);
}
static ElementSize ElementSizeFor(intptr_t class_id) {
switch (class_id) {
case kArrayCid:
case kImmutableArrayCid:
case kOneByteStringCid:
case kTwoByteStringCid:
// Object arrays and strings do not allow accessing them through
// different types. No need to attach scale.
return kNoSize;
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
return kInt8;
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
return kInt16;
case kTypedDataInt32ArrayCid:
case kTypedDataUint32ArrayCid:
case kTypedDataFloat32ArrayCid:
return kInt32;
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
case kTypedDataFloat64ArrayCid:
return kInt64;
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
return kInt128;
static ElementSize ElementSizeFor(classid_t class_id) {
// Object arrays and strings do not allow accessing them through
// different types. No need to attach scale.
if (!IsTypedDataBaseClassId(class_id)) return kNoSize;
const auto rep =
RepresentationUtils::RepresentationOfArrayElement(class_id);
if (!RepresentationUtils::IsUnboxed(rep)) return kNoSize;
switch (RepresentationUtils::ValueSize(rep)) {
case 1:
return k1Byte;
case 2:
return k2Bytes;
case 4:
return k4Bytes;
case 8:
return k8Bytes;
case 16:
return k16Bytes;
default:
UNREACHABLE();
FATAL("Unhandled value size for representation %s",
RepresentationUtils::ToCString(rep));
return kNoSize;
}
}
static intptr_t ElementSizeMultiplier(ElementSize size) {
return 1 << (static_cast<intptr_t>(size) - static_cast<intptr_t>(kInt8));
static constexpr intptr_t ElementSizeMultiplier(ElementSize size) {
return 1 << (static_cast<intptr_t>(size) - static_cast<intptr_t>(k1Byte));
}
static intptr_t RoundByteOffset(ElementSize size, intptr_t offset) {
@ -649,8 +635,13 @@ class Place : public ValueObject {
class KindBits : public BitField<uword, Kind, 0, 3> {};
class RepresentationBits
: public BitField<uword, Representation, KindBits::kNextBit, 11> {};
class ElementSizeBits
: public BitField<uword, ElementSize, RepresentationBits::kNextBit, 3> {};
static constexpr int kNumElementSizeBits = Utils::ShiftForPowerOfTwo(
Utils::RoundUpToPowerOfTwo(kLargestElementSize));
class ElementSizeBits : public BitField<uword,
ElementSize,
RepresentationBits::kNextBit,
kNumElementSizeBits> {};
uword flags_;
Definition* instance_;
@ -1001,7 +992,7 @@ class AliasedSet : public ZoneAllocated {
// *[C'|S']) and thus we need to handle both element sizes smaller
// and larger than S.
const Place no_instance_alias = alias->CopyWithoutInstance();
for (intptr_t i = Place::kInt8; i <= Place::kLargestElementSize;
for (intptr_t i = Place::k1Byte; i <= Place::kLargestElementSize;
i++) {
// Skip element sizes that are guaranteed to have no
// representatives.
@ -1962,11 +1953,10 @@ class LoadOptimizer : public ValueObject {
// to loads because other array stores (intXX/uintXX/float32)
// may implicitly convert the value stored.
bool CanForwardStore(StoreIndexedInstr* array_store) {
return ((array_store == nullptr) ||
(array_store->class_id() == kArrayCid) ||
(array_store->class_id() == kTypedDataFloat64ArrayCid) ||
(array_store->class_id() == kTypedDataFloat32ArrayCid) ||
(array_store->class_id() == kTypedDataFloat32x4ArrayCid));
if (array_store == nullptr) return true;
auto const rep = RepresentationUtils::RepresentationOfArrayElement(
array_store->class_id());
return !RepresentationUtils::IsUnboxedInteger(rep) && rep != kUnboxedFloat;
}
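
The kUnboxedFloat exclusion in CanForwardStore above exists because a store into
a Float32List narrows the value, so forwarding the original double to a later
load would be wrong; a plain C++ illustration (not VM code):

    #include <cstdio>

    int main() {
      double stored = 0.1;                         // value written to list[i]
      float element = static_cast<float>(stored);  // what the Float32List keeps
      double loaded = element;                     // what a later list[i] yields
      printf("%.17g != %.17g\n", stored, loaded);  // the round trip loses bits
      return 0;
    }
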
static bool AlreadyPinnedByRedefinition(Definition* replacement,
@ -2591,6 +2581,10 @@ class LoadOptimizer : public ValueObject {
Value* input = new (Z) Value(replacement);
phi->SetInputAt(i, input);
replacement->AddInputUse(input);
// If any input is untagged, then the Phi should be marked as untagged.
if (replacement->representation() == kUntagged) {
phi->set_representation(kUntagged);
}
}
graph_->AllocateSSAIndex(phi);

View file

@ -1646,18 +1646,14 @@ Definition* TypedDataSpecializer::AppendLoadIndexed(TemplateDartCall<0>* call,
classid_t cid) {
const intptr_t element_size = TypedDataBase::ElementSizeFor(cid);
const intptr_t index_scale = element_size;
auto data = new (Z)
LoadFieldInstr(new (Z) Value(array), Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer, call->source());
flow_graph_->InsertBefore(call, data, call->env(), FlowGraph::kValue);
auto const rep = LoadIndexedInstr::ReturnRepresentation(cid);
Definition* load = new (Z) LoadIndexedInstr(
new (Z) Value(data), new (Z) Value(index), /*index_unboxed=*/false,
index_scale, cid, kAlignedAccess, DeoptId::kNone, call->source());
new (Z) Value(array), new (Z) Value(index), /*index_unboxed=*/false,
index_scale, cid, kAlignedAccess, call->deopt_id(), call->source());
flow_graph_->InsertBefore(call, load, call->env(), FlowGraph::kValue);
if (cid == kTypedDataFloat32ArrayCid) {
if (rep == kUnboxedFloat) {
load = new (Z) FloatToDoubleInstr(new (Z) Value(load), call->deopt_id());
flow_graph_->InsertBefore(call, load, call->env(), FlowGraph::kValue);
}
@ -1672,66 +1668,24 @@ void TypedDataSpecializer::AppendStoreIndexed(TemplateDartCall<0>* call,
classid_t cid) {
const intptr_t element_size = TypedDataBase::ElementSizeFor(cid);
const intptr_t index_scale = element_size;
auto const rep = StoreIndexedInstr::ValueRepresentation(cid);
const auto deopt_id = call->deopt_id();
switch (cid) {
case kTypedDataInt8ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint16ArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid: {
// Insert explicit unboxing instructions with truncation to avoid relying
// on [SelectRepresentations] which doesn't mark them as truncating.
value = UnboxInstr::Create(kUnboxedIntPtr, new (Z) Value(value), deopt_id,
Instruction::kNotSpeculative);
flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
break;
}
case kTypedDataInt32ArrayCid: {
// Insert explicit unboxing instructions with truncation to avoid relying
// on [SelectRepresentations] which doesn't mark them as truncating.
value = UnboxInstr::Create(kUnboxedInt32, new (Z) Value(value), deopt_id,
Instruction::kNotSpeculative);
flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
break;
}
case kTypedDataUint32ArrayCid: {
// Insert explicit unboxing instructions with truncation to avoid relying
// on [SelectRepresentations] which doesn't mark them as truncating.
value = UnboxInstr::Create(kUnboxedUint32, new (Z) Value(value), deopt_id,
Instruction::kNotSpeculative);
flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
break;
}
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid: {
// Insert explicit unboxing instructions with truncation to avoid relying
// on [SelectRepresentations] which doesn't mark them as truncating.
value = UnboxInstr::Create(kUnboxedInt64, new (Z) Value(value),
DeoptId::kNone, Instruction::kNotSpeculative);
flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
break;
}
case kTypedDataFloat32ArrayCid: {
value = new (Z) DoubleToFloatInstr(new (Z) Value(value), deopt_id,
Instruction::kNotSpeculative);
flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
break;
}
default:
break;
if (RepresentationUtils::IsUnboxedInteger(rep)) {
// Insert explicit unboxing instructions with truncation to avoid relying
// on [SelectRepresentations] which doesn't mark them as truncating.
value = UnboxInstr::Create(rep, new (Z) Value(value), deopt_id,
Instruction::kNotSpeculative);
flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
} else if (rep == kUnboxedFloat) {
value = new (Z) DoubleToFloatInstr(new (Z) Value(value), deopt_id,
Instruction::kNotSpeculative);
flow_graph_->InsertBefore(call, value, call->env(), FlowGraph::kValue);
}
auto data = new (Z)
LoadFieldInstr(new (Z) Value(array), Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer, call->source());
flow_graph_->InsertBefore(call, data, call->env(), FlowGraph::kValue);
auto store = new (Z) StoreIndexedInstr(
new (Z) Value(data), new (Z) Value(index), new (Z) Value(value),
new (Z) Value(array), new (Z) Value(index), new (Z) Value(value),
kNoStoreBarrier, /*index_unboxed=*/false, index_scale, cid,
kAlignedAccess, DeoptId::kNone, call->source(),
Instruction::kNotSpeculative);

View file

@ -329,6 +329,8 @@ FlowGraph* CompilerPass::RunForceOptimizedPipeline(
INVOKE_PASS(Canonicalize);
INVOKE_PASS_AOT(DelayAllocations);
INVOKE_PASS(EliminateWriteBarriers);
// This must be done after all other possible intra-block code motion.
INVOKE_PASS(LoweringAfterCodeMotionDisabled);
INVOKE_PASS(FinalizeGraph);
INVOKE_PASS(ReorderBlocks);
INVOKE_PASS(AllocateRegisters);
@ -397,6 +399,8 @@ FlowGraph* CompilerPass::RunPipeline(PipelineMode mode,
INVOKE_PASS(Canonicalize);
INVOKE_PASS(AllocationSinking_DetachMaterializations);
INVOKE_PASS(EliminateWriteBarriers);
// This must be done after all other possible intra-block code motion.
INVOKE_PASS(LoweringAfterCodeMotionDisabled);
INVOKE_PASS(FinalizeGraph);
INVOKE_PASS(Canonicalize);
INVOKE_PASS(ReorderBlocks);
@ -610,6 +614,9 @@ COMPILER_PASS(TestILSerialization, {
}
});
COMPILER_PASS(LoweringAfterCodeMotionDisabled,
{ flow_graph->ExtractNonInternalTypedDataPayloads(); });
COMPILER_PASS(GenerateCode, { state->graph_compiler->CompileGraph(); });
} // namespace dart
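
For orientation, a very rough sketch of what such a post-code-motion lowering
pass could look like; it leans on the IL types used elsewhere in this diff, and
NeedsPayloadExtraction / ArrayInputOf are assumed helpers for illustration, not
functions from this CL:

    // Illustrative only: once code motion is finished, rebind typed-data
    // accesses to a freshly loaded untagged payload address so that no
    // GC-triggering instruction can sit between the load and its use.
    static void ExtractPayloadsSketch(FlowGraph* flow_graph) {
      for (BlockIterator block_it = flow_graph->reverse_postorder_iterator();
           !block_it.Done(); block_it.Advance()) {
        for (ForwardInstructionIterator it(block_it.Current()); !it.Done();
             it.Advance()) {
          Instruction* instr = it.Current();
          if (!NeedsPayloadExtraction(instr)) continue;  // assumed helper
          Value* array_input = ArrayInputOf(instr);      // assumed helper
          auto* payload = new (flow_graph->zone()) LoadFieldInstr(
              new (flow_graph->zone()) Value(array_input->definition()),
              Slot::PointerBase_data(), InnerPointerAccess::kMayBeInnerPointer,
              instr->source());
          flow_graph->InsertBefore(instr, payload, /*env=*/nullptr,
                                   FlowGraph::kValue);
          array_input->BindTo(payload);
        }
      }
    }
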

View file

@ -55,6 +55,7 @@ namespace dart {
V(WidenSmiToInt32) \
V(EliminateWriteBarriers) \
V(TestILSerialization) \
V(LoweringAfterCodeMotionDisabled) \
V(GenerateCode)
class AllocationSinking;

View file

@ -282,20 +282,6 @@ Fragment BaseFlowGraphBuilder::MemoryCopy(classid_t src_cid,
return Fragment(copy);
}
Fragment BaseFlowGraphBuilder::MemoryCopyUntagged(intptr_t element_size,
bool unboxed_inputs,
bool can_overlap) {
Value* length = Pop();
Value* dest_start = Pop();
Value* src_start = Pop();
Value* dest = Pop();
Value* src = Pop();
auto copy =
new (Z) MemoryCopyInstr(element_size, src, dest, src_start, dest_start,
length, unboxed_inputs, can_overlap);
return Fragment(copy);
}
Fragment BaseFlowGraphBuilder::TailCall(const Code& code) {
Value* arg_desc = Pop();
return Fragment(new (Z) TailCallInstr(code, arg_desc)).closed();
@ -619,6 +605,8 @@ Fragment BaseFlowGraphBuilder::StoreStaticField(TokenPosition position,
}
Fragment BaseFlowGraphBuilder::StoreIndexed(classid_t class_id) {
// This fragment builder cannot be used for typed data accesses.
ASSERT(!IsTypedDataBaseClassId(class_id));
Value* value = Pop();
Value* index = Pop();
const StoreBarrierType emit_store_barrier =
@ -635,6 +623,7 @@ Fragment BaseFlowGraphBuilder::StoreIndexedTypedData(classid_t class_id,
intptr_t index_scale,
bool index_unboxed,
AlignmentType alignment) {
ASSERT(IsTypedDataBaseClassId(class_id));
Value* value = Pop();
Value* index = Pop();
Value* c_pointer = Pop();
@ -1025,9 +1014,15 @@ Fragment BaseFlowGraphBuilder::AllocateObject(TokenPosition position,
}
Fragment BaseFlowGraphBuilder::Box(Representation from) {
Fragment instructions;
if (from == kUnboxedFloat) {
instructions += FloatToDouble();
from = kUnboxedDouble;
}
BoxInstr* box = BoxInstr::Create(from, Pop());
instructions <<= box;
Push(box);
return Fragment(box);
return instructions;
}
Fragment BaseFlowGraphBuilder::DebugStepCheck(TokenPosition position) {

View file

@ -343,9 +343,6 @@ class BaseFlowGraphBuilder {
classid_t dest_cid,
bool unboxed_inputs,
bool can_overlap = true);
Fragment MemoryCopyUntagged(intptr_t element_size,
bool unboxed_inputs,
bool can_overlap = true);
Fragment TailCall(const Code& code);
Fragment Utf8Scan();

View file

@ -1264,19 +1264,19 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
body += BuildTypedListSet(function, kTypedDataFloat64x2ArrayCid);
break;
case MethodRecognizer::kTypedData_memMove1:
body += BuildTypedDataMemMove(function, 1);
body += BuildTypedDataMemMove(function, kTypedDataInt8ArrayCid);
break;
case MethodRecognizer::kTypedData_memMove2:
body += BuildTypedDataMemMove(function, 2);
body += BuildTypedDataMemMove(function, kTypedDataInt16ArrayCid);
break;
case MethodRecognizer::kTypedData_memMove4:
body += BuildTypedDataMemMove(function, 4);
body += BuildTypedDataMemMove(function, kTypedDataInt32ArrayCid);
break;
case MethodRecognizer::kTypedData_memMove8:
body += BuildTypedDataMemMove(function, 8);
body += BuildTypedDataMemMove(function, kTypedDataInt64ArrayCid);
break;
case MethodRecognizer::kTypedData_memMove16:
body += BuildTypedDataMemMove(function, 16);
body += BuildTypedDataMemMove(function, kTypedDataInt32x4ArrayCid);
break;
#define CASE(name) \
case MethodRecognizer::kTypedData_##name##_factory: \
@ -1399,25 +1399,17 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
parsed_function_->RawParameterVariable(3);
LocalVariable* arg_length_in_bytes =
parsed_function_->RawParameterVariable(4);
// Load the untagged data fields of the source and destination so they
// can be possibly load optimized away when applicable, and unbox the
// numeric inputs since we're force optimizing _memCopy and that removes
// the need to use SmiUntag within MemoryCopy when element_size is 1.
body += LoadLocal(arg_source);
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += LoadLocal(arg_target);
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += LoadLocal(arg_source_offset_in_bytes);
body += UnboxTruncate(kUnboxedIntPtr);
body += LoadLocal(arg_target_offset_in_bytes);
body += UnboxTruncate(kUnboxedIntPtr);
body += LoadLocal(arg_length_in_bytes);
body += UnboxTruncate(kUnboxedIntPtr);
body += MemoryCopyUntagged(/*element_size=*/1,
/*unboxed_inputs=*/true,
/*can_overlap=*/true);
body += MemoryCopy(kTypedDataUint8ArrayCid, kTypedDataUint8ArrayCid,
/*unboxed_inputs=*/true,
/*can_overlap=*/true);
body += NullConstant();
} break;
case MethodRecognizer::kFfiAbi:
@ -1466,17 +1458,10 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
body += LoadLocal(arg_typed_data_base);
body += CheckNullOptimized(String::ZoneHandle(Z, function.name()));
// No GC from here til LoadIndexed.
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += LoadLocal(arg_offset_not_null);
body += UnboxTruncate(kUnboxedFfiIntPtr);
body += LoadIndexed(typed_data_cid, /*index_scale=*/1,
/*index_unboxed=*/true, alignment);
if (kind == MethodRecognizer::kFfiLoadFloat ||
kind == MethodRecognizer::kFfiLoadFloatUnaligned) {
body += FloatToDouble();
}
// Avoid any unnecessary (and potentially deoptimizing) int
// conversions by using the representation returned from LoadIndexed.
body += Box(LoadIndexedInstr::ReturnRepresentation(typed_data_cid));
@ -1539,9 +1524,6 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
body += LoadLocal(arg_typed_data_base); // Pointer.
body += CheckNullOptimized(String::ZoneHandle(Z, function.name()));
// No GC from here til StoreIndexed.
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += LoadLocal(arg_offset_not_null);
body += UnboxTruncate(kUnboxedFfiIntPtr);
body += LoadLocal(arg_value_not_null);
@ -1555,10 +1537,6 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
// conversions by using the representation consumed by StoreIndexed.
body += UnboxTruncate(
StoreIndexedInstr::ValueRepresentation(typed_data_cid));
if (kind == MethodRecognizer::kFfiStoreFloat ||
kind == MethodRecognizer::kFfiStoreFloatUnaligned) {
body += DoubleToFloat();
}
}
body += StoreIndexedTypedData(typed_data_cid, /*index_scale=*/1,
/*index_unboxed=*/true, alignment);
@ -1926,14 +1904,14 @@ Fragment FlowGraphBuilder::BuildTypedDataViewFactoryConstructor(
return body;
}
static bool CanUnboxElements(intptr_t view_cid) {
switch (view_cid) {
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
static bool CanUnboxElements(classid_t cid) {
switch (RepresentationUtils::RepresentationOfArrayElement(cid)) {
case kUnboxedFloat:
case kUnboxedDouble:
return FlowGraphCompiler::SupportsUnboxedDoubles();
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid:
case kUnboxedInt32x4:
case kUnboxedFloat32x4:
case kUnboxedFloat64x2:
return FlowGraphCompiler::SupportsUnboxedSimd128();
default:
return true;
@ -1941,18 +1919,18 @@ static bool CanUnboxElements(intptr_t view_cid) {
}
static const Function& TypedListGetNativeFunction(Thread* thread,
intptr_t view_cid) {
classid_t cid) {
auto& state = thread->compiler_state();
switch (view_cid) {
case kTypedDataFloat32ArrayCid:
switch (RepresentationUtils::RepresentationOfArrayElement(cid)) {
case kUnboxedFloat:
return state.TypedListGetFloat32();
case kTypedDataFloat64ArrayCid:
case kUnboxedDouble:
return state.TypedListGetFloat64();
case kTypedDataInt32x4ArrayCid:
case kUnboxedInt32x4:
return state.TypedListGetInt32x4();
case kTypedDataFloat32x4ArrayCid:
case kUnboxedFloat32x4:
return state.TypedListGetFloat32x4();
case kTypedDataFloat64x2ArrayCid:
case kUnboxedFloat64x2:
return state.TypedListGetFloat64x2();
default:
UNREACHABLE();
@ -1961,7 +1939,7 @@ static const Function& TypedListGetNativeFunction(Thread* thread,
}
Fragment FlowGraphBuilder::BuildTypedListGet(const Function& function,
intptr_t view_cid) {
classid_t cid) {
const intptr_t kNumParameters = 2;
ASSERT_EQUAL(parsed_function_->function().NumParameters(), kNumParameters);
// Guaranteed to be non-null since it's only called internally from other
@ -1972,16 +1950,14 @@ Fragment FlowGraphBuilder::BuildTypedListGet(const Function& function,
parsed_function_->RawParameterVariable(1);
Fragment body;
if (CanUnboxElements(view_cid)) {
if (CanUnboxElements(cid)) {
body += LoadLocal(arg_receiver);
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += LoadLocal(arg_offset_in_bytes);
body += LoadIndexed(view_cid, /*index_scale=*/1,
body += LoadIndexed(cid, /*index_scale=*/1,
/*index_unboxed=*/false, kUnalignedAccess);
body += Box(LoadIndexedInstr::ReturnRepresentation(view_cid));
body += Box(LoadIndexedInstr::ReturnRepresentation(cid));
} else {
const auto& native_function = TypedListGetNativeFunction(thread_, view_cid);
const auto& native_function = TypedListGetNativeFunction(thread_, cid);
body += LoadLocal(arg_receiver);
body += LoadLocal(arg_offset_in_bytes);
body += StaticCall(TokenPosition::kNoSource, native_function,
@ -1991,18 +1967,18 @@ Fragment FlowGraphBuilder::BuildTypedListGet(const Function& function,
}
static const Function& TypedListSetNativeFunction(Thread* thread,
intptr_t view_cid) {
classid_t cid) {
auto& state = thread->compiler_state();
switch (view_cid) {
case kTypedDataFloat32ArrayCid:
switch (RepresentationUtils::RepresentationOfArrayElement(cid)) {
case kUnboxedFloat:
return state.TypedListSetFloat32();
case kTypedDataFloat64ArrayCid:
case kUnboxedDouble:
return state.TypedListSetFloat64();
case kTypedDataInt32x4ArrayCid:
case kUnboxedInt32x4:
return state.TypedListSetInt32x4();
case kTypedDataFloat32x4ArrayCid:
case kUnboxedFloat32x4:
return state.TypedListSetFloat32x4();
case kTypedDataFloat64x2ArrayCid:
case kUnboxedFloat64x2:
return state.TypedListSetFloat64x2();
default:
UNREACHABLE();
@ -2011,7 +1987,7 @@ static const Function& TypedListSetNativeFunction(Thread* thread,
}
Fragment FlowGraphBuilder::BuildTypedListSet(const Function& function,
intptr_t view_cid) {
classid_t cid) {
const intptr_t kNumParameters = 3;
ASSERT_EQUAL(parsed_function_->function().NumParameters(), kNumParameters);
// Guaranteed to be non-null since it's only called internally from other
@ -2023,20 +1999,18 @@ Fragment FlowGraphBuilder::BuildTypedListSet(const Function& function,
LocalVariable* arg_value = parsed_function_->RawParameterVariable(2);
Fragment body;
if (CanUnboxElements(view_cid)) {
if (CanUnboxElements(cid)) {
body += LoadLocal(arg_receiver);
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += LoadLocal(arg_offset_in_bytes);
body += LoadLocal(arg_value);
body +=
CheckNullOptimized(Symbols::Value(), CheckNullInstr::kArgumentError);
body += UnboxTruncate(StoreIndexedInstr::ValueRepresentation(view_cid));
body += StoreIndexedTypedData(view_cid, /*index_scale=*/1,
body += UnboxTruncate(StoreIndexedInstr::ValueRepresentation(cid));
body += StoreIndexedTypedData(cid, /*index_scale=*/1,
/*index_unboxed=*/false, kUnalignedAccess);
body += NullConstant();
} else {
const auto& native_function = TypedListSetNativeFunction(thread_, view_cid);
const auto& native_function = TypedListSetNativeFunction(thread_, cid);
body += LoadLocal(arg_receiver);
body += LoadLocal(arg_offset_in_bytes);
body += LoadLocal(arg_value);
@ -2047,7 +2021,7 @@ Fragment FlowGraphBuilder::BuildTypedListSet(const Function& function,
}
Fragment FlowGraphBuilder::BuildTypedDataMemMove(const Function& function,
intptr_t element_size) {
classid_t cid) {
ASSERT_EQUAL(parsed_function_->function().NumParameters(), 5);
LocalVariable* arg_to = parsed_function_->RawParameterVariable(0);
LocalVariable* arg_to_start = parsed_function_->RawParameterVariable(1);
@ -2082,20 +2056,16 @@ Fragment FlowGraphBuilder::BuildTypedDataMemMove(const Function& function,
Fragment use_instruction(is_small_enough);
use_instruction += LoadLocal(arg_from);
use_instruction += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
use_instruction += LoadLocal(arg_to);
use_instruction += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
use_instruction += LoadLocal(arg_from_start);
use_instruction += LoadLocal(arg_to_start);
use_instruction += LoadLocal(arg_count);
use_instruction +=
MemoryCopyUntagged(element_size,
/*unboxed_inputs=*/false, /*can_overlap=*/true);
use_instruction += MemoryCopy(cid, cid,
/*unboxed_inputs=*/false, /*can_overlap=*/true);
use_instruction += Goto(done);
Fragment call_memmove(is_too_large);
const intptr_t element_size = Instance::ElementSizeFor(cid);
call_memmove += LoadLocal(arg_to);
call_memmove += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
@@ -4615,10 +4585,16 @@ Fragment FlowGraphBuilder::UnhandledException() {
}
Fragment FlowGraphBuilder::UnboxTruncate(Representation to) {
auto* unbox = UnboxInstr::Create(to, Pop(), DeoptId::kNone,
auto const unbox_to = to == kUnboxedFloat ? kUnboxedDouble : to;
Fragment instructions;
auto* unbox = UnboxInstr::Create(unbox_to, Pop(), DeoptId::kNone,
Instruction::kNotSpeculative);
instructions <<= unbox;
Push(unbox);
return Fragment(unbox);
if (to == kUnboxedFloat) {
instructions += DoubleToFloat();
}
return instructions;
}
Fragment FlowGraphBuilder::LoadThread() {
@@ -4851,8 +4827,6 @@ Fragment FlowGraphBuilder::PopFromStackToTypedDataBase(
for (intptr_t i = 0; i < num_defs; i++) {
const Representation representation = representations[i];
body += LoadLocal(uint8_list);
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += IntConstant(offset_in_bytes);
body += LoadLocal(definitions->At(i));
body += StoreIndexedTypedDataUnboxed(representation, /*index_scale=*/1,
@@ -4891,6 +4865,23 @@ static classid_t typed_data_cid(intptr_t chunk_size) {
UNREACHABLE();
}
// Only for use within CopyFromTypedDataBaseToUnboxedAddress and
// CopyFromUnboxedAddressToTypedDataBase, where we know the "array" being
// passed is an untagged pointer coming from C.
static classid_t external_typed_data_cid(intptr_t chunk_size) {
switch (chunk_size) {
case 8:
return kExternalTypedDataInt64ArrayCid;
case 4:
return kExternalTypedDataInt32ArrayCid;
case 2:
return kExternalTypedDataInt16ArrayCid;
case 1:
return kExternalTypedDataInt8ArrayCid;
}
UNREACHABLE();
}
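
For reference, the copy helpers that follow decompose the byte length greedily into 8/4/2/1-byte chunks via chunk_size(). A self-contained sketch of that decomposition (ChunkSize is an assumed stand-in for the VM's chunk_size() helper, which is outside this hunk):

#include <cstdio>

// Assumed behavior of chunk_size(): the largest of 8, 4, 2, or 1 bytes
// that still fits in the remaining length.
static int ChunkSize(int bytes_left) {
  if (bytes_left >= 8) return 8;
  if (bytes_left >= 4) return 4;
  if (bytes_left >= 2) return 2;
  return 1;
}

int main() {
  // A 13-byte compound copies as one 8-byte, one 4-byte, and one 1-byte
  // access, mirroring the offset_in_bytes/bytes_left loop in the helpers.
  const int length_in_bytes = 13;
  int offset_in_bytes = 0;
  while (offset_in_bytes < length_in_bytes) {
    const int bytes_left = length_in_bytes - offset_in_bytes;
    const int chunk = ChunkSize(bytes_left);
    std::printf("copy %d byte(s) at offset %d\n", chunk, offset_in_bytes);
    offset_in_bytes += chunk;
  }
  return 0;
}
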
Fragment FlowGraphBuilder::CopyFromTypedDataBaseToUnboxedAddress(
intptr_t length_in_bytes) {
Fragment body;
@@ -4903,13 +4894,10 @@ Fragment FlowGraphBuilder::CopyFromTypedDataBaseToUnboxedAddress(
while (offset_in_bytes < length_in_bytes) {
const intptr_t bytes_left = length_in_bytes - offset_in_bytes;
const intptr_t chunk_sizee = chunk_size(bytes_left);
const classid_t typed_data_cidd = typed_data_cid(chunk_sizee);
body += LoadLocal(typed_data_base);
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += IntConstant(offset_in_bytes);
body += LoadIndexed(typed_data_cidd, /*index_scale=*/1,
body += LoadIndexed(typed_data_cid(chunk_sizee), /*index_scale=*/1,
/*index_unboxed=*/false);
LocalVariable* chunk_value = MakeTemporary("chunk_value");
@@ -4917,7 +4905,8 @@ Fragment FlowGraphBuilder::CopyFromTypedDataBaseToUnboxedAddress(
body += ConvertUnboxedToUntagged(kUnboxedFfiIntPtr);
body += IntConstant(offset_in_bytes);
body += LoadLocal(chunk_value);
body += StoreIndexedTypedData(typed_data_cidd, /*index_scale=*/1,
body += StoreIndexedTypedData(external_typed_data_cid(chunk_sizee),
/*index_scale=*/1,
/*index_unboxed=*/false);
body += DropTemporary(&chunk_value);
@@ -4942,21 +4931,19 @@ Fragment FlowGraphBuilder::CopyFromUnboxedAddressToTypedDataBase(
while (offset_in_bytes < length_in_bytes) {
const intptr_t bytes_left = length_in_bytes - offset_in_bytes;
const intptr_t chunk_sizee = chunk_size(bytes_left);
const classid_t typed_data_cidd = typed_data_cid(chunk_sizee);
body += LoadLocal(unboxed_address);
body += ConvertUnboxedToUntagged(kUnboxedFfiIntPtr);
body += IntConstant(offset_in_bytes);
body += LoadIndexed(typed_data_cidd, /*index_scale=*/1,
body += LoadIndexed(external_typed_data_cid(chunk_sizee), /*index_scale=*/1,
/*index_unboxed=*/false);
LocalVariable* chunk_value = MakeTemporary("chunk_value");
body += LoadLocal(typed_data_base);
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += IntConstant(offset_in_bytes);
body += LoadLocal(chunk_value);
body += StoreIndexedTypedData(typed_data_cidd, /*index_scale=*/1,
body += StoreIndexedTypedData(typed_data_cid(chunk_sizee),
/*index_scale=*/1,
/*index_unboxed=*/false);
body += DropTemporary(&chunk_value);
@@ -4977,8 +4964,6 @@ Fragment FlowGraphBuilder::LoadTail(LocalVariable* variable,
if (size == 8 || size == 4) {
body += LoadLocal(variable);
body += LoadTypedDataBaseFromCompound();
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += IntConstant(offset_in_bytes);
body += LoadIndexedTypedDataUnboxed(representation, /*index_scale=*/1,
/*index_unboxed=*/false);
@@ -4992,8 +4977,6 @@ Fragment FlowGraphBuilder::LoadTail(LocalVariable* variable,
while (remaining >= part_bytes) {
body += LoadLocal(variable);
body += LoadTypedDataBaseFromCompound();
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += IntConstant(offset_in_bytes);
body += LoadIndexed(part_cid, /*index_scale*/ 1,
/*index_unboxed=*/false);
@@ -5122,8 +5105,6 @@ Fragment FlowGraphBuilder::FfiCallbackConvertCompoundArgumentToDart(
representation = loc.payload_type().AsRepresentationOverApprox(Z);
}
body += LoadLocal(uint8_list);
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += IntConstant(offset_in_bytes);
body += LoadLocal(definitions->At(i));
body += StoreIndexedTypedDataUnboxed(representation, /*index_scale=*/1,


@@ -158,10 +158,9 @@ class FlowGraphBuilder : public BaseFlowGraphBuilder {
FlowGraph* BuildGraphOfRecognizedMethod(const Function& function);
Fragment BuildTypedListGet(const Function& function, intptr_t view_cid);
Fragment BuildTypedListSet(const Function& function, intptr_t view_cid);
Fragment BuildTypedDataMemMove(const Function& function,
intptr_t element_size);
Fragment BuildTypedListGet(const Function& function, classid_t cid);
Fragment BuildTypedListSet(const Function& function, classid_t cid);
Fragment BuildTypedDataMemMove(const Function& function, classid_t cid);
Fragment BuildTypedDataViewFactoryConstructor(const Function& function,
classid_t cid);
Fragment BuildTypedDataFactoryConstructor(const Function& function,


@@ -190,12 +190,17 @@ static Definition* CreateBoxedResultIfNeeded(BlockBuilder* builder,
Representation representation) {
const auto& function = builder->function();
ASSERT(!function.has_unboxed_record_return());
if (function.has_unboxed_return()) {
return value;
} else {
return builder->AddDefinition(
BoxInstr::Create(representation, new Value(value)));
Definition* result = value;
if (representation == kUnboxedFloat) {
result = builder->AddDefinition(
new FloatToDoubleInstr(new Value(result), DeoptId::kNone));
representation = kUnboxedDouble;
}
if (!function.has_unboxed_return()) {
result = builder->AddDefinition(BoxInstr::Create(
Boxing::NativeRepresentation(representation), new Value(result)));
}
return result;
}
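
A quick standalone check (plain C++, not VM code) of why a kUnboxedFloat result can be widened with FloatToDouble and boxed as a double without losing information: every finite 32-bit float is exactly representable as a 64-bit double, so the later DoubleToFloat narrowing recovers the original value.

#include <cassert>
#include <cstdio>

int main() {
  // Widen to double (as done before boxing), then narrow back to float
  // (as done after unboxing); the round-trip is exact for finite floats.
  const float samples[] = {0.1f, 1.5f, -3.4028235e38f, 1.17549435e-38f};
  for (float f : samples) {
    double widened = static_cast<double>(f);
    float narrowed = static_cast<float>(widened);
    assert(narrowed == f);
    std::printf("%.9g -> %.17g -> %.9g\n", f, widened, narrowed);
  }
  return 0;
}
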
static Definition* CreateUnboxedResultIfNeeded(BlockBuilder* builder,
@@ -240,72 +245,16 @@ static bool IntrinsifyArrayGetIndexed(FlowGraph* flow_graph,
// following boxing instruction about a more precise range we attach it here
// manually.
// http://dartbug.com/36632
const bool known_range =
array_cid == kTypedDataInt8ArrayCid ||
array_cid == kTypedDataUint8ArrayCid ||
array_cid == kTypedDataUint8ClampedArrayCid ||
array_cid == kExternalTypedDataUint8ArrayCid ||
array_cid == kExternalTypedDataUint8ClampedArrayCid ||
array_cid == kTypedDataInt16ArrayCid ||
array_cid == kTypedDataUint16ArrayCid ||
array_cid == kTypedDataInt32ArrayCid ||
array_cid == kTypedDataUint32ArrayCid || array_cid == kOneByteStringCid ||
array_cid == kTwoByteStringCid;
bool clear_environment = false;
if (known_range) {
Range range;
result->InferRange(/*range_analysis=*/nullptr, &range);
result->set_range(range);
clear_environment = range.Fits(RangeBoundary::kRangeBoundarySmi);
auto const rep = RepresentationUtils::RepresentationOfArrayElement(array_cid);
if (RepresentationUtils::IsUnboxedInteger(rep)) {
result->set_range(Range::Full(rep));
}
const bool clear_environment =
RangeUtils::Fits(result->range(), RangeBoundary::kRangeBoundarySmi);
// Box and/or convert result if necessary.
switch (array_cid) {
case kTypedDataInt32ArrayCid:
case kExternalTypedDataInt32ArrayCid:
result = CreateBoxedResultIfNeeded(&builder, result, kUnboxedInt32);
break;
case kTypedDataUint32ArrayCid:
case kExternalTypedDataUint32ArrayCid:
result = CreateBoxedResultIfNeeded(&builder, result, kUnboxedUint32);
break;
case kTypedDataFloat32ArrayCid:
result = builder.AddDefinition(
new FloatToDoubleInstr(new Value(result), DeoptId::kNone));
FALL_THROUGH;
case kTypedDataFloat64ArrayCid:
result = CreateBoxedResultIfNeeded(&builder, result, kUnboxedDouble);
break;
case kTypedDataFloat32x4ArrayCid:
result = CreateBoxedResultIfNeeded(&builder, result, kUnboxedFloat32x4);
break;
case kTypedDataInt32x4ArrayCid:
result = CreateBoxedResultIfNeeded(&builder, result, kUnboxedInt32x4);
break;
case kTypedDataFloat64x2ArrayCid:
result = CreateBoxedResultIfNeeded(&builder, result, kUnboxedFloat64x2);
break;
case kArrayCid:
case kImmutableArrayCid:
// Nothing to do.
break;
case kTypedDataInt8ArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint8ClampedArrayCid:
case kTypedDataUint16ArrayCid:
case kExternalTypedDataUint8ArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
result = CreateBoxedResultIfNeeded(&builder, result, kUnboxedIntPtr);
break;
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
result = CreateBoxedResultIfNeeded(&builder, result, kUnboxedInt64);
break;
default:
UNREACHABLE();
break;
if (RepresentationUtils::IsUnboxed(rep)) {
result = CreateBoxedResultIfNeeded(&builder, result, rep);
}
if (result->IsBoxInteger() && clear_environment) {
result->AsBoxInteger()->ClearEnv();
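
The attached range is what allows clearing the environment on the BoxInteger: when the element representation's full range fits in a Smi, the boxed result never needs a heap-allocated Mint. A toy version of that fit check (the bit widths and the 63-bit Smi bound are illustrative assumptions, not the VM's Range/RangeUtils code):

#include <cstdint>
#include <cstdio>

// Toy stand-ins for Range::Full(rep) and the Smi fit check.
struct Range { int64_t min, max; };

static Range FullRange(int bits, bool is_signed) {
  if (is_signed) {
    return {-(int64_t{1} << (bits - 1)), (int64_t{1} << (bits - 1)) - 1};
  }
  return {0, (int64_t{1} << bits) - 1};
}

static bool FitsInSmi(Range r) {
  const int64_t kSmiMax = (int64_t{1} << 62) - 1;  // assumed 63-bit Smi
  return r.min >= -kSmiMax - 1 && r.max <= kSmiMax;
}

int main() {
  struct { const char* name; int bits; bool is_signed; } reps[] = {
      {"kUnboxedInt8", 8, true},
      {"kUnboxedUint16", 16, false},
      {"kUnboxedUint32", 32, false},
  };
  for (const auto& rep : reps) {
    const Range range = FullRange(rep.bits, rep.is_signed);
    std::printf("%s: [%lld, %lld], fits in Smi: %s\n", rep.name,
                static_cast<long long>(range.min),
                static_cast<long long>(range.max),
                FitsInSmi(range) ? "yes" : "no");
  }
  return 0;
}

A kUnboxedInt64 element, by contrast, can exceed the Smi range, so its BoxInteger keeps its environment.
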
@@ -333,81 +282,29 @@ static bool IntrinsifyArraySetIndexed(FlowGraph* flow_graph,
Slot::GetLengthFieldForArrayCid(array_cid));
// Value check/conversion.
switch (array_cid) {
case kTypedDataUint8ClampedArrayCid:
case kExternalTypedDataUint8ClampedArrayCid:
auto const rep = RepresentationUtils::RepresentationOfArrayElement(array_cid);
if (IsClampedTypedDataBaseClassId(array_cid)) {
#if defined(TARGET_ARCH_IS_32_BIT)
// On 32-bit architectures, clamping operations need the exact value
// for proper operations. On 64-bit architectures, kUnboxedIntPtr
// maps to kUnboxedInt64. All other situations get away with
// truncating even non-smi values.
builder.AddInstruction(new CheckSmiInstr(new Value(value), DeoptId::kNone,
builder.Source()));
FALL_THROUGH;
// On 32-bit architectures, clamping operations need the exact value
// for proper operations. On 64-bit architectures, kUnboxedIntPtr
// maps to kUnboxedInt64. All other situations get away with
// truncating even non-smi values.
builder.AddInstruction(
new CheckSmiInstr(new Value(value), DeoptId::kNone, builder.Source()));
#endif
case kTypedDataInt8ArrayCid:
case kTypedDataInt16ArrayCid:
case kTypedDataUint8ArrayCid:
case kTypedDataUint16ArrayCid:
case kExternalTypedDataUint8ArrayCid:
value = builder.AddUnboxInstr(kUnboxedIntPtr, new Value(value),
/* is_checked = */ false);
value->AsUnboxInteger()->mark_truncating();
break;
case kTypedDataInt32ArrayCid:
case kExternalTypedDataInt32ArrayCid:
// Use same truncating unbox-instruction for int32 and uint32.
FALL_THROUGH;
case kTypedDataUint32ArrayCid:
case kExternalTypedDataUint32ArrayCid:
// Supports smi and mint, slow-case for bigints.
value = builder.AddUnboxInstr(kUnboxedUint32, new Value(value),
/* is_checked = */ false);
break;
case kTypedDataInt64ArrayCid:
case kTypedDataUint64ArrayCid:
value = builder.AddUnboxInstr(kUnboxedInt64, new Value(value),
/* is_checked = */ false);
break;
case kTypedDataFloat32ArrayCid:
case kTypedDataFloat64ArrayCid:
case kTypedDataFloat32x4ArrayCid:
case kTypedDataInt32x4ArrayCid:
case kTypedDataFloat64x2ArrayCid: {
intptr_t value_check_cid = kDoubleCid;
Representation rep = kUnboxedDouble;
switch (array_cid) {
case kTypedDataFloat32x4ArrayCid:
value_check_cid = kFloat32x4Cid;
rep = kUnboxedFloat32x4;
break;
case kTypedDataInt32x4ArrayCid:
value_check_cid = kInt32x4Cid;
rep = kUnboxedInt32x4;
break;
case kTypedDataFloat64x2ArrayCid:
value_check_cid = kFloat64x2Cid;
rep = kUnboxedFloat64x2;
break;
default:
// Float32/Float64 case already handled.
break;
}
Zone* zone = flow_graph->zone();
Cids* value_check = Cids::CreateMonomorphic(zone, value_check_cid);
builder.AddInstruction(new CheckClassInstr(
new Value(value), DeoptId::kNone, *value_check, builder.Source()));
value = builder.AddUnboxInstr(rep, new Value(value),
/* is_checked = */ true);
if (array_cid == kTypedDataFloat32ArrayCid) {
value = builder.AddDefinition(
new DoubleToFloatInstr(new Value(value), DeoptId::kNone));
}
break;
}
default:
UNREACHABLE();
}
if (RepresentationUtils::IsUnboxedInteger(rep)) {
// Use same truncating unbox-instruction for int32 and uint32.
auto const unbox_rep = rep == kUnboxedInt32 ? kUnboxedUint32 : rep;
value = builder.AddUnboxInstr(unbox_rep, new Value(value),
/* is_checked = */ false);
} else if (RepresentationUtils::IsUnboxed(rep)) {
Zone* zone = flow_graph->zone();
Cids* value_check = Cids::CreateMonomorphic(zone, Boxing::BoxCid(rep));
builder.AddInstruction(new CheckClassInstr(new Value(value), DeoptId::kNone,
*value_check, builder.Source()));
value = builder.AddUnboxInstr(rep, new Value(value),
/* is_checked = */ true);
}
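
On the integer path above, a single truncating unbox serves both signed and unsigned 32-bit element types because only the low 32 bits reach memory; a standalone illustration (plain C++, not VM code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// The value written to a 4-byte typed-data slot depends only on the low
// 32 bits, so the same truncating unbox works for Int32List and Uint32List.
static uint32_t StoredBits(int64_t value) {
  return static_cast<uint32_t>(value);  // keep the low 32 bits
}

int main() {
  const int64_t values[] = {-1, 0x123456789LL, -2147483648LL};
  for (int64_t v : values) {
    const uint32_t bits = StoredBits(v);
    int32_t as_signed;
    std::memcpy(&as_signed, &bits, sizeof(as_signed));
    std::printf("%lld -> stored 0x%08x (read back as int32: %d)\n",
                static_cast<long long>(v), static_cast<unsigned>(bits),
                as_signed);
  }
  return 0;
}
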
if (IsExternalTypedDataClassId(array_cid)) {
@@ -557,12 +454,9 @@ static bool BuildCodeUnitAt(FlowGraph* flow_graph, intptr_t cid) {
// following boxing instruction about a more precise range we attach it here
// manually.
// http://dartbug.com/36632
Range range;
load->InferRange(/*range_analysis=*/nullptr, &range);
load->set_range(range);
Definition* result =
CreateBoxedResultIfNeeded(&builder, load, kUnboxedIntPtr);
auto const rep = RepresentationUtils::RepresentationOfArrayElement(cid);
load->set_range(Range::Full(rep));
Definition* result = CreateBoxedResultIfNeeded(&builder, load, rep);
if (result->IsBoxInteger()) {
result->AsBoxInteger()->ClearEnv();
@@ -583,7 +477,7 @@ bool GraphIntrinsifier::Build_TwoByteStringCodeUnitAt(FlowGraph* flow_graph) {
static bool BuildSimdOp(FlowGraph* flow_graph, intptr_t cid, Token::Kind kind) {
if (!FlowGraphCompiler::SupportsUnboxedSimd128()) return false;
const Representation rep = RepresentationForCid(cid);
auto const rep = RepresentationForCid(cid);
Zone* zone = flow_graph->zone();
GraphEntryInstr* graph_entry = flow_graph->graph_entry();


@@ -122,7 +122,7 @@ namespace dart {
V(::, _typedDataIndexCheck, TypedDataIndexCheck, 0x79215a89) \
V(::, _byteDataByteOffsetCheck, ByteDataByteOffsetCheck, 0xbb05a064) \
V(::, copyRangeFromUint8ListToOneByteString, \
CopyRangeFromUint8ListToOneByteString, 0xcc42cce1) \
CopyRangeFromUint8ListToOneByteString, 0xcc42d0a2) \
V(_StringBase, _interpolate, StringBaseInterpolate, 0x8af456e6) \
V(_SuspendState, get:_functionData, SuspendState_getFunctionData, \
0x7281768e) \


@@ -9190,7 +9190,10 @@ InstancePtr Function::GetFfiCallClosurePragmaValue() const {
bool Function::RecognizedKindForceOptimize() const {
switch (recognized_kind()) {
// Uses unboxed/untagged data not supported in unoptimized.
// Uses unboxed/untagged data not supported in unoptimized, or uses
// LoadIndexed/StoreIndexed/MemoryCopy instructions with typed data
// arrays, which requires optimization for payload extraction.
case MethodRecognizer::kCopyRangeFromUint8ListToOneByteString:
case MethodRecognizer::kFinalizerBase_getIsolateFinalizers:
case MethodRecognizer::kFinalizerBase_setIsolate:
case MethodRecognizer::kFinalizerBase_setIsolateFinalizers:


@@ -54,6 +54,7 @@ external void writeIntoOneByteString(String string, int index, int codePoint);
/// [length] must specify ranges within the bounds of the list / string.
@pragma("vm:recognized", "other")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
void copyRangeFromUint8ListToOneByteString(
Uint8List from, String to, int fromStart, int toStart, int length) {
for (int i = 0; i < length; i++) {