[vm/compiler] Change MemoryCopy to also take untagged addresses.

This CL adds the ability to pass the payload address of the source
and destination directly to the MemoryCopy instruction as an untagged
value.

The new translation of the _TypedListBase._memMoveN methods uses the new
MemoryCopy constructor, retrieving the untagged value of the data field
of both the source and destination. This way, if inlining exposes the
allocation of the object from which the data field is being retrieved,
then allocation sinking can remove the intermediate allocation if there
are no escaping uses of the object.

Since Pointer.asTypedList allocates such ExternalTypedData objects,
this CL marks that method for inlining whenever possible, which removes
the intermediate allocation if the only use of the TypedData object
is to call setRange for memory copying purposes.
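
This is the pattern exercised by the new vm/dart/pointer_as_typed_list
test:

  import 'dart:ffi';

  void copyPointerContents(Pointer<Uint8> dest, Pointer<Uint8> src, int n) {
    // Each asTypedList allocates an ExternalTypedData view; with
    // asTypedList inlined, both views are sunk and this lowers to a
    // single MemoryCopy on the two untagged data addresses.
    dest.asTypedList(n).setRange(0, n, src.asTypedList(n));
  }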

This CL also separates unboxed native slots into two groups: those
that contain untagged addresses and those that do not. The former
group now has the kUntagged representation, which mimics the old
use of LoadUntagged for the PointerBase data field. It also ensures
that untagged addresses must be explicitly converted to unboxed
integers before any arithmetic is performed on them, and explicitly
converted back to untagged before being stored in a slot that
contains untagged addresses.
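
At the Dart level, the new vm/dart/address_local_pointer test exercises
exactly this conversion round-trip:

  import 'dart:ffi';

  // The data slot of the (sunk) Pointer is kUntagged: fromAddress
  // converts the unboxed integer to untagged, and .address converts it
  // back, so the whole body folds to the parameter itself.
  int identity(int address) => Pointer<Void>.fromAddress(address).address;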

When an unboxed native slot that contains untagged addresses is defined,
the definition also includes a boolean that states whether the slot may
contain addresses that can be moved by the GC.
The redundancy eliminator uses this to decide whether it is safe to
eliminate a duplicate load, replace a load with the value originally
stored in the slot, or lift a load out of a loop.
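
For example, in a sketch like the following (sumBytes is hypothetical),
the load of PointerBase.data from p can be lifted out of the loop:

  import 'dart:ffi';

  int sumBytes(Pointer<Uint8> p, int n) {
    var total = 0;
    for (var i = 0; i < n; i++) {
      // Each p[i] reads p's untagged data address; since a Pointer's
      // address is never moved by the GC, that load is loop-invariant.
      total += p[i];
    }
    return total;
  }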

In particular, the PointerBase data field may contain GC-moveable
addresses, but only for internal TypedData objects and views, not
for external TypedData objects or Pointers. To allow load optimizations
involving the latter, the LoadField and StoreField instructions now
carry an InnerPointerAccess flag that records whether the accessed
address is guaranteed not to be GC-moveable, overriding the
information from the slot argument.
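
As a sketch of the distinction (firstBytes is hypothetical):

  import 'dart:ffi';
  import 'dart:typed_data';

  int firstBytes(Uint8List list, Pointer<Uint8> ptr) {
    // list's data slot may hold an inner pointer into the GC-moveable
    // object itself, so its load keeps kMayBeInnerPointer.
    final a = list[0];
    // ptr's data slot never holds an inner pointer, so canonicalization
    // downgrades the load to kCannotBeInnerPointer, making it eligible
    // for the load optimizations above.
    final b = ptr[0];
    return a + b;
  }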

Notable benchmark changes on x64 (similar for other archs unless noted):

JIT:
* FfiMemory.PointerPointer: 250.7%
* FfiStructCopy.Copy1Bytes: -26.73% (only x64)
* FfiStructCopy.Copy32Bytes: -25.18% (only x64)
* MemoryCopy.64.setRange.Pointer.Uint8: 19.36%
* MemoryCopy.64.setRange.Pointer.Double: 18.96%
* MemoryCopy.8.setRange.Pointer.Double: 17.59%
* MemoryCopy.8.setRange.Pointer.Uint8: 19.46%

AOT:
* FfiMemory.PointerPointer: 323.5%
* FfiStruct.FieldLoadStore: 483.3%
* FileIO_readwrite_64kb: 15.39%
* FileIO_readwrite_512kb (Intel Xeon): 46.22%
* MemoryCopy.512.setRange.Pointer.Uint8: 35.20%
* MemoryCopy.64.setRange.Pointer.Uint8: 55.40%
* MemoryCopy.512.setRange.Pointer.Double: 29.45%
* MemoryCopy.64.setRange.Pointer.Double: 60.37%
* MemoryCopy.8.setRange.Pointer.Double: 59.54%
* MemoryCopy.8.setRange.Pointer.Uint8: 55.40%
* FfiStructCopy.Copy32Bytes: 398.3%
* FfiStructCopy.Copy1Bytes: 1233%

TEST=vm/dart/address_local_pointer, vm/dart/pointer_as_typed_list

Issue: https://github.com/dart-lang/sdk/issues/42072
Fixes: https://github.com/dart-lang/sdk/issues/53124

Cq-Include-Trybots: luci.dart.try:vm-ffi-qemu-linux-release-arm-try,vm-eager-optimization-linux-release-x64-try,vm-linux-release-x64-try,vm-linux-debug-x64-try,vm-aot-linux-release-x64-try,vm-aot-linux-debug-x64-try
Change-Id: I563e0bfac5b1ac6cf1111649934067c12891b631
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/324820
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Commit-Queue: Tess Strickland <sstrickl@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>

@@ -183,6 +183,10 @@ class Env {
void bind(String name, Map<String, dynamic> instrOrBlock) {
final id = instrOrBlock['v'] ?? instrOrBlock['b'];
if (id == null) {
throw 'Instruction is not a definition or a block: ${instrOrBlock['o']}';
}
if (nameToId.containsKey(name)) {
if (nameToId[name] != id) {
throw 'Binding mismatch for $name: got ${nameToId[name]} and $id';

@@ -29,24 +29,6 @@ static void RangeCheck(intptr_t offset_in_bytes,
}
}
static void AlignmentCheck(intptr_t offset_in_bytes, intptr_t element_size) {
if ((offset_in_bytes % element_size) != 0) {
const auto& error = String::Handle(String::NewFormatted(
"Offset in bytes (%" Pd ") must be a multiple of %" Pd "",
offset_in_bytes, element_size));
Exceptions::ThrowArgumentError(error);
}
}
// Checks to see if a length will not result in an OOM error.
static void LengthCheck(intptr_t len, intptr_t max) {
if (len < 0 || len > max) {
const String& error = String::Handle(String::NewFormatted(
"Length (%" Pd ") of object must be in range [0..%" Pd "]", len, max));
Exceptions::ThrowArgumentError(error);
}
}
DEFINE_NATIVE_ENTRY(TypedDataBase_length, 0, 1) {
GET_NON_NULL_NATIVE_ARGUMENT(TypedDataBase, array, arguments->NativeArgAt(0));
return Smi::New(array.Length());
@@ -148,67 +130,6 @@ DEFINE_NATIVE_ENTRY(TypedDataBase_setClampedRange, 0, 5) {
return Object::null();
}
// Native methods for typed data allocation are recognized and implemented
// in FlowGraphBuilder::BuildGraphOfRecognizedMethod.
// These bodies exist only to assert that they are not used.
#define TYPED_DATA_NEW(name) \
DEFINE_NATIVE_ENTRY(TypedData_##name##_new, 0, 2) { \
UNREACHABLE(); \
return Object::null(); \
}
#define TYPED_DATA_NEW_NATIVE(name) TYPED_DATA_NEW(name)
CLASS_LIST_TYPED_DATA(TYPED_DATA_NEW_NATIVE)
#undef TYPED_DATA_NEW_NATIVE
#undef TYPED_DATA_NEW
// We check the length parameter against a possible maximum length for the
// array based on available physical addressable memory on the system.
//
// More specifically
//
// TypedData::MaxElements(cid) is equal to (kSmiMax / ElementSizeInBytes(cid))
//
// which ensures that the number of bytes the array holds is guaranteed to fit
// into a _Smi.
//
// Argument 0 is type arguments and is ignored.
static InstancePtr NewTypedDataView(intptr_t cid,
intptr_t element_size,
Zone* zone,
NativeArguments* arguments) {
GET_NON_NULL_NATIVE_ARGUMENT(TypedDataBase, typed_data,
arguments->NativeArgAt(1));
GET_NON_NULL_NATIVE_ARGUMENT(Smi, offset, arguments->NativeArgAt(2));
GET_NON_NULL_NATIVE_ARGUMENT(Smi, len, arguments->NativeArgAt(3));
const intptr_t backing_length = typed_data.LengthInBytes();
const intptr_t offset_in_bytes = offset.Value();
const intptr_t length = len.Value();
AlignmentCheck(offset_in_bytes, element_size);
LengthCheck(offset_in_bytes + length * element_size, backing_length);
return TypedDataView::New(cid, typed_data, offset_in_bytes, length);
}
#define TYPED_DATA_VIEW_NEW(native_name, cid) \
DEFINE_NATIVE_ENTRY(native_name, 0, 4) { \
return NewTypedDataView(cid, TypedDataBase::ElementSizeInBytes(cid), zone, \
arguments); \
}
#define TYPED_DATA_NEW_NATIVE(name) \
TYPED_DATA_VIEW_NEW(TypedDataView_##name##View_new, \
kTypedData##name##ViewCid) \
TYPED_DATA_VIEW_NEW(TypedDataView_Unmodifiable##name##View_new, \
kUnmodifiableTypedData##name##ViewCid)
CLASS_LIST_TYPED_DATA(TYPED_DATA_NEW_NATIVE)
TYPED_DATA_VIEW_NEW(TypedDataView_ByteDataView_new, kByteDataViewCid)
TYPED_DATA_VIEW_NEW(TypedDataView_UnmodifiableByteDataView_new,
kUnmodifiableByteDataViewCid)
#undef TYPED_DATA_NEW_NATIVE
#undef TYPED_DATA_VIEW_NEW
#define TYPED_DATA_GETTER(getter, object, ctor, access_size) \
DEFINE_NATIVE_ENTRY(TypedData_##getter, 0, 2) { \
GET_NON_NULL_NATIVE_ARGUMENT(TypedDataBase, array, \

@@ -0,0 +1,33 @@
// Copyright (c) 2023, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// Verify that returning the address of a locally created Pointer that doesn't
// escape just returns the address used to create the Pointer without actually
// creating it. (See https://github.com/dart-lang/sdk/issues/53124.)
import 'dart:ffi';
import 'package:expect/expect.dart';
import 'package:ffi/ffi.dart';
import 'package:vm/testing/il_matchers.dart';
@pragma('vm:never-inline')
@pragma('vm:testing:print-flow-graph')
int identity(int address) => Pointer<Void>.fromAddress(address).address;
void matchIL$identity(FlowGraph graph) {
graph.dump();
graph.match([
match.block('Graph'),
match.block('Function', [
'address' << match.Parameter(index: 0),
match.Return('address'),
]),
]);
}
void main(List<String> args) {
final n = args.isEmpty ? 100 : int.parse(args.first);
Expect.equals(n, identity(n));
}

@@ -0,0 +1,58 @@
// Copyright (c) 2023, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// Verify that we don't generate intermediate external TypedData views when
// using setRange to copy between Pointers.
import 'dart:ffi';
import 'package:expect/expect.dart';
import 'package:ffi/ffi.dart';
import 'package:vm/testing/il_matchers.dart';
@pragma('vm:never-inline')
@pragma('vm:testing:print-flow-graph')
void copyPointerContents(Pointer<Uint8> dest, Pointer<Uint8> src, int n) {
dest.asTypedList(n).setRange(0, n, src.asTypedList(n));
}
void matchIL$copyPointerContents(FlowGraph graph) {
graph.dump();
// Since we only call it with n == 100, the third argument will get optimized
// away. The element_size starts as 1, but canonicalization will turn it into
// 4, the length to 100 / 4 == 25, and the starting offsets to 0 / 4 == 0.
//
// We could change the definition of n in main to:
//
// final n = args.isEmpty ? 100 : int.parse(args.first);
//
// but then we'd have to wade through the generated bounds checks here.
graph.match([
match.block('Graph', [
'cnull' << match.Constant(value: null),
'c0' << match.Constant(value: 0),
'c25' << match.Constant(value: 25),
]),
match.block('Function', [
'dest' << match.Parameter(index: 0),
'src' << match.Parameter(index: 1),
'dest.data' << match.LoadField('dest', slot: 'PointerBase.data'),
'src.data' << match.LoadField('src', slot: 'PointerBase.data'),
match.MemoryCopy('src.data', 'dest.data', 'c0', 'c0', 'c25',
element_size: 4),
match.Return('cnull'),
]),
]);
}
void main(List<String> args) {
final n = 100;
final src = malloc<Uint8>(n);
for (int i = 0; i < n; i++) {
src[i] = n - i;
}
final dest = calloc<Uint8>(n);
copyPointerContents(dest, src, n);
Expect.listEquals(src.asTypedList(n), dest.asTypedList(n));
}

@@ -26,9 +26,10 @@ void matchIL$main_foo(FlowGraph graph) {
graph.match([
match.block('Graph'),
match.block('Function', [
match.LoadField(),
'list' << match.Parameter(index: 1),
match.LoadField('list', slot: 'TypedDataBase.length'),
match.GenericCheckBound(),
match.LoadUntagged(),
match.LoadField('list', slot: 'PointerBase.data'),
match.LoadIndexed(),
]),
]);

@@ -26,9 +26,10 @@ void matchIL$main_foo(FlowGraph graph) {
graph.match([
match.block('Graph'),
match.block('Function', [
match.LoadField(),
'list' << match.Parameter(index: 1),
match.LoadField('list', slot: 'TypedDataBase.length'),
match.GenericCheckBound(),
match.LoadUntagged(),
match.LoadField('list', slot: 'PointerBase.data'),
match.LoadIndexed(),
]),
]);

@@ -258,6 +258,7 @@ void generatePatchExtension(
? ""
: """
@patch
@pragma("vm:prefer-inline")
$typedListType asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,

@@ -158,20 +158,6 @@ namespace dart {
V(Timeline_getTraceClock, 0) \
V(Timeline_isDartStreamEnabled, 0) \
V(Timeline_reportTaskEvent, 5) \
V(TypedData_Int8Array_new, 2) \
V(TypedData_Uint8Array_new, 2) \
V(TypedData_Uint8ClampedArray_new, 2) \
V(TypedData_Int16Array_new, 2) \
V(TypedData_Uint16Array_new, 2) \
V(TypedData_Int32Array_new, 2) \
V(TypedData_Uint32Array_new, 2) \
V(TypedData_Int64Array_new, 2) \
V(TypedData_Uint64Array_new, 2) \
V(TypedData_Float32Array_new, 2) \
V(TypedData_Float64Array_new, 2) \
V(TypedData_Float32x4Array_new, 2) \
V(TypedData_Int32x4Array_new, 2) \
V(TypedData_Float64x2Array_new, 2) \
V(TypedDataBase_length, 1) \
V(TypedDataBase_setClampedRange, 5) \
V(TypedData_GetInt8, 2) \
@@ -200,38 +186,8 @@ namespace dart {
V(TypedData_SetInt32x4, 3) \
V(TypedData_GetFloat64x2, 2) \
V(TypedData_SetFloat64x2, 3) \
V(TypedDataView_ByteDataView_new, 4) \
V(TypedDataView_Int8ArrayView_new, 4) \
V(TypedDataView_Uint8ArrayView_new, 4) \
V(TypedDataView_Uint8ClampedArrayView_new, 4) \
V(TypedDataView_Int16ArrayView_new, 4) \
V(TypedDataView_Uint16ArrayView_new, 4) \
V(TypedDataView_Int32ArrayView_new, 4) \
V(TypedDataView_Uint32ArrayView_new, 4) \
V(TypedDataView_Int64ArrayView_new, 4) \
V(TypedDataView_Uint64ArrayView_new, 4) \
V(TypedDataView_Float32ArrayView_new, 4) \
V(TypedDataView_Float64ArrayView_new, 4) \
V(TypedDataView_Float32x4ArrayView_new, 4) \
V(TypedDataView_Int32x4ArrayView_new, 4) \
V(TypedDataView_Float64x2ArrayView_new, 4) \
V(TypedDataView_offsetInBytes, 1) \
V(TypedDataView_typedData, 1) \
V(TypedDataView_UnmodifiableByteDataView_new, 4) \
V(TypedDataView_UnmodifiableInt8ArrayView_new, 4) \
V(TypedDataView_UnmodifiableUint8ArrayView_new, 4) \
V(TypedDataView_UnmodifiableUint8ClampedArrayView_new, 4) \
V(TypedDataView_UnmodifiableInt16ArrayView_new, 4) \
V(TypedDataView_UnmodifiableUint16ArrayView_new, 4) \
V(TypedDataView_UnmodifiableInt32ArrayView_new, 4) \
V(TypedDataView_UnmodifiableUint32ArrayView_new, 4) \
V(TypedDataView_UnmodifiableInt64ArrayView_new, 4) \
V(TypedDataView_UnmodifiableUint64ArrayView_new, 4) \
V(TypedDataView_UnmodifiableFloat32ArrayView_new, 4) \
V(TypedDataView_UnmodifiableFloat64ArrayView_new, 4) \
V(TypedDataView_UnmodifiableFloat32x4ArrayView_new, 4) \
V(TypedDataView_UnmodifiableInt32x4ArrayView_new, 4) \
V(TypedDataView_UnmodifiableFloat64x2ArrayView_new, 4) \
V(Float32x4_fromDoubles, 4) \
V(Float32x4_splat, 1) \
V(Float32x4_fromInt32x4Bits, 2) \

@@ -191,6 +191,9 @@ class CompileType : public ZoneAllocated {
// Create non-nullable String type.
static CompileType String();
// Create non-nullable Object type.
static CompileType Object();
// Perform a join operation over the type lattice.
void Union(CompileType* other);

@@ -2082,16 +2082,15 @@ class PhiUnboxingHeuristic : public ValueObject {
: worklist_(flow_graph, 10) {}
void Process(PhiInstr* phi) {
Representation unboxed = phi->representation();
auto new_representation = kTagged;
switch (phi->Type()->ToCid()) {
case kDoubleCid:
if (CanUnboxDouble()) {
// Could be UnboxedDouble or UnboxedFloat
unboxed = DetermineIfAnyIncomingUnboxedFloats(phi) ? kUnboxedFloat
: kUnboxedDouble;
new_representation = DetermineIfAnyIncomingUnboxedFloats(phi)
? kUnboxedFloat
: kUnboxedDouble;
#if defined(DEBUG)
if (unboxed == kUnboxedFloat) {
if (new_representation == kUnboxedFloat) {
for (auto input : phi->inputs()) {
ASSERT(input->representation() != kUnboxedDouble);
}
@@ -2101,78 +2100,90 @@ class PhiUnboxingHeuristic : public ValueObject {
break;
case kFloat32x4Cid:
if (ShouldInlineSimd()) {
unboxed = kUnboxedFloat32x4;
new_representation = kUnboxedFloat32x4;
}
break;
case kInt32x4Cid:
if (ShouldInlineSimd()) {
unboxed = kUnboxedInt32x4;
new_representation = kUnboxedInt32x4;
}
break;
case kFloat64x2Cid:
if (ShouldInlineSimd()) {
unboxed = kUnboxedFloat64x2;
new_representation = kUnboxedFloat64x2;
}
break;
}
// If all the inputs are unboxed, leave the Phi unboxed.
if ((unboxed == kTagged) && phi->Type()->IsInt()) {
bool should_unbox = true;
Representation new_representation = kTagged;
// If all the inputs are untagged or all the inputs are compatible unboxed
// integers, leave the Phi unboxed.
if (new_representation == kTagged && phi->Type()->IsInt()) {
for (auto input : phi->inputs()) {
if (input == phi) continue;
if (!IsUnboxedInteger(input->representation())) {
should_unbox = false;
if (input->representation() != kUntagged &&
!IsUnboxedInteger(input->representation())) {
new_representation = kTagged; // Reset to a boxed phi.
break;
}
if (new_representation == kTagged) {
new_representation = input->representation();
} else if (new_representation == kUntagged) {
// Don't allow mixing of untagged and unboxed values.
ASSERT_EQUAL(input->representation(), kUntagged);
} else if (new_representation != input->representation()) {
new_representation = kNoRepresentation;
// Don't allow mixing of untagged and unboxed values.
ASSERT(IsUnboxedInteger(input->representation()));
// Don't allow implicit conversion between signed and unsigned
// representations of the same size, since that loses information.
// This means the value sizes must be different if they are different
// unboxed integer representations.
ASSERT(RepresentationUtils::ValueSize(new_representation) !=
RepresentationUtils::ValueSize(input->representation()));
// Take the larger representation. If the larger representation is
// unsigned, then the smaller must be as well.
if (RepresentationUtils::ValueSize(new_representation) <
RepresentationUtils::ValueSize(input->representation())) {
ASSERT(!RepresentationUtils::IsUnsigned(input->representation()) ||
RepresentationUtils::IsUnsigned(new_representation));
new_representation = input->representation();
} else {
ASSERT(!RepresentationUtils::IsUnsigned(new_representation) ||
RepresentationUtils::IsUnsigned(input->representation()));
}
}
}
if (should_unbox) {
unboxed =
new_representation != kNoRepresentation ? new_representation
: RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt32)
? kUnboxedInt32
: kUnboxedInt64;
}
}
// Decide if it is worth to unbox an integer phi.
if ((unboxed == kTagged) && phi->Type()->IsInt() &&
!phi->Type()->can_be_sentinel()) {
// Decide if it is worth to unbox an boxed integer phi.
if (new_representation == kTagged && !phi->Type()->can_be_sentinel()) {
#if defined(TARGET_ARCH_IS_64_BIT)
// In AOT mode on 64-bit platforms always unbox integer typed phis
// (similar to how we treat doubles and other boxed numeric types).
// In JIT mode only unbox phis which are not fully known to be Smi.
if (is_aot_ || phi->Type()->ToCid() != kSmiCid) {
unboxed = kUnboxedInt64;
}
// In AOT mode on 64-bit platforms always unbox integer typed phis
// (similar to how we treat doubles and other boxed numeric types).
// In JIT mode only unbox phis which are not fully known to be Smi.
if (is_aot_ || phi->Type()->ToCid() != kSmiCid) {
new_representation = kUnboxedInt64;
}
#else
// If we are on a 32-bit platform check if there are unboxed values
// flowing into the phi and the phi value itself is flowing into an
// unboxed operation prefer to keep it unboxed.
// We use this heuristic instead of eagerly unboxing all the phis
// because we are concerned about the code size and register pressure.
const bool has_unboxed_incoming_value = HasUnboxedIncomingValue(phi);
const bool flows_into_unboxed_use = FlowsIntoUnboxedUse(phi);
// If we are on a 32-bit platform check if there are unboxed values
// flowing into the phi and the phi value itself is flowing into an
// unboxed operation prefer to keep it unboxed.
// We use this heuristic instead of eagerly unboxing all the phis
// because we are concerned about the code size and register pressure.
const bool has_unboxed_incoming_value = HasUnboxedIncomingValue(phi);
const bool flows_into_unboxed_use = FlowsIntoUnboxedUse(phi);
if (has_unboxed_incoming_value && flows_into_unboxed_use) {
unboxed =
RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt32)
? kUnboxedInt32
: kUnboxedInt64;
}
if (has_unboxed_incoming_value && flows_into_unboxed_use) {
new_representation =
RangeUtils::Fits(phi->range(), RangeBoundary::kRangeBoundaryInt32)
? kUnboxedInt32
: kUnboxedInt64;
}
#endif
}
}
phi->set_representation(unboxed);
phi->set_representation(new_representation);
}
private:

@@ -990,6 +990,15 @@ Instruction* StoreFieldInstr::Canonicalize(FlowGraph* flow_graph) {
value()->BindsToConstantNull()) {
return nullptr;
}
if (slot().kind() == Slot::Kind::kPointerBase_data &&
stores_inner_pointer() == InnerPointerAccess::kMayBeInnerPointer) {
const intptr_t cid = instance()->Type()->ToNullableCid();
// Pointers and ExternalTypedData objects never contain inner pointers.
if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
set_stores_inner_pointer(InnerPointerAccess::kCannotBeInnerPointer);
}
}
return this;
}
@@ -2732,6 +2741,24 @@ bool LoadFieldInstr::TryEvaluateLoad(const Object& instance,
return true;
}
bool LoadFieldInstr::MayCreateUntaggedAlias() const {
// If the load is guaranteed to never retrieve a GC-moveable address,
// then the returned address can't alias the (GC-moveable) instance.
if (loads_inner_pointer() != InnerPointerAccess::kMayBeInnerPointer) {
return false;
}
if (slot().IsIdentical(Slot::PointerBase_data())) {
// If we know statically that the instance is a Pointer, typed data view,
// or external typed data, then the data field doesn't alias the instance.
const intptr_t cid = instance()->Type()->ToNullableCid();
if (cid == kPointerCid) return false;
if (IsTypedDataViewClassId(cid)) return false;
if (IsUnmodifiableTypedDataViewClassId(cid)) return false;
if (IsExternalTypedDataClassId(cid)) return false;
}
return true;
}
bool LoadFieldInstr::Evaluate(const Object& instance, Object* result) {
return TryEvaluateLoad(instance, slot(), result);
}
@@ -2873,6 +2900,16 @@ Definition* LoadFieldInstr::Canonicalize(FlowGraph* flow_graph) {
}
}
break;
case Slot::Kind::kPointerBase_data:
ASSERT(!calls_initializer());
if (loads_inner_pointer() == InnerPointerAccess::kMayBeInnerPointer) {
const intptr_t cid = instance()->Type()->ToNullableCid();
// Pointers and ExternalTypedData objects never contain inner pointers.
if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
set_loads_inner_pointer(InnerPointerAccess::kCannotBeInnerPointer);
}
}
break;
default:
break;
}
@@ -2887,7 +2924,7 @@ Definition* LoadFieldInstr::Canonicalize(FlowGraph* flow_graph) {
}
}
if (instance()->definition()->IsAllocateObject() && slot().is_immutable()) {
if (instance()->definition()->IsAllocateObject() && IsImmutableLoad()) {
StoreFieldInstr* initializing_store = nullptr;
for (auto use : instance()->definition()->input_uses()) {
if (auto store = use->instruction()->AsStoreField()) {
@@ -3330,11 +3367,14 @@ Definition* IntConverterInstr::Canonicalize(FlowGraph* flow_graph) {
const auto intermediate_rep = first_converter->representation();
// Only eliminate intermediate conversion if it does not change the value.
auto src_defn = first_converter->value()->definition();
if (!Range::Fits(src_defn->range(), intermediate_rep)) {
if (intermediate_rep == kUntagged) {
// Both conversions are no-ops, as the other representations must be
// either kUnboxedIntPtr or kUnboxedFfiIntPtr.
} else if (!Range::Fits(src_defn->range(), intermediate_rep)) {
return this;
}
// Otherise it is safe to discard any other conversions from and then back
// Otherwise it is safe to discard any other conversions from and then back
// to the same integer type.
if (first_converter->from() == to()) {
return src_defn;
@@ -4450,20 +4490,40 @@ void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register obj = locs()->in(0).reg();
Register result = locs()->out(0).reg();
if (object()->definition()->representation() == kUntagged) {
__ LoadFromOffset(result, obj, offset());
} else {
ASSERT(object()->definition()->representation() == kTagged);
__ LoadFieldFromOffset(result, obj, offset());
}
}
LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
LocationSummary* locs = nullptr;
if (slot().representation() != kTagged) {
auto const rep = slot().representation();
if (rep != kTagged) {
ASSERT(!calls_initializer());
const intptr_t kNumTemps = 0;
locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, Location::RequiresRegister());
if (RepresentationUtils::IsUnboxedInteger(slot().representation())) {
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
if (rep == kUntagged) {
locs->set_out(0, Location::RequiresRegister());
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
const size_t value_size = RepresentationUtils::ValueSize(rep);
if (value_size <= compiler::target::kWordSize) {
locs->set_out(0, Location::RequiresRegister());
} else {
@@ -4509,15 +4569,18 @@ LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register instance_reg = locs()->in(0).reg();
if (representation() != kTagged) {
if (RepresentationUtils::IsUnboxedInteger(representation())) {
const size_t value_size =
RepresentationUtils::ValueSize(representation());
auto const rep = slot().representation();
if (rep != kTagged) {
if (rep == kUntagged) {
const Register result = locs()->out(0).reg();
__ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
RepresentationUtils::OperandSize(rep));
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
const size_t value_size = RepresentationUtils::ValueSize(rep);
if (value_size <= compiler::target::kWordSize) {
const Register result = locs()->out(0).reg();
__ LoadFieldFromOffset(
result, instance_reg, OffsetInBytes(),
RepresentationUtils::OperandSize(representation()));
__ LoadFieldFromOffset(result, instance_reg, OffsetInBytes(),
RepresentationUtils::OperandSize(rep));
} else {
auto const result_pair = locs()->out(0).AsPairLocation();
const Register result_lo = result_pair->At(0).reg();
@@ -6815,6 +6878,8 @@ Instruction* MemoryCopyInstr::Canonicalize(FlowGraph* flow_graph) {
void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register src_reg = locs()->in(kSrcPos).reg();
const Register dest_reg = locs()->in(kDestPos).reg();
const Representation src_rep = RequiredInputRepresentation(kSrcPos);
const Representation dest_rep = RequiredInputRepresentation(kDestPos);
const Location& src_start_loc = locs()->in(kSrcStartPos);
const Location& dest_start_loc = locs()->in(kDestStartPos);
const Location& length_loc = locs()->in(kLengthPos);
@@ -6830,8 +6895,9 @@ void MemoryCopyInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The zero constant case should be handled via canonicalization.
ASSERT(!constant_length || num_elements > 0);
EmitComputeStartPointer(compiler, src_cid_, src_reg, src_start_loc);
EmitComputeStartPointer(compiler, dest_cid_, dest_reg, dest_start_loc);
EmitComputeStartPointer(compiler, src_cid_, src_reg, src_rep, src_start_loc);
EmitComputeStartPointer(compiler, dest_cid_, dest_reg, dest_rep,
dest_start_loc);
compiler::Label copy_forwards, done;
if (!constant_length) {
@@ -7326,9 +7392,7 @@ void FfiCallInstr::EmitParamMoves(FlowGraphCompiler* compiler,
compiler->zone(), pointer_loc.payload_type(),
pointer_loc.container_type(), temp0);
compiler->EmitNativeMove(dst, pointer_loc, &temp_alloc);
__ LoadField(temp0,
compiler::FieldAddress(
temp0, compiler::target::PointerBase::data_offset()));
__ LoadFromSlot(temp0, temp0, Slot::PointerBase_data());
// Copy chunks.
const intptr_t sp_offset =
@@ -7396,9 +7460,7 @@ void FfiCallInstr::EmitReturnMoves(FlowGraphCompiler* compiler,
compiler->EmitMove(Location::RegisterLocation(temp0), typed_data_loc,
&no_temp);
}
__ LoadField(temp0,
compiler::FieldAddress(
temp0, compiler::target::PointerBase::data_offset()));
__ LoadFromSlot(temp0, temp0, Slot::PointerBase_data());
if (returnLocation.IsPointerToMemory()) {
// Copy blocks from the stack location to TypedData.
@@ -7478,10 +7540,12 @@ LocationSummary* StoreFieldInstr::MakeLocationSummary(Zone* zone,
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(kInstancePos, Location::RequiresRegister());
if (slot().representation() != kTagged) {
if (RepresentationUtils::IsUnboxedInteger(slot().representation())) {
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
const Representation rep = slot().representation();
if (rep != kTagged) {
if (rep == kUntagged) {
summary->set_in(kValuePos, Location::RequiresRegister());
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
const size_t value_size = RepresentationUtils::ValueSize(rep);
if (value_size <= compiler::target::kWordSize) {
summary->set_in(kValuePos, Location::RequiresRegister());
} else {
@@ -7539,17 +7603,20 @@ void StoreFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t offset_in_bytes = OffsetInBytes();
ASSERT(offset_in_bytes > 0); // Field is finalized and points after header.
auto const rep = slot().representation();
if (slot().representation() != kTagged) {
// Unboxed field.
ASSERT(memory_order_ != compiler::AssemblerBase::kRelease);
if (RepresentationUtils::IsUnboxedInteger(slot().representation())) {
const size_t value_size =
RepresentationUtils::ValueSize(slot().representation());
if (rep == kUntagged) {
const Register value = locs()->in(kValuePos).reg();
__ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
RepresentationUtils::OperandSize(rep));
} else if (RepresentationUtils::IsUnboxedInteger(rep)) {
const size_t value_size = RepresentationUtils::ValueSize(rep);
if (value_size <= compiler::target::kWordSize) {
const Register value = locs()->in(kValuePos).reg();
__ StoreFieldToOffset(
value, instance_reg, offset_in_bytes,
RepresentationUtils::OperandSize(slot().representation()));
__ StoreFieldToOffset(value, instance_reg, offset_in_bytes,
RepresentationUtils::OperandSize(rep));
} else {
auto const value_pair = locs()->in(kValuePos).AsPairLocation();
const Register value_lo = value_pair->At(0).reg();
@@ -7669,10 +7736,7 @@ void NativeReturnInstr::EmitReturnMoves(FlowGraphCompiler* compiler) {
if (dst1.IsMultiple()) {
Register typed_data_reg = locs()->in(0).reg();
// Load the data pointer out of the TypedData/Pointer.
__ LoadField(
typed_data_reg,
compiler::FieldAddress(typed_data_reg,
compiler::target::PointerBase::data_offset()));
__ LoadFromSlot(typed_data_reg, typed_data_reg, Slot::PointerBase_data());
const auto& multiple = dst1.AsMultiple();
int offset_in_bytes = 0;

@@ -3080,29 +3080,47 @@ class LoadIndexedUnsafeInstr : public TemplateDefinition<1, NoThrow> {
class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
public:
MemoryCopyInstr(Value* src,
classid_t src_cid,
Value* dest,
classid_t dest_cid,
Value* src_start,
Value* dest_start,
Value* length,
bool unboxed_inputs,
bool can_overlap = true)
: MemoryCopyInstr(Instance::ElementSizeFor(src_cid),
src,
kTagged,
src_cid,
dest,
kTagged,
dest_cid,
src_start,
dest_start,
length,
unboxed_inputs,
can_overlap) {}
MemoryCopyInstr(intptr_t element_size,
Value* src,
Value* dest,
Value* src_start,
Value* dest_start,
Value* length,
classid_t src_cid,
classid_t dest_cid,
bool unboxed_inputs,
bool can_overlap = true)
: src_cid_(src_cid),
dest_cid_(dest_cid),
element_size_(Instance::ElementSizeFor(src_cid)),
unboxed_inputs_(unboxed_inputs),
can_overlap_(can_overlap) {
ASSERT(IsArrayTypeSupported(src_cid));
ASSERT(IsArrayTypeSupported(dest_cid));
ASSERT(Instance::ElementSizeFor(src_cid) ==
Instance::ElementSizeFor(dest_cid));
SetInputAt(kSrcPos, src);
SetInputAt(kDestPos, dest);
SetInputAt(kSrcStartPos, src_start);
SetInputAt(kDestStartPos, dest_start);
SetInputAt(kLengthPos, length);
}
: MemoryCopyInstr(element_size,
src,
kUntagged,
kIllegalCid,
dest,
kUntagged,
kIllegalCid,
src_start,
dest_start,
length,
unboxed_inputs,
can_overlap) {}
enum {
kSrcPos = 0,
@@ -3115,9 +3133,11 @@ class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
DECLARE_INSTRUCTION(MemoryCopy)
virtual Representation RequiredInputRepresentation(intptr_t index) const {
if (index == kSrcPos || index == kDestPos) {
// The object inputs are always tagged.
return kTagged;
if (index == kSrcPos) {
return src_representation_;
}
if (index == kDestPos) {
return dest_representation_;
}
return unboxed_inputs() ? kUnboxedIntPtr : kTagged;
}
@@ -3125,7 +3145,19 @@ class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
virtual bool ComputeCanDeoptimize() const { return false; }
virtual bool HasUnknownSideEffects() const { return true; }
virtual bool AttributesEqual(const Instruction& other) const { return true; }
virtual bool AttributesEqual(const Instruction& other) const {
if (auto* const copy = other.AsMemoryCopy()) {
if (element_size_ != copy->element_size_) return false;
if (unboxed_inputs_ != copy->unboxed_inputs_) return false;
if (can_overlap_ != copy->can_overlap_) return false;
if (src_representation_ != copy->src_representation_) return false;
if (dest_representation_ != copy->dest_representation_) return false;
if (src_cid_ != copy->src_cid_) return false;
if (dest_cid_ != copy->dest_cid_) return false;
return true;
}
return false;
}
Value* src() const { return inputs_[kSrcPos]; }
Value* dest() const { return inputs_[kDestPos]; }
@@ -3142,12 +3174,16 @@ class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
PRINT_OPERANDS_TO_SUPPORT
DECLARE_ATTRIBUTE(element_size());
#define FIELD_LIST(F) \
F(classid_t, src_cid_) \
F(classid_t, dest_cid_) \
F(const classid_t, src_cid_) \
F(const classid_t, dest_cid_) \
F(intptr_t, element_size_) \
F(bool, unboxed_inputs_) \
F(bool, can_overlap_)
F(const bool, can_overlap_) \
F(const Representation, src_representation_) \
F(const Representation, dest_representation_)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(MemoryCopyInstr,
TemplateInstruction,
@@ -3155,11 +3191,55 @@ class MemoryCopyInstr : public TemplateInstruction<5, NoThrow> {
#undef FIELD_LIST
private:
MemoryCopyInstr(intptr_t element_size,
Value* src,
Representation src_representation,
classid_t src_cid,
Value* dest,
Representation dest_representation,
classid_t dest_cid,
Value* src_start,
Value* dest_start,
Value* length,
bool unboxed_inputs,
bool can_overlap = true)
: src_cid_(src_cid),
dest_cid_(dest_cid),
element_size_(element_size),
unboxed_inputs_(unboxed_inputs),
can_overlap_(can_overlap),
src_representation_(src_representation),
dest_representation_(dest_representation) {
if (src_representation == kTagged) {
ASSERT(IsArrayTypeSupported(src_cid));
ASSERT_EQUAL(Instance::ElementSizeFor(src_cid), element_size);
} else {
ASSERT_EQUAL(src_representation, kUntagged);
ASSERT_EQUAL(src_cid, kIllegalCid);
}
if (dest_representation == kTagged) {
ASSERT(IsArrayTypeSupported(dest_cid));
ASSERT_EQUAL(Instance::ElementSizeFor(dest_cid), element_size);
} else {
ASSERT_EQUAL(dest_representation, kUntagged);
ASSERT_EQUAL(dest_cid, kIllegalCid);
}
SetInputAt(kSrcPos, src);
SetInputAt(kDestPos, dest);
SetInputAt(kSrcStartPos, src_start);
SetInputAt(kDestStartPos, dest_start);
SetInputAt(kLengthPos, length);
}
// Set array_reg to point to the index indicated by start (contained in
// start_loc) of the typed data or string in array (contained in array_reg).
// If array_rep is tagged, then the payload address is retrieved according
// to array_cid, otherwise the register is assumed to already have the
// payload address.
void EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
Representation array_rep,
Location start_loc);
// Generates an unrolled loop for copying a known amount of data from
@@ -6031,6 +6111,12 @@ class DebugStepCheckInstr : public TemplateInstruction<0, NoThrow> {
DISALLOW_COPY_AND_ASSIGN(DebugStepCheckInstr);
};
enum class InnerPointerAccess {
kNotUntagged,
kMayBeInnerPointer,
kCannotBeInnerPointer,
};
enum StoreBarrierType { kNoStoreBarrier, kEmitStoreBarrier };
// StoreField instruction represents a store of the given [value] into
@@ -6078,6 +6164,7 @@ class StoreFieldInstr : public TemplateInstruction<2, NoThrow> {
Value* instance,
Value* value,
StoreBarrierType emit_store_barrier,
InnerPointerAccess stores_inner_pointer,
const InstructionSource& source,
Kind kind = Kind::kOther,
compiler::Assembler::MemoryOrder memory_order =
@@ -6087,11 +6174,42 @@
emit_store_barrier_(emit_store_barrier),
memory_order_(memory_order),
token_pos_(source.token_pos),
is_initialization_(kind == Kind::kInitializing) {
is_initialization_(kind == Kind::kInitializing),
stores_inner_pointer_(stores_inner_pointer) {
switch (stores_inner_pointer) {
case InnerPointerAccess::kNotUntagged:
ASSERT(slot.representation() != kUntagged);
break;
case InnerPointerAccess::kMayBeInnerPointer:
ASSERT(slot.representation() == kUntagged);
ASSERT(slot.may_contain_inner_pointer());
break;
case InnerPointerAccess::kCannotBeInnerPointer:
ASSERT(slot.representation() == kUntagged);
break;
}
SetInputAt(kInstancePos, instance);
SetInputAt(kValuePos, value);
}
// Convenience constructor for slots not containing an untagged address.
StoreFieldInstr(const Slot& slot,
Value* instance,
Value* value,
StoreBarrierType emit_store_barrier,
const InstructionSource& source,
Kind kind = Kind::kOther,
compiler::Assembler::MemoryOrder memory_order =
compiler::Assembler::kRelaxedNonAtomic)
: StoreFieldInstr(slot,
instance,
value,
emit_store_barrier,
InnerPointerAccess::kNotUntagged,
source,
kind,
memory_order) {}
// Convenience constructor that looks up an IL Slot for the given [field].
StoreFieldInstr(const Field& field,
Value* instance,
@@ -6128,7 +6246,7 @@ class StoreFieldInstr : public TemplateInstruction<2, NoThrow> {
bool is_initialization() const { return is_initialization_; }
bool ShouldEmitStoreBarrier() const {
if (RepresentationUtils::IsUnboxed(slot().representation())) {
if (slot().representation() != kTagged) {
// The target field is native and unboxed, so not traversed by the GC.
return false;
}
@@ -6149,6 +6267,17 @@
emit_store_barrier_ = value;
}
InnerPointerAccess stores_inner_pointer() const {
return stores_inner_pointer_;
}
void set_stores_inner_pointer(InnerPointerAccess value) {
// We should never change this for a non-untagged field.
ASSERT(stores_inner_pointer_ != InnerPointerAccess::kNotUntagged);
// We only convert from may to cannot, never the other direction.
ASSERT(value == InnerPointerAccess::kCannotBeInnerPointer);
stores_inner_pointer_ = value;
}
virtual bool CanTriggerGC() const { return false; }
virtual bool ComputeCanDeoptimize() const { return false; }
@@ -6173,7 +6302,8 @@
F(compiler::Assembler::MemoryOrder, memory_order_) \
F(const TokenPosition, token_pos_) \
/* Marks initializing stores. E.g. in the constructor. */ \
F(const bool, is_initialization_)
F(const bool, is_initialization_) \
F(InnerPointerAccess, stores_inner_pointer_)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(StoreFieldInstr,
TemplateInstruction,
@@ -7547,11 +7677,12 @@ class AllocateTypedDataInstr : public TemplateArrayAllocation<1> {
DISALLOW_COPY_AND_ASSIGN(AllocateTypedDataInstr);
};
// Note: This instruction must not be moved without the indexed access that
// depends on it (e.g. out of loops). GC may collect the array while the
// external data-array is still accessed.
// TODO(vegorov) enable LICMing this instruction by ensuring that array itself
// is kept alive.
// This instruction is used to access fields in non-Dart objects, such as Thread
// and IsolateGroup.
//
// Note: The instruction must not be moved without the indexed access or store
// that depends on it (e.g. out of loops), as the GC may collect or move the
// object containing that address.
class LoadUntaggedInstr : public TemplateDefinition<1, NoThrow> {
public:
LoadUntaggedInstr(Value* object, intptr_t offset) : offset_(offset) {
@@ -7646,6 +7777,7 @@ class LoadFieldInstr : public TemplateLoadField<1> {
public:
LoadFieldInstr(Value* instance,
const Slot& slot,
InnerPointerAccess loads_inner_pointer,
const InstructionSource& source,
bool calls_initializer = false,
intptr_t deopt_id = DeoptId::kNone)
@@ -7653,13 +7785,50 @@ class LoadFieldInstr : public TemplateLoadField<1> {
calls_initializer,
deopt_id,
slot.IsDartField() ? &slot.field() : nullptr),
slot_(slot) {
slot_(slot),
loads_inner_pointer_(loads_inner_pointer) {
switch (loads_inner_pointer) {
case InnerPointerAccess::kNotUntagged:
ASSERT(slot.representation() != kUntagged);
break;
case InnerPointerAccess::kMayBeInnerPointer:
ASSERT(slot.representation() == kUntagged);
ASSERT(slot.may_contain_inner_pointer());
break;
case InnerPointerAccess::kCannotBeInnerPointer:
ASSERT(slot.representation() == kUntagged);
break;
}
SetInputAt(0, instance);
}
// Convenience function for slots that cannot hold untagged addresses.
LoadFieldInstr(Value* instance,
const Slot& slot,
const InstructionSource& source,
bool calls_initializer = false,
intptr_t deopt_id = DeoptId::kNone)
: LoadFieldInstr(instance,
slot,
InnerPointerAccess::kNotUntagged,
source,
calls_initializer,
deopt_id) {}
Value* instance() const { return inputs_[0]; }
const Slot& slot() const { return slot_; }
InnerPointerAccess loads_inner_pointer() const {
return loads_inner_pointer_;
}
void set_loads_inner_pointer(InnerPointerAccess value) {
// We should never change this for a non-untagged field.
ASSERT(loads_inner_pointer_ != InnerPointerAccess::kNotUntagged);
// We only convert from may to cannot, never the other direction.
ASSERT(value == InnerPointerAccess::kCannotBeInnerPointer);
loads_inner_pointer_ = value;
}
virtual Representation representation() const;
DECLARE_INSTRUCTION(LoadField)
@@ -7669,6 +7838,18 @@
virtual void InferRange(RangeAnalysis* analysis, Range* range);
bool MayCreateUntaggedAlias() const;
bool IsImmutableLoad() const {
// The data() field in PointerBase is marked mutable, but is not actually
// mutable if it doesn't contain an inner pointer (e.g., for external
// typed data and Pointer objects).
if (slot().IsIdentical(Slot::PointerBase_data())) {
return loads_inner_pointer() != InnerPointerAccess::kMayBeInnerPointer;
}
return slot().is_immutable();
}
bool IsImmutableLengthLoad() const { return slot().IsImmutableLengthSlot(); }
// Try evaluating this load against the given constant value of
@@ -7700,7 +7881,9 @@
PRINT_OPERANDS_TO_SUPPORT
#define FIELD_LIST(F) F(const Slot&, slot_)
#define FIELD_LIST(F) \
F(const Slot&, slot_) \
F(InnerPointerAccess, loads_inner_pointer_)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(LoadFieldInstr,
TemplateLoadField,

@@ -389,13 +389,13 @@ void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
Representation array_rep,
Location start_loc) {
intptr_t offset;
if (IsTypedDataBaseClassId(array_cid)) {
__ ldr(array_reg,
compiler::FieldAddress(
array_reg, compiler::target::PointerBase::data_offset()));
offset = 0;
intptr_t offset = 0;
if (array_rep != kTagged) {
// Do nothing, array_reg already contains the payload address.
} else if (IsTypedDataBaseClassId(array_cid)) {
__ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
} else {
switch (array_cid) {
case kOneByteStringCid:
@@ -411,14 +411,12 @@ void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
compiler::FieldAddress(array_reg,
compiler::target::ExternalOneByteString::
external_data_offset()));
offset = 0;
break;
case kExternalTwoByteStringCid:
__ ldr(array_reg,
compiler::FieldAddress(array_reg,
compiler::target::ExternalTwoByteString::
external_data_offset()));
offset = 0;
break;
default:
UNREACHABLE();
@@ -2047,8 +2045,7 @@ void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Label loop, loop_in;
// Address of input bytes.
__ LoadFieldFromOffset(bytes_reg, bytes_reg,
compiler::target::PointerBase::data_offset());
__ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
// Table.
__ AddImmediate(
@@ -2100,24 +2097,6 @@
__ StoreFieldToOffset(flags_temp_reg, decoder_reg, scan_flags_field_offset);
}
LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register obj = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
if (object()->definition()->representation() == kUntagged) {
__ LoadFromOffset(result, obj, offset());
} else {
ASSERT(object()->definition()->representation() == kTagged);
__ LoadFieldFromOffset(result, obj, offset());
}
}
static bool CanBeImmediateIndex(Value* value,
intptr_t cid,
bool is_external,

@@ -290,13 +290,13 @@ void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
Representation array_rep,
Location start_loc) {
intptr_t offset;
if (IsTypedDataBaseClassId(array_cid)) {
__ ldr(array_reg,
compiler::FieldAddress(
array_reg, compiler::target::PointerBase::data_offset()));
offset = 0;
intptr_t offset = 0;
if (array_rep != kTagged) {
// Do nothing, array_reg already contains the payload address.
} else if (IsTypedDataBaseClassId(array_cid)) {
__ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
} else {
switch (array_cid) {
case kOneByteStringCid:
@@ -312,14 +312,12 @@ void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
compiler::FieldAddress(array_reg,
compiler::target::ExternalOneByteString::
external_data_offset()));
offset = 0;
break;
case kExternalTwoByteStringCid:
__ ldr(array_reg,
compiler::FieldAddress(array_reg,
compiler::target::ExternalTwoByteString::
external_data_offset()));
offset = 0;
break;
default:
UNREACHABLE();
@@ -1811,8 +1809,7 @@ void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Label loop, loop_in;
// Address of input bytes.
__ LoadFieldFromOffset(bytes_reg, bytes_reg,
compiler::target::PointerBase::data_offset());
__ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
// Table.
__ AddImmediate(
@@ -1876,24 +1873,6 @@
}
}
LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register obj = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
if (object()->definition()->representation() == kUntagged) {
__ LoadFromOffset(result, obj, offset());
} else {
ASSERT(object()->definition()->representation() == kTagged);
__ LoadFieldFromOffset(result, obj, offset());
}
}
static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) {
ConstantInstr* constant = value->definition()->AsConstant();
if ((constant == nullptr) || !constant->value().IsSmi()) {

@@ -183,13 +183,13 @@ void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
Representation array_rep,
Location start_loc) {
intptr_t offset;
if (IsTypedDataBaseClassId(array_cid)) {
__ movl(array_reg,
compiler::FieldAddress(
array_reg, compiler::target::PointerBase::data_offset()));
offset = 0;
intptr_t offset = 0;
if (array_rep != kTagged) {
// Do nothing, array_reg already contains the payload address.
} else if (IsTypedDataBaseClassId(array_cid)) {
__ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
} else {
switch (array_cid) {
case kOneByteStringCid:
@@ -205,14 +205,12 @@ void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
compiler::FieldAddress(array_reg,
compiler::target::ExternalOneByteString::
external_data_offset()));
offset = 0;
break;
case kExternalTwoByteStringCid:
__ movl(array_reg,
compiler::FieldAddress(array_reg,
compiler::target::ExternalTwoByteString::
external_data_offset()));
offset = 0;
break;
default:
UNREACHABLE();
@@ -1467,9 +1465,7 @@ void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Label rest, rest_loop, rest_loop_in, done;
// Address of input bytes.
__ movl(bytes_reg,
compiler::FieldAddress(bytes_reg,
compiler::target::PointerBase::data_offset()));
__ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
// Pointers to start, end and end-16.
__ leal(bytes_ptr_reg, compiler::Address(bytes_reg, start_reg, TIMES_1, 0));
@@ -1587,24 +1583,6 @@
flags_reg);
}
LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone, kNumInputs, Location::SameAsFirstInput(),
LocationSummary::kNoCall);
}
void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register obj = locs()->in(0).reg();
Register result = locs()->out(0).reg();
if (object()->definition()->representation() == kUntagged) {
__ movl(result, compiler::Address(obj, offset()));
} else {
ASSERT(object()->definition()->representation() == kTagged);
__ movl(result, compiler::FieldAddress(obj, offset()));
}
}
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;

@@ -927,6 +927,9 @@ void StoreFieldInstr::PrintOperandsTo(BaseTextBuffer* f) const {
if (emit_store_barrier_ == kNoStoreBarrier) {
f->AddString(", NoStoreBarrier");
}
if (stores_inner_pointer() == InnerPointerAccess::kMayBeInnerPointer) {
f->AddString(", MayStoreInnerPointer");
}
}
void IfThenElseInstr::PrintOperandsTo(BaseTextBuffer* f) const {
@@ -992,10 +995,13 @@ void MaterializeObjectInstr::PrintOperandsTo(BaseTextBuffer* f) const {
void LoadFieldInstr::PrintOperandsTo(BaseTextBuffer* f) const {
instance()->PrintTo(f);
f->Printf(" . %s%s", slot().Name(), slot().is_immutable() ? " {final}" : "");
f->Printf(" . %s%s", slot().Name(), IsImmutableLoad() ? " {final}" : "");
if (calls_initializer()) {
f->AddString(", CallsInitializer");
}
if (loads_inner_pointer() == InnerPointerAccess::kMayBeInnerPointer) {
f->AddString(", MayLoadInnerPointer");
}
}
void LoadUntaggedInstr::PrintOperandsTo(BaseTextBuffer* f) const {
@@ -1446,23 +1452,43 @@ void MemoryCopyInstr::PrintOperandsTo(BaseTextBuffer* f) const {
// kTypedDataUint8ArrayCid is used as the default cid for cases where
// the destination object is a subclass of PointerBase and the arguments
// are given in terms of bytes, so only print if the cid differs.
if (dest_cid_ != kTypedDataUint8ArrayCid) {
const Class& cls =
Class::Handle(IsolateGroup::Current()->class_table()->At(dest_cid_));
if (!cls.IsNull()) {
f->Printf(", dest_cid=%s (%d)", cls.ScrubbedNameCString(), dest_cid_);
} else {
f->Printf(", dest_cid=%d", dest_cid_);
}
switch (dest_representation_) {
case kUntagged:
f->Printf(", dest untagged");
break;
case kTagged:
if (dest_cid_ != kTypedDataUint8ArrayCid) {
const Class& cls = Class::Handle(
IsolateGroup::Current()->class_table()->At(dest_cid_));
if (!cls.IsNull()) {
f->Printf(", dest_cid=%s (%d)", cls.ScrubbedNameCString(), dest_cid_);
} else {
f->Printf(", dest_cid=%d", dest_cid_);
}
}
break;
default:
UNREACHABLE();
}
if (src_cid_ != dest_cid_) {
const Class& cls =
Class::Handle(IsolateGroup::Current()->class_table()->At(src_cid_));
if (!cls.IsNull()) {
f->Printf(", src_cid=%s (%d)", cls.ScrubbedNameCString(), src_cid_);
} else {
f->Printf(", src_cid=%d", src_cid_);
}
switch (src_representation_) {
case kUntagged:
f->Printf(", src untagged");
break;
case kTagged:
if ((dest_representation_ == kTagged && dest_cid_ != src_cid_) ||
(dest_representation_ != kTagged &&
src_cid_ != kTypedDataUint8ArrayCid)) {
const Class& cls =
Class::Handle(IsolateGroup::Current()->class_table()->At(src_cid_));
if (!cls.IsNull()) {
f->Printf(", src_cid=%s (%d)", cls.ScrubbedNameCString(), src_cid_);
} else {
f->Printf(", src_cid=%d", src_cid_);
}
}
break;
default:
UNREACHABLE();
}
if (element_size() != 1) {
f->Printf(", element_size=%" Pd "", element_size());

@@ -385,13 +385,13 @@ void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
Representation array_rep,
Location start_loc) {
intptr_t offset;
if (IsTypedDataBaseClassId(array_cid)) {
__ lx(array_reg,
compiler::FieldAddress(array_reg,
compiler::target::PointerBase::data_offset()));
offset = 0;
intptr_t offset = 0;
if (array_rep != kTagged) {
// Do nothing, array_reg already contains the payload address.
} else if (IsTypedDataBaseClassId(array_cid)) {
__ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
} else {
switch (array_cid) {
case kOneByteStringCid:
@@ -407,14 +407,12 @@ void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
compiler::FieldAddress(array_reg,
compiler::target::ExternalOneByteString::
external_data_offset()));
offset = 0;
break;
case kExternalTwoByteStringCid:
__ lx(array_reg,
compiler::FieldAddress(array_reg,
compiler::target::ExternalTwoByteString::
external_data_offset()));
offset = 0;
break;
default:
UNREACHABLE();
@@ -1979,8 +1977,7 @@ void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Label loop, loop_in;
// Address of input bytes.
__ LoadFieldFromOffset(bytes_reg, bytes_reg,
compiler::target::PointerBase::data_offset());
__ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
// Table.
__ AddImmediate(
@@ -2037,24 +2034,6 @@
}
}
LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Register obj = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
if (object()->definition()->representation() == kUntagged) {
__ LoadFromOffset(result, obj, offset());
} else {
ASSERT(object()->definition()->representation() == kTagged);
__ LoadFieldFromOffset(result, obj, offset());
}
}
static bool CanBeImmediateIndex(Value* value, intptr_t cid, bool is_external) {
ConstantInstr* constant = value->definition()->AsConstant();
if ((constant == nullptr) || !constant->value().IsSmi()) {

@@ -740,14 +740,15 @@ ISOLATE_UNIT_TEST_CASE(IRTest_RawStoreField) {
}));
}
auto pointer_value = Value(pointer);
auto* const load_untagged_instr = new (zone) LoadUntaggedInstr(
&pointer_value, compiler::target::PointerBase::data_offset());
flow_graph->InsertBefore(another_function_call, load_untagged_instr, nullptr,
auto* const load_field_instr = new (zone) LoadFieldInstr(
&pointer_value, Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer, InstructionSource());
flow_graph->InsertBefore(another_function_call, load_field_instr, nullptr,
FlowGraph::kValue);
auto load_untagged_value = Value(load_untagged_instr);
auto load_field_value = Value(load_field_instr);
auto pointer_value2 = Value(pointer);
auto* const raw_store_field_instr =
new (zone) RawStoreFieldInstr(&load_untagged_value, &pointer_value2, 0);
new (zone) RawStoreFieldInstr(&load_field_value, &pointer_value2, 0);
flow_graph->InsertBefore(another_function_call, raw_store_field_instr,
nullptr, FlowGraph::kEffect);
another_function_call->RemoveFromGraph();
@@ -758,7 +759,7 @@ ISOLATE_UNIT_TEST_CASE(IRTest_RawStoreField) {
EXPECT(cursor.TryMatch({
kMoveGlob,
kMatchAndMoveStaticCall,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMatchAndMoveRawStoreField,
}));
}
@@ -835,18 +836,19 @@ ISOLATE_UNIT_TEST_CASE(IRTest_RawLoadField) {
}));
}
auto pointer_value = Value(pointer);
auto* const load_untagged_instr = new (zone) LoadUntaggedInstr(
&pointer_value, compiler::target::PointerBase::data_offset());
flow_graph->InsertBefore(another_function_call, load_untagged_instr, nullptr,
auto* const load_field_instr = new (zone) LoadFieldInstr(
&pointer_value, Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer, InstructionSource());
flow_graph->InsertBefore(another_function_call, load_field_instr, nullptr,
FlowGraph::kValue);
auto load_untagged_value = Value(load_untagged_instr);
auto load_field_value = Value(load_field_instr);
auto* const constant_instr = new (zone) UnboxedConstantInstr(
Integer::ZoneHandle(zone, Integer::New(0, Heap::kOld)), kUnboxedIntPtr);
flow_graph->InsertBefore(another_function_call, constant_instr, nullptr,
FlowGraph::kValue);
auto constant_value = Value(constant_instr);
auto* const load_indexed_instr = new (zone)
LoadIndexedInstr(&load_untagged_value, &constant_value,
LoadIndexedInstr(&load_field_value, &constant_value,
/*index_unboxed=*/true, /*index_scale=*/1, kArrayCid,
kAlignedAccess, DeoptId::kNone, InstructionSource());
flow_graph->InsertBefore(another_function_call, load_indexed_instr, nullptr,
@ -861,7 +863,7 @@ ISOLATE_UNIT_TEST_CASE(IRTest_RawLoadField) {
EXPECT(cursor.TryMatch({
kMoveGlob,
kMatchAndMoveStaticCall,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMatchAndMoveUnboxedConstant,
kMatchAndMoveLoadIndexed,
kMatchAndMoveStaticCall,

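For readers of the updated constructor calls: InnerPointerAccess is the new three-state argument threaded through LoadFieldInstr and StoreFieldInstr. A sketch of the enum, with semantics inferred from the slot documentation later in this CL (the declaration site and comments are assumptions, not copied from the source):

// Sketch only: the three states referenced throughout this CL.
enum class InnerPointerAccess {
  // The accessed slot does not hold an untagged address at all.
  kNotUntagged,
  // The slot holds an untagged address that never points into a GC-movable
  // object (e.g. Pointer and external TypedData payloads).
  kCannotBeInnerPointer,
  // The slot may hold an untagged address into a GC-movable object
  // (e.g. internal TypedData payloads), so GC can invalidate loaded values.
  kMayBeInnerPointer,
};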

@ -276,14 +276,16 @@ void MemoryCopyInstr::EmitLoopCopy(FlowGraphCompiler* compiler,
void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
classid_t array_cid,
Register array_reg,
Representation array_rep,
Location start_loc) {
intptr_t offset;
if (IsTypedDataBaseClassId(array_cid)) {
__ movq(array_reg,
compiler::FieldAddress(
array_reg, compiler::target::PointerBase::data_offset()));
offset = 0;
intptr_t offset = 0;
if (array_rep != kTagged) {
// Do nothing, array_reg already contains the payload address.
} else if (IsTypedDataBaseClassId(array_cid)) {
ASSERT_EQUAL(array_rep, kTagged);
__ LoadFromSlot(array_reg, array_reg, Slot::PointerBase_data());
} else {
ASSERT_EQUAL(array_rep, kTagged);
switch (array_cid) {
case kOneByteStringCid:
offset =
@ -298,14 +300,12 @@ void MemoryCopyInstr::EmitComputeStartPointer(FlowGraphCompiler* compiler,
compiler::FieldAddress(array_reg,
compiler::target::ExternalOneByteString::
external_data_offset()));
offset = 0;
break;
case kExternalTwoByteStringCid:
__ movq(array_reg,
compiler::FieldAddress(array_reg,
compiler::target::ExternalTwoByteString::
external_data_offset()));
offset = 0;
break;
default:
UNREACHABLE();
@ -1697,9 +1697,7 @@ void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Label rest, rest_loop, rest_loop_in, done;
// Address of input bytes.
__ movq(bytes_reg,
compiler::FieldAddress(bytes_reg,
compiler::target::PointerBase::data_offset()));
__ LoadFromSlot(bytes_reg, bytes_reg, Slot::PointerBase_data());
// Pointers to start, end and end-16.
__ leaq(bytes_ptr_reg, compiler::Address(bytes_reg, start_reg, TIMES_1, 0));
@ -1817,24 +1815,6 @@ void Utf8ScanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
LocationSummary* LoadUntaggedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
return LocationSummary::Make(zone, kNumInputs, Location::RequiresRegister(),
LocationSummary::kNoCall);
}
void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register obj = locs()->in(0).reg();
Register result = locs()->out(0).reg();
if (object()->definition()->representation() == kUntagged) {
__ movq(result, compiler::Address(obj, offset()));
} else {
ASSERT(object()->definition()->representation() == kTagged);
__ movq(result, compiler::FieldAddress(obj, offset()));
}
}
LocationSummary* LoadIndexedInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 2;


@ -2781,8 +2781,9 @@ static intptr_t PrepareInlineIndexedOp(FlowGraph* flow_graph,
*array = elements;
array_cid = kArrayCid;
} else if (IsExternalTypedDataClassId(array_cid)) {
LoadUntaggedInstr* elements = new (Z) LoadUntaggedInstr(
new (Z) Value(*array), compiler::target::PointerBase::data_offset());
auto* const elements = new (Z) LoadFieldInstr(
new (Z) Value(*array), Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer, call->source());
*cursor =
flow_graph->AppendTo(*cursor, elements, nullptr, FlowGraph::kValue);
*array = elements;
@ -3177,9 +3178,14 @@ static void PrepareInlineByteArrayBaseOp(FlowGraph* flow_graph,
Definition** array,
Instruction** cursor) {
if (array_cid == kDynamicCid || IsExternalTypedDataClassId(array_cid)) {
// Internal or External typed data: load untagged.
auto elements = new (Z) LoadUntaggedInstr(
new (Z) Value(*array), compiler::target::PointerBase::data_offset());
// Internal or External typed data: load the untagged base address.
auto const loads_inner_pointer =
IsExternalTypedDataClassId(array_cid)
? InnerPointerAccess::kCannotBeInnerPointer
: InnerPointerAccess::kMayBeInnerPointer;
auto* const elements =
new (Z) LoadFieldInstr(new (Z) Value(*array), Slot::PointerBase_data(),
loads_inner_pointer, call->source());
*cursor =
flow_graph->AppendTo(*cursor, elements, nullptr, FlowGraph::kValue);
*array = elements;
@ -5034,55 +5040,6 @@ bool FlowGraphInliner::TryInlineRecognizedMethod(
return true;
}
case MethodRecognizer::kMemCopy: {
// Keep consistent with kernel_to_il.cc (except unboxed param).
*entry = new (Z)
FunctionEntryInstr(graph_entry, flow_graph->allocate_block_id(),
call->GetBlock()->try_index(), DeoptId::kNone);
(*entry)->InheritDeoptTarget(Z, call);
Definition* arg_target = call->ArgumentAt(0);
Definition* arg_target_offset_in_bytes = call->ArgumentAt(1);
Definition* arg_source = call->ArgumentAt(2);
Definition* arg_source_offset_in_bytes = call->ArgumentAt(3);
Definition* arg_length_in_bytes = call->ArgumentAt(4);
auto env = call->deopt_id() != DeoptId::kNone ? call->env() : nullptr;
// Insert explicit unboxing instructions with truncation to avoid relying
// on [SelectRepresentations] which doesn't mark them as truncating.
arg_target_offset_in_bytes = UnboxInstr::Create(
kUnboxedIntPtr, new (Z) Value(arg_target_offset_in_bytes),
call->deopt_id(), Instruction::kNotSpeculative);
arg_target_offset_in_bytes->AsUnboxInteger()->mark_truncating();
flow_graph->AppendTo(*entry, arg_target_offset_in_bytes, env,
FlowGraph::kValue);
arg_source_offset_in_bytes = UnboxInstr::Create(
kUnboxedIntPtr, new (Z) Value(arg_source_offset_in_bytes),
call->deopt_id(), Instruction::kNotSpeculative);
arg_source_offset_in_bytes->AsUnboxInteger()->mark_truncating();
flow_graph->AppendTo(arg_target_offset_in_bytes,
arg_source_offset_in_bytes, env, FlowGraph::kValue);
arg_length_in_bytes =
UnboxInstr::Create(kUnboxedIntPtr, new (Z) Value(arg_length_in_bytes),
call->deopt_id(), Instruction::kNotSpeculative);
arg_length_in_bytes->AsUnboxInteger()->mark_truncating();
flow_graph->AppendTo(arg_source_offset_in_bytes, arg_length_in_bytes, env,
FlowGraph::kValue);
*last = new (Z)
MemoryCopyInstr(new (Z) Value(arg_source), new (Z) Value(arg_target),
new (Z) Value(arg_source_offset_in_bytes),
new (Z) Value(arg_target_offset_in_bytes),
new (Z) Value(arg_length_in_bytes),
/*src_cid=*/kTypedDataUint8ArrayCid,
/*dest_cid=*/kTypedDataUint8ArrayCid,
/*unboxed_inputs=*/true, /*can_overlap=*/true);
flow_graph->AppendTo(arg_length_in_bytes, *last, env, FlowGraph::kEffect);
*result = flow_graph->constant_null();
return true;
}
default:
return false;
}


@ -60,8 +60,13 @@ bool RepresentationUtils::IsUnsigned(Representation rep) {
#undef REP_IN_SET_CLAUSE
compiler::OperandSize RepresentationUtils::OperandSize(Representation rep) {
if (rep == kTagged || rep == kUntagged) {
if (rep == kTagged) {
return compiler::kObjectBytes;
} else if (rep == kUntagged) {
// Untagged addresses are either loaded from and stored to word-sized native
// fields, or generated from already-extended tagged addresses when
// compressed pointers are enabled.
return compiler::kWordBytes;
}
ASSERT(IsUnboxedInteger(rep));
switch (ValueSize(rep)) {

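Summarizing the branch above (not additional source):

// OperandSize(kTagged)   == kObjectBytes  // 4 bytes with compressed pointers
// OperandSize(kUntagged) == kWordBytes    // always the full machine word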

@ -224,12 +224,11 @@ static void RunMemoryCopyInstrTest(intptr_t src_start,
Integer::ZoneHandle(zone, Integer::New(length, Heap::kOld)), rep);
auto* const memory_copy_instr = new (zone) MemoryCopyInstr(
new (zone) Value(pointer), new (zone) Value(pointer2),
new (zone) Value(src_start_constant_instr),
new (zone) Value(pointer), /*src_cid=*/cid, new (zone) Value(pointer2),
/*dest_cid=*/cid, new (zone) Value(src_start_constant_instr),
new (zone) Value(dest_start_constant_instr),
new (zone) Value(length_constant_instr),
/*src_cid=*/cid,
/*dest_cid=*/cid, unboxed_inputs, /*can_overlap=*/use_same_buffer);
new (zone) Value(length_constant_instr), unboxed_inputs,
/*can_overlap=*/use_same_buffer);
flow_graph->InsertBefore(another_function_call, memory_copy_instr, nullptr,
FlowGraph::kEffect);
@ -342,11 +341,12 @@ static void RunMemoryCopyInstrTest(intptr_t src_start,
}
auto* const memory_copy_instr = new (zone) MemoryCopyInstr(
new (zone) Value(param_ptr), new (zone) Value(param_ptr2),
new (zone) Value(param_ptr), /*src_cid=*/cid,
new (zone) Value(param_ptr2), /*dest_cid=*/cid,
new (zone) Value(src_start_def), new (zone) Value(dest_start_def),
new (zone) Value(length_def),
/*src_cid=*/cid,
/*dest_cid=*/cid, unboxed_inputs, /*can_overlap=*/use_same_buffer);
unboxed_inputs, /*can_overlap=*/use_same_buffer);
flow_graph->InsertBefore(return_instr, memory_copy_instr, nullptr,
FlowGraph::kEffect);


@ -2850,13 +2850,22 @@ void LoadFieldInstr::InferRange(RangeAnalysis* analysis, Range* range) {
UNREACHABLE();
break;
#define UNBOXED_NATIVE_SLOT_CASE(Class, Untagged, Field, Rep, IsFinal) \
#define UNBOXED_NATIVE_NONADDRESS_SLOT_CASE(Class, Untagged, Field, Rep, \
IsFinal) \
case Slot::Kind::k##Class##_##Field:
UNBOXED_NATIVE_SLOTS_LIST(UNBOXED_NATIVE_SLOT_CASE)
#undef UNBOXED_NATIVE_SLOT_CASE
UNBOXED_NATIVE_NONADDRESS_SLOTS_LIST(UNBOXED_NATIVE_NONADDRESS_SLOT_CASE)
#undef UNBOXED_NATIVE_NONADDRESS_SLOT_CASE
*range = Range::Full(RepresentationToRangeSize(slot().representation()));
break;
#define UNBOXED_NATIVE_ADDRESS_SLOT_CASE(Class, Untagged, Field, MayMove, \
IsFinal) \
case Slot::Kind::k##Class##_##Field:
UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(UNBOXED_NATIVE_ADDRESS_SLOT_CASE)
#undef UNBOXED_NATIVE_ADDRESS_SLOT_CASE
UNREACHABLE();
break;
case Slot::Kind::kClosure_hash:
case Slot::Kind::kLinkedHashBase_hash_mask:
case Slot::Kind::kLinkedHashBase_used_data:

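The address slots are unreachable here because kUntagged values never feed integer range analysis directly: per the slot documentation in this CL, pointer arithmetic must round-trip through explicit conversions. In the IL comment notation this document already uses (value numbers illustrative):

// v1 <- LoadField(v0, Slot::PointerBase_data())   // kUntagged
// v2 <- ConvertUntaggedToUnboxed(v1)              // kUnboxedIntPtr: ranges apply
// v3 <- BinaryIntegerOp(+, v2, offset)            // kUnboxedIntPtr
// v4 <- ConvertUnboxedToUntagged(v3)              // kUntagged: no range info
// StoreField(v0, Slot::PointerBase_data(), v4)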

@ -7,6 +7,7 @@
#include <utility>
#include "vm/bit_vector.h"
#include "vm/class_id.h"
#include "vm/compiler/backend/flow_graph.h"
#include "vm/compiler/backend/il.h"
#include "vm/compiler/backend/il_printer.h"
@ -1120,6 +1121,8 @@ class AliasedSet : public ZoneAllocated {
use = use->next_use()) {
Instruction* instr = use->instruction();
if (instr->HasUnknownSideEffects() || instr->IsLoadUntagged() ||
(instr->IsLoadField() &&
instr->AsLoadField()->MayCreateUntaggedAlias()) ||
(instr->IsStoreIndexed() &&
(use->use_index() == StoreIndexedInstr::kValuePos)) ||
instr->IsStoreStaticField() || instr->IsPhi()) {
@ -1415,8 +1418,9 @@ static AliasedSet* NumberPlaces(FlowGraph* graph,
// Load instructions handled by load elimination.
static bool IsLoadEliminationCandidate(Instruction* instr) {
return instr->IsLoadField() || instr->IsLoadIndexed() ||
instr->IsLoadStaticField();
return (instr->IsLoadField() && instr->AsLoadField()->loads_inner_pointer() !=
InnerPointerAccess::kMayBeInnerPointer) ||
instr->IsLoadIndexed() || instr->IsLoadStaticField();
}
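The added condition keeps load elimination away from results a GC could invalidate, e.g. (illustrative IL, not from the source):

// v1 <- LoadField(v0, Slot::PointerBase_data())   // kMayBeInnerPointer
// StaticCall(...)                                 // may GC; v0's payload moves
// v2 <- LoadField(v0, Slot::PointerBase_data())   // must not be replaced by v1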
static bool IsLoopInvariantLoad(ZoneGrowableArray<BitVector*>* sets,
@ -3832,8 +3836,23 @@ void AllocationSinking::CreateMaterializationAt(
/*index_scale=*/compiler::target::Instance::ElementSizeFor(array_cid),
array_cid, kAlignedAccess, DeoptId::kNone, alloc->source());
} else {
load =
new (Z) LoadFieldInstr(new (Z) Value(alloc), *slot, alloc->source());
auto loads_inner_pointer =
slot->representation() != kUntagged ? InnerPointerAccess::kNotUntagged
: slot->may_contain_inner_pointer()
? InnerPointerAccess::kMayBeInnerPointer
: InnerPointerAccess::kCannotBeInnerPointer;
// PointerBase.data loads for external typed data and pointers never
// access an inner pointer.
if (slot->IsIdentical(Slot::PointerBase_data())) {
if (auto* const alloc_obj = alloc->AsAllocateObject()) {
const classid_t cid = alloc_obj->cls().id();
if (cid == kPointerCid || IsExternalTypedDataClassId(cid)) {
loads_inner_pointer = InnerPointerAccess::kCannotBeInnerPointer;
}
}
}
load = new (Z) LoadFieldInstr(new (Z) Value(alloc), *slot,
loads_inner_pointer, alloc->source());
}
flow_graph_->InsertBefore(load_point, load, nullptr, FlowGraph::kValue);
values.Add(new (Z) Value(load));

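A concrete case for the override above (sketch): Pointer and external TypedData payloads are never moved by the GC, so their materialization loads can be downgraded even though Slot::PointerBase_data() itself reports may_contain_inner_pointer():

// v0 <- AllocateObject(cls: Pointer)              // sunk allocation
// ...deopt point requiring materialization...
// v1 <- LoadField(v0, Slot::PointerBase_data())   // kCannotBeInnerPointer,
//                                                 // despite the slot flag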

@ -722,16 +722,16 @@ ISOLATE_UNIT_TEST_CASE(LoadOptimizer_LoadDataFieldOfNewTypedData) {
new AllocateObjectInstr(InstructionSource(), view_cls, DeoptId::kNone));
// v1 <- LoadNativeField(array, Slot::PointerBase_data())
v1 = builder.AddDefinition(new LoadFieldInstr(new (zone) Value(array),
Slot::PointerBase_data(),
InstructionSource()));
v1 = builder.AddDefinition(new LoadFieldInstr(
new (zone) Value(array), Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer, InstructionSource()));
// StoreNativeField(Slot::PointerBase_data(), view, v1, kNoStoreBarrier,
// kInitializing)
store = builder.AddInstruction(new StoreFieldInstr(
Slot::PointerBase_data(), new (zone) Value(view), new (zone) Value(v1),
kNoStoreBarrier, InstructionSource(),
StoreFieldInstr::Kind::kInitializing));
kNoStoreBarrier, InnerPointerAccess::kMayBeInnerPointer,
InstructionSource(), StoreFieldInstr::Kind::kInitializing));
// return view
ret = builder.AddInstruction(new ReturnInstr(


@ -106,8 +106,8 @@ Slot* SlotCache::CreateNativeSlot(Slot::Kind kind) {
#undef DEFINE_NONNULLABLE_BOXED_NATIVE_FIELD
#define DEFINE_UNBOXED_NATIVE_FIELD(ClassName, UnderlyingType, FieldName, \
representation, mutability) \
#define DEFINE_UNBOXED_NATIVE_NONADDRESS_FIELD( \
ClassName, UnderlyingType, FieldName, representation, mutability) \
case Slot::Kind::k##ClassName##_##FieldName: \
return new (zone_) \
Slot(Slot::Kind::k##ClassName##_##FieldName, \
@ -118,9 +118,24 @@ Slot* SlotCache::CreateNativeSlot(Slot::Kind kind) {
CompileType::FromUnboxedRepresentation(kUnboxed##representation), \
kUnboxed##representation);
UNBOXED_NATIVE_SLOTS_LIST(DEFINE_UNBOXED_NATIVE_FIELD)
UNBOXED_NATIVE_NONADDRESS_SLOTS_LIST(DEFINE_UNBOXED_NATIVE_NONADDRESS_FIELD)
#undef DEFINE_UNBOXED_NATIVE_FIELD
#undef DEFINE_UNBOXED_NATIVE_NONADDRESS_FIELD
#define DEFINE_UNBOXED_NATIVE_ADDRESS_FIELD(ClassName, UnderlyingType, \
FieldName, GcMayMove, mutability) \
case Slot::Kind::k##ClassName##_##FieldName: \
return new (zone_) \
Slot(Slot::Kind::k##ClassName##_##FieldName, \
Slot::IsImmutableBit::encode(FIELD_##mutability) | \
Slot::MayContainInnerPointerBit::encode(GcMayMove) | \
Slot::IsUnboxedBit::encode(true), \
compiler::target::ClassName::FieldName##_offset(), \
#ClassName "." #FieldName, CompileType::Object(), kUntagged);
UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(DEFINE_UNBOXED_NATIVE_ADDRESS_FIELD)
#undef DEFINE_UNBOXED_NATIVE_ADDRESS_FIELD
#undef FIELD_VAR
#undef FIELD_FINAL
default:
@ -318,19 +333,6 @@ FieldGuardState::FieldGuardState(const Field& field)
: state_(GuardedCidBits::encode(field.guarded_cid()) |
IsNullableBit::encode(field.is_nullable())) {}
Representation Slot::UnboxedRepresentation() const {
switch (field_guard_state().guarded_cid()) {
case kDoubleCid:
return kUnboxedDouble;
case kFloat32x4Cid:
return kUnboxedFloat32x4;
case kFloat64x2Cid:
return kUnboxedFloat64x2;
default:
return kUnboxedInt64;
}
}
const Slot& Slot::Get(const Field& field,
const ParsedFunction* parsed_function) {
Thread* thread = Thread::Current();

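For reference, this is roughly what the PointerBase.data entry of UNBOXED_NATIVE_ADDRESS_SLOTS_LIST expands to through DEFINE_UNBOXED_NATIVE_ADDRESS_FIELD (a manual expansion of the macro above; FIELD_VAR is assumed to encode false for IsImmutableBit):

case Slot::Kind::kPointerBase_data:
  return new (zone_)
      Slot(Slot::Kind::kPointerBase_data,
           Slot::IsImmutableBit::encode(false) |               // FIELD_VAR
               Slot::MayContainInnerPointerBit::encode(true) |  // GcMayMove
               Slot::IsUnboxedBit::encode(true),
           compiler::target::PointerBase::data_offset(),
           "PointerBase.data", CompileType::Object(), kUntagged);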

@ -145,17 +145,8 @@ NULLABLE_BOXED_NATIVE_SLOTS_LIST(FOR_EACH_NATIVE_SLOT)
NONNULLABLE_BOXED_NATIVE_SLOTS_LIST(FOR_EACH_NATIVE_SLOT)
#undef FOR_EACH_NATIVE_SLOT
// Only define AOT-only unboxed native slots when in the precompiler. See
// UNBOXED_NATIVE_SLOTS_LIST for the format.
#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
#define AOT_ONLY_UNBOXED_NATIVE_SLOTS_LIST(V) \
V(Closure, UntaggedClosure, entry_point, Uword, FINAL)
#else
#define AOT_ONLY_UNBOXED_NATIVE_SLOTS_LIST(V)
#endif
// List of slots that correspond to unboxed fields of native objects in the
// following format:
// List of slots that correspond to unboxed fields of native objects that
// do not contain untagged addresses in the following format:
//
// V(class_name, underlying_type, field_name, representation, FINAL|VAR)
//
@ -172,21 +163,63 @@ NONNULLABLE_BOXED_NATIVE_SLOTS_LIST(FOR_EACH_NATIVE_SLOT)
//
// Note: Currently LoadFieldInstr::IsImmutableLengthLoad() assumes that no
// unboxed slots represent length loads.
#define UNBOXED_NATIVE_SLOTS_LIST(V) \
AOT_ONLY_UNBOXED_NATIVE_SLOTS_LIST(V) \
#define UNBOXED_NATIVE_NONADDRESS_SLOTS_LIST(V) \
V(AbstractType, UntaggedAbstractType, flags, Uint32, FINAL) \
V(ClosureData, UntaggedClosureData, packed_fields, Uint32, FINAL) \
V(FinalizerBase, UntaggedFinalizerBase, isolate, IntPtr, VAR) \
V(FinalizerEntry, UntaggedFinalizerEntry, external_size, IntPtr, VAR) \
V(Function, UntaggedFunction, entry_point, Uword, FINAL) \
V(Function, UntaggedFunction, kind_tag, Uint32, FINAL) \
V(FunctionType, UntaggedFunctionType, packed_parameter_counts, Uint32, \
FINAL) \
V(FunctionType, UntaggedFunctionType, packed_type_parameter_counts, Uint16, \
FINAL) \
V(PointerBase, UntaggedPointerBase, data, IntPtr, VAR) \
V(SubtypeTestCache, UntaggedSubtypeTestCache, num_inputs, Uint32, FINAL)
// Unboxed native slots that contain untagged addresses and are only defined
// in AOT mode. See UNBOXED_NATIVE_ADDRESS_SLOTS_LIST for the format.
#if defined(DART_PRECOMPILER) && !defined(TARGET_ARCH_IA32)
#define AOT_ONLY_UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(V) \
V(Closure, UntaggedClosure, entry_point, false, FINAL)
#else
#define AOT_ONLY_UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(V)
#endif
// List of slots that correspond to unboxed fields of native objects containing
// untagged addresses in the following format:
//
// V(class_name, underlying_type, field_name, gc_may_move, FINAL|VAR)
//
// - class_name and field_name specify the name of the host class and the name
// of the field respectively;
// - underlying_type: the Raw class which holds the field;
// - gc_may_move: whether the untagged address contained in this field is a
// pointer to memory that may be moved by the GC, which means a value loaded
// from this field is invalidated by any instruction that can cause GC;
// - the last component specifies whether the field behaves like a final
//     field (i.e. initialized once at construction time and does not change
//     after that) or like a non-final field.
//
// Note: As the underlying field is unboxed, these slots cannot be nullable.
//
// Note: All slots for unboxed fields that contain untagged addresses are given
// the kUntagged representation, and so a value loaded from these fields must
// be converted explicitly to an unboxed integer representation for any
// pointer arithmetic before use, and an unboxed integer must be converted
// explicitly to an untagged address before being stored to these fields.
//
// Note: Currently LoadFieldInstr::IsImmutableLengthLoad() assumes that no
// unboxed slots represent length loads.
#define UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(V) \
AOT_ONLY_UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(V) \
V(Function, UntaggedFunction, entry_point, false, FINAL) \
V(FinalizerBase, UntaggedFinalizerBase, isolate, false, VAR) \
V(PointerBase, UntaggedPointerBase, data, true, VAR)
// For uses that do not need to know whether a given slot may contain an
// inner pointer to a GC-able object or not. (Generally, such users only need
// the class name, the underlying type, and/or the field name.)
#define UNBOXED_NATIVE_SLOTS_LIST(V) \
UNBOXED_NATIVE_NONADDRESS_SLOTS_LIST(V) UNBOXED_NATIVE_ADDRESS_SLOTS_LIST(V)
// For uses that do not need the exact_type (boxed) or representation (unboxed)
// or whether a boxed native slot is nullable. (Generally, such users only need
// the class name, the underlying type, and/or the field name.)
@ -317,6 +350,12 @@ class Slot : public ZoneAllocated {
bool is_compressed() const { return IsCompressedBit::decode(flags_); }
// Returns true if the field is an unboxed native field that may contain an
// inner pointer to a GC-movable object.
bool may_contain_inner_pointer() const {
return MayContainInnerPointerBit::decode(flags_);
}
// Type information about values that can be read from this slot.
CompileType type() const { return type_; }
@ -335,10 +374,7 @@ class Slot : public ZoneAllocated {
return kind() == Kind::kCapturedVariable || kind() == Kind::kContext_parent;
}
bool is_unboxed() const {
return IsUnboxedBit::decode(flags_);
}
Representation UnboxedRepresentation() const;
bool is_unboxed() const { return IsUnboxedBit::decode(flags_); }
void Write(FlowGraphSerializer* s) const;
static const Slot& Read(FlowGraphDeserializer* d);
@ -372,6 +408,8 @@ class Slot : public ZoneAllocated {
using IsGuardedBit = BitField<int8_t, bool, IsImmutableBit::kNextBit, 1>;
using IsCompressedBit = BitField<int8_t, bool, IsGuardedBit::kNextBit, 1>;
using IsUnboxedBit = BitField<int8_t, bool, IsCompressedBit::kNextBit, 1>;
using MayContainInnerPointerBit =
BitField<int8_t, bool, IsUnboxedBit::kNextBit, 1>;
template <typename T>
const T* DataAs() const {


@ -760,6 +760,11 @@ CompileType CompileType::String() {
kCannotBeSentinel);
}
CompileType CompileType::Object() {
return FromAbstractType(Type::ZoneHandle(Type::ObjectType()), kCannotBeNull,
kCannotBeSentinel);
}
intptr_t CompileType::ToCid() {
if (cid_ == kIllegalCid) {
// Make sure to initialize cid_ for Null type to consistently return


@ -42,7 +42,7 @@ ISOLATE_UNIT_TEST_CASE(IRTest_TypedDataAOT_Inlining) {
CheckNullInstr* check_null = nullptr;
LoadFieldInstr* load_field = nullptr;
GenericCheckBoundInstr* bounds_check = nullptr;
Instruction* load_untagged = nullptr;
LoadFieldInstr* load_untagged = nullptr;
LoadIndexedInstr* load_indexed = nullptr;
ILMatcher cursor(flow_graph, entry);
@ -54,7 +54,7 @@ ISOLATE_UNIT_TEST_CASE(IRTest_TypedDataAOT_Inlining) {
kMatchAndMoveBranchTrue,
kMoveGlob,
{kMatchAndMoveGenericCheckBound, &bounds_check},
{kMatchAndMoveLoadUntagged, &load_untagged},
{kMatchAndMoveLoadField, &load_untagged},
kMoveParallelMoves,
{kMatchAndMoveLoadIndexed, &load_indexed},
kMoveGlob,
@ -69,7 +69,7 @@ ISOLATE_UNIT_TEST_CASE(IRTest_TypedDataAOT_Inlining) {
kMatchAndMoveBranchTrue,
kMoveGlob,
{kMatchAndMoveGenericCheckBound, &bounds_check},
{kMatchAndMoveLoadUntagged, &load_untagged},
{kMatchAndMoveLoadField, &load_untagged},
kMoveParallelMoves,
{kMatchAndMoveLoadIndexed, &load_indexed},
kMoveGlob,
@ -140,27 +140,27 @@ ISOLATE_UNIT_TEST_CASE(IRTest_TypedDataAOT_FunctionalGetSet) {
// Load 1
kMatchAndMoveGenericCheckBound,
kMoveGlob,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMoveParallelMoves,
kMatchAndMoveLoadIndexed,
kMoveGlob,
// Load 2
kMatchAndMoveGenericCheckBound,
kMoveGlob,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMoveParallelMoves,
kMatchAndMoveLoadIndexed,
kMoveGlob,
// Store 1
kMatchAndMoveCheckWritable,
kMoveParallelMoves,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMoveParallelMoves,
kMatchAndMoveStoreIndexed,
kMoveGlob,
// Store 2
kMoveParallelMoves,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMoveParallelMoves,
kMatchAndMoveStoreIndexed,
kMoveGlob,
@ -184,27 +184,27 @@ ISOLATE_UNIT_TEST_CASE(IRTest_TypedDataAOT_FunctionalGetSet) {
// Load 1
kMatchAndMoveGenericCheckBound,
kMoveGlob,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMoveParallelMoves,
kMatchAndMoveLoadIndexed,
kMoveGlob,
// Load 2
kMatchAndMoveGenericCheckBound,
kMoveGlob,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMoveParallelMoves,
kMatchAndMoveLoadIndexed,
kMoveGlob,
// Store 1
kMatchAndMoveCheckWritable,
kMoveParallelMoves,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMoveParallelMoves,
kMatchAndMoveStoreIndexed,
kMoveGlob,
// Store 2
kMoveParallelMoves,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMoveParallelMoves,
kMatchAndMoveStoreIndexed,
kMoveGlob,
@ -282,7 +282,7 @@ ISOLATE_UNIT_TEST_CASE(IRTest_TypedDataAOT_FunctionalIndexError) {
// Store value.
kMoveGlob,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMoveParallelMoves,
kMatchAndMoveOptionalUnbox,
kMoveParallelMoves,
@ -316,7 +316,7 @@ ISOLATE_UNIT_TEST_CASE(IRTest_TypedDataAOT_FunctionalIndexError) {
// Store value.
kMoveGlob,
kMatchAndMoveLoadUntagged,
kMatchAndMoveLoadField,
kMoveParallelMoves,
kMatchAndMoveOptionalUnbox,
kMoveParallelMoves,


@ -1646,8 +1646,9 @@ Definition* TypedDataSpecializer::AppendLoadIndexed(TemplateDartCall<0>* call,
const intptr_t element_size = TypedDataBase::ElementSizeFor(cid);
const intptr_t index_scale = element_size;
auto data = new (Z) LoadUntaggedInstr(
new (Z) Value(array), compiler::target::PointerBase::data_offset());
auto data = new (Z)
LoadFieldInstr(new (Z) Value(array), Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer, call->source());
flow_graph_->InsertBefore(call, data, call->env(), FlowGraph::kValue);
Definition* load = new (Z) LoadIndexedInstr(
@ -1723,8 +1724,9 @@ void TypedDataSpecializer::AppendStoreIndexed(TemplateDartCall<0>* call,
break;
}
auto data = new (Z) LoadUntaggedInstr(
new (Z) Value(array), compiler::target::PointerBase::data_offset());
auto data = new (Z)
LoadFieldInstr(new (Z) Value(array), Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer, call->source());
flow_graph_->InsertBefore(call, data, call->env(), FlowGraph::kValue);
auto store = new (Z) StoreIndexedInstr(


@ -222,7 +222,7 @@ class CallSpecializer : public FlowGraphVisitor {
//
// // Directly access the byte, independent of whether `bytes` is
// // _Uint8List, _Uint8ArrayView or _ExternalUint8Array.
// v5 <- LoadUntagged(v1, "TypedDataBase.data");
// v5 <- LoadField(Slot::PointerBase_data(), v1);
// v6 <- LoadIndexed(v5, v4)
//
class TypedDataSpecializer : public FlowGraphVisitor {


@ -277,8 +277,22 @@ Fragment BaseFlowGraphBuilder::MemoryCopy(classid_t src_cid,
Value* dest = Pop();
Value* src = Pop();
auto copy =
new (Z) MemoryCopyInstr(src, dest, src_start, dest_start, length, src_cid,
dest_cid, unboxed_inputs, can_overlap);
new (Z) MemoryCopyInstr(src, src_cid, dest, dest_cid, src_start,
dest_start, length, unboxed_inputs, can_overlap);
return Fragment(copy);
}
Fragment BaseFlowGraphBuilder::MemoryCopyUntagged(intptr_t element_size,
bool unboxed_inputs,
bool can_overlap) {
Value* length = Pop();
Value* dest_start = Pop();
Value* src_start = Pop();
Value* dest = Pop();
Value* src = Pop();
auto copy =
new (Z) MemoryCopyInstr(element_size, src, dest, src_start, dest_start,
length, unboxed_inputs, can_overlap);
return Fragment(copy);
}
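For contrast, these are the two MemoryCopyInstr constructor shapes reached by the fragments above (a usage note copied from the calls, not the declarations themselves):

// Tagged operands: the cids describe where each payload lives.
new (Z) MemoryCopyInstr(src, src_cid, dest, dest_cid, src_start, dest_start,
                        length, unboxed_inputs, can_overlap);
// Untagged operands: src and dest are already payload addresses, so only an
// element size is needed for scaling.
new (Z) MemoryCopyInstr(element_size, src, dest, src_start, dest_start,
                        length, unboxed_inputs, can_overlap);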
@ -429,29 +443,6 @@ Fragment BaseFlowGraphBuilder::ConvertUnboxedToUntagged(
return Fragment(converted);
}
Fragment BaseFlowGraphBuilder::AddIntptrIntegers() {
Value* right = Pop();
Value* left = Pop();
#if defined(TARGET_ARCH_IS_64_BIT)
auto add = new (Z) BinaryInt64OpInstr(
Token::kADD, left, right, DeoptId::kNone, Instruction::kNotSpeculative);
#else
auto add =
new (Z) BinaryInt32OpInstr(Token::kADD, left, right, DeoptId::kNone);
#endif
add->mark_truncating();
Push(add);
return Fragment(add);
}
Fragment BaseFlowGraphBuilder::UnboxSmiToIntptr() {
Value* value = Pop();
auto untagged = UnboxInstr::Create(kUnboxedIntPtr, value, DeoptId::kNone,
Instruction::kNotSpeculative);
Push(untagged);
return Fragment(untagged);
}
Fragment BaseFlowGraphBuilder::FloatToDouble() {
Value* value = Pop();
FloatToDoubleInstr* instr = new FloatToDoubleInstr(value, DeoptId::kNone);
@ -473,11 +464,13 @@ Fragment BaseFlowGraphBuilder::LoadField(const Field& field,
calls_initializer);
}
Fragment BaseFlowGraphBuilder::LoadNativeField(const Slot& native_field,
bool calls_initializer) {
Fragment BaseFlowGraphBuilder::LoadNativeField(
const Slot& native_field,
InnerPointerAccess loads_inner_pointer,
bool calls_initializer) {
LoadFieldInstr* load = new (Z) LoadFieldInstr(
Pop(), native_field, InstructionSource(), calls_initializer,
calls_initializer ? GetNextDeoptId() : DeoptId::kNone);
Pop(), native_field, loads_inner_pointer, InstructionSource(),
calls_initializer, calls_initializer ? GetNextDeoptId() : DeoptId::kNone);
Push(load);
return Fragment(load);
}
@ -516,6 +509,7 @@ const Field& BaseFlowGraphBuilder::MayCloneField(Zone* zone,
Fragment BaseFlowGraphBuilder::StoreNativeField(
TokenPosition position,
const Slot& slot,
InnerPointerAccess stores_inner_pointer,
StoreFieldInstr::Kind kind /* = StoreFieldInstr::Kind::kOther */,
StoreBarrierType emit_store_barrier /* = kEmitStoreBarrier */,
compiler::Assembler::MemoryOrder memory_order /* = kRelaxed */) {
@ -523,9 +517,9 @@ Fragment BaseFlowGraphBuilder::StoreNativeField(
if (value->BindsToConstant()) {
emit_store_barrier = kNoStoreBarrier;
}
StoreFieldInstr* store =
new (Z) StoreFieldInstr(slot, Pop(), value, emit_store_barrier,
InstructionSource(position), kind);
StoreFieldInstr* store = new (Z)
StoreFieldInstr(slot, Pop(), value, emit_store_barrier,
stores_inner_pointer, InstructionSource(position), kind);
return Fragment(store);
}


@ -175,7 +175,13 @@ class BaseFlowGraphBuilder {
Fragment LoadField(const Field& field, bool calls_initializer);
Fragment LoadNativeField(const Slot& native_field,
InnerPointerAccess loads_inner_pointer,
bool calls_initializer = false);
Fragment LoadNativeField(const Slot& native_field,
bool calls_initializer = false) {
return LoadNativeField(native_field, InnerPointerAccess::kNotUntagged,
calls_initializer);
}
// Pass true for index_unboxed if indexing into external typed data.
Fragment LoadIndexed(classid_t class_id,
intptr_t index_scale = compiler::target::kWordSize,
@ -185,12 +191,9 @@ class BaseFlowGraphBuilder {
Fragment LoadUntagged(intptr_t offset);
Fragment ConvertUntaggedToUnboxed(Representation to);
Fragment ConvertUnboxedToUntagged(Representation from);
Fragment UnboxSmiToIntptr();
Fragment FloatToDouble();
Fragment DoubleToFloat();
Fragment AddIntptrIntegers();
void SetTempIndex(Definition* definition);
Fragment LoadLocal(LocalVariable* variable);
@ -206,17 +209,40 @@ class BaseFlowGraphBuilder {
Fragment StoreNativeField(
TokenPosition position,
const Slot& slot,
InnerPointerAccess stores_inner_pointer,
StoreFieldInstr::Kind kind = StoreFieldInstr::Kind::kOther,
StoreBarrierType emit_store_barrier = kEmitStoreBarrier,
compiler::Assembler::MemoryOrder memory_order =
compiler::Assembler::kRelaxedNonAtomic);
Fragment StoreNativeField(
TokenPosition position,
const Slot& slot,
StoreFieldInstr::Kind kind = StoreFieldInstr::Kind::kOther,
StoreBarrierType emit_store_barrier = kEmitStoreBarrier,
compiler::Assembler::MemoryOrder memory_order =
compiler::Assembler::kRelaxedNonAtomic) {
return StoreNativeField(position, slot, InnerPointerAccess::kNotUntagged,
kind, emit_store_barrier, memory_order);
}
Fragment StoreNativeField(
const Slot& slot,
InnerPointerAccess stores_inner_pointer,
StoreFieldInstr::Kind kind = StoreFieldInstr::Kind::kOther,
StoreBarrierType emit_store_barrier = kEmitStoreBarrier,
compiler::Assembler::MemoryOrder memory_order =
compiler::Assembler::kRelaxedNonAtomic) {
return StoreNativeField(TokenPosition::kNoSource, slot,
stores_inner_pointer, kind, emit_store_barrier,
memory_order);
}
Fragment StoreNativeField(
const Slot& slot,
StoreFieldInstr::Kind kind = StoreFieldInstr::Kind::kOther,
StoreBarrierType emit_store_barrier = kEmitStoreBarrier,
compiler::Assembler::MemoryOrder memory_order =
compiler::Assembler::kRelaxedNonAtomic) {
return StoreNativeField(TokenPosition::kNoSource, slot, kind,
return StoreNativeField(TokenPosition::kNoSource, slot,
InnerPointerAccess::kNotUntagged, kind,
emit_store_barrier, memory_order);
}
Fragment StoreField(
@ -316,6 +342,9 @@ class BaseFlowGraphBuilder {
classid_t dest_cid,
bool unboxed_inputs,
bool can_overlap = true);
Fragment MemoryCopyUntagged(intptr_t element_size,
bool unboxed_inputs,
bool can_overlap = true);
Fragment TailCall(const Code& code);
Fragment Utf8Scan();

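Call sites then follow this pattern inside a builder method (a sketch; the locals are hypothetical): accesses to untagged-address slots spell out an InnerPointerAccess, while the old signatures forward kNotUntagged.

Fragment body;
body += LoadLocal(typed_data);  // hypothetical TypedData local
body += LoadNativeField(Slot::PointerBase_data(),
                        InnerPointerAccess::kMayBeInnerPointer);
body += LoadLocal(function_object);  // hypothetical Function local
// Non-address slot: the one-argument form forwards kNotUntagged.
body += LoadNativeField(Slot::Function_kind_tag());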

@ -1140,19 +1140,19 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
}
case MethodRecognizer::kTypedData_memMove1:
// Pick an appropriate typed data cid based on the element size.
body += BuildTypedDataMemMove(function, kTypedDataUint8ArrayCid);
body += BuildTypedDataMemMove(function, 1);
break;
case MethodRecognizer::kTypedData_memMove2:
body += BuildTypedDataMemMove(function, kTypedDataUint16ArrayCid);
body += BuildTypedDataMemMove(function, 2);
break;
case MethodRecognizer::kTypedData_memMove4:
body += BuildTypedDataMemMove(function, kTypedDataUint32ArrayCid);
body += BuildTypedDataMemMove(function, 4);
break;
case MethodRecognizer::kTypedData_memMove8:
body += BuildTypedDataMemMove(function, kTypedDataUint64ArrayCid);
body += BuildTypedDataMemMove(function, 8);
break;
case MethodRecognizer::kTypedData_memMove16:
body += BuildTypedDataMemMove(function, kTypedDataInt32x4ArrayCid);
body += BuildTypedDataMemMove(function, 16);
break;
#define CASE(name) \
case MethodRecognizer::kTypedData_##name##_factory: \
@ -1266,7 +1266,6 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
body += Box(kUnboxedIntPtr);
break;
case MethodRecognizer::kMemCopy: {
// Keep consistent with inliner.cc (except boxed param).
ASSERT_EQUAL(function.NumParameters(), 5);
LocalVariable* arg_target = parsed_function_->RawParameterVariable(0);
LocalVariable* arg_target_offset_in_bytes =
@ -1276,15 +1275,25 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
parsed_function_->RawParameterVariable(3);
LocalVariable* arg_length_in_bytes =
parsed_function_->RawParameterVariable(4);
// Load the untagged data fields of the source and destination so that the
// loads can be optimized away when applicable, and unbox the numeric
// inputs: _memCopy is force-optimized, which removes the need to use
// SmiUntag within MemoryCopy when element_size is 1.
body += LoadLocal(arg_source);
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += LoadLocal(arg_target);
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += LoadLocal(arg_source_offset_in_bytes);
body += UnboxTruncate(kUnboxedIntPtr);
body += LoadLocal(arg_target_offset_in_bytes);
body += UnboxTruncate(kUnboxedIntPtr);
body += LoadLocal(arg_length_in_bytes);
// Pointers and TypedData have the same layout.
body += MemoryCopy(kTypedDataUint8ArrayCid, kTypedDataUint8ArrayCid,
/*unboxed_inputs=*/false,
/*can_overlap=*/true);
body += UnboxTruncate(kUnboxedIntPtr);
body += MemoryCopyUntagged(/*element_size=*/1,
/*unboxed_inputs=*/true,
/*can_overlap=*/true);
body += NullConstant();
} break;
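Altogether the fragment builds roughly this IL for _memCopy (value numbers illustrative; no safepoint can intervene because kMemCopy is added to RecognizedKindForceOptimize elsewhere in this CL):

// v1 <- LoadField(source, Slot::PointerBase_data())  // kUntagged
// v2 <- LoadField(target, Slot::PointerBase_data())  // kUntagged
// v3 <- UnboxTruncate(source_offset_in_bytes)        // kUnboxedIntPtr
// v4 <- UnboxTruncate(target_offset_in_bytes)        // kUnboxedIntPtr
// v5 <- UnboxTruncate(length_in_bytes)               // kUnboxedIntPtr
// MemoryCopy(element_size=1, v1, v2, v3, v4, v5, can_overlap)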
case MethodRecognizer::kFfiAbi:
@ -1332,7 +1341,8 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
body += LoadLocal(arg_pointer);
body += CheckNullOptimized(String::ZoneHandle(Z, function.name()));
// No GC from here til LoadIndexed.
body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer);
body += LoadLocal(arg_offset_not_null);
body += UnboxTruncate(kUnboxedFfiIntPtr);
body += LoadIndexed(typed_data_cid, /*index_scale=*/1,
@ -1360,7 +1370,10 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
body += LoadLocal(pointer);
body += LoadLocal(address);
body += UnboxTruncate(kUnboxedIntPtr);
body += StoreNativeField(Slot::PointerBase_data());
body += ConvertUnboxedToUntagged(kUnboxedIntPtr);
body += StoreNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer,
StoreFieldInstr::Kind::kInitializing);
body += DropTempsPreserveTop(1); // Drop [address] keep [pointer].
}
body += DropTempsPreserveTop(1); // Drop [arg_offset].
@ -1400,13 +1413,15 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
body += LoadLocal(arg_pointer); // Pointer.
body += CheckNullOptimized(String::ZoneHandle(Z, function.name()));
// No GC from here til StoreIndexed.
body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer);
body += LoadLocal(arg_offset_not_null);
body += UnboxTruncate(kUnboxedFfiIntPtr);
body += LoadLocal(arg_value_not_null);
if (kind == MethodRecognizer::kFfiStorePointer) {
// This can only be Pointer, so it is always safe to LoadUntagged.
body += LoadUntagged(compiler::target::PointerBase::data_offset());
// This can only be Pointer, so it is safe to load the data field.
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer);
body += ConvertUntaggedToUnboxed(kUnboxedFfiIntPtr);
} else {
// Avoid any unnecessary (and potentially deoptimizing) int
@ -1438,14 +1453,18 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
body += LoadLocal(parsed_function_->RawParameterVariable(0)); // Address.
body += CheckNullOptimized(String::ZoneHandle(Z, function.name()));
body += UnboxTruncate(kUnboxedIntPtr);
body += StoreNativeField(Slot::PointerBase_data());
body += ConvertUnboxedToUntagged(kUnboxedIntPtr);
body += StoreNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer,
StoreFieldInstr::Kind::kInitializing);
} break;
case MethodRecognizer::kFfiGetAddress: {
ASSERT_EQUAL(function.NumParameters(), 1);
body += LoadLocal(parsed_function_->RawParameterVariable(0)); // Pointer.
body += CheckNullOptimized(String::ZoneHandle(Z, function.name()));
// This can only be Pointer, so it is always safe to LoadUntagged.
body += LoadUntagged(compiler::target::PointerBase::data_offset());
// This can only be Pointer, so it is safe to load the data field.
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer);
body += ConvertUntaggedToUnboxed(kUnboxedFfiIntPtr);
body += Box(kUnboxedFfiIntPtr);
} break;
@ -1528,11 +1547,11 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
// Initialize the result's data pointer field.
body += LoadLocal(typed_data_object);
body += LoadLocal(arg_pointer);
body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += ConvertUntaggedToUnboxed(kUnboxedIntPtr);
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer);
body += StoreNativeField(Slot::PointerBase_data(),
StoreFieldInstr::Kind::kInitializing,
kNoStoreBarrier);
InnerPointerAccess::kCannotBeInnerPointer,
StoreFieldInstr::Kind::kInitializing);
} break;
case MethodRecognizer::kGetNativeField: {
auto& name = String::ZoneHandle(Z, function.name());
@ -1602,8 +1621,8 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
ASSERT_EQUAL(function.NumParameters(), 1);
body += LoadLocal(parsed_function_->RawParameterVariable(0));
body += LoadIsolate();
body += ConvertUntaggedToUnboxed(kUnboxedIntPtr);
body += StoreNativeField(Slot::FinalizerBase_isolate());
body += StoreNativeField(Slot::FinalizerBase_isolate(),
InnerPointerAccess::kCannotBeInnerPointer);
body += NullConstant();
break;
case MethodRecognizer::kFinalizerBase_getIsolateFinalizers:
@ -1729,6 +1748,14 @@ Fragment FlowGraphBuilder::BuildTypedDataViewFactoryConstructor(
Fragment body;
// Note that we do no input checking here before allocation. The factory is
// private, and only called by other code in the library implementation.
// Thus, either the inputs are checked within Dart code before the factory is
// called (e.g., the implementation of XList.sublistView), or the inputs to
// the factory are retrieved from previously constructed TypedData objects
// and thus already checked (e.g., the implementation of the
// UnmodifiableXListView constructors).
body += AllocateObject(token_pos, view_class, /*arg_count=*/0);
LocalVariable* view_object = MakeTemporary();
@ -1751,22 +1778,27 @@ Fragment FlowGraphBuilder::BuildTypedDataViewFactoryConstructor(
// Update the inner pointer.
//
// WARNING: Notice that we assume here no GC happens between those 4
// instructions!
// WARNING: Notice that we assume here no GC happens between the
// LoadNativeField and the StoreNativeField, as the GC expects a properly
// updated data field (see ScavengerVisitorBase::VisitTypedDataViewPointers).
body += LoadLocal(view_object);
body += LoadLocal(typed_data);
body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += ConvertUntaggedToUnboxed(kUnboxedIntPtr);
body += LoadLocal(offset_in_bytes);
body += UnboxSmiToIntptr();
body += AddIntptrIntegers();
body += StoreNativeField(Slot::PointerBase_data());
body += UnboxTruncate(kUnboxedIntPtr);
body += BinaryIntegerOp(Token::kADD, kUnboxedIntPtr, /*is_truncating=*/true);
body += ConvertUnboxedToUntagged(kUnboxedIntPtr);
body += StoreNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer,
StoreFieldInstr::Kind::kInitializing);
return body;
}
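The inner-pointer update above corresponds to the following IL, with no safepoint possible between the LoadField and the StoreField since view factories are force-optimized (sketch):

// v1 <- LoadField(typed_data, Slot::PointerBase_data())  // kMayBeInnerPointer
// v2 <- ConvertUntaggedToUnboxed(v1)                     // kUnboxedIntPtr
// v3 <- UnboxTruncate(offset_in_bytes)                   // kUnboxedIntPtr
// v4 <- BinaryIntegerOp(+, v2, v3)
// v5 <- ConvertUnboxedToUntagged(v4)
// StoreField(view, Slot::PointerBase_data(), v5)         // kMayBeInnerPointer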
Fragment FlowGraphBuilder::BuildTypedDataMemMove(const Function& function,
intptr_t cid) {
intptr_t element_size) {
ASSERT_EQUAL(parsed_function_->function().NumParameters(), 5);
LocalVariable* arg_to = parsed_function_->RawParameterVariable(0);
LocalVariable* arg_to_start = parsed_function_->RawParameterVariable(1);
@ -1801,18 +1833,23 @@ Fragment FlowGraphBuilder::BuildTypedDataMemMove(const Function& function,
Fragment use_instruction(is_small_enough);
use_instruction += LoadLocal(arg_from);
use_instruction += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
use_instruction += LoadLocal(arg_to);
use_instruction += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
use_instruction += LoadLocal(arg_from_start);
use_instruction += LoadLocal(arg_to_start);
use_instruction += LoadLocal(arg_count);
use_instruction += MemoryCopy(cid, cid,
/*unboxed_inputs=*/false, /*can_overlap=*/true);
use_instruction +=
MemoryCopyUntagged(element_size,
/*unboxed_inputs=*/false, /*can_overlap=*/true);
use_instruction += Goto(done);
const intptr_t element_size = Instance::ElementSizeFor(cid);
Fragment call_memmove(is_too_large);
call_memmove += LoadLocal(arg_to);
call_memmove += LoadUntagged(compiler::target::PointerBase::data_offset());
call_memmove += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
call_memmove += ConvertUntaggedToUnboxed(kUnboxedIntPtr);
call_memmove += LoadLocal(arg_to_start);
call_memmove += IntConstant(element_size);
@ -1821,7 +1858,8 @@ Fragment FlowGraphBuilder::BuildTypedDataMemMove(const Function& function,
call_memmove +=
BinaryIntegerOp(Token::kADD, kUnboxedIntPtr, /*is_truncating=*/true);
call_memmove += LoadLocal(arg_from);
call_memmove += LoadUntagged(compiler::target::PointerBase::data_offset());
call_memmove += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
call_memmove += ConvertUntaggedToUnboxed(kUnboxedIntPtr);
call_memmove += LoadLocal(arg_from_start);
call_memmove += IntConstant(element_size);
@ -4477,7 +4515,10 @@ Fragment FlowGraphBuilder::FfiPointerFromAddress() {
code += LoadLocal(pointer);
code += LoadLocal(address);
code += UnboxTruncate(kUnboxedIntPtr);
code += StoreNativeField(Slot::PointerBase_data());
code += ConvertUnboxedToUntagged(kUnboxedIntPtr);
code += StoreNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer,
StoreFieldInstr::Kind::kInitializing);
code += StoreLocal(TokenPosition::kNoSource, result);
code += Drop(); // StoreLocal^
code += Drop(); // address
@ -4559,7 +4600,8 @@ Fragment FlowGraphBuilder::CopyFromCompoundToStack(
for (intptr_t i = 0; i < num_defs; i++) {
body += LoadLocal(variable);
body += LoadTypedDataBaseFromCompound();
body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += IntConstant(offset_in_bytes);
const Representation representation = representations[i];
offset_in_bytes += RepresentationUtils::ValueSize(representation);
@ -4581,7 +4623,8 @@ Fragment FlowGraphBuilder::PopFromStackToTypedDataBase(
for (intptr_t i = 0; i < num_defs; i++) {
const Representation representation = representations[i];
body += LoadLocal(uint8_list);
body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += IntConstant(offset_in_bytes);
body += LoadLocal(definitions->At(i));
body += StoreIndexedTypedDataUnboxed(representation, /*index_scale=*/1,
@ -4635,7 +4678,8 @@ Fragment FlowGraphBuilder::CopyFromTypedDataBaseToUnboxedAddress(
const classid_t typed_data_cidd = typed_data_cid(chunk_sizee);
body += LoadLocal(typed_data_base);
body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += IntConstant(offset_in_bytes);
body += LoadIndexed(typed_data_cidd, /*index_scale=*/1,
/*index_unboxed=*/false);
@ -4680,7 +4724,8 @@ Fragment FlowGraphBuilder::CopyFromUnboxedAddressToTypedDataBase(
LocalVariable* chunk_value = MakeTemporary("chunk_value");
body += LoadLocal(typed_data_base);
body += LoadUntagged(compiler::target::PointerBase::data_offset());
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kMayBeInnerPointer);
body += IntConstant(offset_in_bytes);
body += LoadLocal(chunk_value);
body += StoreIndexedTypedData(typed_data_cidd, /*index_scale=*/1,
@ -4836,8 +4881,9 @@ Fragment FlowGraphBuilder::FfiConvertPrimitiveToNative(
Fragment body;
if (marshaller.IsPointer(arg_index)) {
// This can only be Pointer, so it is always safe to LoadUntagged.
body += LoadUntagged(compiler::target::PointerBase::data_offset());
// This can only be Pointer, so it is safe to load the data field.
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer);
body += ConvertUntaggedToUnboxed(kUnboxedFfiIntPtr);
} else if (marshaller.IsHandle(arg_index)) {
body += WrapHandle();
@ -4980,8 +5026,9 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfFfiNative(const Function& function) {
Z, Class::Handle(IG->object_store()->ffi_pointer_class()))
->context_variables()[0]));
// This can only be Pointer, so it is always safe to LoadUntagged.
body += LoadUntagged(compiler::target::PointerBase::data_offset());
// This can only be Pointer, so it is safe to load the data field.
body += LoadNativeField(Slot::PointerBase_data(),
InnerPointerAccess::kCannotBeInnerPointer);
body += ConvertUntaggedToUnboxed(kUnboxedFfiIntPtr);
if (marshaller.PassTypedData()) {

View file

@ -146,7 +146,8 @@ class FlowGraphBuilder : public BaseFlowGraphBuilder {
FlowGraph* BuildGraphOfRecognizedMethod(const Function& function);
Fragment BuildTypedDataMemMove(const Function& function, intptr_t cid);
Fragment BuildTypedDataMemMove(const Function& function,
intptr_t element_size);
Fragment BuildTypedDataViewFactoryConstructor(const Function& function,
classid_t cid);
Fragment BuildTypedDataFactoryConstructor(const Function& function,


@ -100,20 +100,20 @@ namespace dart {
TypedData_UnmodifiableInt32x4ArrayView_factory, 0xf66c6993) \
V(_UnmodifiableFloat64x2ArrayView, ._, \
TypedData_UnmodifiableFloat64x2ArrayView_factory, 0x6d9ae5fb) \
V(Int8List, ., TypedData_Int8Array_factory, 0x65ff4ca8) \
V(Uint8List, ., TypedData_Uint8Array_factory, 0xedd56a6f) \
V(Uint8ClampedList, ., TypedData_Uint8ClampedArray_factory, 0x27f7ab75) \
V(Int16List, ., TypedData_Int16Array_factory, 0xd0bf0d13) \
V(Uint16List, ., TypedData_Uint16Array_factory, 0x3ca76f8a) \
V(Int32List, ., TypedData_Int32Array_factory, 0x1b816740) \
V(Uint32List, ., TypedData_Uint32Array_factory, 0x2b210eab) \
V(Int64List, ., TypedData_Int64Array_factory, 0xfb63524f) \
V(Uint64List, ., TypedData_Uint64Array_factory, 0xe3c14418) \
V(Float32List, ., TypedData_Float32Array_factory, 0xa381dd1e) \
V(Float64List, ., TypedData_Float64Array_factory, 0xa0b7c2b1) \
V(Float32x4List, ., TypedData_Float32x4Array_factory, 0x0a6eefa8) \
V(Int32x4List, ., TypedData_Int32x4Array_factory, 0x5a09288e) \
V(Float64x2List, ., TypedData_Float64x2Array_factory, 0xecbc738a) \
V(Int8List, ., TypedData_Int8Array_factory, 0x65ff48e7) \
V(Uint8List, ., TypedData_Uint8Array_factory, 0xedd566ae) \
V(Uint8ClampedList, ., TypedData_Uint8ClampedArray_factory, 0x27f7a7b4) \
V(Int16List, ., TypedData_Int16Array_factory, 0xd0bf0952) \
V(Uint16List, ., TypedData_Uint16Array_factory, 0x3ca76bc9) \
V(Int32List, ., TypedData_Int32Array_factory, 0x1b81637f) \
V(Uint32List, ., TypedData_Uint32Array_factory, 0x2b210aea) \
V(Int64List, ., TypedData_Int64Array_factory, 0xfb634e8e) \
V(Uint64List, ., TypedData_Uint64Array_factory, 0xe3c14057) \
V(Float32List, ., TypedData_Float32Array_factory, 0xa381d95d) \
V(Float64List, ., TypedData_Float64Array_factory, 0xa0b7bef0) \
V(Float32x4List, ., TypedData_Float32x4Array_factory, 0x0a6eebe7) \
V(Int32x4List, ., TypedData_Int32x4Array_factory, 0x5a0924cd) \
V(Float64x2List, ., TypedData_Float64x2Array_factory, 0xecbc6fc9) \
V(_TypedListBase, _memMove1, TypedData_memMove1, 0xd2767fb0) \
V(_TypedListBase, _memMove2, TypedData_memMove2, 0xed382bb6) \
V(_TypedListBase, _memMove4, TypedData_memMove4, 0xcfe37726) \
@ -544,22 +544,22 @@ namespace dart {
kGrowableObjectArrayCid, 0x7be49a4e) \
V(_GrowableListWithData, _GrowableList, ._withData, kGrowableObjectArrayCid, \
0x19394cc1) \
V(_Int8ArrayFactory, Int8List, ., kTypedDataInt8ArrayCid, 0x65ff4ca8) \
V(_Uint8ArrayFactory, Uint8List, ., kTypedDataUint8ArrayCid, 0xedd56a6f) \
V(_Int8ArrayFactory, Int8List, ., kTypedDataInt8ArrayCid, 0x65ff48e7) \
V(_Uint8ArrayFactory, Uint8List, ., kTypedDataUint8ArrayCid, 0xedd566ae) \
V(_Uint8ClampedArrayFactory, Uint8ClampedList, ., \
kTypedDataUint8ClampedArrayCid, 0x27f7ab75) \
V(_Int16ArrayFactory, Int16List, ., kTypedDataInt16ArrayCid, 0xd0bf0d13) \
V(_Uint16ArrayFactory, Uint16List, ., kTypedDataUint16ArrayCid, 0x3ca76f8a) \
V(_Int32ArrayFactory, Int32List, ., kTypedDataInt32ArrayCid, 0x1b816740) \
V(_Uint32ArrayFactory, Uint32List, ., kTypedDataUint32ArrayCid, 0x2b210eab) \
V(_Int64ArrayFactory, Int64List, ., kTypedDataInt64ArrayCid, 0xfb63524f) \
V(_Uint64ArrayFactory, Uint64List, ., kTypedDataUint64ArrayCid, 0xe3c14418) \
kTypedDataUint8ClampedArrayCid, 0x27f7a7b4) \
V(_Int16ArrayFactory, Int16List, ., kTypedDataInt16ArrayCid, 0xd0bf0952) \
V(_Uint16ArrayFactory, Uint16List, ., kTypedDataUint16ArrayCid, 0x3ca76bc9) \
V(_Int32ArrayFactory, Int32List, ., kTypedDataInt32ArrayCid, 0x1b81637f) \
V(_Uint32ArrayFactory, Uint32List, ., kTypedDataUint32ArrayCid, 0x2b210aea) \
V(_Int64ArrayFactory, Int64List, ., kTypedDataInt64ArrayCid, 0xfb634e8e) \
V(_Uint64ArrayFactory, Uint64List, ., kTypedDataUint64ArrayCid, 0xe3c14057) \
V(_Float64ArrayFactory, Float64List, ., kTypedDataFloat64ArrayCid, \
0xa0b7c2b1) \
0xa0b7bef0) \
V(_Float32ArrayFactory, Float32List, ., kTypedDataFloat32ArrayCid, \
0xa381dd1e) \
0xa381d95d) \
V(_Float32x4ArrayFactory, Float32x4List, ., kTypedDataFloat32x4ArrayCid, \
0x0a6eefa8)
0x0a6eebe7)
// clang-format on


@ -1112,6 +1112,9 @@ void DeoptInfoBuilder::AddCopy(Value* value,
deopt_instr =
new (zone()) DeoptWordInstr(ToCpuRegisterSource(source_loc));
break;
#if defined(TARGET_ARCH_IS_64_BIT)
case kUntagged:
#endif
case kUnboxedInt64: {
if (source_loc.IsPairLocation()) {
PairLocation* pair = source_loc.AsPairLocation();
@ -1125,6 +1128,9 @@ void DeoptInfoBuilder::AddCopy(Value* value,
}
break;
}
#if defined(TARGET_ARCH_IS_32_BIT)
case kUntagged:
#endif
case kUnboxedInt32:
deopt_instr =
new (zone()) DeoptInt32Instr(ToCpuRegisterSource(source_loc));


@ -9020,6 +9020,52 @@ void Function::SetIsOptimizable(bool value) const {
}
}
bool Function::IsTypedDataViewFactory() const {
switch (recognized_kind()) {
case MethodRecognizer::kTypedData_ByteDataView_factory:
case MethodRecognizer::kTypedData_Int8ArrayView_factory:
case MethodRecognizer::kTypedData_Uint8ArrayView_factory:
case MethodRecognizer::kTypedData_Uint8ClampedArrayView_factory:
case MethodRecognizer::kTypedData_Int16ArrayView_factory:
case MethodRecognizer::kTypedData_Uint16ArrayView_factory:
case MethodRecognizer::kTypedData_Int32ArrayView_factory:
case MethodRecognizer::kTypedData_Uint32ArrayView_factory:
case MethodRecognizer::kTypedData_Int64ArrayView_factory:
case MethodRecognizer::kTypedData_Uint64ArrayView_factory:
case MethodRecognizer::kTypedData_Float32ArrayView_factory:
case MethodRecognizer::kTypedData_Float64ArrayView_factory:
case MethodRecognizer::kTypedData_Float32x4ArrayView_factory:
case MethodRecognizer::kTypedData_Int32x4ArrayView_factory:
case MethodRecognizer::kTypedData_Float64x2ArrayView_factory:
return true;
default:
return false;
}
}
bool Function::IsUnmodifiableTypedDataViewFactory() const {
switch (recognized_kind()) {
case MethodRecognizer::kTypedData_UnmodifiableByteDataView_factory:
case MethodRecognizer::kTypedData_UnmodifiableInt8ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableUint8ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableUint8ClampedArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableInt16ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableUint16ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableInt32ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableUint32ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableInt64ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableUint64ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableFloat32ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableFloat64ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableFloat32x4ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableInt32x4ArrayView_factory:
case MethodRecognizer::kTypedData_UnmodifiableFloat64x2ArrayView_factory:
return true;
default:
return false;
}
}
bool Function::ForceOptimize() const {
if (RecognizedKindForceOptimize() || IsFfiTrampoline() ||
IsTypedDataViewFactory() || IsUnmodifiableTypedDataViewFactory()) {
@ -9107,6 +9153,7 @@ bool Function::RecognizedKindForceOptimize() const {
case MethodRecognizer::kTypedData_memMove4:
case MethodRecognizer::kTypedData_memMove8:
case MethodRecognizer::kTypedData_memMove16:
case MethodRecognizer::kMemCopy:
// Prevent the GC from running so that the operation is atomic from
// a GC point of view. Always double check implementation in
// kernel_to_il.cc that no GC can happen in between the relevant IL


@ -3928,23 +3928,8 @@ class Function : public Object {
return modifier() == UntaggedFunction::kAsyncGen;
}
bool IsTypedDataViewFactory() const {
if (is_native() && kind() == UntaggedFunction::kConstructor) {
// This is a native factory constructor.
const Class& klass = Class::Handle(Owner());
return IsTypedDataViewClassId(klass.id());
}
return false;
}
bool IsUnmodifiableTypedDataViewFactory() const {
if (is_native() && kind() == UntaggedFunction::kConstructor) {
// This is a native factory constructor.
const Class& klass = Class::Handle(Owner());
return IsUnmodifiableTypedDataViewClassId(klass.id());
}
return false;
}
bool IsTypedDataViewFactory() const;
bool IsUnmodifiableTypedDataViewFactory() const;
DART_WARN_UNUSED_RESULT
ErrorPtr VerifyCallEntryPoint() const;


@ -580,6 +580,7 @@ extension Int8Pointer on Pointer<Int8> {
Pointer<Int8> elementAt(int index) => Pointer.fromAddress(address + index);
@patch
@pragma("vm:prefer-inline")
Int8List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@ -616,6 +617,7 @@ extension Int16Pointer on Pointer<Int16> {
Pointer.fromAddress(address + 2 * index);
@patch
@pragma("vm:prefer-inline")
Int16List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@ -652,6 +654,7 @@ extension Int32Pointer on Pointer<Int32> {
Pointer.fromAddress(address + 4 * index);
@patch
@pragma("vm:prefer-inline")
Int32List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@ -688,6 +691,7 @@ extension Int64Pointer on Pointer<Int64> {
Pointer.fromAddress(address + 8 * index);
@patch
@pragma("vm:prefer-inline")
Int64List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@ -723,6 +727,7 @@ extension Uint8Pointer on Pointer<Uint8> {
Pointer<Uint8> elementAt(int index) => Pointer.fromAddress(address + index);
@patch
@pragma("vm:prefer-inline")
Uint8List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@ -759,6 +764,7 @@ extension Uint16Pointer on Pointer<Uint16> {
Pointer.fromAddress(address + 2 * index);
@patch
@pragma("vm:prefer-inline")
Uint16List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@ -795,6 +801,7 @@ extension Uint32Pointer on Pointer<Uint32> {
Pointer.fromAddress(address + 4 * index);
@patch
@pragma("vm:prefer-inline")
Uint32List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@ -831,6 +838,7 @@ extension Uint64Pointer on Pointer<Uint64> {
Pointer.fromAddress(address + 8 * index);
@patch
@pragma("vm:prefer-inline")
Uint64List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@ -867,6 +875,7 @@ extension FloatPointer on Pointer<Float> {
Pointer.fromAddress(address + 4 * index);
@patch
@pragma("vm:prefer-inline")
Float32List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,
@@ -903,6 +912,7 @@ extension DoublePointer on Pointer<Double> {
Pointer.fromAddress(address + 8 * index);
@patch
@pragma("vm:prefer-inline")
Float64List asTypedList(
int length, {
Pointer<NativeFinalizerFunction>? finalizer,

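A sketch of the copy pattern the new prefer-inline pragma targets; calloc comes from package:ffi, everything else is standard API:

import 'dart:ffi';
import 'dart:typed_data';

import 'package:ffi/ffi.dart';

void copyToNative(Uint8List source) {
  final Pointer<Uint8> dest = calloc<Uint8>(source.length);
  // asTypedList is now prefer-inline, so the external typed-data wrapper
  // it allocates becomes visible to the optimizer; when the wrapper's only
  // use is this setRange, the intermediate allocation can be sunk away.
  dest.asTypedList(source.length).setRange(0, source.length, source);
  calloc.free(dest);
}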
sdk/lib/_internal/vm/lib/typed_data_patch.dart:

@@ -2173,7 +2173,6 @@ class Int8List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int8List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Int8Array_new")
external factory Int8List(int length);
@patch
@@ -2233,7 +2232,6 @@ class Uint8List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint8List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Uint8Array_new")
external factory Uint8List(int length);
@patch
@@ -2296,7 +2294,6 @@ class Uint8ClampedList {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint8ClampedList)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Uint8ClampedArray_new")
external factory Uint8ClampedList(int length);
@patch
@@ -2361,7 +2358,6 @@ class Int16List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int16List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Int16Array_new")
external factory Int16List(int length);
@patch
@@ -2441,7 +2437,6 @@ class Uint16List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint16List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Uint16Array_new")
external factory Uint16List(int length);
@patch
@@ -2521,7 +2516,6 @@ class Int32List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int32List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Int32Array_new")
external factory Int32List(int length);
@patch
@@ -2588,7 +2582,6 @@ class Uint32List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint32List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Uint32Array_new")
external factory Uint32List(int length);
@patch
@@ -2655,7 +2648,6 @@ class Int64List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int64List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Int64Array_new")
external factory Int64List(int length);
@patch
@@ -2722,7 +2714,6 @@ class Uint64List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint64List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Uint64Array_new")
external factory Uint64List(int length);
@patch
@@ -2789,7 +2780,6 @@ class Float32List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float32List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Float32Array_new")
external factory Float32List(int length);
@patch
@@ -2857,7 +2847,6 @@ class Float64List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float64List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Float64Array_new")
external factory Float64List(int length);
@patch
@@ -2925,7 +2914,6 @@ class Float32x4List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float32x4List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Float32x4Array_new")
external factory Float32x4List(int length);
@patch
@@ -2992,7 +2980,6 @@ class Int32x4List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int32x4List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Int32x4Array_new")
external factory Int32x4List(int length);
@patch
@@ -3059,7 +3046,6 @@ class Float64x2List {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float64x2List)
@pragma("vm:prefer-inline")
@pragma("vm:external-name", "TypedData_Float64x2Array_new")
external factory Float64x2List(int length);
@patch
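Every list factory above loses its vm:external-name pragma in the same way. A usage sketch; the inline-allocation claim is an assumption based on the pragmas that remain:

import 'dart:typed_data';

Uint8List newBuffer(int n) {
  // With the external-name pragma gone, this factory is no longer a native
  // call; the recognized, prefer-inline constructor lets the compiler emit
  // the allocation directly at the call site.
  return Uint8List(n);
}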
@@ -4353,7 +4339,7 @@ final class _Int8ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int8ArrayView)
@pragma("vm:external-name", "TypedDataView_Int8ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Int8ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4402,7 +4388,7 @@ final class _Uint8ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint8ArrayView)
@pragma("vm:external-name", "TypedDataView_Uint8ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Uint8ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4454,7 +4440,7 @@ final class _Uint8ClampedArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint8ClampedArrayView)
@pragma("vm:external-name", "TypedDataView_Uint8ClampedArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Uint8ClampedArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4508,7 +4494,7 @@ final class _Int16ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int16ArrayView)
@pragma("vm:external-name", "TypedDataView_Int16ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Int16ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4570,7 +4556,7 @@ final class _Uint16ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint16ArrayView)
@pragma("vm:external-name", "TypedDataView_Uint16ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Uint16ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4633,7 +4619,7 @@ final class _Int32ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int32ArrayView)
@pragma("vm:external-name", "TypedDataView_Int32ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Int32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4682,7 +4668,7 @@ final class _Uint32ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint32ArrayView)
@pragma("vm:external-name", "TypedDataView_Uint32ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Uint32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4731,7 +4717,7 @@ final class _Int64ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int64ArrayView)
@pragma("vm:external-name", "TypedDataView_Int64ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Int64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4780,7 +4766,7 @@ final class _Uint64ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Uint64ArrayView)
@pragma("vm:external-name", "TypedDataView_Uint64ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Uint64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4829,7 +4815,7 @@ final class _Float32ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float32ArrayView)
@pragma("vm:external-name", "TypedDataView_Float32ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Float32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4878,7 +4864,7 @@ final class _Float64ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float64ArrayView)
@pragma("vm:external-name", "TypedDataView_Float64ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Float64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4927,7 +4913,7 @@ final class _Float32x4ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float32x4ArrayView)
@pragma("vm:external-name", "TypedDataView_Float32x4ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Float32x4ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -4974,7 +4960,7 @@ final class _Int32x4ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Int32x4ArrayView)
@pragma("vm:external-name", "TypedDataView_Int32x4ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Int32x4ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5021,7 +5007,7 @@ final class _Float64x2ArrayView extends _TypedListView
// Constructor.
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _Float64x2ArrayView)
@pragma("vm:external-name", "TypedDataView_Float64x2ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _Float64x2ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5065,7 +5051,7 @@ final class _Float64x2ArrayView extends _TypedListView
final class _ByteDataView implements ByteData {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _ByteDataView)
@pragma("vm:external-name", "TypedDataView_ByteDataView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _ByteDataView._(
_TypedList buffer, int offsetInBytes, int length);
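Each view constructor above trades vm:external-name for vm:prefer-inline plus vm:idempotent. A sketch of a call path that benefits (assumes bytes holds at least 8 bytes):

import 'dart:typed_data';

double readFloat64(Uint8List bytes) {
  // ByteData.view ends up in the _ByteDataView._ factory patched above.
  // Marked prefer-inline and idempotent, it can be inlined, and a
  // non-escaping view like this one becomes a candidate for allocation
  // sinking.
  final view = ByteData.view(bytes.buffer, bytes.offsetInBytes, 8);
  return view.getFloat64(0, Endian.little);
}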
@@ -5459,14 +5445,18 @@ abstract class UnmodifiableByteBufferView implements ByteBuffer {
@patch
abstract class UnmodifiableByteDataView implements ByteData {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableByteDataView(ByteData data) =>
new _UnmodifiableByteDataView._((data as _ByteDataView).buffer._data,
data.offsetInBytes, data.lengthInBytes);
new _UnmodifiableByteDataView._(
unsafeCast<_ByteDataView>(data).buffer._data,
data.offsetInBytes,
data.lengthInBytes);
}
@patch
abstract class UnmodifiableUint8ListView implements Uint8List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableUint8ListView(Uint8List list) =>
new _UnmodifiableUint8ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5477,6 +5467,7 @@ abstract class UnmodifiableUint8ListView implements Uint8List {
@patch
abstract class UnmodifiableInt8ListView implements Int8List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableInt8ListView(Int8List list) =>
new _UnmodifiableInt8ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5487,6 +5478,7 @@ abstract class UnmodifiableInt8ListView implements Int8List {
@patch
abstract class UnmodifiableUint8ClampedListView implements Uint8ClampedList {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableUint8ClampedListView(Uint8ClampedList list) =>
new _UnmodifiableUint8ClampedArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5497,6 +5489,7 @@ abstract class UnmodifiableUint8ClampedListView implements Uint8ClampedList {
@patch
abstract class UnmodifiableUint16ListView implements Uint16List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableUint16ListView(Uint16List list) =>
new _UnmodifiableUint16ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5507,6 +5500,7 @@ abstract class UnmodifiableUint16ListView implements Uint16List {
@patch
abstract class UnmodifiableInt16ListView implements Int16List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableInt16ListView(Int16List list) =>
new _UnmodifiableInt16ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5517,6 +5511,7 @@ abstract class UnmodifiableInt16ListView implements Int16List {
@patch
abstract class UnmodifiableUint32ListView implements Uint32List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableUint32ListView(Uint32List list) =>
new _UnmodifiableUint32ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5527,6 +5522,7 @@ abstract class UnmodifiableUint32ListView implements Uint32List {
@patch
abstract class UnmodifiableInt32ListView implements Int32List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableInt32ListView(Int32List list) =>
new _UnmodifiableInt32ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5537,6 +5533,7 @@ abstract class UnmodifiableInt32ListView implements Int32List {
@patch
abstract class UnmodifiableUint64ListView implements Uint64List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableUint64ListView(Uint64List list) =>
new _UnmodifiableUint64ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5547,6 +5544,7 @@ abstract class UnmodifiableUint64ListView implements Uint64List {
@patch
abstract class UnmodifiableInt64ListView implements Int64List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableInt64ListView(Int64List list) =>
new _UnmodifiableInt64ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5557,6 +5555,7 @@ abstract class UnmodifiableInt64ListView implements Int64List {
@patch
abstract class UnmodifiableInt32x4ListView implements Int32x4List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableInt32x4ListView(Int32x4List list) =>
new _UnmodifiableInt32x4ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5567,6 +5566,7 @@ abstract class UnmodifiableInt32x4ListView implements Int32x4List {
@patch
abstract class UnmodifiableFloat32x4ListView implements Float32x4List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableFloat32x4ListView(Float32x4List list) =>
new _UnmodifiableFloat32x4ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5577,6 +5577,7 @@ abstract class UnmodifiableFloat32x4ListView implements Float32x4List {
@patch
abstract class UnmodifiableFloat64x2ListView implements Float64x2List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableFloat64x2ListView(Float64x2List list) =>
new _UnmodifiableFloat64x2ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5587,6 +5588,7 @@ abstract class UnmodifiableFloat64x2ListView implements Float64x2List {
@patch
abstract class UnmodifiableFloat32ListView implements Float32List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableFloat32ListView(Float32List list) =>
new _UnmodifiableFloat32ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
@@ -5597,6 +5599,7 @@ abstract class UnmodifiableFloat32ListView implements Float32List {
@patch
abstract class UnmodifiableFloat64ListView implements Float64List {
@patch
@pragma("vm:prefer-inline")
factory UnmodifiableFloat64ListView(Float64List list) =>
new _UnmodifiableFloat64ArrayView._(
unsafeCast<_TypedListBase>(list)._typedData,
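The unmodifiable-view factory patches above all follow the same shape: unwrap the backing storage with unsafeCast, then construct the read-only view. Observable behavior is unchanged, as in this sketch:

import 'dart:typed_data';

void main() {
  final bytes = Uint8List.fromList([1, 2, 3]);
  final frozen = UnmodifiableUint8ListView(bytes);
  print(frozen[1]); // 2: reads pass through to the backing list.
  // frozen[1] = 9; // Would throw UnsupportedError: writes are rejected.
}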
@@ -5609,7 +5612,7 @@ final class _UnmodifiableInt8ArrayView extends _Int8ArrayView
implements UnmodifiableInt8ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableInt8ArrayView)
@pragma("vm:external-name", "TypedDataView_Int8ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableInt8ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5626,7 +5629,7 @@ final class _UnmodifiableUint8ArrayView extends _Uint8ArrayView
implements UnmodifiableUint8ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableUint8ArrayView)
@pragma("vm:external-name", "TypedDataView_Uint8ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableUint8ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5643,7 +5646,7 @@ final class _UnmodifiableUint8ClampedArrayView extends _Uint8ClampedArrayView
implements UnmodifiableUint8ClampedListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableUint8ClampedArrayView)
@pragma("vm:external-name", "TypedDataView_Uint8ClampedArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableUint8ClampedArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5660,7 +5663,7 @@ final class _UnmodifiableInt16ArrayView extends _Int16ArrayView
implements UnmodifiableInt16ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableInt16ArrayView)
@pragma("vm:external-name", "TypedDataView_Int16ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableInt16ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5677,7 +5680,7 @@ final class _UnmodifiableUint16ArrayView extends _Uint16ArrayView
implements UnmodifiableUint16ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableUint16ArrayView)
@pragma("vm:external-name", "TypedDataView_Uint16ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableUint16ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5694,7 +5697,7 @@ final class _UnmodifiableInt32ArrayView extends _Int32ArrayView
implements UnmodifiableInt32ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableInt32ArrayView)
@pragma("vm:external-name", "TypedDataView_Int32ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableInt32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5711,7 +5714,7 @@ final class _UnmodifiableUint32ArrayView extends _Uint32ArrayView
implements UnmodifiableUint32ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableUint32ArrayView)
@pragma("vm:external-name", "TypedDataView_Uint32ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableUint32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5728,7 +5731,7 @@ final class _UnmodifiableInt64ArrayView extends _Int64ArrayView
implements UnmodifiableInt64ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableInt64ArrayView)
@pragma("vm:external-name", "TypedDataView_Int64ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableInt64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5745,7 +5748,7 @@ final class _UnmodifiableUint64ArrayView extends _Uint64ArrayView
implements UnmodifiableUint64ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableUint64ArrayView)
@pragma("vm:external-name", "TypedDataView_Uint64ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableUint64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5762,7 +5765,7 @@ final class _UnmodifiableFloat32ArrayView extends _Float32ArrayView
implements UnmodifiableFloat32ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableFloat32ArrayView)
@pragma("vm:external-name", "TypedDataView_Float32ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableFloat32ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5779,7 +5782,7 @@ final class _UnmodifiableFloat64ArrayView extends _Float64ArrayView
implements UnmodifiableFloat64ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableFloat64ArrayView)
@pragma("vm:external-name", "TypedDataView_Float64ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableFloat64ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5796,7 +5799,7 @@ final class _UnmodifiableFloat32x4ArrayView extends _Float32x4ArrayView
implements UnmodifiableFloat32x4ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableFloat32x4ArrayView)
@pragma("vm:external-name", "TypedDataView_Float32x4ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableFloat32x4ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5813,7 +5816,7 @@ final class _UnmodifiableInt32x4ArrayView extends _Int32x4ArrayView
implements UnmodifiableInt32x4ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableInt32x4ArrayView)
@pragma("vm:external-name", "TypedDataView_Int32x4ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableInt32x4ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5830,7 +5833,7 @@ final class _UnmodifiableFloat64x2ArrayView extends _Float64x2ArrayView
implements UnmodifiableFloat64x2ListView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableFloat64x2ArrayView)
@pragma("vm:external-name", "TypedDataView_Float64x2ArrayView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableFloat64x2ArrayView._(
_TypedList buffer, int offsetInBytes, int length);
@@ -5847,7 +5850,7 @@ final class _UnmodifiableByteDataView extends _ByteDataView
implements UnmodifiableByteDataView {
@pragma("vm:recognized", "other")
@pragma("vm:exact-result-type", _UnmodifiableByteDataView)
@pragma("vm:external-name", "TypedDataView_ByteDataView_new")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external factory _UnmodifiableByteDataView._(
_TypedList buffer, int offsetInBytes, int length);