[vm/compiler] Move setRange bounds checking entirely into Dart.

The bounds checking was previously implemented in Dart. This change
removes _checkSetRangeArguments by inlining it into
_TypedListBase.setRange, renames _checkBoundsAndMemcpyN to _memMoveN
(since those methods no longer perform bounds checking), and removes
the now-unneeded bounds checking from the native function
TypedData_setRange.

TEST=co19{,_2}/LibTest/typed_data lib{,_2}/typed_data
     corelib{,_2}/list_test

Issue: https://github.com/dart-lang/sdk/issues/42072
Cq-Include-Trybots: luci.dart.try:vm-aot-linux-debug-simarm_x64-try,vm-aot-linux-debug-x64-try,vm-aot-linux-debug-x64c-try,vm-kernel-linux-debug-x64-try,vm-kernel-precomp-linux-debug-x64-try,vm-linux-debug-x64-try,vm-linux-debug-x64c-try,vm-mac-debug-arm64-try,vm-aot-linux-release-simarm_x64-try,vm-aot-linux-release-x64-try,vm-aot-mac-release-arm64-try,vm-linux-release-x64-try,vm-mac-release-arm64-try,vm-kernel-precomp-linux-release-x64-try
Change-Id: I85ec751708f603f68729f4109d7339dd8407ae77
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/324102
Reviewed-by: Alexander Markov <alexmarkov@google.com>
Commit-Queue: Tess Strickland <sstrickl@google.com>
This commit is contained in:
Tess Strickland 2023-09-05 17:10:51 +00:00 committed by Commit Queue
parent 2a669c571f
commit b94d3f730d
7 changed files with 311 additions and 262 deletions

View file

@ -89,11 +89,13 @@ static bool IsUint8(intptr_t cid) {
}
DEFINE_NATIVE_ENTRY(TypedDataBase_setRange, 0, 5) {
// This is called after bounds checking, so the numeric inputs are
// guaranteed to be Smis, and the length is guaranteed to be non-zero.
const TypedDataBase& dst =
TypedDataBase::CheckedHandle(zone, arguments->NativeArgAt(0));
const Smi& dst_start_smi =
Smi::CheckedHandle(zone, arguments->NativeArgAt(1));
const Smi& dst_end_smi = Smi::CheckedHandle(zone, arguments->NativeArgAt(2));
const Smi& length_smi = Smi::CheckedHandle(zone, arguments->NativeArgAt(2));
const TypedDataBase& src =
TypedDataBase::CheckedHandle(zone, arguments->NativeArgAt(3));
const Smi& src_start_smi =
@ -104,16 +106,29 @@ DEFINE_NATIVE_ENTRY(TypedDataBase_setRange, 0, 5) {
const intptr_t dst_start_in_bytes =
dst_start_smi.Value() * element_size_in_bytes;
const intptr_t dst_end_in_bytes = dst_end_smi.Value() * element_size_in_bytes;
const intptr_t length_in_bytes = length_smi.Value() * element_size_in_bytes;
const intptr_t src_start_in_bytes =
src_start_smi.Value() * element_size_in_bytes;
const intptr_t length_in_bytes = dst_end_in_bytes - dst_start_in_bytes;
#if defined(DEBUG)
// Verify bounds checks weren't needed.
ASSERT(dst_start_in_bytes >= 0);
ASSERT(src_start_in_bytes >= 0);
// The callers of this native function never call it for a zero-sized copy.
ASSERT(length_in_bytes > 0);
const intptr_t dst_length_in_bytes = dst.LengthInBytes();
// Since the length is non-zero, the start can't be the same as the end.
ASSERT(dst_start_in_bytes < dst_length_in_bytes);
ASSERT(length_in_bytes <= dst_length_in_bytes - dst_start_in_bytes);
const intptr_t src_length_in_bytes = src.LengthInBytes();
// Since the length is non-zero, the start can't be the same as the end.
ASSERT(src_start_in_bytes < src_length_in_bytes);
ASSERT(length_in_bytes <= src_length_in_bytes - src_start_in_bytes);
#endif
if (!IsClamped(dst.ptr()->GetClassId()) || IsUint8(src.ptr()->GetClassId())) {
// Bounds checking has already been performed in Dart (in
// _TypedListBase.setRange) prior to the call to _nativeSetRange, so just
// perform the memmove.
//
// TODO(dartbug.com/42072): We do this when the copy length gets large
// enough that a native call to invoke memmove is faster than the generated
// code from MemoryCopy. Replace the static call to _nativeSetRange with
@ -125,31 +140,19 @@ DEFINE_NATIVE_ENTRY(TypedDataBase_setRange, 0, 5) {
return Object::null();
}
// This is called on the fast path prior to bounds checking, so perform
// the bounds check even if the length is 0.
const intptr_t dst_length_in_bytes = dst.LengthInBytes();
RangeCheck(dst_start_in_bytes, length_in_bytes, dst_length_in_bytes,
element_size_in_bytes);
const intptr_t src_length_in_bytes = src.LengthInBytes();
RangeCheck(src_start_in_bytes, length_in_bytes, src_length_in_bytes,
element_size_in_bytes);
ASSERT_EQUAL(element_size_in_bytes, 1);
if (length_in_bytes > 0) {
NoSafepointScope no_safepoint;
uint8_t* dst_data =
reinterpret_cast<uint8_t*>(dst.DataAddr(dst_start_in_bytes));
int8_t* src_data =
reinterpret_cast<int8_t*>(src.DataAddr(src_start_in_bytes));
for (intptr_t ix = 0; ix < length_in_bytes; ix++) {
int8_t v = *src_data;
if (v < 0) v = 0;
*dst_data = v;
src_data++;
dst_data++;
}
NoSafepointScope no_safepoint;
uint8_t* dst_data =
reinterpret_cast<uint8_t*>(dst.DataAddr(dst_start_in_bytes));
int8_t* src_data =
reinterpret_cast<int8_t*>(src.DataAddr(src_start_in_bytes));
for (intptr_t ix = 0; ix < length_in_bytes; ix++) {
int8_t v = *src_data;
if (v < 0) v = 0;
*dst_data = v;
src_data++;
dst_data++;
}
return Object::null();

View file

@ -420,8 +420,8 @@ static void RunMemoryCopyInstrTest(intptr_t src_start,
#define MEMORY_MOVE_TEST_BOXED(src_start, dest_start, length, elem_size) \
ISOLATE_UNIT_TEST_CASE( \
IRTest_MemoryMove_##src_start##_##dest_start##_##length##_##elem_size) { \
RunMemoryCopyInstrTest(src_start, dest_start, length, elem_size, true, \
false); \
RunMemoryCopyInstrTest(src_start, dest_start, length, elem_size, false, \
true); \
}
#define MEMORY_MOVE_TEST_UNBOXED(src_start, dest_start, length, el_si) \

View file

@ -925,11 +925,11 @@ bool FlowGraphBuilder::IsRecognizedMethodForFlowGraph(
case MethodRecognizer::kRecord_numFields:
case MethodRecognizer::kSuspendState_clone:
case MethodRecognizer::kSuspendState_resume:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy1:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy2:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy4:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy8:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy16:
case MethodRecognizer::kTypedData_memMove1:
case MethodRecognizer::kTypedData_memMove2:
case MethodRecognizer::kTypedData_memMove4:
case MethodRecognizer::kTypedData_memMove8:
case MethodRecognizer::kTypedData_memMove16:
case MethodRecognizer::kTypedData_ByteDataView_factory:
case MethodRecognizer::kTypedData_Int8ArrayView_factory:
case MethodRecognizer::kTypedData_Uint8ArrayView_factory:
@ -1138,26 +1138,21 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
body += TailCall(resume_stub);
break;
}
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy1:
case MethodRecognizer::kTypedData_memMove1:
// Pick an appropriate typed data cid based on the element size.
body +=
BuildTypedDataCheckBoundsAndMemcpy(function, kTypedDataUint8ArrayCid);
body += BuildTypedDataMemMove(function, kTypedDataUint8ArrayCid);
break;
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy2:
body += BuildTypedDataCheckBoundsAndMemcpy(function,
kTypedDataUint16ArrayCid);
case MethodRecognizer::kTypedData_memMove2:
body += BuildTypedDataMemMove(function, kTypedDataUint16ArrayCid);
break;
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy4:
body += BuildTypedDataCheckBoundsAndMemcpy(function,
kTypedDataUint32ArrayCid);
case MethodRecognizer::kTypedData_memMove4:
body += BuildTypedDataMemMove(function, kTypedDataUint32ArrayCid);
break;
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy8:
body += BuildTypedDataCheckBoundsAndMemcpy(function,
kTypedDataUint64ArrayCid);
case MethodRecognizer::kTypedData_memMove8:
body += BuildTypedDataMemMove(function, kTypedDataUint64ArrayCid);
break;
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy16:
body += BuildTypedDataCheckBoundsAndMemcpy(function,
kTypedDataInt32x4ArrayCid);
case MethodRecognizer::kTypedData_memMove16:
body += BuildTypedDataMemMove(function, kTypedDataInt32x4ArrayCid);
break;
#define CASE(name) \
case MethodRecognizer::kTypedData_##name##_factory: \
@ -1758,33 +1753,16 @@ Fragment FlowGraphBuilder::BuildTypedDataViewFactoryConstructor(
return body;
}
Fragment FlowGraphBuilder::BuildTypedDataCheckBoundsAndMemcpy(
const Function& function,
intptr_t cid) {
Fragment FlowGraphBuilder::BuildTypedDataMemMove(const Function& function,
intptr_t cid) {
ASSERT_EQUAL(parsed_function_->function().NumParameters(), 5);
LocalVariable* arg_to = parsed_function_->RawParameterVariable(0);
LocalVariable* arg_to_start = parsed_function_->RawParameterVariable(1);
LocalVariable* arg_to_end = parsed_function_->RawParameterVariable(2);
LocalVariable* arg_count = parsed_function_->RawParameterVariable(2);
LocalVariable* arg_from = parsed_function_->RawParameterVariable(3);
LocalVariable* arg_from_start = parsed_function_->RawParameterVariable(4);
const Library& lib = Library::Handle(Z, Library::TypedDataLibrary());
ASSERT(!lib.IsNull());
const Function& check_set_range_args = Function::ZoneHandle(
Z, lib.LookupFunctionAllowPrivate(Symbols::_checkSetRangeArguments()));
ASSERT(!check_set_range_args.IsNull());
Fragment body;
body += LoadLocal(arg_to);
body += LoadLocal(arg_to_start);
body += LoadLocal(arg_to_end);
body += LoadLocal(arg_from);
body += LoadLocal(arg_from_start);
body += StaticCall(TokenPosition::kNoSource, check_set_range_args, 5,
ICData::kStatic);
// The length is guaranteed to be a Smi if bounds checking is successful.
LocalVariable* length_to_copy = MakeTemporary("length");
// If we're copying at least this many elements, calling _nativeSetRange,
// which calls memmove via a native call, is faster than using the code
// currently emitted by the MemoryCopy instruction.
@ -1806,7 +1784,7 @@ Fragment FlowGraphBuilder::BuildTypedDataCheckBoundsAndMemcpy(
JoinEntryInstr* done = BuildJoinEntry();
TargetEntryInstr *is_small_enough, *is_too_large;
body += LoadLocal(length_to_copy);
body += LoadLocal(arg_count);
body += IntConstant(kCopyLengthForNativeCall);
body += SmiRelationalOp(Token::kLT);
body += BranchIfTrue(&is_small_enough, &is_too_large);
@ -1816,13 +1794,15 @@ Fragment FlowGraphBuilder::BuildTypedDataCheckBoundsAndMemcpy(
use_instruction += LoadLocal(arg_to);
use_instruction += LoadLocal(arg_from_start);
use_instruction += LoadLocal(arg_to_start);
use_instruction += LoadLocal(length_to_copy);
use_instruction += LoadLocal(arg_count);
use_instruction += MemoryCopy(cid, cid,
/*unboxed_inputs=*/false, /*can_overlap=*/true);
use_instruction += Goto(done);
// TODO(dartbug.com/42072): Instead of doing a static call to a native
// method, make a leaf runtime entry for memmove and use CCall.
const Library& lib = Library::Handle(Z, Library::TypedDataLibrary());
ASSERT(!lib.IsNull());
const Class& typed_list_base =
Class::Handle(Z, lib.LookupClassAllowPrivate(Symbols::_TypedListBase()));
ASSERT(!typed_list_base.IsNull());
@ -1836,7 +1816,7 @@ Fragment FlowGraphBuilder::BuildTypedDataCheckBoundsAndMemcpy(
Fragment call_native(is_too_large);
call_native += LoadLocal(arg_to);
call_native += LoadLocal(arg_to_start);
call_native += LoadLocal(arg_to_end);
call_native += LoadLocal(arg_count);
call_native += LoadLocal(arg_from);
call_native += LoadLocal(arg_from_start);
call_native += StaticCall(TokenPosition::kNoSource, native_set_range, 5,
@ -1845,7 +1825,6 @@ Fragment FlowGraphBuilder::BuildTypedDataCheckBoundsAndMemcpy(
call_native += Goto(done);
body.current = done;
body += DropTemporary(&length_to_copy);
body += NullConstant();
return body;

View file

@ -146,8 +146,7 @@ class FlowGraphBuilder : public BaseFlowGraphBuilder {
FlowGraph* BuildGraphOfRecognizedMethod(const Function& function);
Fragment BuildTypedDataCheckBoundsAndMemcpy(const Function& function,
intptr_t cid);
Fragment BuildTypedDataMemMove(const Function& function, intptr_t cid);
Fragment BuildTypedDataViewFactoryConstructor(const Function& function,
classid_t cid);
Fragment BuildTypedDataFactoryConstructor(const Function& function,

View file

@ -114,16 +114,11 @@ namespace dart {
V(Float32x4List, ., TypedData_Float32x4Array_factory, 0x0a6eefa8) \
V(Int32x4List, ., TypedData_Int32x4Array_factory, 0x5a09288e) \
V(Float64x2List, ., TypedData_Float64x2Array_factory, 0xecbc738a) \
V(_TypedListBase, _checkBoundsAndMemcpy1, \
TypedData_checkBoundsAndMemcpy1, 0xf9d326bd) \
V(_TypedListBase, _checkBoundsAndMemcpy2, \
TypedData_checkBoundsAndMemcpy2, 0xf0756646) \
V(_TypedListBase, _checkBoundsAndMemcpy4, \
TypedData_checkBoundsAndMemcpy4, 0xe8cfd800) \
V(_TypedListBase, _checkBoundsAndMemcpy8, \
TypedData_checkBoundsAndMemcpy8, 0xe945188e) \
V(_TypedListBase, _checkBoundsAndMemcpy16, \
TypedData_checkBoundsAndMemcpy16, 0xebd06cb3) \
V(_TypedListBase, _memMove1, TypedData_memMove1, 0xd2767fb0) \
V(_TypedListBase, _memMove2, TypedData_memMove2, 0xed382bb6) \
V(_TypedListBase, _memMove4, TypedData_memMove4, 0xcfe37726) \
V(_TypedListBase, _memMove8, TypedData_memMove8, 0xd1d8e325) \
V(_TypedListBase, _memMove16, TypedData_memMove16, 0x07861cd5) \
V(::, _toClampedUint8, ConvertIntToClampedUint8, 0xd0e522d0) \
V(::, copyRangeFromUint8ListToOneByteString, \
CopyRangeFromUint8ListToOneByteString, 0xcc42cce1) \

View file

@ -9102,11 +9102,11 @@ bool Function::RecognizedKindForceOptimize() const {
case MethodRecognizer::kRecord_numFields:
case MethodRecognizer::kUtf8DecoderScan:
case MethodRecognizer::kDouble_hashCode:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy1:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy2:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy4:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy8:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy16:
case MethodRecognizer::kTypedData_memMove1:
case MethodRecognizer::kTypedData_memMove2:
case MethodRecognizer::kTypedData_memMove4:
case MethodRecognizer::kTypedData_memMove8:
case MethodRecognizer::kTypedData_memMove16:
// Prevent the GC from running so that the operation is atomic from
// a GC point of view. Always double check implementation in
// kernel_to_il.cc that no GC can happen in between the relevant IL

View file

@ -103,24 +103,47 @@ abstract final class _TypedListBase {
}
@pragma("vm:prefer-inline")
void setRange(int start, int end, Iterable from, [int skipCount = 0]) =>
(from is _TypedListBase &&
(from as _TypedListBase).elementSizeInBytes == elementSizeInBytes)
? _fastSetRange(start, end, from as _TypedListBase, skipCount)
: _slowSetRange(start, end, from, skipCount);
void setRange(int start, int end, Iterable from, [int skipCount = 0]) {
// Range check all numeric inputs.
if (0 > start || start > end || end > length) {
RangeError.checkValidRange(start, end, length); // Always throws.
assert(false);
}
if (skipCount < 0) {
throw RangeError.range(skipCount, 0, null, "skipCount");
}
if (from is _TypedListBase) {
// Note: _TypedListBase is not related to Iterable<int> so there is
// no promotion here.
final fromAsTyped = unsafeCast<_TypedListBase>(from);
if (fromAsTyped.elementSizeInBytes == elementSizeInBytes) {
// Check that from has enough elements, which is assumed by
// _fastSetRange, using the more efficient _TypedListBase length getter.
final count = end - start;
if ((fromAsTyped.length - skipCount) < count) {
throw IterableElementError.tooFew();
}
if (count == 0) return;
return _fastSetRange(start, count, fromAsTyped, skipCount);
}
}
// _slowSetRange checks that from has enough elements internally.
return _slowSetRange(start, end, from, skipCount);
}
// Method(s) implementing Object interface.
String toString() => ListBase.listToString(this as List);
// Internal utility methods.
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount);
void _fastSetRange(int start, int count, _TypedListBase from, int skipCount);
void _slowSetRange(int start, int end, Iterable from, int skipCount);
@pragma("vm:prefer-inline")
bool get _containsUnsignedBytes => false;
// Performs a copy of the elements starting at [skipCount] in [from] to
// [this] starting at [start] (inclusive) and ending at [end] (exclusive).
// Performs a copy of the [count] elements starting at [skipCount] in [from]
// to [this] starting at [start].
//
// Primarily called by Dart code to handle clamping.
//
@ -128,47 +151,47 @@ abstract final class _TypedListBase {
@pragma("vm:external-name", "TypedDataBase_setRange")
@pragma("vm:entry-point")
external void _nativeSetRange(
int start, int end, _TypedListBase from, int skipOffset);
int start, int count, _TypedListBase from, int skipOffset);
// Performs a copy of the elements starting at [skipCount] in [from] to
// [this] starting at [start] (inclusive) and ending at [end] (exclusive).
// Performs a copy of the [count] elements starting at [skipCount] in [from]
// to [this] starting at [start].
//
// The element sizes of [this] and [from] must be 1 (test at caller).
@pragma("vm:recognized", "other")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external void _checkBoundsAndMemcpy1(
int start, int end, _TypedListBase from, int skipCount);
external void _memMove1(
int start, int count, _TypedListBase from, int skipCount);
// Performs a copy of the elements starting at [skipCount] in [from] to
// [this] starting at [start] (inclusive) and ending at [end] (exclusive).
// Performs a copy of the [count] elements starting at [skipCount] in [from]
// to [this] starting at [start].
//
// The element sizes of [this] and [from] must be 2 (test at caller).
@pragma("vm:recognized", "other")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external void _checkBoundsAndMemcpy2(
int start, int end, _TypedListBase from, int skipCount);
external void _memMove2(
int start, int count, _TypedListBase from, int skipCount);
// Performs a copy of the elements starting at [skipCount] in [from] to
// [this] starting at [start] (inclusive) and ending at [end] (exclusive).
// Performs a copy of the [count] elements starting at [skipCount] in [from]
// to [this] starting at [start].
//
// The element sizes of [this] and [from] must be 4 (test at caller).
@pragma("vm:recognized", "other")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external void _checkBoundsAndMemcpy4(
int start, int end, _TypedListBase from, int skipCount);
external void _memMove4(
int start, int count, _TypedListBase from, int skipCount);
// Performs a copy of the elements starting at [skipCount] in [from] to
// [this] starting at [start] (inclusive) and ending at [end] (exclusive).
// Performs a copy of the [count] elements starting at [skipCount] in [from]
// to [this] starting at [start].
//
// The element sizes of [this] and [from] must be 8 (test at caller).
@pragma("vm:recognized", "other")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external void _checkBoundsAndMemcpy8(
int start, int end, _TypedListBase from, int skipCount);
external void _memMove8(
int start, int count, _TypedListBase from, int skipCount);
// Performs a copy of the elements starting at [skipCount] in [from] to
// [this] starting at [start] (inclusive) and ending at [end] (exclusive).
@ -177,8 +200,8 @@ abstract final class _TypedListBase {
@pragma("vm:recognized", "other")
@pragma("vm:prefer-inline")
@pragma("vm:idempotent")
external void _checkBoundsAndMemcpy16(
int start, int end, _TypedListBase from, int skipCount);
external void _memMove16(
int start, int count, _TypedListBase from, int skipCount);
}
mixin _IntListMixin implements List<int> {
@ -462,14 +485,18 @@ mixin _TypedIntListMixin<SpawnedType extends List<int>> on _IntListMixin
SpawnedType _createList(int length);
void _slowSetRange(int start, int end, Iterable from, int skipCount) {
final count = _checkSetRangeArguments(this, start, end, from, skipCount);
if (count == 0) return;
// The numeric inputs have already been checked, all that's left is to
// check that from has enough elements when applicable.
if (from is _TypedListBase) {
// Note: _TypedListBase is not related to Iterable<int> so there is
// no promotion here.
if ((from as _TypedListBase).buffer == this.buffer) {
final fromAsTyped = unsafeCast<_TypedListBase>(from);
if (fromAsTyped.buffer == this.buffer) {
final count = end - start;
if ((fromAsTyped.length - skipCount) < count) {
throw IterableElementError.tooFew();
}
if (count == 0) return;
// Different element sizes, but same buffer means that we need
// an intermediate structure.
// TODO(srdjan): Optimize to skip copying if the range does not overlap.
@ -494,9 +521,11 @@ mixin _TypedIntListMixin<SpawnedType extends List<int>> on _IntListMixin
otherList = from.skip(skipCount).toList(growable: false);
otherStart = 0;
}
if (otherStart + count > otherList.length) {
final count = end - start;
if ((otherList.length - otherStart) < count) {
throw IterableElementError.tooFew();
}
if (count == 0) return;
Lists.copy(otherList, otherStart, this, start, count);
}
@ -793,14 +822,18 @@ mixin _TypedDoubleListMixin<SpawnedType extends List<double>>
SpawnedType _createList(int length);
void _slowSetRange(int start, int end, Iterable from, int skipCount) {
final count = _checkSetRangeArguments(this, start, end, from, skipCount);
if (count == 0) return;
// The numeric inputs have already been checked, all that's left is to
// check that from has enough elements when applicable.
if (from is _TypedListBase) {
// Note: _TypedListBase is not related to Iterable<double> so there is
// Note: _TypedListBase is not related to Iterable<double> so there is
// no promotion here.
if ((from as _TypedListBase).buffer == this.buffer) {
final fromAsTyped = unsafeCast<_TypedListBase>(from);
if (fromAsTyped.buffer == this.buffer) {
final count = end - start;
if ((fromAsTyped.length - skipCount) < count) {
throw IterableElementError.tooFew();
}
if (count == 0) return;
// Different element sizes, but same buffer means that we need
// an intermediate structure.
// TODO(srdjan): Optimize to skip copying if the range does not overlap.
@ -825,9 +858,11 @@ mixin _TypedDoubleListMixin<SpawnedType extends List<double>>
otherList = from.skip(skipCount).toList(growable: false);
otherStart = 0;
}
if (otherStart + count > otherList.length) {
final count = end - start;
if ((otherList.length - otherStart) < count) {
throw IterableElementError.tooFew();
}
if (count == 0) return;
Lists.copy(otherList, otherStart, this, start, count);
}
@ -907,14 +942,18 @@ mixin _Float32x4ListMixin implements List<Float32x4> {
}
void _slowSetRange(int start, int end, Iterable from, int skipCount) {
final count = _checkSetRangeArguments(this, start, end, from, skipCount);
if (count == 0) return;
// The numeric inputs have already been checked, all that's left is to
// check that from has enough elements when applicable.
if (from is _TypedListBase) {
// Note: _TypedListBase is not related to Iterable<Float32x4> so there is
// Note: _TypedListBase is not related to Iterable<Float32x4> so there is
// no promotion here.
if ((from as _TypedListBase).buffer == this.buffer) {
final fromAsTyped = unsafeCast<_TypedListBase>(from);
if (fromAsTyped.buffer == this.buffer) {
final count = end - start;
if ((fromAsTyped.length - skipCount) < count) {
throw IterableElementError.tooFew();
}
if (count == 0) return;
// Different element sizes, but same buffer means that we need
// an intermediate structure.
// TODO(srdjan): Optimize to skip copying if the range does not overlap.
@ -939,9 +978,11 @@ mixin _Float32x4ListMixin implements List<Float32x4> {
otherList = from.skip(skipCount).toList(growable: false);
otherStart = 0;
}
if (otherStart + count > otherList.length) {
final count = end - start;
if ((otherList.length - otherStart) < count) {
throw IterableElementError.tooFew();
}
if (count == 0) return;
Lists.copy(otherList, otherStart, this, start, count);
}
@ -1238,14 +1279,18 @@ mixin _Int32x4ListMixin implements List<Int32x4> {
}
void _slowSetRange(int start, int end, Iterable from, int skipCount) {
final count = _checkSetRangeArguments(this, start, end, from, skipCount);
if (count == 0) return;
// The numeric inputs have already been checked, all that's left is to
// check that from has enough elements when applicable.
if (from is _TypedListBase) {
// Note: _TypedListBase is not related to Iterable<Int32x4> so there is
// Note: _TypedListBase is not related to Iterable<Int32x4> so there is
// no promotion here.
if ((from as _TypedListBase).buffer == this.buffer) {
final fromAsTyped = unsafeCast<_TypedListBase>(from);
if (fromAsTyped.buffer == this.buffer) {
final count = end - start;
if ((fromAsTyped.length - skipCount) < count) {
throw IterableElementError.tooFew();
}
if (count == 0) return;
// Different element sizes, but same buffer means that we need
// an intermediate structure.
// TODO(srdjan): Optimize to skip copying if the range does not overlap.
@ -1270,9 +1315,11 @@ mixin _Int32x4ListMixin implements List<Int32x4> {
otherList = from.skip(skipCount).toList(growable: false);
otherStart = 0;
}
if (otherStart + count > otherList.length) {
final count = end - start;
if ((otherList.length - otherStart) < count) {
throw IterableElementError.tooFew();
}
if (count == 0) return;
Lists.copy(otherList, otherStart, this, start, count);
}
@ -1568,14 +1615,18 @@ mixin _Float64x2ListMixin implements List<Float64x2> {
}
void _slowSetRange(int start, int end, Iterable from, int skipCount) {
final count = _checkSetRangeArguments(this, start, end, from, skipCount);
if (count == 0) return;
// The numeric inputs have already been checked, all that's left is to
// check that from has enough elements when applicable.
if (from is _TypedListBase) {
// Note: _TypedListBase is not related to Iterable<Float64x2> so there is
// Note: _TypedListBase is not related to Iterable<Float64x2> so there is
// no promotion here.
if ((from as _TypedListBase).buffer == this.buffer) {
final fromAsTyped = unsafeCast<_TypedListBase>(from);
if (fromAsTyped.buffer == this.buffer) {
final count = end - start;
if ((fromAsTyped.length - skipCount) < count) {
throw IterableElementError.tooFew();
}
if (count == 0) return;
// Different element sizes, but same buffer means that we need
// an intermediate structure.
// TODO(srdjan): Optimize to skip copying if the range does not overlap.
@ -1600,9 +1651,11 @@ mixin _Float64x2ListMixin implements List<Float64x2> {
otherList = from.skip(skipCount).toList(growable: false);
otherStart = 0;
}
if (otherStart + count > otherList.length) {
final count = end - start;
if ((otherList.length - otherStart) < count) {
throw IterableElementError.tooFew();
}
if (count == 0) return;
Lists.copy(otherList, otherStart, this, start, count);
}
@ -2167,8 +2220,9 @@ final class _Int8List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy1(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove1(start, count, from, skipCount);
}
@patch
@ -2229,8 +2283,9 @@ final class _Uint8List extends _TypedList
bool get _containsUnsignedBytes => true;
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy1(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove1(start, count, from, skipCount);
}
@patch
@ -2291,10 +2346,11 @@ final class _Uint8ClampedList extends _TypedList
bool get _containsUnsignedBytes => true;
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
from._containsUnsignedBytes
? _checkBoundsAndMemcpy1(start, end, from, skipCount)
: _nativeSetRange(start, end, from, skipCount);
? _memMove1(start, count, from, skipCount)
: _nativeSetRange(start, count, from, skipCount);
}
@patch
@ -2364,8 +2420,9 @@ final class _Int16List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy2(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove2(start, count, from, skipCount);
int _getIndexedInt16(int index) {
return _getInt16(index * Int16List.bytesPerElement);
@ -2443,8 +2500,9 @@ final class _Uint16List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy2(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove2(start, count, from, skipCount);
int _getIndexedUint16(int index) {
return _getUint16(index * Uint16List.bytesPerElement);
@ -2509,8 +2567,9 @@ final class _Int32List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy4(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove4(start, count, from, skipCount);
int _getIndexedInt32(int index) {
return _getInt32(index * Int32List.bytesPerElement);
@ -2575,8 +2634,9 @@ final class _Uint32List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy4(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove4(start, count, from, skipCount);
int _getIndexedUint32(int index) {
return _getUint32(index * Uint32List.bytesPerElement);
@ -2641,8 +2701,9 @@ final class _Int64List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy8(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove8(start, count, from, skipCount);
int _getIndexedInt64(int index) {
return _getInt64(index * Int64List.bytesPerElement);
@ -2707,8 +2768,9 @@ final class _Uint64List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy8(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove8(start, count, from, skipCount);
int _getIndexedUint64(int index) {
return _getUint64(index * Uint64List.bytesPerElement);
@ -2774,8 +2836,9 @@ final class _Float32List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy4(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove4(start, count, from, skipCount);
double _getIndexedFloat32(int index) {
return _getFloat32(index * Float32List.bytesPerElement);
@ -2841,8 +2904,9 @@ final class _Float64List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy8(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove8(start, count, from, skipCount);
double _getIndexedFloat64(int index) {
return _getFloat64(index * Float64List.bytesPerElement);
@ -2907,8 +2971,9 @@ final class _Float32x4List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy16(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove16(start, count, from, skipCount);
Float32x4 _getIndexedFloat32x4(int index) {
return _getFloat32x4(index * Float32x4List.bytesPerElement);
@ -2973,8 +3038,9 @@ final class _Int32x4List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy16(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove16(start, count, from, skipCount);
Int32x4 _getIndexedInt32x4(int index) {
return _getInt32x4(index * Int32x4List.bytesPerElement);
@ -3039,8 +3105,9 @@ final class _Float64x2List extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy16(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove16(start, count, from, skipCount);
Float64x2 _getIndexedFloat64x2(int index) {
return _getFloat64x2(index * Float64x2List.bytesPerElement);
@ -3087,8 +3154,9 @@ final class _ExternalInt8Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy1(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove1(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -3133,8 +3201,9 @@ final class _ExternalUint8Array extends _TypedList
bool get _containsUnsignedBytes => true;
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy1(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove1(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -3179,10 +3248,11 @@ final class _ExternalUint8ClampedArray extends _TypedList
bool get _containsUnsignedBytes => true;
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
from._containsUnsignedBytes
? _checkBoundsAndMemcpy1(start, end, from, skipCount)
: _nativeSetRange(start, end, from, skipCount);
? _memMove1(start, count, from, skipCount)
: _nativeSetRange(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -3221,8 +3291,9 @@ final class _ExternalInt16Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy2(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove2(start, count, from, skipCount);
int _getIndexedInt16(int index) {
return _getInt16(index * Int16List.bytesPerElement);
@ -3269,8 +3340,9 @@ final class _ExternalUint16Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy2(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove2(start, count, from, skipCount);
int _getIndexedUint16(int index) {
return _getUint16(index * Uint16List.bytesPerElement);
@ -3317,8 +3389,9 @@ final class _ExternalInt32Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy4(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove4(start, count, from, skipCount);
int _getIndexedInt32(int index) {
return _getInt32(index * Int32List.bytesPerElement);
@ -3365,8 +3438,9 @@ final class _ExternalUint32Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy4(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove4(start, count, from, skipCount);
int _getIndexedUint32(int index) {
return _getUint32(index * Uint32List.bytesPerElement);
@ -3413,8 +3487,9 @@ final class _ExternalInt64Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy8(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove8(start, count, from, skipCount);
int _getIndexedInt64(int index) {
return _getInt64(index * Int64List.bytesPerElement);
@ -3461,8 +3536,9 @@ final class _ExternalUint64Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy8(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove8(start, count, from, skipCount);
int _getIndexedUint64(int index) {
return _getUint64(index * Uint64List.bytesPerElement);
@ -3509,8 +3585,9 @@ final class _ExternalFloat32Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy4(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove4(start, count, from, skipCount);
double _getIndexedFloat32(int index) {
return _getFloat32(index * Float32List.bytesPerElement);
@ -3557,8 +3634,9 @@ final class _ExternalFloat64Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy8(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove8(start, count, from, skipCount);
double _getIndexedFloat64(int index) {
return _getFloat64(index * Float64List.bytesPerElement);
@ -3605,8 +3683,9 @@ final class _ExternalFloat32x4Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy16(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove16(start, count, from, skipCount);
Float32x4 _getIndexedFloat32x4(int index) {
return _getFloat32x4(index * Float32x4List.bytesPerElement);
@ -3653,8 +3732,9 @@ final class _ExternalInt32x4Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy16(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove16(start, count, from, skipCount);
Int32x4 _getIndexedInt32x4(int index) {
return _getInt32x4(index * Int32x4List.bytesPerElement);
@ -3701,8 +3781,9 @@ final class _ExternalFloat64x2Array extends _TypedList
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy16(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove16(start, count, from, skipCount);
Float64x2 _getIndexedFloat64x2(int index) {
return _getFloat64x2(index * Float64x2List.bytesPerElement);
@ -4307,8 +4388,9 @@ final class _Int8ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy1(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove1(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4358,8 +4440,9 @@ final class _Uint8ArrayView extends _TypedListView
bool get _containsUnsignedBytes => true;
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy1(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove1(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4409,10 +4492,11 @@ final class _Uint8ClampedArrayView extends _TypedListView
bool get _containsUnsignedBytes => true;
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
from._containsUnsignedBytes
? _checkBoundsAndMemcpy1(start, end, from, skipCount)
: _nativeSetRange(start, end, from, skipCount);
? _memMove1(start, count, from, skipCount)
: _nativeSetRange(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4472,8 +4556,9 @@ final class _Int16ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy2(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove2(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4534,8 +4619,9 @@ final class _Uint16ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy2(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove2(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4582,8 +4668,9 @@ final class _Int32ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy4(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove4(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4630,8 +4717,9 @@ final class _Uint32ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy4(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove4(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4678,8 +4766,9 @@ final class _Int64ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy8(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove8(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4726,8 +4815,9 @@ final class _Uint64ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy8(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove8(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4774,8 +4864,9 @@ final class _Float32ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy4(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove4(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4822,8 +4913,9 @@ final class _Float64ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy8(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove8(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4868,8 +4960,9 @@ final class _Float32x4ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy16(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove16(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4914,8 +5007,9 @@ final class _Int32x4ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy16(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove16(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -4960,8 +5054,9 @@ final class _Float64x2ArrayView extends _TypedListView
}
@pragma("vm:prefer-inline")
void _fastSetRange(int start, int end, _TypedListBase from, int skipCount) =>
_checkBoundsAndMemcpy16(start, end, from, skipCount);
void _fastSetRange(
int start, int count, _TypedListBase from, int skipCount) =>
_memMove16(start, count, from, skipCount);
}
@pragma("vm:entry-point")
@ -5352,28 +5447,6 @@ void _offsetAlignmentCheck(int offset, int alignment) {
}
}
// Checks the arguments provided to a setRange call. Returns the number
// of elements to copy.
@pragma("vm:entry-point")
int _checkSetRangeArguments(
Iterable to, int start, int end, Iterable from, int skipCount) {
// Check ranges.
if (0 > start || start > end || end > to.length) {
RangeError.checkValidRange(start, end, to.length); // Always throws.
assert(false);
}
if (skipCount < 0) {
throw RangeError.range(skipCount, 0, null, "skipCount");
}
final count = end - start;
if ((from.length - skipCount) < count) {
throw IterableElementError.tooFew();
}
return count;
}
@patch
abstract class UnmodifiableByteBufferView implements Uint8List {
@patch