Revert "Reland "Temporary revert of two changes which potentially caused performance regressions in Flutter.""

The change was already reverted and I accidentally relanded this.

Change-Id: I62ff3367db7aaaf3a470727fdb587830a23fe53c
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/98566
Reviewed-by: Martin Kustermann <kustermann@google.com>
Martin Kustermann 2019-04-03 12:50:57 +00:00
parent debd918696
commit 592aee4838
60 changed files with 1170 additions and 448 deletions

View file

@@ -203,20 +203,16 @@ CLASS_LIST_TYPED_DATA(TYPED_DATA_NEW_NATIVE)
#undef TYPED_DATA_NEW_NATIVE
#undef TYPED_DATA_NEW
#define TYPED_DATA_VIEW_NEW(native_name, cid_name) \
#define TYPED_DATA_VIEW_NEW(native_name, cid) \
DEFINE_NATIVE_ENTRY(native_name, 0, 4) { \
GET_NON_NULL_NATIVE_ARGUMENT(Instance, typed_data, \
GET_NON_NULL_NATIVE_ARGUMENT(TypedDataBase, typed_data, \
arguments->NativeArgAt(1)); \
GET_NON_NULL_NATIVE_ARGUMENT(Smi, offset, arguments->NativeArgAt(2)); \
GET_NON_NULL_NATIVE_ARGUMENT(Smi, len, arguments->NativeArgAt(3)); \
const intptr_t backing_length = \
typed_data.IsTypedData() \
? TypedData::Cast(typed_data).LengthInBytes() \
: ExternalTypedData::Cast(typed_data).LengthInBytes(); \
const intptr_t cid = cid_name; \
const intptr_t backing_length = typed_data.LengthInBytes(); \
const intptr_t offset_in_bytes = offset.Value(); \
const intptr_t length = len.Value(); \
const intptr_t element_size = TypedDataView::ElementSizeInBytes(cid); \
const intptr_t element_size = TypedDataBase::ElementSizeInBytes(cid); \
AlignmentCheck(offset_in_bytes, element_size); \
LengthCheck(offset_in_bytes + length * element_size, backing_length); \
return TypedDataView::New(cid, typed_data, offset_in_bytes, length); \

View file

@@ -64,6 +64,7 @@ namespace dart {
V(Float32x4) \
V(Int32x4) \
V(Float64x2) \
V(TypedDataBase) \
V(TypedData) \
V(ExternalTypedData) \
V(TypedDataView) \
@@ -177,25 +178,19 @@ enum ClassId {
CLASS_LIST(DEFINE_OBJECT_KIND)
#undef DEFINE_OBJECT_KIND
#define DEFINE_OBJECT_KIND(clazz) kFfi##clazz##Cid,
CLASS_LIST_FFI(DEFINE_OBJECT_KIND)
#undef DEFINE_OBJECT_KIND
// clang-format off
#define DEFINE_OBJECT_KIND(clazz) kTypedData##clazz##Cid,
CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
#define DEFINE_OBJECT_KIND(clazz) kFfi##clazz##Cid,
CLASS_LIST_FFI(DEFINE_OBJECT_KIND)
#undef DEFINE_OBJECT_KIND
#define DEFINE_OBJECT_KIND(clazz) kTypedData##clazz##ViewCid,
#define DEFINE_OBJECT_KIND(clazz) \
kTypedData##clazz##Cid, \
kTypedData##clazz##ViewCid, \
kExternalTypedData##clazz##Cid,
CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
#undef DEFINE_OBJECT_KIND
kByteDataViewCid,
#define DEFINE_OBJECT_KIND(clazz) kExternalTypedData##clazz##Cid,
CLASS_LIST_TYPED_DATA(DEFINE_OBJECT_KIND)
#undef DEFINE_OBJECT_KIND
kByteBufferCid,
// clang-format on
@@ -208,6 +203,11 @@ enum ClassId {
kNumPredefinedCids,
};
// Keep these in sync with the cid numbering above.
const int kTypedDataCidRemainderInternal = 0;
const int kTypedDataCidRemainderView = 1;
const int kTypedDataCidRemainderExternal = 2;
} // namespace dart
#endif // RUNTIME_VM_CLASS_ID_H_
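With the interleaved numbering above, every element type contributes three consecutive cids — internal, view, external — so a cid's variant can be recovered with remainder arithmetic. A minimal sketch of such a predicate (helper name and range bound assumed for illustration; ByteData views sit outside the pattern under their own kByteDataViewCid):

    inline bool IsTypedDataViewCid(intptr_t cid) {
      // Per element type the cid order is: internal, view, external.
      const intptr_t index = cid - kTypedDataInt8ArrayCid;
      return index >= 0 && cid < kByteDataViewCid &&
             (index % 3) == kTypedDataCidRemainderView;
    }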

View file

@@ -3372,6 +3372,7 @@ class TypedDataDeserializationCluster : public DeserializationCluster {
Deserializer::InitializeHeader(
data, cid_, TypedData::InstanceSize(length_in_bytes), is_canonical);
data->ptr()->length_ = Smi::New(length);
data->RecomputeDataField();
uint8_t* cdata = reinterpret_cast<uint8_t*>(data->ptr()->data());
d->ReadBytes(cdata, length_in_bytes);
}
@@ -3381,6 +3382,84 @@ class TypedDataDeserializationCluster : public DeserializationCluster {
const intptr_t cid_;
};
#if !defined(DART_PRECOMPILED_RUNTIME)
class TypedDataViewSerializationCluster : public SerializationCluster {
public:
explicit TypedDataViewSerializationCluster(intptr_t cid)
: SerializationCluster("TypedDataView"), cid_(cid) {}
~TypedDataViewSerializationCluster() {}
void Trace(Serializer* s, RawObject* object) {
RawTypedDataView* view = TypedDataView::RawCast(object);
objects_.Add(view);
PushFromTo(view);
}
void WriteAlloc(Serializer* s) {
const intptr_t count = objects_.length();
s->WriteCid(cid_);
s->WriteUnsigned(count);
for (intptr_t i = 0; i < count; i++) {
RawTypedDataView* view = objects_[i];
s->AssignRef(view);
}
}
void WriteFill(Serializer* s) {
const intptr_t count = objects_.length();
for (intptr_t i = 0; i < count; i++) {
RawTypedDataView* view = objects_[i];
AutoTraceObject(view);
s->Write<bool>(view->IsCanonical());
WriteFromTo(view);
}
}
private:
const intptr_t cid_;
GrowableArray<RawTypedDataView*> objects_;
};
#endif // !DART_PRECOMPILED_RUNTIME
class TypedDataViewDeserializationCluster : public DeserializationCluster {
public:
explicit TypedDataViewDeserializationCluster(intptr_t cid) : cid_(cid) {}
~TypedDataViewDeserializationCluster() {}
void ReadAlloc(Deserializer* d) {
start_index_ = d->next_index();
PageSpace* old_space = d->heap()->old_space();
const intptr_t count = d->ReadUnsigned();
for (intptr_t i = 0; i < count; i++) {
d->AssignRef(
AllocateUninitialized(old_space, TypedDataView::InstanceSize()));
}
stop_index_ = d->next_index();
}
void ReadFill(Deserializer* d) {
for (intptr_t id = start_index_; id < stop_index_; id++) {
RawTypedDataView* view = reinterpret_cast<RawTypedDataView*>(d->Ref(id));
const bool is_canonical = d->Read<bool>();
Deserializer::InitializeHeader(view, cid_, TypedDataView::InstanceSize(),
is_canonical);
ReadFromTo(view);
}
}
void PostLoad(const Array& refs, Snapshot::Kind kind, Zone* zone) {
auto& view = TypedDataView::Handle(zone);
for (intptr_t id = start_index_; id < stop_index_; id++) {
view ^= refs.At(id);
view.RecomputeDataField();
}
}
private:
const intptr_t cid_;
};
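The PostLoad pass is needed because a view's inner data pointer is a raw interior pointer into its backing store, whose final address is only known once deserialization has placed that object. A sketch of what RecomputeDataField presumably does (field names assumed; they are not shown in this diff):

    void RawTypedDataView::RecomputeDataField() {
      // Re-derive the interior pointer: backing store payload plus offset.
      const intptr_t offset = Smi::Value(ptr()->offset_in_bytes_);
      uint8_t* payload = ptr()->typed_data_->ptr()->data_;
      ptr()->data_ = payload + offset;
    }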
#if !defined(DART_PRECOMPILED_RUNTIME)
class ExternalTypedDataSerializationCluster : public SerializationCluster {
public:
@@ -4122,11 +4201,13 @@ SerializationCluster* Serializer::NewClusterForClass(intptr_t cid) {
return NULL;
#else
Zone* Z = zone_;
if ((cid >= kNumPredefinedCids) || (cid == kInstanceCid) ||
RawObject::IsTypedDataViewClassId(cid)) {
if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
Push(isolate()->class_table()->At(cid));
return new (Z) InstanceSerializationCluster(cid);
}
if (RawObject::IsTypedDataViewClassId(cid)) {
return new (Z) TypedDataViewSerializationCluster(cid);
}
if (RawObject::IsExternalTypedDataClassId(cid)) {
return new (Z) ExternalTypedDataSerializationCluster(cid);
}
@@ -4745,10 +4826,12 @@ Deserializer::~Deserializer() {
DeserializationCluster* Deserializer::ReadCluster() {
intptr_t cid = ReadCid();
Zone* Z = zone_;
if ((cid >= kNumPredefinedCids) || (cid == kInstanceCid) ||
RawObject::IsTypedDataViewClassId(cid)) {
if (cid >= kNumPredefinedCids || cid == kInstanceCid) {
return new (Z) InstanceDeserializationCluster(cid);
}
if (RawObject::IsTypedDataViewClassId(cid)) {
return new (Z) TypedDataViewDeserializationCluster(cid);
}
if (RawObject::IsExternalTypedDataClassId(cid)) {
return new (Z) ExternalTypedDataDeserializationCluster(cid);
}

View file

@@ -442,8 +442,8 @@ Definition* AotCallSpecializer::TryOptimizeMod(TemplateDartCall<0>* instr,
Smi::ZoneHandle(Z, Smi::New(modulus - 1)), kUnboxedInt32);
InsertBefore(instr, right_definition, /*env=*/NULL, FlowGraph::kValue);
right_definition = new (Z)
UnboxedIntConverterInstr(kUnboxedInt32, kUnboxedInt64,
new (Z) Value(right_definition), DeoptId::kNone);
IntConverterInstr(kUnboxedInt32, kUnboxedInt64,
new (Z) Value(right_definition), DeoptId::kNone);
InsertBefore(instr, right_definition, /*env=*/NULL, FlowGraph::kValue);
#else
Definition* right_definition = new (Z) UnboxedConstantInstr(

View file

@@ -153,7 +153,7 @@ void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
/* R4: allocation stats address. */ \
__ ldr(R3, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \
__ StoreIntoObjectNoBarrier( \
R0, FieldAddress(R0, target::TypedData::length_offset()), R3); \
R0, FieldAddress(R0, target::TypedDataBase::length_offset()), R3); \
/* Initialize all array elements to 0. */ \
/* R0: new object start as a tagged pointer. */ \
/* R1: new object end address. */ \
@@ -165,6 +165,8 @@ void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
__ LoadImmediate(R8, 0); \
__ mov(R9, Operand(R8)); \
__ AddImmediate(R3, R0, target::TypedData::InstanceSize() - 1); \
__ StoreInternalPointer( \
R0, FieldAddress(R0, target::TypedDataBase::data_field_offset()), R3); \
Label init_loop; \
__ Bind(&init_loop); \
__ AddImmediate(R3, 2 * target::kWordSize); \

View file

@@ -168,7 +168,7 @@ static int GetScaleFactor(intptr_t size) {
/* R1: new object end address. */ \
__ ldr(R2, Address(SP, kArrayLengthStackOffset)); /* Array length. */ \
__ StoreIntoObjectNoBarrier( \
R0, FieldAddress(R0, target::TypedData::length_offset()), R2); \
R0, FieldAddress(R0, target::TypedDataBase::length_offset()), R2); \
/* Initialize all array elements to 0. */ \
/* R0: new object start as a tagged pointer. */ \
/* R1: new object end address. */ \
@@ -177,6 +177,8 @@ static int GetScaleFactor(intptr_t size) {
/* data area to be initialized. */ \
__ mov(R3, ZR); \
__ AddImmediate(R2, R0, target::TypedData::InstanceSize() - 1); \
__ StoreInternalPointer( \
R0, FieldAddress(R0, target::TypedDataBase::data_field_offset()), R2); \
Label init_loop, done; \
__ Bind(&init_loop); \
__ cmp(R2, Operand(R1)); \

View file

@@ -158,7 +158,7 @@ void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
/* EBX: new object end address. */ \
__ movl(EDI, Address(ESP, kArrayLengthStackOffset)); /* Array length. */ \
__ StoreIntoObjectNoBarrier( \
EAX, FieldAddress(EAX, target::TypedData::length_offset()), EDI); \
EAX, FieldAddress(EAX, target::TypedDataBase::length_offset()), EDI); \
/* Initialize all array elements to 0. */ \
/* EAX: new object start as a tagged pointer. */ \
/* EBX: new object end address. */ \
@@ -167,6 +167,9 @@ void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
/* data area to be initialized. */ \
__ xorl(ECX, ECX); /* Zero. */ \
__ leal(EDI, FieldAddress(EAX, target::TypedData::InstanceSize())); \
__ StoreInternalPointer( \
EAX, FieldAddress(EAX, target::TypedDataBase::data_field_offset()), \
EDI); \
Label done, init_loop; \
__ Bind(&init_loop); \
__ cmpl(EDI, EBX); \
@@ -2210,7 +2213,7 @@ void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
// On stack: user tag (+1), return-address (+0).
void AsmIntrinsifier::UserTag_makeCurrent(Assembler* assembler,
Label* normal_ir_body) {
// RDI: Isolate.
// EDI: Isolate.
__ LoadIsolate(EDI);
// EAX: Current user tag.
__ movl(EAX, Address(EDI, target::Isolate::current_tag_offset()));

View file

@@ -160,7 +160,7 @@ void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
/* RCX: new object end address. */ \
__ movq(RDI, Address(RSP, kArrayLengthStackOffset)); /* Array length. */ \
__ StoreIntoObjectNoBarrier( \
RAX, FieldAddress(RAX, target::TypedData::length_offset()), RDI); \
RAX, FieldAddress(RAX, target::TypedDataBase::length_offset()), RDI); \
/* Initialize all array elements to 0. */ \
/* RAX: new object start as a tagged pointer. */ \
/* RCX: new object end address. */ \
@@ -169,6 +169,9 @@ void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
/* data area to be initialized. */ \
__ xorq(RBX, RBX); /* Zero. */ \
__ leaq(RDI, FieldAddress(RAX, target::TypedData::InstanceSize())); \
__ StoreInternalPointer( \
RAX, FieldAddress(RAX, target::TypedDataBase::data_field_offset()), \
RDI); \
Label done, init_loop; \
__ Bind(&init_loop); \
__ cmpq(RDI, RCX); \

View file

@@ -1799,6 +1799,12 @@ void Assembler::StoreIntoObjectNoBarrierOffset(Register object,
}
}
void Assembler::StoreInternalPointer(Register object,
const Address& dest,
Register value) {
str(value, dest);
}
void Assembler::InitializeFieldsNoBarrier(Register object,
Register begin,
Register end,

View file

@@ -808,6 +808,11 @@ class Assembler : public AssemblerBase {
int32_t offset,
const Object& value);
// Stores a non-tagged value into a heap object.
void StoreInternalPointer(Register object,
const Address& dest,
Register value);
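A representative call site, taken from the ARM allocation intrinsic earlier in this commit: the freshly computed payload address is an interior pointer into the object itself, not a tagged reference, so it is stored without a write barrier:

    __ StoreInternalPointer(
        R0, FieldAddress(R0, target::TypedDataBase::data_field_offset()), R3);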
// Store value_even, value_odd, value_even, ... into the words in the address
// range [begin, end), assumed to be uninitialized fields in object (tagged).
// The stores must not need a generational store barrier (e.g., smi/null),
@@ -854,6 +859,13 @@ class Assembler : public AssemblerBase {
Register base,
int32_t offset,
Condition cond = AL);
void StoreFieldToOffset(OperandSize type,
Register reg,
Register base,
int32_t offset,
Condition cond = AL) {
StoreToOffset(type, reg, base, offset - kHeapObjectTag, cond);
}
void LoadSFromOffset(SRegister reg,
Register base,
int32_t offset,

View file

@@ -1136,6 +1136,12 @@ void Assembler::StoreIntoObjectOffsetNoBarrier(Register object,
}
}
void Assembler::StoreInternalPointer(Register object,
const Address& dest,
Register value) {
str(value, dest);
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(RawObject::kClassIdTagPos == 16);
ASSERT(RawObject::kClassIdTagSize == 16);

View file

@@ -1475,6 +1475,11 @@ class Assembler : public AssemblerBase {
int32_t offset,
const Object& value);
// Stores a non-tagged value into a heap object.
void StoreInternalPointer(Register object,
const Address& dest,
Register value);
// Object pool, loading from pool, etc.
void LoadPoolPointer(Register pp = PP);

View file

@@ -1980,6 +1980,12 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
// No store buffer update.
}
void Assembler::StoreInternalPointer(Register object,
const Address& dest,
Register value) {
movl(dest, value);
}
void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
#if defined(DEBUG)
Label done;

View file

@@ -620,6 +620,11 @@ class Assembler : public AssemblerBase {
const Address& dest,
const Object& value);
// Stores a non-tagged value into a heap object.
void StoreInternalPointer(Register object,
const Address& dest,
Register value);
// Stores a Smi value into a heap object field that always contains a Smi.
void StoreIntoSmiField(const Address& dest, Register value);
void ZeroInitSmiField(const Address& dest);

View file

@@ -1389,6 +1389,12 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
StoreObject(dest, value);
}
void Assembler::StoreInternalPointer(Register object,
const Address& dest,
Register value) {
movq(dest, value);
}
void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
#if defined(DEBUG)
Label done;

View file

@@ -744,6 +744,11 @@ class Assembler : public AssemblerBase {
const Address& dest,
const Object& value);
// Stores a non-tagged value into a heap object.
void StoreInternalPointer(Register object,
const Address& dest,
Register value);
// Stores a Smi value into a heap object field that always contains a Smi.
void StoreIntoSmiField(const Address& dest, Register value);
void ZeroInitSmiField(const Address& dest);

View file

@@ -59,16 +59,17 @@ void BlockScheduler::AssignEdgeWeights() const {
return;
}
const Function& function = flow_graph()->parsed_function().function();
const Array& ic_data_array =
Array::Handle(flow_graph()->zone(),
flow_graph()->parsed_function().function().ic_data_array());
Array::Handle(flow_graph()->zone(), function.ic_data_array());
if (Compiler::IsBackgroundCompilation() && ic_data_array.IsNull()) {
// Deferred loading cleared ic_data_array.
Compiler::AbortBackgroundCompilation(
DeoptId::kNone, "BlockScheduler: ICData array cleared");
}
if (ic_data_array.IsNull()) {
DEBUG_ASSERT(Isolate::Current()->HasAttemptedReload());
DEBUG_ASSERT(Isolate::Current()->HasAttemptedReload() ||
function.ForceOptimize());
return;
}
Array& edge_counters = Array::Handle();

View file

@@ -270,6 +270,8 @@ void ConstantPropagator::VisitTailCall(TailCallInstr* instr) {}
void ConstantPropagator::VisitCheckEitherNonSmi(CheckEitherNonSmiInstr* instr) {
}
void ConstantPropagator::VisitStoreUntagged(StoreUntaggedInstr* instr) {}
void ConstantPropagator::VisitStoreIndexedUnsafe(
StoreIndexedUnsafeInstr* instr) {}
@@ -1315,8 +1317,7 @@ void ConstantPropagator::VisitUnboxInt32(UnboxInt32Instr* instr) {
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitUnboxedIntConverter(
UnboxedIntConverterInstr* instr) {
void ConstantPropagator::VisitIntConverter(IntConverterInstr* instr) {
SetValue(instr, non_constant_);
}

View file

@@ -1727,8 +1727,8 @@ void FlowGraph::InsertConversion(Representation from,
const intptr_t deopt_id = (to == kUnboxedInt32) && (deopt_target != NULL)
? deopt_target->DeoptimizationTarget()
: DeoptId::kNone;
converted = new (Z)
UnboxedIntConverterInstr(from, to, use->CopyWithType(), deopt_id);
converted =
new (Z) IntConverterInstr(from, to, use->CopyWithType(), deopt_id);
} else if ((from == kUnboxedInt32) && (to == kUnboxedDouble)) {
converted = new Int32ToDoubleInstr(use->CopyWithType());
} else if ((from == kUnboxedInt64) && (to == kUnboxedDouble) &&

View file

@@ -1770,7 +1770,7 @@ void Instruction::Goto(JoinEntryInstr* entry) {
LinkTo(new GotoInstr(entry, CompilerState::Current().GetNextDeoptId()));
}
bool UnboxedIntConverterInstr::ComputeCanDeoptimize() const {
bool IntConverterInstr::ComputeCanDeoptimize() const {
return (to() == kUnboxedInt32) && !is_truncating() &&
!RangeUtils::Fits(value()->definition()->range(),
RangeBoundary::kRangeBoundaryInt32);
@@ -2575,8 +2575,7 @@ Instruction* CheckStackOverflowInstr::Canonicalize(FlowGraph* flow_graph) {
bool LoadFieldInstr::IsImmutableLengthLoad() const {
switch (slot().kind()) {
case Slot::Kind::kArray_length:
case Slot::Kind::kTypedData_length:
case Slot::Kind::kTypedDataView_length:
case Slot::Kind::kTypedDataBase_length:
case Slot::Kind::kString_length:
return true;
case Slot::Kind::kGrowableObjectArray_length:
@@ -2592,6 +2591,7 @@ bool LoadFieldInstr::IsImmutableLengthLoad() const {
case Slot::Kind::kArgumentsDescriptor_positional_count:
case Slot::Kind::kArgumentsDescriptor_count:
case Slot::Kind::kTypeArguments:
case Slot::Kind::kTypedDataBase_data_field:
case Slot::Kind::kTypedDataView_offset_in_bytes:
case Slot::Kind::kTypedDataView_data:
case Slot::Kind::kGrowableObjectArray_data:
@@ -2927,8 +2927,7 @@ Definition* BoxInt64Instr::Canonicalize(FlowGraph* flow_graph) {
return replacement;
}
UnboxedIntConverterInstr* conv =
value()->definition()->AsUnboxedIntConverter();
IntConverterInstr* conv = value()->definition()->AsIntConverter();
if (conv != NULL) {
Definition* replacement = this;
@@ -2998,7 +2997,7 @@ Definition* UnboxIntegerInstr::Canonicalize(FlowGraph* flow_graph) {
return box_defn->value()->definition();
} else if (from_representation != kTagged) {
// Only operate on explicit unboxed operands.
UnboxedIntConverterInstr* converter = new UnboxedIntConverterInstr(
IntConverterInstr* converter = new IntConverterInstr(
from_representation, representation(),
box_defn->value()->CopyWithType(),
(representation() == kUnboxedInt32) ? GetDeoptId() : DeoptId::kNone);
@@ -3074,11 +3073,10 @@ Definition* UnboxInt64Instr::Canonicalize(FlowGraph* flow_graph) {
return this;
}
Definition* UnboxedIntConverterInstr::Canonicalize(FlowGraph* flow_graph) {
Definition* IntConverterInstr::Canonicalize(FlowGraph* flow_graph) {
if (!HasUses()) return NULL;
UnboxedIntConverterInstr* box_defn =
value()->definition()->AsUnboxedIntConverter();
IntConverterInstr* box_defn = value()->definition()->AsIntConverter();
if ((box_defn != NULL) && (box_defn->representation() == from())) {
if (box_defn->from() == to()) {
// Do not erase truncating conversions from 64-bit value to 32-bit values
@@ -3089,7 +3087,7 @@ Definition* UnboxedIntConverterInstr::Canonicalize(FlowGraph* flow_graph) {
return box_defn->value()->definition();
}
UnboxedIntConverterInstr* converter = new UnboxedIntConverterInstr(
IntConverterInstr* converter = new IntConverterInstr(
box_defn->from(), representation(), box_defn->value()->CopyWithType(),
(to() == kUnboxedInt32) ? GetDeoptId() : DeoptId::kNone);
if ((representation() == kUnboxedInt32) && is_truncating()) {
@@ -3548,6 +3546,7 @@ bool UnboxInstr::CanConvertSmi() const {
switch (representation()) {
case kUnboxedDouble:
case kUnboxedFloat:
case kUnboxedInt32:
case kUnboxedInt64:
return true;
@@ -4669,6 +4668,10 @@ void UnboxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
EmitLoadFromBox(compiler);
break;
case kUnboxedInt32:
EmitLoadInt32FromBoxOrSmi(compiler);
break;
case kUnboxedInt64: {
if (value()->Type()->ToCid() == kSmiCid) {
// Smi -> int64 conversion is more efficient than
@@ -4679,7 +4682,6 @@ void UnboxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
break;
}
default:
UNREACHABLE();
break;
@@ -4904,12 +4906,12 @@ Definition* CheckArrayBoundInstr::Canonicalize(FlowGraph* flow_graph) {
}
intptr_t CheckArrayBoundInstr::LengthOffsetFor(intptr_t class_id) {
if (RawObject::IsExternalTypedDataClassId(class_id)) {
return ExternalTypedData::length_offset();
}
if (RawObject::IsTypedDataClassId(class_id)) {
return TypedData::length_offset();
if (RawObject::IsTypedDataClassId(class_id) ||
RawObject::IsTypedDataViewClassId(class_id) ||
RawObject::IsExternalTypedDataClassId(class_id)) {
return TypedDataBase::length_offset();
}
switch (class_id) {
case kGrowableObjectArrayCid:
return GrowableObjectArray::length_offset();
@@ -5222,7 +5224,7 @@ void NativeCallInstr::SetupNative() {
Representation FfiCallInstr::RequiredInputRepresentation(intptr_t idx) const {
if (idx == TargetAddressIndex()) {
return kUnboxedIntPtr;
return kUnboxedFfiIntPtr;
} else {
return arg_representations_[idx];
}

View file

@@ -382,6 +382,7 @@ struct InstrAttrs {
M(AllocateObject, _) \
M(LoadField, kNoGC) \
M(LoadUntagged, kNoGC) \
M(StoreUntagged, kNoGC) \
M(LoadClassId, kNoGC) \
M(InstantiateType, _) \
M(InstantiateTypeArguments, _) \
@@ -449,7 +450,7 @@ struct InstrAttrs {
M(UnboxUint32, kNoGC) \
M(BoxInt32, _) \
M(UnboxInt32, kNoGC) \
M(UnboxedIntConverter, _) \
M(IntConverter, _) \
M(UnboxedWidthExtender, _) \
M(Deoptimize, kNoGC) \
M(SimdOp, kNoGC)
@@ -1849,7 +1850,7 @@ class Definition : public Instruction {
bool IsInt32Definition() {
return IsBinaryInt32Op() || IsBoxInt32() || IsUnboxInt32() ||
IsUnboxedIntConverter();
IsIntConverter();
}
// Compute compile type for this definition. It is safe to use this
@@ -5097,9 +5098,9 @@ class CreateArrayInstr : public TemplateAllocation<2, Throws> {
DISALLOW_COPY_AND_ASSIGN(CreateArrayInstr);
};
// Note: this instruction must not be moved without the indexed access that
// depends on it (e.g. out of loops). GC may cause collect
// the array while the external data-array is still accessed.
// Note: This instruction must not be moved without the indexed access that
// depends on it (e.g. out of loops). GC may collect the array while the
// external data-array is still accessed.
// TODO(vegorov) enable LICMing this instruction by ensuring that array itself
// is kept alive.
class LoadUntaggedInstr : public TemplateDefinition<1, NoThrow> {
@@ -5124,7 +5125,9 @@ class LoadUntaggedInstr : public TemplateDefinition<1, NoThrow> {
virtual bool ComputeCanDeoptimize() const { return false; }
virtual bool HasUnknownSideEffects() const { return false; }
virtual bool AttributesEqual(Instruction* other) const { return true; }
virtual bool AttributesEqual(Instruction* other) const {
return other->AsLoadUntagged()->offset_ == offset_;
}
private:
intptr_t offset_;
@@ -5132,6 +5135,55 @@ class LoadUntaggedInstr : public TemplateDefinition<1, NoThrow> {
DISALLOW_COPY_AND_ASSIGN(LoadUntaggedInstr);
};
// Stores an untagged value into the given object.
//
// If the untagged value is a derived pointer (e.g. pointer to start of internal
// typed data array backing) then this instruction cannot be moved across
// instructions which can trigger GC, to ensure that
//
// LoadUntagged + Arithmetic + StoreUntagged
//
// are performed atomically.
//
// See kernel_to_il.cc:BuildTypedDataViewFactoryConstructor.
class StoreUntaggedInstr : public TemplateInstruction<2, NoThrow> {
public:
StoreUntaggedInstr(Value* object, Value* value, intptr_t offset)
: offset_(offset) {
SetInputAt(0, object);
SetInputAt(1, value);
}
DECLARE_INSTRUCTION(StoreUntagged)
virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT(idx == 0 || idx == 1);
// The object may be tagged or untagged (for external objects).
if (idx == 0) return kNoRepresentation;
return kUntagged;
}
Value* object() const { return inputs_[0]; }
Value* value() const { return inputs_[1]; }
intptr_t offset() const { return offset_; }
virtual bool ComputeCanDeoptimize() const { return false; }
virtual bool HasUnknownSideEffects() const { return false; }
virtual bool AttributesEqual(Instruction* other) const {
return other->AsStoreUntagged()->offset_ == offset_;
}
intptr_t offset_from_tagged() const {
const bool is_tagged = object()->definition()->representation() == kTagged;
return offset() - (is_tagged ? kHeapObjectTag : 0);
}
private:
intptr_t offset_;
DISALLOW_COPY_AND_ASSIGN(StoreUntaggedInstr);
};
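For reference, offset_from_tagged() folds the pointer tag into the displacement so each backend can emit one plain store. Worked example, assuming kHeapObjectTag == 1: a field at offset 8 of a tagged object pointer p sits at machine address p + 7, while for an untagged (external) base it sits at p + 8. The backends below encode exactly that displacement, e.g. on ARM:

    __ StoreToOffset(kWord, value, obj, instr->offset_from_tagged());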
class LoadClassIdInstr : public TemplateDefinition<1, NoThrow, Pure> {
public:
explicit LoadClassIdInstr(Value* object) { SetInputAt(0, object); }
@@ -5655,6 +5707,7 @@ class UnboxInstr : public TemplateDefinition<1, NoThrow, Pure> {
bool CanConvertSmi() const;
void EmitLoadFromBox(FlowGraphCompiler* compiler);
void EmitSmiConversion(FlowGraphCompiler* compiler);
void EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler);
void EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler);
void EmitLoadFromBoxWithDeopt(FlowGraphCompiler* compiler);
@@ -7459,21 +7512,23 @@ class CheckConditionInstr : public Instruction {
DISALLOW_COPY_AND_ASSIGN(CheckConditionInstr);
};
class UnboxedIntConverterInstr : public TemplateDefinition<1, NoThrow, Pure> {
class IntConverterInstr : public TemplateDefinition<1, NoThrow, Pure> {
public:
UnboxedIntConverterInstr(Representation from,
Representation to,
Value* value,
intptr_t deopt_id)
IntConverterInstr(Representation from,
Representation to,
Value* value,
intptr_t deopt_id)
: TemplateDefinition(deopt_id),
from_representation_(from),
to_representation_(to),
is_truncating_(to == kUnboxedUint32) {
ASSERT(from != to);
ASSERT((from == kUnboxedInt64) || (from == kUnboxedUint32) ||
(from == kUnboxedInt32));
ASSERT((to == kUnboxedInt64) || (to == kUnboxedUint32) ||
(to == kUnboxedInt32));
ASSERT(from == kUnboxedInt64 || from == kUnboxedUint32 ||
from == kUnboxedInt32 || from == kUntagged);
ASSERT(to == kUnboxedInt64 || to == kUnboxedUint32 || to == kUnboxedInt32 ||
to == kUntagged);
ASSERT(from != kUntagged || to == kUnboxedIntPtr);
ASSERT(to != kUntagged || from == kUnboxedIntPtr);
SetInputAt(0, value);
}
@@ -7497,8 +7552,8 @@ class UnboxedIntConverterInstr : public TemplateDefinition<1, NoThrow, Pure> {
}
virtual bool AttributesEqual(Instruction* other) const {
ASSERT(other->IsUnboxedIntConverter());
UnboxedIntConverterInstr* converter = other->AsUnboxedIntConverter();
ASSERT(other->IsIntConverter());
auto converter = other->AsIntConverter();
return (converter->from() == from()) && (converter->to() == to()) &&
(converter->is_truncating() == is_truncating());
}
@@ -7510,7 +7565,7 @@ class UnboxedIntConverterInstr : public TemplateDefinition<1, NoThrow, Pure> {
return CompileType::Int();
}
DECLARE_INSTRUCTION(UnboxedIntConverter);
DECLARE_INSTRUCTION(IntConverter);
PRINT_OPERANDS_TO_SUPPORT
@@ -7519,7 +7574,7 @@ class UnboxedIntConverterInstr : public TemplateDefinition<1, NoThrow, Pure> {
const Representation to_representation_;
bool is_truncating_;
DISALLOW_COPY_AND_ASSIGN(UnboxedIntConverterInstr);
DISALLOW_COPY_AND_ASSIGN(IntConverterInstr);
};
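The widened asserts admit exactly two untagged pairings. For illustration, on a 64-bit target where kUnboxedIntPtr == kUnboxedInt64 (variable names hypothetical):

    // Allowed: a raw pointer into word-sized integer arithmetic and back.
    new (Z) IntConverterInstr(kUntagged, kUnboxedIntPtr, v, DeoptId::kNone);
    new (Z) IntConverterInstr(kUnboxedIntPtr, kUntagged, v, DeoptId::kNone);
    // Rejected by the constructor asserts: untagged paired with a
    // representation narrower than the word size.
    new (Z) IntConverterInstr(kUntagged, kUnboxedUint32, v, DeoptId::kNone);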
// Sign- or zero-extends an integer in unboxed 32-bit representation.

View file

@@ -1066,6 +1066,10 @@ void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) {
__ StoreToOffset(kWord, value, obj, instr->offset_from_tagged());
}
LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@@ -4084,6 +4088,8 @@ LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
if (representation() == kUnboxedInt64) {
summary->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
} else if (representation() == kUnboxedInt32) {
summary->set_out(0, Location::RequiresRegister());
} else {
summary->set_out(0, Location::RequiresFpuRegister());
}
@@ -4156,6 +4162,15 @@ void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
}
}
void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
Label done;
__ SmiUntag(result, value, &done);
__ LoadFieldFromOffset(kWord, result, value, Mint::value_offset());
__ Bind(&done);
}
void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
const Register box = locs()->in(0).reg();
PairLocation* result = locs()->out(0).AsPairLocation();
@@ -6513,32 +6528,48 @@ void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ mvn(out, Operand(left));
}
LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
if (from() == kUnboxedInt64) {
ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
if (from() == kUntagged || to() == kUntagged) {
ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) ||
(from() == kUnboxedIntPtr && to() == kUntagged));
ASSERT(!CanDeoptimize());
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
} else if (from() == kUnboxedInt64) {
ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
summary->set_in(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
summary->set_out(0, Location::RequiresRegister());
} else if (to() == kUnboxedInt64) {
ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::Pair(Location::RequiresRegister(),
Location::RequiresRegister()));
} else {
ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
}
return summary;
}
void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const bool is_nop_conversion =
(from() == kUntagged && to() == kUnboxedInt32) ||
(from() == kUntagged && to() == kUnboxedUint32) ||
(from() == kUnboxedInt32 && to() == kUntagged) ||
(from() == kUnboxedUint32 && to() == kUntagged);
if (is_nop_conversion) {
ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
return;
}
if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
const Register out = locs()->out(0).reg();
// Representations are bitwise equivalent.

View file

@@ -1053,6 +1053,10 @@ void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) {
__ StoreToOffset(value, obj, instr->offset_from_tagged());
}
LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@@ -3623,7 +3627,8 @@ void BoxInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
const bool is_floating_point = representation() != kUnboxedInt64;
const bool is_floating_point =
representation() != kUnboxedInt64 && representation() != kUnboxedInt32;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
@@ -3692,6 +3697,18 @@ void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
}
}
void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
ASSERT(value != result);
Label done;
__ SmiUntag(result, value);
__ BranchIfSmi(value, &done);
__ ldr(result, FieldAddress(value, Mint::value_offset()), kWord);
__ LoadFieldFromOffset(result, value, Mint::value_offset());
__ Bind(&done);
}
void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
@@ -5839,19 +5856,23 @@ void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr)
LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
if (from() == kUnboxedInt64) {
ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
if (from() == kUntagged || to() == kUntagged) {
ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) ||
(from() == kUnboxedIntPtr && to() == kUntagged));
ASSERT(!CanDeoptimize());
} else if (from() == kUnboxedInt64) {
ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
} else if (to() == kUnboxedInt64) {
ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
ASSERT(from() == kUnboxedInt32 || from() == kUnboxedUint32);
} else {
ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
}
summary->set_in(0, Location::RequiresRegister());
if (CanDeoptimize()) {
@@ -5862,8 +5883,17 @@ LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Zone* zone,
return summary;
}
void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(from() != to()); // We don't convert from a representation to itself.
const bool is_nop_conversion =
(from() == kUntagged && to() == kUnboxedIntPtr) ||
(from() == kUnboxedIntPtr && to() == kUntagged);
if (is_nop_conversion) {
ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
return;
}
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
Label* deopt = !CanDeoptimize() ? NULL

View file

@@ -46,7 +46,7 @@ DECLARE_FLAG(int, optimization_counter_threshold);
M(SpeculativeShiftUint32Op) \
M(TruncDivMod) \
M(UnaryUint32Op) \
M(UnboxedIntConverter) \
M(IntConverter) \
M(UnboxedWidthExtender)
// List of instructions that are not used by DBC.
@@ -1167,6 +1167,18 @@ EMIT_NATIVE_CODE(LoadUntagged, 1, Location::RequiresRegister()) {
}
}
EMIT_NATIVE_CODE(StoreUntagged, 1, Location::RequiresRegister()) {
const Register obj = locs()->in(0).reg();
const Register value = locs()->out(0).reg();
const auto offset_in_words = offset() / kWordSize;
if (object()->definition()->representation() == kUntagged) {
__ StoreUntagged(obj, offset_in_words, value);
} else {
ASSERT(object()->definition()->representation() == kTagged);
__ StoreField(obj, offset_in_words, value);
}
}
EMIT_NATIVE_CODE(BooleanNegate, 1, Location::RequiresRegister()) {
if (compiler->is_optimizing()) {
__ BooleanNegate(locs()->out(0).reg(), locs()->in(0).reg());

View file

@@ -1058,6 +1058,10 @@ void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) {
__ movl(Address(obj, instr->offset_from_tagged()), value);
}
LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@@ -3508,6 +3512,8 @@ LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
if (representation() == kUnboxedInt64) {
summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
Location::RegisterLocation(EDX)));
} else if (representation() == kUnboxedInt32) {
summary->set_out(0, Location::SameAsFirstInput());
} else {
summary->set_out(0, Location::RequiresFpuRegister());
}
@@ -3583,6 +3589,17 @@ void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
}
}
void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
ASSERT(value == result);
Label done;
__ SmiUntag(value); // Leaves CF after SmiUntag.
__ j(NOT_CARRY, &done, Assembler::kNearJump);
__ movl(result, FieldAddress(value, Mint::value_offset()));
__ Bind(&done);
}
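The carry-flag test works because SmiUntag on ia32 is a one-bit arithmetic right shift (assuming the usual kSmiTagSize == 1), which moves the tag bit into CF:

    __ sarl(value, Immediate(1));  // SmiUntag; CF := shifted-out tag bit
    // CF == 0 means the operand was a Smi, so the Mint load is skipped.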
void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
const Register box = locs()->in(0).reg();
PairLocation* result = locs()->out(0).AsPairLocation();
@@ -5957,14 +5974,21 @@ void UnaryUint32OpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ notl(out);
}
LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
if ((from() == kUnboxedInt32 || from() == kUnboxedUint32) &&
(to() == kUnboxedInt32 || to() == kUnboxedUint32)) {
if (from() == kUntagged || to() == kUntagged) {
ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) ||
(from() == kUnboxedIntPtr && to() == kUntagged));
ASSERT(!CanDeoptimize());
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
} else if ((from() == kUnboxedInt32 || from() == kUnboxedUint32) &&
(to() == kUnboxedInt32 || to() == kUnboxedUint32)) {
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
} else if (from() == kUnboxedInt64) {
@@ -5982,10 +6006,19 @@ LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Zone* zone,
summary->set_out(0, Location::Pair(Location::RegisterLocation(EAX),
Location::RegisterLocation(EDX)));
}
return summary;
}
void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const bool is_nop_conversion =
(from() == kUntagged && to() == kUnboxedIntPtr) ||
(from() == kUnboxedIntPtr && to() == kUntagged);
if (is_nop_conversion) {
ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
return;
}
if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
// Representations are bitwise equivalent.
ASSERT(locs()->out(0).reg() == locs()->in(0).reg());

View file

@@ -985,7 +985,7 @@ void UnboxIntegerInstr::PrintOperandsTo(BufferFormatter* f) const {
Definition::PrintOperandsTo(f);
}
void UnboxedIntConverterInstr::PrintOperandsTo(BufferFormatter* f) const {
void IntConverterInstr::PrintOperandsTo(BufferFormatter* f) const {
f->Print("%s->%s%s, ", RepresentationToCString(from()),
RepresentationToCString(to()), is_truncating() ? "[tr]" : "");
Definition::PrintOperandsTo(f);

View file

@@ -1069,6 +1069,10 @@ void LoadUntaggedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
DEFINE_BACKEND(StoreUntagged, (NoLocation, Register obj, Register value)) {
__ movq(Address(obj, instr->offset_from_tagged()), value);
}
LocationSummary* LoadClassIdInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
@@ -3821,7 +3825,7 @@ LocationSummary* UnboxInstr::MakeLocationSummary(Zone* zone, bool opt) const {
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, needs_writable_input ? Location::WritableRegister()
: Location::RequiresRegister());
if (representation() == kUnboxedInt64) {
if (representation() == kUnboxedInt64 || representation() == kUnboxedInt32) {
summary->set_out(0, Location::SameAsFirstInput());
} else {
summary->set_out(0, Location::RequiresFpuRegister());
@@ -3890,6 +3894,17 @@ void UnboxInstr::EmitSmiConversion(FlowGraphCompiler* compiler) {
}
}
void UnboxInstr::EmitLoadInt32FromBoxOrSmi(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
ASSERT(value == result);
Label done;
__ SmiUntag(value);
__ j(NOT_CARRY, &done, Assembler::kNearJump);
__ movsxw(result, Address(value, TIMES_2, Mint::value_offset()));
__ Bind(&done);
}
void UnboxInstr::EmitLoadInt64FromBoxOrSmi(FlowGraphCompiler* compiler) {
const Register value = locs()->in(0).reg();
const Register result = locs()->out(0).reg();
@@ -6132,30 +6147,40 @@ DEFINE_BACKEND(UnaryUint32Op, (SameAsFirstInput, Register value)) {
DEFINE_UNIMPLEMENTED_INSTRUCTION(BinaryInt32OpInstr)
LocationSummary* UnboxedIntConverterInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
LocationSummary* IntConverterInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
if (from() == kUnboxedInt64) {
ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
if (from() == kUntagged || to() == kUntagged) {
ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) ||
(from() == kUnboxedIntPtr && to() == kUntagged));
ASSERT(!CanDeoptimize());
} else if (from() == kUnboxedInt64) {
ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
} else if (to() == kUnboxedInt64) {
ASSERT((from() == kUnboxedInt32) || (from() == kUnboxedUint32));
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
ASSERT(from() == kUnboxedInt32 || from() == kUnboxedUint32);
} else {
ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
ASSERT((from() == kUnboxedUint32) || (from() == kUnboxedInt32));
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
ASSERT(from() == kUnboxedUint32 || from() == kUnboxedInt32);
}
summary->set_in(0, Location::RequiresRegister());
summary->set_out(0, Location::SameAsFirstInput());
return summary;
}
void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
void IntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const bool is_nop_conversion =
(from() == kUntagged && to() == kUnboxedIntPtr) ||
(from() == kUnboxedIntPtr && to() == kUntagged);
if (is_nop_conversion) {
ASSERT(locs()->in(0).reg() == locs()->out(0).reg());
return;
}
if (from() == kUnboxedInt32 && to() == kUnboxedUint32) {
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
@@ -6176,7 +6201,7 @@ void UnboxedIntConverterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ j(NEGATIVE, deopt);
}
} else if (from() == kUnboxedInt64) {
ASSERT((to() == kUnboxedUint32) || (to() == kUnboxedInt32));
ASSERT(to() == kUnboxedUint32 || to() == kUnboxedInt32);
const Register value = locs()->in(0).reg();
const Register out = locs()->out(0).reg();
if (!CanDeoptimize()) {

View file

@@ -34,13 +34,20 @@ enum Representation {
kNumRepresentations
};
// 'UnboxedIntPtr' should be able to hold a pointer of the target word-size. On
// a 32-bit platform, it's an unsigned 32-bit int because it should be
// 'UnboxedFfiIntPtr' should be able to hold a pointer of the target word-size.
// On a 32-bit platform, it's an unsigned 32-bit int because it should be
// zero-extended to 64-bits, not sign-extended (pointers are inherently
// unsigned).
static constexpr Representation kUnboxedIntPtr =
//
// Issue(36370): Use [kUnboxedIntPtr] instead.
static constexpr Representation kUnboxedFfiIntPtr =
compiler::target::kWordSize == 4 ? kUnboxedUint32 : kUnboxedInt64;
// The representation which can be used for native pointers. We use signed 32/64
// bit representation to be able to do arithmetic on pointers.
static constexpr Representation kUnboxedIntPtr =
compiler::target::kWordSize == 4 ? kUnboxedInt32 : kUnboxedInt64;
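Concretely, under the two definitions above (a sketch of the resulting pairs):

    // 32-bit target: FFI pointers zero-extend when widened, VM-internal
    // pointers stay signed so that data + offset arithmetic behaves.
    //   kUnboxedFfiIntPtr == kUnboxedUint32, kUnboxedIntPtr == kUnboxedInt32
    // 64-bit target: both are kUnboxedInt64.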
// Location objects are used to connect register allocator and code generator.
// Instruction templates used by code generator have a corresponding
// LocationSummary object which specifies expected location for every input

View file

@@ -2634,8 +2634,7 @@ void LoadFieldInstr::InferRange(RangeAnalysis* analysis, Range* range) {
RangeBoundary::FromConstant(Array::kMaxElements));
break;
case Slot::Kind::kTypedData_length:
case Slot::Kind::kTypedDataView_length:
case Slot::Kind::kTypedDataBase_length:
case Slot::Kind::kTypedDataView_offset_in_bytes:
*range = Range(RangeBoundary::FromConstant(0), RangeBoundary::MaxSmi());
break;
@@ -2662,6 +2661,7 @@ void LoadFieldInstr::InferRange(RangeAnalysis* analysis, Range* range) {
case Slot::Kind::kClosure_function_type_arguments:
case Slot::Kind::kClosure_instantiator_type_arguments:
case Slot::Kind::kPointer_c_memory_address:
case Slot::Kind::kTypedDataBase_data_field:
case Slot::Kind::kTypedDataView_data:
// Not an integer valued field.
UNREACHABLE();
@@ -2900,12 +2900,17 @@ void UnboxInt64Instr::InferRange(RangeAnalysis* analysis, Range* range) {
}
}
void UnboxedIntConverterInstr::InferRange(RangeAnalysis* analysis,
Range* range) {
ASSERT((from() == kUnboxedInt32) || (from() == kUnboxedInt64) ||
(from() == kUnboxedUint32));
ASSERT((to() == kUnboxedInt32) || (to() == kUnboxedInt64) ||
(to() == kUnboxedUint32));
void IntConverterInstr::InferRange(RangeAnalysis* analysis, Range* range) {
if (from() == kUntagged || to() == kUntagged) {
ASSERT((from() == kUntagged && to() == kUnboxedIntPtr) ||
(from() == kUnboxedIntPtr && to() == kUntagged));
} else {
ASSERT(from() == kUnboxedInt32 || from() == kUnboxedInt64 ||
from() == kUnboxedUint32);
ASSERT(to() == kUnboxedInt32 || to() == kUnboxedInt64 ||
to() == kUnboxedUint32);
}
const Range* value_range = value()->definition()->range();
if (Range::IsUnknown(value_range)) {
return;

View file

@@ -70,13 +70,10 @@ const Slot& Slot::GetNativeSlot(Kind kind) {
// Note: should only be called with cids of array-like classes.
const Slot& Slot::GetLengthFieldForArrayCid(intptr_t array_cid) {
if (RawObject::IsExternalTypedDataClassId(array_cid) ||
RawObject::IsTypedDataClassId(array_cid)) {
return GetNativeSlot(Kind::kTypedData_length);
RawObject::IsTypedDataClassId(array_cid) ||
RawObject::IsTypedDataViewClassId(array_cid)) {
return GetNativeSlot(Kind::kTypedDataBase_length);
}
if (RawObject::IsTypedDataViewClassId(array_cid)) {
return GetNativeSlot(Kind::kTypedDataView_length);
}
switch (array_cid) {
case kGrowableObjectArrayCid:
return GetNativeSlot(Kind::kGrowableObjectArray_length);

View file

@@ -58,8 +58,8 @@ class ParsedFunction;
V(Closure, hash, Context, VAR) \
V(GrowableObjectArray, length, Smi, VAR) \
V(GrowableObjectArray, data, Array, VAR) \
V(TypedData, length, Smi, FINAL) \
V(TypedDataView, length, Smi, FINAL) \
V(TypedDataBase, data_field, Dynamic, FINAL) \
V(TypedDataBase, length, Smi, FINAL) \
V(TypedDataView, offset_in_bytes, Smi, FINAL) \
V(TypedDataView, data, Dynamic, FINAL) \
V(String, length, Smi, FINAL) \
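These slot changes mirror the new class hierarchy: length and the untagged data pointer moved into a shared base, while only views keep the backing store and offset. A sketch of the assumed raw-object layout behind the slots (field order illustrative):

    class RawTypedDataBase : public RawInstance {
      uint8_t* data_;   // kTypedDataBase_data_field (untagged interior pointer)
      RawSmi* length_;  // kTypedDataBase_length
    };
    class RawTypedData : public RawTypedDataBase {};          // inline payload
    class RawExternalTypedData : public RawTypedDataBase {};  // C-heap payload
    class RawTypedDataView : public RawTypedDataBase {
      RawTypedDataBase* typed_data_;  // kTypedDataView_data (backing store)
      RawSmi* offset_in_bytes_;       // kTypedDataView_offset_in_bytes
    };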

View file

@@ -68,7 +68,7 @@ Representation TypeRepresentation(const AbstractType& result_type) {
case kFfiIntPtrCid:
case kFfiPointerCid:
default: // Subtypes of Pointer.
return kUnboxedIntPtr;
return kUnboxedFfiIntPtr;
}
}

View file

@@ -6,6 +6,7 @@
#include "vm/compiler/frontend/flow_graph_builder.h" // For InlineExitCollector.
#include "vm/compiler/jit/compiler.h" // For Compiler::IsBackgroundCompilation().
#include "vm/compiler/runtime_api.h"
#if !defined(DART_PRECOMPILED_RUNTIME)
@@ -323,6 +324,62 @@ Fragment BaseFlowGraphBuilder::LoadIndexed(intptr_t index_scale) {
return Fragment(instr);
}
Fragment BaseFlowGraphBuilder::LoadUntagged(intptr_t offset) {
Value* object = Pop();
auto load = new (Z) LoadUntaggedInstr(object, offset);
Push(load);
return Fragment(load);
}
Fragment BaseFlowGraphBuilder::StoreUntagged(intptr_t offset) {
Value* value = Pop();
Value* object = Pop();
auto store = new (Z) StoreUntaggedInstr(object, value, offset);
return Fragment(store);
}
Fragment BaseFlowGraphBuilder::ConvertUntaggedToIntptr() {
Value* value = Pop();
auto converted = new (Z)
IntConverterInstr(kUntagged, kUnboxedIntPtr, value, DeoptId::kNone);
converted->mark_truncating();
Push(converted);
return Fragment(converted);
}
Fragment BaseFlowGraphBuilder::ConvertIntptrToUntagged() {
Value* value = Pop();
auto converted = new (Z)
IntConverterInstr(kUnboxedIntPtr, kUntagged, value, DeoptId::kNone);
converted->mark_truncating();
Push(converted);
return Fragment(converted);
}
Fragment BaseFlowGraphBuilder::AddIntptrIntegers() {
Value* right = Pop();
Value* left = Pop();
#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_X64)
auto add = new (Z) BinaryInt64OpInstr(
Token::kADD, left, right, DeoptId::kNone, Instruction::kNotSpeculative);
#else
auto add =
new (Z) BinaryInt32OpInstr(Token::kADD, left, right, DeoptId::kNone);
#endif
add->mark_truncating();
Push(add);
return Fragment(add);
}
Fragment BaseFlowGraphBuilder::UnboxSmiToIntptr() {
Value* value = Pop();
auto untagged = new (Z)
UnboxIntegerInstr(kUnboxedIntPtr, UnboxIntegerInstr::kNoTruncation, value,
DeoptId::kNone, Instruction::kNotSpeculative);
Push(untagged);
return Fragment(untagged);
}
Fragment BaseFlowGraphBuilder::LoadField(const Field& field) {
return LoadNativeField(Slot::Get(MayCloneField(field), parsed_function_));
}

View file

@@ -136,6 +136,14 @@ class BaseFlowGraphBuilder {
Fragment LoadNativeField(const Slot& native_field);
Fragment LoadIndexed(intptr_t index_scale);
Fragment LoadUntagged(intptr_t offset);
Fragment StoreUntagged(intptr_t offset);
Fragment ConvertUntaggedToIntptr();
Fragment ConvertIntptrToUntagged();
Fragment UnboxSmiToIntptr();
Fragment AddIntptrIntegers();
void SetTempIndex(Definition* definition);
Fragment LoadLocal(LocalVariable* variable);

View file

@@ -892,9 +892,11 @@ void BytecodeFlowGraphBuilder::BuildNativeCall() {
ASSERT((function().NumParameters() == 1) && !function().IsGeneric());
code_ += B->LoadNativeField(Slot::Array_length());
break;
case MethodRecognizer::kTypedDataLength:
case MethodRecognizer::kTypedListLength:
case MethodRecognizer::kTypedListViewLength:
case MethodRecognizer::kByteDataViewLength:
ASSERT((function().NumParameters() == 1) && !function().IsGeneric());
code_ += B->LoadNativeField(Slot::TypedData_length());
code_ += B->LoadNativeField(Slot::TypedDataBase_length());
break;
case MethodRecognizer::kClassIDgetID:
ASSERT((function().NumParameters() == 1) && !function().IsGeneric());

View file

@@ -931,7 +931,9 @@ RawTypedData* BytecodeReaderHelper::NativeEntry(const Function& function,
case MethodRecognizer::kGrowableArrayLength:
case MethodRecognizer::kObjectArrayLength:
case MethodRecognizer::kImmutableArrayLength:
case MethodRecognizer::kTypedDataLength:
case MethodRecognizer::kTypedListLength:
case MethodRecognizer::kTypedListViewLength:
case MethodRecognizer::kByteDataViewLength:
case MethodRecognizer::kClassIDgetID:
case MethodRecognizer::kGrowableArrayCapacity:
case MethodRecognizer::kListFactory:

View file

@@ -743,6 +743,8 @@ Fragment FlowGraphBuilder::NativeFunctionBody(const Function& function,
const MethodRecognizer::Kind kind = MethodRecognizer::RecognizeKind(function);
bool omit_result_type_check = true;
switch (kind) {
// On simdbc we fall back to natives.
#if !defined(TARGET_ARCH_DBC)
case MethodRecognizer::kTypedData_ByteDataView_factory:
body += BuildTypedDataViewFactoryConstructor(function, kByteDataViewCid);
break;
@@ -802,6 +804,7 @@ Fragment FlowGraphBuilder::NativeFunctionBody(const Function& function,
body += BuildTypedDataViewFactoryConstructor(
function, kTypedDataFloat64x2ArrayViewCid);
break;
#endif // !defined(TARGET_ARCH_DBC)
case MethodRecognizer::kObjectEquals:
body += LoadLocal(parsed_function_->receiver_var());
body += LoadLocal(first_parameter);
@@ -825,14 +828,11 @@ Fragment FlowGraphBuilder::NativeFunctionBody(const Function& function,
body += LoadLocal(parsed_function_->receiver_var());
body += LoadNativeField(Slot::Array_length());
break;
case MethodRecognizer::kTypedDataLength:
body += LoadLocal(parsed_function_->receiver_var());
body += LoadNativeField(Slot::TypedData_length());
break;
case MethodRecognizer::kTypedListLength:
case MethodRecognizer::kTypedListViewLength:
case MethodRecognizer::kByteDataViewLength:
case MethodRecognizer::kTypedDataViewLength:
body += LoadLocal(parsed_function_->receiver_var());
body += LoadNativeField(Slot::TypedDataView_length());
body += LoadNativeField(Slot::TypedDataBase_length());
break;
case MethodRecognizer::kByteDataViewOffsetInBytes:
case MethodRecognizer::kTypedDataViewOffsetInBytes:
@@ -1027,7 +1027,21 @@ Fragment FlowGraphBuilder::BuildTypedDataViewFactoryConstructor(
body += LoadLocal(view_object);
body += LoadLocal(length);
body += StoreInstanceField(token_pos, Slot::TypedDataView_length());
body += StoreInstanceField(token_pos, Slot::TypedDataBase_length());
// Update the inner pointer.
//
// WARNING: Notice that we assume here no GC happens between those 4
// instructions!
body += LoadLocal(view_object);
body += LoadLocal(typed_data);
body += LoadUntagged(TypedDataBase::data_field_offset());
body += ConvertUntaggedToIntptr();
body += LoadLocal(offset_in_bytes);
body += UnboxSmiToIntptr();
body += AddIntptrIntegers();
body += ConvertIntptrToUntagged();
body += StoreUntagged(TypedDataView::data_field_offset());
return body;
}
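Spelled out, the fragment built above appends roughly the following IL (an illustration, not actual compiler output). None of these instructions can allocate, which is why no GC can move typed_data between reading its data pointer and storing the derived pointer into the view:

    v0 <- LoadUntagged(typed_data, TypedDataBase::data_field_offset())
    v1 <- IntConverter(kUntagged -> kUnboxedIntPtr, v0)   // truncating
    v2 <- UnboxInteger(offset_in_bytes)                   // Smi -> kUnboxedIntPtr
    v3 <- BinaryIntOp(+, v1, v2)                          // word-sized, truncating
    v4 <- IntConverter(kUnboxedIntPtr -> kUntagged, v3)
    StoreUntagged(view_object, v4, TypedDataView::data_field_offset())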
@@ -2521,7 +2535,7 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfFfiTrampoline(
if (compiler::ffi::NativeTypeIsPointer(ffi_type)) {
body += LoadAddressFromFfiPointer();
body += UnboxTruncate(kUnboxedIntPtr);
body += UnboxTruncate(kUnboxedFfiIntPtr);
} else {
Representation rep = arg_reps[pos - 1];
body += UnboxTruncate(rep);
@@ -2537,12 +2551,12 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfFfiTrampoline(
thread_, *MakeImplicitClosureScope(
Z, Class::Handle(I->object_store()->ffi_pointer_class()))
->context_variables()[0]));
body += UnboxTruncate(kUnboxedIntPtr);
body += UnboxTruncate(kUnboxedFfiIntPtr);
body += FfiCall(signature, arg_reps, arg_locs);
ffi_type = signature.result_type();
if (compiler::ffi::NativeTypeIsPointer(ffi_type)) {
body += Box(kUnboxedIntPtr);
body += Box(kUnboxedFfiIntPtr);
body += FfiPointerFromAddress(Type::Cast(ffi_type));
} else if (compiler::ffi::NativeTypeIsVoid(ffi_type)) {
body += Drop();

View file

@@ -707,8 +707,16 @@ bool GraphIntrinsifier::Build_StringBaseLength(FlowGraph* flow_graph) {
return BuildLoadField(flow_graph, Slot::String_length());
}
bool GraphIntrinsifier::Build_TypedDataLength(FlowGraph* flow_graph) {
return BuildLoadField(flow_graph, Slot::TypedData_length());
bool GraphIntrinsifier::Build_TypedListLength(FlowGraph* flow_graph) {
return BuildLoadField(flow_graph, Slot::TypedDataBase_length());
}
bool GraphIntrinsifier::Build_TypedListViewLength(FlowGraph* flow_graph) {
return BuildLoadField(flow_graph, Slot::TypedDataBase_length());
}
bool GraphIntrinsifier::Build_ByteDataViewLength(FlowGraph* flow_graph) {
return BuildLoadField(flow_graph, Slot::TypedDataBase_length());
}
bool GraphIntrinsifier::Build_GrowableArrayCapacity(FlowGraph* flow_graph) {

View file

@@ -789,7 +789,7 @@ static RawObject* CompileFunctionHelper(CompilationPipeline* pipeline,
bool optimized,
intptr_t osr_id) {
ASSERT(!FLAG_precompiled_mode);
ASSERT(!optimized || function.WasCompiled());
ASSERT(!optimized || function.WasCompiled() || function.ForceOptimize());
if (function.ForceOptimize()) optimized = true;
LongJumpScope jump;
if (setjmp(*jump.Set()) == 0) {
@@ -1013,8 +1013,8 @@ RawObject* Compiler::CompileFunction(Thread* thread, const Function& function) {
CompilationPipeline* pipeline =
CompilationPipeline::New(thread->zone(), function);
return CompileFunctionHelper(pipeline, function,
/* optimized = */ false, kNoOSRDeoptId);
const bool optimized = function.ForceOptimize();
return CompileFunctionHelper(pipeline, function, optimized, kNoOSRDeoptId);
}
RawError* Compiler::ParseFunction(Thread* thread, const Function& function) {

View file

@@ -42,10 +42,8 @@ namespace dart {
V(_TypedList, _setFloat64, ByteArrayBaseSetFloat64, 0x38a80b0d) \
V(_TypedList, _setFloat32x4, ByteArrayBaseSetFloat32x4, 0x40052c4e) \
V(_TypedList, _setInt32x4, ByteArrayBaseSetInt32x4, 0x07b89f54) \
V(_ByteDataView, get:length, ByteDataViewLength, 0x0) \
V(_ByteDataView, get:offsetInBytes, ByteDataViewOffsetInBytes, 0x0) \
V(_ByteDataView, get:_typedData, ByteDataViewTypedData, 0x0) \
V(_TypedListView, get:length, TypedDataViewLength, 0x0) \
V(_TypedListView, get:offsetInBytes, TypedDataViewOffsetInBytes, 0x0) \
V(_TypedListView, get:_typedData, TypedDataViewTypedData, 0x0) \
V(_ByteDataView, ., TypedData_ByteDataView_factory, 0x0) \
@ -298,7 +296,9 @@ namespace dart {
V(_Int32x4List, []=, Int32x4ArraySetIndexed, 0x31453dab) \
V(_Float64x2List, [], Float64x2ArrayGetIndexed, 0x644a0be1) \
V(_Float64x2List, []=, Float64x2ArraySetIndexed, 0x6b836b0b) \
V(_TypedList, get:length, TypedDataLength, 0x2091c4d8) \
V(_TypedList, get:length, TypedListLength, 0x0) \
V(_TypedListView, get:length, TypedListViewLength, 0x0) \
V(_ByteDataView, get:length, ByteDataViewLength, 0x0) \
V(_Float32x4, get:x, Float32x4ShuffleX, 0x63d1a9fd) \
V(_Float32x4, get:y, Float32x4ShuffleY, 0x203523d9) \
V(_Float32x4, get:z, Float32x4ShuffleZ, 0x13190678) \
@ -372,10 +372,11 @@ namespace dart {
V(Object, ==, ObjectEquals, 0x7b32a55a) \
V(_List, get:length, ObjectArrayLength, 0x25952390) \
V(_ImmutableList, get:length, ImmutableArrayLength, 0x25952390) \
V(_TypedList, get:length, TypedDataLength, 0x2091c4d8) \
V(_TypedListView, get:length, TypedDataViewLength, 0x0) \
V(_TypedListView, get:offsetInBytes, TypedDataViewOffsetInBytes, 0x0) \
V(_TypedListView, get:_typedData, TypedDataViewTypedData, 0x0) \
V(_TypedList, get:length, TypedListLength, 0x0) \
V(_TypedListView, get:length, TypedListViewLength, 0x0) \
V(_ByteDataView, get:length, ByteDataViewLength, 0x0) \
V(_GrowableList, get:length, GrowableArrayLength, 0x18dd86b4) \
V(_GrowableList, get:_capacity, GrowableArrayCapacity, 0x2e04be60) \
V(_GrowableList, add, GrowableListAdd, 0x40b490b8) \

View file

@ -254,6 +254,10 @@ const word RawAbstractType::kTypeStateFinalizedInstantiated =
const word RawObject::kBarrierOverlapShift =
dart::RawObject::kBarrierOverlapShift;
bool RawObject::IsTypedDataClassId(intptr_t cid) {
return dart::RawObject::IsTypedDataClassId(cid);
}
intptr_t ObjectPool::element_offset(intptr_t index) {
return dart::ObjectPool::element_offset(index);
}
@ -494,8 +498,9 @@ word Array::header_size() {
V(TimelineStream, enabled_offset) \
V(TwoByteString, data_offset) \
V(Type, arguments_offset) \
V(TypedDataBase, data_field_offset) \
V(TypedDataBase, length_offset) \
V(TypedData, data_offset) \
V(TypedData, length_offset) \
V(Type, hash_offset) \
V(TypeRef, type_offset) \
V(Type, signature_offset) \

View file

@ -335,6 +335,8 @@ class RawObject : public AllStatic {
static const word kSizeTagMaxSizeTag;
static const word kTagBitsSizeTagPos;
static const word kBarrierOverlapShift;
static bool IsTypedDataClassId(intptr_t cid);
};
class RawAbstractType : public AllStatic {
@ -456,10 +458,15 @@ class GrowableObjectArray : public AllStatic {
static word length_offset();
};
class TypedDataBase : public AllStatic {
public:
static word data_field_offset();
static word length_offset();
};
class TypedData : public AllStatic {
public:
static word data_offset();
static word length_offset();
static word InstanceSize();
};

View file

@ -490,6 +490,10 @@ namespace dart {
// stored in the following Nop instruction. Used to access fields with
// large offsets.
//
// - StoreUntagged rA, B, rC
//
// Like StoreField, but assumes that FP[rC] is untagged.
//
// - StoreFieldTOS D
//
// Store value SP[0] into object SP[-1] at offset (in words) D.
@ -906,6 +910,7 @@ namespace dart {
V(LoadIndexedUntaggedFloat64, A_B_C, reg, reg, reg) \
V(StoreField, A_B_C, reg, num, reg) \
V(StoreFieldExt, A_D, reg, reg, ___) \
V(StoreUntagged, A_B_C, reg, num, reg) \
V(StoreFieldTOS, D, num, ___, ___) \
V(LoadField, A_B_C, reg, reg, num) \
V(LoadFieldExt, A_D, reg, reg, ___) \

View file

@ -3042,7 +3042,7 @@ DART_EXPORT Dart_Handle Dart_ListGetAsBytes(Dart_Handle list,
}
if (RawObject::IsTypedDataViewClassId(obj.GetClassId())) {
const auto& view = TypedDataView::Cast(obj);
if (TypedDataView::ElementSizeInBytes(view) == 1) {
if (view.ElementSizeInBytes() == 1) {
const intptr_t view_length = Smi::Value(view.length());
if (!Utils::RangeCheck(offset, length, view_length)) {
return Api::NewError(

View file

@ -162,9 +162,9 @@ class CompactorTask : public ThreadPool::Task {
// Slides live objects down past free gaps, updates pointers and frees empty
// pages. Keeps cursors pointing to the next free and next live chunks, and
// repeatedly moves the next live chunk to the next free chunk, one block at a
// time, keeping blocks from spanning page boundries (see ForwardingBlock). Free
// space at the end of a page that is too small for the next block is added to
// the freelist.
// time, keeping blocks from spanning page boundaries (see ForwardingBlock).
// Free space at the end of a page that is too small for the next block is
// added to the freelist.
void GCCompactor::Compact(HeapPage* pages,
FreeList* freelist,
Mutex* pages_lock) {
@ -246,6 +246,34 @@ void GCCompactor::Compact(HeapPage* pages,
barrier.Exit();
}
// Update inner pointers in typed data views (needs to be done after all
// threads are done with sliding since we need to access fields of the
// view's backing store)
//
// (If the sliding compactor were single-threaded we could do this during the
// sliding phase: The class id of the backing store could be read from either
// the already-slid object or the not-yet-slid object. With parallel sliding,
// however, there is no safe way to access the backing store's object header.)
{
TIMELINE_FUNCTION_GC_DURATION(thread(),
"ForwardTypedDataViewInternalPointers");
const intptr_t length = typed_data_views_.length();
for (intptr_t i = 0; i < length; ++i) {
auto raw_view = typed_data_views_[i];
const classid_t cid = raw_view->ptr()->typed_data_->GetClassIdMayBeSmi();
// If we have external typed data we can simply skip it, since the backing
// store lives in the C heap and will not move. Otherwise we have to update
// the inner pointer.
if (RawObject::IsTypedDataClassId(cid)) {
raw_view->RecomputeDataFieldForInternalTypedData();
} else {
ASSERT(RawObject::IsExternalTypedDataClassId(cid));
}
}
}
for (intptr_t task_index = 0; task_index < num_tasks; task_index++) {
ASSERT(tails[task_index] != NULL);
}
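A standalone sketch of the fixup performed here by RecomputeDataFieldForInternalTypedData (illustrative names, not VM code): only the view's own fields and the backing store's new address are needed, which is why the fixup is safe to run after parallel sliding has finished:

#include <cstdint>

// new_backing_addr: the address the internal backing store slid to.
// payload_offset:   fixed offset of the payload within the backing object.
// offset_in_bytes:  the view's Smi-decoded offset into the payload.
uint8_t* RecomputeInnerPointer(uintptr_t new_backing_addr,
                               intptr_t payload_offset,
                               intptr_t offset_in_bytes) {
  return reinterpret_cast<uint8_t*>(new_backing_addr) + payload_offset +
         offset_in_bytes;
}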
@ -394,7 +422,7 @@ void CompactorTask::PlanPage(HeapPage* page) {
uword current = page->object_start();
uword end = page->object_end();
ForwardingPage* forwarding_page = page->AllocateForwardingPage();
auto forwarding_page = page->AllocateForwardingPage();
while (current < end) {
current = PlanBlock(current, forwarding_page);
}
@ -404,7 +432,7 @@ void CompactorTask::SlidePage(HeapPage* page) {
uword current = page->object_start();
uword end = page->object_end();
ForwardingPage* forwarding_page = page->forwarding_page();
auto forwarding_page = page->forwarding_page();
while (current < end) {
current = SlideBlock(current, forwarding_page);
}
@ -483,6 +511,10 @@ uword CompactorTask::SlideBlock(uword first_object,
// Slide the object down.
memmove(reinterpret_cast<void*>(new_addr),
reinterpret_cast<void*>(old_addr), size);
if (RawObject::IsTypedDataClassId(new_obj->GetClassId())) {
reinterpret_cast<RawTypedData*>(new_obj)->RecomputeDataField();
}
}
new_obj->ClearMarkBit();
new_obj->VisitPointers(compactor_);
@ -566,6 +598,40 @@ void GCCompactor::ForwardPointer(RawObject** ptr) {
*ptr = new_target;
}
void GCCompactor::VisitTypedDataViewPointers(RawTypedDataView* view,
RawObject** first,
RawObject** last) {
// First we forward all fields of the typed data view.
RawObject* old_backing = view->ptr()->typed_data_;
VisitPointers(first, last);
RawObject* new_backing = view->ptr()->typed_data_;
const bool backing_moved = old_backing != new_backing;
if (backing_moved) {
// The backing store moved, so we *might* need to update the view's inner
// pointer. If the backing store is internal typed data we *have* to update
// it, otherwise (in case of external typed data) we don't have to.
//
// Unfortunately we cannot find out whether the backing store is internal
// or external during the sliding phase: Even though we know the old and new
// location of the backing store, another thread might be responsible for
// moving it, and we have no way to tell when it got moved.
//
// So instead we queue all those views up and fix their inner pointer in a
// final phase after compaction.
MutexLocker ml(&typed_data_view_mutex_);
typed_data_views_.Add(view);
} else {
// The backing store didn't move, so we don't need to update the inner
// pointer.
if (view->ptr()->data_ == 0) {
ASSERT(ValueFromRawSmi(view->ptr()->offset_in_bytes_) == 0 &&
ValueFromRawSmi(view->ptr()->length_) == 0 &&
view->ptr()->typed_data_ == Object::null());
}
}
}
// N.B.: This pointer visitor is not idempotent. We must take care to visit
// each pointer exactly once.
void GCCompactor::VisitPointers(RawObject** first, RawObject** last) {

View file

@ -5,6 +5,8 @@
#ifndef RUNTIME_VM_HEAP_COMPACTOR_H_
#define RUNTIME_VM_HEAP_COMPACTOR_H_
#include "platform/growable_array.h"
#include "vm/allocation.h"
#include "vm/dart_api_state.h"
#include "vm/globals.h"
@ -32,9 +34,14 @@ class GCCompactor : public ValueObject,
void Compact(HeapPage* pages, FreeList* freelist, Mutex* mutex);
private:
friend class CompactorTask;
void SetupImagePageBoundaries();
void ForwardStackPointers();
void ForwardPointer(RawObject** ptr);
void VisitTypedDataViewPointers(RawTypedDataView* view,
RawObject** first,
RawObject** last);
void VisitPointers(RawObject** first, RawObject** last);
void VisitHandle(uword addr);
@ -48,6 +55,11 @@ class GCCompactor : public ValueObject,
// {instructions, data} x {vm isolate, current isolate, shared}
static const intptr_t kMaxImagePages = 6;
ImagePageRange image_page_ranges_[kMaxImagePages];
// The typed data views whose inner pointer must be updated after sliding is
// complete.
Mutex typed_data_view_mutex_;
MallocGrowableArray<RawTypedDataView*> typed_data_views_;
};
} // namespace dart

View file

@ -99,6 +99,44 @@ class ScavengerVisitor : public ObjectPointerVisitor {
bytes_promoted_(0),
visiting_old_object_(NULL) {}
virtual void VisitTypedDataViewPointers(RawTypedDataView* view,
RawObject** first,
RawObject** last) {
// First we forward all fields of the typed data view.
VisitPointers(first, last);
if (view->ptr()->data_ == nullptr) {
ASSERT(ValueFromRawSmi(view->ptr()->offset_in_bytes_) == 0 &&
ValueFromRawSmi(view->ptr()->length_) == 0);
return;
}
// Validate 'this' is a typed data view.
const uword view_header =
*reinterpret_cast<uword*>(RawObject::ToAddr(view));
ASSERT(!IsForwarding(view_header) || view->IsOldObject());
ASSERT(RawObject::IsTypedDataViewClassId(view->GetClassIdMayBeSmi()));
// Validate that the backing store is not a forwarding word.
RawTypedDataBase* td = view->ptr()->typed_data_;
ASSERT(td->IsHeapObject());
const uword td_header = *reinterpret_cast<uword*>(RawObject::ToAddr(td));
ASSERT(!IsForwarding(td_header) || td->IsOldObject());
// We can always obtain the class id from the forwarded backing store.
const classid_t cid = td->GetClassId();
// If we have external typed data we can simply return, since the backing
// store lives in the C heap and will not move.
if (RawObject::IsExternalTypedDataClassId(cid)) {
return;
}
// Now we update the inner pointer.
ASSERT(RawObject::IsTypedDataClassId(cid));
view->RecomputeDataFieldForInternalTypedData();
}
void VisitPointers(RawObject** first, RawObject** last) {
ASSERT(Utils::IsAligned(first, sizeof(*first)));
ASSERT(Utils::IsAligned(last, sizeof(*last)));
@ -208,6 +246,10 @@ class ScavengerVisitor : public ObjectPointerVisitor {
new_obj->ptr()->tags_ = tags;
}
if (RawObject::IsTypedDataClassId(new_obj->GetClassId())) {
reinterpret_cast<RawTypedData*>(new_obj)->RecomputeDataField();
}
// Remember forwarding address.
ForwardTo(raw_addr, new_addr);
}

View file

@ -2069,10 +2069,12 @@ SwitchDispatch:
SP[0] = reinterpret_cast<RawObject**>(
instance->ptr())[Array::length_offset() / kWordSize];
} break;
case MethodRecognizer::kTypedDataLength: {
RawInstance* instance = reinterpret_cast<RawInstance*>(SP[0]);
case MethodRecognizer::kTypedListLength:
case MethodRecognizer::kTypedListViewLength:
case MethodRecognizer::kByteDataViewLength: {
RawInstance* instance = reinterpret_cast<RawTypedDataBase*>(SP[0]);
SP[0] = reinterpret_cast<RawObject**>(
instance->ptr())[TypedData::length_offset() / kWordSize];
instance->ptr())[TypedDataBase::length_offset() / kWordSize];
} break;
case MethodRecognizer::kClassIDgetID: {
SP[0] = InterpreterHelpers::GetClassIdAsSmi(SP[0]);

View file

@ -1249,6 +1249,7 @@ void Object::MakeUnusedSpaceTraversable(const Object& obj,
intptr_t leftover_len = (leftover_size - TypedData::InstanceSize(0));
ASSERT(TypedData::InstanceSize(leftover_len) == leftover_size);
raw->StoreSmi(&(raw->ptr()->length_), Smi::New(leftover_len));
raw->RecomputeDataField();
} else {
// Update the leftover space as a basic object.
ASSERT(leftover_size == Object::InstanceSize());
@ -6494,6 +6495,7 @@ bool Function::IsOptimizable() const {
if (FLAG_precompiled_mode) {
return true;
}
if (ForceOptimize()) return true;
if (is_native()) {
// Native methods don't need to be optimized.
return false;
@ -8707,39 +8709,28 @@ RawObject* Field::EvaluateInitializer() const {
}
static intptr_t GetListLength(const Object& value) {
if (value.IsTypedData()) {
const TypedData& list = TypedData::Cast(value);
return list.Length();
if (value.IsTypedData() || value.IsTypedDataView() ||
value.IsExternalTypedData()) {
return TypedDataBase::Cast(value).Length();
} else if (value.IsArray()) {
const Array& list = Array::Cast(value);
return list.Length();
return Array::Cast(value).Length();
} else if (value.IsGrowableObjectArray()) {
// List length is variable.
return Field::kNoFixedLength;
} else if (value.IsExternalTypedData()) {
// TODO(johnmccutchan): Enable for external typed data.
return Field::kNoFixedLength;
} else if (RawObject::IsTypedDataViewClassId(value.GetClassId())) {
// TODO(johnmccutchan): Enable for typed data views.
return Field::kNoFixedLength;
}
return Field::kNoFixedLength;
}
static intptr_t GetListLengthOffset(intptr_t cid) {
if (RawObject::IsTypedDataClassId(cid)) {
if (RawObject::IsTypedDataClassId(cid) ||
RawObject::IsTypedDataViewClassId(cid) ||
RawObject::IsExternalTypedDataClassId(cid)) {
return TypedData::length_offset();
} else if (cid == kArrayCid || cid == kImmutableArrayCid) {
return Array::length_offset();
} else if (cid == kGrowableObjectArrayCid) {
// List length is variable.
return Field::kUnknownLengthOffset;
} else if (RawObject::IsExternalTypedDataClassId(cid)) {
// TODO(johnmccutchan): Enable for external typed data.
return Field::kUnknownLengthOffset;
} else if (RawObject::IsTypedDataViewClassId(cid)) {
// TODO(johnmccutchan): Enable for typed data views.
return Field::kUnknownLengthOffset;
}
return Field::kUnknownLengthOffset;
}
@ -16494,10 +16485,10 @@ bool Instance::IsValidFieldOffset(intptr_t offset) const {
}
intptr_t Instance::ElementSizeFor(intptr_t cid) {
if (RawObject::IsExternalTypedDataClassId(cid)) {
return ExternalTypedData::ElementSizeInBytes(cid);
} else if (RawObject::IsTypedDataClassId(cid)) {
return TypedData::ElementSizeInBytes(cid);
if (RawObject::IsExternalTypedDataClassId(cid) ||
RawObject::IsTypedDataClassId(cid) ||
RawObject::IsTypedDataViewClassId(cid)) {
return TypedDataBase::ElementSizeInBytes(cid);
}
switch (cid) {
case kArrayCid:
@ -20923,21 +20914,22 @@ const char* Float64x2::ToCString() const {
return OS::SCreate(Thread::Current()->zone(), "[%f, %f]", _x, _y);
}
const intptr_t TypedData::element_size_table[TypedData::kNumElementSizes] = {
1, // kTypedDataInt8ArrayCid.
1, // kTypedDataUint8ArrayCid.
1, // kTypedDataUint8ClampedArrayCid.
2, // kTypedDataInt16ArrayCid.
2, // kTypedDataUint16ArrayCid.
4, // kTypedDataInt32ArrayCid.
4, // kTypedDataUint32ArrayCid.
8, // kTypedDataInt64ArrayCid.
8, // kTypedDataUint64ArrayCid.
4, // kTypedDataFloat32ArrayCid.
8, // kTypedDataFloat64ArrayCid.
16, // kTypedDataFloat32x4ArrayCid.
16, // kTypedDataInt32x4ArrayCid.
16, // kTypedDataFloat64x2ArrayCid,
const intptr_t
TypedDataBase::element_size_table[TypedDataBase::kNumElementSizes] = {
1, // kTypedDataInt8ArrayCid.
1, // kTypedDataUint8ArrayCid.
1, // kTypedDataUint8ClampedArrayCid.
2, // kTypedDataInt16ArrayCid.
2, // kTypedDataUint16ArrayCid.
4, // kTypedDataInt32ArrayCid.
4, // kTypedDataUint32ArrayCid.
8, // kTypedDataInt64ArrayCid.
8, // kTypedDataUint64ArrayCid.
4, // kTypedDataFloat32ArrayCid.
8, // kTypedDataFloat64ArrayCid.
16, // kTypedDataFloat32x4ArrayCid.
16, // kTypedDataInt32x4ArrayCid.
16, // kTypedDataFloat64x2ArrayCid,
};
bool TypedData::CanonicalizeEquals(const Instance& other) const {
@ -20985,14 +20977,15 @@ RawTypedData* TypedData::New(intptr_t class_id,
}
TypedData& result = TypedData::Handle();
{
const intptr_t lengthInBytes = len * ElementSizeInBytes(class_id);
const intptr_t length_in_bytes = len * ElementSizeInBytes(class_id);
RawObject* raw = Object::Allocate(
class_id, TypedData::InstanceSize(lengthInBytes), space);
class_id, TypedData::InstanceSize(length_in_bytes), space);
NoSafepointScope no_safepoint;
result ^= raw;
result.SetLength(len);
result.RecomputeDataField();
if (len > 0) {
memset(result.DataAddr(0), 0, lengthInBytes);
memset(result.DataAddr(0), 0, length_in_bytes);
}
}
return result.raw();
@ -21042,25 +21035,27 @@ RawTypedDataView* TypedDataView::New(intptr_t class_id, Heap::Space space) {
Object::Allocate(class_id, TypedDataView::InstanceSize(), space);
NoSafepointScope no_safepoint;
result ^= raw;
result.clear_typed_data();
result.set_offset_in_bytes(0);
result.set_length(0);
result.Clear();
}
return result.raw();
}
RawTypedDataView* TypedDataView::New(intptr_t class_id,
const Instance& typed_data,
const TypedDataBase& typed_data,
intptr_t offset_in_bytes,
intptr_t length,
Heap::Space space) {
auto& result = TypedDataView::Handle(TypedDataView::New(class_id, space));
result.set_typed_data(typed_data);
result.set_offset_in_bytes(offset_in_bytes);
result.set_length(length);
result.InitializeWith(typed_data, offset_in_bytes, length);
return result.raw();
}
const char* TypedDataBase::ToCString() const {
// There are no instances of RawTypedDataBase.
UNREACHABLE();
return nullptr;
}
const char* TypedDataView::ToCString() const {
auto zone = Thread::Current()->zone();
return OS::SCreate(zone, "TypedDataView(cid: %" Pd ")", GetClassId());

View file

@ -2514,7 +2514,15 @@ class Function : public Object {
// deoptimize, since we won't generate deoptimization info or register
// dependencies. It will be compiled into optimized code immediately when it's
// run.
bool ForceOptimize() const { return IsFfiTrampoline(); }
bool ForceOptimize() const {
return IsFfiTrampoline()
// On DBC we use native calls instead of IR for the view factories (see
// kernel_to_il.cc)
#if !defined(TARGET_ARCH_DBC)
|| IsTypedDataViewFactory()
#endif
;
}
bool CanBeInlined() const;
@ -2698,6 +2706,15 @@ class Function : public Object {
return modifier() != RawFunction::kNoModifier;
}
bool IsTypedDataViewFactory() const {
if (is_native() && kind() == RawFunction::kConstructor) {
// This is a native factory constructor.
const Class& klass = Class::Handle(Owner());
return RawObject::IsTypedDataViewClassId(klass.id());
}
return false;
}
DART_WARN_UNUSED_RESULT
RawError* VerifyCallEntryPoint() const;
@ -6912,7 +6929,7 @@ class Smi : public Integer {
static const intptr_t kMaxValue = kSmiMax;
static const intptr_t kMinValue = kSmiMin;
intptr_t Value() const { return ValueFromRaw(raw_value()); }
intptr_t Value() const { return ValueFromRawSmi(raw()); }
virtual bool Equals(const Instance& other) const;
virtual bool IsZero() const { return Value() == 0; }
@ -6929,9 +6946,10 @@ class Smi : public Integer {
static intptr_t InstanceSize() { return 0; }
static RawSmi* New(intptr_t value) {
intptr_t raw_smi = (value << kSmiTagShift) | kSmiTag;
ASSERT(ValueFromRaw(raw_smi) == value);
return reinterpret_cast<RawSmi*>(raw_smi);
RawSmi* raw_smi =
reinterpret_cast<RawSmi*>((value << kSmiTagShift) | kSmiTag);
ASSERT(ValueFromRawSmi(raw_smi) == value);
return raw_smi;
}
static RawSmi* FromAlignedAddress(uword address) {
@ -6942,7 +6960,7 @@ class Smi : public Integer {
static RawClass* Class();
static intptr_t Value(const RawSmi* raw_smi) {
return ValueFromRaw(reinterpret_cast<uword>(raw_smi));
return ValueFromRawSmi(raw_smi);
}
static intptr_t RawValue(intptr_t value) {
@ -6968,12 +6986,6 @@ class Smi : public Integer {
return -kWordSize;
}
static intptr_t ValueFromRaw(uword raw_value) {
intptr_t value = raw_value;
ASSERT((value & kSmiTagMask) == kSmiTag);
return (value >> kSmiTagShift);
}
static cpp_vtable handle_vtable_;
Smi() : Integer() {}
@ -8299,33 +8311,87 @@ class Float64x2 : public Instance {
friend class Class;
};
class TypedData : public Instance {
class TypedDataBase : public Instance {
public:
// We use 30 bits for the hash code so hashes in a snapshot taken on a
// 64-bit architecture stay in Smi range when loaded on a 32-bit
// architecture.
static const intptr_t kHashBits = 30;
static intptr_t length_offset() {
return OFFSET_OF(RawTypedDataBase, length_);
}
static intptr_t data_field_offset() {
return OFFSET_OF(RawTypedDataBase, data_);
}
RawSmi* length() const { return raw_ptr()->length_; }
intptr_t Length() const {
ASSERT(!IsNull());
return Smi::Value(raw_ptr()->length_);
}
intptr_t ElementSizeInBytes() const {
intptr_t cid = raw()->GetClassId();
return ElementSizeInBytes(cid);
intptr_t LengthInBytes() const {
return ElementSizeInBytes(raw()->GetClassId()) * Length();
}
TypedDataElementType ElementType() const {
intptr_t cid = raw()->GetClassId();
return ElementType(cid);
return ElementType(raw()->GetClassId());
}
intptr_t LengthInBytes() const {
intptr_t cid = raw()->GetClassId();
return (ElementSizeInBytes(cid) * Length());
intptr_t ElementSizeInBytes() const {
return element_size(ElementType(raw()->GetClassId()));
}
static intptr_t ElementSizeInBytes(classid_t cid) {
return element_size(ElementType(cid));
}
static TypedDataElementType ElementType(classid_t cid) {
if (cid == kByteDataViewCid) {
return kUint8ArrayElement;
} else if (RawObject::IsTypedDataClassId(cid)) {
const intptr_t index =
(cid - kTypedDataInt8ArrayCid - kTypedDataCidRemainderInternal) / 3;
return static_cast<TypedDataElementType>(index);
} else if (RawObject::IsTypedDataViewClassId(cid)) {
const intptr_t index =
(cid - kTypedDataInt8ArrayCid - kTypedDataCidRemainderView) / 3;
return static_cast<TypedDataElementType>(index);
} else {
ASSERT(RawObject::IsExternalTypedDataClassId(cid));
const intptr_t index =
(cid - kTypedDataInt8ArrayCid - kTypedDataCidRemainderExternal) / 3;
return static_cast<TypedDataElementType>(index);
}
}
protected:
void SetLength(intptr_t value) const {
ASSERT(value <= Smi::kMaxValue);
StoreSmi(&raw_ptr()->length_, Smi::New(value));
}
private:
friend class Class;
static intptr_t element_size(intptr_t index) {
ASSERT(0 <= index && index < kNumElementSizes);
intptr_t size = element_size_table[index];
ASSERT(size != 0);
return size;
}
static const intptr_t kNumElementSizes =
(kTypedDataFloat64x2ArrayCid - kTypedDataInt8ArrayCid) / 3 + 1;
static const intptr_t element_size_table[kNumElementSizes];
HEAP_OBJECT_IMPLEMENTATION(TypedDataBase, Instance);
};
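A standalone sketch of the cid arithmetic ElementType relies on (cid values made up for illustration): the internal/view/external variants of each element type occupy three consecutive cids, so dividing the distance from the first cid by three recovers the element type, and the remainder recovers the variant:

#include <cassert>

enum FakeCid {
  kInt8 = 100,    // stands in for kTypedDataInt8ArrayCid
  kInt8View,      // kTypedDataInt8ArrayViewCid
  kInt8External,  // kExternalTypedDataInt8ArrayCid
  kUint8,         // kTypedDataUint8ArrayCid == kInt8 + 3
  kUint8View,
  kUint8External,
};

int ElementTypeIndex(int cid) { return (cid - kInt8) / 3; }
int Variant(int cid) { return (cid - kInt8) % 3; }  // 0=internal, 1=view, 2=external

int main() {
  assert(ElementTypeIndex(kUint8View) == 1);  // Uint8 element type
  assert(Variant(kUint8View) == 1);           // view variant
  return 0;
}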
class TypedData : public TypedDataBase {
public:
// We use 30 bits for the hash code so hashes in a snapshot taken on a
// 64-bit architecture stay in Smi range when loaded on a 32-bit
// architecture.
static const intptr_t kHashBits = 30;
void* DataAddr(intptr_t byte_offset) const {
ASSERT((byte_offset == 0) ||
((byte_offset > 0) && (byte_offset < LengthInBytes())));
@ -8364,15 +8430,11 @@ class TypedData : public Instance {
#undef TYPED_GETTER_SETTER
static intptr_t length_offset() { return OFFSET_OF(RawTypedData, length_); }
static intptr_t data_offset() {
return OFFSET_OF_RETURNED_VALUE(RawTypedData, data);
}
static intptr_t data_offset() { return RawTypedData::payload_offset(); }
static intptr_t InstanceSize() {
ASSERT(sizeof(RawTypedData) ==
OFFSET_OF_RETURNED_VALUE(RawTypedData, data));
OFFSET_OF_RETURNED_VALUE(RawTypedData, internal_data));
return 0;
}
@ -8381,16 +8443,6 @@ class TypedData : public Instance {
return RoundedAllocationSize(sizeof(RawTypedData) + lengthInBytes);
}
static intptr_t ElementSizeInBytes(intptr_t class_id) {
ASSERT(RawObject::IsTypedDataClassId(class_id));
return element_size(ElementType(class_id));
}
static TypedDataElementType ElementType(intptr_t class_id) {
ASSERT(RawObject::IsTypedDataClassId(class_id));
return static_cast<TypedDataElementType>(class_id - kTypedDataInt8ArrayCid);
}
static intptr_t MaxElements(intptr_t class_id) {
ASSERT(RawObject::IsTypedDataClassId(class_id));
return (kSmiMax / ElementSizeInBytes(class_id));
@ -8460,9 +8512,7 @@ class TypedData : public Instance {
}
protected:
void SetLength(intptr_t value) const {
StoreSmi(&raw_ptr()->length_, Smi::New(value));
}
void RecomputeDataField() { raw()->RecomputeDataField(); }
private:
// Provides const access to non-pointer, non-aligned data within the object.
@ -8477,48 +8527,18 @@ class TypedData : public Instance {
byte_offset);
}
static intptr_t element_size(intptr_t index) {
ASSERT(0 <= index && index < kNumElementSizes);
intptr_t size = element_size_table[index];
ASSERT(size != 0);
return size;
}
static const intptr_t kNumElementSizes =
kTypedDataFloat64x2ArrayCid - kTypedDataInt8ArrayCid + 1;
static const intptr_t element_size_table[kNumElementSizes];
FINAL_HEAP_OBJECT_IMPLEMENTATION(TypedData, Instance);
FINAL_HEAP_OBJECT_IMPLEMENTATION(TypedData, TypedDataBase);
friend class Class;
friend class ExternalTypedData;
friend class TypedDataView;
};
class ExternalTypedData : public Instance {
class ExternalTypedData : public TypedDataBase {
public:
// Alignment of data when serializing ExternalTypedData in a clustered
// snapshot. Should be independent of word size.
static const int kDataSerializationAlignment = 8;
intptr_t Length() const {
ASSERT(!IsNull());
return Smi::Value(raw_ptr()->length_);
}
intptr_t ElementSizeInBytes() const {
intptr_t cid = raw()->GetClassId();
return ElementSizeInBytes(cid);
}
TypedDataElementType ElementType() const {
intptr_t cid = raw()->GetClassId();
return ElementType(cid);
}
intptr_t LengthInBytes() const {
intptr_t cid = raw()->GetClassId();
return (ElementSizeInBytes(cid) * Length());
}
void* DataAddr(intptr_t byte_offset) const {
ASSERT((byte_offset == 0) ||
((byte_offset > 0) && (byte_offset < LengthInBytes())));
@ -8553,10 +8573,6 @@ class ExternalTypedData : public Instance {
Dart_WeakPersistentHandleFinalizer callback,
intptr_t external_size) const;
static intptr_t length_offset() {
return OFFSET_OF(RawExternalTypedData, length_);
}
static intptr_t data_offset() {
return OFFSET_OF(RawExternalTypedData, data_);
}
@ -8565,17 +8581,6 @@ class ExternalTypedData : public Instance {
return RoundedAllocationSize(sizeof(RawExternalTypedData));
}
static intptr_t ElementSizeInBytes(intptr_t class_id) {
ASSERT(RawObject::IsExternalTypedDataClassId(class_id));
return TypedData::element_size(ElementType(class_id));
}
static TypedDataElementType ElementType(intptr_t class_id) {
ASSERT(RawObject::IsExternalTypedDataClassId(class_id));
return static_cast<TypedDataElementType>(class_id -
kExternalTypedDataInt8ArrayCid);
}
static intptr_t MaxElements(intptr_t class_id) {
ASSERT(RawObject::IsExternalTypedDataClassId(class_id));
return (kSmiMax / ElementSizeInBytes(class_id));
@ -8594,6 +8599,7 @@ class ExternalTypedData : public Instance {
protected:
void SetLength(intptr_t value) const {
ASSERT(value <= Smi::kMaxValue);
StoreSmi(&raw_ptr()->length_, Smi::New(value));
}
@ -8604,16 +8610,16 @@ class ExternalTypedData : public Instance {
}
private:
FINAL_HEAP_OBJECT_IMPLEMENTATION(ExternalTypedData, Instance);
FINAL_HEAP_OBJECT_IMPLEMENTATION(ExternalTypedData, TypedDataBase);
friend class Class;
};
class TypedDataView : public Instance {
class TypedDataView : public TypedDataBase {
public:
static RawTypedDataView* New(intptr_t class_id,
Heap::Space space = Heap::kNew);
static RawTypedDataView* New(intptr_t class_id,
const Instance& typed_data,
const TypedDataBase& typed_data,
intptr_t offset_in_bytes,
intptr_t length,
Heap::Space space = Heap::kNew);
@ -8622,12 +8628,6 @@ class TypedDataView : public Instance {
return RoundedAllocationSize(sizeof(RawTypedDataView));
}
static intptr_t ElementSizeInBytes(const TypedDataView& view_obj) {
ASSERT(!view_obj.IsNull());
intptr_t cid = view_obj.raw()->GetClassId();
return ElementSizeInBytes(cid);
}
static RawInstance* Data(const TypedDataView& view) {
return view.typed_data();
}
@ -8636,8 +8636,6 @@ class TypedDataView : public Instance {
return view.offset_in_bytes();
}
static RawSmi* Length(const TypedDataView& view) { return view.length(); }
static bool IsExternalTypedDataView(const TypedDataView& view_obj) {
const auto& data = Instance::Handle(Data(view_obj));
intptr_t cid = data.raw()->GetClassId();
@ -8650,49 +8648,42 @@ class TypedDataView : public Instance {
return OFFSET_OF(RawTypedDataView, typed_data_);
}
static intptr_t length_offset() {
return OFFSET_OF(RawTypedDataView, length_);
}
static intptr_t offset_in_bytes_offset() {
return OFFSET_OF(RawTypedDataView, offset_in_bytes_);
}
static intptr_t ElementSizeInBytes(intptr_t class_id) {
ASSERT(RawObject::IsTypedDataViewClassId(class_id));
return (class_id == kByteDataViewCid)
? 1
: TypedData::element_size(class_id - kTypedDataInt8ArrayViewCid);
}
RawInstance* typed_data() const { return raw_ptr()->typed_data_; }
void set_typed_data(const Instance& typed_data) {
void InitializeWith(const TypedDataBase& typed_data,
intptr_t offset_in_bytes,
intptr_t length) {
const classid_t cid = typed_data.GetClassId();
ASSERT(RawObject::IsTypedDataClassId(cid) ||
RawObject::IsExternalTypedDataClassId(cid));
StorePointer(&raw_ptr()->typed_data_, typed_data.raw());
}
StoreSmi(&raw_ptr()->length_, Smi::New(length));
StoreSmi(&raw_ptr()->offset_in_bytes_, Smi::New(offset_in_bytes));
void set_length(intptr_t value) {
StorePointer(&raw_ptr()->length_, Smi::New(value));
}
void set_offset_in_bytes(intptr_t value) {
StorePointer(&raw_ptr()->offset_in_bytes_, Smi::New(value));
// Update the inner pointer.
RecomputeDataField();
}
RawSmi* offset_in_bytes() const { return raw_ptr()->offset_in_bytes_; }
RawSmi* length() const { return raw_ptr()->length_; }
private:
void clear_typed_data() {
StorePointer(&raw_ptr()->typed_data_, Instance::RawCast(Object::null()));
void RecomputeDataField() { raw()->RecomputeDataField(); }
void Clear() {
StoreSmi(&raw_ptr()->length_, Smi::New(0));
StoreSmi(&raw_ptr()->offset_in_bytes_, Smi::New(0));
StoreNonPointer(&raw_ptr()->data_, nullptr);
StorePointer(&raw_ptr()->typed_data_,
TypedDataBase::RawCast(Object::null()));
}
FINAL_HEAP_OBJECT_IMPLEMENTATION(TypedDataView, Instance);
FINAL_HEAP_OBJECT_IMPLEMENTATION(TypedDataView, TypedDataBase);
friend class Class;
friend class TypedDataViewDeserializationCluster;
};
class ByteBuffer : public AllStatic {

View file

@ -1358,6 +1358,10 @@ void Float64x2::PrintJSONImpl(JSONStream* stream, bool ref) const {
jsobj.AddProperty("valueAsString", ToCString());
}
void TypedDataBase::PrintJSONImpl(JSONStream* stream, bool ref) const {
UNREACHABLE();
}
void TypedData::PrintJSONImpl(JSONStream* stream, bool ref) const {
JSONObject jsobj(stream);
PrintSharedInstanceJSON(&jsobj, ref);

View file

@ -14,6 +14,8 @@
namespace dart {
class RawSmi;
// Dart VM aligns all objects by 2 words in the old space and misaligns them
// in new space. This makes it possible to distinguish new and old pointers
// by their bits.
//
@ -63,6 +65,12 @@ enum {
kSmiTagShift = 1,
};
inline intptr_t ValueFromRawSmi(const RawSmi* raw_value) {
const intptr_t value = reinterpret_cast<intptr_t>(raw_value);
ASSERT((value & kSmiTagMask) == kSmiTag);
return (value >> kSmiTagShift);
}
} // namespace dart
#endif // RUNTIME_VM_POINTER_TAGGING_H_
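A worked example of the encoding that ValueFromRawSmi undoes (a standalone sketch mirroring the scheme above, not the VM's RawSmi type): encoding shifts the value left by kSmiTagShift, leaving a zero tag bit, and decoding shifts it back arithmetically, so -42 round-trips through the raw word -84:

#include <cassert>
#include <cstdint>

intptr_t EncodeSmi(intptr_t value) { return value << 1; }  // tag bit stays 0

intptr_t DecodeSmi(intptr_t raw) {
  assert((raw & 1) == 0);  // must carry the Smi tag
  return raw >> 1;         // arithmetic shift restores the sign
}

int main() {
  assert(EncodeSmi(-42) == -84);
  assert(DecodeSmi(EncodeSmi(-42)) == -42);
  return 0;
}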

View file

@ -358,6 +358,22 @@ bool RawObject::FindObject(FindObjectVisitor* visitor) {
return Type::InstanceSize(); \
}
// It calls the from() and to() methods on the raw object to get the first and
// last cells that need visiting.
//
// As opposed to [REGULAR_VISITOR], this visitor will call the specialized
// VisitTypedDataViewPointers.
#define TYPED_DATA_VIEW_VISITOR(Type) \
intptr_t Raw##Type::Visit##Type##Pointers(Raw##Type* raw_obj, \
ObjectPointerVisitor* visitor) { \
/* Make sure that we got here with the tagged pointer as this. */ \
ASSERT(raw_obj->IsHeapObject()); \
ASSERT_UNCOMPRESSED(Type); \
visitor->VisitTypedDataViewPointers(raw_obj, raw_obj->from(), \
raw_obj->to()); \
return Type::InstanceSize(); \
}
// For variable length objects. get_length is a code snippet that gets the
// length of the object, which is passed to InstanceSize and the to() method.
#define VARIABLE_VISITOR(Type, get_length) \
@ -434,7 +450,7 @@ REGULAR_VISITOR(ExternalTwoByteString)
COMPRESSED_VISITOR(GrowableObjectArray)
COMPRESSED_VISITOR(LinkedHashMap)
COMPRESSED_VISITOR(ExternalTypedData)
REGULAR_VISITOR(TypedDataView)
TYPED_DATA_VIEW_VISITOR(TypedDataView)
REGULAR_VISITOR(ReceivePort)
REGULAR_VISITOR(StackTrace)
REGULAR_VISITOR(RegExp)
@ -471,6 +487,7 @@ VARIABLE_NULL_VISITOR(OneByteString, Smi::Value(raw_obj->ptr()->length_))
VARIABLE_NULL_VISITOR(TwoByteString, Smi::Value(raw_obj->ptr()->length_))
// Abstract types don't have their visitor called.
UNREACHABLE_VISITOR(AbstractType)
UNREACHABLE_VISITOR(TypedDataBase)
UNREACHABLE_VISITOR(Error)
UNREACHABLE_VISITOR(Number)
UNREACHABLE_VISITOR(Integer)

View file

@ -124,7 +124,7 @@ class RawObject {
kReservedTagPos = 6,
kReservedTagSize = 2,
kSizeTagPos = kReservedTagPos + kReservedTagSize, // = 8
kSizeTagPos = kReservedTagPos + kReservedTagSize, // = 8
kSizeTagSize = 8,
kClassIdTagPos = kSizeTagPos + kSizeTagSize, // = 16
kClassIdTagSize = 16,
@ -702,9 +702,10 @@ class RawObject {
friend class Deserializer;
friend class SnapshotWriter;
friend class String;
friend class Type; // GetClassId
friend class TypedData;
friend class TypedDataView;
friend class Type; // GetClassId
friend class TypedDataBase; // GetClassId
friend class TypedData; // GetClassId
friend class TypedDataView; // GetClassId
friend class WeakProperty; // StorePointer
friend class Instance; // StorePointer
friend class StackFrame; // GetCodeObject assertion.
@ -2065,22 +2066,126 @@ class RawTwoByteString : public RawString {
friend class String;
};
// All _*ArrayView/_ByteDataView classes share the same layout.
class RawTypedDataView : public RawInstance {
RAW_HEAP_OBJECT_IMPLEMENTATION(TypedDataView);
// Abstract base class for RawTypedData/RawExternalTypedData/RawTypedDataView.
class RawTypedDataBase : public RawInstance {
protected:
// The contents of [data_] depend on what concrete subclass is used:
//
// - RawTypedData: Start of the payload.
// - RawExternalTypedData: Start of the C-heap payload.
// - RawTypedDataView: The [data_] field of the backing store for the view
// plus the [offset_in_bytes_] the view has.
//
// During allocation or snapshot reading, [data_] can temporarily be nullptr
// (which is the case for views that have just been created but don't have
// their backing store set yet).
uint8_t* data_;
// The length in elements (the element size in bytes is obtainable via
// [TypedDataBase::ElementSizeInBytes]).
RawSmi* length_;
private:
friend class RawTypedDataView;
RAW_HEAP_OBJECT_IMPLEMENTATION(TypedDataBase);
};
class RawTypedData : public RawTypedDataBase {
RAW_HEAP_OBJECT_IMPLEMENTATION(TypedData);
public:
static intptr_t payload_offset() {
return OFFSET_OF_RETURNED_VALUE(RawTypedData, internal_data);
}
// Recompute [data_] pointer to internal data.
void RecomputeDataField() { ptr()->data_ = ptr()->internal_data(); }
protected:
VISIT_FROM(RawObject*, typed_data_)
RawInstance* typed_data_;
VISIT_FROM(RawCompressed, length_)
VISIT_TO_LENGTH(RawCompressed, &ptr()->length_)
// Variable length data follows here.
uint8_t* internal_data() { OPEN_ARRAY_START(uint8_t, uint8_t); }
const uint8_t* internal_data() const { OPEN_ARRAY_START(uint8_t, uint8_t); }
uint8_t* data() {
ASSERT(data_ == internal_data());
return data_;
}
const uint8_t* data() const {
ASSERT(data_ == internal_data());
return data_;
}
friend class Api;
friend class Instance;
friend class NativeEntryData;
friend class Object;
friend class ObjectPool;
friend class ObjectPoolDeserializationCluster;
friend class ObjectPoolSerializationCluster;
friend class RawObjectPool;
friend class SnapshotReader;
};
// All _*ArrayView/_ByteDataView classes share the same layout.
class RawTypedDataView : public RawTypedDataBase {
RAW_HEAP_OBJECT_IMPLEMENTATION(TypedDataView);
public:
// Recompute [data_] based on internal/external [typed_data_].
void RecomputeDataField() {
const intptr_t offset_in_bytes = ValueFromRawSmi(ptr()->offset_in_bytes_);
uint8_t* payload = ptr()->typed_data_->ptr()->data_;
ptr()->data_ = payload + offset_in_bytes;
}
// Recompute [data_] based on the internal [typed_data_] - needs to be called
// by the GC whenever the backing store has moved.
//
// NOTICE: This method assumes [this] is the forwarded object and the
// [typed_data_] pointer points to the new backing store. The backing store's
// fields don't need to be valid - only its address.
void RecomputeDataFieldForInternalTypedData() {
const intptr_t offset_in_bytes = ValueFromRawSmi(ptr()->offset_in_bytes_);
uint8_t* payload = reinterpret_cast<uint8_t*>(
RawObject::ToAddr(ptr()->typed_data_) + RawTypedData::payload_offset());
ptr()->data_ = payload + offset_in_bytes;
}
void ValidateInnerPointer() {
if (ptr()->typed_data_->GetClassId() == kNullCid) {
// The view object must have just been initialized.
if (ptr()->data_ != nullptr ||
ValueFromRawSmi(ptr()->offset_in_bytes_) != 0 ||
ValueFromRawSmi(ptr()->length_) != 0) {
FATAL("RawTypedDataView has invalid inner pointer.");
}
} else {
const intptr_t offset_in_bytes = ValueFromRawSmi(ptr()->offset_in_bytes_);
uint8_t* payload = ptr()->typed_data_->ptr()->data_;
if ((payload + offset_in_bytes) != ptr()->data_) {
FATAL("RawTypedDataView has invalid inner pointer.");
}
}
}
protected:
VISIT_FROM(RawObject*, length_)
RawTypedDataBase* typed_data_;
RawSmi* offset_in_bytes_;
RawSmi* length_;
VISIT_TO(RawObject*, length_)
VISIT_TO(RawObject*, offset_in_bytes_)
RawObject** to_snapshot(Snapshot::Kind kind) { return to(); }
friend class Api;
friend class Object;
friend class ObjectPoolDeserializationCluster;
friend class ObjectPoolSerializationCluster;
friend class RawObjectPool;
friend class GCCompactor;
friend class ScavengerVisitor;
friend class SnapshotReader;
};
@ -2228,38 +2333,13 @@ COMPILE_ASSERT(sizeof(RawFloat64x2) == 24);
#error Architecture is not 32-bit or 64-bit.
#endif // ARCH_IS_32_BIT
class RawTypedData : public RawInstance {
RAW_HEAP_OBJECT_IMPLEMENTATION(TypedData);
protected:
VISIT_FROM(RawCompressed, length_)
RawSmi* length_;
VISIT_TO_LENGTH(RawCompressed, &ptr()->length_)
// Variable length data follows here.
uint8_t* data() { OPEN_ARRAY_START(uint8_t, uint8_t); }
const uint8_t* data() const { OPEN_ARRAY_START(uint8_t, uint8_t); }
friend class Api;
friend class Instance;
friend class NativeEntryData;
friend class Object;
friend class ObjectPool;
friend class ObjectPoolDeserializationCluster;
friend class ObjectPoolSerializationCluster;
friend class RawObjectPool;
friend class SnapshotReader;
};
class RawExternalTypedData : public RawInstance {
class RawExternalTypedData : public RawTypedDataBase {
RAW_HEAP_OBJECT_IMPLEMENTATION(ExternalTypedData);
protected:
VISIT_FROM(RawCompressed, length_)
RawSmi* length_;
VISIT_TO(RawCompressed, length_)
uint8_t* data_;
friend class RawBytecode;
};
@ -2483,73 +2563,35 @@ inline bool RawObject::IsBuiltinListClassId(intptr_t index) {
inline bool RawObject::IsTypedDataClassId(intptr_t index) {
// Make sure this is updated when new TypedData types are added.
COMPILE_ASSERT(kTypedDataUint8ArrayCid == kTypedDataInt8ArrayCid + 1 &&
kTypedDataUint8ClampedArrayCid == kTypedDataInt8ArrayCid + 2 &&
kTypedDataInt16ArrayCid == kTypedDataInt8ArrayCid + 3 &&
kTypedDataUint16ArrayCid == kTypedDataInt8ArrayCid + 4 &&
kTypedDataInt32ArrayCid == kTypedDataInt8ArrayCid + 5 &&
kTypedDataUint32ArrayCid == kTypedDataInt8ArrayCid + 6 &&
kTypedDataInt64ArrayCid == kTypedDataInt8ArrayCid + 7 &&
kTypedDataUint64ArrayCid == kTypedDataInt8ArrayCid + 8 &&
kTypedDataFloat32ArrayCid == kTypedDataInt8ArrayCid + 9 &&
kTypedDataFloat64ArrayCid == kTypedDataInt8ArrayCid + 10 &&
kTypedDataFloat32x4ArrayCid == kTypedDataInt8ArrayCid + 11 &&
kTypedDataInt32x4ArrayCid == kTypedDataInt8ArrayCid + 12 &&
kTypedDataFloat64x2ArrayCid == kTypedDataInt8ArrayCid + 13 &&
kTypedDataInt8ArrayViewCid == kTypedDataInt8ArrayCid + 14);
return (index >= kTypedDataInt8ArrayCid &&
index <= kTypedDataFloat64x2ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 3 == kTypedDataUint8ArrayCid);
const bool is_typed_data_base =
index >= kTypedDataInt8ArrayCid && index < kByteDataViewCid;
return is_typed_data_base && ((index - kTypedDataInt8ArrayCid) % 3) ==
kTypedDataCidRemainderInternal;
}
inline bool RawObject::IsTypedDataViewClassId(intptr_t index) {
// Make sure this is updated when new TypedData types are added.
COMPILE_ASSERT(
kTypedDataUint8ArrayViewCid == kTypedDataInt8ArrayViewCid + 1 &&
kTypedDataUint8ClampedArrayViewCid == kTypedDataInt8ArrayViewCid + 2 &&
kTypedDataInt16ArrayViewCid == kTypedDataInt8ArrayViewCid + 3 &&
kTypedDataUint16ArrayViewCid == kTypedDataInt8ArrayViewCid + 4 &&
kTypedDataInt32ArrayViewCid == kTypedDataInt8ArrayViewCid + 5 &&
kTypedDataUint32ArrayViewCid == kTypedDataInt8ArrayViewCid + 6 &&
kTypedDataInt64ArrayViewCid == kTypedDataInt8ArrayViewCid + 7 &&
kTypedDataUint64ArrayViewCid == kTypedDataInt8ArrayViewCid + 8 &&
kTypedDataFloat32ArrayViewCid == kTypedDataInt8ArrayViewCid + 9 &&
kTypedDataFloat64ArrayViewCid == kTypedDataInt8ArrayViewCid + 10 &&
kTypedDataFloat32x4ArrayViewCid == kTypedDataInt8ArrayViewCid + 11 &&
kTypedDataInt32x4ArrayViewCid == kTypedDataInt8ArrayViewCid + 12 &&
kTypedDataFloat64x2ArrayViewCid == kTypedDataInt8ArrayViewCid + 13 &&
kByteDataViewCid == kTypedDataInt8ArrayViewCid + 14 &&
kExternalTypedDataInt8ArrayCid == kTypedDataInt8ArrayViewCid + 15);
return (index >= kTypedDataInt8ArrayViewCid && index <= kByteDataViewCid);
COMPILE_ASSERT(kTypedDataInt8ArrayViewCid + 3 == kTypedDataUint8ArrayViewCid);
const bool is_typed_data_base =
index >= kTypedDataInt8ArrayCid && index < kByteDataViewCid;
const bool is_byte_data_view = index == kByteDataViewCid;
return is_byte_data_view ||
(is_typed_data_base &&
((index - kTypedDataInt8ArrayCid) % 3) == kTypedDataCidRemainderView);
}
inline bool RawObject::IsExternalTypedDataClassId(intptr_t index) {
// Make sure this is updated when new ExternalTypedData types are added.
COMPILE_ASSERT(
(kExternalTypedDataUint8ArrayCid == kExternalTypedDataInt8ArrayCid + 1) &&
(kExternalTypedDataUint8ClampedArrayCid ==
kExternalTypedDataInt8ArrayCid + 2) &&
(kExternalTypedDataInt16ArrayCid == kExternalTypedDataInt8ArrayCid + 3) &&
(kExternalTypedDataUint16ArrayCid ==
kExternalTypedDataInt8ArrayCid + 4) &&
(kExternalTypedDataInt32ArrayCid == kExternalTypedDataInt8ArrayCid + 5) &&
(kExternalTypedDataUint32ArrayCid ==
kExternalTypedDataInt8ArrayCid + 6) &&
(kExternalTypedDataInt64ArrayCid == kExternalTypedDataInt8ArrayCid + 7) &&
(kExternalTypedDataUint64ArrayCid ==
kExternalTypedDataInt8ArrayCid + 8) &&
(kExternalTypedDataFloat32ArrayCid ==
kExternalTypedDataInt8ArrayCid + 9) &&
(kExternalTypedDataFloat64ArrayCid ==
kExternalTypedDataInt8ArrayCid + 10) &&
(kExternalTypedDataFloat32x4ArrayCid ==
kExternalTypedDataInt8ArrayCid + 11) &&
(kExternalTypedDataInt32x4ArrayCid ==
kExternalTypedDataInt8ArrayCid + 12) &&
(kExternalTypedDataFloat64x2ArrayCid ==
kExternalTypedDataInt8ArrayCid + 13) &&
(kByteBufferCid == kExternalTypedDataInt8ArrayCid + 14));
return (index >= kExternalTypedDataInt8ArrayCid &&
index <= kExternalTypedDataFloat64x2ArrayCid);
// Make sure this is updated when new TypedData types are added.
COMPILE_ASSERT(kExternalTypedDataInt8ArrayCid + 3 ==
kExternalTypedDataUint8ArrayCid);
const bool is_typed_data_base =
index >= kTypedDataInt8ArrayCid && index < kByteDataViewCid;
return is_typed_data_base && ((index - kTypedDataInt8ArrayCid) % 3) ==
kTypedDataCidRemainderExternal;
}
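Taken together, the three predicates partition the interleaved cid range. A standalone sketch with made-up cid values (the 42 mirrors the 14 element types times 3) that checks every cid lands in exactly one bucket:

#include <cassert>

const int kFirst = 100;                 // stands in for kTypedDataInt8ArrayCid
const int kByteDataView = kFirst + 42;  // stands in for kByteDataViewCid

bool InRange(int cid) { return cid >= kFirst && cid < kByteDataView; }
bool IsInternal(int cid) { return InRange(cid) && (cid - kFirst) % 3 == 0; }
bool IsView(int cid) {
  return cid == kByteDataView || (InRange(cid) && (cid - kFirst) % 3 == 1);
}
bool IsExternal(int cid) { return InRange(cid) && (cid - kFirst) % 3 == 2; }

int main() {
  for (int cid = kFirst; cid <= kByteDataView; cid++) {
    assert(IsInternal(cid) + IsView(cid) + IsExternal(cid) == 1);
  }
  return 0;
}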
inline bool RawObject::IsFfiNativeTypeTypeClassId(intptr_t index) {
@ -2630,11 +2672,31 @@ inline bool RawObject::IsImplicitFieldClassId(intptr_t index) {
inline intptr_t RawObject::NumberOfTypedDataClasses() {
// Make sure this is updated when new TypedData types are added.
COMPILE_ASSERT(kTypedDataInt8ArrayViewCid == kTypedDataInt8ArrayCid + 14);
COMPILE_ASSERT(kExternalTypedDataInt8ArrayCid ==
kTypedDataInt8ArrayViewCid + 15);
COMPILE_ASSERT(kByteBufferCid == kExternalTypedDataInt8ArrayCid + 14);
COMPILE_ASSERT(kNullCid == kByteBufferCid + 1);
// Ensure that each typed data type comes in internal/view/external variants
// next to each other.
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 1 == kTypedDataInt8ArrayViewCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 2 == kExternalTypedDataInt8ArrayCid);
// Ensure that the typed data members are ordered in steps of three.
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 1 * 3 == kTypedDataUint8ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 2 * 3 ==
kTypedDataUint8ClampedArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 3 * 3 == kTypedDataInt16ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 4 * 3 == kTypedDataUint16ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 5 * 3 == kTypedDataInt32ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 6 * 3 == kTypedDataUint32ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 7 * 3 == kTypedDataInt64ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 8 * 3 == kTypedDataUint64ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 9 * 3 == kTypedDataFloat32ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 10 * 3 == kTypedDataFloat64ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 11 * 3 ==
kTypedDataFloat32x4ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 12 * 3 == kTypedDataInt32x4ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 13 * 3 ==
kTypedDataFloat64x2ArrayCid);
COMPILE_ASSERT(kTypedDataInt8ArrayCid + 14 * 3 == kByteDataViewCid);
COMPILE_ASSERT(kByteBufferCid + 1 == kNullCid);
return (kNullCid - kTypedDataInt8ArrayCid);
}

View file

@ -1721,6 +1721,22 @@ void RawFloat64x2::WriteTo(SnapshotWriter* writer,
writer->Write<double>(ptr()->value_[1]);
}
RawTypedDataBase* TypedDataBase::ReadFrom(SnapshotReader* reader,
intptr_t object_id,
intptr_t tags,
Snapshot::Kind kind,
bool as_reference) {
UNREACHABLE(); // TypedDataBase is an abstract class.
return NULL;
}
void RawTypedDataBase::WriteTo(SnapshotWriter* writer,
intptr_t object_id,
Snapshot::Kind kind,
bool as_reference) {
UNREACHABLE(); // TypedDataBase is an abstract class.
}
RawTypedData* TypedData::ReadFrom(SnapshotReader* reader,
intptr_t object_id,
intptr_t tags,
@ -1968,6 +1984,9 @@ void RawTypedDataView::WriteTo(SnapshotWriter* writer,
intptr_t object_id,
Snapshot::Kind kind,
bool as_reference) {
// Views always have a backing store.
ASSERT(ptr()->typed_data_ != Object::null());
// Write out the serialization header value for this object.
writer->WriteInlinedObjectHeader(object_id);
@ -1986,7 +2005,7 @@ RawTypedDataView* TypedDataView::ReadFrom(SnapshotReader* reader,
intptr_t tags,
Snapshot::Kind kind,
bool as_reference) {
auto& typed_data = *reader->InstanceHandle();
auto& typed_data = *reader->TypedDataBaseHandle();
const classid_t cid = RawObject::ClassIdTag::decode(tags);
auto& view = *reader->TypedDataViewHandle();
@ -1996,9 +2015,7 @@ RawTypedDataView* TypedDataView::ReadFrom(SnapshotReader* reader,
const intptr_t offset_in_bytes = reader->ReadSmiValue();
const intptr_t length = reader->ReadSmiValue();
typed_data ^= reader->ReadObjectImpl(as_reference);
view.set_offset_in_bytes(offset_in_bytes);
view.set_length(length);
view.set_typed_data(typed_data);
view.InitializeWith(typed_data, offset_in_bytes, length);
return view.raw();
}

View file

@ -2700,6 +2700,17 @@ SwitchDispatch:
DISPATCH();
}
{
BYTECODE(StoreUntagged, A_B_C);
const uint16_t offset_in_words = rB;
const uint16_t value_reg = rC;
RawInstance* instance = reinterpret_cast<RawInstance*>(FP[rA]);
word value = reinterpret_cast<word>(FP[value_reg]);
reinterpret_cast<word*>(instance->ptr())[offset_in_words] = value;
DISPATCH();
}
{
BYTECODE(StoreFieldTOS, __D);
const uint16_t offset_in_words = rD;

View file

@ -220,6 +220,7 @@ SnapshotReader::SnapshotReader(const uint8_t* buffer,
type_arguments_(TypeArguments::Handle(zone_)),
tokens_(GrowableObjectArray::Handle(zone_)),
data_(ExternalTypedData::Handle(zone_)),
typed_data_base_(TypedDataBase::Handle(zone_)),
typed_data_(TypedData::Handle(zone_)),
typed_data_view_(TypedDataView::Handle(zone_)),
function_(Function::Handle(zone_)),

View file

@ -26,6 +26,7 @@ class ClassTable;
class Closure;
class Code;
class ExternalTypedData;
class TypedDataBase;
class GrowableObjectArray;
class Heap;
class Instance;
@ -310,6 +311,7 @@ class SnapshotReader : public BaseReader {
TypeArguments* TypeArgumentsHandle() { return &type_arguments_; }
GrowableObjectArray* TokensHandle() { return &tokens_; }
ExternalTypedData* DataHandle() { return &data_; }
TypedDataBase* TypedDataBaseHandle() { return &typed_data_base_; }
TypedData* TypedDataHandle() { return &typed_data_; }
TypedDataView* TypedDataViewHandle() { return &typed_data_view_; }
Function* FunctionHandle() { return &function_; }
@ -403,6 +405,7 @@ class SnapshotReader : public BaseReader {
TypeArguments& type_arguments_; // Temporary type argument handle.
GrowableObjectArray& tokens_; // Temporary tokens handle.
ExternalTypedData& data_; // Temporary stream data handle.
TypedDataBase& typed_data_base_; // Temporary typed data base handle.
TypedData& typed_data_; // Temporary typed data handle.
TypedDataView& typed_data_view_; // Temporary typed data view handle.
Function& function_; // Temporary function handle.

View file

@ -15,6 +15,7 @@ namespace dart {
class Isolate;
class RawObject;
class RawFunction;
class RawTypedDataView;
// An object pointer visitor interface.
class ObjectPointerVisitor {
@ -24,6 +25,15 @@ class ObjectPointerVisitor {
Isolate* isolate() const { return isolate_; }
// Visit pointers inside the given typed data [view].
//
// Range of pointers to visit 'first' <= pointer <= 'last'.
virtual void VisitTypedDataViewPointers(RawTypedDataView* view,
RawObject** first,
RawObject** last) {
VisitPointers(first, last);
}
// Range of pointers to visit 'first' <= pointer <= 'last'.
virtual void VisitPointers(RawObject** first, RawObject** last) = 0;
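A standalone sketch of the dispatch pattern this hook enables (illustrative types, not the VM's API): collectors that need the inner-pointer fixup override the view-specific entry point, while all other visitors inherit the forwarding default:

struct RawObject;
struct RawTypedDataView;

class PointerVisitor {
 public:
  virtual ~PointerVisitor() {}
  virtual void VisitPointers(RawObject** first, RawObject** last) = 0;
  // Default: views need no special treatment; just visit their fields.
  virtual void VisitTypedDataViewPointers(RawTypedDataView* view,
                                          RawObject** first,
                                          RawObject** last) {
    VisitPointers(first, last);
  }
};

class CompactingVisitor : public PointerVisitor {
 public:
  void VisitPointers(RawObject** first, RawObject** last) override {
    // ... forward each pointer ...
  }
  void VisitTypedDataViewPointers(RawTypedDataView* view, RawObject** first,
                                  RawObject** last) override {
    VisitPointers(first, last);
    // ... then queue the view / recompute its inner pointer, as above ...
  }
};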

View file

@ -0,0 +1,33 @@
// Copyright (c) 2019, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
// See fragmentation_test.dart for more information.
//
// VMOptions=--no_concurrent_mark --no_concurrent_sweep
// VMOptions=--no_concurrent_mark --concurrent_sweep
// VMOptions=--no_concurrent_mark --use_compactor
// VMOptions=--no_concurrent_mark --use_compactor --force_evacuation
// VMOptions=--concurrent_mark --no_concurrent_sweep
// VMOptions=--concurrent_mark --concurrent_sweep
// VMOptions=--concurrent_mark --use_compactor
// VMOptions=--concurrent_mark --use_compactor --force_evacuation
import 'dart:typed_data';
main() {
final List<List> arrays = [];
// Fill up the heap with alternating large and small items.
for (int i = 0; i < 500000; i++) {
arrays.add(new Uint32List(260));
arrays.add(new Uint32List(1));
}
// Clear the large items so the heap has large gaps.
for (int i = 0; i < arrays.length; i += 2) {
arrays[i] = null;
}
// Allocate a lot of large items which don't fit in the gaps created above.
for (int i = 0; i < 600000; i++) {
arrays.add(new Uint32List(300));
}
}