[vm] Packed representation of record shape

The representation of record shape in record instances and record
types is changed from a pair

  int num_fields
  Array field_names

to a single integer, a packed bitfield

  int num_fields(16)
  int field_names_index(kSmiBits-16)

where the field names index is an index into the field names array
available from the ObjectStore.
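
A minimal standalone sketch of this packing, reusing the constant names
that appear later in the diff (kNumFieldsMask, kFieldNamesIndexShift);
the Pack helper and any widths beyond the 16-bit field count are
illustrative assumptions:

  #include <cassert>
  #include <cstdint>

  // Sketch of the packed shape: the low 16 bits hold num_fields, the
  // remaining Smi bits hold an index into the field names table in the
  // ObjectStore.
  class RecordShape {
   public:
    static constexpr intptr_t kNumFieldsBits = 16;
    static constexpr intptr_t kNumFieldsMask =
        (intptr_t{1} << kNumFieldsBits) - 1;
    static constexpr intptr_t kFieldNamesIndexShift = kNumFieldsBits;

    // Hypothetical packing helper, not the VM's API.
    static RecordShape Pack(intptr_t num_fields, intptr_t field_names_index) {
      assert((num_fields & ~kNumFieldsMask) == 0);
      return RecordShape((field_names_index << kFieldNamesIndexShift) |
                         num_fields);
    }

    intptr_t num_fields() const { return value_ & kNumFieldsMask; }
    intptr_t field_names_index() const {
      return value_ >> kFieldNamesIndexShift;
    }
    intptr_t AsInt() const { return value_; }

   private:
    explicit RecordShape(intptr_t value) : value_(value) {}
    intptr_t value_;  // fits in a Smi
  };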

With the new representation of record shapes:
1) The size of record instances is reduced.
2) The number of comparisons in a shape test is reduced from 2 to 1
(the shape test is used during type checks; see the sketch after this
list).
3) A few operations are removed from Record.hashCode.
4) Type testing stubs (TTS) are now supported for record types with
named fields. Previously it was not possible to check the shape of
records with named fields in a TTS, as a TTS cannot access the object
pool and therefore cannot load the field names array.
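
A plain-C++ sketch (not VM code; the structs are stand-ins) of why the
shape test drops from two comparisons to one:

  #include <cstdint>

  struct OldRecord { intptr_t num_fields; const void* field_names; };
  struct NewRecord { intptr_t shape; };  // packed bitfield, Smi-sized

  // Before: compare the field count, then the field names array.
  bool SameShapeOld(const OldRecord& a, const OldRecord& b) {
    return a.num_fields == b.num_fields && a.field_names == b.field_names;
  }

  // After: one comparison; a TTS can even compare against an immediate.
  bool SameShapeNew(const NewRecord& a, const NewRecord& b) {
    return a.shape == b.shape;
  }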

TEST=existing

Issue: https://github.com/dart-lang/sdk/issues/49719

Change-Id: I7cdcbb53938aba5d561cd24dc99530395dbbea7e
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/276201
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Alexander Markov <alexmarkov@google.com>
Author: Alexander Markov, 2022-12-20 20:11:48 +00:00 (committed by Commit Queue)
Parent: e38a7046fb
Commit: 3675fd25f4
52 changed files with 1458 additions and 1105 deletions


@ -148,13 +148,12 @@ static bool HaveSameRuntimeTypeHelper(Zone* zone,
if (left_cid == kRecordCid) {
const auto& left_record = Record::Cast(left);
const auto& right_record = Record::Cast(right);
const intptr_t num_fields = left_record.num_fields();
if ((num_fields != right_record.num_fields()) ||
(left_record.field_names() != right_record.field_names())) {
if (left_record.shape() != right_record.shape()) {
return false;
}
Instance& left_field = Instance::Handle(zone);
Instance& right_field = Instance::Handle(zone);
const intptr_t num_fields = left_record.num_fields();
for (intptr_t i = 0; i < num_fields; ++i) {
left_field ^= left_record.FieldAt(i);
right_field ^= right_record.FieldAt(i);


@ -138,7 +138,7 @@ void matchIL$test(FlowGraph graph) {
'r4' << match.DispatchTableCall('obj2_cid'),
'r4_0' << match.ExtractNthOutput('r4', index: 0),
'r4_y' << match.ExtractNthOutput('r4', index: 1),
'r4_boxed' << match.AllocateSmallRecord(match.any, 'r4_0', 'r4_y'),
'r4_boxed' << match.AllocateSmallRecord('r4_0', 'r4_y'),
match.PushArgument('r4_boxed'),
match.StaticCall(),


@ -4934,8 +4934,7 @@ class RecordSerializationCluster : public SerializationCluster {
RecordPtr record = Record::RawCast(object);
objects_.Add(record);
s->Push(record->untag()->field_names());
const intptr_t num_fields = Smi::Value(record->untag()->num_fields());
const intptr_t num_fields = Record::NumFields(record);
for (intptr_t i = 0; i < num_fields; ++i) {
s->Push(record->untag()->field(i));
}
@ -4948,7 +4947,7 @@ class RecordSerializationCluster : public SerializationCluster {
RecordPtr record = objects_[i];
s->AssignRef(record);
AutoTraceObject(record);
const intptr_t num_fields = Smi::Value(record->untag()->num_fields());
const intptr_t num_fields = Record::NumFields(record);
s->WriteUnsigned(num_fields);
target_memory_size_ += compiler::target::Record::InstanceSize(num_fields);
}
@ -4959,9 +4958,9 @@ class RecordSerializationCluster : public SerializationCluster {
for (intptr_t i = 0; i < count; ++i) {
RecordPtr record = objects_[i];
AutoTraceObject(record);
const intptr_t num_fields = Smi::Value(record->untag()->num_fields());
s->WriteUnsigned(num_fields);
WriteField(record, field_names());
const RecordShape shape(record->untag()->shape());
s->WriteUnsigned(shape.AsInt());
const intptr_t num_fields = shape.num_fields();
for (intptr_t j = 0; j < num_fields; ++j) {
s->WriteElementRef(record->untag()->field(j), j);
}
@ -4998,12 +4997,12 @@ class RecordDeserializationCluster
const bool stamp_canonical = primary && is_canonical();
for (intptr_t id = start_index_, n = stop_index_; id < n; id++) {
RecordPtr record = static_cast<RecordPtr>(d.Ref(id));
const intptr_t num_fields = d.ReadUnsigned();
const intptr_t shape = d.ReadUnsigned();
const intptr_t num_fields = RecordShape(shape).num_fields();
Deserializer::InitializeHeader(record, kRecordCid,
Record::InstanceSize(num_fields),
stamp_canonical);
record->untag()->num_fields_ = Smi::New(num_fields);
record->untag()->field_names_ = static_cast<ArrayPtr>(d.ReadRef());
record->untag()->shape_ = Smi::New(shape);
for (intptr_t j = 0; j < num_fields; ++j) {
record->untag()->data()[j] = d.ReadRef();
}


@ -940,11 +940,6 @@ AbstractTypePtr ClassFinalizer::FinalizeRecordType(
record.SetFieldTypeAt(i, finalized_type);
}
}
// Canonicalize field names so they can be compared with pointer comparison.
// The field names are already sorted in the front-end.
Array& field_names = Array::Handle(zone, record.field_names());
field_names ^= field_names.Canonicalize(Thread::Current());
record.set_field_names(field_names);
if (FLAG_trace_type_finalization) {
THR_Print("Marking record type '%s' as finalized\n",


@ -854,12 +854,17 @@ class Assembler : public AssemblerBase {
void AddRegisters(Register dest, Register src) {
add(dest, dest, Operand(src));
}
// [dest] = [src] << [scale] + [value].
void AddScaled(Register dest,
Register src,
ScaleFactor scale,
int32_t value) {
LoadImmediate(dest, value);
add(dest, dest, Operand(src, LSL, scale));
if (scale == 0) {
AddImmediate(dest, src, value);
} else {
Lsl(dest, src, Operand(scale));
AddImmediate(dest, dest, value);
}
}
void SubImmediate(Register rd,
Register rn,


@ -1824,12 +1824,17 @@ class Assembler : public AssemblerBase {
void AddRegisters(Register dest, Register src) {
add(dest, dest, Operand(src));
}
// [dest] = [src] << [scale] + [value].
void AddScaled(Register dest,
Register src,
ScaleFactor scale,
int32_t value) {
LoadImmediate(dest, value);
add(dest, dest, Operand(src, LSL, scale));
if (scale == 0) {
AddImmediate(dest, src, value);
} else {
orr(dest, ZR, Operand(src, LSL, scale));
AddImmediate(dest, dest, value);
}
}
void SubImmediateSetFlags(Register dest,
Register rn,


@ -254,6 +254,7 @@ class Assembler : public AssemblerBase {
void pushl(Register reg);
void pushl(const Address& address);
void pushl(const Immediate& imm);
void PushImmediate(int32_t value) { pushl(Immediate(value)); }
void popl(Register reg);
void popl(const Address& address);
@ -751,6 +752,7 @@ class Assembler : public AssemblerBase {
void AddRegisters(Register dest, Register src) {
addl(dest, src);
}
// [dest] = [src] << [scale] + [value].
void AddScaled(Register dest,
Register src,
ScaleFactor scale,
@ -774,6 +776,10 @@ class Assembler : public AssemblerBase {
void AndImmediate(Register dst, int32_t value) {
andl(dst, Immediate(value));
}
void AndImmediate(Register dst, Register src, int32_t value) {
MoveRegister(dst, src);
andl(dst, Immediate(value));
}
void AndRegisters(Register dst,
Register src1,
Register src2 = kNoRegister) override;


@ -1009,6 +1009,7 @@ class Assembler : public MicroAssembler {
MulImmediate(dest, dest, imm, width);
}
void AddRegisters(Register dest, Register src) { add(dest, dest, src); }
// [dest] = [src] << [scale] + [value].
void AddScaled(Register dest,
Register src,
ScaleFactor scale,


@ -586,6 +586,10 @@ class Assembler : public AssemblerBase {
void AndImmediate(Register dst, int64_t value) {
AndImmediate(dst, Immediate(value));
}
void AndImmediate(Register dst, Register src, int64_t value) {
MoveRegister(dst, src);
AndImmediate(dst, value);
}
void AndRegisters(Register dst,
Register src1,
Register src2 = kNoRegister) override;
@ -783,6 +787,7 @@ class Assembler : public AssemblerBase {
void AddRegisters(Register dest, Register src) {
addq(dest, src);
}
// [dest] = [src] << [scale] + [value].
void AddScaled(Register dest,
Register src,
ScaleFactor scale,


@ -2000,18 +2000,14 @@ void FlowGraph::InsertRecordBoxing(Definition* def) {
ASSERT(target != nullptr && !target->IsNull());
const auto& type = AbstractType::Handle(Z, target->result_type());
ASSERT(type.IsRecordType());
const auto& field_names =
Array::Handle(Z, RecordType::Cast(type).field_names());
Value* field_names_value = (field_names.Length() != 0)
? new (Z) Value(GetConstant(field_names))
: nullptr;
const RecordShape shape = RecordType::Cast(type).shape();
auto* x = new (Z)
ExtractNthOutputInstr(new (Z) Value(def), 0, kTagged, kDynamicCid);
auto* y = new (Z)
ExtractNthOutputInstr(new (Z) Value(def), 1, kTagged, kDynamicCid);
auto* alloc = new (Z) AllocateSmallRecordInstr(
InstructionSource(), 2, field_names_value, new (Z) Value(x),
new (Z) Value(y), nullptr, def->deopt_id());
auto* alloc = new (Z)
AllocateSmallRecordInstr(InstructionSource(), shape, new (Z) Value(x),
new (Z) Value(y), nullptr, def->deopt_id());
def->ReplaceUsesWith(alloc);
// Uses of 'def' in 'x' and 'y' should not be replaced as 'x' and 'y'
// are not added to the flow graph yet.


@ -395,16 +395,6 @@ bool HierarchyInfo::CanUseRecordSubtypeRangeCheckFor(const AbstractType& type) {
return false;
}
const RecordType& rec = RecordType::Cast(type);
// Type testing stubs have no access to their object pools
// so they will not be able to load field names from object pool
// in order to check the shape of a record instance.
// See TypeTestingStubGenerator::BuildOptimizedRecordSubtypeRangeCheck.
if (rec.NumNamedFields() != 0) {
return false;
} else {
ASSERT(rec.field_names() == Object::empty_array().ptr());
ASSERT(compiler::target::CanLoadFromThread(Object::empty_array()));
}
Zone* zone = thread()->zone();
auto& field_type = AbstractType::Handle(zone);
for (intptr_t i = 0, n = rec.NumFields(); i < n; ++i) {
@ -2708,16 +2698,9 @@ bool LoadFieldInstr::TryEvaluateLoad(const Object& instance,
}
return false;
case Slot::Kind::kRecord_num_fields:
case Slot::Kind::kRecord_shape:
if (instance.IsRecord()) {
*result = Smi::New(Record::Cast(instance).num_fields());
return true;
}
return false;
case Slot::Kind::kRecord_field_names:
if (instance.IsRecord()) {
*result = Record::Cast(instance).field_names();
*result = Record::Cast(instance).shape().AsSmi();
return true;
}
return false;
@ -2847,37 +2830,17 @@ Definition* LoadFieldInstr::Canonicalize(FlowGraph* flow_graph) {
}
}
break;
case Slot::Kind::kRecord_num_fields:
case Slot::Kind::kRecord_shape:
ASSERT(!calls_initializer());
if (auto* alloc_rec = orig_instance->AsAllocateRecord()) {
return flow_graph->GetConstant(
Smi::Handle(Smi::New(alloc_rec->num_fields())));
return flow_graph->GetConstant(Smi::Handle(alloc_rec->shape().AsSmi()));
} else if (auto* alloc_rec = orig_instance->AsAllocateSmallRecord()) {
return flow_graph->GetConstant(
Smi::Handle(Smi::New(alloc_rec->num_fields())));
return flow_graph->GetConstant(Smi::Handle(alloc_rec->shape().AsSmi()));
} else {
const AbstractType* type = instance()->Type()->ToAbstractType();
if (type->IsRecordType()) {
return flow_graph->GetConstant(
Smi::Handle(Smi::New(RecordType::Cast(*type).NumFields())));
}
}
break;
case Slot::Kind::kRecord_field_names:
ASSERT(!calls_initializer());
if (auto* alloc_rec = orig_instance->AsAllocateRecord()) {
return alloc_rec->field_names()->definition();
} else if (auto* alloc_rec = orig_instance->AsAllocateSmallRecord()) {
if (alloc_rec->has_named_fields()) {
return alloc_rec->field_names()->definition();
} else {
return flow_graph->GetConstant(Object::empty_array());
}
} else {
const AbstractType* type = instance()->Type()->ToAbstractType();
if (type->IsRecordType()) {
return flow_graph->GetConstant(
Array::Handle(RecordType::Cast(*type).field_names()));
Smi::Handle(RecordType::Cast(*type).shape().AsSmi()));
}
}
break;
@ -7765,12 +7728,10 @@ void SuspendInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
LocationSummary* AllocateRecordInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumInputs = 0;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
locs->set_in(0,
Location::RegisterLocation(AllocateRecordABI::kFieldNamesReg));
locs->set_out(0, Location::RegisterLocation(AllocateRecordABI::kResultReg));
return locs;
}
@ -7779,8 +7740,8 @@ void AllocateRecordInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Code& stub = Code::ZoneHandle(
compiler->zone(),
compiler->isolate_group()->object_store()->allocate_record_stub());
__ LoadImmediate(AllocateRecordABI::kNumFieldsReg,
Smi::RawValue(num_fields()));
__ LoadImmediate(AllocateRecordABI::kShapeReg,
Smi::RawValue(shape().AsInt()));
compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
locs(), deopt_id(), env());
}
@ -7792,35 +7753,25 @@ LocationSummary* AllocateSmallRecordInstr::MakeLocationSummary(Zone* zone,
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
if (has_named_fields()) {
locs->set_in(0,
Location::RegisterLocation(AllocateSmallRecordABI::kValue0Reg));
locs->set_in(1,
Location::RegisterLocation(AllocateSmallRecordABI::kValue1Reg));
if (num_fields() > 2) {
locs->set_in(
0, Location::RegisterLocation(AllocateSmallRecordABI::kFieldNamesReg));
locs->set_in(
1, Location::RegisterLocation(AllocateSmallRecordABI::kValue0Reg));
locs->set_in(
2, Location::RegisterLocation(AllocateSmallRecordABI::kValue1Reg));
if (num_fields() > 2) {
locs->set_in(
3, Location::RegisterLocation(AllocateSmallRecordABI::kValue2Reg));
}
} else {
locs->set_in(
0, Location::RegisterLocation(AllocateSmallRecordABI::kValue0Reg));
locs->set_in(
1, Location::RegisterLocation(AllocateSmallRecordABI::kValue1Reg));
if (num_fields() > 2) {
locs->set_in(
2, Location::RegisterLocation(AllocateSmallRecordABI::kValue2Reg));
}
2, Location::RegisterLocation(AllocateSmallRecordABI::kValue2Reg));
}
locs->set_out(0, Location::RegisterLocation(AllocateRecordABI::kResultReg));
locs->set_out(0,
Location::RegisterLocation(AllocateSmallRecordABI::kResultReg));
return locs;
}
void AllocateSmallRecordInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
auto object_store = compiler->isolate_group()->object_store();
Code& stub = Code::ZoneHandle(compiler->zone());
if (has_named_fields()) {
if (shape().HasNamedFields()) {
__ LoadImmediate(AllocateSmallRecordABI::kShapeReg,
Smi::RawValue(shape().AsInt()));
switch (num_fields()) {
case 2:
stub = object_store->allocate_record2_named_stub();


@ -6982,40 +6982,27 @@ class AllocateUninitializedContextInstr : public TemplateAllocation<0> {
};
// Allocates and null initializes a record object.
class AllocateRecordInstr : public TemplateAllocation<1> {
class AllocateRecordInstr : public TemplateAllocation<0> {
public:
enum { kFieldNamesPos = 0 };
AllocateRecordInstr(const InstructionSource& source,
intptr_t num_fields,
Value* field_names,
RecordShape shape,
intptr_t deopt_id)
: TemplateAllocation(source, deopt_id), num_fields_(num_fields) {
SetInputAt(kFieldNamesPos, field_names);
}
: TemplateAllocation(source, deopt_id), shape_(shape) {}
DECLARE_INSTRUCTION(AllocateRecord)
virtual CompileType ComputeType() const;
intptr_t num_fields() const { return num_fields_; }
Value* field_names() const { return InputAt(kFieldNamesPos); }
virtual const Slot* SlotForInput(intptr_t pos) {
switch (pos) {
case kFieldNamesPos:
return &Slot::Record_field_names();
default:
return TemplateAllocation::SlotForInput(pos);
}
}
RecordShape shape() const { return shape_; }
intptr_t num_fields() const { return shape_.num_fields(); }
virtual bool HasUnknownSideEffects() const { return false; }
virtual bool WillAllocateNewOrRemembered() const {
return Heap::IsAllocatableInNewSpace(
compiler::target::Record::InstanceSize(num_fields_));
compiler::target::Record::InstanceSize(num_fields()));
}
#define FIELD_LIST(F) F(const intptr_t, num_fields_)
#define FIELD_LIST(F) F(const RecordShape, shape_)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateRecordInstr,
TemplateAllocation,
@ -7028,75 +7015,46 @@ class AllocateRecordInstr : public TemplateAllocation<1> {
// Allocates and initializes fields of a small record object
// (with 2 or 3 fields).
class AllocateSmallRecordInstr : public TemplateAllocation<4> {
class AllocateSmallRecordInstr : public TemplateAllocation<3> {
public:
AllocateSmallRecordInstr(const InstructionSource& source,
intptr_t num_fields, // 2 or 3.
Value* field_names, // Optional.
RecordShape shape, // 2 or 3 fields.
Value* value0,
Value* value1,
Value* value2, // Optional.
intptr_t deopt_id)
: TemplateAllocation(source, deopt_id),
num_fields_(num_fields),
has_named_fields_(field_names != nullptr) {
: TemplateAllocation(source, deopt_id), shape_(shape) {
const intptr_t num_fields = shape.num_fields();
ASSERT(num_fields == 2 || num_fields == 3);
ASSERT((num_fields > 2) == (value2 != nullptr));
if (has_named_fields_) {
SetInputAt(0, field_names);
SetInputAt(1, value0);
SetInputAt(2, value1);
if (num_fields > 2) {
SetInputAt(3, value2);
}
} else {
SetInputAt(0, value0);
SetInputAt(1, value1);
if (num_fields > 2) {
SetInputAt(2, value2);
}
SetInputAt(0, value0);
SetInputAt(1, value1);
if (num_fields > 2) {
SetInputAt(2, value2);
}
}
DECLARE_INSTRUCTION(AllocateSmallRecord)
virtual CompileType ComputeType() const;
intptr_t num_fields() const { return num_fields_; }
bool has_named_fields() const { return has_named_fields_; }
RecordShape shape() const { return shape_; }
intptr_t num_fields() const { return shape().num_fields(); }
Value* field_names() const {
ASSERT(has_named_fields_);
return InputAt(0);
}
virtual intptr_t InputCount() const {
return (has_named_fields_ ? 1 : 0) + num_fields_;
}
virtual intptr_t InputCount() const { return num_fields(); }
virtual const Slot* SlotForInput(intptr_t pos) {
if (has_named_fields_) {
if (pos == 0) {
return &Slot::Record_field_names();
} else {
return &Slot::GetRecordFieldSlot(
Thread::Current(), compiler::target::Record::field_offset(pos - 1));
}
} else {
return &Slot::GetRecordFieldSlot(
Thread::Current(), compiler::target::Record::field_offset(pos));
}
return &Slot::GetRecordFieldSlot(
Thread::Current(), compiler::target::Record::field_offset(pos));
}
virtual bool HasUnknownSideEffects() const { return false; }
virtual bool WillAllocateNewOrRemembered() const {
return Heap::IsAllocatableInNewSpace(
compiler::target::Record::InstanceSize(num_fields_));
compiler::target::Record::InstanceSize(num_fields()));
}
#define FIELD_LIST(F) \
F(const intptr_t, num_fields_) \
F(const bool, has_named_fields_)
#define FIELD_LIST(F) F(const RecordShape, shape_)
DECLARE_INSTRUCTION_SERIALIZABLE_FIELDS(AllocateSmallRecordInstr,
TemplateAllocation,
@ -7114,12 +7072,12 @@ class MaterializeObjectInstr : public VariadicDefinition {
public:
MaterializeObjectInstr(AllocationInstr* allocation,
const Class& cls,
intptr_t num_elements,
intptr_t length_or_shape,
const ZoneGrowableArray<const Slot*>& slots,
InputsArray&& values)
: VariadicDefinition(std::move(values)),
cls_(cls),
num_elements_(num_elements),
length_or_shape_(length_or_shape),
slots_(slots),
registers_remapped_(false),
allocation_(allocation) {
@ -7129,7 +7087,7 @@ class MaterializeObjectInstr : public VariadicDefinition {
AllocationInstr* allocation() const { return allocation_; }
const Class& cls() const { return cls_; }
intptr_t num_elements() const { return num_elements_; }
intptr_t length_or_shape() const { return length_or_shape_; }
intptr_t FieldOffsetAt(intptr_t i) const {
return slots_[i]->offset_in_bytes();
@ -7169,7 +7127,7 @@ class MaterializeObjectInstr : public VariadicDefinition {
#define FIELD_LIST(F) \
F(const Class&, cls_) \
F(intptr_t, num_elements_) \
F(intptr_t, length_or_shape_) \
F(const ZoneGrowableArray<const Slot*>&, slots_) \
F(bool, registers_remapped_)


@ -28,6 +28,7 @@ namespace dart {
FlowGraphSerializer::FlowGraphSerializer(NonStreamingWriteStream* stream)
: stream_(stream),
zone_(Thread::Current()->zone()),
thread_(Thread::Current()),
isolate_group_(IsolateGroup::Current()),
heap_(IsolateGroup::Current()->heap()) {}
@ -1600,11 +1601,9 @@ void FlowGraphSerializer::WriteObjectImpl(const Object& x,
case kRecordCid: {
ASSERT(x.IsCanonical());
const auto& record = Record::Cast(x);
const intptr_t num_fields = record.num_fields();
Write<intptr_t>(num_fields);
Write<const Array&>(Array::Handle(Z, record.field_names()));
Write<RecordShape>(record.shape());
auto& field = Object::Handle(Z);
for (intptr_t i = 0; i < num_fields; ++i) {
for (intptr_t i = 0, n = record.num_fields(); i < n; ++i) {
field = record.FieldAt(i);
Write<const Object&>(field);
}
@ -1615,7 +1614,7 @@ void FlowGraphSerializer::WriteObjectImpl(const Object& x,
ASSERT(rec.IsFinalized());
TypeScope type_scope(this, rec.IsRecursive());
Write<int8_t>(static_cast<int8_t>(rec.nullability()));
Write<const Array&>(Array::Handle(Z, rec.field_names()));
Write<RecordShape>(rec.shape());
Write<const Array&>(Array::Handle(Z, rec.field_types()));
Write<bool>(type_scope.CanBeCanonicalized());
break;
@ -1881,11 +1880,9 @@ const Object& FlowGraphDeserializer::ReadObjectImpl(intptr_t cid,
Symbols::FromLatin1(thread(), latin1, length));
}
case kRecordCid: {
const intptr_t num_fields = Read<intptr_t>();
const auto& field_names = Read<const Array&>();
auto& record =
Record::ZoneHandle(Z, Record::New(num_fields, field_names));
for (intptr_t i = 0; i < num_fields; ++i) {
const RecordShape shape = Read<RecordShape>();
auto& record = Record::ZoneHandle(Z, Record::New(shape));
for (intptr_t i = 0, n = shape.num_fields(); i < n; ++i) {
record.SetFieldAt(i, Read<const Object&>());
}
record ^= record.Canonicalize(thread());
@ -1893,10 +1890,10 @@ const Object& FlowGraphDeserializer::ReadObjectImpl(intptr_t cid,
}
case kRecordTypeCid: {
const Nullability nullability = static_cast<Nullability>(Read<int8_t>());
const Array& field_names = Read<const Array&>();
const RecordShape shape = Read<RecordShape>();
const Array& field_types = Read<const Array&>();
RecordType& rec = RecordType::ZoneHandle(
Z, RecordType::New(field_types, field_names, nullability));
Z, RecordType::New(shape, field_types, nullability));
rec.SetIsFinalized();
rec ^= MaybeCanonicalize(rec, object_index, Read<bool>());
return rec;
@ -2133,6 +2130,22 @@ RangeBoundary::RangeBoundary(FlowGraphDeserializer* d)
value_(d->Read<int64_t>()),
offset_(d->Read<int64_t>()) {}
template <>
void FlowGraphSerializer::WriteTrait<RecordShape>::Write(FlowGraphSerializer* s,
RecordShape x) {
s->Write<intptr_t>(x.num_fields());
s->Write<const Array&>(
Array::Handle(s->zone(), x.GetFieldNames(s->thread())));
}
template <>
RecordShape FlowGraphDeserializer::ReadTrait<RecordShape>::Read(
FlowGraphDeserializer* d) {
const intptr_t num_fields = d->Read<intptr_t>();
const auto& field_names = d->Read<const Array&>();
return RecordShape::Register(d->thread(), num_fields, field_names);
}
void RegisterSet::Write(FlowGraphSerializer* s) const {
s->Write<uintptr_t>(cpu_registers_.data());
s->Write<uintptr_t>(untagged_cpu_registers_.data());


@ -45,6 +45,7 @@ class ParallelMoveInstr;
class PhiInstr;
class Range;
class ReadStream;
class RecordShape;
class TargetEntryInstr;
class TokenPosition;
@ -104,6 +105,7 @@ class NativeCallingConvention;
V(ParallelMoveInstr*) \
V(PhiInstr*) \
V(Range*) \
V(RecordShape) \
V(Representation) \
V(const Slot&) \
V(const Slot*) \
@ -295,6 +297,7 @@ class FlowGraphSerializer : public ValueObject {
BaseWriteStream* stream() const { return stream_; }
Zone* zone() const { return zone_; }
Thread* thread() const { return thread_; }
IsolateGroup* isolate_group() const { return isolate_group_; }
Heap* heap() const { return heap_; }
bool can_write_refs() const { return can_write_refs_; }
@ -329,6 +332,7 @@ class FlowGraphSerializer : public ValueObject {
NonStreamingWriteStream* stream_;
Zone* zone_;
Thread* thread_;
IsolateGroup* isolate_group_;
Heap* heap_;
intptr_t object_counter_ = 0;


@ -2776,10 +2776,8 @@ void LoadFieldInstr::InferRange(RangeAnalysis* analysis, Range* range) {
compiler::target::TypeArguments::kMaxElements));
break;
case Slot::Kind::kRecord_num_fields:
*range = Range(
RangeBoundary::FromConstant(0),
RangeBoundary::FromConstant(compiler::target::Record::kMaxElements));
case Slot::Kind::kRecord_shape:
*range = Range(RangeBoundary::FromConstant(0), RangeBoundary::MaxSmi());
break;
case Slot::Kind::kString_length:
@ -2825,7 +2823,6 @@ void LoadFieldInstr::InferRange(RangeAnalysis* analysis, Range* range) {
case Slot::Kind::kFunctionType_parameter_types:
case Slot::Kind::kFunctionType_type_parameters:
case Slot::Kind::kInstance_native_fields_array:
case Slot::Kind::kRecord_field_names:
case Slot::Kind::kSuspendState_function_data:
case Slot::Kind::kSuspendState_then_callback:
case Slot::Kind::kSuspendState_error_callback:


@ -2251,11 +2251,7 @@ class LoadOptimizer : public ValueObject {
forward_def = alloc->InputAt(pos)->definition();
} else {
// Fields not provided as an input to the instruction are
// initialized to null during allocation (except
// Record::num_fields).
// Accesses to Record::num_fields should be folded in
// LoadFieldInstr::Canonicalize.
ASSERT(slot->kind() != Slot::Kind::kRecord_num_fields);
// initialized to null during allocation.
forward_def = graph_->constant_null();
}
}
@ -3839,7 +3835,7 @@ void AllocationSinking::CreateMaterializationAt(
}
const Class* cls = nullptr;
intptr_t num_elements = -1;
intptr_t length_or_shape = -1;
if (auto instr = alloc->AsAllocateObject()) {
cls = &(instr->cls());
} else if (alloc->IsAllocateClosure()) {
@ -3847,31 +3843,31 @@ void AllocationSinking::CreateMaterializationAt(
flow_graph_->isolate_group()->object_store()->closure_class());
} else if (auto instr = alloc->AsAllocateContext()) {
cls = &Class::ZoneHandle(Object::context_class());
num_elements = instr->num_context_variables();
length_or_shape = instr->num_context_variables();
} else if (auto instr = alloc->AsAllocateUninitializedContext()) {
cls = &Class::ZoneHandle(Object::context_class());
num_elements = instr->num_context_variables();
length_or_shape = instr->num_context_variables();
} else if (auto instr = alloc->AsCreateArray()) {
cls = &Class::ZoneHandle(
flow_graph_->isolate_group()->object_store()->array_class());
num_elements = instr->GetConstantNumElements();
length_or_shape = instr->GetConstantNumElements();
} else if (auto instr = alloc->AsAllocateTypedData()) {
cls = &Class::ZoneHandle(
flow_graph_->isolate_group()->class_table()->At(instr->class_id()));
num_elements = instr->GetConstantNumElements();
length_or_shape = instr->GetConstantNumElements();
} else if (auto instr = alloc->AsAllocateRecord()) {
cls = &Class::ZoneHandle(
flow_graph_->isolate_group()->class_table()->At(kRecordCid));
num_elements = instr->num_fields();
length_or_shape = instr->shape().AsInt();
} else if (auto instr = alloc->AsAllocateSmallRecord()) {
cls = &Class::ZoneHandle(
flow_graph_->isolate_group()->class_table()->At(kRecordCid));
num_elements = instr->num_fields();
length_or_shape = instr->shape().AsInt();
} else {
UNREACHABLE();
}
MaterializeObjectInstr* mat = new (Z) MaterializeObjectInstr(
alloc->AsAllocation(), *cls, num_elements, slots, std::move(values));
alloc->AsAllocation(), *cls, length_or_shape, slots, std::move(values));
flow_graph_->InsertBefore(exit, mat, nullptr, FlowGraph::kValue);


@ -134,7 +134,6 @@ bool Slot::IsImmutableLengthSlot() const {
switch (kind()) {
case Slot::Kind::kArray_length:
case Slot::Kind::kTypedDataBase_length:
case Slot::Kind::kRecord_num_fields:
case Slot::Kind::kString_length:
case Slot::Kind::kTypeArguments_length:
return true;
@ -191,7 +190,7 @@ bool Slot::IsImmutableLengthSlot() const {
case Slot::Kind::kFunctionType_parameter_types:
case Slot::Kind::kFunctionType_type_parameters:
case Slot::Kind::kRecordField:
case Slot::Kind::kRecord_field_names:
case Slot::Kind::kRecord_shape:
case Slot::Kind::kSuspendState_function_data:
case Slot::Kind::kSuspendState_then_callback:
case Slot::Kind::kSuspendState_error_callback:


@ -125,8 +125,7 @@ class ParsedFunction;
V(ArgumentsDescriptor, UntaggedArray, positional_count, Smi, FINAL) \
V(ArgumentsDescriptor, UntaggedArray, count, Smi, FINAL) \
V(ArgumentsDescriptor, UntaggedArray, size, Smi, FINAL) \
V(Record, UntaggedRecord, field_names, ImmutableArray, FINAL) \
V(Record, UntaggedRecord, num_fields, Smi, FINAL) \
V(Record, UntaggedRecord, shape, Smi, FINAL) \
V(TypeArguments, UntaggedTypeArguments, hash, Smi, VAR) \
V(TypeArguments, UntaggedTypeArguments, length, Smi, FINAL) \
V(TypeParameters, UntaggedTypeParameters, names, Array, FINAL) \


@ -931,25 +931,23 @@ Fragment BaseFlowGraphBuilder::CreateArray() {
}
Fragment BaseFlowGraphBuilder::AllocateRecord(TokenPosition position,
intptr_t num_fields) {
Value* field_names = Pop();
AllocateRecordInstr* allocate = new (Z) AllocateRecordInstr(
InstructionSource(position), num_fields, field_names, GetNextDeoptId());
RecordShape shape) {
AllocateRecordInstr* allocate = new (Z)
AllocateRecordInstr(InstructionSource(position), shape, GetNextDeoptId());
Push(allocate);
return Fragment(allocate);
}
Fragment BaseFlowGraphBuilder::AllocateSmallRecord(TokenPosition position,
intptr_t num_fields,
bool has_named_fields) {
RecordShape shape) {
const intptr_t num_fields = shape.num_fields();
ASSERT(num_fields == 2 || num_fields == 3);
Value* value2 = (num_fields > 2) ? Pop() : nullptr;
Value* value1 = Pop();
Value* value0 = Pop();
Value* field_names = has_named_fields ? Pop() : nullptr;
AllocateSmallRecordInstr* allocate = new (Z) AllocateSmallRecordInstr(
InstructionSource(position), num_fields, field_names, value0, value1,
value2, GetNextDeoptId());
AllocateSmallRecordInstr* allocate = new (Z)
AllocateSmallRecordInstr(InstructionSource(position), shape, value0,
value1, value2, GetNextDeoptId());
Push(allocate);
return Fragment(allocate);
}


@ -351,10 +351,8 @@ class BaseFlowGraphBuilder {
// Top of the stack should be the closure function.
Fragment AllocateClosure(TokenPosition position = TokenPosition::kNoSource);
Fragment CreateArray();
Fragment AllocateRecord(TokenPosition position, intptr_t num_fields);
Fragment AllocateSmallRecord(TokenPosition position,
intptr_t num_fields,
bool has_named_fields);
Fragment AllocateRecord(TokenPosition position, RecordShape shape);
Fragment AllocateSmallRecord(TokenPosition position, RecordShape shape);
Fragment AllocateTypedData(TokenPosition position, classid_t class_id);
Fragment InstantiateType(const AbstractType& type);
Fragment InstantiateTypeArguments(const TypeArguments& type_arguments);


@ -330,13 +330,13 @@ InstancePtr ConstantReader::ReadConstantInternal(intptr_t constant_index) {
reader.ReadUInt();
}
names.MakeImmutable();
names ^= H.Canonicalize(names);
field_names = &names;
}
}
const intptr_t num_fields = num_positional + num_named;
const auto& record =
Record::Handle(Z, Record::New(num_fields, *field_names));
const RecordShape shape =
RecordShape::Register(H.thread(), num_fields, *field_names);
const auto& record = Record::Handle(Z, Record::New(shape));
intptr_t pos = 0;
for (intptr_t j = 0; j < num_positional; ++j) {
const intptr_t entry_index = reader.ReadUInt();


@ -3879,27 +3879,14 @@ Fragment StreamingFlowGraphBuilder::BuildRecordIsTest(TokenPosition position,
instructions.current = is_record;
}
// Test number of fields.
{
TargetEntryInstr* same_num_fields;
TargetEntryInstr* different_num_fields;
instructions += LoadLocal(instance);
instructions += LoadNativeField(Slot::Record_num_fields());
instructions += IntConstant(type.NumFields());
instructions += BranchIfEqual(&same_num_fields, &different_num_fields);
Fragment(different_num_fields) + Goto(is_false);
instructions.current = same_num_fields;
}
// Test record shape.
{
TargetEntryInstr* same_shape;
TargetEntryInstr* different_shape;
instructions += LoadLocal(instance);
instructions += LoadNativeField(Slot::Record_field_names());
instructions += Constant(Array::ZoneHandle(Z, type.field_names()));
instructions += LoadNativeField(Slot::Record_shape());
instructions += IntConstant(type.shape().AsInt());
instructions += BranchIfEqual(&same_shape, &different_shape);
Fragment(different_shape) + Goto(is_false);
instructions.current = same_shape;
@ -4161,20 +4148,17 @@ Fragment StreamingFlowGraphBuilder::BuildRecordLiteral(TokenPosition* p) {
names.SetAt(i, name);
}
names.MakeImmutable();
names ^= H.Canonicalize(names);
field_names = &names;
}
}
const intptr_t num_fields = positional_count + named_count;
const RecordShape shape =
RecordShape::Register(thread(), num_fields, *field_names);
Fragment instructions;
if (num_fields == 2 ||
(num_fields == 3 && AllocateSmallRecordABI::kValue2Reg != kNoRegister)) {
// Generate specialized allocation for a small number of fields.
const bool has_named_fields = named_count > 0;
if (has_named_fields) {
instructions += Constant(*field_names);
}
for (intptr_t i = 0; i < positional_count; ++i) {
instructions += BuildExpression(); // read ith expression.
}
@ -4185,14 +4169,12 @@ Fragment StreamingFlowGraphBuilder::BuildRecordLiteral(TokenPosition* p) {
}
SkipDartType(); // read recordType.
instructions +=
B->AllocateSmallRecord(position, num_fields, has_named_fields);
instructions += B->AllocateSmallRecord(position, shape);
return instructions;
}
instructions += Constant(*field_names);
instructions += B->AllocateRecord(position, num_fields);
instructions += B->AllocateRecord(position, shape);
LocalVariable* record = MakeTemporary();
// List of positional.
@ -4233,19 +4215,23 @@ Fragment StreamingFlowGraphBuilder::BuildRecordFieldGet(TokenPosition* p,
RecordType::Cast(T.BuildType()); // read recordType.
intptr_t field_index = -1;
const Array& field_names =
Array::Handle(Z, record_type.GetFieldNames(H.thread()));
const intptr_t num_positional_fields =
record_type.NumFields() - field_names.Length();
if (is_named) {
const String& field_name = H.DartSymbolPlain(ReadStringReference());
for (intptr_t i = 0, n = record_type.NumNamedFields(); i < n; ++i) {
if (record_type.FieldNameAt(i) == field_name.ptr()) {
for (intptr_t i = 0, n = field_names.Length(); i < n; ++i) {
if (field_names.At(i) == field_name.ptr()) {
field_index = i;
break;
}
}
ASSERT(field_index >= 0 && field_index < record_type.NumNamedFields());
field_index += record_type.NumPositionalFields();
ASSERT(field_index >= 0 && field_index < field_names.Length());
field_index += num_positional_fields;
} else {
field_index = ReadUInt();
ASSERT(field_index < record_type.NumPositionalFields());
ASSERT(field_index < num_positional_fields);
}
instructions += B->LoadNativeField(Slot::GetRecordFieldSlot(


@ -877,8 +877,7 @@ Fragment FlowGraphBuilder::NativeFunctionBody(const Function& function,
V(LinkedHashBase_getIndex, LinkedHashBase_index) \
V(LinkedHashBase_getUsedData, LinkedHashBase_used_data) \
V(ObjectArrayLength, Array_length) \
V(Record_fieldNames, Record_field_names) \
V(Record_numFields, Record_num_fields) \
V(Record_shape, Record_shape) \
V(SuspendState_getFunctionData, SuspendState_function_data) \
V(SuspendState_getThenCallback, SuspendState_then_callback) \
V(SuspendState_getErrorCallback, SuspendState_error_callback) \
@ -915,6 +914,8 @@ bool FlowGraphBuilder::IsRecognizedMethodForFlowGraph(
switch (kind) {
case MethodRecognizer::kRecord_fieldAt:
case MethodRecognizer::kRecord_fieldNames:
case MethodRecognizer::kRecord_numFields:
case MethodRecognizer::kSuspendState_clone:
case MethodRecognizer::kSuspendState_resume:
case MethodRecognizer::kTypedData_ByteDataView_factory:
@ -1089,6 +1090,25 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
body += LoadIndexed(
kRecordCid, /*index_scale*/ compiler::target::kCompressedWordSize);
break;
case MethodRecognizer::kRecord_fieldNames:
body += LoadObjectStore();
body += RawLoadField(
compiler::target::ObjectStore::record_field_names_offset());
body += LoadLocal(parsed_function_->RawParameterVariable(0));
body += LoadNativeField(Slot::Record_shape());
body += IntConstant(compiler::target::RecordShape::kFieldNamesIndexShift);
body += SmiBinaryOp(Token::kSHR);
body += IntConstant(compiler::target::RecordShape::kFieldNamesIndexMask);
body += SmiBinaryOp(Token::kBIT_AND);
body += LoadIndexed(
kArrayCid, /*index_scale=*/compiler::target::kCompressedWordSize);
break;
case MethodRecognizer::kRecord_numFields:
body += LoadLocal(parsed_function_->RawParameterVariable(0));
body += LoadNativeField(Slot::Record_shape());
body += IntConstant(compiler::target::RecordShape::kNumFieldsMask);
body += SmiBinaryOp(Token::kBIT_AND);
break;
case MethodRecognizer::kSuspendState_clone: {
ASSERT_EQUAL(function.NumParameters(), 1);
body += LoadLocal(parsed_function_->RawParameterVariable(0));
@ -2204,6 +2224,7 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecordFieldGetter(
graph_entry_->set_normal_entry(normal_entry);
JoinEntryInstr* nsm = BuildJoinEntry();
JoinEntryInstr* done = BuildJoinEntry();
Fragment body(normal_entry);
body += CheckStackOverflowInPrologue(function.token_pos());
@ -2212,20 +2233,37 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecordFieldGetter(
ASSERT(Field::IsGetterName(name));
name = Field::NameFromGetter(name);
// Get an array of field names.
const Class& cls = Class::Handle(Z, IG->class_table()->At(kRecordCid));
const auto& error = cls.EnsureIsFinalized(thread_);
ASSERT(error == Error::null());
const Function& get_field_names_function = Function::ZoneHandle(
Z, cls.LookupFunctionAllowPrivate(Symbols::Get_fieldNames()));
ASSERT(!get_field_names_function.IsNull());
body += LoadLocal(parsed_function_->receiver_var());
body += StaticCall(TokenPosition::kNoSource, get_field_names_function, 1,
ICData::kStatic);
LocalVariable* field_names = MakeTemporary("field_names");
body += LoadLocal(field_names);
body += LoadNativeField(Slot::Array_length());
LocalVariable* num_named = MakeTemporary("num_named");
// num_positional = num_fields - field_names.length
body += LoadLocal(parsed_function_->receiver_var());
body += LoadNativeField(Slot::Record_shape());
body += IntConstant(compiler::target::RecordShape::kNumFieldsMask);
body += SmiBinaryOp(Token::kBIT_AND);
body += LoadLocal(num_named);
body += SmiBinaryOp(Token::kSUB);
LocalVariable* num_positional = MakeTemporary("num_positional");
const intptr_t field_index =
Record::GetPositionalFieldIndexFromFieldName(name);
if (field_index >= 0) {
// Get positional record field by index.
body += IntConstant(field_index);
// num_positional = num_fields - field_names.length
body += LoadLocal(parsed_function_->receiver_var());
body += LoadNativeField(Slot::Record_num_fields());
body += LoadLocal(parsed_function_->receiver_var());
body += LoadNativeField(Slot::Record_field_names());
body += LoadNativeField(Slot::Array_length());
body += SmiBinaryOp(Token::kSUB);
body += LoadLocal(num_positional);
body += SmiRelationalOp(Token::kLT);
TargetEntryInstr* valid_index;
TargetEntryInstr* invalid_index;
@ -2235,20 +2273,16 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecordFieldGetter(
body += LoadLocal(parsed_function_->receiver_var());
body += LoadNativeField(Slot::GetRecordFieldSlot(
thread_, compiler::target::Record::field_offset(field_index)));
body += Return(TokenPosition::kNoSource);
body += StoreLocal(TokenPosition::kNoSource,
parsed_function_->expression_temp_var());
body += Drop();
body += Goto(done);
body.current = invalid_index;
}
// Search field among named fields.
body += LoadLocal(parsed_function_->receiver_var());
body += LoadNativeField(Slot::Record_field_names());
LocalVariable* field_names = MakeTemporary("field_names");
body += LoadLocal(field_names);
body += LoadNativeField(Slot::Array_length());
LocalVariable* num_named = MakeTemporary("num_named");
body += IntConstant(0);
body += LoadLocal(num_named);
body += SmiRelationalOp(Token::kLT);
@ -2298,16 +2332,22 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecordFieldGetter(
body += LoadLocal(parsed_function_->receiver_var());
body += LoadLocal(parsed_function_->receiver_var());
body += LoadNativeField(Slot::Record_num_fields());
body += LoadLocal(num_named);
body += SmiBinaryOp(Token::kSUB);
body += LoadLocal(num_positional);
body += LoadLocal(index);
body += SmiBinaryOp(Token::kADD);
body += LoadIndexed(kRecordCid,
/*index_scale*/ compiler::target::kCompressedWordSize);
body += DropTempsPreserveTop(2);
body += StoreLocal(TokenPosition::kNoSource,
parsed_function_->expression_temp_var());
body += Drop();
body += Goto(done);
body.current = done;
body += LoadLocal(parsed_function_->expression_temp_var());
body += DropTempsPreserveTop(3); // field_names, num_named, num_positional
body += Return(TokenPosition::kNoSource);
Fragment throw_nsm(nsm);
@ -4152,6 +4192,20 @@ Fragment FlowGraphBuilder::LoadIsolate() {
return body;
}
Fragment FlowGraphBuilder::LoadIsolateGroup() {
Fragment body;
body += LoadThread();
body += LoadUntagged(compiler::target::Thread::isolate_group_offset());
return body;
}
Fragment FlowGraphBuilder::LoadObjectStore() {
Fragment body;
body += LoadIsolateGroup();
body += LoadUntagged(compiler::target::IsolateGroup::object_store_offset());
return body;
}
Fragment FlowGraphBuilder::LoadServiceExtensionStream() {
Fragment body;
body += LoadThread();
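
The get:_fieldNames IL above reads the shape, extracts the table index
with kFieldNamesIndexShift / kFieldNamesIndexMask, and indexes the
record_field_names array in the ObjectStore. A plain-C++ sketch of that
decoding (the table variable is a hypothetical stand-in for the
ObjectStore field):

  #include <cstdint>
  #include <string>
  #include <vector>

  using FieldNames = std::vector<std::string>;
  // Stand-in for the ObjectStore's record_field_names array.
  extern std::vector<FieldNames> record_field_names_table;

  constexpr intptr_t kNumFieldsMask = 0xFFFF;
  constexpr int kFieldNamesIndexShift = 16;

  intptr_t NumFields(intptr_t shape) { return shape & kNumFieldsMask; }

  // The IL above additionally masks with kFieldNamesIndexMask after the
  // shift; omitted here since the shift alone already isolates the index.
  const FieldNames& GetFieldNames(intptr_t shape) {
    return record_field_names_table[shape >> kFieldNamesIndexShift];
  }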


@ -276,6 +276,12 @@ class FlowGraphBuilder : public BaseFlowGraphBuilder {
// Loads the (untagged) isolate address.
Fragment LoadIsolate();
// Loads the (untagged) current IsolateGroup address.
Fragment LoadIsolateGroup();
// Loads the (untagged) current ObjectStore address.
Fragment LoadObjectStore();
// Loads the (untagged) service extension stream address.
Fragment LoadServiceExtensionStream();


@ -3385,11 +3385,13 @@ void TypeTranslator::BuildRecordType() {
if (named_count != 0) {
field_names.MakeImmutable();
}
const RecordShape shape =
RecordShape::Register(H.thread(), num_fields, field_names);
finalize_ = finalize;
RecordType& rec = RecordType::Handle(
Z, RecordType::New(field_types, field_names, nullability));
RecordType& rec =
RecordType::Handle(Z, RecordType::New(shape, field_types, nullability));
if (finalize_) {
rec ^= ClassFinalizer::FinalizeType(rec);


@ -21,6 +21,7 @@ namespace dart {
V(_GrowableList, []=, GrowableArraySetIndexed, 0x050cd2ba) \
V(_Record, get:_fieldNames, Record_fieldNames, 0x68e5459d) \
V(_Record, get:_numFields, Record_numFields, 0x7bc20792) \
V(_Record, get:_shape, Record_shape, 0x70e120f3) \
V(_Record, _fieldAt, Record_fieldAt, 0xb49cb873) \
V(_TypedList, _getInt8, ByteArrayBaseGetInt8, 0x1623dc34) \
V(_TypedList, _getUint8, ByteArrayBaseGetUint8, 0x177ffe2a) \


@ -1118,6 +1118,13 @@ const word Array::kMaxElements = Array_kMaxElements;
const word Context::kMaxElements = Context_kMaxElements;
const word Record::kMaxElements = Record_kMaxElements;
const word RecordShape::kNumFieldsMask = RecordShape_kNumFieldsMask;
const word RecordShape::kMaxNumFields = RecordShape_kMaxNumFields;
const word RecordShape::kFieldNamesIndexShift =
RecordShape_kFieldNamesIndexShift;
const word RecordShape::kFieldNamesIndexMask = RecordShape_kFieldNamesIndexMask;
const word RecordShape::kMaxFieldNamesIndex = RecordShape_kMaxFieldNamesIndex;
} // namespace target
} // namespace compiler
} // namespace dart


@ -594,10 +594,18 @@ class GrowableObjectArray : public AllStatic {
FINAL_CLASS();
};
class RecordShape : public AllStatic {
public:
static const word kNumFieldsMask;
static const word kMaxNumFields;
static const word kFieldNamesIndexShift;
static const word kFieldNamesIndexMask;
static const word kMaxFieldNamesIndex;
};
class Record : public AllStatic {
public:
static word num_fields_offset();
static word field_names_offset();
static word shape_offset();
static word field_offset(intptr_t index);
static intptr_t field_index_at_offset(intptr_t offset_in_bytes);
static word InstanceSize(intptr_t length);
@ -1309,6 +1317,7 @@ class ObjectStore : public AllStatic {
public:
static word double_type_offset();
static word int_type_offset();
static word record_field_names_offset();
static word string_type_offset();
static word type_type_offset();

(File diff suppressed because it is too large.)


@ -84,6 +84,11 @@
CONSTANT(NativeEntry, kNumCallWrapperArguments) \
CONSTANT(Page, kBytesPerCardLog2) \
CONSTANT(Record, kMaxElements) \
CONSTANT(RecordShape, kFieldNamesIndexMask) \
CONSTANT(RecordShape, kFieldNamesIndexShift) \
CONSTANT(RecordShape, kMaxFieldNamesIndex) \
CONSTANT(RecordShape, kMaxNumFields) \
CONSTANT(RecordShape, kNumFieldsMask) \
CONSTANT(String, kMaxElements) \
CONSTANT(SubtypeTestCache, kFunctionTypeArguments) \
CONSTANT(SubtypeTestCache, kInstanceCidOrSignature) \
@ -185,6 +190,7 @@
FIELD(NativeArguments, thread_offset) \
FIELD(ObjectStore, double_type_offset) \
FIELD(ObjectStore, int_type_offset) \
FIELD(ObjectStore, record_field_names_offset) \
FIELD(ObjectStore, string_type_offset) \
FIELD(ObjectStore, type_type_offset) \
FIELD(ObjectStore, suspend_state_await_offset) \
@ -201,8 +207,7 @@
FIELD(OneByteString, data_offset) \
FIELD(PointerBase, data_offset) \
FIELD(Pointer, type_arguments_offset) \
FIELD(Record, num_fields_offset) \
FIELD(Record, field_names_offset) \
FIELD(Record, shape_offset) \
FIELD(SingleTargetCache, entry_point_offset) \
FIELD(SingleTargetCache, lower_limit_offset) \
FIELD(SingleTargetCache, target_offset) \


@ -1326,8 +1326,7 @@ void StubCodeCompiler::GenerateAllocateGrowableArrayStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateRecordStub(Assembler* assembler) {
const Register result_reg = AllocateRecordABI::kResultReg;
const Register num_fields_reg = AllocateRecordABI::kNumFieldsReg;
const Register field_names_reg = AllocateRecordABI::kFieldNamesReg;
const Register shape_reg = AllocateRecordABI::kShapeReg;
const Register temp_reg = AllocateRecordABI::kTemp1Reg;
const Register new_top_reg = AllocateRecordABI::kTemp2Reg;
Label slow_case;
@ -1335,11 +1334,16 @@ void StubCodeCompiler::GenerateAllocateRecordStub(Assembler* assembler) {
// Check for allocation tracing.
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kRecordCid, &slow_case, temp_reg));
// Extract number of fields from the shape.
__ AndImmediate(
temp_reg, shape_reg,
compiler::target::RecordShape::kNumFieldsMask << kSmiTagShift);
// Compute the rounded instance size.
const intptr_t fixed_size_plus_alignment_padding =
(target::Record::field_offset(0) +
target::ObjectAlignment::kObjectAlignment - 1);
__ AddScaled(temp_reg, num_fields_reg, TIMES_COMPRESSED_HALF_WORD_SIZE,
__ AddScaled(temp_reg, temp_reg, TIMES_COMPRESSED_HALF_WORD_SIZE,
fixed_size_plus_alignment_padding);
__ AndImmediate(temp_reg, -target::ObjectAlignment::kObjectAlignment);
@ -1380,17 +1384,12 @@ void StubCodeCompiler::GenerateAllocateRecordStub(Assembler* assembler) {
}
__ StoreCompressedIntoObjectNoBarrier(
result_reg, FieldAddress(result_reg, target::Record::num_fields_offset()),
num_fields_reg);
__ StoreCompressedIntoObjectNoBarrier(
result_reg,
FieldAddress(result_reg, target::Record::field_names_offset()),
field_names_reg);
result_reg, FieldAddress(result_reg, target::Record::shape_offset()),
shape_reg);
// Initialize the remaining words of the object.
{
const Register field_reg = field_names_reg;
const Register field_reg = shape_reg;
#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_RISCV32) || \
defined(TARGET_ARCH_RISCV64)
const Register null_reg = NULL_REG;
@ -1424,9 +1423,9 @@ void StubCodeCompiler::GenerateAllocateRecordStub(Assembler* assembler) {
__ EnterStubFrame();
__ PushObject(NullObject()); // Space on the stack for the return value.
__ PushRegistersInOrder({num_fields_reg, field_names_reg});
__ CallRuntime(kAllocateRecordRuntimeEntry, 2);
__ Drop(2);
__ PushRegister(shape_reg);
__ CallRuntime(kAllocateRecordRuntimeEntry, 1);
__ Drop(1);
__ PopRegister(AllocateRecordABI::kResultReg);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
@ -1439,7 +1438,7 @@ void StubCodeCompiler::GenerateAllocateSmallRecordStub(Assembler* assembler,
bool has_named_fields) {
ASSERT(num_fields == 2 || num_fields == 3);
const Register result_reg = AllocateSmallRecordABI::kResultReg;
const Register field_names_reg = AllocateSmallRecordABI::kFieldNamesReg;
const Register shape_reg = AllocateSmallRecordABI::kShapeReg;
const Register value0_reg = AllocateSmallRecordABI::kValue0Reg;
const Register value1_reg = AllocateSmallRecordABI::kValue1Reg;
const Register value2_reg = AllocateSmallRecordABI::kValue2Reg;
@ -1462,18 +1461,13 @@ void StubCodeCompiler::GenerateAllocateSmallRecordStub(Assembler* assembler,
__ TryAllocateObject(kRecordCid, target::Record::InstanceSize(num_fields),
&slow_case, distance, result_reg, temp_reg);
__ LoadImmediate(temp_reg, Smi::RawValue(num_fields));
__ StoreCompressedIntoObjectNoBarrier(
result_reg, FieldAddress(result_reg, target::Record::num_fields_offset()),
temp_reg);
if (!has_named_fields) {
__ LoadObject(field_names_reg, Object::empty_array());
__ LoadImmediate(
shape_reg, Smi::RawValue(RecordShape::ForUnnamed(num_fields).AsInt()));
}
__ StoreCompressedIntoObjectNoBarrier(
result_reg,
FieldAddress(result_reg, target::Record::field_names_offset()),
field_names_reg);
result_reg, FieldAddress(result_reg, target::Record::shape_offset()),
shape_reg);
__ StoreCompressedIntoObjectNoBarrier(
result_reg, FieldAddress(result_reg, target::Record::field_offset(0)),
@ -1495,11 +1489,11 @@ void StubCodeCompiler::GenerateAllocateSmallRecordStub(Assembler* assembler,
__ EnterStubFrame();
__ PushObject(NullObject()); // Space on the stack for the return value.
__ PushObject(Smi::ZoneHandle(Smi::New(num_fields)));
if (has_named_fields) {
__ PushRegister(field_names_reg);
__ PushRegister(shape_reg);
} else {
__ PushObject(Object::empty_array());
__ PushImmediate(
Smi::RawValue(RecordShape::ForUnnamed(num_fields).AsInt()));
}
__ PushRegistersInOrder({value0_reg, value1_reg});
if (num_fields > 2) {
@ -1507,8 +1501,8 @@ void StubCodeCompiler::GenerateAllocateSmallRecordStub(Assembler* assembler,
} else {
__ PushObject(NullObject());
}
__ CallRuntime(kAllocateSmallRecordRuntimeEntry, 5);
__ Drop(5);
__ CallRuntime(kAllocateSmallRecordRuntimeEntry, 4);
__ Drop(4);
__ PopRegister(result_reg);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
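
One detail worth noting in the stub changes above: the shape register
holds a tagged Smi, which is why GenerateAllocateRecordStub masks with
kNumFieldsMask shifted left by the Smi tag; the result is num_fields
still encoded as a Smi, ready for the scaled instance-size computation.
A plain-C++ sketch (the 1-bit Smi tag is an assumption for
illustration):

  #include <cstdint>

  constexpr intptr_t kSmiTagShift = 1;        // assumed 1-bit Smi tag
  constexpr intptr_t kNumFieldsMask = 0xFFFF;

  // Masking the tagged shape keeps the result a valid tagged Smi that
  // holds num_fields, so no untag/retag is needed before sizing.
  intptr_t NumFieldsAsTaggedSmi(intptr_t tagged_shape) {
    return tagged_shape & (kNumFieldsMask << kSmiTagShift);
  }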


@ -532,17 +532,16 @@ struct AllocateArrayABI {
// ABI for AllocateRecordStub.
struct AllocateRecordABI {
static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kNumFieldsReg = R2;
static const Register kFieldNamesReg = R1;
static const Register kTemp1Reg = R3;
static const Register kTemp2Reg = R4;
static const Register kShapeReg = R1;
static const Register kTemp1Reg = R2;
static const Register kTemp2Reg = R3;
};
// ABI for AllocateSmallRecordStub (AllocateRecord2, AllocateRecord2Named,
// AllocateRecord3, AllocateRecord3Named).
struct AllocateSmallRecordABI {
static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kFieldNamesReg = R1;
static const Register kShapeReg = R1;
static const Register kValue0Reg = R2;
static const Register kValue1Reg = R3;
static const Register kValue2Reg = R4;


@ -366,17 +366,16 @@ struct AllocateArrayABI {
// ABI for AllocateRecordStub.
struct AllocateRecordABI {
static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kNumFieldsReg = R2;
static const Register kFieldNamesReg = R1;
static const Register kTemp1Reg = R3;
static const Register kTemp2Reg = R4;
static const Register kShapeReg = R1;
static const Register kTemp1Reg = R2;
static const Register kTemp2Reg = R3;
};
// ABI for AllocateSmallRecordStub (AllocateRecord2, AllocateRecord2Named,
// AllocateRecord3, AllocateRecord3Named).
struct AllocateSmallRecordABI {
static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kFieldNamesReg = R1;
static const Register kShapeReg = R1;
static const Register kValue0Reg = R2;
static const Register kValue1Reg = R3;
static const Register kValue2Reg = R4;


@ -255,8 +255,7 @@ struct AllocateArrayABI {
// ABI for AllocateRecordStub.
struct AllocateRecordABI {
static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kNumFieldsReg = EDX;
static const Register kFieldNamesReg = ECX;
static const Register kShapeReg = EDX;
static const Register kTemp1Reg = EBX;
static const Register kTemp2Reg = EDI;
};
@ -265,7 +264,7 @@ struct AllocateRecordABI {
// AllocateRecord3, AllocateRecord3Named).
struct AllocateSmallRecordABI {
static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kFieldNamesReg = EBX;
static const Register kShapeReg = EBX;
static const Register kValue0Reg = ECX;
static const Register kValue1Reg = EDX;
static const Register kValue2Reg = kNoRegister;


@ -375,17 +375,16 @@ struct AllocateArrayABI {
// ABI for AllocateRecordStub.
struct AllocateRecordABI {
static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kNumFieldsReg = T2;
static const Register kFieldNamesReg = T1;
static const Register kTemp1Reg = T3;
static const Register kTemp2Reg = T4;
static const Register kShapeReg = T1;
static const Register kTemp1Reg = T2;
static const Register kTemp2Reg = T3;
};
// ABI for AllocateSmallRecordStub (AllocateRecord2, AllocateRecord2Named,
// AllocateRecord3, AllocateRecord3Named).
struct AllocateSmallRecordABI {
static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kFieldNamesReg = T2;
static const Register kShapeReg = T2;
static const Register kValue0Reg = T3;
static const Register kValue1Reg = T4;
static const Register kValue2Reg = A1;


@ -336,8 +336,7 @@ struct AllocateArrayABI {
// ABI for AllocateRecordStub.
struct AllocateRecordABI {
static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kNumFieldsReg = R10;
static const Register kFieldNamesReg = RBX;
static const Register kShapeReg = RBX;
static const Register kTemp1Reg = RDX;
static const Register kTemp2Reg = RCX;
};
@ -346,7 +345,7 @@ struct AllocateRecordABI {
// AllocateRecord3, AllocateRecord3Named).
struct AllocateSmallRecordABI {
static const Register kResultReg = AllocateObjectABI::kResultReg;
static const Register kFieldNamesReg = R10;
static const Register kShapeReg = R10;
static const Register kValue0Reg = RBX;
static const Register kValue1Reg = RDX;
static const Register kValue2Reg = RCX;


@ -228,7 +228,7 @@ void DeferredObject::Create() {
switch (cls.id()) {
case kContextCid: {
const intptr_t num_variables =
Smi::Cast(Object::Handle(GetLength())).Value();
Smi::Cast(Object::Handle(GetLengthOrShape())).Value();
if (FLAG_trace_deoptimization_verbose) {
OS::PrintErr(
"materializing context of length %" Pd " (%" Px ", %" Pd " vars)\n",
@ -238,7 +238,7 @@ void DeferredObject::Create() {
} break;
case kArrayCid: {
const intptr_t num_elements =
Smi::Cast(Object::Handle(GetLength())).Value();
Smi::Cast(Object::Handle(GetLengthOrShape())).Value();
if (FLAG_trace_deoptimization_verbose) {
OS::PrintErr("materializing array of length %" Pd " (%" Px ", %" Pd
" elements)\n",
@ -248,20 +248,18 @@ void DeferredObject::Create() {
object_ = &Array::ZoneHandle(Array::New(num_elements));
} break;
case kRecordCid: {
const intptr_t num_fields =
Smi::Cast(Object::Handle(GetLength())).Value();
const RecordShape shape(Smi::RawCast(GetLengthOrShape()));
if (FLAG_trace_deoptimization_verbose) {
OS::PrintErr("materializing record of length %" Pd " (%" Px ", %" Pd
" fields)\n",
num_fields, reinterpret_cast<uword>(args_), field_count_);
OS::PrintErr(
"materializing record of shape %" Px " (%" Px ", %" Pd " fields)\n",
shape.AsInt(), reinterpret_cast<uword>(args_), field_count_);
}
object_ =
&Record::ZoneHandle(Record::New(num_fields, Object::empty_array()));
object_ = &Record::ZoneHandle(Record::New(shape));
} break;
default:
if (IsTypedDataClassId(cls.id())) {
const intptr_t num_elements =
Smi::Cast(Object::Handle(GetLength())).Value();
Smi::Cast(Object::Handle(GetLengthOrShape())).Value();
if (FLAG_trace_deoptimization_verbose) {
OS::PrintErr("materializing typed data cid %" Pd " of length %" Pd
" (%" Px ", %" Pd " elements)\n",
@ -360,23 +358,12 @@ void DeferredObject::Fill() {
for (intptr_t i = 0; i < field_count_; i++) {
offset ^= GetFieldOffset(i);
if (offset.Value() == Record::field_names_offset()) {
// Copy field_names.
Array& field_names = Array::Handle();
field_names ^= GetValue(i);
record.set_field_names(field_names);
if (FLAG_trace_deoptimization_verbose) {
OS::PrintErr(" record@field_names (offset %" Pd ") <- %s\n",
offset.Value(), field_names.ToCString());
}
} else {
const intptr_t index = Record::field_index_at_offset(offset.Value());
value = GetValue(i);
record.SetFieldAt(index, value);
if (FLAG_trace_deoptimization_verbose) {
OS::PrintErr(" record@%" Pd " (offset %" Pd ") <- %s\n", index,
offset.Value(), value.ToCString());
}
const intptr_t index = Record::field_index_at_offset(offset.Value());
value = GetValue(i);
record.SetFieldAt(index, value);
if (FLAG_trace_deoptimization_verbose) {
OS::PrintErr(" record@%" Pd " (offset %" Pd ") <- %s\n", index,
offset.Value(), value.ToCString());
}
}
} break;

View file

@ -202,10 +202,11 @@ class DeferredObject {
enum {
kClassIndex = 0,
// Number of context variables for contexts,
// number of elements for arrays and typed data objects,
// For contexts: number of context variables.
// For arrays and typed data objects: number of elements.
// For records: shape.
// -1 otherwise.
kLengthIndex,
kLengthOrShapeIndex,
kFieldsStartIndex
};
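
As a hedged aside, the enum above implies a flat per-object layout in the deopt info. A minimal sketch in plain C++ (invented names; the two-slot field entry is inferred from the GetFieldOffset/GetValue accessors below):

#include <cstdint>
#include <vector>

// Sketch only, not VM code: one flattened materialization entry,
// [class, length-or-shape, offset0, value0, offset1, value1, ...].
struct MaterializationEntry {
  std::vector<intptr_t> slots;

  intptr_t cls() const { return slots[0]; }              // kClassIndex
  intptr_t length_or_shape() const { return slots[1]; }  // kLengthOrShapeIndex
  intptr_t field_offset(int i) const { return slots[2 + 2 * i]; }
  intptr_t field_value(int i) const { return slots[2 + 2 * i + 1]; }
};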
@ -226,7 +227,7 @@ class DeferredObject {
ObjectPtr GetClass() const { return GetArg(kClassIndex); }
ObjectPtr GetLength() const { return GetArg(kLengthIndex); }
ObjectPtr GetLengthOrShape() const { return GetArg(kLengthOrShapeIndex); }
ObjectPtr GetFieldOffset(intptr_t index) const {
return GetArg(kFieldsStartIndex + kFieldEntrySize * index + kOffsetIndex);

View file

@ -1208,7 +1208,8 @@ intptr_t DeoptInfoBuilder::EmitMaterializationArguments(intptr_t dest_index) {
MaterializeObjectInstr* mat = materializations_[i];
// Class of the instance to allocate.
AddConstant(mat->cls(), dest_index++);
AddConstant(Smi::ZoneHandle(Smi::New(mat->num_elements())), dest_index++);
AddConstant(Smi::ZoneHandle(Smi::New(mat->length_or_shape())),
dest_index++);
for (intptr_t i = 0; i < mat->InputCount(); i++) {
if (!mat->InputAt(i)->BindsToConstantNull()) {
// Emit offset-value pair.

View file

@ -2323,15 +2323,14 @@ class FieldInvalidator {
}
const Record& record = Record::Cast(value);
const intptr_t num_fields = record.num_fields();
if (num_fields != type.NumFields() ||
record.field_names() != type.field_names()) {
if (record.shape() != type.shape()) {
return false;
}
// This method can be called recursively, so cannot reuse handles.
auto& field_value = Object::Handle(zone_);
auto& field_type = AbstractType::Handle(zone_);
const intptr_t num_fields = record.num_fields();
for (intptr_t i = 0; i < num_fields; ++i) {
field_value = record.FieldAt(i);
field_type = type.FieldTypeAt(i);

View file

@ -8659,6 +8659,7 @@ bool Function::RecognizedKindForceOptimize() const {
case MethodRecognizer::kFfiAsExternalTypedDataFloat:
case MethodRecognizer::kFfiAsExternalTypedDataDouble:
case MethodRecognizer::kGetNativeField:
case MethodRecognizer::kRecord_fieldNames:
case MethodRecognizer::kRecord_numFields:
case MethodRecognizer::kUtf8DecoderScan:
case MethodRecognizer::kDouble_hashCode:
@ -20172,15 +20173,12 @@ bool Instance::RuntimeTypeIsSubtypeOf(
}
const Record& record = Record::Cast(*this);
const RecordType& record_type = RecordType::Cast(instantiated_other);
const intptr_t num_fields = record.num_fields();
ASSERT(Array::Handle(record.field_names()).IsCanonical());
ASSERT(Array::Handle(record_type.field_names()).IsCanonical());
if ((num_fields != record_type.NumFields()) ||
(record.field_names() != record_type.field_names())) {
if (record.shape() != record_type.shape()) {
return false;
}
Instance& field_value = Instance::Handle(zone);
AbstractType& field_type = AbstractType::Handle(zone);
const intptr_t num_fields = record.num_fields();
for (intptr_t i = 0; i < num_fields; ++i) {
field_value ^= record.FieldAt(i);
field_type = record_type.FieldTypeAt(i);
@ -27598,23 +27596,12 @@ void RecordType::set_field_types(const Array& value) const {
untag()->set_field_types(value.ptr());
}
StringPtr RecordType::FieldNameAt(intptr_t index) const {
const Array& field_names = Array::Handle(untag()->field_names());
return String::RawCast(field_names.At(index));
void RecordType::set_shape(RecordShape shape) const {
untag()->set_shape(shape.AsSmi());
}
void RecordType::SetFieldNameAt(intptr_t index, const String& value) const {
ASSERT(!value.IsNull());
ASSERT(value.IsSymbol());
const Array& field_names = Array::Handle(untag()->field_names());
field_names.SetAt(index, value);
}
void RecordType::set_field_names(const Array& value) const {
ASSERT(!value.IsNull());
ASSERT(value.IsImmutable());
ASSERT(value.ptr() == Object::empty_array().ptr() || value.Length() > 0);
untag()->set_field_names(value.ptr());
ArrayPtr RecordType::GetFieldNames(Thread* thread) const {
return shape().GetFieldNames(thread);
}
void RecordType::Print(NameVisibility name_visibility,
@ -27628,7 +27615,8 @@ void RecordType::Print(NameVisibility name_visibility,
AbstractType& type = AbstractType::Handle(zone);
String& name = String::Handle(zone);
const intptr_t num_fields = NumFields();
const intptr_t num_positional_fields = NumPositionalFields();
const Array& field_names = Array::Handle(zone, GetFieldNames(thread));
const intptr_t num_positional_fields = num_fields - field_names.Length();
printer->AddString("(");
for (intptr_t i = 0; i < num_fields; ++i) {
if (i != 0) {
@ -27641,7 +27629,7 @@ void RecordType::Print(NameVisibility name_visibility,
type.PrintName(name_visibility, printer);
if (i >= num_positional_fields) {
printer->AddString(" ");
name = FieldNameAt(i - num_positional_fields);
name ^= field_names.At(i - num_positional_fields);
printer->AddString(name.ToCString());
}
}
@ -27680,14 +27668,14 @@ RecordTypePtr RecordType::New(Heap::Space space) {
return static_cast<RecordTypePtr>(raw);
}
RecordTypePtr RecordType::New(const Array& field_types,
const Array& field_names,
RecordTypePtr RecordType::New(RecordShape shape,
const Array& field_types,
Nullability nullability,
Heap::Space space) {
Zone* Z = Thread::Current()->zone();
const RecordType& result = RecordType::Handle(Z, RecordType::New(space));
result.set_shape(shape);
result.set_field_types(field_types);
result.set_field_names(field_names);
result.SetHash(0);
result.set_flags(0);
result.set_nullability(nullability);
@ -27737,9 +27725,9 @@ bool RecordType::IsEquivalent(const Instance& other,
return false;
}
const RecordType& other_type = RecordType::Cast(other);
if ((NumFields() != other_type.NumFields()) ||
(NumNamedFields() != other_type.NumNamedFields())) {
// Different number of positional or named fields.
// Equal record types must have the same shape
// (number of fields and named fields).
if (shape() != other_type.shape()) {
return false;
}
Thread* thread = Thread::Current();
@ -27747,7 +27735,7 @@ bool RecordType::IsEquivalent(const Instance& other,
if (!IsNullabilityEquivalent(thread, other_type, kind)) {
return false;
}
// Equal record types must have equal field types and names.
// Equal record types must have equal field types.
AbstractType& field_type = Type::Handle(zone);
AbstractType& other_field_type = Type::Handle(zone);
const intptr_t num_fields = NumFields();
@ -27758,14 +27746,6 @@ bool RecordType::IsEquivalent(const Instance& other,
return false;
}
}
if (field_names() != other_type.field_names()) {
const intptr_t num_named_fields = NumNamedFields();
for (intptr_t i = 0; i < num_named_fields; ++i) {
if (FieldNameAt(i) != other_type.FieldNameAt(i)) {
return false;
}
}
}
return true;
}
@ -27779,20 +27759,13 @@ uword RecordType::ComputeHash() const {
type_nullability = Nullability::kNonNullable;
}
result = CombineHashes(result, static_cast<uint32_t>(type_nullability));
result = CombineHashes(result, static_cast<uint32_t>(shape().AsInt()));
AbstractType& type = AbstractType::Handle();
const intptr_t num_fields = NumFields();
for (intptr_t i = 0; i < num_fields; ++i) {
type = FieldTypeAt(i);
result = CombineHashes(result, type.Hash());
}
const intptr_t num_named_fields = NumNamedFields();
if (num_named_fields > 0) {
String& field_name = String::Handle();
for (intptr_t i = 0; i < num_named_fields; ++i) {
field_name = FieldNameAt(i);
result = CombineHashes(result, field_name.Hash());
}
}
result = FinalizeHash(result, kHashBits);
SetHash(result);
return result;
@ -27837,7 +27810,6 @@ AbstractTypePtr RecordType::Canonicalize(Thread* thread, TrailPtr trail) const {
#ifdef DEBUG
// Verify that all fields are allocated in old space and are canonical.
ASSERT(Array::Handle(zone, field_types()).IsOld());
ASSERT(Array::Handle(zone, field_names()).IsOld());
const intptr_t num_fields = NumFields();
for (intptr_t i = 0; i < num_fields; ++i) {
type = FieldTypeAt(i);
@ -27858,7 +27830,6 @@ AbstractTypePtr RecordType::Canonicalize(Thread* thread, TrailPtr trail) const {
}
if (rec.IsNull()) {
ASSERT(Array::Handle(zone, field_types()).IsOld());
ASSERT(Array::Handle(zone, field_names()).IsOld());
const intptr_t num_fields = NumFields();
for (intptr_t i = 0; i < num_fields; ++i) {
type = FieldTypeAt(i);
@ -27958,8 +27929,7 @@ AbstractTypePtr RecordType::InstantiateFrom(
}
const auto& rec = RecordType::Handle(
zone, RecordType::New(new_field_types, Array::Handle(zone, field_names()),
nullability(), space));
zone, RecordType::New(shape(), new_field_types, nullability(), space));
if (IsFinalized()) {
rec.SetIsFinalized();
@ -27980,8 +27950,7 @@ bool RecordType::IsSubtypeOf(const RecordType& other, Heap::Space space) const {
ASSERT(IsFinalized());
ASSERT(other.IsFinalized());
const intptr_t num_fields = NumFields();
if ((num_fields != other.NumFields()) ||
(field_names() != other.field_names())) {
if (shape() != other.shape()) {
// Different number of fields or different named fields.
return false;
}
@ -28003,26 +27972,8 @@ bool RecordType::IsSubtypeOf(const RecordType& other, Heap::Space space) const {
return true;
}
intptr_t Record::NumNamedFields() const {
return Array::LengthOf(field_names());
}
intptr_t Record::NumPositionalFields() const {
return num_fields() - NumNamedFields();
}
void Record::set_field_names(const Array& field_names) const {
ASSERT(!field_names.IsNull());
ASSERT(field_names.IsCanonical());
ASSERT(field_names.IsImmutable());
ASSERT(field_names.ptr() == Object::empty_array().ptr() ||
field_names.Length() > 0);
untag()->set_field_names(field_names.ptr());
}
RecordPtr Record::New(intptr_t num_fields,
const Array& field_names,
Heap::Space space) {
RecordPtr Record::New(RecordShape shape, Heap::Space space) {
const intptr_t num_fields = shape.num_fields();
ASSERT(num_fields >= 0);
Record& result = Record::Handle();
{
@ -28030,10 +27981,9 @@ RecordPtr Record::New(intptr_t num_fields,
Object::Allocate(Record::kClassId, Record::InstanceSize(num_fields),
space, Record::ContainsCompressedPointers()));
NoSafepointScope no_safepoint;
raw->untag()->set_num_fields(Smi::New(num_fields));
raw->untag()->set_shape(shape.AsSmi());
result ^= raw;
}
result.set_field_names(field_names);
return result.ptr();
}
@ -28041,11 +27991,12 @@ const char* Record::ToCString() const {
if (IsNull()) {
return "Record: null";
}
Zone* zone = Thread::Current()->zone();
Thread* thread = Thread::Current();
Zone* zone = thread->zone();
ZoneTextBuffer printer(zone);
const intptr_t num_fields = this->num_fields();
const intptr_t num_positional_fields = NumPositionalFields();
const Array& field_names = Array::Handle(zone, this->field_names());
const Array& field_names = Array::Handle(zone, GetFieldNames(thread));
const intptr_t num_positional_fields = num_fields - field_names.Length();
Object& obj = Object::Handle(zone);
printer.AddString("Record (");
for (intptr_t i = 0; i < num_fields; ++i) {
@ -28074,16 +28025,11 @@ bool Record::CanonicalizeEquals(const Instance& other) const {
}
const Record& other_rec = Record::Cast(other);
if (shape() != other_rec.shape()) {
return false;
}
const intptr_t num_fields = this->num_fields();
if (num_fields != other_rec.num_fields()) {
return false;
}
if (field_names() != other_rec.field_names()) {
return false;
}
for (intptr_t i = 0; i < num_fields; ++i) {
if (this->FieldAt(i) != other_rec.FieldAt(i)) {
return false;
@ -28098,10 +28044,9 @@ uint32_t Record::CanonicalizeHash() const {
if (hash != 0) {
return hash;
}
hash = shape().AsInt();
Instance& element = Instance::Handle();
const intptr_t num_fields = this->num_fields();
hash = num_fields;
Instance& element = Instance::Handle(field_names());
hash = CombineHashes(hash, element.CanonicalizeHash());
for (intptr_t i = 0; i < num_fields; ++i) {
element ^= FieldAt(i);
hash = CombineHashes(hash, element.CanonicalizeHash());
@ -28134,8 +28079,7 @@ RecordTypePtr Record::GetRecordType() const {
type = obj.GetType(Heap::kNew);
field_types.SetAt(i, type);
}
const Array& field_names = Array::Handle(zone, this->field_names());
type = RecordType::New(field_types, field_names, Nullability::kNonNullable);
type = RecordType::New(shape(), field_types, Nullability::kNonNullable);
type = ClassFinalizer::FinalizeType(type);
return RecordType::Cast(type).ptr();
}
@ -28155,21 +28099,137 @@ intptr_t Record::GetPositionalFieldIndexFromFieldName(
return -1;
}
intptr_t Record::GetFieldIndexByName(const String& field_name) const {
intptr_t Record::GetFieldIndexByName(Thread* thread,
const String& field_name) const {
ASSERT(field_name.IsSymbol());
const intptr_t field_index =
Record::GetPositionalFieldIndexFromFieldName(field_name);
if ((field_index >= 0) && (field_index < NumPositionalFields())) {
const Array& field_names = Array::Handle(GetFieldNames(thread));
const intptr_t num_positional_fields = num_fields() - field_names.Length();
if ((field_index >= 0) && (field_index < num_positional_fields)) {
return field_index;
} else {
const Array& field_names = Array::Handle(this->field_names());
for (intptr_t i = 0, n = field_names.Length(); i < n; ++i) {
if (field_names.At(i) == field_name.ptr()) {
return NumPositionalFields() + i;
return num_positional_fields + i;
}
}
}
return -1;
}
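
As an aside, the "$0"/"$1" positional names handled above can be parsed by a small standalone routine. This sketch is illustrative only; the real logic lives in Record::GetPositionalFieldIndexFromFieldName, shown truncated earlier:

#include <cstdint>

// Map "$<digits>" to a field index, or -1 if the name has a different form.
// The caller still range-checks the result against the number of positional
// fields, as GetFieldIndexByName does above.
intptr_t PositionalIndexFromName(const char* name) {
  if (name[0] != '$' || name[1] == '\0') return -1;
  intptr_t index = 0;
  for (const char* p = name + 1; *p != '\0'; ++p) {
    if (*p < '0' || *p > '9') return -1;
    index = index * 10 + (*p - '0');
  }
  return index;
}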
class RecordFieldNamesMapTraits {
public:
static const char* Name() { return "RecordFieldNamesMapTraits"; }
static bool ReportStats() { return false; }
static bool IsMatch(const Object& a, const Object& b) {
return Array::Cast(a).CanonicalizeEquals(Array::Cast(b));
}
static uword Hash(const Object& key) {
return Array::Cast(key).CanonicalizeHash();
}
static ObjectPtr NewKey(const Array& arr) { return arr.ptr(); }
};
typedef UnorderedHashMap<RecordFieldNamesMapTraits> RecordFieldNamesMap;
RecordShape RecordShape::Register(Thread* thread,
intptr_t num_fields,
const Array& field_names) {
ASSERT(!field_names.IsNull());
ASSERT(field_names.IsImmutable());
ASSERT(field_names.ptr() == Object::empty_array().ptr() ||
field_names.Length() > 0);
Zone* zone = thread->zone();
IsolateGroup* isolate_group = thread->isolate_group();
ObjectStore* object_store = isolate_group->object_store();
if (object_store->record_field_names<std::memory_order_acquire>() ==
Array::null()) {
// First-time initialization.
SafepointWriteRwLocker ml(thread, isolate_group->program_lock());
if (object_store->record_field_names() == Array::null()) {
// Reserve record field names index 0 for records without named fields.
RecordFieldNamesMap map(
HashTables::New<RecordFieldNamesMap>(16, Heap::kOld));
map.InsertOrGetValue(Object::empty_array(),
Smi::Handle(zone, Smi::New(0)));
ASSERT(map.NumOccupied() == 1);
object_store->set_record_field_names_map(map.Release());
const auto& table = Array::Handle(zone, Array::New(16));
table.SetAt(0, Object::empty_array());
object_store->set_record_field_names<std::memory_order_release>(table);
}
}
#if defined(DART_PRECOMPILER)
const intptr_t kMaxNumFields = compiler::target::RecordShape::kMaxNumFields;
const intptr_t kMaxFieldNamesIndex =
compiler::target::RecordShape::kMaxFieldNamesIndex;
#else
const intptr_t kMaxNumFields = RecordShape::kMaxNumFields;
const intptr_t kMaxFieldNamesIndex = RecordShape::kMaxFieldNamesIndex;
#endif
if (num_fields > kMaxNumFields) {
FATAL("Too many record fields");
}
if (field_names.ptr() == Object::empty_array().ptr()) {
return RecordShape::ForUnnamed(num_fields);
}
{
SafepointReadRwLocker ml(thread, isolate_group->program_lock());
RecordFieldNamesMap map(object_store->record_field_names_map());
Smi& index = Smi::Handle(zone);
index ^= map.GetOrNull(field_names);
ASSERT(map.Release().ptr() == object_store->record_field_names_map());
if (!index.IsNull()) {
return RecordShape(num_fields, index.Value());
}
}
SafepointWriteRwLocker ml(thread, isolate_group->program_lock());
RecordFieldNamesMap map(object_store->record_field_names_map());
const intptr_t new_index = map.NumOccupied();
if (new_index > kMaxFieldNamesIndex) {
FATAL("Too many record shapes");
}
const intptr_t index = Smi::Value(Smi::RawCast(map.InsertOrGetValue(
field_names, Smi::Handle(zone, Smi::New(new_index)))));
ASSERT(index > 0);
if (index == new_index) {
ASSERT(map.NumOccupied() == (new_index + 1));
Array& table = Array::Handle(zone, object_store->record_field_names());
intptr_t capacity = table.Length();
if (index >= table.Length()) {
capacity = capacity + (capacity >> 2);
table = Array::Grow(table, capacity);
object_store->set_record_field_names(table);
}
table.SetAt(index, field_names);
} else {
ASSERT(index < new_index);
}
object_store->set_record_field_names_map(map.Release());
const RecordShape shape(num_fields, index);
ASSERT(shape.GetFieldNames(thread) == field_names.ptr());
ASSERT(shape.num_fields() == num_fields);
return shape;
}
ArrayPtr RecordShape::GetFieldNames(Thread* thread) const {
ObjectStore* object_store = thread->isolate_group()->object_store();
Array& table =
Array::Handle(thread->zone(), object_store->record_field_names());
ASSERT(!table.IsNull());
return Array::RawCast(table.At(field_names_index()));
}
} // namespace dart
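
Aside: RecordShape::Register above is a classic double-checked initialization, with an acquire load on the fast path and a re-check under the program write lock before a release-store publishes the table. A minimal standalone analogue of that pattern (std::atomic and std::mutex stand in for the VM's ObjectStore field and program lock; names invented):

#include <atomic>
#include <mutex>

std::atomic<int*> g_table{nullptr};
std::mutex g_lock;

int* GetOrCreateTable() {
  // Fast path: the acquire load pairs with the release store below, so a
  // non-null result is always a fully initialized table.
  int* table = g_table.load(std::memory_order_acquire);
  if (table == nullptr) {
    std::lock_guard<std::mutex> guard(g_lock);
    table = g_table.load(std::memory_order_relaxed);
    if (table == nullptr) {  // Re-check: another thread may have won the race.
      table = new int[16]();  // First-time initialization.
      g_table.store(table, std::memory_order_release);
    }
  }
  return table;
}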

View file

@ -9136,90 +9136,6 @@ class FunctionType : public AbstractType {
friend class Function;
};
// A RecordType represents the type of a record. It describes the
// number of named and positional fields, the field types, and the
// names of the named fields.
class RecordType : public AbstractType {
public:
static intptr_t hash_offset() { return OFFSET_OF(UntaggedRecordType, hash_); }
virtual bool HasTypeClass() const { return false; }
RecordTypePtr ToNullability(Nullability value, Heap::Space space) const;
virtual classid_t type_class_id() const { return kIllegalCid; }
virtual bool IsInstantiated(Genericity genericity = kAny,
intptr_t num_free_fun_type_params = kAllFree,
TrailPtr trail = nullptr) const;
virtual bool IsEquivalent(const Instance& other,
TypeEquality kind,
TrailPtr trail = nullptr) const;
virtual bool IsRecursive(TrailPtr trail = nullptr) const;
virtual bool RequireConstCanonicalTypeErasure(Zone* zone,
TrailPtr trail = nullptr) const;
virtual AbstractTypePtr InstantiateFrom(
const TypeArguments& instantiator_type_arguments,
const TypeArguments& function_type_arguments,
intptr_t num_free_fun_type_params,
Heap::Space space,
TrailPtr trail = nullptr) const;
virtual AbstractTypePtr Canonicalize(Thread* thread, TrailPtr trail) const;
#if defined(DEBUG)
// Check if type is canonical.
virtual bool CheckIsCanonical(Thread* thread) const;
#endif // DEBUG
virtual void EnumerateURIs(URIs* uris) const;
virtual void PrintName(NameVisibility visibility,
BaseTextBuffer* printer) const;
virtual uword Hash() const;
uword ComputeHash() const;
bool IsSubtypeOf(const RecordType& other, Heap::Space space) const;
ArrayPtr field_types() const {
return untag()->field_types();
}
AbstractTypePtr FieldTypeAt(intptr_t index) const;
void SetFieldTypeAt(intptr_t index, const AbstractType& value) const;
// Names of the named fields, sorted.
ArrayPtr field_names() const {
return untag()->field_names();
}
StringPtr FieldNameAt(intptr_t index) const;
void SetFieldNameAt(intptr_t index, const String& value) const;
intptr_t NumFields() const;
intptr_t NumNamedFields() const;
intptr_t NumPositionalFields() const;
void Print(NameVisibility name_visibility, BaseTextBuffer* printer) const;
static intptr_t InstanceSize() {
return RoundedAllocationSize(sizeof(UntaggedRecordType));
}
static RecordTypePtr New(const Array& field_types,
const Array& field_names,
Nullability nullability = Nullability::kLegacy,
Heap::Space space = Heap::kOld);
private:
void SetHash(intptr_t value) const;
void set_field_types(const Array& value) const;
void set_field_names(const Array& value) const;
static RecordTypePtr New(Heap::Space space);
FINAL_HEAP_OBJECT_IMPLEMENTATION(RecordType, AbstractType);
friend class Class;
friend class ClassFinalizer;
friend class ClearTypeHashVisitor;
friend class Record;
};
// A TypeRef is used to break cycles in the representation of recursive types.
// Its only field is the recursive AbstractType it refers to, which can
// temporarily be null during finalization.
@ -10956,23 +10872,160 @@ class Float64x2 : public Instance {
friend class Class;
};
// Packed representation of record shape (number of fields and field names).
class RecordShape {
enum {
kNumFieldsBits = 16,
kFieldNamesIndexBits = kSmiBits - kNumFieldsBits,
};
using NumFieldsBitField = BitField<intptr_t, intptr_t, 0, kNumFieldsBits>;
using FieldNamesIndexBitField = BitField<intptr_t,
intptr_t,
NumFieldsBitField::kNextBit,
kFieldNamesIndexBits>;
public:
static constexpr intptr_t kNumFieldsMask = NumFieldsBitField::mask();
static constexpr intptr_t kMaxNumFields = kNumFieldsMask;
static constexpr intptr_t kFieldNamesIndexMask =
FieldNamesIndexBitField::mask();
static constexpr intptr_t kFieldNamesIndexShift =
FieldNamesIndexBitField::shift();
static constexpr intptr_t kMaxFieldNamesIndex = kFieldNamesIndexMask;
explicit RecordShape(intptr_t value) : value_(value) { ASSERT(value_ >= 0); }
explicit RecordShape(SmiPtr smi_value) : value_(Smi::Value(smi_value)) {
ASSERT(value_ >= 0);
}
RecordShape(intptr_t num_fields, intptr_t field_names_index)
: value_(NumFieldsBitField::encode(num_fields) |
FieldNamesIndexBitField::encode(field_names_index)) {
ASSERT(value_ >= 0);
}
static RecordShape ForUnnamed(intptr_t num_fields) {
return RecordShape(num_fields, 0);
}
bool HasNamedFields() const { return field_names_index() != 0; }
intptr_t num_fields() const { return NumFieldsBitField::decode(value_); }
intptr_t field_names_index() const {
return FieldNamesIndexBitField::decode(value_);
}
SmiPtr AsSmi() const { return Smi::New(value_); }
intptr_t AsInt() const { return value_; }
bool operator==(const RecordShape& other) const {
return value_ == other.value_;
}
bool operator!=(const RecordShape& other) const {
return value_ != other.value_;
}
// Registers a record shape with the given [num_fields] and [field_names]
// in the current isolate group.
static RecordShape Register(Thread* thread,
intptr_t num_fields,
const Array& field_names);
// Retrieves an array of field names.
ArrayPtr GetFieldNames(Thread* thread) const;
private:
intptr_t value_;
DISALLOW_ALLOCATION();
};
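
As a self-contained illustration of the packing above, using plain shifts and masks instead of the VM's BitField helpers (kSmiBits is fixed at 30 here purely for concreteness; the VM derives it from the target word size):

#include <cstdint>

constexpr intptr_t kSmiBits = 30;  // Assumed; target-dependent in the VM.
constexpr intptr_t kNumFieldsBits = 16;
constexpr intptr_t kFieldNamesIndexBits = kSmiBits - kNumFieldsBits;
constexpr intptr_t kNumFieldsMask = (intptr_t{1} << kNumFieldsBits) - 1;
constexpr intptr_t kFieldNamesIndexMask =
    (intptr_t{1} << kFieldNamesIndexBits) - 1;

constexpr intptr_t EncodeShape(intptr_t num_fields, intptr_t names_index) {
  return num_fields | (names_index << kNumFieldsBits);
}
constexpr intptr_t NumFields(intptr_t shape) { return shape & kNumFieldsMask; }
constexpr intptr_t NamesIndex(intptr_t shape) {
  return (shape >> kNumFieldsBits) & kFieldNamesIndexMask;
}

static_assert(NumFields(EncodeShape(3, 5)) == 3, "round-trip num_fields");
static_assert(NamesIndex(EncodeShape(3, 5)) == 5, "round-trip names index");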
// A RecordType represents the type of a record. It describes the
// number of named and positional fields, the field types, and the
// names of the named fields.
class RecordType : public AbstractType {
public:
static intptr_t hash_offset() { return OFFSET_OF(UntaggedRecordType, hash_); }
virtual bool HasTypeClass() const { return false; }
RecordTypePtr ToNullability(Nullability value, Heap::Space space) const;
virtual classid_t type_class_id() const { return kIllegalCid; }
virtual bool IsInstantiated(Genericity genericity = kAny,
intptr_t num_free_fun_type_params = kAllFree,
TrailPtr trail = nullptr) const;
virtual bool IsEquivalent(const Instance& other,
TypeEquality kind,
TrailPtr trail = nullptr) const;
virtual bool IsRecursive(TrailPtr trail = nullptr) const;
virtual bool RequireConstCanonicalTypeErasure(Zone* zone,
TrailPtr trail = nullptr) const;
virtual AbstractTypePtr InstantiateFrom(
const TypeArguments& instantiator_type_arguments,
const TypeArguments& function_type_arguments,
intptr_t num_free_fun_type_params,
Heap::Space space,
TrailPtr trail = nullptr) const;
virtual AbstractTypePtr Canonicalize(Thread* thread, TrailPtr trail) const;
#if defined(DEBUG)
// Check if type is canonical.
virtual bool CheckIsCanonical(Thread* thread) const;
#endif // DEBUG
virtual void EnumerateURIs(URIs* uris) const;
virtual void PrintName(NameVisibility visibility,
BaseTextBuffer* printer) const;
virtual uword Hash() const;
uword ComputeHash() const;
bool IsSubtypeOf(const RecordType& other, Heap::Space space) const;
RecordShape shape() const { return RecordShape(untag()->shape()); }
ArrayPtr field_types() const { return untag()->field_types(); }
AbstractTypePtr FieldTypeAt(intptr_t index) const;
void SetFieldTypeAt(intptr_t index, const AbstractType& value) const;
// Names of the named fields, sorted.
ArrayPtr GetFieldNames(Thread* thread) const;
intptr_t NumFields() const;
void Print(NameVisibility name_visibility, BaseTextBuffer* printer) const;
static intptr_t InstanceSize() {
return RoundedAllocationSize(sizeof(UntaggedRecordType));
}
static RecordTypePtr New(RecordShape shape,
const Array& field_types,
Nullability nullability = Nullability::kLegacy,
Heap::Space space = Heap::kOld);
private:
void SetHash(intptr_t value) const;
void set_shape(RecordShape shape) const;
void set_field_types(const Array& value) const;
static RecordTypePtr New(Heap::Space space);
FINAL_HEAP_OBJECT_IMPLEMENTATION(RecordType, AbstractType);
friend class Class;
friend class ClassFinalizer;
friend class ClearTypeHashVisitor;
friend class Record;
};
class Record : public Instance {
public:
intptr_t num_fields() const { return NumFields(ptr()); }
static intptr_t NumFields(RecordPtr ptr) {
return Smi::Value(ptr->untag()->num_fields());
}
static intptr_t num_fields_offset() {
return OFFSET_OF(UntaggedRecord, num_fields_);
return RecordShape(ptr->untag()->shape()).num_fields();
}
intptr_t NumNamedFields() const;
intptr_t NumPositionalFields() const;
ArrayPtr field_names() const { return untag()->field_names(); }
static intptr_t field_names_offset() {
return OFFSET_OF(UntaggedRecord, field_names_);
}
RecordShape shape() const { return RecordShape(untag()->shape()); }
static intptr_t shape_offset() { return OFFSET_OF(UntaggedRecord, shape_); }
ObjectPtr FieldAt(intptr_t field_index) const {
return untag()->field(field_index);
@ -10982,7 +11035,7 @@ class Record : public Instance {
}
static const intptr_t kBytesPerElement = kCompressedWordSize;
static const intptr_t kMaxElements = kSmiMax / kBytesPerElement;
static const intptr_t kMaxElements = RecordShape::kMaxNumFields;
struct ArrayTraits {
static intptr_t elements_start_offset() { return sizeof(UntaggedRecord); }
@ -11012,9 +11065,7 @@ class Record : public Instance {
(num_fields * kBytesPerElement));
}
static RecordPtr New(intptr_t num_fields,
const Array& field_names,
Heap::Space space = Heap::kNew);
static RecordPtr New(RecordShape shape, Heap::Space space = Heap::kNew);
virtual bool CanonicalizeEquals(const Instance& other) const;
virtual uint32_t CanonicalizeHash() const;
@ -11034,14 +11085,15 @@ class Record : public Instance {
// Returns the index of the field with the given name, or -1
// if no such field exists.
// Supports positional field names ("$0", "$1", etc).
intptr_t GetFieldIndexByName(const String& field_name) const;
intptr_t GetFieldIndexByName(Thread* thread, const String& field_name) const;
ArrayPtr GetFieldNames(Thread* thread) const {
return shape().GetFieldNames(thread);
}
private:
void set_field_names(const Array& field_names) const;
FINAL_HEAP_OBJECT_IMPLEMENTATION(Record, Instance);
friend class Class;
friend class DeferredObject; // For set_field_names.
friend class Object;
};
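
A hedged usage sketch of the resulting API (handle scopes, safepoints, and error handling elided; MakePair is an invented helper, not part of this change): the shape is obtained once, via ForUnnamed for positional-only records or RecordShape::Register for named ones, and instances are then allocated from the packed shape alone.

// Invented helper, not VM code: allocate an unnamed two-field record.
RecordPtr MakePair(const Instance& first, const Instance& second) {
  const RecordShape shape = RecordShape::ForUnnamed(2);
  const Record& record = Record::Handle(Record::New(shape));
  record.SetFieldAt(0, first);
  record.SetFieldAt(1, second);
  return record.ptr();
}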
@ -12971,14 +13023,6 @@ inline intptr_t RecordType::NumFields() const {
return Array::LengthOf(field_types());
}
inline intptr_t RecordType::NumNamedFields() const {
return Array::LengthOf(field_names());
}
inline intptr_t RecordType::NumPositionalFields() const {
return NumFields() - NumNamedFields();
}
inline uword TypeParameter::Hash() const {
ASSERT(IsFinalized() || IsBeingFinalized()); // Bound may not be finalized.
intptr_t result = Smi::Value(untag()->hash());

View file

@ -283,8 +283,8 @@ void UpdateLengthField(intptr_t cid, ObjectPtr from, ObjectPtr to) {
static_cast<UntaggedTypedDataBase*>(to.untag())->length_ =
static_cast<UntaggedTypedDataBase*>(from.untag())->length_;
} else if (cid == kRecordCid) {
static_cast<UntaggedRecord*>(to.untag())->num_fields_ =
static_cast<UntaggedRecord*>(from.untag())->num_fields_;
static_cast<UntaggedRecord*>(to.untag())->shape_ =
static_cast<UntaggedRecord*>(from.untag())->shape_;
}
}
@ -1425,11 +1425,9 @@ class ObjectCopy : public Base {
void CopyRecord(typename Types::Record from, typename Types::Record to) {
const intptr_t num_fields = Record::NumFields(Types::GetRecordPtr(from));
Base::StoreCompressedPointersNoBarrier(
from, to, OFFSET_OF(UntaggedRecord, num_fields_),
OFFSET_OF(UntaggedRecord, num_fields_));
Base::ForwardCompressedPointer(from, to,
OFFSET_OF(UntaggedRecord, field_names_));
Base::StoreCompressedPointersNoBarrier(from, to,
OFFSET_OF(UntaggedRecord, shape_),
OFFSET_OF(UntaggedRecord, shape_));
Base::ForwardCompressedPointers(
from, to, Record::field_offset(0),
Record::field_offset(0) + Record::kBytesPerElement * num_fields);

View file

@ -1229,7 +1229,8 @@ void RecordType::PrintJSONImpl(JSONStream* stream, bool ref) const {
String& name = String::Handle();
AbstractType& type = AbstractType::Handle();
const intptr_t num_fields = NumFields();
const intptr_t num_positional_fields = NumPositionalFields();
const Array& field_names = Array::Handle(GetFieldNames(Thread::Current()));
const intptr_t num_positional_fields = num_fields - field_names.Length();
for (intptr_t index = 0; index < num_fields; ++index) {
JSONObject jsfield(&jsarr);
// TODO(derekx): Remove this because BoundField isn't a response type in
@ -1238,7 +1239,7 @@ void RecordType::PrintJSONImpl(JSONStream* stream, bool ref) const {
if (index < num_positional_fields) {
jsfield.AddProperty("name", index);
} else {
name = FieldNameAt(index - num_positional_fields);
name ^= field_names.At(index - num_positional_fields);
jsfield.AddProperty("name", name.ToCString());
}
type = FieldTypeAt(index);
@ -1639,8 +1640,8 @@ void Record::PrintJSONImpl(JSONStream* stream, bool ref) const {
String& name = String::Handle();
Object& value = Object::Handle();
const intptr_t num_fields = this->num_fields();
const intptr_t num_positional_fields = NumPositionalFields();
const Array& field_names = Array::Handle(this->field_names());
const Array& field_names = Array::Handle(GetFieldNames(Thread::Current()));
const intptr_t num_positional_fields = num_fields - field_names.Length();
for (intptr_t index = 0; index < num_fields; ++index) {
JSONObject jsfield(&jsarr);
// TODO(derekx): Remove this because BoundField isn't a response type in

View file

@ -167,6 +167,8 @@ class ObjectPointerVisitor;
RW(Array, loading_units) \
RW(GrowableObjectArray, closure_functions) \
RW(GrowableObjectArray, pending_classes) \
RW(Array, record_field_names_map) \
ARW_RELAXED(Array, record_field_names) \
RW(Instance, stack_overflow) \
RW(Instance, out_of_memory) \
RW(Function, growable_list_factory) \

View file

@ -160,7 +160,8 @@ intptr_t UntaggedObject::HeapSizeFromClass(uword tags) const {
}
case kRecordCid: {
const RecordPtr raw_record = static_cast<const RecordPtr>(this);
intptr_t num_fields = Smi::Value(raw_record->untag()->num_fields());
intptr_t num_fields =
RecordShape(raw_record->untag()->shape()).num_fields();
instance_size = Record::InstanceSize(num_fields);
break;
}
@ -576,7 +577,8 @@ VARIABLE_COMPRESSED_VISITOR(
TypedData::ElementSizeInBytes(raw_obj->GetClassId()) *
Smi::Value(raw_obj->untag()->length()))
VARIABLE_COMPRESSED_VISITOR(ContextScope, raw_obj->untag()->num_variables_)
VARIABLE_COMPRESSED_VISITOR(Record, Smi::Value(raw_obj->untag()->num_fields()))
VARIABLE_COMPRESSED_VISITOR(Record,
RecordShape(raw_obj->untag()->shape()).num_fields())
NULL_VISITOR(Sentinel)
REGULAR_VISITOR(InstructionsTable)
NULL_VISITOR(Mint)

View file

@ -2728,9 +2728,9 @@ class UntaggedRecordType : public UntaggedAbstractType {
private:
RAW_HEAP_OBJECT_IMPLEMENTATION(RecordType);
COMPRESSED_SMI_FIELD(SmiPtr, shape)
COMPRESSED_POINTER_FIELD(ArrayPtr, field_types)
COMPRESSED_POINTER_FIELD(ArrayPtr, field_names);
COMPRESSED_POINTER_FIELD(SmiPtr, hash)
COMPRESSED_SMI_FIELD(SmiPtr, hash)
VISIT_TO(hash)
CompressedObjectPtr* to_snapshot(Snapshot::Kind kind) { return to(); }
@ -3224,14 +3224,13 @@ COMPILE_ASSERT(sizeof(UntaggedFloat64x2) == 24);
class UntaggedRecord : public UntaggedInstance {
RAW_HEAP_OBJECT_IMPLEMENTATION(Record);
COMPRESSED_SMI_FIELD(SmiPtr, num_fields)
COMPRESSED_POINTER_FIELD(ArrayPtr, field_names)
VISIT_FROM(num_fields)
COMPRESSED_SMI_FIELD(SmiPtr, shape)
VISIT_FROM(shape)
// Variable length data follows here.
COMPRESSED_VARIABLE_POINTER_FIELDS(ObjectPtr, field, data)
friend void UpdateLengthField(intptr_t, ObjectPtr,
ObjectPtr); // num_fields_
ObjectPtr); // shape_
};
// Define aliases for intptr_t.
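
Aside: with the UntaggedRecord layout above, a record's heap size is derived purely from the shape word, as HeapSizeFromClass does earlier in this change. A sketch under assumptions (the explicit header size and 8-byte grain are placeholders; the VM uses compressed pointers and kObjectAlignment):

#include <cstddef>

constexpr size_t RoundUp(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}
// Header plus one slot per field, rounded up to the allocation grain.
constexpr size_t RecordInstanceSize(size_t header_size,
                                    size_t bytes_per_field,
                                    size_t num_fields) {
  return RoundUp(header_size + num_fields * bytes_per_field, 8);
}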

View file

@ -721,36 +721,31 @@ DEFINE_RUNTIME_ENTRY(CloneContext, 1) {
}
// Allocate a new record instance.
// Arg0: number of fields.
// Arg1: field names.
// Arg0: record shape id.
// Return value: newly allocated record.
DEFINE_RUNTIME_ENTRY(AllocateRecord, 2) {
const Smi& num_fields = Smi::CheckedHandle(zone, arguments.ArgAt(0));
const auto& field_names = Array::CheckedHandle(zone, arguments.ArgAt(1));
DEFINE_RUNTIME_ENTRY(AllocateRecord, 1) {
const RecordShape shape(Smi::RawCast(arguments.ArgAt(0)));
const Record& record =
Record::Handle(zone, Record::New(num_fields.Value(), field_names,
SpaceForRuntimeAllocation()));
Record::Handle(zone, Record::New(shape, SpaceForRuntimeAllocation()));
arguments.SetReturn(record);
}
// Allocate a new small record instance and initialize its fields.
// Arg0: number of fields.
// Arg1: field names.
// Arg2-Arg4: field values.
// Arg0: record shape id.
// Arg1-Arg3: field values.
// Return value: newly allocated record.
DEFINE_RUNTIME_ENTRY(AllocateSmallRecord, 5) {
const Smi& num_fields = Smi::CheckedHandle(zone, arguments.ArgAt(0));
const auto& field_names = Array::CheckedHandle(zone, arguments.ArgAt(1));
const auto& value0 = Instance::CheckedHandle(zone, arguments.ArgAt(2));
const auto& value1 = Instance::CheckedHandle(zone, arguments.ArgAt(3));
const auto& value2 = Instance::CheckedHandle(zone, arguments.ArgAt(4));
DEFINE_RUNTIME_ENTRY(AllocateSmallRecord, 4) {
const RecordShape shape(Smi::RawCast(arguments.ArgAt(0)));
const auto& value0 = Instance::CheckedHandle(zone, arguments.ArgAt(1));
const auto& value1 = Instance::CheckedHandle(zone, arguments.ArgAt(2));
const auto& value2 = Instance::CheckedHandle(zone, arguments.ArgAt(3));
const Record& record =
Record::Handle(zone, Record::New(num_fields.Value(), field_names,
SpaceForRuntimeAllocation()));
ASSERT(num_fields.Value() == 2 || num_fields.Value() == 3);
Record::Handle(zone, Record::New(shape, SpaceForRuntimeAllocation()));
const intptr_t num_fields = shape.num_fields();
ASSERT(num_fields == 2 || num_fields == 3);
record.SetFieldAt(0, value0);
record.SetFieldAt(1, value1);
if (num_fields.Value() > 2) {
if (num_fields > 2) {
record.SetFieldAt(2, value2);
}
arguments.SetReturn(record);
@ -2645,7 +2640,8 @@ static ObjectPtr InvokeCallThroughGetterOrNoSuchMethod(
if (receiver.IsRecord()) {
const Record& record = Record::Cast(receiver);
const intptr_t field_index = record.GetFieldIndexByName(function_name);
const intptr_t field_index =
record.GetFieldIndexByName(thread, function_name);
if (field_index >= 0) {
return record.FieldAt(field_index);
}
@ -2720,7 +2716,7 @@ static ObjectPtr InvokeCallThroughGetterOrNoSuchMethod(
if (receiver.IsRecord()) {
const Record& record = Record::Cast(receiver);
const intptr_t field_index =
record.GetFieldIndexByName(demangled_target_name);
record.GetFieldIndexByName(thread, demangled_target_name);
if (field_index >= 0) {
const Object& getter_result =
Object::Handle(zone, record.FieldAt(field_index));

View file

@ -2271,13 +2271,15 @@ static Breakpoint* LookupBreakpoint(Isolate* isolate,
}
static inline void AddParentFieldToResponseBasedOnRecord(
Thread* thread,
Array* field_names_handle,
String* name_handle,
const JSONObject& jsresponse,
const Record& record,
const intptr_t field_slot_offset) {
const intptr_t num_positional_fields = record.NumPositionalFields();
*field_names_handle = record.field_names();
*field_names_handle = record.GetFieldNames(thread);
const intptr_t num_positional_fields =
record.num_fields() - field_names_handle->Length();
const intptr_t field_index =
(field_slot_offset - Record::field_offset(0)) / Record::kBytesPerElement;
if (field_index < num_positional_fields) {
@ -2321,8 +2323,8 @@ static void PrintInboundReferences(Thread* thread,
jselement.AddProperty("parentListIndex", element_index);
jselement.AddProperty("parentField", element_index);
} else if (source.IsRecord()) {
AddParentFieldToResponseBasedOnRecord(&field_names, &name, jselement,
Record::Cast(source),
AddParentFieldToResponseBasedOnRecord(thread, &field_names, &name,
jselement, Record::Cast(source),
slot_offset.Value());
} else {
if (source.IsInstance()) {
@ -2447,8 +2449,8 @@ static void PrintRetainingPath(Thread* thread,
jselement.AddProperty("parentListIndex", element_index);
jselement.AddProperty("parentField", element_index);
} else if (element.IsRecord()) {
AddParentFieldToResponseBasedOnRecord(&field_names, &name, jselement,
Record::Cast(element),
AddParentFieldToResponseBasedOnRecord(thread, &field_names, &name,
jselement, Record::Cast(element),
slot_offset.Value());
} else if (element.IsMap()) {
map = static_cast<MapPtr>(path.At(i * 2));

View file

@ -150,6 +150,7 @@ class ObjectPointerVisitor;
V(GetLength, "get:length") \
V(GetRuntimeType, "get:runtimeType") \
V(GetterPrefix, "get:") \
V(Get_fieldNames, "get:_fieldNames") \
V(GreaterEqualOperator, ">=") \
V(HaveSameRuntimeType, "_haveSameRuntimeType") \
V(ICData, "ICData") \

View file

@ -83,8 +83,9 @@ void TypeTestingStubNamer::StringifyTypeTo(BaseTextBuffer* buffer,
const RecordType& rec = RecordType::Cast(type);
buffer->AddString("Record");
const intptr_t num_fields = rec.NumFields();
const intptr_t num_positional_fields = rec.NumPositionalFields();
const auto& field_names = Array::Handle(rec.field_names());
const auto& field_names =
Array::Handle(rec.GetFieldNames(Thread::Current()));
const intptr_t num_positional_fields = num_fields - field_names.Length();
const auto& field_types = Array::Handle(rec.field_types());
for (intptr_t i = 0; i < num_fields; ++i) {
buffer->AddString("__");
@ -707,19 +708,9 @@ void TypeTestingStubGenerator::BuildOptimizedRecordSubtypeRangeCheck(
__ LoadCompressedSmi(
TTSInternalRegs::kScratchReg,
compiler::FieldAddress(TypeTestABI::kInstanceReg,
compiler::target::Record::num_fields_offset()));
compiler::target::Record::shape_offset()));
__ CompareImmediate(TTSInternalRegs::kScratchReg,
Smi::RawValue(type.NumFields()));
__ BranchIf(NOT_EQUAL, &is_not_subtype);
__ LoadCompressedField(
TTSInternalRegs::kScratchReg,
compiler::FieldAddress(TypeTestABI::kInstanceReg,
compiler::target::Record::field_names_offset()));
// Cannot load arbitrary field names from object pool, so
// only record types without named fields are supported.
ASSERT(type.field_names() == Object::empty_array().ptr());
__ CompareObject(TTSInternalRegs::kScratchReg, Object::empty_array());
Smi::RawValue(type.shape().AsInt()));
__ BranchIf(NOT_EQUAL, &is_not_subtype);
auto& field_type = AbstractType::Handle(zone);

View file

@ -24,12 +24,11 @@ class _Record implements Record {
}
_Record otherRec = unsafeCast<_Record>(other);
final int numFields = _numFields;
if (numFields != otherRec._numFields ||
!identical(_fieldNames, otherRec._fieldNames)) {
if (_shape != otherRec._shape) {
return false;
}
final int numFields = _numFields;
for (int i = 0; i < numFields; ++i) {
if (_fieldAt(i) != otherRec._fieldAt(i)) {
return false;
@ -42,9 +41,8 @@ class _Record implements Record {
// record field accesses.
@pragma("vm:never-inline")
int get hashCode {
int hash = _shape;
final int numFields = _numFields;
int hash = numFields;
hash = SystemHash.combine(hash, identityHashCode(_fieldNames));
for (int i = 0; i < numFields; ++i) {
hash = SystemHash.combine(hash, _fieldAt(i).hashCode);
}
@ -73,6 +71,10 @@ class _Record implements Record {
return buffer.toString();
}
@pragma("vm:recognized", "other")
@pragma("vm:prefer-inline")
external int get _shape;
@pragma("vm:recognized", "other")
@pragma("vm:prefer-inline")
external int get _numFields;
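
Closing aside on the hashCode hunk above: seeding with the packed _shape covers both the field count and the identity of the field names, which is why the identityHashCode(_fieldNames) combine disappears. A C++ sketch of the net effect (the Jenkins-style mix below is an assumed stand-in for SystemHash.combine):

#include <cstdint>

// Assumed mixing function, standing in for SystemHash.combine.
uint32_t Combine(uint32_t hash, uint32_t value) {
  hash += value;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

// New scheme: the shape word seeds the hash; each field's hash is mixed in.
uint32_t RecordHash(uint32_t shape, const uint32_t* field_hashes, int n) {
  uint32_t hash = shape;
  for (int i = 0; i < n; ++i) hash = Combine(hash, field_hashes[i]);
  return hash;
}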