[vm] Raise the limit on the number of classes.

Some large applications are bumping up against the 16-bit limit.

The object header changes from hash:32,cid:16,size:8,gc:8 to hash:32,cid:20,size:4,gc:8.
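In terms of the UntaggedObject tag constants (see the header changes below), the layout moves as follows, with the GC bits in the low byte:

  // Old: gc:0-7, size:8-15, cid:16-31, hash:32-63
  //   kSizeTagPos = 8, kSizeTagSize = 8,
  //   kClassIdTagPos = 16, kClassIdTagSize = 16,
  // New: gc:0-7, size:8-11, cid:12-31, hash:32-63
  //   kSizeTagPos = 8, kSizeTagSize = 4,
  //   kClassIdTagPos = 12, kClassIdTagSize = 20,
  //   kHashTagPos = 32, kHashTagSize = 32 (unchanged)

This raises the maximum class id (kClassIdTagMax) from 2^16 - 1 to 2^20 - 1.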

Because the CID field is no longer byte-aligned with a power-of-two width, it takes two instructions to load (a full tag-word load plus a shift or bitfield extract) instead of a single zero-extending load, increasing code size. The alternative change to cid:32,hash:24,gc:8 was considered and rejected because the reduction in hash bits very noticeably hurts performance in applications with large data sets.
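Concretely, on arm64 the old byte-aligned 16-bit field was fetched with a single zero-extending halfword load, while the new 20-bit field at bit 12 needs a load of the whole tag word plus a bitfield extract. A sketch mirroring the assembler changes below:

  // Before: cid is a byte-aligned halfword at tags_offset + 2.
  ldrh(result, FieldAddress(object, class_id_offset));
  // After: cid spans tag bits 12..31, so load the tags, then extract.
  ldr(result, FieldAddress(object, target::Object::tags_offset()));
  ubfx(result, result, kClassIdTagPos, kClassIdTagSize);  // pos 12, width 20

As for the rejected cid:32,hash:24,gc:8 layout: dropping the identity hash from 32 to 24 bits means, by the birthday bound, hash collisions become common around 2^12 rather than 2^16 live hashed objects, which is presumably the slowdown observed on large data sets.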

TEST=ci
Bug: b/255934984
Change-Id: I5fa36ba48a6852359d994393cf80c6a761c5d84c
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/269120
Reviewed-by: Martin Kustermann <kustermann@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Authored by Ryan Macnak on 2022-11-17 20:35:47 +00:00, committed by Commit Queue
parent 7477a6cdf3
commit 9182d5e535
22 changed files with 430 additions and 411 deletions


@@ -164,7 +164,7 @@ class BitField {
// Returns an S with the bit field value encoded.
static constexpr S encode(T value) {
assert(is_valid(value));
ASSERT(is_valid(value));
return encode_unchecked(value);
}


@@ -11,11 +11,15 @@
// for these classes.
#include "platform/assert.h"
#include "vm/globals.h"
namespace dart {
// Size of the class-id part of the object header. See UntaggedObject.
typedef uint16_t ClassIdTagType;
// Large enough to contain the class-id part of the object header. See
// UntaggedObject. Signed to be comparable to intptr_t.
typedef int32_t ClassIdTagType;
static constexpr intptr_t kClassIdTagMax = (1 << 20) - 1;
// Classes that are not subclasses of Instance and only handled by the VM,
// but do not require any special handling other than being a predefined class.


@@ -5,7 +5,6 @@
#ifndef RUNTIME_VM_CLASS_TABLE_H_
#define RUNTIME_VM_CLASS_TABLE_H_
#include <limits>
#include <memory>
#include <tuple>
#include <utility>
@@ -272,7 +271,7 @@ class CidIndexedTable {
};
void SetNumCids(intptr_t new_num_cids) {
if (new_num_cids > std::numeric_limits<CidType>::max()) {
if (new_num_cids > kClassIdTagMax) {
FATAL("Too many classes");
}
num_cids_ = new_num_cids;
@@ -508,7 +507,7 @@ class ClassTable : public MallocAllocated {
friend class IsolateGroup; // for table()
static const int kInitialCapacity = 512;
static const intptr_t kTopLevelCidOffset = (1 << 16);
static const intptr_t kTopLevelCidOffset = kClassIdTagMax + 1;
ClassTable(const ClassTable& original)
: allocator_(original.allocator_),


@@ -1998,15 +1998,18 @@ void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
str(value, dest);
}
void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
Lsr(result, tags, Operand(target::UntaggedObject::kClassIdTagPos), AL);
void Assembler::ExtractClassIdFromTags(Register result,
Register tags,
Condition cond) {
ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
ubfx(result, tags, target::UntaggedObject::kClassIdTagPos,
target::UntaggedObject::kClassIdTagSize, cond);
}
void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
ASSERT(target::UntaggedObject::kSizeTagPos == 8);
ASSERT(target::UntaggedObject::kSizeTagSize == 8);
ASSERT(target::UntaggedObject::kSizeTagSize == 4);
Lsr(result, tags,
Operand(target::UntaggedObject::kSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2),
@@ -2017,12 +2020,8 @@ void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
}
void Assembler::LoadClassId(Register result, Register object, Condition cond) {
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
ldrh(result, FieldAddress(object, class_id_offset), cond);
ldr(result, FieldAddress(object, target::Object::tags_offset()), cond);
ExtractClassIdFromTags(result, result, cond);
}
void Assembler::LoadClassById(Register result, Register class_id) {


@@ -1004,7 +1004,9 @@ class Assembler : public AssemblerBase {
// Stores a Smi value into a heap object field that always contains a Smi.
void StoreIntoSmiField(const Address& dest, Register value);
void ExtractClassIdFromTags(Register result, Register tags);
void ExtractClassIdFromTags(Register result,
Register tags,
Condition cond = AL);
void ExtractInstanceSizeFromTags(Register result, Register tags);
void RangeCheck(Register value,


@@ -1339,28 +1339,23 @@ void Assembler::StoreInternalPointer(Register object,
}
void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
LsrImmediate(result, tags, target::UntaggedObject::kClassIdTagPos,
kFourBytes);
ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
ubfx(result, tags, target::UntaggedObject::kClassIdTagPos,
target::UntaggedObject::kClassIdTagSize);
}
void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
ASSERT(target::UntaggedObject::kSizeTagPos == 8);
ASSERT(target::UntaggedObject::kSizeTagSize == 8);
ASSERT(target::UntaggedObject::kSizeTagSize == 4);
ubfx(result, tags, target::UntaggedObject::kSizeTagPos,
target::UntaggedObject::kSizeTagSize);
LslImmediate(result, result, target::ObjectAlignment::kObjectAlignmentLog2);
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
LoadFromOffset(result, object, class_id_offset - kHeapObjectTag,
kUnsignedTwoBytes);
ldr(result, FieldAddress(object, target::Object::tags_offset()));
ExtractClassIdFromTags(result, result);
}
void Assembler::LoadClassById(Register result, Register class_id) {


@@ -2897,12 +2897,10 @@ void Assembler::EmitGenericShift(int rm,
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
movzxw(result, FieldAddress(object, class_id_offset));
ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
movl(result, FieldAddress(object, target::Object::tags_offset()));
shrl(result, Immediate(target::UntaggedObject::kClassIdTagPos));
}
void Assembler::LoadClassById(Register result, Register class_id) {
@@ -2927,18 +2925,16 @@ void Assembler::SmiUntagOrCheckClass(Register object,
Register scratch,
Label* is_smi) {
ASSERT(kSmiTagShift == 1);
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
// Untag optimistically. Tag bit is shifted into the CARRY.
SmiUntag(object);
j(NOT_CARRY, is_smi, kNearJump);
// Load cid: can't use LoadClassId, object is untagged. Use TIMES_2 scale
// factor in the addressing mode to compensate for this.
movzxw(scratch, Address(object, TIMES_2, class_id_offset));
movl(scratch, Address(object, TIMES_2,
target::Object::tags_offset() + kHeapObjectTag));
shrl(scratch, Immediate(target::UntaggedObject::kClassIdTagPos));
cmpl(scratch, Immediate(class_id));
}


@@ -3403,30 +3403,34 @@ void Assembler::CompareObject(Register reg, const Object& object) {
}
void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
#if XLEN == 64
srliw(result, tags, target::UntaggedObject::kClassIdTagPos);
#else
srli(result, tags, target::UntaggedObject::kClassIdTagPos);
#endif
}
void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
ASSERT(target::UntaggedObject::kSizeTagPos == 8);
ASSERT(target::UntaggedObject::kSizeTagSize == 8);
ASSERT(target::UntaggedObject::kSizeTagSize == 4);
srli(result, tags, target::UntaggedObject::kSizeTagPos);
andi(result, result, (1 << target::UntaggedObject::kSizeTagSize) - 1);
slli(result, result, target::ObjectAlignment::kObjectAlignmentLog2);
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
lhu(result, FieldAddress(object, class_id_offset));
ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
#if XLEN == 64
lwu(result, FieldAddress(object, target::Object::tags_offset()));
#else
lw(result, FieldAddress(object, target::Object::tags_offset()));
#endif
srli(result, result, target::UntaggedObject::kClassIdTagPos);
}
void Assembler::LoadClassById(Register result, Register class_id) {
ASSERT(result != class_id);


@@ -2101,7 +2101,9 @@ void Assembler::MonomorphicCheckedEntryJIT() {
OBJ(add)(FieldAddress(RBX, count_offset), Immediate(target::ToRawSmi(1)));
xorq(R10, R10); // GC-safe for OptimizeInvokedFunction.
#if defined(DART_COMPRESSED_POINTERS)
nop(3);
nop(4);
#else
nop(1);
#endif
// Fall through to unchecked entry.
@@ -2135,10 +2137,8 @@ void Assembler::MonomorphicCheckedEntryAOT() {
// Ensure the unchecked entry is 2-byte aligned (so GC can see them if we
// store them in ICData / MegamorphicCache arrays).
#if !defined(DART_COMPRESSED_POINTERS)
#if defined(DART_COMPRESSED_POINTERS)
nop(1);
#else
nop(2);
#endif
// Fall through to unchecked entry.
@@ -2427,15 +2427,15 @@ void Assembler::EmitGenericShift(bool wide,
}
void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
movl(result, tags);
shrl(result, Immediate(target::UntaggedObject::kClassIdTagPos));
shrl(result, Immediate(12));
}
void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
ASSERT(target::UntaggedObject::kSizeTagPos == 8);
ASSERT(target::UntaggedObject::kSizeTagSize == 8);
ASSERT(target::UntaggedObject::kSizeTagSize == 4);
movzxw(result, tags);
shrl(result, Immediate(target::UntaggedObject::kSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
@@ -2445,12 +2445,10 @@ void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
movzxw(result, FieldAddress(object, class_id_offset));
ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
movl(result, FieldAddress(object, target::Object::tags_offset()));
shrl(result, Immediate(target::UntaggedObject::kClassIdTagPos));
}
void Assembler::LoadClassById(Register result, Register class_id) {
@@ -2475,18 +2473,16 @@ void Assembler::SmiUntagOrCheckClass(Register object,
Label* is_smi) {
#if !defined(DART_COMPRESSED_POINTERS)
ASSERT(kSmiTagShift == 1);
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
ASSERT(target::UntaggedObject::kClassIdTagPos == 12);
ASSERT(target::UntaggedObject::kClassIdTagSize == 20);
// Untag optimistically. Tag bit is shifted into the CARRY.
SmiUntag(object);
j(NOT_CARRY, is_smi, kNearJump);
// Load cid: can't use LoadClassId, object is untagged. Use TIMES_2 scale
// factor in the addressing mode to compensate for this.
movzxw(TMP, Address(object, TIMES_2, class_id_offset));
movl(TMP, Address(object, TIMES_2,
target::Object::tags_offset() + kHeapObjectTag));
shrl(TMP, Immediate(target::UntaggedObject::kClassIdTagPos));
cmpl(TMP, Immediate(class_id));
#else
// Cannot speculatively untag compressed Smis because it erases upper address


@@ -585,9 +585,9 @@ int ARMDecoder::FormatOption(Instr* instr, const char* format) {
ASSERT(STRING_STARTS_WITH(format, "width"));
// 'width: width field of bit field extract instructions
// (field value in encoding is 1 less than in mnemonic)
buffer_pos_ = Utils::SNPrint(current_position_in_buffer(),
remaining_size_in_buffer(), "%u",
instr->BitFieldExtractWidthField() + 1);
buffer_pos_ += Utils::SNPrint(current_position_in_buffer(),
remaining_size_in_buffer(), "%u",
instr->BitFieldExtractWidthField() + 1);
return 5;
} else {
// 'w: W field of load and store instructions.


@@ -2483,9 +2483,9 @@ LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
}
void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 20);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
@@ -2531,16 +2531,16 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (value_cid == kDynamicCid) {
LoadValueCid(compiler, value_cid_reg, value_reg);
__ ldrh(IP, field_cid_operand);
__ ldr(IP, field_cid_operand);
__ cmp(value_cid_reg, compiler::Operand(IP));
__ b(&ok, EQ);
__ ldrh(IP, field_nullability_operand);
__ ldr(IP, field_nullability_operand);
__ cmp(value_cid_reg, compiler::Operand(IP));
} else if (value_cid == kNullCid) {
__ ldrh(value_cid_reg, field_nullability_operand);
__ ldr(value_cid_reg, field_nullability_operand);
__ CompareImmediate(value_cid_reg, value_cid);
} else {
__ ldrh(value_cid_reg, field_cid_operand);
__ ldr(value_cid_reg, field_cid_operand);
__ CompareImmediate(value_cid_reg, value_cid);
}
__ b(&ok, EQ);
@@ -2554,17 +2554,17 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (!field().needs_length_check()) {
// Uninitialized field can be handled inline. Check if the
// field is still uninitialized.
__ ldrh(IP, field_cid_operand);
__ ldr(IP, field_cid_operand);
__ CompareImmediate(IP, kIllegalCid);
__ b(fail, NE);
if (value_cid == kDynamicCid) {
__ strh(value_cid_reg, field_cid_operand);
__ strh(value_cid_reg, field_nullability_operand);
__ str(value_cid_reg, field_cid_operand);
__ str(value_cid_reg, field_nullability_operand);
} else {
__ LoadImmediate(IP, value_cid);
__ strh(IP, field_cid_operand);
__ strh(IP, field_nullability_operand);
__ str(IP, field_cid_operand);
__ str(IP, field_nullability_operand);
}
__ b(&ok);
@@ -2573,9 +2573,8 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (deopt == NULL) {
__ Bind(fail);
__ ldrh(IP,
compiler::FieldAddress(
field_reg, compiler::target::Field::guarded_cid_offset()));
__ ldr(IP, compiler::FieldAddress(
field_reg, compiler::target::Field::guarded_cid_offset()));
__ CompareImmediate(IP, kDynamicCid);
__ b(&ok, EQ);


@@ -2267,9 +2267,9 @@ LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
}
void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 20);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
@@ -2316,18 +2316,18 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (value_cid == kDynamicCid) {
LoadValueCid(compiler, value_cid_reg, value_reg);
compiler::Label skip_length_check;
__ ldr(TMP, field_cid_operand, compiler::kUnsignedTwoBytes);
__ ldr(TMP, field_cid_operand, compiler::kUnsignedFourBytes);
__ CompareRegisters(value_cid_reg, TMP);
__ b(&ok, EQ);
__ ldr(TMP, field_nullability_operand, compiler::kUnsignedTwoBytes);
__ ldr(TMP, field_nullability_operand, compiler::kUnsignedFourBytes);
__ CompareRegisters(value_cid_reg, TMP);
} else if (value_cid == kNullCid) {
__ ldr(value_cid_reg, field_nullability_operand,
compiler::kUnsignedTwoBytes);
compiler::kUnsignedFourBytes);
__ CompareImmediate(value_cid_reg, value_cid);
} else {
compiler::Label skip_length_check;
__ ldr(value_cid_reg, field_cid_operand, compiler::kUnsignedTwoBytes);
__ ldr(value_cid_reg, field_cid_operand, compiler::kUnsignedFourBytes);
__ CompareImmediate(value_cid_reg, value_cid);
}
__ b(&ok, EQ);
@@ -2341,18 +2341,18 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (!field().needs_length_check()) {
// Uninitialized field can be handled inline. Check if the
// field is still uninitialized.
__ ldr(TMP, field_cid_operand, compiler::kUnsignedTwoBytes);
__ ldr(TMP, field_cid_operand, compiler::kUnsignedFourBytes);
__ CompareImmediate(TMP, kIllegalCid);
__ b(fail, NE);
if (value_cid == kDynamicCid) {
__ str(value_cid_reg, field_cid_operand, compiler::kUnsignedTwoBytes);
__ str(value_cid_reg, field_cid_operand, compiler::kUnsignedFourBytes);
__ str(value_cid_reg, field_nullability_operand,
compiler::kUnsignedTwoBytes);
compiler::kUnsignedFourBytes);
} else {
__ LoadImmediate(TMP, value_cid);
__ str(TMP, field_cid_operand, compiler::kUnsignedTwoBytes);
__ str(TMP, field_nullability_operand, compiler::kUnsignedTwoBytes);
__ str(TMP, field_cid_operand, compiler::kUnsignedFourBytes);
__ str(TMP, field_nullability_operand, compiler::kUnsignedFourBytes);
}
__ b(&ok);
@@ -2362,7 +2362,7 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(fail);
__ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset(),
compiler::kUnsignedTwoBytes);
compiler::kUnsignedFourBytes);
__ CompareImmediate(TMP, kDynamicCid);
__ b(&ok, EQ);


@@ -1924,9 +1924,9 @@ LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
}
void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 20);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
@@ -1972,17 +1972,17 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (value_cid == kDynamicCid) {
LoadValueCid(compiler, value_cid_reg, value_reg);
__ cmpw(value_cid_reg, field_cid_operand);
__ cmpl(value_cid_reg, field_cid_operand);
__ j(EQUAL, &ok);
__ cmpw(value_cid_reg, field_nullability_operand);
__ cmpl(value_cid_reg, field_nullability_operand);
} else if (value_cid == kNullCid) {
// Value in graph known to be null.
// Compare with null.
__ cmpw(field_nullability_operand, compiler::Immediate(value_cid));
__ cmpl(field_nullability_operand, compiler::Immediate(value_cid));
} else {
// Value in graph known to be non-null.
// Compare class id with guard field class id.
__ cmpw(field_cid_operand, compiler::Immediate(value_cid));
__ cmpl(field_cid_operand, compiler::Immediate(value_cid));
}
__ j(EQUAL, &ok);
@@ -1995,19 +1995,19 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (!field().needs_length_check()) {
// Uninitialized field can be handled inline. Check if the
// field is still uninitialized.
__ cmpw(field_cid_operand, compiler::Immediate(kIllegalCid));
__ cmpl(field_cid_operand, compiler::Immediate(kIllegalCid));
// Jump to failure path when guard field has been initialized and
// the field and value class ids do not match.
__ j(NOT_EQUAL, fail);
if (value_cid == kDynamicCid) {
// Do not know value's class id.
__ movw(field_cid_operand, value_cid_reg);
__ movw(field_nullability_operand, value_cid_reg);
__ movl(field_cid_operand, value_cid_reg);
__ movl(field_nullability_operand, value_cid_reg);
} else {
ASSERT(field_reg != kNoRegister);
__ movw(field_cid_operand, compiler::Immediate(value_cid));
__ movw(field_nullability_operand, compiler::Immediate(value_cid));
__ movl(field_cid_operand, compiler::Immediate(value_cid));
__ movl(field_nullability_operand, compiler::Immediate(value_cid));
}
__ jmp(&ok);
@@ -2016,7 +2016,7 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (deopt == NULL) {
__ Bind(fail);
__ cmpw(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()),
__ cmpl(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()),
compiler::Immediate(kDynamicCid));
__ j(EQUAL, &ok);


@@ -2621,9 +2621,9 @@ LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
}
void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 20);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
@@ -2670,17 +2670,17 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (value_cid == kDynamicCid) {
LoadValueCid(compiler, value_cid_reg, value_reg);
compiler::Label skip_length_check;
__ lhu(TMP, field_cid_operand);
__ lw(TMP, field_cid_operand);
__ CompareRegisters(value_cid_reg, TMP);
__ BranchIf(EQ, &ok);
__ lhu(TMP, field_nullability_operand);
__ lw(TMP, field_nullability_operand);
__ CompareRegisters(value_cid_reg, TMP);
} else if (value_cid == kNullCid) {
__ lhu(value_cid_reg, field_nullability_operand);
__ lw(value_cid_reg, field_nullability_operand);
__ CompareImmediate(value_cid_reg, value_cid);
} else {
compiler::Label skip_length_check;
__ lhu(value_cid_reg, field_cid_operand);
__ lw(value_cid_reg, field_cid_operand);
__ CompareImmediate(value_cid_reg, value_cid);
}
__ BranchIf(EQ, &ok);
@@ -2694,17 +2694,17 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (!field().needs_length_check()) {
// Uninitialized field can be handled inline. Check if the
// field is still uninitialized.
__ lhu(TMP, field_cid_operand);
__ lw(TMP, field_cid_operand);
__ CompareImmediate(TMP, kIllegalCid);
__ BranchIf(NE, fail);
if (value_cid == kDynamicCid) {
__ sh(value_cid_reg, field_cid_operand);
__ sh(value_cid_reg, field_nullability_operand);
__ sw(value_cid_reg, field_cid_operand);
__ sw(value_cid_reg, field_nullability_operand);
} else {
__ LoadImmediate(TMP, value_cid);
__ sh(TMP, field_cid_operand);
__ sh(TMP, field_nullability_operand);
__ sw(TMP, field_cid_operand);
__ sw(TMP, field_nullability_operand);
}
__ j(&ok);


@@ -2216,9 +2216,9 @@ LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
}
void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 20);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 4);
ASSERT(sizeof(UntaggedField::is_nullable_) == 4);
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
@@ -2265,13 +2265,13 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (value_cid == kDynamicCid) {
LoadValueCid(compiler, value_cid_reg, value_reg);
__ cmpw(value_cid_reg, field_cid_operand);
__ cmpl(value_cid_reg, field_cid_operand);
__ j(EQUAL, &ok);
__ cmpw(value_cid_reg, field_nullability_operand);
__ cmpl(value_cid_reg, field_nullability_operand);
} else if (value_cid == kNullCid) {
__ cmpw(field_nullability_operand, compiler::Immediate(value_cid));
__ cmpl(field_nullability_operand, compiler::Immediate(value_cid));
} else {
__ cmpw(field_cid_operand, compiler::Immediate(value_cid));
__ cmpl(field_cid_operand, compiler::Immediate(value_cid));
}
__ j(EQUAL, &ok);
@@ -2286,16 +2286,16 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (!is_complicated_field) {
// Uninitialized field can be handled inline. Check if the
// field is still uninitialized.
__ cmpw(field_cid_operand, compiler::Immediate(kIllegalCid));
__ cmpl(field_cid_operand, compiler::Immediate(kIllegalCid));
__ j(NOT_EQUAL, fail);
if (value_cid == kDynamicCid) {
__ movw(field_cid_operand, value_cid_reg);
__ movw(field_nullability_operand, value_cid_reg);
__ movl(field_cid_operand, value_cid_reg);
__ movl(field_nullability_operand, value_cid_reg);
} else {
ASSERT(field_reg != kNoRegister);
__ movw(field_cid_operand, compiler::Immediate(value_cid));
__ movw(field_nullability_operand, compiler::Immediate(value_cid));
__ movl(field_cid_operand, compiler::Immediate(value_cid));
__ movl(field_nullability_operand, compiler::Immediate(value_cid));
}
__ jmp(&ok);
@@ -2304,7 +2304,7 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (deopt == NULL) {
__ Bind(fail);
__ cmpw(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()),
__ cmpl(compiler::FieldAddress(field_reg, Field::guarded_cid_offset()),
compiler::Immediate(kDynamicCid));
__ j(EQUAL, &ok);

File diff suppressed because it is too large.


@@ -650,7 +650,7 @@ static void GenerateNullIsAssignableToType(Assembler* assembler,
__ LoadFieldFromOffset(
kScratchReg, kCurrentTypeReg,
target::TypeParameter::parameterized_class_id_offset(),
kUnsignedTwoBytes);
kUnsignedFourBytes);
__ CompareImmediate(kScratchReg, kFunctionCid);
__ BranchIf(EQUAL, &function_type_param, Assembler::kNearJump);
handle_case(TypeTestABI::kInstantiatorTypeArgumentsReg);
@@ -767,7 +767,7 @@ static void BuildTypeParameterTypeTestStub(Assembler* assembler,
Label function_type_param;
__ LoadFieldFromOffset(TypeTestABI::kScratchReg, TypeTestABI::kDstTypeReg,
target::TypeParameter::parameterized_class_id_offset(),
kUnsignedTwoBytes);
kUnsignedFourBytes);
__ CompareImmediate(TypeTestABI::kScratchReg, kFunctionCid);
__ BranchIf(EQUAL, &function_type_param, Assembler::kNearJump);
handle_case(TypeTestABI::kInstantiatorTypeArgumentsReg);


@@ -285,4 +285,31 @@ ISOLATE_UNIT_TEST_CASE(EvalExpressionExhaustCIDs) {
EXPECT_EQ(initial_class_table_size, final_class_table_size);
}
// Too slow in debug mode.
#if !defined(DEBUG)
TEST_CASE(ManyClasses) {
// Limit is 20 bits. Check only more than 16 bits so test completes in
// reasonable time.
const intptr_t kNumClasses = (1 << 16) + 1;
TextBuffer buffer(MB);
for (intptr_t i = 0; i < kNumClasses; i++) {
buffer.Printf("class C%" Pd " { String toString() => 'C%" Pd "'; }\n", i,
i);
}
buffer.Printf("main() {\n");
for (intptr_t i = 0; i < kNumClasses; i++) {
buffer.Printf(" new C%" Pd "().toString();\n", i);
}
buffer.Printf("}\n");
Dart_Handle lib = TestCase::LoadTestScript(buffer.buffer(), NULL);
EXPECT_VALID(lib);
Dart_Handle result = Dart_Invoke(lib, NewString("main"), 0, NULL);
EXPECT_VALID(result);
EXPECT(IsolateGroup::Current()->class_table()->NumCids() >= kNumClasses);
}
#endif // !defined(DEBUG)
} // namespace dart


@@ -21833,8 +21833,7 @@ TypePtr Type::New(const Class& clazz,
}
void Type::set_type_class_id(intptr_t id) const {
COMPILE_ASSERT(std::is_unsigned<ClassIdTagType>::value);
ASSERT(Utils::IsUint(sizeof(ClassIdTagType) * kBitsPerByte, id));
ASSERT(Utils::IsUint(UntaggedObject::kClassIdTagSize, id));
// We should never need a Type object for a top-level class.
ASSERT(!ClassTable::IsTopLevelCid(id));
ASSERT(id != kIllegalCid);


@@ -5421,34 +5421,34 @@ class Instructions : public Object {
// necessary) to allow them to be seen as Smis by the GC.
#if defined(TARGET_ARCH_IA32)
static const intptr_t kMonomorphicEntryOffsetJIT = 6;
static const intptr_t kPolymorphicEntryOffsetJIT = 34;
static const intptr_t kPolymorphicEntryOffsetJIT = 36;
static const intptr_t kMonomorphicEntryOffsetAOT = 0;
static const intptr_t kPolymorphicEntryOffsetAOT = 0;
#elif defined(TARGET_ARCH_X64)
static const intptr_t kMonomorphicEntryOffsetJIT = 8;
static const intptr_t kPolymorphicEntryOffsetJIT = 40;
static const intptr_t kPolymorphicEntryOffsetJIT = 42;
static const intptr_t kMonomorphicEntryOffsetAOT = 8;
static const intptr_t kPolymorphicEntryOffsetAOT = 22;
#elif defined(TARGET_ARCH_ARM)
static const intptr_t kMonomorphicEntryOffsetJIT = 0;
static const intptr_t kPolymorphicEntryOffsetJIT = 40;
static const intptr_t kPolymorphicEntryOffsetJIT = 44;
static const intptr_t kMonomorphicEntryOffsetAOT = 0;
static const intptr_t kPolymorphicEntryOffsetAOT = 12;
static const intptr_t kPolymorphicEntryOffsetAOT = 16;
#elif defined(TARGET_ARCH_ARM64)
static const intptr_t kMonomorphicEntryOffsetJIT = 8;
static const intptr_t kPolymorphicEntryOffsetJIT = 48;
static const intptr_t kPolymorphicEntryOffsetJIT = 52;
static const intptr_t kMonomorphicEntryOffsetAOT = 8;
static const intptr_t kPolymorphicEntryOffsetAOT = 20;
static const intptr_t kPolymorphicEntryOffsetAOT = 24;
#elif defined(TARGET_ARCH_RISCV32)
static const intptr_t kMonomorphicEntryOffsetJIT = 6;
static const intptr_t kPolymorphicEntryOffsetJIT = 42;
static const intptr_t kPolymorphicEntryOffsetJIT = 44;
static const intptr_t kMonomorphicEntryOffsetAOT = 6;
static const intptr_t kPolymorphicEntryOffsetAOT = 16;
static const intptr_t kPolymorphicEntryOffsetAOT = 18;
#elif defined(TARGET_ARCH_RISCV64)
static const intptr_t kMonomorphicEntryOffsetJIT = 6;
static const intptr_t kPolymorphicEntryOffsetJIT = 42;
static const intptr_t kPolymorphicEntryOffsetJIT = 44;
static const intptr_t kMonomorphicEntryOffsetAOT = 6;
static const intptr_t kPolymorphicEntryOffsetAOT = 16;
static const intptr_t kPolymorphicEntryOffsetAOT = 18;
#else
#error Missing entry offsets for current architecture
#endif


@@ -248,12 +248,12 @@ intptr_t UntaggedObject::HeapSizeFromClass(uword tags) const {
auto isolate_group = IsolateGroup::Current();
#if defined(DEBUG)
auto class_table = isolate_group->heap_walk_class_table();
ASSERT(class_table->SizeAt(class_id) > 0);
if (!class_table->IsValidIndex(class_id) ||
!class_table->HasValidClassAt(class_id)) {
FATAL3("Invalid cid: %" Pd ", obj: %p, tags: %x. Corrupt heap?",
class_id, this, static_cast<uint32_t>(tags));
}
ASSERT(class_table->SizeAt(class_id) > 0);
#endif // DEBUG
instance_size = isolate_group->heap_walk_class_table()->SizeAt(class_id);
}


@@ -167,9 +167,9 @@ class UntaggedObject {
kReservedBit = 7,
kSizeTagPos = kReservedBit + 1, // = 8
kSizeTagSize = 8,
kClassIdTagPos = kSizeTagPos + kSizeTagSize, // = 16
kClassIdTagSize = 16,
kSizeTagSize = 4,
kClassIdTagPos = kSizeTagPos + kSizeTagSize, // = 12
kClassIdTagSize = 20,
kHashTagPos = kClassIdTagPos + kClassIdTagSize, // = 32
kHashTagSize = 32,
};
@@ -230,7 +230,8 @@ class UntaggedObject {
ClassIdTagType,
kClassIdTagPos,
kClassIdTagSize> {};
COMPILE_ASSERT(kBitsPerByte * sizeof(ClassIdTagType) == kClassIdTagSize);
COMPILE_ASSERT(kBitsPerByte * sizeof(ClassIdTagType) >= kClassIdTagSize);
COMPILE_ASSERT(kClassIdTagMax == (1 << kClassIdTagSize) - 1);
#if defined(HASH_IN_OBJECT_HEADER)
class HashTag : public BitField<uword, uint32_t, kHashTagPos, kHashTagSize> {
@@ -2641,10 +2642,8 @@ class UntaggedAbstractType : public UntaggedInstance {
class UntaggedType : public UntaggedAbstractType {
public:
static constexpr intptr_t kTypeClassIdShift = TypeStateBits::kNextBit;
using TypeClassIdBits = BitField<uint32_t,
ClassIdTagType,
kTypeClassIdShift,
sizeof(ClassIdTagType) * kBitsPerByte>;
using TypeClassIdBits =
BitField<uint32_t, ClassIdTagType, kTypeClassIdShift, kClassIdTagSize>;
private:
RAW_HEAP_OBJECT_IMPLEMENTATION(Type);