[vm] Replace asm intrinsic _GrowableList._withData with flow graph implementation

The _GrowableList._withData constructor can be implemented in the flow
graph and inlined, instead of having an asm intrinsic implementation.

This change also adds a specialized allocation stub for the
_GrowableList class to offset the slower general-purpose allocation
stub.
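
For context: in the VM's Dart runtime library, _GrowableList is a thin
wrapper around a fixed-length backing _List plus a length field, and
._withData wraps a given backing array with an initial length of 0. Those
are exactly the two initializing stores the new flow-graph implementation
emits (see the kGrowableArrayAllocateWithData case below). A minimal
stand-alone Dart sketch of that shape, using illustrative names rather
than the actual sdk-internal classes:

    // Illustrative model only: the inlined flow graph amounts to "allocate
    // the wrapper object, then initialize its data and length fields
    // directly" (no store barrier is needed, since the object is brand new).
    class GrowableModel<T> {
      List<T?> data;   // fixed-length backing array
      int length = 0;  // starts at 0, as in _GrowableList._withData
      GrowableModel.withData(this.data);
    }

    void main() {
      final g = GrowableModel<int>.withData(List<int?>.filled(4, null));
      print('${g.length} of ${g.data.length} slots used');  // 0 of 4 slots used
    }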

TEST=ci
Fixes https://github.com/dart-lang/sdk/issues/48255

Change-Id: Ice0ca9156d7a504960cce1718ffd9aca24a9a3d1
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/231527
Reviewed-by: Slava Egorov <vegorov@google.com>
Commit-Queue: Alexander Markov <alexmarkov@google.com>
Author: Alexander Markov, 2022-02-18 16:06:12 +00:00 (committed by Commit Bot)
parent 9ad6cc49af
commit 5010df50fe

21 changed files with 91 additions and 199 deletions


@@ -26,41 +26,6 @@ namespace compiler {
 
 #define __ assembler->
 
-// Allocate a GrowableObjectArray:: using the backing array specified.
-// On stack: type argument (+1), data (+0).
-void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
-                                             Label* normal_ir_body) {
-  // The newly allocated object is returned in R0.
-  const intptr_t kTypeArgumentsOffset = 1 * target::kWordSize;
-  const intptr_t kArrayOffset = 0 * target::kWordSize;
-
-  // Try allocating in new space.
-  const Class& cls = GrowableObjectArrayClass();
-  __ TryAllocate(cls, normal_ir_body, Assembler::kFarJump, R0, R1);
-
-  // Store backing array object in growable array object.
-  __ ldr(R1, Address(SP, kArrayOffset));  // Data argument.
-  // R0 is new, no barrier needed.
-  __ StoreIntoObjectNoBarrier(
-      R0, FieldAddress(R0, target::GrowableObjectArray::data_offset()), R1);
-
-  // R0: new growable array object start as a tagged pointer.
-  // Store the type argument field in the growable array object.
-  __ ldr(R1, Address(SP, kTypeArgumentsOffset));  // Type argument.
-  __ StoreIntoObjectNoBarrier(
-      R0,
-      FieldAddress(R0, target::GrowableObjectArray::type_arguments_offset()),
-      R1);
-
-  // Set the length field in the growable array object to 0.
-  __ LoadImmediate(R1, 0);
-  __ StoreIntoObjectNoBarrier(
-      R0, FieldAddress(R0, target::GrowableObjectArray::length_offset()), R1);
-  __ Ret();  // Returns the newly allocated object in R0.
-
-  __ Bind(normal_ir_body);
-}
-
 // Loads args from stack into R0 and R1
 // Tests if they are smis, jumps to label not_smi if not.
 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {


@@ -26,40 +26,6 @@ namespace compiler {
 
 #define __ assembler->
 
-// Allocate a GrowableObjectArray:: using the backing array specified.
-// On stack: type argument (+1), data (+0).
-void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
-                                             Label* normal_ir_body) {
-  // The newly allocated object is returned in R0.
-  const intptr_t kTypeArgumentsOffset = 1 * target::kWordSize;
-  const intptr_t kArrayOffset = 0 * target::kWordSize;
-
-  // Try allocating in new space.
-  const Class& cls = GrowableObjectArrayClass();
-  __ TryAllocate(cls, normal_ir_body, Assembler::kFarJump, R0, R1);
-
-  // Store backing array object in growable array object.
-  __ ldr(R1, Address(SP, kArrayOffset));  // Data argument.
-  // R0 is new, no barrier needed.
-  __ StoreCompressedIntoObjectNoBarrier(
-      R0, FieldAddress(R0, target::GrowableObjectArray::data_offset()), R1);
-
-  // R0: new growable array object start as a tagged pointer.
-  // Store the type argument field in the growable array object.
-  __ ldr(R1, Address(SP, kTypeArgumentsOffset));  // Type argument.
-  __ StoreCompressedIntoObjectNoBarrier(
-      R0,
-      FieldAddress(R0, target::GrowableObjectArray::type_arguments_offset()),
-      R1);
-
-  // Set the length field in the growable array object to 0.
-  __ StoreCompressedIntoObjectNoBarrier(
-      R0, FieldAddress(R0, target::GrowableObjectArray::length_offset()), ZR);
-  __ ret();  // Returns the newly allocated object in R0.
-
-  __ Bind(normal_ir_body);
-}
-
 // Loads args from stack into R0 and R1
 // Tests if they are smis, jumps to label not_smi if not.
 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {


@@ -31,42 +31,6 @@ namespace compiler {
 
 #define __ assembler->
 
-// Allocate a GrowableObjectArray:: using the backing array specified.
-// On stack: type argument (+2), data (+1), return-address (+0).
-void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
-                                             Label* normal_ir_body) {
-  // This snippet of inlined code uses the following registers:
-  // EAX, EBX
-  // and the newly allocated object is returned in EAX.
-  const intptr_t kTypeArgumentsOffset = 2 * target::kWordSize;
-  const intptr_t kArrayOffset = 1 * target::kWordSize;
-
-  // Try allocating in new space.
-  const Class& cls = GrowableObjectArrayClass();
-  __ TryAllocate(cls, normal_ir_body, Assembler::kNearJump, EAX, EBX);
-
-  // Store backing array object in growable array object.
-  __ movl(EBX, Address(ESP, kArrayOffset));  // data argument.
-  // EAX is new, no barrier needed.
-  __ StoreIntoObjectNoBarrier(
-      EAX, FieldAddress(EAX, target::GrowableObjectArray::data_offset()), EBX);
-
-  // EAX: new growable array object start as a tagged pointer.
-  // Store the type argument field in the growable array object.
-  __ movl(EBX, Address(ESP, kTypeArgumentsOffset));  // type argument.
-  __ StoreIntoObjectNoBarrier(
-      EAX,
-      FieldAddress(EAX, target::GrowableObjectArray::type_arguments_offset()),
-      EBX);
-
-  __ ZeroInitSmiField(
-      FieldAddress(EAX, target::GrowableObjectArray::length_offset()));
-
-  __ ret();  // returns the newly allocated object in EAX.
-  __ Bind(normal_ir_body);
-}
-
 // Tests if two top most arguments are smis, jumps to label not_smi if not.
 // Topmost argument is in EAX.
 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {


@@ -26,40 +26,6 @@ namespace compiler {
 
 #define __ assembler->
 
-// Allocate a GrowableObjectArray:: using the backing array specified.
-// On stack: type argument (+1), data (+0).
-void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
-                                             Label* normal_ir_body) {
-  // The newly allocated object is returned in R0.
-  const intptr_t kTypeArgumentsOffset = 1 * target::kWordSize;
-  const intptr_t kArrayOffset = 0 * target::kWordSize;
-
-  // Try allocating in new space.
-  const Class& cls = GrowableObjectArrayClass();
-  __ TryAllocate(cls, normal_ir_body, Assembler::kFarJump, A0, A1);
-
-  // Store backing array object in growable array object.
-  __ lx(A1, Address(SP, kArrayOffset));  // Data argument.
-  // R0 is new, no barrier needed.
-  __ StoreCompressedIntoObjectNoBarrier(
-      A0, FieldAddress(A0, target::GrowableObjectArray::data_offset()), A1);
-
-  // R0: new growable array object start as a tagged pointer.
-  // Store the type argument field in the growable array object.
-  __ lx(A1, Address(SP, kTypeArgumentsOffset));  // Type argument.
-  __ StoreCompressedIntoObjectNoBarrier(
-      A0,
-      FieldAddress(A0, target::GrowableObjectArray::type_arguments_offset()),
-      A1);
-
-  // Set the length field in the growable array object to 0.
-  __ StoreCompressedIntoObjectNoBarrier(
-      A0, FieldAddress(A0, target::GrowableObjectArray::length_offset()), ZR);
-  __ ret();  // Returns the newly allocated object in A0.
-
-  __ Bind(normal_ir_body);
-}
-
 // Loads args from stack into A0 and A1
 // Tests if they are smis, jumps to label not_smi if not.
 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {


@@ -26,42 +26,6 @@ namespace compiler {
 
 #define __ assembler->
 
-// Allocate a GrowableObjectArray using the backing array specified.
-// On stack: type argument (+2), data (+1), return-address (+0).
-void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
-                                             Label* normal_ir_body) {
-  // This snippet of inlined code uses the following registers:
-  // RAX, RCX, R13
-  // and the newly allocated object is returned in RAX.
-  const intptr_t kTypeArgumentsOffset = 2 * target::kWordSize;
-  const intptr_t kArrayOffset = 1 * target::kWordSize;
-
-  // Try allocating in new space.
-  const Class& cls = GrowableObjectArrayClass();
-  __ TryAllocate(cls, normal_ir_body, Assembler::kFarJump, RAX, R13);
-
-  // Store backing array object in growable array object.
-  __ movq(RCX, Address(RSP, kArrayOffset));  // data argument.
-  // RAX is new, no barrier needed.
-  __ StoreCompressedIntoObjectNoBarrier(
-      RAX, FieldAddress(RAX, target::GrowableObjectArray::data_offset()), RCX);
-
-  // RAX: new growable array object start as a tagged pointer.
-  // Store the type argument field in the growable array object.
-  __ movq(RCX, Address(RSP, kTypeArgumentsOffset));  // type argument.
-  __ StoreCompressedIntoObjectNoBarrier(
-      RAX,
-      FieldAddress(RAX, target::GrowableObjectArray::type_arguments_offset()),
-      RCX);
-
-  // Set the length field in the growable array object to 0.
-  __ ZeroInitCompressedSmiField(
-      FieldAddress(RAX, target::GrowableObjectArray::length_offset()));
-  __ ret();  // returns the newly allocated object in RAX.
-
-  __ Bind(normal_ir_body);
-}
-
 // Tests if two top most arguments are smis, jumps to label not_smi if not.
 // Topmost argument is in RAX.
 static void TestBothArgumentsSmis(Assembler* assembler, Label* not_smi) {


@@ -877,6 +877,7 @@ bool FlowGraphBuilder::IsRecognizedMethodForFlowGraph(
     case MethodRecognizer::kByteDataViewTypedData:
     case MethodRecognizer::kTypedDataViewTypedData:
     case MethodRecognizer::kClassIDgetID:
+    case MethodRecognizer::kGrowableArrayAllocateWithData:
     case MethodRecognizer::kGrowableArrayCapacity:
     case MethodRecognizer::kListFactory:
     case MethodRecognizer::kObjectArrayAllocate:
@@ -1124,6 +1125,26 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
       body += LoadLocal(parsed_function_->RawParameterVariable(0));
      body += LoadClassId();
       break;
+    case MethodRecognizer::kGrowableArrayAllocateWithData: {
+      ASSERT(function.IsFactory());
+      ASSERT_EQUAL(function.NumParameters(), 2);
+      const Class& cls =
+          Class::ZoneHandle(Z, compiler::GrowableObjectArrayClass().ptr());
+      body += LoadLocal(parsed_function_->RawParameterVariable(0));
+      body += AllocateObject(TokenPosition::kNoSource, cls, 1);
+      LocalVariable* object = MakeTemporary();
+      body += LoadLocal(object);
+      body += LoadLocal(parsed_function_->RawParameterVariable(1));
+      body += StoreNativeField(Slot::GrowableObjectArray_data(),
+                               StoreInstanceFieldInstr::Kind::kInitializing,
+                               kNoStoreBarrier);
+      body += LoadLocal(object);
+      body += IntConstant(0);
+      body += StoreNativeField(Slot::GrowableObjectArray_length(),
+                               StoreInstanceFieldInstr::Kind::kInitializing,
+                               kNoStoreBarrier);
+      break;
+    }
     case MethodRecognizer::kGrowableArrayCapacity:
       ASSERT_EQUAL(function.NumParameters(), 1);
       body += LoadLocal(parsed_function_->RawParameterVariable(0));


@@ -18,6 +18,7 @@ namespace dart {
   V(List, ., ListFactory, 0xbc820cf9) \
   V(_List, ., ObjectArrayAllocate, 0xd693eee6) \
   V(_List, []=, ObjectArraySetIndexed, 0xd7b48abc) \
+  V(_GrowableList, ._withData, GrowableArrayAllocateWithData, 0xa32d060b) \
   V(_GrowableList, []=, GrowableArraySetIndexed, 0xd7b48abc) \
   V(_TypedList, _getInt8, ByteArrayBaseGetInt8, 0x1623dc34) \
   V(_TypedList, _getUint8, ByteArrayBaseGetUint8, 0x177ffe2a) \
@@ -274,7 +275,6 @@ namespace dart {
   V(_Double, get:isNegative, Double_getIsNegative, 0xd4715091) \
   V(_Double, _mulFromInteger, Double_mulFromInteger, 0x0a50d2cf) \
   V(_Double, .fromInteger, DoubleFromInteger, 0x7d0fd999) \
-  V(_GrowableList, ._withData, GrowableArray_Allocate, 0xa32d060b) \
   V(_RegExp, _ExecuteMatch, RegExp_ExecuteMatch, 0x9911d549) \
   V(_RegExp, _ExecuteMatchSticky, RegExp_ExecuteMatchSticky, 0x91dd880f) \
   V(Object, ==, ObjectEquals, 0x46587030) \


@@ -899,6 +899,43 @@ void StubCodeCompiler::GenerateAllocateClosureStub(Assembler* assembler) {
   __ Ret();
 }
 
+// Generates allocation stub for _GrowableList class.
+// This stub exists solely for performance reasons: default allocation
+// stub is slower as it doesn't use specialized inline allocation.
+void StubCodeCompiler::GenerateAllocateGrowableArrayStub(Assembler* assembler) {
+#if defined(TARGET_ARCH_IA32)
+  // This stub is not used on IA32 because IA32 version of
+  // StubCodeCompiler::GenerateAllocationStubForClass uses inline
+  // allocation. Also, AllocateObjectSlow stub is not generated on IA32.
+  __ Breakpoint();
+#else
+  const intptr_t instance_size = target::RoundedAllocationSize(
+      target::GrowableObjectArray::InstanceSize());
+
+  if (!FLAG_use_slow_path && FLAG_inline_alloc) {
+    Label slow_case;
+    __ Comment("Inline allocation of GrowableList");
+    __ TryAllocateObject(kGrowableObjectArrayCid, instance_size, &slow_case,
+                         Assembler::kNearJump, AllocateObjectABI::kResultReg,
+                         /*temp_reg=*/AllocateObjectABI::kTagsReg);
+    __ StoreIntoObjectNoBarrier(
+        AllocateObjectABI::kResultReg,
+        FieldAddress(AllocateObjectABI::kResultReg,
+                     target::GrowableObjectArray::type_arguments_offset()),
+        AllocateObjectABI::kTypeArgumentsReg);
+    __ Ret();
+    __ Bind(&slow_case);
+  }
+
+  const uword tags = target::MakeTagWordForNewSpaceObject(
+      kGrowableObjectArrayCid, instance_size);
+
+  __ LoadImmediate(AllocateObjectABI::kTagsReg, tags);
+  __ Jump(
+      Address(THR, target::Thread::allocate_object_slow_entry_point_offset()));
+#endif  // defined(TARGET_ARCH_IA32)
+}
+
 // The UnhandledException class lives in the VM isolate, so it cannot cache
 // an allocation stub for itself. Instead, we cache it in the stub code list.
 void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub(


@@ -1728,7 +1728,7 @@ void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
 
 static void GenerateAllocateObjectHelper(Assembler* assembler,
                                          bool is_cls_parameterized) {
-  const Register kTagsReg = R2;
+  const Register kTagsReg = AllocateObjectABI::kTagsReg;
 
   {
     Label slow_case;
@@ -1845,7 +1845,6 @@ void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
 
 void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
   const Register kClsReg = R1;
-  const Register kTagsReg = R2;
 
   if (!FLAG_precompiled_mode) {
     __ ldr(CODE_REG,
@@ -1856,7 +1855,8 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
   // calling into the runtime.
   __ EnterStubFrame();
 
-  __ ExtractClassIdFromTags(AllocateObjectABI::kResultReg, kTagsReg);
+  __ ExtractClassIdFromTags(AllocateObjectABI::kResultReg,
+                            AllocateObjectABI::kTagsReg);
   __ LoadClassById(kClsReg, AllocateObjectABI::kResultReg);
 
   __ LoadObject(AllocateObjectABI::kResultReg, NullObject());
@@ -1903,7 +1903,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
   const uword tags =
       target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
 
-  const Register kTagsReg = R2;
+  const Register kTagsReg = AllocateObjectABI::kTagsReg;
   __ LoadImmediate(kTagsReg, tags);


@@ -1904,7 +1904,7 @@ void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
 
 static void GenerateAllocateObjectHelper(Assembler* assembler,
                                          bool is_cls_parameterized) {
-  const Register kTagsReg = R2;
+  const Register kTagsReg = AllocateObjectABI::kTagsReg;
 
   {
     Label slow_case;
@@ -2014,20 +2014,19 @@ void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
 }
 
 void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
-  const Register kTagsToClsIdReg = R2;
-
   if (!FLAG_precompiled_mode) {
     __ ldr(CODE_REG,
            Address(THR, target::Thread::call_to_runtime_stub_offset()));
   }
 
-  __ ExtractClassIdFromTags(kTagsToClsIdReg, kTagsToClsIdReg);
+  __ ExtractClassIdFromTags(AllocateObjectABI::kTagsReg,
+                            AllocateObjectABI::kTagsReg);
 
   // Create a stub frame as we are pushing some objects on the stack before
   // calling into the runtime.
   __ EnterStubFrame();
 
-  __ LoadClassById(R0, kTagsToClsIdReg);
+  __ LoadClassById(R0, AllocateObjectABI::kTagsReg);
   __ PushPair(R0, NULL_REG);  // Pushes result slot, then class object.
 
   // Should be Object::null() if class is non-parameterized.
@@ -2072,7 +2071,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
       target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
 
   // Note: Keep in sync with helper function.
-  const Register kTagsReg = R2;
+  const Register kTagsReg = AllocateObjectABI::kTagsReg;
   __ LoadImmediate(kTagsReg, tags);


@@ -1831,7 +1831,7 @@ void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
 
 static void GenerateAllocateObjectHelper(Assembler* assembler,
                                          bool is_cls_parameterized) {
-  const Register kTagsReg = T2;
+  const Register kTagsReg = AllocateObjectABI::kTagsReg;
 
   {
     Label slow_case;
@@ -1936,8 +1936,6 @@ void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
 }
 
 void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
-  const Register kTagsToClsIdReg = T2;
-
   if (!FLAG_precompiled_mode) {
     __ lx(CODE_REG,
           Address(THR, target::Thread::call_to_runtime_stub_offset()));
@@ -1947,8 +1945,9 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
   // calling into the runtime.
   __ EnterStubFrame();
 
-  __ ExtractClassIdFromTags(kTagsToClsIdReg, kTagsToClsIdReg);
-  __ LoadClassById(A0, kTagsToClsIdReg);
+  __ ExtractClassIdFromTags(AllocateObjectABI::kTagsReg,
+                            AllocateObjectABI::kTagsReg);
+  __ LoadClassById(A0, AllocateObjectABI::kTagsReg);
 
   __ subi(SP, SP, 3 * target::kWordSize);
   __ sx(ZR, Address(SP, 2 * target::kWordSize));  // Result slot.
@@ -1993,7 +1992,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
       target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
 
   // Note: Keep in sync with helper function.
-  const Register kTagsReg = T2;
+  const Register kTagsReg = AllocateObjectABI::kTagsReg;
   ASSERT(kTagsReg != AllocateObjectABI::kTypeArgumentsReg);
   __ LoadImmediate(kTagsReg, tags);


@@ -1938,7 +1938,7 @@ void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
 static void GenerateAllocateObjectHelper(Assembler* assembler,
                                          bool is_cls_parameterized) {
   // Note: Keep in sync with calling function.
-  const Register kTagsReg = R8;
+  const Register kTagsReg = AllocateObjectABI::kTagsReg;
 
   {
     Label slow_case;
@@ -2049,14 +2049,13 @@ void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
 }
 
 void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
-  const Register kTagsToClsIdReg = R8;
-
   if (!FLAG_precompiled_mode) {
     __ movq(CODE_REG,
             Address(THR, target::Thread::call_to_runtime_stub_offset()));
   }
 
-  __ ExtractClassIdFromTags(kTagsToClsIdReg, kTagsToClsIdReg);
+  __ ExtractClassIdFromTags(AllocateObjectABI::kTagsReg,
+                            AllocateObjectABI::kTagsReg);
 
   // Create a stub frame.
   // Ensure constant pool is allowed so we can e.g. load class object.
@@ -2067,7 +2066,7 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
   __ pushq(AllocateObjectABI::kResultReg);
 
   // Push class of object to be allocated.
-  __ LoadClassById(AllocateObjectABI::kResultReg, kTagsToClsIdReg);
+  __ LoadClassById(AllocateObjectABI::kResultReg, AllocateObjectABI::kTagsReg);
   __ pushq(AllocateObjectABI::kResultReg);
 
   // Must be Object::null() if non-parameterized class.
@@ -2118,7 +2117,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
   const uword tags =
       target::MakeTagWordForNewSpaceObject(cls_id, instance_size);
 
-  const Register kTagsReg = R8;
+  const Register kTagsReg = AllocateObjectABI::kTagsReg;
   __ movq(kTagsReg, Immediate(tags));


@@ -470,6 +470,7 @@ struct RangeErrorABI {
 struct AllocateObjectABI {
   static const Register kResultReg = R0;
   static const Register kTypeArgumentsReg = R3;
+  static const Register kTagsReg = R2;
 };
 
 // ABI for AllocateClosureStub.


@@ -309,6 +309,7 @@ struct RangeErrorABI {
 struct AllocateObjectABI {
   static const Register kResultReg = R0;
   static const Register kTypeArgumentsReg = R1;
+  static const Register kTagsReg = R2;
 };
 
 // ABI for AllocateClosureStub.
// ABI for AllocateClosureStub.


@@ -206,6 +206,7 @@ struct RangeErrorABI {
 struct AllocateObjectABI {
   static const Register kResultReg = EAX;
   static const Register kTypeArgumentsReg = EDX;
+  static const Register kTagsReg = kNoRegister;  // Not used.
 };
 
 // ABI for Allocate{Mint,Double,Float32x4,Float64x2}Stub.
// ABI for Allocate{Mint,Double,Float32x4,Float64x2}Stub.


@@ -326,6 +326,7 @@ struct RangeErrorABI {
 struct AllocateObjectABI {
   static constexpr Register kResultReg = A0;
   static constexpr Register kTypeArgumentsReg = T1;
+  static const Register kTagsReg = T2;
 };
 
 // ABI for AllocateClosureStub.
// ABI for AllocateClosureStub.


@@ -281,6 +281,7 @@ struct RangeErrorABI {
 struct AllocateObjectABI {
   static const Register kResultReg = RAX;
   static const Register kTypeArgumentsReg = RDX;
+  static const Register kTagsReg = R8;
 };
 
 // ABI for AllocateClosureStub.
// ABI for AllocateClosureStub.


@@ -208,6 +208,7 @@ class ObjectPointerVisitor;
   RW(Code, allocate_float64x2_array_stub) \
   RW(Code, allocate_closure_stub) \
   RW(Code, allocate_context_stub) \
+  RW(Code, allocate_growable_array_stub) \
   RW(Code, allocate_object_stub) \
   RW(Code, allocate_object_parametrized_stub) \
   RW(Code, allocate_unhandled_exception_stub) \
@@ -284,6 +285,7 @@ class ObjectPointerVisitor;
   DO(allocate_float64x2_array_stub, AllocateFloat64x2Array) \
   DO(allocate_closure_stub, AllocateClosure) \
   DO(allocate_context_stub, AllocateContext) \
+  DO(allocate_growable_array_stub, AllocateGrowableArray) \
   DO(allocate_object_stub, AllocateObject) \
   DO(allocate_object_parametrized_stub, AllocateObjectParameterized) \
   DO(allocate_unhandled_exception_stub, AllocateUnhandledException) \


@@ -172,6 +172,10 @@ CodePtr StubCode::GetAllocationStubForClass(const Class& cls) {
   switch (cls.id()) {
     case kArrayCid:
       return object_store->allocate_array_stub();
+#if !defined(TARGET_ARCH_IA32)
+    case kGrowableObjectArrayCid:
+      return object_store->allocate_growable_array_stub();
+#endif  // !defined(TARGET_ARCH_IA32)
     case kContextCid:
       return object_store->allocate_context_stub();
     case kUnhandledExceptionCid:


@@ -52,6 +52,7 @@ namespace dart {
   V(AllocateMintSharedWithoutFPURegs) \
   V(AllocateClosure) \
   V(AllocateContext) \
+  V(AllocateGrowableArray) \
   V(AllocateObject) \
   V(AllocateObjectParameterized) \
   V(AllocateObjectSlow) \


@@ -222,7 +222,7 @@ class _GrowableList<T> extends ListBase<T> {
     return list;
   }
 
-  @pragma("vm:recognized", "asm-intrinsic")
+  @pragma("vm:recognized", "other")
   @pragma("vm:exact-result-type",
       <dynamic>[_GrowableList, "result-type-uses-passed-type-arguments"])
   @pragma("vm:external-name", "GrowableList_allocate")