Reland "[vm] Refactor StubCodeCompiler to be a real class."

This is a reland of commit afdf640866

Original change's description:
> [vm] Refactor StubCodeCompiler to be a real class.
>
> Previously, StubCodeCompiler was just a set of static methods, all of
> which take an assembler as their first arg. This makes it hard to pass
> additional state to the ~160 macro-defined stub generators.
>
> This refactor makes StubCodeCompiler a real class, with assembler as a
> field, so we can easily add new fields to the class later to pass new
> state without having to update every stub generator.
>
> assembler is declared as a public field for a few reasons:
> - There's one place where it needs to be accessed by a non-member
>   function (in the ia32 file).
> - If it's private, it has to be named assembler_, which would mean a lot
>   more insignificant diffs.
> - Non-member functions that take assembler would have to take assembler_,
>   for consistency with the __ macro, which would be weird.
>
> Change-Id: I142f0803a07c7839753188065c69c334d4d1798a
> TEST=CI
> Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/289924
> Reviewed-by: Ryan Macnak <rmacnak@google.com>
> Commit-Queue: Liam Appelbe <liama@google.com>

Change-Id: Ib5be28c46a0a80b84e31aea60893ab5bbc02e2ea
TEST=CI, including all sanitizers and architectures
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/290681
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Liam Appelbe <liama@google.com>
Authored by Liam Appelbe on 2023-03-22 23:48:56 +00:00; committed by Commit Queue
parent c7cee7fd46
commit 929fec660b
12 changed files with 714 additions and 960 deletions
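
The shape of the refactor, for readers skimming the diff below: stub generators move from static functions that thread an Assembler* through every call to member functions on a StubCodeCompiler instance that holds the assembler as a field, so the __ macro keeps expanding to assembler->. Here is a minimal, self-contained sketch of the before/after pattern; Assembler's two methods, GenerateExampleStub, and the main() driver are simplified placeholders, not the real VM API.

// Hypothetical, simplified sketch of the refactoring pattern. The real
// StubCodeCompiler has ~160 generators and a much larger Assembler; the
// __ macro below mirrors the VM's convention.
#include <iostream>

struct Assembler {
  void EnterStubFrame() { std::cout << "EnterStubFrame\n"; }
  void Ret() { std::cout << "Ret\n"; }
};

namespace before {
// Old scheme: a bag of statics; every generator takes assembler explicitly.
#define __ assembler->
struct StubCodeCompiler {
  static void GenerateExampleStub(Assembler* assembler) {
    __ EnterStubFrame();
    __ Ret();
  }
};
#undef __
}  // namespace before

namespace after {
// New scheme: a real class. The field is public so the same __ macro still
// works and non-member helpers (e.g. in the ia32 file) can still reach it.
#define __ assembler->
class StubCodeCompiler {
 public:
  explicit StubCodeCompiler(Assembler* assembler_) : assembler(assembler_) {}
  Assembler* assembler;

  void GenerateExampleStub() {
    __ EnterStubFrame();  // expands to assembler->EnterStubFrame();
    __ Ret();
  }
  // New per-compilation state can be added here as fields without touching
  // any generator's signature.
};
#undef __
}  // namespace after

int main() {
  Assembler assembler;
  before::StubCodeCompiler::GenerateExampleStub(&assembler);
  after::StubCodeCompiler compiler(&assembler);
  compiler.GenerateExampleStub();
  return 0;
}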


@ -38,7 +38,7 @@ intptr_t StubCodeCompiler::WordOffsetFromFpToCpuRegister(
return slots_from_fp;
}
void StubCodeCompiler::GenerateInitStaticFieldStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInitStaticFieldStub() {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for result.
__ PushRegister(InitStaticFieldABI::kFieldReg);
@ -49,8 +49,7 @@ void StubCodeCompiler::GenerateInitStaticFieldStub(Assembler* assembler) {
__ Ret();
}
void StubCodeCompiler::GenerateInitLateStaticFieldStub(Assembler* assembler,
bool is_final) {
void StubCodeCompiler::GenerateInitLateStaticFieldStub(bool is_final) {
const Register kResultReg = InitStaticFieldABI::kResultReg;
const Register kFieldReg = InitStaticFieldABI::kFieldReg;
const Register kAddressReg = InitLateStaticFieldInternalRegs::kAddressReg;
@ -100,16 +99,15 @@ void StubCodeCompiler::GenerateInitLateStaticFieldStub(Assembler* assembler,
}
}
void StubCodeCompiler::GenerateInitLateStaticFieldStub(Assembler* assembler) {
GenerateInitLateStaticFieldStub(assembler, /*is_final=*/false);
void StubCodeCompiler::GenerateInitLateStaticFieldStub() {
GenerateInitLateStaticFieldStub(/*is_final=*/false);
}
void StubCodeCompiler::GenerateInitLateFinalStaticFieldStub(
Assembler* assembler) {
GenerateInitLateStaticFieldStub(assembler, /*is_final=*/true);
void StubCodeCompiler::GenerateInitLateFinalStaticFieldStub() {
GenerateInitLateStaticFieldStub(/*is_final=*/true);
}
void StubCodeCompiler::GenerateInitInstanceFieldStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInitInstanceFieldStub() {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for result.
__ PushRegistersInOrder(
@ -121,8 +119,7 @@ void StubCodeCompiler::GenerateInitInstanceFieldStub(Assembler* assembler) {
__ Ret();
}
void StubCodeCompiler::GenerateInitLateInstanceFieldStub(Assembler* assembler,
bool is_final) {
void StubCodeCompiler::GenerateInitLateInstanceFieldStub(bool is_final) {
const Register kInstanceReg = InitInstanceFieldABI::kInstanceReg;
const Register kFieldReg = InitInstanceFieldABI::kFieldReg;
const Register kAddressReg = InitLateInstanceFieldInternalRegs::kAddressReg;
@ -196,16 +193,15 @@ void StubCodeCompiler::GenerateInitLateInstanceFieldStub(Assembler* assembler,
}
}
void StubCodeCompiler::GenerateInitLateInstanceFieldStub(Assembler* assembler) {
GenerateInitLateInstanceFieldStub(assembler, /*is_final=*/false);
void StubCodeCompiler::GenerateInitLateInstanceFieldStub() {
GenerateInitLateInstanceFieldStub(/*is_final=*/false);
}
void StubCodeCompiler::GenerateInitLateFinalInstanceFieldStub(
Assembler* assembler) {
GenerateInitLateInstanceFieldStub(assembler, /*is_final=*/true);
void StubCodeCompiler::GenerateInitLateFinalInstanceFieldStub() {
GenerateInitLateInstanceFieldStub(/*is_final=*/true);
}
void StubCodeCompiler::GenerateThrowStub(Assembler* assembler) {
void StubCodeCompiler::GenerateThrowStub() {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for (unused) result.
__ PushRegister(ThrowABI::kExceptionReg);
@ -213,7 +209,7 @@ void StubCodeCompiler::GenerateThrowStub(Assembler* assembler) {
__ Breakpoint();
}
void StubCodeCompiler::GenerateReThrowStub(Assembler* assembler) {
void StubCodeCompiler::GenerateReThrowStub() {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for (unused) result.
__ PushRegistersInOrder(
@ -222,7 +218,7 @@ void StubCodeCompiler::GenerateReThrowStub(Assembler* assembler) {
__ Breakpoint();
}
void StubCodeCompiler::GenerateAssertBooleanStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAssertBooleanStub() {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for (unused) result.
__ PushRegister(AssertBooleanABI::kObjectReg);
@ -230,7 +226,7 @@ void StubCodeCompiler::GenerateAssertBooleanStub(Assembler* assembler) {
__ Breakpoint();
}
void StubCodeCompiler::GenerateAssertSubtypeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAssertSubtypeStub() {
__ EnterStubFrame();
__ PushRegistersInOrder({AssertSubtypeABI::kInstantiatorTypeArgumentsReg,
AssertSubtypeABI::kFunctionTypeArgumentsReg,
@ -243,7 +239,7 @@ void StubCodeCompiler::GenerateAssertSubtypeStub(Assembler* assembler) {
__ Ret();
}
void StubCodeCompiler::GenerateAssertAssignableStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAssertAssignableStub() {
#if !defined(TARGET_ARCH_IA32)
__ Breakpoint();
#else
@ -277,8 +273,7 @@ void StubCodeCompiler::GenerateAssertAssignableStub(Assembler* assembler) {
// - InstantiationABI::kResultTypeArgumentsReg: instantiated tav
// Clobbers:
// - InstantiationABI::kScratchReg
void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub() {
// We only need the offset of the current entry up until we either call
// the runtime or until we retrieve the instantiated type arguments out of it
// to put in the result register, so we use the result register to store it.
@ -495,8 +490,7 @@ void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub(
}
void StubCodeCompiler::
GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub(
Assembler* assembler) {
GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub() {
const Register kScratch1Reg = InstantiationABI::kResultTypeArgumentsReg;
const Register kScratch2Reg = InstantiationABI::kScratchReg;
// Return the instantiator type arguments if its nullability is compatible for
@ -518,11 +512,11 @@ void StubCodeCompiler::
__ Ret();
__ Bind(&cache_lookup);
GenerateInstantiateTypeArgumentsStub(assembler);
GenerateInstantiateTypeArgumentsStub();
}
void StubCodeCompiler::GenerateInstantiateTypeArgumentsMayShareFunctionTAStub(
Assembler* assembler) {
void StubCodeCompiler::
GenerateInstantiateTypeArgumentsMayShareFunctionTAStub() {
const Register kScratch1Reg = InstantiationABI::kResultTypeArgumentsReg;
const Register kScratch2Reg = InstantiationABI::kScratchReg;
// Return the function type arguments if its nullability is compatible for
@ -544,7 +538,7 @@ void StubCodeCompiler::GenerateInstantiateTypeArgumentsMayShareFunctionTAStub(
__ Ret();
__ Bind(&cache_lookup);
GenerateInstantiateTypeArgumentsStub(assembler);
GenerateInstantiateTypeArgumentsStub();
}
static void BuildInstantiateTypeRuntimeCall(Assembler* assembler) {
@ -626,48 +620,45 @@ static void BuildInstantiateTypeParameterStub(Assembler* assembler,
BuildInstantiateTypeRuntimeCall(assembler);
}
void StubCodeCompiler::GenerateInstantiateTypeNonNullableClassTypeParameterStub(
Assembler* assembler) {
void StubCodeCompiler::
GenerateInstantiateTypeNonNullableClassTypeParameterStub() {
BuildInstantiateTypeParameterStub(assembler, Nullability::kNonNullable,
/*is_function_parameter=*/false);
}
void StubCodeCompiler::GenerateInstantiateTypeNullableClassTypeParameterStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateInstantiateTypeNullableClassTypeParameterStub() {
BuildInstantiateTypeParameterStub(assembler, Nullability::kNullable,
/*is_function_parameter=*/false);
}
void StubCodeCompiler::GenerateInstantiateTypeLegacyClassTypeParameterStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateInstantiateTypeLegacyClassTypeParameterStub() {
BuildInstantiateTypeParameterStub(assembler, Nullability::kLegacy,
/*is_function_parameter=*/false);
}
void StubCodeCompiler::
GenerateInstantiateTypeNonNullableFunctionTypeParameterStub(
Assembler* assembler) {
GenerateInstantiateTypeNonNullableFunctionTypeParameterStub() {
BuildInstantiateTypeParameterStub(assembler, Nullability::kNonNullable,
/*is_function_parameter=*/true);
}
void StubCodeCompiler::GenerateInstantiateTypeNullableFunctionTypeParameterStub(
Assembler* assembler) {
void StubCodeCompiler::
GenerateInstantiateTypeNullableFunctionTypeParameterStub() {
BuildInstantiateTypeParameterStub(assembler, Nullability::kNullable,
/*is_function_parameter=*/true);
}
void StubCodeCompiler::GenerateInstantiateTypeLegacyFunctionTypeParameterStub(
Assembler* assembler) {
void StubCodeCompiler::
GenerateInstantiateTypeLegacyFunctionTypeParameterStub() {
BuildInstantiateTypeParameterStub(assembler, Nullability::kLegacy,
/*is_function_parameter=*/true);
}
void StubCodeCompiler::GenerateInstantiateTypeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInstantiateTypeStub() {
BuildInstantiateTypeRuntimeCall(assembler);
}
void StubCodeCompiler::GenerateInstanceOfStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInstanceOfStub() {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for the result.
__ PushRegistersInOrder({TypeTestABI::kInstanceReg, TypeTestABI::kDstTypeReg,
@ -806,16 +797,12 @@ static void GenerateTypeIsTopTypeForSubtyping(Assembler* assembler,
__ Jump(&check_top_type, compiler::Assembler::kNearJump);
}
void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingStub(
Assembler* assembler) {
GenerateTypeIsTopTypeForSubtyping(assembler,
/*null_safety=*/false);
void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingStub() {
GenerateTypeIsTopTypeForSubtyping(assembler, /*null_safety=*/false);
}
void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingNullSafeStub(
Assembler* assembler) {
GenerateTypeIsTopTypeForSubtyping(assembler,
/*null_safety=*/true);
void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingNullSafeStub() {
GenerateTypeIsTopTypeForSubtyping(assembler, /*null_safety=*/true);
}
// Version of Instance::NullIsAssignableTo(other, inst_tav, fun_tav) used when
@ -955,16 +942,12 @@ static void GenerateNullIsAssignableToType(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateNullIsAssignableToTypeStub(
Assembler* assembler) {
GenerateNullIsAssignableToType(assembler,
/*null_safety=*/false);
void StubCodeCompiler::GenerateNullIsAssignableToTypeStub() {
GenerateNullIsAssignableToType(assembler, /*null_safety=*/false);
}
void StubCodeCompiler::GenerateNullIsAssignableToTypeNullSafeStub(
Assembler* assembler) {
GenerateNullIsAssignableToType(assembler,
/*null_safety=*/true);
void StubCodeCompiler::GenerateNullIsAssignableToTypeNullSafeStub() {
GenerateNullIsAssignableToType(assembler, /*null_safety=*/true);
}
#if !defined(TARGET_ARCH_IA32)
// The <X>TypeTestStubs are used to test whether a given value is of a given
@ -986,15 +969,14 @@ void StubCodeCompiler::GenerateNullIsAssignableToTypeNullSafeStub(
//
// Note of warning: The caller will not populate CODE_REG and we have therefore
// no access to the pool.
void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDefaultTypeTestStub() {
__ LoadFromOffset(CODE_REG, THR,
target::Thread::slow_type_test_stub_offset());
__ Jump(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
}
// Used instead of DefaultTypeTestStub when null is assignable.
void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDefaultNullableTypeTestStub() {
Label done;
// Fast case for 'null'.
@ -1009,11 +991,11 @@ void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
__ Ret();
}
void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
void StubCodeCompiler::GenerateTopTypeTypeTestStub() {
__ Ret();
}
void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
void StubCodeCompiler::GenerateUnreachableTypeTestStub() {
__ Breakpoint();
}
@ -1057,12 +1039,11 @@ static void BuildTypeParameterTypeTestStub(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub() {
BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/true);
}
void StubCodeCompiler::GenerateTypeParameterTypeTestStub(Assembler* assembler) {
void StubCodeCompiler::GenerateTypeParameterTypeTestStub() {
BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/false);
}
@ -1086,8 +1067,7 @@ static void InvokeTypeCheckFromTypeTestStub(Assembler* assembler,
__ Drop(1); // Discard return value.
}
void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateLazySpecializeTypeTestStub() {
__ LoadFromOffset(CODE_REG, THR,
target::Thread::lazy_specialize_type_test_stub_offset());
__ EnterStubFrame();
@ -1097,8 +1077,7 @@ void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
}
// Used instead of LazySpecializeTypeTestStub when null is assignable.
void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub() {
Label done;
__ CompareObject(TypeTestABI::kInstanceReg, NullObject());
@ -1114,7 +1093,7 @@ void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
__ Ret();
}
void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSlowTypeTestStub() {
Label done, call_runtime;
if (!FLAG_precompiled_mode) {
@ -1188,7 +1167,7 @@ void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
#else
// Type testing stubs are not implemented on IA32.
#define GENERATE_BREAKPOINT_STUB(Name) \
void StubCodeCompiler::Generate##Name##Stub(Assembler* assembler) { \
void StubCodeCompiler::Generate##Name##Stub() { \
__ Breakpoint(); \
}
@ -1204,7 +1183,7 @@ VM_TYPE_TESTING_STUB_CODE_LIST(GENERATE_BREAKPOINT_STUB)
// AllocateClosureABI::kResultReg: new allocated Closure object.
// Clobbered:
// AllocateClosureABI::kScratchReg
void StubCodeCompiler::GenerateAllocateClosureStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateClosureStub() {
const intptr_t instance_size =
target::RoundedAllocationSize(target::Closure::InstanceSize());
__ EnsureHasClassIdInDEBUG(kFunctionCid, AllocateClosureABI::kFunctionReg,
@ -1280,7 +1259,7 @@ void StubCodeCompiler::GenerateAllocateClosureStub(Assembler* assembler) {
__ PopRegister(AllocateClosureABI::kFunctionReg);
__ PopRegister(AllocateClosureABI::kResultReg);
ASSERT(target::WillAllocateNewOrRememberedObject(instance_size));
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
__ LeaveStubFrame();
// AllocateClosureABI::kResultReg: new object
@ -1290,7 +1269,7 @@ void StubCodeCompiler::GenerateAllocateClosureStub(Assembler* assembler) {
// Generates allocation stub for _GrowableList class.
// This stub exists solely for performance reasons: default allocation
// stub is slower as it doesn't use specialized inline allocation.
void StubCodeCompiler::GenerateAllocateGrowableArrayStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateGrowableArrayStub() {
#if defined(TARGET_ARCH_IA32)
// This stub is not used on IA32 because IA32 version of
// StubCodeCompiler::GenerateAllocationStubForClass uses inline
@ -1324,7 +1303,7 @@ void StubCodeCompiler::GenerateAllocateGrowableArrayStub(Assembler* assembler) {
#endif // defined(TARGET_ARCH_IA32)
}
void StubCodeCompiler::GenerateAllocateRecordStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateRecordStub() {
const Register result_reg = AllocateRecordABI::kResultReg;
const Register shape_reg = AllocateRecordABI::kShapeReg;
const Register temp_reg = AllocateRecordABI::kTemp1Reg;
@ -1428,13 +1407,12 @@ void StubCodeCompiler::GenerateAllocateRecordStub(Assembler* assembler) {
__ Drop(1);
__ PopRegister(AllocateRecordABI::kResultReg);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
__ LeaveStubFrame();
__ Ret();
}
void StubCodeCompiler::GenerateAllocateSmallRecordStub(Assembler* assembler,
intptr_t num_fields,
void StubCodeCompiler::GenerateAllocateSmallRecordStub(intptr_t num_fields,
bool has_named_fields) {
ASSERT(num_fields == 2 || num_fields == 3);
const Register result_reg = AllocateSmallRecordABI::kResultReg;
@ -1505,31 +1483,30 @@ void StubCodeCompiler::GenerateAllocateSmallRecordStub(Assembler* assembler,
__ Drop(4);
__ PopRegister(result_reg);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
__ LeaveStubFrame();
__ Ret();
}
void StubCodeCompiler::GenerateAllocateRecord2Stub(Assembler* assembler) {
GenerateAllocateSmallRecordStub(assembler, 2, /*has_named_fields=*/false);
void StubCodeCompiler::GenerateAllocateRecord2Stub() {
GenerateAllocateSmallRecordStub(2, /*has_named_fields=*/false);
}
void StubCodeCompiler::GenerateAllocateRecord2NamedStub(Assembler* assembler) {
GenerateAllocateSmallRecordStub(assembler, 2, /*has_named_fields=*/true);
void StubCodeCompiler::GenerateAllocateRecord2NamedStub() {
GenerateAllocateSmallRecordStub(2, /*has_named_fields=*/true);
}
void StubCodeCompiler::GenerateAllocateRecord3Stub(Assembler* assembler) {
GenerateAllocateSmallRecordStub(assembler, 3, /*has_named_fields=*/false);
void StubCodeCompiler::GenerateAllocateRecord3Stub() {
GenerateAllocateSmallRecordStub(3, /*has_named_fields=*/false);
}
void StubCodeCompiler::GenerateAllocateRecord3NamedStub(Assembler* assembler) {
GenerateAllocateSmallRecordStub(assembler, 3, /*has_named_fields=*/true);
void StubCodeCompiler::GenerateAllocateRecord3NamedStub() {
GenerateAllocateSmallRecordStub(3, /*has_named_fields=*/true);
}
// The UnhandledException class lives in the VM isolate, so it cannot cache
// an allocation stub for itself. Instead, we cache it in the stub code list.
void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub() {
Thread* thread = Thread::Current();
auto class_table = thread->isolate_group()->class_table();
ASSERT(class_table->HasValidClassAt(kUnhandledExceptionCid));
@ -1537,27 +1514,25 @@ void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub(
class_table->At(kUnhandledExceptionCid));
ASSERT(!cls.IsNull());
GenerateAllocationStubForClass(assembler, nullptr, cls,
Code::Handle(Code::null()),
GenerateAllocationStubForClass(nullptr, cls, Code::Handle(Code::null()),
Code::Handle(Code::null()));
}
#define TYPED_DATA_ALLOCATION_STUB(clazz) \
void StubCodeCompiler::GenerateAllocate##clazz##Stub(Assembler* assembler) { \
GenerateAllocateTypedDataArrayStub(assembler, kTypedData##clazz##Cid); \
void StubCodeCompiler::GenerateAllocate##clazz##Stub() { \
GenerateAllocateTypedDataArrayStub(kTypedData##clazz##Cid); \
}
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATION_STUB)
#undef TYPED_DATA_ALLOCATION_STUB
void StubCodeCompiler::GenerateLateInitializationError(Assembler* assembler,
bool with_fpu_regs) {
void StubCodeCompiler::GenerateLateInitializationError(bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
__ PushRegister(LateInitializationErrorABI::kFieldReg);
__ CallRuntime(kLateFieldNotInitializedErrorRuntimeEntry,
/*argument_count=*/1);
};
GenerateSharedStubGeneric(
assembler, /*save_fpu_registers=*/with_fpu_regs,
/*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::
late_initialization_error_shared_with_fpu_regs_stub_offset()
@ -1566,125 +1541,109 @@ void StubCodeCompiler::GenerateLateInitializationError(Assembler* assembler,
/*allow_return=*/false, perform_runtime_call);
}
void StubCodeCompiler::GenerateLateInitializationErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateLateInitializationError(assembler, /*with_fpu_regs=*/false);
void StubCodeCompiler::
GenerateLateInitializationErrorSharedWithoutFPURegsStub() {
GenerateLateInitializationError(/*with_fpu_regs=*/false);
}
void StubCodeCompiler::GenerateLateInitializationErrorSharedWithFPURegsStub(
Assembler* assembler) {
GenerateLateInitializationError(assembler, /*with_fpu_regs=*/true);
void StubCodeCompiler::GenerateLateInitializationErrorSharedWithFPURegsStub() {
GenerateLateInitializationError(/*with_fpu_regs=*/true);
}
void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub() {
GenerateSharedStub(
assembler, /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
/*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
target::Thread::null_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub() {
GenerateSharedStub(
assembler, /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
/*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
target::Thread::null_error_shared_with_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub() {
GenerateSharedStub(
assembler, /*save_fpu_registers=*/false, &kArgumentNullErrorRuntimeEntry,
/*save_fpu_registers=*/false, &kArgumentNullErrorRuntimeEntry,
target::Thread::null_arg_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub() {
GenerateSharedStub(
assembler, /*save_fpu_registers=*/true, &kArgumentNullErrorRuntimeEntry,
/*save_fpu_registers=*/true, &kArgumentNullErrorRuntimeEntry,
target::Thread::null_arg_error_shared_with_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub() {
GenerateSharedStub(
assembler, /*save_fpu_registers=*/false, &kNullCastErrorRuntimeEntry,
/*save_fpu_registers=*/false, &kNullCastErrorRuntimeEntry,
target::Thread::null_cast_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub() {
GenerateSharedStub(
assembler, /*save_fpu_registers=*/true, &kNullCastErrorRuntimeEntry,
/*save_fpu_registers=*/true, &kNullCastErrorRuntimeEntry,
target::Thread::null_cast_error_shared_with_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub() {
GenerateSharedStub(
assembler, /*save_fpu_registers=*/false,
&kInterruptOrStackOverflowRuntimeEntry,
/*save_fpu_registers=*/false, &kInterruptOrStackOverflowRuntimeEntry,
target::Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/true);
}
void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub() {
GenerateSharedStub(
assembler, /*save_fpu_registers=*/true,
&kInterruptOrStackOverflowRuntimeEntry,
/*save_fpu_registers=*/true, &kInterruptOrStackOverflowRuntimeEntry,
target::Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
/*allow_return=*/true);
}
void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateRangeError(assembler, /*with_fpu_regs=*/false);
void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub() {
GenerateRangeError(/*with_fpu_regs=*/false);
}
void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub(
Assembler* assembler) {
GenerateRangeError(assembler, /*with_fpu_regs=*/true);
void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub() {
GenerateRangeError(/*with_fpu_regs=*/true);
}
void StubCodeCompiler::GenerateWriteErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateWriteError(assembler, /*with_fpu_regs=*/false);
void StubCodeCompiler::GenerateWriteErrorSharedWithoutFPURegsStub() {
GenerateWriteError(/*with_fpu_regs=*/false);
}
void StubCodeCompiler::GenerateWriteErrorSharedWithFPURegsStub(
Assembler* assembler) {
GenerateWriteError(assembler, /*with_fpu_regs=*/true);
void StubCodeCompiler::GenerateWriteErrorSharedWithFPURegsStub() {
GenerateWriteError(/*with_fpu_regs=*/true);
}
void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub() {
__ Breakpoint(); // Marker stub.
}
void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAsynchronousGapMarkerStub() {
__ Breakpoint(); // Marker stub.
}
void StubCodeCompiler::GenerateUnknownDartCodeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateUnknownDartCodeStub() {
// Enter frame to include caller into the backtrace.
__ EnterStubFrame();
__ Breakpoint(); // Marker stub.
}
void StubCodeCompiler::GenerateNotLoadedStub(Assembler* assembler) {
void StubCodeCompiler::GenerateNotLoadedStub() {
__ EnterStubFrame();
__ CallRuntime(kNotLoadedRuntimeEntry, 0);
__ Breakpoint();
}
#define EMIT_BOX_ALLOCATION(Name) \
void StubCodeCompiler::GenerateAllocate##Name##Stub(Assembler* assembler) { \
void StubCodeCompiler::GenerateAllocate##Name##Stub() { \
Label call_runtime; \
if (!FLAG_use_slow_path && FLAG_inline_alloc) { \
__ TryAllocate(compiler::Name##Class(), &call_runtime, \
@ -1735,13 +1694,13 @@ static void GenerateBoxFpuValueStub(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateBoxDoubleStub(Assembler* assembler) {
void StubCodeCompiler::GenerateBoxDoubleStub() {
GenerateBoxFpuValueStub(assembler, compiler::DoubleClass(),
kBoxDoubleRuntimeEntry,
&Assembler::StoreUnboxedDouble);
}
void StubCodeCompiler::GenerateBoxFloat32x4Stub(Assembler* assembler) {
void StubCodeCompiler::GenerateBoxFloat32x4Stub() {
#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
GenerateBoxFpuValueStub(assembler, compiler::Float32x4Class(),
kBoxFloat32x4RuntimeEntry,
@ -1751,7 +1710,7 @@ void StubCodeCompiler::GenerateBoxFloat32x4Stub(Assembler* assembler) {
#endif
}
void StubCodeCompiler::GenerateBoxFloat64x2Stub(Assembler* assembler) {
void StubCodeCompiler::GenerateBoxFloat64x2Stub() {
#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
GenerateBoxFpuValueStub(assembler, compiler::Float64x2Class(),
kBoxFloat64x2RuntimeEntry,
@ -1761,7 +1720,7 @@ void StubCodeCompiler::GenerateBoxFloat64x2Stub(Assembler* assembler) {
#endif
}
void StubCodeCompiler::GenerateDoubleToIntegerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDoubleToIntegerStub() {
__ EnterStubFrame();
__ StoreUnboxedDouble(DoubleToIntegerStubABI::kInputReg, THR,
target::Thread::unboxed_runtime_arg_offset());
@ -1884,7 +1843,6 @@ static void GenerateAllocateSuspendState(Assembler* assembler,
}
void StubCodeCompiler::GenerateSuspendStub(
Assembler* assembler,
bool call_suspend_function,
bool pass_type_arguments,
intptr_t suspend_entry_point_offset_in_thread,
@ -2140,36 +2098,35 @@ void StubCodeCompiler::GenerateSuspendStub(
__ Jump(&call_dart);
}
void StubCodeCompiler::GenerateAwaitStub(Assembler* assembler) {
GenerateSuspendStub(assembler,
/*call_suspend_function=*/true,
/*pass_type_arguments=*/false,
target::Thread::suspend_state_await_entry_point_offset(),
target::ObjectStore::suspend_state_await_offset());
void StubCodeCompiler::GenerateAwaitStub() {
GenerateSuspendStub(
/*call_suspend_function=*/true,
/*pass_type_arguments=*/false,
target::Thread::suspend_state_await_entry_point_offset(),
target::ObjectStore::suspend_state_await_offset());
}
void StubCodeCompiler::GenerateAwaitWithTypeCheckStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAwaitWithTypeCheckStub() {
GenerateSuspendStub(
assembler,
/*call_suspend_function=*/true,
/*pass_type_arguments=*/true,
target::Thread::suspend_state_await_with_type_check_entry_point_offset(),
target::ObjectStore::suspend_state_await_with_type_check_offset());
}
void StubCodeCompiler::GenerateYieldAsyncStarStub(Assembler* assembler) {
void StubCodeCompiler::GenerateYieldAsyncStarStub() {
GenerateSuspendStub(
assembler,
/*call_suspend_function=*/true,
/*pass_type_arguments=*/false,
target::Thread::suspend_state_yield_async_star_entry_point_offset(),
target::ObjectStore::suspend_state_yield_async_star_offset());
}
void StubCodeCompiler::GenerateSuspendSyncStarAtStartStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateSuspendSyncStarAtStartStub() {
GenerateSuspendStub(
assembler,
/*call_suspend_function=*/true,
/*pass_type_arguments=*/false,
target::Thread::
@ -2177,15 +2134,13 @@ void StubCodeCompiler::GenerateSuspendSyncStarAtStartStub(
target::ObjectStore::suspend_state_suspend_sync_star_at_start_offset());
}
void StubCodeCompiler::GenerateSuspendSyncStarAtYieldStub(
Assembler* assembler) {
GenerateSuspendStub(assembler,
/*call_suspend_function=*/false,
/*pass_type_arguments=*/false, -1, -1);
void StubCodeCompiler::GenerateSuspendSyncStarAtYieldStub() {
GenerateSuspendStub(
/*call_suspend_function=*/false,
/*pass_type_arguments=*/false, -1, -1);
}
void StubCodeCompiler::GenerateInitSuspendableFunctionStub(
Assembler* assembler,
intptr_t init_entry_point_offset_in_thread,
intptr_t init_function_offset_in_object_store) {
const Register kTypeArgs = InitSuspendableFunctionStubABI::kTypeArgsReg;
@ -2205,27 +2160,25 @@ void StubCodeCompiler::GenerateInitSuspendableFunctionStub(
__ Ret();
}
void StubCodeCompiler::GenerateInitAsyncStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInitAsyncStub() {
GenerateInitSuspendableFunctionStub(
assembler, target::Thread::suspend_state_init_async_entry_point_offset(),
target::Thread::suspend_state_init_async_entry_point_offset(),
target::ObjectStore::suspend_state_init_async_offset());
}
void StubCodeCompiler::GenerateInitAsyncStarStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInitAsyncStarStub() {
GenerateInitSuspendableFunctionStub(
assembler,
target::Thread::suspend_state_init_async_star_entry_point_offset(),
target::ObjectStore::suspend_state_init_async_star_offset());
}
void StubCodeCompiler::GenerateInitSyncStarStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInitSyncStarStub() {
GenerateInitSuspendableFunctionStub(
assembler,
target::Thread::suspend_state_init_sync_star_entry_point_offset(),
target::ObjectStore::suspend_state_init_sync_star_offset());
}
void StubCodeCompiler::GenerateResumeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateResumeStub() {
const Register kSuspendState = ResumeStubABI::kSuspendStateReg;
const Register kTemp = ResumeStubABI::kTempReg;
const Register kFrameSize = ResumeStubABI::kFrameSizeReg;
@ -2406,7 +2359,6 @@ void StubCodeCompiler::GenerateResumeStub(Assembler* assembler) {
}
void StubCodeCompiler::GenerateReturnStub(
Assembler* assembler,
intptr_t return_entry_point_offset_in_thread,
intptr_t return_function_offset_in_object_store,
intptr_t return_stub_offset_in_thread) {
@ -2438,32 +2390,29 @@ void StubCodeCompiler::GenerateReturnStub(
__ Ret();
}
void StubCodeCompiler::GenerateReturnAsyncStub(Assembler* assembler) {
void StubCodeCompiler::GenerateReturnAsyncStub() {
GenerateReturnStub(
assembler,
target::Thread::suspend_state_return_async_entry_point_offset(),
target::ObjectStore::suspend_state_return_async_offset(),
target::Thread::return_async_stub_offset());
}
void StubCodeCompiler::GenerateReturnAsyncNotFutureStub(Assembler* assembler) {
void StubCodeCompiler::GenerateReturnAsyncNotFutureStub() {
GenerateReturnStub(
assembler,
target::Thread::
suspend_state_return_async_not_future_entry_point_offset(),
target::ObjectStore::suspend_state_return_async_not_future_offset(),
target::Thread::return_async_not_future_stub_offset());
}
void StubCodeCompiler::GenerateReturnAsyncStarStub(Assembler* assembler) {
void StubCodeCompiler::GenerateReturnAsyncStarStub() {
GenerateReturnStub(
assembler,
target::Thread::suspend_state_return_async_star_entry_point_offset(),
target::ObjectStore::suspend_state_return_async_star_offset(),
target::Thread::return_async_star_stub_offset());
}
void StubCodeCompiler::GenerateAsyncExceptionHandlerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAsyncExceptionHandlerStub() {
const Register kSuspendState = AsyncExceptionHandlerStubABI::kSuspendStateReg;
ASSERT(kSuspendState != kExceptionObjectReg);
ASSERT(kSuspendState != kStackTraceObjectReg);
@ -2514,7 +2463,7 @@ void StubCodeCompiler::GenerateAsyncExceptionHandlerStub(Assembler* assembler) {
__ Breakpoint();
}
void StubCodeCompiler::GenerateCloneSuspendStateStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCloneSuspendStateStub() {
const Register kSource = CloneSuspendStateStubABI::kSourceReg;
const Register kDestination = CloneSuspendStateStubABI::kDestinationReg;
const Register kTemp = CloneSuspendStateStubABI::kTempReg;


@ -48,29 +48,28 @@ class UnresolvedPcRelativeCall : public ZoneAllocated {
using UnresolvedPcRelativeCalls = GrowableArray<UnresolvedPcRelativeCall*>;
class StubCodeCompiler : public AllStatic {
class StubCodeCompiler {
public:
StubCodeCompiler(Assembler* assembler_) : assembler(assembler_) {}
Assembler* assembler;
#if !defined(TARGET_ARCH_IA32)
static void GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic);
void GenerateBuildMethodExtractorStub(const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic);
#endif
static void EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers = true);
void EnsureIsNewOrRemembered(bool preserve_registers = true);
static ArrayPtr BuildStaticCallsTable(
Zone* zone,
compiler::UnresolvedPcRelativeCalls* unresolved_calls);
#define STUB_CODE_GENERATE(name) \
static void Generate##name##Stub(Assembler* assembler);
#define STUB_CODE_GENERATE(name) void Generate##name##Stub();
VM_STUB_CODE_LIST(STUB_CODE_GENERATE)
#undef STUB_CODE_GENERATE
static void GenerateAllocationStubForClass(
Assembler* assembler,
void GenerateAllocationStubForClass(
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const dart::Code& allocate_object,
@ -88,16 +87,13 @@ class StubCodeCompiler : public AllStatic {
kCheckExactness,
kIgnoreExactness,
};
static void GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
Optimized optimized,
CallType type,
Exactness exactness);
static void GenerateNArgsCheckInlineCacheStubForEntryKind(
Assembler* assembler,
void GenerateNArgsCheckInlineCacheStub(intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
Optimized optimized,
CallType type,
Exactness exactness);
void GenerateNArgsCheckInlineCacheStubForEntryKind(
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -105,9 +101,8 @@ class StubCodeCompiler : public AllStatic {
CallType type,
Exactness exactness,
CodeEntryKind entry_kind);
static void GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg);
static void GenerateOptimizedUsageCounterIncrement(Assembler* assembler);
void GenerateUsageCounterIncrement(Register temp_reg);
void GenerateOptimizedUsageCounterIncrement();
#if defined(TARGET_ARCH_X64)
static constexpr intptr_t kNativeCallbackTrampolineSize = 10;
@ -145,8 +140,7 @@ class StubCodeCompiler : public AllStatic {
#error What architecture?
#endif
static void GenerateJITCallbackTrampolines(Assembler* assembler,
intptr_t next_callback_id);
void GenerateJITCallbackTrampolines(intptr_t next_callback_id);
// Calculates the offset (in words) from FP to the provided [cpu_register].
//
@ -161,64 +155,52 @@ class StubCodeCompiler : public AllStatic {
static intptr_t WordOffsetFromFpToCpuRegister(Register cpu_register);
private:
DISALLOW_COPY_AND_ASSIGN(StubCodeCompiler);
// Common function for generating InitLateStaticField and
// InitLateFinalStaticField stubs.
static void GenerateInitLateStaticFieldStub(Assembler* assembler,
bool is_final);
void GenerateInitLateStaticFieldStub(bool is_final);
// Common function for generating InitLateInstanceField and
// InitLateFinalInstanceField stubs.
static void GenerateInitLateInstanceFieldStub(Assembler* assembler,
bool is_final);
void GenerateInitLateInstanceFieldStub(bool is_final);
// Common function for generating Allocate<TypedData>Array stubs.
static void GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid);
void GenerateAllocateTypedDataArrayStub(intptr_t cid);
static void GenerateAllocateSmallRecordStub(Assembler* assembler,
intptr_t num_fields,
bool has_named_fields);
void GenerateAllocateSmallRecordStub(intptr_t num_fields,
bool has_named_fields);
static void GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
std::function<void()> perform_runtime_call);
void GenerateSharedStubGeneric(bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
std::function<void()> perform_runtime_call);
// Generates shared slow path stub which saves registers and calls
// [target] runtime entry.
// If [store_runtime_result_in_result_register], then stub puts result into
// SharedSlowPathStubABI::kResultReg.
static void GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
bool store_runtime_result_in_result_register = false);
void GenerateSharedStub(bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
bool store_runtime_result_in_result_register = false);
static void GenerateLateInitializationError(Assembler* assembler,
bool with_fpu_regs);
void GenerateLateInitializationError(bool with_fpu_regs);
static void GenerateRangeError(Assembler* assembler, bool with_fpu_regs);
static void GenerateWriteError(Assembler* assembler, bool with_fpu_regs);
void GenerateRangeError(bool with_fpu_regs);
void GenerateWriteError(bool with_fpu_regs);
static void GenerateSuspendStub(
Assembler* assembler,
bool call_suspend_function,
bool pass_type_arguments,
intptr_t suspend_entry_point_offset_in_thread,
intptr_t suspend_function_offset_in_object_store);
static void GenerateInitSuspendableFunctionStub(
Assembler* assembler,
void GenerateSuspendStub(bool call_suspend_function,
bool pass_type_arguments,
intptr_t suspend_entry_point_offset_in_thread,
intptr_t suspend_function_offset_in_object_store);
void GenerateInitSuspendableFunctionStub(
intptr_t init_entry_point_offset_in_thread,
intptr_t init_function_offset_in_object_store);
static void GenerateReturnStub(
Assembler* assembler,
intptr_t return_entry_point_offset_in_thread,
intptr_t return_function_offset_in_object_store,
intptr_t return_stub_offset_in_thread);
void GenerateReturnStub(intptr_t return_entry_point_offset_in_thread,
intptr_t return_function_offset_in_object_store,
intptr_t return_stub_offset_in_thread);
};
} // namespace compiler
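
A side note on the STUB_CODE_GENERATE change above: VM_STUB_CODE_LIST is an X-macro, so turning the per-stub declarations from statics into member functions only required editing that one macro rather than ~160 declarations. Below is a minimal sketch of the pattern, with a hypothetical two-entry EXAMPLE_STUB_LIST standing in for the real list (which lives elsewhere in the VM).

#include <cstdio>

// Hypothetical stand-in for VM_STUB_CODE_LIST; the real list has ~160 entries.
#define EXAMPLE_STUB_LIST(V)                                                   \
  V(InitStaticField)                                                           \
  V(Throw)

class StubCodeCompiler {
 public:
  // Expands to: void GenerateInitStaticFieldStub(); void GenerateThrowStub();
#define STUB_CODE_GENERATE(name) void Generate##name##Stub();
  EXAMPLE_STUB_LIST(STUB_CODE_GENERATE)
#undef STUB_CODE_GENERATE
};

// The out-of-line definitions can be driven by the same list.
#define DEFINE_EXAMPLE_STUB(name)                                              \
  void StubCodeCompiler::Generate##name##Stub() {                              \
    std::puts("generate " #name "Stub");                                       \
  }
EXAMPLE_STUB_LIST(DEFINE_EXAMPLE_STUB)
#undef DEFINE_EXAMPLE_STUB

int main() {
  StubCodeCompiler compiler;
#define CALL_EXAMPLE_STUB(name) compiler.Generate##name##Stub();
  EXAMPLE_STUB_LIST(CALL_EXAMPLE_STUB)
#undef CALL_EXAMPLE_STUB
  return 0;
}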


@ -35,8 +35,7 @@ namespace compiler {
//
// WARNING: This might clobber all registers except for [R0], [THR] and [FP].
// The caller should simply call LeaveStubFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers) {
void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
// If the object is not remembered we call a leaf-runtime to add it to the
// remembered set.
Label done;
@ -62,7 +61,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
// SP + 4*R4 : address of return value.
// R9 : address of the runtime function to call.
// R4 : number of arguments to the call.
void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallToRuntimeStub() {
const intptr_t thread_offset = target::NativeArguments::thread_offset();
const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
const intptr_t argv_offset = target::NativeArguments::argv_offset();
@ -153,7 +152,6 @@ void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
}
void StubCodeCompiler::GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
@ -181,7 +179,6 @@ void StubCodeCompiler::GenerateSharedStubGeneric(
}
void StubCodeCompiler::GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
@ -203,7 +200,7 @@ void StubCodeCompiler::GenerateSharedStub(
SharedSlowPathStubABI::kResultReg)));
}
};
GenerateSharedStubGeneric(assembler, save_fpu_registers,
GenerateSharedStubGeneric(save_fpu_registers,
self_code_stub_offset_from_thread, allow_return,
perform_runtime_call);
}
@ -212,7 +209,6 @@ void StubCodeCompiler::GenerateSharedStub(
// R4: The type_arguments_field_offset (or 0)
// SP+0: The object from which we are tearing a method off.
void StubCodeCompiler::GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic) {
@ -300,7 +296,7 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
__ Ret();
}
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateEnterSafepointStub() {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers);
@ -338,13 +334,12 @@ static void GenerateExitSafepointStubCommon(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateExitSafepointStub() {
GenerateExitSafepointStubCommon(
assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
GenerateExitSafepointStubCommon(
assembler,
kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@ -359,8 +354,7 @@ void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
// On exit:
// Stack: preserved
// NOTFP, R4: clobbered, although normally callee-saved
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
COMPILE_ASSERT(IsAbiPreservedRegister(R4));
// TransitionGeneratedToNative might clobber LR if it takes the slow path.
@ -380,7 +374,6 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
#if !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateJITCallbackTrampolines(
Assembler* assembler,
intptr_t next_callback_id) {
#if defined(USING_SIMULATOR)
// TODO(37299): FFI is not support in SIMARM.
@ -493,8 +486,7 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
}
#endif // !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
__ EnterStubFrame();
__ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
__ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@ -503,8 +495,7 @@ void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
__ Breakpoint();
}
void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
bool with_fpu_regs) {
void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
ASSERT(!GenericCheckBoundInstr::UseUnboxedRepresentation());
__ PushRegistersInOrder(
@ -514,22 +505,21 @@ void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
};
GenerateSharedStubGeneric(
assembler, /*save_fpu_registers=*/with_fpu_regs,
/*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
: target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false, perform_runtime_call);
}
void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
bool with_fpu_regs) {
void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
__ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
__ Breakpoint();
};
GenerateSharedStubGeneric(
assembler, /*save_fpu_registers=*/with_fpu_regs,
/*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
: target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@ -632,14 +622,14 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -652,7 +642,7 @@ void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
// R9 : address of the native function to call.
// R2 : address of first argument in argument array.
// R1 : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -661,7 +651,7 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
// Input parameters:
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@ -681,7 +671,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
void StubCodeCompiler::GenerateFixCallersTargetStub() {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -731,8 +721,7 @@ void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
@ -754,8 +743,7 @@ void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
@ -965,8 +953,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// R0: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(IP, kZapCodeReg);
__ Push(IP);
@ -980,8 +967,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
// R0: exception, must be preserved
// R1: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(IP, kZapCodeReg);
__ Push(IP);
@ -993,7 +979,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
__ Ret();
}
void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeStub() {
__ Push(CODE_REG);
__ ldr(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@ -1051,8 +1037,7 @@ static void GenerateDispatcherCode(Assembler* assembler,
// Input:
// ARGS_DESC_REG - arguments descriptor
// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
GenerateNoSuchMethodDispatcherBody(assembler);
}
@ -1065,7 +1050,7 @@ void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
// AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
// R3, R4, R8, R9
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateArrayStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
@ -1196,8 +1181,7 @@ void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
}
// Called for allocation of Mint.
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1209,16 +1193,14 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
}
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
&kAllocateMintRuntimeEntry,
GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
}
// Called for allocation of Mint.
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1231,7 +1213,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(
assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
/*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
@ -1244,7 +1226,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
// R1 : arguments descriptor array.
// R2 : arguments array.
// R3 : current thread.
void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
// Push code object to PC marker slot.
@ -1449,7 +1431,7 @@ static void GenerateAllocateContext(Assembler* assembler, Label* slow_case) {
// R0: new allocated Context object.
// Clobbered:
// Potentially any since is can go to runtime.
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateContextStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1494,7 +1476,7 @@ void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
// R0: new object
// Restore the frame pointer.
@ -1510,7 +1492,7 @@ void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
// R0: new allocated Context object.
// Clobbered:
// Potentially any since it can go to runtime.
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCloneContextStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1569,7 +1551,7 @@ void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
// R0: new object
// Restore the frame pointer.
@ -1577,7 +1559,7 @@ void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
__ Ret();
}
void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@ -1750,11 +1732,11 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
}
}
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
void StubCodeCompiler::GenerateWriteBarrierStub() {
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
GenerateWriteBarrierStubHelper(assembler, true);
}
@ -1866,16 +1848,15 @@ static void GenerateAllocateObjectHelper(Assembler* assembler,
}
// Called for inline allocation of objects (any class).
void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectStub() {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
}
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
const Register kClsReg = R1;
if (!FLAG_precompiled_mode) {
@ -1905,14 +1886,13 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
__ LeaveDartFrameAndReturn();
}
// Called for inline allocation of objects.
void StubCodeCompiler::GenerateAllocationStubForClass(
Assembler* assembler,
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const Code& allocate_object,
@ -1981,8 +1961,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
// LR : return address.
// SP : address of last argument.
// R4: arguments descriptor array.
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
__ EnterStubFrame();
// Load the receiver.
@ -2020,8 +1999,7 @@ void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
// R9: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
Register ic_reg = R9;
Register func_reg = R8;
if (FLAG_precompiled_mode) {
@ -2044,8 +2022,7 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
}
// Loads function into 'temp_reg'.
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
if (FLAG_precompiled_mode) {
__ Breakpoint();
return;
@ -2156,7 +2133,6 @@ static void GenerateRecordEntryPoint(Assembler* assembler) {
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -2174,9 +2150,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
}
if (optimized == kOptimized) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateOptimizedUsageCounterIncrement();
} else {
GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
GenerateUsageCounterIncrement(/* scratch */ R8);
}
ASSERT(exactness == kIgnoreExactness); // Unimplemented.
@ -2401,67 +2377,63 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
__ Stop("Unimplemented");
}
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R9: ICData
// R8: Function
// LR: return address
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kIgnoreExactness);
}
// R0: receiver
@ -2469,8 +2441,7 @@ void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
// R8: Function
// LR: return address
void StubCodeCompiler::
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
__ Stop("Unimplemented");
}
@ -2478,19 +2449,17 @@ void StubCodeCompiler::
// R9: ICData
// R8: Function
// LR: return address
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
GenerateRecordEntryPoint(assembler);
GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
GenerateUsageCounterIncrement(/* scratch */ R8);
#if defined(DEBUG)
{
Label ok;
@ -2559,28 +2528,26 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ R8);
GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
Token::kILLEGAL, kUnoptimized, kStaticCall,
kIgnoreExactness);
}
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// Stub for compiling a function and jumping to the compiled code.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
void StubCodeCompiler::GenerateLazyCompileStub() {
__ EnterStubFrame();
// Preserve arg desc, pass function.
COMPILE_ASSERT(FUNCTION_REG < ARGS_DESC_REG);
@ -2594,7 +2561,7 @@ void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
}
// R9: Contains an ICData.
void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateICCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2611,8 +2578,7 @@ void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2627,7 +2593,7 @@ void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2643,7 +2609,7 @@ void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
}
// Called only from unoptimized code. All relevant registers have been saved.
void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDebugStepCheckStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2894,27 +2860,27 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 3);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 5);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 7);
}
// Return the current stack pointer address, used to do stack alignment checks.
void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateGetCStackPointerStub() {
__ mov(R0, Operand(SP));
__ Ret();
}
@ -2928,7 +2894,7 @@ void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
// Does not return.
//
// Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
void StubCodeCompiler::GenerateJumpToFrameStub() {
COMPILE_ASSERT(kExceptionObjectReg == R0);
COMPILE_ASSERT(kStackTraceObjectReg == R1);
COMPILE_ASSERT(IsAbiPreservedRegister(R4));
@ -2982,7 +2948,7 @@ void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
//
// The arguments are stored in the Thread object.
// Does not return.
void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
WRITES_RETURN_ADDRESS_TO_LR(
__ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
@ -3006,7 +2972,7 @@ void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptForRewindStub() {
// Push zap value instead of CODE_REG.
__ LoadImmediate(IP, kZapCodeReg);
__ Push(IP);
@ -3026,7 +2992,7 @@ void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Calls to the runtime to optimize the given function.
// R8: function to be reoptimized.
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizeFunctionStub() {
__ ldr(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
__ Push(ARGS_DESC_REG);
@ -3107,8 +3073,7 @@ static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
@ -3142,8 +3107,7 @@ void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
const Register temp = R2;
const Register left = R1;
const Register right = R0;
@ -3160,7 +3124,7 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
// FUNCTION_REG: target function
// ARGS_DESC_REG: arguments descriptor
// CODE_REG: target Code
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
void StubCodeCompiler::GenerateMegamorphicCallStub() {
__ LoadTaggedClassIdMayBeSmi(R8, R0);
// R8: receiver cid as Smi.
__ ldr(R2,
@ -3215,10 +3179,10 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
__ b(&loop);
__ Bind(&miss);
GenerateSwitchableCallMissStub(assembler);
GenerateSwitchableCallMissStub();
}
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateICCallThroughCodeStub() {
Label loop, found, miss;
__ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ ldr(R4, FieldAddress(IC_DATA_REG,
@ -3267,8 +3231,7 @@ void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
// R9: MonomorphicSmiableCall object
//
// R2, R3: clobbered
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
__ LoadClassIdMayBeSmi(IP, R0);
// entrypoint_ should come right after expected_cid_
@ -3301,7 +3264,7 @@ static void CallSwitchableCallMissRuntimeEntry(Assembler* assembler,
// Called from switchable IC calls.
// R0: receiver
void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSwitchableCallMissStub() {
__ ldr(CODE_REG,
Address(THR, target::Thread::switchable_call_miss_stub_offset()));
__ EnterStubFrame();
@ -3317,7 +3280,7 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
// R9: SingleTargetCache
// Passed to target:
// CODE_REG: target Code object
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSingleTargetCallStub() {
Label miss;
__ LoadClassIdMayBeSmi(R1, R0);
__ ldrh(R2,
@ -3360,8 +3323,7 @@ static int GetScaleFactor(intptr_t size) {
return -1;
}
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid) {
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
const intptr_t element_size = TypedDataElementSizeInBytes(cid);
const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
const intptr_t scale_shift = GetScaleFactor(element_size);
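For concreteness, the scale factor computed here is just the log2 of the element size, so the stub can turn an element count into a byte count with a shift. A worked example for a hypothetical typed-data class with 8-byte elements:

// element_size = 8                      (8-byte elements, e.g. a 64-bit type)
// scale_shift  = GetScaleFactor(8) = 3  (since 1 << 3 == 8)
// so a length of 16 elements corresponds to 16 << 3 = 128 bytes of payload.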


@ -34,8 +34,7 @@ namespace compiler {
//
// WARNING: This might clobber all registers except for [R0], [THR] and [FP].
// The caller should simply call LeaveStubFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers) {
void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
// If the object is not remembered we call a leaf-runtime to add it to the
// remembered set.
Label done;
@ -154,7 +153,7 @@ static void WithExceptionCatchingTrampoline(Assembler* assembler,
// SP + 8*R4 : address of return value.
// R5 : address of the runtime function to call.
// R4 : number of arguments to the call.
void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallToRuntimeStub() {
const intptr_t thread_offset = target::NativeArguments::thread_offset();
const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
const intptr_t argv_offset = target::NativeArguments::argv_offset();
@ -270,7 +269,6 @@ void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
}
void StubCodeCompiler::GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
@ -298,7 +296,6 @@ void StubCodeCompiler::GenerateSharedStubGeneric(
}
void StubCodeCompiler::GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
@ -318,12 +315,12 @@ void StubCodeCompiler::GenerateSharedStub(
SharedSlowPathStubABI::kResultReg)));
}
};
GenerateSharedStubGeneric(assembler, save_fpu_registers,
GenerateSharedStubGeneric(save_fpu_registers,
self_code_stub_offset_from_thread, allow_return,
perform_runtime_call);
}
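Judging from the fragments above, GenerateSharedStub is a thin adapter over GenerateSharedStubGeneric: it wraps the target runtime entry, plus the optional copy of the result into SharedSlowPathStubABI::kResultReg, in a perform_runtime_call closure and forwards the remaining flags. A rough sketch of that shape (abbreviated, not the VM's verbatim code):

void StubCodeCompiler::GenerateSharedStub(
    bool save_fpu_registers,
    const RuntimeEntry* target,
    intptr_t self_code_stub_offset_from_thread,
    bool allow_return,
    bool store_runtime_result_in_result_register) {
  auto perform_runtime_call = [&]() {
    __ CallRuntime(*target, /*argument_count=*/0);
    // ... if store_runtime_result_in_result_register, move the runtime
    // result into SharedSlowPathStubABI::kResultReg here ...
  };
  GenerateSharedStubGeneric(save_fpu_registers,
                            self_code_stub_offset_from_thread, allow_return,
                            perform_runtime_call);
}

The mint allocation stubs further down in this file show the typical call sites.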
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateEnterSafepointStub() {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
@ -378,13 +375,12 @@ static void GenerateExitSafepointStubCommon(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateExitSafepointStub() {
GenerateExitSafepointStubCommon(
assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
GenerateExitSafepointStubCommon(
assembler,
kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@ -399,8 +395,7 @@ void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
// On exit:
// R19: clobbered, although normally callee-saved
// Stack: preserved, CSP == SP
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
COMPILE_ASSERT(IsAbiPreservedRegister(R19));
SPILLS_RETURN_ADDRESS_FROM_LR_TO_REGISTER(__ mov(R19, LR));
@ -432,7 +427,6 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
#if !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateJITCallbackTrampolines(
Assembler* assembler,
intptr_t next_callback_id) {
#if !defined(HOST_ARCH_ARM64)
// TODO(37299): FFI is not supported in SIMARM64.
@ -577,7 +571,6 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// R1: The extracted method.
// R4: The type_arguments_field_offset (or 0)
void StubCodeCompiler::GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic) {
@ -670,8 +663,7 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
__ Ret();
}
void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
__ EnterStubFrame();
__ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
__ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@ -680,8 +672,7 @@ void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
__ Breakpoint();
}
void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
bool with_fpu_regs) {
void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
// If the generated code has unboxed index/length we need to box them before
// calling the runtime entry.
@ -730,22 +721,21 @@ void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
};
GenerateSharedStubGeneric(
assembler, /*save_fpu_registers=*/with_fpu_regs,
/*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
: target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false, perform_runtime_call);
}
void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
bool with_fpu_regs) {
void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
__ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
__ Breakpoint();
};
GenerateSharedStubGeneric(
assembler, /*save_fpu_registers=*/with_fpu_regs,
/*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
: target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@ -865,14 +855,14 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -885,7 +875,7 @@ void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
// R5 : address of the native function to call.
// R2 : address of first argument in argument array.
// R1 : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -894,7 +884,7 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
// Input parameters:
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@ -915,7 +905,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
void StubCodeCompiler::GenerateFixCallersTargetStub() {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -967,8 +957,7 @@ void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
// Load code pointer to this stub from the thread:
// The one that is passed in is not correct; it points to the code object
// that needs to be replaced.
@ -989,8 +978,7 @@ void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
// Load code pointer to this stub from the thread:
// The one that is passed in is not correct; it points to the code object
// that needs to be replaced.
@ -1210,8 +1198,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// R0: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, kZapCodeReg);
__ Push(TMP);
@ -1225,8 +1212,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
// R0: exception, must be preserved
// R1: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, kZapCodeReg);
__ Push(TMP);
@ -1238,7 +1224,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
__ ret();
}
void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeStub() {
__ Push(CODE_REG);
__ ldr(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@ -1296,8 +1282,7 @@ static void GenerateDispatcherCode(Assembler* assembler,
// Input:
// ARGS_DESC_REG - arguments descriptor
// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
GenerateNoSuchMethodDispatcherBody(assembler);
}
@ -1310,7 +1295,7 @@ void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
// AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
// R3, R7
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateArrayStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
@ -1467,8 +1452,7 @@ void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
__ ret();
}
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
// For test purposes, call the allocation stub without an inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1480,15 +1464,13 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
}
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
&kAllocateMintRuntimeEntry,
GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
}
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
// For test purposes, call the allocation stub without an inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1501,7 +1483,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(
assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
/*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
@ -1514,7 +1496,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
// R1 : arguments descriptor array.
// R2 : arguments array.
// R3 : current thread.
void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
__ Comment("InvokeDartCodeStub");
// Copy the C stack pointer (CSP/R31) into the stack pointer we'll actually
@ -1731,7 +1713,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// R0: new allocated Context object.
// Clobbered:
// R2, R3, R4, TMP
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateContextStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1786,7 +1768,7 @@ void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
// R0: new object
// Restore the frame pointer.
@ -1802,7 +1784,7 @@ void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
// R0: new allocated Context object.
// Clobbered:
// R1, (R2), R3, R4, (TMP)
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCloneContextStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1862,7 +1844,7 @@ void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
// R0: new object
// Restore the frame pointer.
@ -1870,7 +1852,7 @@ void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
__ ret();
}
void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@ -2068,11 +2050,11 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
}
}
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
void StubCodeCompiler::GenerateWriteBarrierStub() {
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
GenerateWriteBarrierStubHelper(assembler, true);
}
@ -2182,16 +2164,15 @@ static void GenerateAllocateObjectHelper(Assembler* assembler,
}
// Called for inline allocation of objects (any class).
void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectStub() {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
}
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
if (!FLAG_precompiled_mode) {
__ ldr(CODE_REG,
Address(THR, target::Thread::call_to_runtime_stub_offset()));
@ -2217,7 +2198,7 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
__ LeaveStubFrame();
@ -2226,7 +2207,6 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
// Called for inline allocation of objects.
void StubCodeCompiler::GenerateAllocationStubForClass(
Assembler* assembler,
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const Code& allocate_object,
@ -2299,8 +2279,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
// LR : return address.
// SP : address of last argument.
// R4: arguments descriptor array.
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
__ EnterStubFrame();
// Load the receiver.
@ -2339,8 +2318,7 @@ void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
// R5: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
Register ic_reg = R5;
Register func_reg = R6;
if (FLAG_precompiled_mode) {
@ -2367,8 +2345,7 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
}
// Loads function into 'temp_reg'.
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
if (FLAG_precompiled_mode) {
__ Breakpoint();
return;
@ -2482,7 +2459,6 @@ static void GenerateRecordEntryPoint(Assembler* assembler) {
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -2500,9 +2476,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
}
if (optimized == kOptimized) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateOptimizedUsageCounterIncrement();
} else {
GenerateUsageCounterIncrement(assembler, /*scratch=*/R6);
GenerateUsageCounterIncrement(/*scratch=*/R6);
}
ASSERT(exactness == kIgnoreExactness); // Unimplemented.
@ -2735,67 +2711,63 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
__ Stop("Unimplemented");
}
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R5: ICData
// R6: Function
// LR: return address
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kIgnoreExactness);
}
// R0: receiver
@ -2803,8 +2775,7 @@ void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
// R6: Function
// LR: return address
void StubCodeCompiler::
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
__ Stop("Unimplemented");
}
@ -2812,19 +2783,17 @@ void StubCodeCompiler::
// R5: ICData
// R6: Function
// LR: return address
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
GenerateRecordEntryPoint(assembler);
GenerateUsageCounterIncrement(assembler, /* scratch */ R6);
GenerateUsageCounterIncrement(/* scratch */ R6);
#if defined(DEBUG)
{
Label ok;
@ -2900,28 +2869,26 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ R6);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ R6);
GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
Token::kILLEGAL, kUnoptimized, kStaticCall,
kIgnoreExactness);
}
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ R6);
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ R6);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// Stub for compiling a function and jumping to the compiled code.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
void StubCodeCompiler::GenerateLazyCompileStub() {
// Preserve arg desc.
__ EnterStubFrame();
__ Push(ARGS_DESC_REG); // Save arg. desc.
@ -2939,7 +2906,7 @@ void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
}
// R5: Contains an ICData.
void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateICCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2957,8 +2924,7 @@ void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2974,7 +2940,7 @@ void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2989,7 +2955,7 @@ void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
}
// Called only from unoptimized code. All relevant registers have been saved.
void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDebugStepCheckStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -3219,26 +3185,26 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 3);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 5);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 7);
}
void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateGetCStackPointerStub() {
__ mov(R0, CSP);
__ ret();
}
@ -3252,7 +3218,7 @@ void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
// Does not return.
//
// Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
void StubCodeCompiler::GenerateJumpToFrameStub() {
ASSERT(kExceptionObjectReg == R0);
ASSERT(kStackTraceObjectReg == R1);
__ set_lr_state(compiler::LRState::Clobbered());
@ -3305,7 +3271,7 @@ void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
//
// The arguments are stored in the Thread object.
// Does not return.
void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
WRITES_RETURN_ADDRESS_TO_LR(
__ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
@ -3328,7 +3294,7 @@ void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptForRewindStub() {
// Push zap value instead of CODE_REG.
__ LoadImmediate(TMP, kZapCodeReg);
__ Push(TMP);
@ -3348,7 +3314,7 @@ void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Calls to the runtime to optimize the given function.
// R6: function to be re-optimized.
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizeFunctionStub() {
__ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
__ EnterStubFrame();
__ Push(ARGS_DESC_REG);
@ -3416,8 +3382,7 @@ static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
@ -3450,8 +3415,7 @@ void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
const Register left = R1;
const Register right = R0;
__ LoadFromOffset(left, SP, 1 * target::kWordSize);
@ -3466,7 +3430,7 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
// FUNCTION_REG: target function
// CODE_REG: target Code
// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
void StubCodeCompiler::GenerateMegamorphicCallStub() {
// Jump if receiver is a smi.
Label smi_case;
__ BranchIfSmi(R0, &smi_case);
@ -3541,13 +3505,13 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
__ b(&cid_loaded);
__ Bind(&miss);
GenerateSwitchableCallMissStub(assembler);
GenerateSwitchableCallMissStub();
}
// Input:
// R0 - receiver
// IC_DATA_REG - icdata
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateICCallThroughCodeStub() {
Label loop, found, miss;
__ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ ldr(ARGS_DESC_REG,
@ -3599,8 +3563,7 @@ void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
// R5: MonomorphicSmiableCall object
//
// R1: clobbered
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
Label miss;
__ LoadClassIdMayBeSmi(IP0, R0);
@ -3623,7 +3586,7 @@ void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
// Called from switchable IC calls.
// R0: receiver
void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSwitchableCallMissStub() {
__ ldr(CODE_REG,
Address(THR, target::Thread::switchable_call_miss_stub_offset()));
__ EnterStubFrame();
@ -3650,7 +3613,7 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
// R5: SingleTargetCache
// Passed to target:
// CODE_REG: target Code object
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSingleTargetCallStub() {
Label miss;
__ LoadClassIdMayBeSmi(R1, R0);
__ ldr(R2, FieldAddress(R5, target::SingleTargetCache::lower_limit_offset()),
@ -3705,8 +3668,7 @@ static int GetScaleFactor(intptr_t size) {
return -1;
}
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid) {
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
const intptr_t element_size = TypedDataElementSizeInBytes(cid);
const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
const intptr_t scale_shift = GetScaleFactor(element_size);


@ -33,8 +33,7 @@ namespace compiler {
//
// WARNING: This might clobber all registers except for [EAX], [THR] and [FP].
// The caller should simply call LeaveFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers) {
void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
// If the object is not remembered we call a leaf-runtime to add it to the
// remembered set.
Label done;
@ -61,7 +60,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
// ECX : address of the runtime function to call.
// EDX : number of arguments to the call.
// Must preserve callee saved registers EDI and EBX.
void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallToRuntimeStub() {
const intptr_t thread_offset = target::NativeArguments::thread_offset();
const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
const intptr_t argv_offset = target::NativeArguments::argv_offset();
@ -137,7 +136,7 @@ void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
__ ret();
}
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateEnterSafepointStub() {
__ pushal();
__ subl(SPREG, Immediate(8));
__ movsd(Address(SPREG, 0), XMM0);
@ -179,13 +178,12 @@ static void GenerateExitSafepointStubCommon(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateExitSafepointStub() {
GenerateExitSafepointStubCommon(
assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
GenerateExitSafepointStubCommon(
assembler,
kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@ -200,8 +198,7 @@ void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
// On exit:
// Stack: preserved
// EBX: clobbered (even though it's normally callee-saved)
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
__ popl(EBX);
__ movl(ECX, compiler::Immediate(target::Thread::exit_through_ffi()));
@ -214,7 +211,6 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
}
void StubCodeCompiler::GenerateJITCallbackTrampolines(
Assembler* assembler,
intptr_t next_callback_id) {
Label done, ret_4;
@ -346,7 +342,6 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
}
void StubCodeCompiler::GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
@ -356,7 +351,6 @@ void StubCodeCompiler::GenerateSharedStubGeneric(
}
void StubCodeCompiler::GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
@ -366,20 +360,17 @@ void StubCodeCompiler::GenerateSharedStub(
__ Breakpoint();
}
void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
bool with_fpu_regs) {
void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
// Only used in AOT.
__ Breakpoint();
}
void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
bool with_fpu_regs) {
void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
// Only used in AOT.
__ Breakpoint();
}
void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
// Only used in AOT.
__ Breakpoint();
}
@ -472,14 +463,14 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -492,7 +483,7 @@ void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
// EAX : address of first argument in argument array.
// ECX : address of the native function to call.
// EDX : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -501,7 +492,7 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
// Input parameters:
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
__ EnterStubFrame();
__ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushl(Immediate(0)); // Setup space on stack for return value.
@ -517,7 +508,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
void StubCodeCompiler::GenerateFixCallersTargetStub() {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -552,8 +543,7 @@ void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
__ EnterStubFrame();
__ pushl(Immediate(0)); // Setup space on stack for return value.
__ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
@ -566,8 +556,7 @@ void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
__ EnterStubFrame();
// Preserve type arguments register.
__ pushl(AllocateObjectABI::kTypeArgumentsReg);
@ -763,8 +752,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// EAX: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
// Return address for "call" to deopt stub.
__ pushl(Immediate(kZapReturnAddress));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
@ -773,15 +761,14 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
// EAX: exception, must be preserved
// EDX: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
// Return address for "call" to deopt stub.
__ pushl(Immediate(kZapReturnAddress));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
__ ret();
}
void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeStub() {
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
__ ret();
}
@ -832,8 +819,7 @@ static void GenerateDispatcherCode(Assembler* assembler,
GenerateNoSuchMethodDispatcherCode(assembler);
}
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
GenerateNoSuchMethodDispatcherCode(assembler);
}
@ -845,7 +831,7 @@ void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
// AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
// EBX, EDI
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateArrayStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
@ -998,7 +984,7 @@ void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
// ESP + 12 : arguments array.
// ESP + 16 : current thread.
// Uses EAX, EDX, ECX, EDI as temporary registers.
void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
const intptr_t kTargetCodeOffset = 2 * target::kWordSize;
const intptr_t kArgumentsDescOffset = 3 * target::kWordSize;
const intptr_t kArgumentsOffset = 4 * target::kWordSize;
@ -1207,7 +1193,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// EAX: new allocated Context object.
// Clobbered:
// EBX, EDX
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateContextStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1257,7 +1243,7 @@ void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
// EAX: new object
// Restore the frame pointer.
@ -1273,7 +1259,7 @@ void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
// EAX: new allocated Context object.
// Clobbered:
// EBX, ECX, EDX
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCloneContextStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1331,7 +1317,7 @@ void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
// EAX: new object
// Restore the frame pointer.
@ -1339,7 +1325,7 @@ void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
__ ret();
}
void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@ -1367,8 +1353,7 @@ void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
COMPILE_ASSERT(kWriteBarrierObjectReg == EDX);
COMPILE_ASSERT(kWriteBarrierValueReg == EBX);
COMPILE_ASSERT(kWriteBarrierSlotReg == EDI);
static void GenerateWriteBarrierStubHelper(Assembler* assembler,
bool cards) {
static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
// Save values being destroyed.
__ pushl(EAX);
__ pushl(ECX);
@ -1524,24 +1509,23 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
}
}
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
void StubCodeCompiler::GenerateWriteBarrierStub() {
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
GenerateWriteBarrierStubHelper(assembler, true);
}
void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectStub() {
__ int3();
}
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
__ int3();
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
__ int3();
}
@ -1554,7 +1538,6 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
// Returns patch_code_pc offset where patching code for disabling the stub
// has been generated (similar to regularly generated Dart code).
void StubCodeCompiler::GenerateAllocationStubForClass(
Assembler* assembler,
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const Code& allocate_object,
@ -1682,7 +1665,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
if (AllocateObjectInstr::WillAllocateNewOrRemembered(cls)) {
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
}
// AllocateObjectABI::kResultReg: new object
@ -1699,8 +1682,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
// ESP + 4 : address of last argument.
// EDX : arguments descriptor array.
// Uses EAX, EBX, EDI as temporary registers.
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
__ EnterStubFrame();
// Load the receiver.
@ -1738,8 +1720,7 @@ void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
Register ic_reg = ECX;
Register func_reg = EAX;
if (FLAG_trace_optimized_ic_calls) {
@ -1759,8 +1740,7 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
}
// Loads function into 'temp_reg'.
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
if (FLAG_optimization_counter_threshold >= 0) {
Register func_reg = temp_reg;
ASSERT(func_reg != IC_DATA_REG);
@ -1856,24 +1836,22 @@ static void EmitFastSmiOp(Assembler* assembler,
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
Optimized optimized,
CallType type,
Exactness exactness) {
GenerateNArgsCheckInlineCacheStubForEntryKind(
assembler, num_args, handle_ic_miss, kind, optimized, type, exactness,
CodeEntryKind::kNormal);
GenerateNArgsCheckInlineCacheStubForEntryKind(num_args, handle_ic_miss, kind,
optimized, type, exactness,
CodeEntryKind::kNormal);
__ BindUncheckedEntryPoint();
GenerateNArgsCheckInlineCacheStubForEntryKind(
assembler, num_args, handle_ic_miss, kind, optimized, type, exactness,
CodeEntryKind::kUnchecked);
GenerateNArgsCheckInlineCacheStubForEntryKind(num_args, handle_ic_miss, kind,
optimized, type, exactness,
CodeEntryKind::kUnchecked);
}
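
The dispatcher above emits the inline-cache check twice, once for the normal entry and once for the unchecked entry, binding the unchecked entry point between the two copies. A small sketch of that control flow, using printouts as stand-ins for the emitted code:

#include <cstdio>

enum class CodeEntryKind { kNormal, kUnchecked };

// Placeholder for the per-entry-kind body; in the VM this would be the
// ...ForEntryKind variant with its full parameter list.
static void GenerateBodyForEntryKind(CodeEntryKind kind) {
  std::printf("emit IC-check body for %s entry\n",
              kind == CodeEntryKind::kNormal ? "normal" : "unchecked");
}

// The dispatcher emits the body twice so the stub exposes both entry points.
static void GenerateNArgsCheckInlineCacheStub() {
  GenerateBodyForEntryKind(CodeEntryKind::kNormal);
  std::printf("bind unchecked entry point\n");  // __ BindUncheckedEntryPoint();
  GenerateBodyForEntryKind(CodeEntryKind::kUnchecked);
}

int main() {
  GenerateNArgsCheckInlineCacheStub();
  return 0;
}
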
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -1882,9 +1860,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
Exactness exactness,
CodeEntryKind entry_kind) {
if (optimized == kOptimized) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateOptimizedUsageCounterIncrement();
} else {
GenerateUsageCounterIncrement(assembler, /* scratch */ EAX);
GenerateUsageCounterIncrement(/* scratch */ EAX);
}
ASSERT(exactness == kIgnoreExactness); // Unimplemented.
@ -2065,77 +2043,71 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
__ Stop("Unimplemented");
}
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
__ Stop("Unimplemented");
}
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
__ Stop("Unimplemented");
}
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
// ECX: ICData
// EAX: Function
// ESP[0]: return address
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
@ -2143,8 +2115,7 @@ void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
// EAX: Function
// ESP[0]: return address
void StubCodeCompiler::
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
__ Stop("Unimplemented");
}
@ -2152,19 +2123,19 @@ void StubCodeCompiler::
// ECX: ICData
// EAX: Function
// ESP[0]: return address
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// ECX: ICData
// ESP[0]: return address
static void GenerateZeroArgsUnoptimizedStaticCallForEntryKind(
Assembler* assembler,
StubCodeCompiler* stub_code_compiler,
CodeEntryKind entry_kind) {
StubCodeCompiler::GenerateUsageCounterIncrement(assembler, /* scratch */ EAX);
stub_code_compiler->GenerateUsageCounterIncrement(/* scratch */ EAX);
auto* const assembler = stub_code_compiler->assembler;
#if defined(DEBUG)
{
@ -2226,37 +2197,34 @@ static void GenerateZeroArgsUnoptimizedStaticCallForEntryKind(
#endif
}
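
GenerateZeroArgsUnoptimizedStaticCallForEntryKind above is a file-static helper, so instead of receiving the assembler directly it now takes the StubCodeCompiler and reads its public assembler field (the `auto* const assembler = stub_code_compiler->assembler;` line). A runnable sketch of that pattern with simplified stand-in types:

#include <cstdio>

struct Assembler {
  void Emit(const char* what) { std::printf("emit: %s\n", what); }
};

struct StubCodeCompiler {
  Assembler* assembler;  // Public field, as read by the helper below.
  void GenerateUsageCounterIncrement() { assembler->Emit("usage counter"); }
};

// File-static helper: it cannot be a member, so it takes the compiler and
// pulls the assembler out of it.
static void GenerateForEntryKind(StubCodeCompiler* stub_code_compiler) {
  stub_code_compiler->GenerateUsageCounterIncrement();
  Assembler* assembler = stub_code_compiler->assembler;
  assembler->Emit("entry-kind-specific body");
}

int main() {
  Assembler assembler;
  StubCodeCompiler compiler{&assembler};
  GenerateForEntryKind(&compiler);
  return 0;
}
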
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateZeroArgsUnoptimizedStaticCallForEntryKind(assembler,
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
GenerateZeroArgsUnoptimizedStaticCallForEntryKind(this,
CodeEntryKind::kNormal);
__ BindUncheckedEntryPoint();
GenerateZeroArgsUnoptimizedStaticCallForEntryKind(assembler,
GenerateZeroArgsUnoptimizedStaticCallForEntryKind(this,
CodeEntryKind::kUnchecked);
}
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// Stub for compiling a function and jumping to the compiled code.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
void StubCodeCompiler::GenerateLazyCompileStub() {
__ EnterStubFrame();
__ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushl(FUNCTION_REG); // Pass function.
@ -2269,7 +2237,7 @@ void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
}
// ECX: Contains an ICData.
void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateICCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2287,8 +2255,7 @@ void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2304,7 +2271,7 @@ void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2321,7 +2288,7 @@ void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
}
// Called only from unoptimized code.
void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDebugStepCheckStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2568,29 +2535,29 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 3);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 5);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 7);
}
// Return the current stack pointer address, used to do stack alignment checks.
// TOS + 0: return address
// Result in EAX.
void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateGetCStackPointerStub() {
__ leal(EAX, Address(ESP, target::kWordSize));
__ ret();
}
@ -2602,7 +2569,7 @@ void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
// TOS + 3: frame_pointer
// TOS + 4: thread
// No Result.
void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
void StubCodeCompiler::GenerateJumpToFrameStub() {
__ movl(THR, Address(ESP, 4 * target::kWordSize)); // Load target thread.
__ movl(EBP,
Address(ESP, 3 * target::kWordSize)); // Load target frame_pointer.
@ -2641,7 +2608,7 @@ void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
//
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
ASSERT(kExceptionObjectReg == EAX);
ASSERT(kStackTraceObjectReg == EDX);
__ movl(EBX, Address(THR, target::Thread::resume_pc_offset()));
@ -2665,7 +2632,7 @@ void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptForRewindStub() {
// Push the deopt pc.
__ pushl(Address(THR, target::Thread::resume_pc_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@ -2680,7 +2647,7 @@ void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Calls to the runtime to optimize the given function.
// EBX: function to be reoptimized.
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizeFunctionStub() {
__ movl(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
__ pushl(ARGS_DESC_REG);
@ -2756,8 +2723,7 @@ static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
@ -2790,8 +2756,7 @@ void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
const Register left = EAX;
const Register right = EDX;
const Register temp = ECX;
@ -2808,7 +2773,7 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
// EBX: target entry point
// FUNCTION_REG: target function
// ARGS_DESC_REG: argument descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
void StubCodeCompiler::GenerateMegamorphicCallStub() {
// Jump if receiver is a smi.
Label smi_case;
// Check if object (in tmp) is a Smi.
@ -2880,21 +2845,20 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
__ Bind(&miss);
__ popl(EBX); // restore receiver
GenerateSwitchableCallMissStub(assembler);
GenerateSwitchableCallMissStub();
}
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateICCallThroughCodeStub() {
__ int3(); // AOT only.
}
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
__ int3(); // AOT only.
}
// Called from switchable IC calls.
// EBX: receiver
void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSwitchableCallMissStub() {
__ movl(CODE_REG,
Address(THR, target::Thread::switchable_call_miss_stub_offset()));
__ EnterStubFrame();
@ -2916,7 +2880,7 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
__ jmp(EAX);
}
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSingleTargetCallStub() {
__ int3(); // AOT only.
}
@ -2937,8 +2901,7 @@ static ScaleFactor GetScaleFactor(intptr_t size) {
return static_cast<ScaleFactor>(0);
}
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid) {
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
const intptr_t element_size = TypedDataElementSizeInBytes(cid);
const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
ScaleFactor scale_factor = GetScaleFactor(element_size);

View file

@ -34,8 +34,7 @@ namespace compiler {
//
// WARNING: This might clobber all registers except for [A0], [THR] and [FP].
// The caller should simply call LeaveStubFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers) {
void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
// If the object is not remembered we call a leaf-runtime to add it to the
// remembered set.
Label done;
@ -60,7 +59,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
// SP + 8*T4 : address of return value.
// T5 : address of the runtime function to call.
// T4 : number of arguments to the call.
void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallToRuntimeStub() {
const intptr_t thread_offset = target::NativeArguments::thread_offset();
const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
const intptr_t argv_offset = target::NativeArguments::argv_offset();
@ -160,7 +159,6 @@ void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
}
void StubCodeCompiler::GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
@ -188,7 +186,6 @@ void StubCodeCompiler::GenerateSharedStubGeneric(
}
void StubCodeCompiler::GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
@ -207,12 +204,12 @@ void StubCodeCompiler::GenerateSharedStub(
SharedSlowPathStubABI::kResultReg)));
}
};
GenerateSharedStubGeneric(assembler, save_fpu_registers,
GenerateSharedStubGeneric(save_fpu_registers,
self_code_stub_offset_from_thread, allow_return,
perform_runtime_call);
}
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateEnterSafepointStub() {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
@ -253,13 +250,12 @@ static void GenerateExitSafepointStubCommon(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateExitSafepointStub() {
GenerateExitSafepointStubCommon(
assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
GenerateExitSafepointStubCommon(
assembler,
kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@ -274,8 +270,7 @@ void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
// On exit:
// S3: clobbered, although normally callee-saved
// Stack: preserved, CSP == SP
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
COMPILE_ASSERT(IsAbiPreservedRegister(S3));
__ mv(S3, RA);
__ LoadImmediate(T1, target::Thread::exit_through_ffi());
@ -299,7 +294,6 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
#if !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateJITCallbackTrampolines(
Assembler* assembler,
intptr_t next_callback_id) {
#if defined(USING_SIMULATOR)
// TODO(37299): FFI is not supported in SIMRISCV32/64.
@ -421,7 +415,6 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// T1: The extracted method.
// T4: The type_arguments_field_offset (or 0)
void StubCodeCompiler::GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic) {
@ -513,8 +506,7 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
__ Ret();
}
void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
__ EnterStubFrame();
__ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
__ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@ -523,8 +515,7 @@ void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
__ Breakpoint();
}
void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
bool with_fpu_regs) {
void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
// If the generated code has unboxed index/length we need to box them before
// calling the runtime entry.
@ -569,22 +560,21 @@ void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
};
GenerateSharedStubGeneric(
assembler, /*save_fpu_registers=*/with_fpu_regs,
/*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
: target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false, perform_runtime_call);
}
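
GenerateRangeError wraps its runtime call in a lambda and hands it to GenerateSharedStubGeneric along with the FPU flag and the per-thread offset of the stub's own code. The helper's real parameter types are not visible in this diff, so the std::function signature and the offset value below are assumptions; the sketch only illustrates the callback shape.

#include <cstdint>
#include <cstdio>
#include <functional>

// Generic helper: sets up the shared frame, then delegates the actual runtime
// call to the callback supplied by the specific error stub.
static void GenerateSharedStubGeneric(
    bool save_fpu_registers,
    std::intptr_t self_code_stub_offset_from_thread,
    bool allow_return,
    const std::function<void()>& perform_runtime_call) {
  std::printf("enter shared frame (fpu=%d, stub offset=%lld, returns=%d)\n",
              save_fpu_registers,
              static_cast<long long>(self_code_stub_offset_from_thread),
              allow_return);
  perform_runtime_call();
}

static void GenerateRangeError(bool with_fpu_regs) {
  auto perform_runtime_call = [&]() {
    std::printf("emit: box index/length, call range-error runtime entry\n");
  };
  GenerateSharedStubGeneric(/*save_fpu_registers=*/with_fpu_regs,
                            /*self_code_stub_offset_from_thread=*/0x100,
                            /*allow_return=*/false, perform_runtime_call);
}

int main() {
  GenerateRangeError(/*with_fpu_regs=*/true);
  return 0;
}

Capturing by reference lets each error stub inject its own boxing and runtime entry while the frame setup stays shared.
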
void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
bool with_fpu_regs) {
void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
__ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
__ Breakpoint();
};
GenerateSharedStubGeneric(
assembler, /*save_fpu_registers=*/with_fpu_regs,
/*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
: target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@ -684,14 +674,14 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -704,7 +694,7 @@ void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
// R5 : address of the native function to call.
// R2 : address of first argument in argument array.
// R1 : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -713,7 +703,7 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
// Input parameters:
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@ -735,7 +725,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
void StubCodeCompiler::GenerateFixCallersTargetStub() {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -786,8 +776,7 @@ void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
@ -808,8 +797,7 @@ void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
@ -1029,8 +1017,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// A0: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, kZapCodeReg);
__ PushRegister(TMP);
@ -1044,8 +1031,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
// A0: exception, must be preserved
// A1: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, kZapCodeReg);
__ PushRegister(TMP);
@ -1057,7 +1043,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
__ ret();
}
void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeStub() {
__ PushRegister(CODE_REG);
__ lx(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@ -1114,8 +1100,7 @@ static void GenerateDispatcherCode(Assembler* assembler,
// Input:
// ARGS_DESC_REG - arguments descriptor
// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
GenerateNoSuchMethodDispatcherBody(assembler);
}
@ -1128,7 +1113,7 @@ void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
// AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
// T3, T4, T5
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateArrayStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
@ -1278,8 +1263,7 @@ void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
__ ret();
}
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
// For test purposes, call the allocation stub without an inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1291,15 +1275,13 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
}
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
&kAllocateMintRuntimeEntry,
GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
}
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
// For test purposes, call the allocation stub without an inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1312,7 +1294,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(
assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
/*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
@ -1326,7 +1308,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
// A2 : arguments array.
// A3 : current thread.
// Beware! TMP == A3
void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
__ Comment("InvokeDartCodeStub");
__ EnterFrame(1 * target::kWordSize);
@ -1529,7 +1511,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// T1: number of context variables.
// Output:
// A0: new allocated Context object.
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateContextStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1578,7 +1560,7 @@ void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
// A0: new object
// Restore the frame pointer.
@ -1591,7 +1573,7 @@ void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
// T5: context variable to clone.
// Output:
// A0: new allocated Context object.
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCloneContextStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1650,14 +1632,14 @@ void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
// A0: new object
__ LeaveStubFrame();
__ ret();
}
void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@ -1863,11 +1845,11 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
}
}
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
void StubCodeCompiler::GenerateWriteBarrierStub() {
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
GenerateWriteBarrierStubHelper(assembler, true);
}
@ -1971,16 +1953,15 @@ static void GenerateAllocateObjectHelper(Assembler* assembler,
}
// Called for inline allocation of objects (any class).
void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectStub() {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
}
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
if (!FLAG_precompiled_mode) {
__ lx(CODE_REG,
Address(THR, target::Thread::call_to_runtime_stub_offset()));
@ -2005,7 +1986,7 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
__ LeaveStubFrame();
@ -2014,7 +1995,6 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
// Called for inline allocation of objects.
void StubCodeCompiler::GenerateAllocationStubForClass(
Assembler* assembler,
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const Code& allocate_object,
@ -2087,8 +2067,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
// RA : return address.
// SP : address of last argument.
// S4: arguments descriptor array.
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
__ EnterStubFrame();
// Load the receiver.
@ -2126,8 +2105,7 @@ void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
// S5: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
if (FLAG_precompiled_mode) {
__ Breakpoint();
return;
@ -2143,8 +2121,7 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
}
// Loads function into 'func_reg'.
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register func_reg) {
void StubCodeCompiler::GenerateUsageCounterIncrement(Register func_reg) {
if (FLAG_precompiled_mode) {
__ trap();
return;
@ -2264,7 +2241,6 @@ static void GenerateRecordEntryPoint(Assembler* assembler) {
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -2283,9 +2259,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
}
if (optimized == kOptimized) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateOptimizedUsageCounterIncrement();
} else {
GenerateUsageCounterIncrement(assembler, /*scratch=*/T0);
GenerateUsageCounterIncrement(/*scratch=*/T0);
}
ASSERT(exactness == kIgnoreExactness); // Unimplemented.
@ -2508,67 +2484,63 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
__ Stop("Unimplemented");
}
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// A0: receiver
// S5: ICData
// A6: Function
// RA: return address
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kIgnoreExactness);
}
// A0: receiver
@ -2576,8 +2548,7 @@ void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
// A6: Function
// RA: return address
void StubCodeCompiler::
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
__ Stop("Unimplemented");
}
@ -2585,19 +2556,17 @@ void StubCodeCompiler::
// S5: ICData
// A6: Function
// RA: return address
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
GenerateRecordEntryPoint(assembler);
GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
GenerateUsageCounterIncrement(/* scratch */ T0);
#if defined(DEBUG)
{
@ -2673,28 +2642,26 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ T0);
GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
Token::kILLEGAL, kUnoptimized, kStaticCall,
kIgnoreExactness);
}
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ T0);
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// Stub for compiling a function and jumping to the compiled code.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
void StubCodeCompiler::GenerateLazyCompileStub() {
// Preserve arg desc.
__ EnterStubFrame();
// Save arguments descriptor and pass function.
@ -2713,7 +2680,7 @@ void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
// A0: Receiver
// S5: ICData
void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateICCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2733,8 +2700,7 @@ void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
}
// S5: ICData
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2751,7 +2717,7 @@ void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2767,7 +2733,7 @@ void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
}
// Called only from unoptimized code. All relevant registers have been saved.
void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDebugStepCheckStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2996,26 +2962,26 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 3);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 5);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 7);
}
void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateGetCStackPointerStub() {
__ mv(A0, SP);
__ ret();
}
@ -3029,7 +2995,7 @@ void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
// Does not return.
//
// Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
void StubCodeCompiler::GenerateJumpToFrameStub() {
ASSERT(kExceptionObjectReg == A0);
ASSERT(kStackTraceObjectReg == A1);
__ mv(CALLEE_SAVED_TEMP, A0); // Program counter.
@ -3078,7 +3044,7 @@ void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
//
// The arguments are stored in the Thread object.
// Does not return.
void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
// Exception object.
ASSERT(kExceptionObjectReg == A0);
__ LoadFromOffset(A0, THR, target::Thread::active_exception_offset());
@ -3096,7 +3062,7 @@ void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptForRewindStub() {
// Push zap value instead of CODE_REG.
__ LoadImmediate(TMP, kZapCodeReg);
__ PushRegister(TMP);
@ -3115,7 +3081,7 @@ void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Calls to the runtime to optimize the given function.
// A0: function to be re-optimized.
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizeFunctionStub() {
__ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
__ EnterStubFrame();
@ -3203,8 +3169,7 @@ static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
// SP + 4: left operand.
// SP + 0: right operand.
// Return TMP set to 0 if equal.
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
@ -3237,8 +3202,7 @@ void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
// SP + 4: left operand.
// SP + 0: right operand.
// Return TMP set to 0 if equal.
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
const Register left = A0;
const Register right = A1;
__ LoadFromOffset(left, SP, 1 * target::kWordSize);
@ -3254,7 +3218,7 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
// FUNCTION_REG: target function
// CODE_REG: target Code
// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
void StubCodeCompiler::GenerateMegamorphicCallStub() {
// Jump if receiver is a smi.
Label smi_case;
__ BranchIfSmi(A0, &smi_case);
@ -3326,13 +3290,13 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
__ j(&cid_loaded);
__ Bind(&miss);
GenerateSwitchableCallMissStub(assembler);
GenerateSwitchableCallMissStub();
}
// Input:
// A0 - receiver
// IC_DATA_REG - icdata
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateICCallThroughCodeStub() {
Label loop, found, miss;
__ lx(T1, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ lx(ARGS_DESC_REG,
@ -3383,8 +3347,7 @@ void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
// S5: MonomorphicSmiableCall object
//
// T1,T2: clobbered
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
Label miss;
__ LoadClassIdMayBeSmi(T1, A0);
@ -3405,7 +3368,7 @@ void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
// Called from switchable IC calls.
// A0: receiver
void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSwitchableCallMissStub() {
__ lx(CODE_REG,
Address(THR, target::Thread::switchable_call_miss_stub_offset()));
__ EnterStubFrame();
@ -3430,7 +3393,7 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
// S5: SingleTargetCache
// Passed to target:
// CODE_REG: target Code object
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSingleTargetCallStub() {
Label miss;
__ LoadClassIdMayBeSmi(A1, A0);
__ lhu(T2, FieldAddress(S5, target::SingleTargetCache::lower_limit_offset()));
@ -3478,8 +3441,7 @@ static int GetScaleFactor(intptr_t size) {
return -1;
}
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid) {
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
const intptr_t element_size = TypedDataElementSizeInBytes(cid);
const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
const intptr_t scale_shift = GetScaleFactor(element_size);

View file

@ -37,8 +37,7 @@ namespace compiler {
//
// WARNING: This might clobber all registers except for [RAX], [THR] and [FP].
// The caller should simply call LeaveStubFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers) {
void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
// If the object is not remembered we call a leaf-runtime to add it to the
// remembered set.
Label done;
@ -159,7 +158,7 @@ static void WithExceptionCatchingTrampoline(Assembler* assembler,
// RBX : address of the runtime function to call.
// R10 : number of arguments to the call.
// Must preserve callee saved registers R12 and R13.
void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallToRuntimeStub() {
const intptr_t thread_offset = target::NativeArguments::thread_offset();
const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
const intptr_t argv_offset = target::NativeArguments::argv_offset();
@ -253,7 +252,6 @@ void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
}
void StubCodeCompiler::GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
@ -291,7 +289,6 @@ void StubCodeCompiler::GenerateSharedStubGeneric(
}
void StubCodeCompiler::GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
@ -310,12 +307,12 @@ void StubCodeCompiler::GenerateSharedStub(
RAX);
}
};
GenerateSharedStubGeneric(assembler, save_fpu_registers,
GenerateSharedStubGeneric(save_fpu_registers,
self_code_stub_offset_from_thread, allow_return,
perform_runtime_call);
}
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateEnterSafepointStub() {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers);
@ -353,13 +350,12 @@ static void GenerateExitSafepointStubCommon(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateExitSafepointStub() {
GenerateExitSafepointStubCommon(
assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
GenerateExitSafepointStubCommon(
assembler,
kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@ -374,8 +370,7 @@ void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
// On exit:
// Stack pointer lowered by shadow space
// RBX, R12 clobbered
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
__ movq(R12, compiler::Immediate(target::Thread::exit_through_ffi()));
__ TransitionGeneratedToNative(RBX, FPREG, R12,
/*enter_safepoint=*/true);
@ -396,7 +391,6 @@ static const RegisterSet kArgumentRegisterSet(
CallingConventions::kFpuArgumentRegisters);
void StubCodeCompiler::GenerateJITCallbackTrampolines(
Assembler* assembler,
intptr_t next_callback_id) {
Label done;
@ -511,7 +505,6 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// RBX: The extracted method.
// RDX: The type_arguments_field_offset (or 0)
void StubCodeCompiler::GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic) {
@ -602,8 +595,7 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
__ Ret();
}
void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
__ EnterStubFrame();
__ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
__ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@ -612,8 +604,7 @@ void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
__ Breakpoint();
}
void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
bool with_fpu_regs) {
void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
// If the generated code has unboxed index/length we need to box them before
// calling the runtime entry.
@ -664,22 +655,21 @@ void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
};
GenerateSharedStubGeneric(
assembler, /*save_fpu_registers=*/with_fpu_regs,
/*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
: target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false, perform_runtime_call);
}
void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
bool with_fpu_regs) {
void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
__ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
__ Breakpoint();
};
GenerateSharedStubGeneric(
assembler, /*save_fpu_registers=*/with_fpu_regs,
/*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
: target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@ -781,14 +771,14 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -801,7 +791,7 @@ void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
// RAX : address of first argument in argument array.
// RBX : address of the native function to call.
// R10 : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -810,7 +800,7 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
// Input parameters:
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
__ EnterStubFrame();
__ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
// Setup space on stack for return value.
@ -828,7 +818,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
void StubCodeCompiler::GenerateFixCallersTargetStub() {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -874,8 +864,7 @@ void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
@ -894,8 +883,7 @@ void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
@ -1113,8 +1101,7 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// RAX: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
// Push zap value instead of CODE_REG for lazy deopt.
__ pushq(Immediate(kZapCodeReg));
// Return address for "call" to deopt stub.
@ -1127,8 +1114,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
// RAX: exception, must be preserved
// RDX: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
// Push zap value instead of CODE_REG for lazy deopt.
__ pushq(Immediate(kZapCodeReg));
// Return address for "call" to deopt stub.
@ -1139,7 +1125,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
__ ret();
}
void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptimizeStub() {
__ popq(TMP);
__ pushq(CODE_REG);
__ pushq(TMP);
@ -1204,8 +1190,7 @@ static void GenerateDispatcherCode(Assembler* assembler,
// Input:
// IC_DATA_REG - icdata/megamorphic_cache
// RDX - receiver
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
__ EnterStubFrame();
__ movq(ARGS_DESC_REG,
@ -1225,7 +1210,7 @@ void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
// AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
// RCX, RDI, R12
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateArrayStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
@ -1355,14 +1340,13 @@ void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
// Write-barrier elimination might be enabled for this array (depending on the
// array length). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(assembler);
EnsureIsNewOrRemembered();
__ LeaveStubFrame();
__ ret();
}
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1374,15 +1358,13 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
}
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
&kAllocateMintRuntimeEntry,
GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
}
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1395,7 +1377,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(
assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
/*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
@ -1412,7 +1394,7 @@ static const RegisterSet kCalleeSavedRegisterSet(
// RSI : arguments descriptor array.
// RDX : arguments array.
// RCX : current thread.
void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
__ EnterFrame(0);
const Register kTargetReg = CallingConventions::kArg1Reg;
@ -1643,7 +1625,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// RAX: new allocated Context object.
// Clobbered:
// R9, R13
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateContextStub() {
__ LoadObject(R9, NullObject());
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1696,7 +1678,7 @@ void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
// RAX: new object
// Restore the frame pointer.
@ -1712,7 +1694,7 @@ void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
// RAX: new allocated Context object.
// Clobbered:
// R10, R13
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
void StubCodeCompiler::GenerateCloneContextStub() {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1769,7 +1751,7 @@ void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
// RAX: new object
// Restore the frame pointer.
@ -1778,7 +1760,7 @@ void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
__ ret();
}
void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@ -1805,8 +1787,7 @@ void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
COMPILE_ASSERT(kWriteBarrierObjectReg == RDX);
COMPILE_ASSERT(kWriteBarrierValueReg == RAX);
COMPILE_ASSERT(kWriteBarrierSlotReg == R13);
static void GenerateWriteBarrierStubHelper(Assembler* assembler,
bool cards) {
static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
Label add_to_mark_stack, remember_card, lost_race;
__ testq(RAX, Immediate(1 << target::ObjectAlignment::kNewObjectBitPosition));
__ j(ZERO, &add_to_mark_stack);
@ -1956,11 +1937,11 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
}
}
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
void StubCodeCompiler::GenerateWriteBarrierStub() {
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
GenerateWriteBarrierStubHelper(assembler, true);
}
@ -2066,16 +2047,15 @@ static void GenerateAllocateObjectHelper(Assembler* assembler,
}
// Called for inline allocation of objects (any class).
void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectStub() {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
}
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
if (!FLAG_precompiled_mode) {
__ movq(CODE_REG,
Address(THR, target::Thread::call_to_runtime_stub_offset()));
@ -2107,7 +2087,7 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
// AllocateObjectABI::kResultReg: new object
// Restore the frame pointer.
@ -2118,7 +2098,6 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
// Called for inline allocation of objects.
void StubCodeCompiler::GenerateAllocationStubForClass(
Assembler* assembler,
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const Code& allocate_object,
@ -2187,8 +2166,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
// RSP : points to return address.
// RSP + 8 : address of last argument.
// R10 : arguments descriptor array.
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
__ EnterStubFrame();
// Load the receiver.
@ -2229,8 +2207,7 @@ void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
if (FLAG_precompiled_mode) {
__ Breakpoint();
return;
@ -2254,8 +2231,7 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
}
// Loads function into 'temp_reg', preserves IC_DATA_REG.
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
if (FLAG_precompiled_mode) {
__ Breakpoint();
return;
@ -2372,7 +2348,6 @@ static void GenerateRecordEntryPoint(Assembler* assembler) {
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -2390,9 +2365,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
}
if (optimized == kOptimized) {
GenerateOptimizedUsageCounterIncrement(assembler);
GenerateOptimizedUsageCounterIncrement();
} else {
GenerateUsageCounterIncrement(assembler, /* scratch */ RCX);
GenerateUsageCounterIncrement(/* scratch */ RCX);
}
ASSERT(num_args == 1 || num_args == 2);
@ -2648,69 +2623,65 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kCheckExactness);
}
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
kUnoptimized, kInstanceCall, kIgnoreExactness);
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
// RBX: ICData
// RDI: Function
// RSP[0]: return address
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
@ -2718,30 +2689,27 @@ void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
// RDI: Function
// RSP[0]: return address
void StubCodeCompiler::
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kCheckExactness);
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kCheckExactness);
}
// RDX: receiver
// RBX: ICData
// RDI: Function
// RSP[0]: return address
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
GenerateRecordEntryPoint(assembler);
GenerateUsageCounterIncrement(assembler, /* scratch */ RCX);
GenerateUsageCounterIncrement(/* scratch */ RCX);
#if defined(DEBUG)
{
Label ok;
@ -2818,26 +2786,24 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
Token::kILLEGAL, kUnoptimized, kStaticCall,
kIgnoreExactness);
}
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
GenerateNArgsCheckInlineCacheStub(
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// Stub for compiling a function and jumping to the compiled code.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
void StubCodeCompiler::GenerateLazyCompileStub() {
__ EnterStubFrame();
__ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushq(FUNCTION_REG); // Pass function.
@ -2855,7 +2821,7 @@ void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
// RBX: Contains an ICData.
// TOS(0): return address (Dart code).
void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateICCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2874,8 +2840,7 @@ void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2893,7 +2858,7 @@ void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
}
// TOS(0): return address (Dart code).
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2909,7 +2874,7 @@ void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
}
// Called only from unoptimized code.
void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDebugStepCheckStub() {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -3135,22 +3100,22 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 3);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 5);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
GenerateSubtypeNTestCacheStub(assembler, 7);
}
@ -3158,7 +3123,7 @@ void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
// checks.
// TOS + 0: return address
// Result in RAX.
void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateGetCStackPointerStub() {
__ leaq(RAX, Address(RSP, target::kWordSize));
__ ret();
}
@ -3170,7 +3135,7 @@ void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
// Arg3: frame_pointer
// Arg4: thread
// No Result.
void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
void StubCodeCompiler::GenerateJumpToFrameStub() {
__ movq(THR, CallingConventions::kArg4Reg);
__ movq(RBP, CallingConventions::kArg3Reg);
__ movq(RSP, CallingConventions::kArg2Reg);
@ -3211,7 +3176,7 @@ void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
//
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
ASSERT(kExceptionObjectReg == RAX);
ASSERT(kStackTraceObjectReg == RDX);
__ movq(CallingConventions::kArg1Reg,
@ -3238,7 +3203,7 @@ void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
void StubCodeCompiler::GenerateDeoptForRewindStub() {
// Push zap value instead of CODE_REG.
__ pushq(Immediate(kZapCodeReg));
@ -3259,7 +3224,7 @@ void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Calls to the runtime to optimize the given function.
// RDI: function to be reoptimized.
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizeFunctionStub() {
__ movq(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
__ pushq(ARGS_DESC_REG); // Preserve args descriptor.
@ -3323,8 +3288,7 @@ static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
@ -3358,8 +3322,7 @@ void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
const Register left = RAX;
const Register right = RDX;
@ -3376,7 +3339,7 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
// FUNCTION_REG: target function
// CODE_REG: target Code
// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
void StubCodeCompiler::GenerateMegamorphicCallStub() {
// Jump if receiver is a smi.
Label smi_case;
__ testq(RDX, Immediate(kSmiTagMask));
@ -3451,13 +3414,13 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
__ jmp(&cid_loaded);
__ Bind(&miss);
GenerateSwitchableCallMissStub(assembler);
GenerateSwitchableCallMissStub();
}
// Input:
// IC_DATA_REG - icdata
// RDX - receiver object
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
void StubCodeCompiler::GenerateICCallThroughCodeStub() {
Label loop, found, miss;
__ movq(R13, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ movq(ARGS_DESC_REG,
@ -3503,8 +3466,7 @@ void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
__ jmp(RCX);
}
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
Assembler* assembler) {
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
Label have_cid, miss;
__ movq(RAX, Immediate(kSmiCid));
@ -3527,7 +3489,7 @@ void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
// Called from switchable IC calls.
// RDX: receiver
void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSwitchableCallMissStub() {
__ movq(CODE_REG,
Address(THR, target::Thread::switchable_call_miss_stub_offset()));
__ EnterStubFrame();
@ -3554,7 +3516,7 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
// RBX: SingleTargetCache
// Passed to target::
// CODE_REG: target Code object
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
void StubCodeCompiler::GenerateSingleTargetCallStub() {
Label miss;
__ LoadClassIdMayBeSmi(RAX, RDX);
__ movzxw(R9,
@ -3608,8 +3570,7 @@ static ScaleFactor GetScaleFactor(intptr_t size) {
return static_cast<ScaleFactor>(0);
}
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid) {
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
const intptr_t element_size = TypedDataElementSizeInBytes(cid);
const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
ScaleFactor scale_factor = GetScaleFactor(element_size);


@ -60,8 +60,8 @@ void NativeCallbackTrampolines::AllocateTrampoline() {
trampoline_pages_.Add(memory);
compiler::Assembler assembler(/*object_pool_builder=*/nullptr);
compiler::StubCodeCompiler::GenerateJITCallbackTrampolines(
&assembler, next_callback_id_);
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
stubCodeCompiler.GenerateJITCallbackTrampolines(next_callback_id_);
MemoryRegion region(memory->address(), memory->size());
assembler.FinalizeInstructions(region);
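For reference, the call-site pattern repeated throughout this change is visible in the hunk above: construct a StubCodeCompiler over a local Assembler, then call the generator as an instance method instead of passing the assembler as an explicit first argument. Below is a minimal, self-contained sketch of that shape; the Assembler and StubCodeCompiler here are toy stand-ins, not the real dart::compiler classes.

// Toy illustration of the refactor's call-site shape. Not the real VM types:
// the real Assembler emits machine code and the real StubCodeCompiler has
// ~160 generator methods; both are reduced here to the minimum that shows
// the pattern.
#include <cstdio>
#include <string>
#include <vector>

class Assembler {
 public:
  void Emit(const std::string& instr) { instructions_.push_back(instr); }
  void Dump() const {
    for (const auto& i : instructions_) std::printf("%s\n", i.c_str());
  }

 private:
  std::vector<std::string> instructions_;
};

class StubCodeCompiler {
 public:
  explicit StubCodeCompiler(Assembler* assembler) : assembler(assembler) {}

  // Public field so the __ macro and non-member helpers can keep using the
  // plain name "assembler".
  Assembler* assembler;

#define __ assembler->
  // Was: static void GenerateExampleStub(Assembler* assembler).
  void GenerateExampleStub() {
    __ Emit("EnterStubFrame");
    __ Emit("Ret");
  }
#undef __
};

int main() {
  Assembler assembler;
  StubCodeCompiler stub_compiler(&assembler);  // Call sites now wrap the
  stub_compiler.GenerateExampleStub();         // assembler once, up front.
  assembler.Dump();
  return 0;
}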


@ -8193,10 +8193,16 @@ static CodePtr CreateInvokeInstantiateTypeArgumentsStub(Thread* thread) {
zone, Function::New(signature, symbol, UntaggedFunction::kRegularFunction,
false, false, false, false, false, klass,
TokenPosition::kNoSource));
compiler::ObjectPoolBuilder pool_builder;
const auto& invoke_instantiate_tav =
Code::Handle(zone, StubCode::Generate("InstantiateTAV", &pool_builder,
&GenerateInvokeInstantiateTAVStub));
SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
compiler::Assembler assembler(&pool_builder);
GenerateInvokeInstantiateTAVStub(&assembler);
const Code& invoke_instantiate_tav = Code::Handle(
Code::FinalizeCodeAndNotify("InstantiateTAV", nullptr, &assembler,
Code::PoolAttachment::kNotAttachPool,
/*optimized=*/false));
const auto& pool =
ObjectPool::Handle(zone, ObjectPool::NewFromBuilder(pool_builder));
invoke_instantiate_tav.set_object_pool(pool.ptr());


@ -29,7 +29,7 @@ StubCode::StubCodeEntry StubCode::entries_[kNumStubEntries] = {
#define STUB_CODE_DECLARE(name) {nullptr, #name},
#else
#define STUB_CODE_DECLARE(name) \
{nullptr, #name, compiler::StubCodeCompiler::Generate##name##Stub},
{nullptr, #name, &compiler::StubCodeCompiler::Generate##name##Stub},
#endif
VM_STUB_CODE_LIST(STUB_CODE_DECLARE)
#undef STUB_CODE_DECLARE
@ -91,15 +91,15 @@ void StubCode::Init() {
#undef STUB_CODE_GENERATE
#undef STUB_CODE_SET_OBJECT_POOL
CodePtr StubCode::Generate(
const char* name,
compiler::ObjectPoolBuilder* object_pool_builder,
void (*GenerateStub)(compiler::Assembler* assembler)) {
CodePtr StubCode::Generate(const char* name,
compiler::ObjectPoolBuilder* object_pool_builder,
void (compiler::StubCodeCompiler::*GenerateStub)()) {
auto thread = Thread::Current();
SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
compiler::Assembler assembler(object_pool_builder);
GenerateStub(&assembler);
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
(stubCodeCompiler.*GenerateStub)();
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
name, nullptr, &assembler, Code::PoolAttachment::kNotAttachPool,
/*optimized=*/false));
@ -221,8 +221,9 @@ CodePtr StubCode::GetAllocationStubForClass(const Class& cls) {
compiler::Assembler assembler(wrapper);
compiler::UnresolvedPcRelativeCalls unresolved_calls;
const char* name = cls.ToCString();
compiler::StubCodeCompiler::GenerateAllocationStubForClass(
&assembler, &unresolved_calls, cls, allocate_object_stub,
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
stubCodeCompiler.GenerateAllocationStubForClass(
&unresolved_calls, cls, allocate_object_stub,
allocate_object_parametrized_stub);
const auto& static_calls_table =
@ -316,8 +317,9 @@ CodePtr StubCode::GetBuildMethodExtractorStub(compiler::ObjectPoolBuilder* pool,
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(pool != nullptr ? pool : &object_pool_builder);
compiler::StubCodeCompiler::GenerateBuildMethodExtractorStub(
&assembler, closure_allocation_stub, context_allocation_stub, generic);
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
stubCodeCompiler.GenerateBuildMethodExtractorStub(
closure_allocation_stub, context_allocation_stub, generic);
const char* name = generic ? "BuildGenericMethodExtractor"
: "BuildNonGenericMethodExtractor";


@ -83,7 +83,7 @@ class StubCode : public AllStatic {
// code executable area.
static CodePtr Generate(const char* name,
compiler::ObjectPoolBuilder* object_pool_builder,
void (*GenerateStub)(compiler::Assembler* assembler));
void (compiler::StubCodeCompiler::*GenerateStub)());
#endif // !defined(DART_PRECOMPILED_RUNTIME)
static const Code& UnoptimizedStaticCallEntry(intptr_t num_args_tested);
@ -104,7 +104,7 @@ class StubCode : public AllStatic {
compiler::ObjectPoolBuilder* opw) { \
return StubCode::Generate( \
"_iso_stub_" #name, opw, \
compiler::StubCodeCompiler::Generate##name##Stub); \
&compiler::StubCodeCompiler::Generate##name##Stub); \
}
VM_STUB_CODE_LIST(GENERATE_STUB);
#undef GENERATE_STUB
@ -127,7 +127,7 @@ class StubCode : public AllStatic {
Code* code;
const char* name;
#if !defined(DART_PRECOMPILED_RUNTIME)
void (*generator)(compiler::Assembler* assembler);
void (compiler::StubCodeCompiler::*generator)();
#endif
};
static StubCodeEntry entries_[kNumStubEntries];
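Because the generators are now instance methods, the stub table and StubCode::Generate shown above hold pointers to member functions rather than plain void (*)(Assembler*) function pointers; that is why the initializers gained a leading & and the call site uses the .* operator. A minimal sketch of that C++ mechanism, again with toy stand-ins rather than the real VM types:

// Toy illustration of the pointer-to-member-function plumbing. The names
// mirror the diff above, but the types are simplified stand-ins.
#include <cstdio>

class Assembler {};  // placeholder

class StubCodeCompiler {
 public:
  explicit StubCodeCompiler(Assembler* assembler) : assembler(assembler) {}
  Assembler* assembler;
  void GenerateFooStub() { std::puts("generating Foo"); }
  void GenerateBarStub() { std::puts("generating Bar"); }
};

struct StubCodeEntry {
  const char* name;
  // Pointer-to-member type: taking the address requires the explicit
  // &Class::Method form, hence the added '&' in the table and macros.
  void (StubCodeCompiler::*generator)();
};

static StubCodeEntry entries[] = {
    {"Foo", &StubCodeCompiler::GenerateFooStub},
    {"Bar", &StubCodeCompiler::GenerateBarStub},
};

// Mirrors the new StubCode::Generate: build a compiler over the assembler,
// then invoke the generator through the member pointer.
void Generate(Assembler* assembler, void (StubCodeCompiler::*generate_stub)()) {
  StubCodeCompiler stub_compiler(assembler);
  (stub_compiler.*generate_stub)();
}

int main() {
  Assembler assembler;
  for (const auto& entry : entries) {
    std::printf("%s: ", entry.name);
    Generate(&assembler, entry.generator);
  }
  return 0;
}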


@ -479,10 +479,15 @@ class TTSTestState : public ValueObject {
zone, Function::New(
signature, symbol, UntaggedFunction::kRegularFunction, false,
false, false, false, false, klass, TokenPosition::kNoSource));
compiler::ObjectPoolBuilder pool_builder;
const auto& invoke_tts = Code::Handle(
zone,
StubCode::Generate("InvokeTTS", &pool_builder, &GenerateInvokeTTSStub));
SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
compiler::Assembler assembler(&pool_builder);
GenerateInvokeTTSStub(&assembler);
const Code& invoke_tts = Code::Handle(Code::FinalizeCodeAndNotify(
"InvokeTTS", nullptr, &assembler, Code::PoolAttachment::kNotAttachPool,
/*optimized=*/false));
const auto& pool =
ObjectPool::Handle(zone, ObjectPool::NewFromBuilder(pool_builder));
invoke_tts.set_object_pool(pool.ptr());