Revert "[vm] Refactor StubCodeCompiler to be a real class."

This reverts commit afdf640866.

Reason for revert: breaks google3. See b/274712076.

Original change's description:
> [vm] Refactor StubCodeCompiler to be a real class.
>
> Previously, StubCodeCompiler was just a set of static methods, all of
> which take an assembler as their first arg. This makes it hard to pass
> additional state to the ~160 stub macro defined stub generators.
>
> This refactor makes StubCodeCompiler a real class, with assembler as a
> field. So we can easily add new fields to the class later, to pass new
> state without having to update every stub generator.
>
> assembler is declared as a public field for a few reasons:
> - There's one place where it needs to be accessed by a non-member
>   function (in the ia32 file).
> - If it's private, it has to be named assembler_, which would mean a lot
>   more insignificant diffs.
> - Non-member functions that take assembler would have to take assembler_,
>   for consistency with the __ macro, which would be weird.
>
> Change-Id: I142f0803a07c7839753188065c69c334d4d1798a
> TEST=CI
> Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/289924
> Reviewed-by: Ryan Macnak <rmacnak@google.com>
> Commit-Queue: Liam Appelbe <liama@google.com>

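For quick orientation, the diff below amounts to flipping StubCodeCompiler
between two shapes. A minimal sketch of those shapes, using hypothetical
stand-in names rather than the real SDK declarations:

  // Sketch only: simplified, hypothetical stand-ins (not the actual VM
  // headers); they just illustrate the two shapes being swapped.
  class Assembler;  // stand-in for compiler::Assembler

  // Shape restored by this revert: a bag of static generators that take the
  // assembler explicitly as their first argument.
  class StaticStyleCompiler {
   public:
    static void GenerateInitStaticFieldStub(Assembler* assembler);
  };

  // Shape being reverted: a real class that holds the assembler in a public
  // field, so later changes can add more state without editing all ~160
  // generator signatures.
  class InstanceStyleCompiler {
   public:
    explicit InstanceStyleCompiler(Assembler* assembler_)
        : assembler(assembler_) {}
    Assembler* assembler;  // public: read by the __ macro and a non-member helper
    void GenerateInitStaticFieldStub();
  };

Per the bullets quoted above, keeping the field named assembler (not
assembler_) avoids churning the __ macro usages and the non-member helpers.
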
Change-Id: If36a9122e1a55d86673d05afbbd21dfb27d7acd5
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/290522
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Reviewed-by: Ivan Inozemtsev <iinozemtsev@google.com>
Reviewed-by: Alexander Thomas <athom@google.com>
Commit-Queue: Emmanuel Pellereau <emmanuelp@google.com>
Authored by Emmanuel Pellereau on 2023-03-22 15:11:10 +00:00; committed by Commit Queue
parent e8b51c7375
commit 7acb6f2c5c
12 changed files with 964 additions and 717 deletions

@ -38,7 +38,7 @@ intptr_t StubCodeCompiler::WordOffsetFromFpToCpuRegister(
return slots_from_fp;
}
void StubCodeCompiler::GenerateInitStaticFieldStub() {
void StubCodeCompiler::GenerateInitStaticFieldStub(Assembler* assembler) {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for result.
__ PushRegister(InitStaticFieldABI::kFieldReg);
@ -49,7 +49,8 @@ void StubCodeCompiler::GenerateInitStaticFieldStub() {
__ Ret();
}
void StubCodeCompiler::GenerateInitLateStaticFieldStub(bool is_final) {
void StubCodeCompiler::GenerateInitLateStaticFieldStub(Assembler* assembler,
bool is_final) {
const Register kResultReg = InitStaticFieldABI::kResultReg;
const Register kFieldReg = InitStaticFieldABI::kFieldReg;
const Register kAddressReg = InitLateStaticFieldInternalRegs::kAddressReg;
@ -99,15 +100,16 @@ void StubCodeCompiler::GenerateInitLateStaticFieldStub(bool is_final) {
}
}
void StubCodeCompiler::GenerateInitLateStaticFieldStub() {
GenerateInitLateStaticFieldStub(/*is_final=*/false);
void StubCodeCompiler::GenerateInitLateStaticFieldStub(Assembler* assembler) {
GenerateInitLateStaticFieldStub(assembler, /*is_final=*/false);
}
void StubCodeCompiler::GenerateInitLateFinalStaticFieldStub() {
GenerateInitLateStaticFieldStub(/*is_final=*/true);
void StubCodeCompiler::GenerateInitLateFinalStaticFieldStub(
Assembler* assembler) {
GenerateInitLateStaticFieldStub(assembler, /*is_final=*/true);
}
void StubCodeCompiler::GenerateInitInstanceFieldStub() {
void StubCodeCompiler::GenerateInitInstanceFieldStub(Assembler* assembler) {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for result.
__ PushRegistersInOrder(
@ -119,7 +121,8 @@ void StubCodeCompiler::GenerateInitInstanceFieldStub() {
__ Ret();
}
void StubCodeCompiler::GenerateInitLateInstanceFieldStub(bool is_final) {
void StubCodeCompiler::GenerateInitLateInstanceFieldStub(Assembler* assembler,
bool is_final) {
const Register kInstanceReg = InitInstanceFieldABI::kInstanceReg;
const Register kFieldReg = InitInstanceFieldABI::kFieldReg;
const Register kAddressReg = InitLateInstanceFieldInternalRegs::kAddressReg;
@ -193,15 +196,16 @@ void StubCodeCompiler::GenerateInitLateInstanceFieldStub(bool is_final) {
}
}
void StubCodeCompiler::GenerateInitLateInstanceFieldStub() {
GenerateInitLateInstanceFieldStub(/*is_final=*/false);
void StubCodeCompiler::GenerateInitLateInstanceFieldStub(Assembler* assembler) {
GenerateInitLateInstanceFieldStub(assembler, /*is_final=*/false);
}
void StubCodeCompiler::GenerateInitLateFinalInstanceFieldStub() {
GenerateInitLateInstanceFieldStub(/*is_final=*/true);
void StubCodeCompiler::GenerateInitLateFinalInstanceFieldStub(
Assembler* assembler) {
GenerateInitLateInstanceFieldStub(assembler, /*is_final=*/true);
}
void StubCodeCompiler::GenerateThrowStub() {
void StubCodeCompiler::GenerateThrowStub(Assembler* assembler) {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for (unused) result.
__ PushRegister(ThrowABI::kExceptionReg);
@ -209,7 +213,7 @@ void StubCodeCompiler::GenerateThrowStub() {
__ Breakpoint();
}
void StubCodeCompiler::GenerateReThrowStub() {
void StubCodeCompiler::GenerateReThrowStub(Assembler* assembler) {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for (unused) result.
__ PushRegistersInOrder(
@ -218,7 +222,7 @@ void StubCodeCompiler::GenerateReThrowStub() {
__ Breakpoint();
}
void StubCodeCompiler::GenerateAssertBooleanStub() {
void StubCodeCompiler::GenerateAssertBooleanStub(Assembler* assembler) {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for (unused) result.
__ PushRegister(AssertBooleanABI::kObjectReg);
@ -226,7 +230,7 @@ void StubCodeCompiler::GenerateAssertBooleanStub() {
__ Breakpoint();
}
void StubCodeCompiler::GenerateAssertSubtypeStub() {
void StubCodeCompiler::GenerateAssertSubtypeStub(Assembler* assembler) {
__ EnterStubFrame();
__ PushRegistersInOrder({AssertSubtypeABI::kInstantiatorTypeArgumentsReg,
AssertSubtypeABI::kFunctionTypeArgumentsReg,
@ -239,7 +243,7 @@ void StubCodeCompiler::GenerateAssertSubtypeStub() {
__ Ret();
}
void StubCodeCompiler::GenerateAssertAssignableStub() {
void StubCodeCompiler::GenerateAssertAssignableStub(Assembler* assembler) {
#if !defined(TARGET_ARCH_IA32)
__ Breakpoint();
#else
@ -273,7 +277,8 @@ void StubCodeCompiler::GenerateAssertAssignableStub() {
// - InstantiationABI::kResultTypeArgumentsReg: instantiated tav
// Clobbers:
// - InstantiationABI::kScratchReg
void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub() {
void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub(
Assembler* assembler) {
// We only need the offset of the current entry up until we either call
// the runtime or until we retrieve the instantiated type arguments out of it
// to put in the result register, so we use the result register to store it.
@ -490,7 +495,8 @@ void StubCodeCompiler::GenerateInstantiateTypeArgumentsStub() {
}
void StubCodeCompiler::
GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub() {
GenerateInstantiateTypeArgumentsMayShareInstantiatorTAStub(
Assembler* assembler) {
const Register kScratch1Reg = InstantiationABI::kResultTypeArgumentsReg;
const Register kScratch2Reg = InstantiationABI::kScratchReg;
// Return the instantiator type arguments if its nullability is compatible for
@ -512,11 +518,11 @@ void StubCodeCompiler::
__ Ret();
__ Bind(&cache_lookup);
GenerateInstantiateTypeArgumentsStub();
GenerateInstantiateTypeArgumentsStub(assembler);
}
void StubCodeCompiler::
GenerateInstantiateTypeArgumentsMayShareFunctionTAStub() {
void StubCodeCompiler::GenerateInstantiateTypeArgumentsMayShareFunctionTAStub(
Assembler* assembler) {
const Register kScratch1Reg = InstantiationABI::kResultTypeArgumentsReg;
const Register kScratch2Reg = InstantiationABI::kScratchReg;
// Return the function type arguments if its nullability is compatible for
@ -538,7 +544,7 @@ void StubCodeCompiler::
__ Ret();
__ Bind(&cache_lookup);
GenerateInstantiateTypeArgumentsStub();
GenerateInstantiateTypeArgumentsStub(assembler);
}
static void BuildInstantiateTypeRuntimeCall(Assembler* assembler) {
@ -620,45 +626,48 @@ static void BuildInstantiateTypeParameterStub(Assembler* assembler,
BuildInstantiateTypeRuntimeCall(assembler);
}
void StubCodeCompiler::
GenerateInstantiateTypeNonNullableClassTypeParameterStub() {
void StubCodeCompiler::GenerateInstantiateTypeNonNullableClassTypeParameterStub(
Assembler* assembler) {
BuildInstantiateTypeParameterStub(assembler, Nullability::kNonNullable,
/*is_function_parameter=*/false);
}
void StubCodeCompiler::GenerateInstantiateTypeNullableClassTypeParameterStub() {
void StubCodeCompiler::GenerateInstantiateTypeNullableClassTypeParameterStub(
Assembler* assembler) {
BuildInstantiateTypeParameterStub(assembler, Nullability::kNullable,
/*is_function_parameter=*/false);
}
void StubCodeCompiler::GenerateInstantiateTypeLegacyClassTypeParameterStub() {
void StubCodeCompiler::GenerateInstantiateTypeLegacyClassTypeParameterStub(
Assembler* assembler) {
BuildInstantiateTypeParameterStub(assembler, Nullability::kLegacy,
/*is_function_parameter=*/false);
}
void StubCodeCompiler::
GenerateInstantiateTypeNonNullableFunctionTypeParameterStub() {
GenerateInstantiateTypeNonNullableFunctionTypeParameterStub(
Assembler* assembler) {
BuildInstantiateTypeParameterStub(assembler, Nullability::kNonNullable,
/*is_function_parameter=*/true);
}
void StubCodeCompiler::
GenerateInstantiateTypeNullableFunctionTypeParameterStub() {
void StubCodeCompiler::GenerateInstantiateTypeNullableFunctionTypeParameterStub(
Assembler* assembler) {
BuildInstantiateTypeParameterStub(assembler, Nullability::kNullable,
/*is_function_parameter=*/true);
}
void StubCodeCompiler::
GenerateInstantiateTypeLegacyFunctionTypeParameterStub() {
void StubCodeCompiler::GenerateInstantiateTypeLegacyFunctionTypeParameterStub(
Assembler* assembler) {
BuildInstantiateTypeParameterStub(assembler, Nullability::kLegacy,
/*is_function_parameter=*/true);
}
void StubCodeCompiler::GenerateInstantiateTypeStub() {
void StubCodeCompiler::GenerateInstantiateTypeStub(Assembler* assembler) {
BuildInstantiateTypeRuntimeCall(assembler);
}
void StubCodeCompiler::GenerateInstanceOfStub() {
void StubCodeCompiler::GenerateInstanceOfStub(Assembler* assembler) {
__ EnterStubFrame();
__ PushObject(NullObject()); // Make room for the result.
__ PushRegistersInOrder({TypeTestABI::kInstanceReg, TypeTestABI::kDstTypeReg,
@ -797,12 +806,16 @@ static void GenerateTypeIsTopTypeForSubtyping(Assembler* assembler,
__ Jump(&check_top_type, compiler::Assembler::kNearJump);
}
void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingStub() {
GenerateTypeIsTopTypeForSubtyping(assembler, /*null_safety=*/false);
void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingStub(
Assembler* assembler) {
GenerateTypeIsTopTypeForSubtyping(assembler,
/*null_safety=*/false);
}
void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingNullSafeStub() {
GenerateTypeIsTopTypeForSubtyping(assembler, /*null_safety=*/true);
void StubCodeCompiler::GenerateTypeIsTopTypeForSubtypingNullSafeStub(
Assembler* assembler) {
GenerateTypeIsTopTypeForSubtyping(assembler,
/*null_safety=*/true);
}
// Version of Instance::NullIsAssignableTo(other, inst_tav, fun_tav) used when
@ -942,12 +955,16 @@ static void GenerateNullIsAssignableToType(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateNullIsAssignableToTypeStub() {
GenerateNullIsAssignableToType(assembler, /*null_safety=*/false);
void StubCodeCompiler::GenerateNullIsAssignableToTypeStub(
Assembler* assembler) {
GenerateNullIsAssignableToType(assembler,
/*null_safety=*/false);
}
void StubCodeCompiler::GenerateNullIsAssignableToTypeNullSafeStub() {
GenerateNullIsAssignableToType(assembler, /*null_safety=*/true);
void StubCodeCompiler::GenerateNullIsAssignableToTypeNullSafeStub(
Assembler* assembler) {
GenerateNullIsAssignableToType(assembler,
/*null_safety=*/true);
}
#if !defined(TARGET_ARCH_IA32)
// The <X>TypeTestStubs are used to test whether a given value is of a given
@ -969,14 +986,15 @@ void StubCodeCompiler::GenerateNullIsAssignableToTypeNullSafeStub() {
//
// Note of warning: The caller will not populate CODE_REG and we have therefore
// no access to the pool.
void StubCodeCompiler::GenerateDefaultTypeTestStub() {
void StubCodeCompiler::GenerateDefaultTypeTestStub(Assembler* assembler) {
__ LoadFromOffset(CODE_REG, THR,
target::Thread::slow_type_test_stub_offset());
__ Jump(FieldAddress(CODE_REG, target::Code::entry_point_offset()));
}
// Used instead of DefaultTypeTestStub when null is assignable.
void StubCodeCompiler::GenerateDefaultNullableTypeTestStub() {
void StubCodeCompiler::GenerateDefaultNullableTypeTestStub(
Assembler* assembler) {
Label done;
// Fast case for 'null'.
@ -991,11 +1009,11 @@ void StubCodeCompiler::GenerateDefaultNullableTypeTestStub() {
__ Ret();
}
void StubCodeCompiler::GenerateTopTypeTypeTestStub() {
void StubCodeCompiler::GenerateTopTypeTypeTestStub(Assembler* assembler) {
__ Ret();
}
void StubCodeCompiler::GenerateUnreachableTypeTestStub() {
void StubCodeCompiler::GenerateUnreachableTypeTestStub(Assembler* assembler) {
__ Breakpoint();
}
@ -1039,11 +1057,12 @@ static void BuildTypeParameterTypeTestStub(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub() {
void StubCodeCompiler::GenerateNullableTypeParameterTypeTestStub(
Assembler* assembler) {
BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/true);
}
void StubCodeCompiler::GenerateTypeParameterTypeTestStub() {
void StubCodeCompiler::GenerateTypeParameterTypeTestStub(Assembler* assembler) {
BuildTypeParameterTypeTestStub(assembler, /*allow_null=*/false);
}
@ -1067,7 +1086,8 @@ static void InvokeTypeCheckFromTypeTestStub(Assembler* assembler,
__ Drop(1); // Discard return value.
}
void StubCodeCompiler::GenerateLazySpecializeTypeTestStub() {
void StubCodeCompiler::GenerateLazySpecializeTypeTestStub(
Assembler* assembler) {
__ LoadFromOffset(CODE_REG, THR,
target::Thread::lazy_specialize_type_test_stub_offset());
__ EnterStubFrame();
@ -1077,7 +1097,8 @@ void StubCodeCompiler::GenerateLazySpecializeTypeTestStub() {
}
// Used instead of LazySpecializeTypeTestStub when null is assignable.
void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub() {
void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub(
Assembler* assembler) {
Label done;
__ CompareObject(TypeTestABI::kInstanceReg, NullObject());
@ -1093,7 +1114,7 @@ void StubCodeCompiler::GenerateLazySpecializeNullableTypeTestStub() {
__ Ret();
}
void StubCodeCompiler::GenerateSlowTypeTestStub() {
void StubCodeCompiler::GenerateSlowTypeTestStub(Assembler* assembler) {
Label done, call_runtime;
if (!FLAG_precompiled_mode) {
@ -1167,7 +1188,7 @@ void StubCodeCompiler::GenerateSlowTypeTestStub() {
#else
// Type testing stubs are not implemented on IA32.
#define GENERATE_BREAKPOINT_STUB(Name) \
void StubCodeCompiler::Generate##Name##Stub() { \
void StubCodeCompiler::Generate##Name##Stub(Assembler* assembler) { \
__ Breakpoint(); \
}
@ -1183,7 +1204,7 @@ VM_TYPE_TESTING_STUB_CODE_LIST(GENERATE_BREAKPOINT_STUB)
// AllocateClosureABI::kResultReg: new allocated Closure object.
// Clobbered:
// AllocateClosureABI::kScratchReg
void StubCodeCompiler::GenerateAllocateClosureStub() {
void StubCodeCompiler::GenerateAllocateClosureStub(Assembler* assembler) {
const intptr_t instance_size =
target::RoundedAllocationSize(target::Closure::InstanceSize());
__ EnsureHasClassIdInDEBUG(kFunctionCid, AllocateClosureABI::kFunctionReg,
@ -1259,7 +1280,7 @@ void StubCodeCompiler::GenerateAllocateClosureStub() {
__ PopRegister(AllocateClosureABI::kFunctionReg);
__ PopRegister(AllocateClosureABI::kResultReg);
ASSERT(target::WillAllocateNewOrRememberedObject(instance_size));
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
__ LeaveStubFrame();
// AllocateClosureABI::kResultReg: new object
@ -1269,7 +1290,7 @@ void StubCodeCompiler::GenerateAllocateClosureStub() {
// Generates allocation stub for _GrowableList class.
// This stub exists solely for performance reasons: default allocation
// stub is slower as it doesn't use specialized inline allocation.
void StubCodeCompiler::GenerateAllocateGrowableArrayStub() {
void StubCodeCompiler::GenerateAllocateGrowableArrayStub(Assembler* assembler) {
#if defined(TARGET_ARCH_IA32)
// This stub is not used on IA32 because IA32 version of
// StubCodeCompiler::GenerateAllocationStubForClass uses inline
@ -1303,7 +1324,7 @@ void StubCodeCompiler::GenerateAllocateGrowableArrayStub() {
#endif // defined(TARGET_ARCH_IA32)
}
void StubCodeCompiler::GenerateAllocateRecordStub() {
void StubCodeCompiler::GenerateAllocateRecordStub(Assembler* assembler) {
const Register result_reg = AllocateRecordABI::kResultReg;
const Register shape_reg = AllocateRecordABI::kShapeReg;
const Register temp_reg = AllocateRecordABI::kTemp1Reg;
@ -1407,12 +1428,13 @@ void StubCodeCompiler::GenerateAllocateRecordStub() {
__ Drop(1);
__ PopRegister(AllocateRecordABI::kResultReg);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
__ LeaveStubFrame();
__ Ret();
}
void StubCodeCompiler::GenerateAllocateSmallRecordStub(intptr_t num_fields,
void StubCodeCompiler::GenerateAllocateSmallRecordStub(Assembler* assembler,
intptr_t num_fields,
bool has_named_fields) {
ASSERT(num_fields == 2 || num_fields == 3);
const Register result_reg = AllocateSmallRecordABI::kResultReg;
@ -1483,30 +1505,31 @@ void StubCodeCompiler::GenerateAllocateSmallRecordStub(intptr_t num_fields,
__ Drop(4);
__ PopRegister(result_reg);
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
__ LeaveStubFrame();
__ Ret();
}
void StubCodeCompiler::GenerateAllocateRecord2Stub() {
GenerateAllocateSmallRecordStub(2, /*has_named_fields=*/false);
void StubCodeCompiler::GenerateAllocateRecord2Stub(Assembler* assembler) {
GenerateAllocateSmallRecordStub(assembler, 2, /*has_named_fields=*/false);
}
void StubCodeCompiler::GenerateAllocateRecord2NamedStub() {
GenerateAllocateSmallRecordStub(2, /*has_named_fields=*/true);
void StubCodeCompiler::GenerateAllocateRecord2NamedStub(Assembler* assembler) {
GenerateAllocateSmallRecordStub(assembler, 2, /*has_named_fields=*/true);
}
void StubCodeCompiler::GenerateAllocateRecord3Stub() {
GenerateAllocateSmallRecordStub(3, /*has_named_fields=*/false);
void StubCodeCompiler::GenerateAllocateRecord3Stub(Assembler* assembler) {
GenerateAllocateSmallRecordStub(assembler, 3, /*has_named_fields=*/false);
}
void StubCodeCompiler::GenerateAllocateRecord3NamedStub() {
GenerateAllocateSmallRecordStub(3, /*has_named_fields=*/true);
void StubCodeCompiler::GenerateAllocateRecord3NamedStub(Assembler* assembler) {
GenerateAllocateSmallRecordStub(assembler, 3, /*has_named_fields=*/true);
}
// The UnhandledException class lives in the VM isolate, so it cannot cache
// an allocation stub for itself. Instead, we cache it in the stub code list.
void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub() {
void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub(
Assembler* assembler) {
Thread* thread = Thread::Current();
auto class_table = thread->isolate_group()->class_table();
ASSERT(class_table->HasValidClassAt(kUnhandledExceptionCid));
@ -1514,25 +1537,27 @@ void StubCodeCompiler::GenerateAllocateUnhandledExceptionStub() {
class_table->At(kUnhandledExceptionCid));
ASSERT(!cls.IsNull());
GenerateAllocationStubForClass(nullptr, cls, Code::Handle(Code::null()),
GenerateAllocationStubForClass(assembler, nullptr, cls,
Code::Handle(Code::null()),
Code::Handle(Code::null()));
}
#define TYPED_DATA_ALLOCATION_STUB(clazz) \
void StubCodeCompiler::GenerateAllocate##clazz##Stub() { \
GenerateAllocateTypedDataArrayStub(kTypedData##clazz##Cid); \
void StubCodeCompiler::GenerateAllocate##clazz##Stub(Assembler* assembler) { \
GenerateAllocateTypedDataArrayStub(assembler, kTypedData##clazz##Cid); \
}
CLASS_LIST_TYPED_DATA(TYPED_DATA_ALLOCATION_STUB)
#undef TYPED_DATA_ALLOCATION_STUB
void StubCodeCompiler::GenerateLateInitializationError(bool with_fpu_regs) {
void StubCodeCompiler::GenerateLateInitializationError(Assembler* assembler,
bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
__ PushRegister(LateInitializationErrorABI::kFieldReg);
__ CallRuntime(kLateFieldNotInitializedErrorRuntimeEntry,
/*argument_count=*/1);
};
GenerateSharedStubGeneric(
/*save_fpu_registers=*/with_fpu_regs,
assembler, /*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::
late_initialization_error_shared_with_fpu_regs_stub_offset()
@ -1541,109 +1566,125 @@ void StubCodeCompiler::GenerateLateInitializationError(bool with_fpu_regs) {
/*allow_return=*/false, perform_runtime_call);
}
void StubCodeCompiler::
GenerateLateInitializationErrorSharedWithoutFPURegsStub() {
GenerateLateInitializationError(/*with_fpu_regs=*/false);
void StubCodeCompiler::GenerateLateInitializationErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateLateInitializationError(assembler, /*with_fpu_regs=*/false);
}
void StubCodeCompiler::GenerateLateInitializationErrorSharedWithFPURegsStub() {
GenerateLateInitializationError(/*with_fpu_regs=*/true);
void StubCodeCompiler::GenerateLateInitializationErrorSharedWithFPURegsStub(
Assembler* assembler) {
GenerateLateInitializationError(assembler, /*with_fpu_regs=*/true);
}
void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub() {
void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(
/*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
assembler, /*save_fpu_registers=*/false, &kNullErrorRuntimeEntry,
target::Thread::null_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub() {
void StubCodeCompiler::GenerateNullErrorSharedWithFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(
/*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
assembler, /*save_fpu_registers=*/true, &kNullErrorRuntimeEntry,
target::Thread::null_error_shared_with_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub() {
void StubCodeCompiler::GenerateNullArgErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(
/*save_fpu_registers=*/false, &kArgumentNullErrorRuntimeEntry,
assembler, /*save_fpu_registers=*/false, &kArgumentNullErrorRuntimeEntry,
target::Thread::null_arg_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub() {
void StubCodeCompiler::GenerateNullArgErrorSharedWithFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(
/*save_fpu_registers=*/true, &kArgumentNullErrorRuntimeEntry,
assembler, /*save_fpu_registers=*/true, &kArgumentNullErrorRuntimeEntry,
target::Thread::null_arg_error_shared_with_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub() {
void StubCodeCompiler::GenerateNullCastErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(
/*save_fpu_registers=*/false, &kNullCastErrorRuntimeEntry,
assembler, /*save_fpu_registers=*/false, &kNullCastErrorRuntimeEntry,
target::Thread::null_cast_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub() {
void StubCodeCompiler::GenerateNullCastErrorSharedWithFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(
/*save_fpu_registers=*/true, &kNullCastErrorRuntimeEntry,
assembler, /*save_fpu_registers=*/true, &kNullCastErrorRuntimeEntry,
target::Thread::null_cast_error_shared_with_fpu_regs_stub_offset(),
/*allow_return=*/false);
}
void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub() {
void StubCodeCompiler::GenerateStackOverflowSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(
/*save_fpu_registers=*/false, &kInterruptOrStackOverflowRuntimeEntry,
assembler, /*save_fpu_registers=*/false,
&kInterruptOrStackOverflowRuntimeEntry,
target::Thread::stack_overflow_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/true);
}
void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub() {
void StubCodeCompiler::GenerateStackOverflowSharedWithFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(
/*save_fpu_registers=*/true, &kInterruptOrStackOverflowRuntimeEntry,
assembler, /*save_fpu_registers=*/true,
&kInterruptOrStackOverflowRuntimeEntry,
target::Thread::stack_overflow_shared_with_fpu_regs_stub_offset(),
/*allow_return=*/true);
}
void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub() {
GenerateRangeError(/*with_fpu_regs=*/false);
void StubCodeCompiler::GenerateRangeErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateRangeError(assembler, /*with_fpu_regs=*/false);
}
void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub() {
GenerateRangeError(/*with_fpu_regs=*/true);
void StubCodeCompiler::GenerateRangeErrorSharedWithFPURegsStub(
Assembler* assembler) {
GenerateRangeError(assembler, /*with_fpu_regs=*/true);
}
void StubCodeCompiler::GenerateWriteErrorSharedWithoutFPURegsStub() {
GenerateWriteError(/*with_fpu_regs=*/false);
void StubCodeCompiler::GenerateWriteErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateWriteError(assembler, /*with_fpu_regs=*/false);
}
void StubCodeCompiler::GenerateWriteErrorSharedWithFPURegsStub() {
GenerateWriteError(/*with_fpu_regs=*/true);
void StubCodeCompiler::GenerateWriteErrorSharedWithFPURegsStub(
Assembler* assembler) {
GenerateWriteError(assembler, /*with_fpu_regs=*/true);
}
void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub() {
void StubCodeCompiler::GenerateFrameAwaitingMaterializationStub(
Assembler* assembler) {
__ Breakpoint(); // Marker stub.
}
void StubCodeCompiler::GenerateAsynchronousGapMarkerStub() {
void StubCodeCompiler::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
__ Breakpoint(); // Marker stub.
}
void StubCodeCompiler::GenerateUnknownDartCodeStub() {
void StubCodeCompiler::GenerateUnknownDartCodeStub(Assembler* assembler) {
// Enter frame to include caller into the backtrace.
__ EnterStubFrame();
__ Breakpoint(); // Marker stub.
}
void StubCodeCompiler::GenerateNotLoadedStub() {
void StubCodeCompiler::GenerateNotLoadedStub(Assembler* assembler) {
__ EnterStubFrame();
__ CallRuntime(kNotLoadedRuntimeEntry, 0);
__ Breakpoint();
}
#define EMIT_BOX_ALLOCATION(Name) \
void StubCodeCompiler::GenerateAllocate##Name##Stub() { \
void StubCodeCompiler::GenerateAllocate##Name##Stub(Assembler* assembler) { \
Label call_runtime; \
if (!FLAG_use_slow_path && FLAG_inline_alloc) { \
__ TryAllocate(compiler::Name##Class(), &call_runtime, \
@ -1694,13 +1735,13 @@ static void GenerateBoxFpuValueStub(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateBoxDoubleStub() {
void StubCodeCompiler::GenerateBoxDoubleStub(Assembler* assembler) {
GenerateBoxFpuValueStub(assembler, compiler::DoubleClass(),
kBoxDoubleRuntimeEntry,
&Assembler::StoreUnboxedDouble);
}
void StubCodeCompiler::GenerateBoxFloat32x4Stub() {
void StubCodeCompiler::GenerateBoxFloat32x4Stub(Assembler* assembler) {
#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
GenerateBoxFpuValueStub(assembler, compiler::Float32x4Class(),
kBoxFloat32x4RuntimeEntry,
@ -1710,7 +1751,7 @@ void StubCodeCompiler::GenerateBoxFloat32x4Stub() {
#endif
}
void StubCodeCompiler::GenerateBoxFloat64x2Stub() {
void StubCodeCompiler::GenerateBoxFloat64x2Stub(Assembler* assembler) {
#if !defined(TARGET_ARCH_RISCV32) && !defined(TARGET_ARCH_RISCV64)
GenerateBoxFpuValueStub(assembler, compiler::Float64x2Class(),
kBoxFloat64x2RuntimeEntry,
@ -1720,7 +1761,7 @@ void StubCodeCompiler::GenerateBoxFloat64x2Stub() {
#endif
}
void StubCodeCompiler::GenerateDoubleToIntegerStub() {
void StubCodeCompiler::GenerateDoubleToIntegerStub(Assembler* assembler) {
__ EnterStubFrame();
__ StoreUnboxedDouble(DoubleToIntegerStubABI::kInputReg, THR,
target::Thread::unboxed_runtime_arg_offset());
@ -1843,6 +1884,7 @@ static void GenerateAllocateSuspendState(Assembler* assembler,
}
void StubCodeCompiler::GenerateSuspendStub(
Assembler* assembler,
bool call_suspend_function,
bool pass_type_arguments,
intptr_t suspend_entry_point_offset_in_thread,
@ -2098,35 +2140,36 @@ void StubCodeCompiler::GenerateSuspendStub(
__ Jump(&call_dart);
}
void StubCodeCompiler::GenerateAwaitStub() {
GenerateSuspendStub(
/*call_suspend_function=*/true,
/*pass_type_arguments=*/false,
target::Thread::suspend_state_await_entry_point_offset(),
target::ObjectStore::suspend_state_await_offset());
void StubCodeCompiler::GenerateAwaitStub(Assembler* assembler) {
GenerateSuspendStub(assembler,
/*call_suspend_function=*/true,
/*pass_type_arguments=*/false,
target::Thread::suspend_state_await_entry_point_offset(),
target::ObjectStore::suspend_state_await_offset());
}
void StubCodeCompiler::GenerateAwaitWithTypeCheckStub() {
void StubCodeCompiler::GenerateAwaitWithTypeCheckStub(Assembler* assembler) {
GenerateSuspendStub(
assembler,
/*call_suspend_function=*/true,
/*pass_type_arguments=*/true,
target::Thread::suspend_state_await_with_type_check_entry_point_offset(),
target::ObjectStore::suspend_state_await_with_type_check_offset());
}
void StubCodeCompiler::GenerateYieldAsyncStarStub() {
void StubCodeCompiler::GenerateYieldAsyncStarStub(Assembler* assembler) {
GenerateSuspendStub(
assembler,
/*call_suspend_function=*/true,
/*pass_type_arguments=*/false,
target::Thread::suspend_state_yield_async_star_entry_point_offset(),
target::ObjectStore::suspend_state_yield_async_star_offset());
}
void StubCodeCompiler::GenerateSuspendSyncStarAtStartStub() {
void StubCodeCompiler::GenerateSuspendSyncStarAtStartStub(
Assembler* assembler) {
GenerateSuspendStub(
assembler,
/*call_suspend_function=*/true,
/*pass_type_arguments=*/false,
target::Thread::
@ -2134,13 +2177,15 @@ void StubCodeCompiler::GenerateSuspendSyncStarAtStartStub() {
target::ObjectStore::suspend_state_suspend_sync_star_at_start_offset());
}
void StubCodeCompiler::GenerateSuspendSyncStarAtYieldStub() {
GenerateSuspendStub(
/*call_suspend_function=*/false,
/*pass_type_arguments=*/false, -1, -1);
void StubCodeCompiler::GenerateSuspendSyncStarAtYieldStub(
Assembler* assembler) {
GenerateSuspendStub(assembler,
/*call_suspend_function=*/false,
/*pass_type_arguments=*/false, -1, -1);
}
void StubCodeCompiler::GenerateInitSuspendableFunctionStub(
Assembler* assembler,
intptr_t init_entry_point_offset_in_thread,
intptr_t init_function_offset_in_object_store) {
const Register kTypeArgs = InitSuspendableFunctionStubABI::kTypeArgsReg;
@ -2160,25 +2205,27 @@ void StubCodeCompiler::GenerateInitSuspendableFunctionStub(
__ Ret();
}
void StubCodeCompiler::GenerateInitAsyncStub() {
void StubCodeCompiler::GenerateInitAsyncStub(Assembler* assembler) {
GenerateInitSuspendableFunctionStub(
target::Thread::suspend_state_init_async_entry_point_offset(),
assembler, target::Thread::suspend_state_init_async_entry_point_offset(),
target::ObjectStore::suspend_state_init_async_offset());
}
void StubCodeCompiler::GenerateInitAsyncStarStub() {
void StubCodeCompiler::GenerateInitAsyncStarStub(Assembler* assembler) {
GenerateInitSuspendableFunctionStub(
assembler,
target::Thread::suspend_state_init_async_star_entry_point_offset(),
target::ObjectStore::suspend_state_init_async_star_offset());
}
void StubCodeCompiler::GenerateInitSyncStarStub() {
void StubCodeCompiler::GenerateInitSyncStarStub(Assembler* assembler) {
GenerateInitSuspendableFunctionStub(
assembler,
target::Thread::suspend_state_init_sync_star_entry_point_offset(),
target::ObjectStore::suspend_state_init_sync_star_offset());
}
void StubCodeCompiler::GenerateResumeStub() {
void StubCodeCompiler::GenerateResumeStub(Assembler* assembler) {
const Register kSuspendState = ResumeStubABI::kSuspendStateReg;
const Register kTemp = ResumeStubABI::kTempReg;
const Register kFrameSize = ResumeStubABI::kFrameSizeReg;
@ -2359,6 +2406,7 @@ void StubCodeCompiler::GenerateResumeStub() {
}
void StubCodeCompiler::GenerateReturnStub(
Assembler* assembler,
intptr_t return_entry_point_offset_in_thread,
intptr_t return_function_offset_in_object_store,
intptr_t return_stub_offset_in_thread) {
@ -2390,29 +2438,32 @@ void StubCodeCompiler::GenerateReturnStub(
__ Ret();
}
void StubCodeCompiler::GenerateReturnAsyncStub() {
void StubCodeCompiler::GenerateReturnAsyncStub(Assembler* assembler) {
GenerateReturnStub(
assembler,
target::Thread::suspend_state_return_async_entry_point_offset(),
target::ObjectStore::suspend_state_return_async_offset(),
target::Thread::return_async_stub_offset());
}
void StubCodeCompiler::GenerateReturnAsyncNotFutureStub() {
void StubCodeCompiler::GenerateReturnAsyncNotFutureStub(Assembler* assembler) {
GenerateReturnStub(
assembler,
target::Thread::
suspend_state_return_async_not_future_entry_point_offset(),
target::ObjectStore::suspend_state_return_async_not_future_offset(),
target::Thread::return_async_not_future_stub_offset());
}
void StubCodeCompiler::GenerateReturnAsyncStarStub() {
void StubCodeCompiler::GenerateReturnAsyncStarStub(Assembler* assembler) {
GenerateReturnStub(
assembler,
target::Thread::suspend_state_return_async_star_entry_point_offset(),
target::ObjectStore::suspend_state_return_async_star_offset(),
target::Thread::return_async_star_stub_offset());
}
void StubCodeCompiler::GenerateAsyncExceptionHandlerStub() {
void StubCodeCompiler::GenerateAsyncExceptionHandlerStub(Assembler* assembler) {
const Register kSuspendState = AsyncExceptionHandlerStubABI::kSuspendStateReg;
ASSERT(kSuspendState != kExceptionObjectReg);
ASSERT(kSuspendState != kStackTraceObjectReg);
@ -2463,7 +2514,7 @@ void StubCodeCompiler::GenerateAsyncExceptionHandlerStub() {
__ Breakpoint();
}
void StubCodeCompiler::GenerateCloneSuspendStateStub() {
void StubCodeCompiler::GenerateCloneSuspendStateStub(Assembler* assembler) {
const Register kSource = CloneSuspendStateStubABI::kSourceReg;
const Register kDestination = CloneSuspendStateStubABI::kDestinationReg;
const Register kTemp = CloneSuspendStateStubABI::kTempReg;

@ -48,28 +48,29 @@ class UnresolvedPcRelativeCall : public ZoneAllocated {
using UnresolvedPcRelativeCalls = GrowableArray<UnresolvedPcRelativeCall*>;
class StubCodeCompiler {
class StubCodeCompiler : public AllStatic {
public:
StubCodeCompiler(Assembler* assembler_) : assembler(assembler_) {}
Assembler* assembler;
#if !defined(TARGET_ARCH_IA32)
void GenerateBuildMethodExtractorStub(const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic);
static void GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic);
#endif
void EnsureIsNewOrRemembered(bool preserve_registers = true);
static void EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers = true);
static ArrayPtr BuildStaticCallsTable(
Zone* zone,
compiler::UnresolvedPcRelativeCalls* unresolved_calls);
#define STUB_CODE_GENERATE(name) void Generate##name##Stub();
#define STUB_CODE_GENERATE(name) \
static void Generate##name##Stub(Assembler* assembler);
VM_STUB_CODE_LIST(STUB_CODE_GENERATE)
#undef STUB_CODE_GENERATE
void GenerateAllocationStubForClass(
static void GenerateAllocationStubForClass(
Assembler* assembler,
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const dart::Code& allocate_object,
@ -87,13 +88,16 @@ class StubCodeCompiler {
kCheckExactness,
kIgnoreExactness,
};
void GenerateNArgsCheckInlineCacheStub(intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
Optimized optimized,
CallType type,
Exactness exactness);
void GenerateNArgsCheckInlineCacheStubForEntryKind(
static void GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
Optimized optimized,
CallType type,
Exactness exactness);
static void GenerateNArgsCheckInlineCacheStubForEntryKind(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -101,8 +105,9 @@ class StubCodeCompiler {
CallType type,
Exactness exactness,
CodeEntryKind entry_kind);
void GenerateUsageCounterIncrement(Register temp_reg);
void GenerateOptimizedUsageCounterIncrement();
static void GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg);
static void GenerateOptimizedUsageCounterIncrement(Assembler* assembler);
#if defined(TARGET_ARCH_X64)
static constexpr intptr_t kNativeCallbackTrampolineSize = 10;
@ -140,7 +145,8 @@ class StubCodeCompiler {
#error What architecture?
#endif
void GenerateJITCallbackTrampolines(intptr_t next_callback_id);
static void GenerateJITCallbackTrampolines(Assembler* assembler,
intptr_t next_callback_id);
// Calculates the offset (in words) from FP to the provided [cpu_register].
//
@ -155,52 +161,64 @@ class StubCodeCompiler {
static intptr_t WordOffsetFromFpToCpuRegister(Register cpu_register);
private:
DISALLOW_COPY_AND_ASSIGN(StubCodeCompiler);
// Common function for generating InitLateStaticField and
// InitLateFinalStaticField stubs.
void GenerateInitLateStaticFieldStub(bool is_final);
static void GenerateInitLateStaticFieldStub(Assembler* assembler,
bool is_final);
// Common function for generating InitLateInstanceField and
// InitLateFinalInstanceField stubs.
void GenerateInitLateInstanceFieldStub(bool is_final);
static void GenerateInitLateInstanceFieldStub(Assembler* assembler,
bool is_final);
// Common function for generating Allocate<TypedData>Array stubs.
void GenerateAllocateTypedDataArrayStub(intptr_t cid);
static void GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid);
void GenerateAllocateSmallRecordStub(intptr_t num_fields,
bool has_named_fields);
static void GenerateAllocateSmallRecordStub(Assembler* assembler,
intptr_t num_fields,
bool has_named_fields);
void GenerateSharedStubGeneric(bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
std::function<void()> perform_runtime_call);
static void GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
std::function<void()> perform_runtime_call);
// Generates shared slow path stub which saves registers and calls
// [target] runtime entry.
// If [store_runtime_result_in_result_register], then stub puts result into
// SharedSlowPathStubABI::kResultReg.
void GenerateSharedStub(bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
bool store_runtime_result_in_result_register = false);
static void GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
bool store_runtime_result_in_result_register = false);
void GenerateLateInitializationError(bool with_fpu_regs);
static void GenerateLateInitializationError(Assembler* assembler,
bool with_fpu_regs);
void GenerateRangeError(bool with_fpu_regs);
void GenerateWriteError(bool with_fpu_regs);
static void GenerateRangeError(Assembler* assembler, bool with_fpu_regs);
static void GenerateWriteError(Assembler* assembler, bool with_fpu_regs);
void GenerateSuspendStub(bool call_suspend_function,
bool pass_type_arguments,
intptr_t suspend_entry_point_offset_in_thread,
intptr_t suspend_function_offset_in_object_store);
void GenerateInitSuspendableFunctionStub(
static void GenerateSuspendStub(
Assembler* assembler,
bool call_suspend_function,
bool pass_type_arguments,
intptr_t suspend_entry_point_offset_in_thread,
intptr_t suspend_function_offset_in_object_store);
static void GenerateInitSuspendableFunctionStub(
Assembler* assembler,
intptr_t init_entry_point_offset_in_thread,
intptr_t init_function_offset_in_object_store);
void GenerateReturnStub(intptr_t return_entry_point_offset_in_thread,
intptr_t return_function_offset_in_object_store,
intptr_t return_stub_offset_in_thread);
static void GenerateReturnStub(
Assembler* assembler,
intptr_t return_entry_point_offset_in_thread,
intptr_t return_function_offset_in_object_store,
intptr_t return_stub_offset_in_thread);
};
} // namespace compiler

@ -35,7 +35,8 @@ namespace compiler {
//
// WARNING: This might clobber all registers except for [R0], [THR] and [FP].
// The caller should simply call LeaveStubFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers) {
// If the object is not remembered we call a leaf-runtime to add it to the
// remembered set.
Label done;
@ -61,7 +62,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
// SP + 4*R4 : address of return value.
// R9 : address of the runtime function to call.
// R4 : number of arguments to the call.
void StubCodeCompiler::GenerateCallToRuntimeStub() {
void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
const intptr_t thread_offset = target::NativeArguments::thread_offset();
const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
const intptr_t argv_offset = target::NativeArguments::argv_offset();
@ -152,6 +153,7 @@ void StubCodeCompiler::GenerateCallToRuntimeStub() {
}
void StubCodeCompiler::GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
@ -179,6 +181,7 @@ void StubCodeCompiler::GenerateSharedStubGeneric(
}
void StubCodeCompiler::GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
@ -200,7 +203,7 @@ void StubCodeCompiler::GenerateSharedStub(
SharedSlowPathStubABI::kResultReg)));
}
};
GenerateSharedStubGeneric(save_fpu_registers,
GenerateSharedStubGeneric(assembler, save_fpu_registers,
self_code_stub_offset_from_thread, allow_return,
perform_runtime_call);
}
@ -209,6 +212,7 @@ void StubCodeCompiler::GenerateSharedStub(
// R4: The type_arguments_field_offset (or 0)
// SP+0: The object from which we are tearing a method off.
void StubCodeCompiler::GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic) {
@ -296,7 +300,7 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
__ Ret();
}
void StubCodeCompiler::GenerateEnterSafepointStub() {
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers);
@ -334,12 +338,13 @@ static void GenerateExitSafepointStubCommon(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateExitSafepointStub() {
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
GenerateExitSafepointStubCommon(
assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
Assembler* assembler) {
GenerateExitSafepointStubCommon(
assembler,
kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@ -354,7 +359,8 @@ void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
// On exit:
// Stack: preserved
// NOTFP, R4: clobbered, although normally callee-saved
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
Assembler* assembler) {
COMPILE_ASSERT(IsAbiPreservedRegister(R4));
// TransitionGeneratedToNative might clobber LR if it takes the slow path.
@ -374,6 +380,7 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
#if !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateJITCallbackTrampolines(
Assembler* assembler,
intptr_t next_callback_id) {
#if defined(USING_SIMULATOR)
// TODO(37299): FFI is not support in SIMARM.
@ -486,7 +493,8 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
}
#endif // !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
Assembler* assembler) {
__ EnterStubFrame();
__ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
__ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@ -495,7 +503,8 @@ void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
__ Breakpoint();
}
void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
ASSERT(!GenericCheckBoundInstr::UseUnboxedRepresentation());
__ PushRegistersInOrder(
@ -505,21 +514,22 @@ void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
};
GenerateSharedStubGeneric(
/*save_fpu_registers=*/with_fpu_regs,
assembler, /*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
: target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false, perform_runtime_call);
}
void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
__ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
__ Breakpoint();
};
GenerateSharedStubGeneric(
/*save_fpu_registers=*/with_fpu_regs,
assembler, /*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
: target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@ -622,14 +632,14 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -642,7 +652,7 @@ void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
// R9 : address of the native function to call.
// R2 : address of first argument in argument array.
// R1 : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -651,7 +661,7 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
// Input parameters:
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@ -671,7 +681,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub() {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub() {
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -721,7 +731,8 @@ void StubCodeCompiler::GenerateFixCallersTargetStub() {
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
@ -743,7 +754,8 @@ void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
@ -953,7 +965,8 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// R0: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(IP, kZapCodeReg);
__ Push(IP);
@ -967,7 +980,8 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
// R0: exception, must be preserved
// R1: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(IP, kZapCodeReg);
__ Push(IP);
@ -979,7 +993,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
__ Ret();
}
void StubCodeCompiler::GenerateDeoptimizeStub() {
void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
__ Push(CODE_REG);
__ ldr(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@ -1037,7 +1051,8 @@ static void GenerateDispatcherCode(Assembler* assembler,
// Input:
// ARGS_DESC_REG - arguments descriptor
// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
GenerateNoSuchMethodDispatcherBody(assembler);
}
@ -1050,7 +1065,7 @@ void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
// AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
// R3, R4, R8, R9
void StubCodeCompiler::GenerateAllocateArrayStub() {
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
@ -1181,7 +1196,8 @@ void StubCodeCompiler::GenerateAllocateArrayStub() {
}
// Called for allocation of Mint.
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1193,14 +1209,16 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
}
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
&kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
}
// Called for allocation of Mint.
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1213,7 +1231,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(
/*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
@ -1226,7 +1244,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
// R1 : arguments descriptor array.
// R2 : arguments array.
// R3 : current thread.
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
SPILLS_LR_TO_FRAME(__ EnterFrame((1 << FP) | (1 << LR), 0));
// Push code object to PC marker slot.
@ -1431,7 +1449,7 @@ static void GenerateAllocateContext(Assembler* assembler, Label* slow_case) {
// R0: new allocated Context object.
// Clobbered:
// Potentially any since is can go to runtime.
void StubCodeCompiler::GenerateAllocateContextStub() {
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1476,7 +1494,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
// R0: new object
// Restore the frame pointer.
@ -1492,7 +1510,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
// R0: new allocated Context object.
// Clobbered:
// Potentially any since it can go to runtime.
void StubCodeCompiler::GenerateCloneContextStub() {
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1551,7 +1569,7 @@ void StubCodeCompiler::GenerateCloneContextStub() {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
// R0: new object
// Restore the frame pointer.
@ -1559,7 +1577,7 @@ void StubCodeCompiler::GenerateCloneContextStub() {
__ Ret();
}
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@ -1732,11 +1750,11 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
}
}
void StubCodeCompiler::GenerateWriteBarrierStub() {
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(assembler, true);
}
@ -1848,15 +1866,16 @@ static void GenerateAllocateObjectHelper(Assembler* assembler,
}
// Called for inline allocation of objects (any class).
void StubCodeCompiler::GenerateAllocateObjectStub() {
void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
}
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
Assembler* assembler) {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
const Register kClsReg = R1;
if (!FLAG_precompiled_mode) {
@ -1886,13 +1905,14 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
__ LeaveDartFrameAndReturn();
}
// Called for inline allocation of objects.
void StubCodeCompiler::GenerateAllocationStubForClass(
Assembler* assembler,
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const Code& allocate_object,
@ -1961,7 +1981,8 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
// LR : return address.
// SP : address of last argument.
// R4: arguments descriptor array.
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver.
@ -1999,7 +2020,8 @@ void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
// R9: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
Assembler* assembler) {
Register ic_reg = R9;
Register func_reg = R8;
if (FLAG_precompiled_mode) {
@ -2022,7 +2044,8 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
}
// Loads function into 'temp_reg'.
void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
if (FLAG_precompiled_mode) {
__ Breakpoint();
return;
@ -2133,6 +2156,7 @@ static void GenerateRecordEntryPoint(Assembler* assembler) {
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -2150,9 +2174,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
}
if (optimized == kOptimized) {
GenerateOptimizedUsageCounterIncrement();
GenerateOptimizedUsageCounterIncrement(assembler);
} else {
GenerateUsageCounterIncrement(/* scratch */ R8);
GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
}
ASSERT(exactness == kIgnoreExactness); // Unimplemented.
@ -2377,63 +2401,67 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R9: ICData
// R8: Function
// LR: return address
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kIgnoreExactness);
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
@ -2441,7 +2469,8 @@ void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
// R8: Function
// LR: return address
void StubCodeCompiler::
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
@ -2449,17 +2478,19 @@ void StubCodeCompiler::
// R9: ICData
// R8: Function
// LR: return address
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateRecordEntryPoint(assembler);
GenerateUsageCounterIncrement(/* scratch */ R8);
GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
#if defined(DEBUG)
{
Label ok;
@ -2528,26 +2559,28 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ R8);
GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
Token::kILLEGAL, kUnoptimized, kStaticCall,
kIgnoreExactness);
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// R9: ICData
// LR: return address
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ R8);
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ R8);
GenerateNArgsCheckInlineCacheStub(
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// Stub for compiling a function and jumping to the compiled code.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub() {
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
// Preserve arg desc, pass function.
COMPILE_ASSERT(FUNCTION_REG < ARGS_DESC_REG);
@ -2561,7 +2594,7 @@ void StubCodeCompiler::GenerateLazyCompileStub() {
}
// R9: Contains an ICData.
void StubCodeCompiler::GenerateICCallBreakpointStub() {
void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2578,7 +2611,8 @@ void StubCodeCompiler::GenerateICCallBreakpointStub() {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2593,7 +2627,7 @@ void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2609,7 +2643,7 @@ void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
}
// Called only from unoptimized code. All relevant registers have been saved.
void StubCodeCompiler::GenerateDebugStepCheckStub() {
void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2860,27 +2894,27 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 3);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 5);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 7);
}
// Return the current stack pointer address, used to do stack alignment checks.
void StubCodeCompiler::GenerateGetCStackPointerStub() {
void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
__ mov(R0, Operand(SP));
__ Ret();
}
@ -2894,7 +2928,7 @@ void StubCodeCompiler::GenerateGetCStackPointerStub() {
// Does not return.
//
// Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
void StubCodeCompiler::GenerateJumpToFrameStub() {
void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
COMPILE_ASSERT(kExceptionObjectReg == R0);
COMPILE_ASSERT(kStackTraceObjectReg == R1);
COMPILE_ASSERT(IsAbiPreservedRegister(R4));
@ -2948,7 +2982,7 @@ void StubCodeCompiler::GenerateJumpToFrameStub() {
//
// The arguments are stored in the Thread object.
// Does not return.
void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
WRITES_RETURN_ADDRESS_TO_LR(
__ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
@ -2972,7 +3006,7 @@ void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateDeoptForRewindStub() {
void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Push zap value instead of CODE_REG.
__ LoadImmediate(IP, kZapCodeReg);
__ Push(IP);
@ -2992,7 +3026,7 @@ void StubCodeCompiler::GenerateDeoptForRewindStub() {
// Calls to the runtime to optimize the given function.
// R8: function to be reoptimized.
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub() {
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ ldr(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
__ Push(ARGS_DESC_REG);
@ -3073,7 +3107,8 @@ static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
@ -3107,7 +3142,8 @@ void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
const Register temp = R2;
const Register left = R1;
const Register right = R0;
@ -3124,7 +3160,7 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
// FUNCTION_REG: target function
// ARGS_DESC_REG: arguments descriptor
// CODE_REG: target Code
void StubCodeCompiler::GenerateMegamorphicCallStub() {
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
__ LoadTaggedClassIdMayBeSmi(R8, R0);
// R8: receiver cid as Smi.
__ ldr(R2,
@ -3179,10 +3215,10 @@ void StubCodeCompiler::GenerateMegamorphicCallStub() {
__ b(&loop);
__ Bind(&miss);
GenerateSwitchableCallMissStub();
GenerateSwitchableCallMissStub(assembler);
}
void StubCodeCompiler::GenerateICCallThroughCodeStub() {
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
__ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ ldr(R4, FieldAddress(IC_DATA_REG,
@ -3231,7 +3267,8 @@ void StubCodeCompiler::GenerateICCallThroughCodeStub() {
// R9: MonomorphicSmiableCall object
//
// R2, R3: clobbered
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
Assembler* assembler) {
__ LoadClassIdMayBeSmi(IP, R0);
// entrypoint_ should come right after expected_cid_
@ -3264,7 +3301,7 @@ static void CallSwitchableCallMissRuntimeEntry(Assembler* assembler,
// Called from switchable IC calls.
// R0: receiver
void StubCodeCompiler::GenerateSwitchableCallMissStub() {
void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
__ ldr(CODE_REG,
Address(THR, target::Thread::switchable_call_miss_stub_offset()));
__ EnterStubFrame();
@ -3280,7 +3317,7 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub() {
// R9: SingleTargetCache
// Passed to target:
// CODE_REG: target Code object
void StubCodeCompiler::GenerateSingleTargetCallStub() {
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
Label miss;
__ LoadClassIdMayBeSmi(R1, R0);
__ ldrh(R2,
@ -3323,7 +3360,8 @@ static int GetScaleFactor(intptr_t size) {
return -1;
}
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid) {
const intptr_t element_size = TypedDataElementSizeInBytes(cid);
const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
const intptr_t scale_shift = GetScaleFactor(element_size);

View file
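A note on the pattern: every hunk in this revert makes the same mechanical change - each Generate* entry point regains an explicit Assembler* assembler first parameter, and helper calls such as EnsureIsNewOrRemembered(assembler, ...) or GenerateUsageCounterIncrement(assembler, ...) thread that pointer through by hand, with the __ macro expanding to assembler-> in these files. The following is a minimal, self-contained sketch of that calling convention; the Assembler class here is a stand-in that only prints mnemonics, and GenerateExampleStub and the runtime-entry string are hypothetical, so treat it as an illustration of the shape of the code rather than the VM's real API.

#include <iostream>
#include <string>

// Stand-in assembler: the real dart::compiler::Assembler emits machine code,
// this one just prints the operations it is asked for.
class Assembler {
 public:
  void EnterStubFrame() { Emit("EnterStubFrame"); }
  void LeaveStubFrame() { Emit("LeaveStubFrame"); }
  void Ret() { Emit("Ret"); }
  void CallRuntime(const std::string& entry) { Emit("CallRuntime " + entry); }

 private:
  void Emit(const std::string& op) { std::cout << op << "\n"; }
};

// Stub generators refer to the assembler through this macro, so the parameter
// must literally be named "assembler" in every generator and helper.
#define __ assembler->

class StubCodeCompiler {
 public:
  // Helper in the style of EnsureIsNewOrRemembered: it takes the assembler
  // explicitly as its first argument.
  static void EnsureIsNewOrRemembered(Assembler* assembler,
                                      bool preserve_registers) {
    (void)preserve_registers;  // the real code saves/restores registers here
    __ CallRuntime("EnsureRemembered");  // entry name is illustrative only
  }

  // Hypothetical stub standing in for any of the Generate*Stub entry points.
  static void GenerateExampleStub(Assembler* assembler) {
    __ EnterStubFrame();
    // The assembler argument is repeated at every helper call site.
    EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
    __ LeaveStubFrame();
    __ Ret();
  }
};

#undef __

int main() {
  Assembler assembler;
  StubCodeCompiler::GenerateExampleStub(&assembler);
  return 0;
}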

@ -34,7 +34,8 @@ namespace compiler {
//
// WARNING: This might clobber all registers except for [R0], [THR] and [FP].
// The caller should simply call LeaveStubFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers) {
// If the object is not remembered we call a leaf-runtime to add it to the
// remembered set.
Label done;
@ -153,7 +154,7 @@ static void WithExceptionCatchingTrampoline(Assembler* assembler,
// SP + 8*R4 : address of return value.
// R5 : address of the runtime function to call.
// R4 : number of arguments to the call.
void StubCodeCompiler::GenerateCallToRuntimeStub() {
void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
const intptr_t thread_offset = target::NativeArguments::thread_offset();
const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
const intptr_t argv_offset = target::NativeArguments::argv_offset();
@ -269,6 +270,7 @@ void StubCodeCompiler::GenerateCallToRuntimeStub() {
}
void StubCodeCompiler::GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
@ -296,6 +298,7 @@ void StubCodeCompiler::GenerateSharedStubGeneric(
}
void StubCodeCompiler::GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
@ -315,12 +318,12 @@ void StubCodeCompiler::GenerateSharedStub(
SharedSlowPathStubABI::kResultReg)));
}
};
GenerateSharedStubGeneric(save_fpu_registers,
GenerateSharedStubGeneric(assembler, save_fpu_registers,
self_code_stub_offset_from_thread, allow_return,
perform_runtime_call);
}
void StubCodeCompiler::GenerateEnterSafepointStub() {
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
@ -375,12 +378,13 @@ static void GenerateExitSafepointStubCommon(Assembler* assembler,
__ Ret();
}
void StubCodeCompiler::GenerateExitSafepointStub() {
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
GenerateExitSafepointStubCommon(
assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
Assembler* assembler) {
GenerateExitSafepointStubCommon(
assembler,
kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@ -395,7 +399,8 @@ void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
// On exit:
// R19: clobbered, although normally callee-saved
// Stack: preserved, CSP == SP
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
Assembler* assembler) {
COMPILE_ASSERT(IsAbiPreservedRegister(R19));
SPILLS_RETURN_ADDRESS_FROM_LR_TO_REGISTER(__ mov(R19, LR));
@ -427,6 +432,7 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
#if !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateJITCallbackTrampolines(
Assembler* assembler,
intptr_t next_callback_id) {
#if !defined(HOST_ARCH_ARM64)
// TODO(37299): FFI is not supported in SIMARM64.
@ -571,6 +577,7 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// R1: The extracted method.
// R4: The type_arguments_field_offset (or 0)
void StubCodeCompiler::GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic) {
@ -663,7 +670,8 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
__ Ret();
}
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
Assembler* assembler) {
__ EnterStubFrame();
__ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
__ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@ -672,7 +680,8 @@ void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
__ Breakpoint();
}
void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
// If the generated code has unboxed index/length we need to box them before
// calling the runtime entry.
@ -721,21 +730,22 @@ void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
};
GenerateSharedStubGeneric(
/*save_fpu_registers=*/with_fpu_regs,
assembler, /*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
: target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false, perform_runtime_call);
}
void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
__ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
__ Breakpoint();
};
GenerateSharedStubGeneric(
/*save_fpu_registers=*/with_fpu_regs,
assembler, /*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
: target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@ -855,14 +865,14 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -875,7 +885,7 @@ void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
// R5 : address of the native function to call.
// R2 : address of first argument in argument array.
// R1 : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -884,7 +894,7 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
// Input parameters:
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@ -905,7 +915,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub() {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub() {
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -957,7 +967,8 @@ void StubCodeCompiler::GenerateFixCallersTargetStub() {
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in is not correct - it points to the code object
// that needs to be replaced.
@ -978,7 +989,8 @@ void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in is not correct - it points to the code object
// that needs to be replaced.
@ -1198,7 +1210,8 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// R0: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, kZapCodeReg);
__ Push(TMP);
@ -1212,7 +1225,8 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
// R0: exception, must be preserved
// R1: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, kZapCodeReg);
__ Push(TMP);
@ -1224,7 +1238,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
__ ret();
}
void StubCodeCompiler::GenerateDeoptimizeStub() {
void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
__ Push(CODE_REG);
__ ldr(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@ -1282,7 +1296,8 @@ static void GenerateDispatcherCode(Assembler* assembler,
// Input:
// ARGS_DESC_REG - arguments descriptor
// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
GenerateNoSuchMethodDispatcherBody(assembler);
}
@ -1295,7 +1310,7 @@ void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
// AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
// R3, R7
void StubCodeCompiler::GenerateAllocateArrayStub() {
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
@ -1452,7 +1467,8 @@ void StubCodeCompiler::GenerateAllocateArrayStub() {
__ ret();
}
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
// For test purposes, call the allocation stub without an inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1464,13 +1480,15 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
}
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
&kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
}
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
// For test purposes, call the allocation stub without an inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1483,7 +1501,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(
/*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
@ -1496,7 +1514,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
// R1 : arguments descriptor array.
// R2 : arguments array.
// R3 : current thread.
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ Comment("InvokeDartCodeStub");
// Copy the C stack pointer (CSP/R31) into the stack pointer we'll actually
@ -1713,7 +1731,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// R0: newly allocated Context object.
// Clobbered:
// R2, R3, R4, TMP
void StubCodeCompiler::GenerateAllocateContextStub() {
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1768,7 +1786,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
// R0: new object
// Restore the frame pointer.
@ -1784,7 +1802,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
// R0: newly allocated Context object.
// Clobbered:
// R1, (R2), R3, R4, (TMP)
void StubCodeCompiler::GenerateCloneContextStub() {
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1844,7 +1862,7 @@ void StubCodeCompiler::GenerateCloneContextStub() {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
// R0: new object
// Restore the frame pointer.
@ -1852,7 +1870,7 @@ void StubCodeCompiler::GenerateCloneContextStub() {
__ ret();
}
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@ -2050,11 +2068,11 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
}
}
void StubCodeCompiler::GenerateWriteBarrierStub() {
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(assembler, true);
}
@ -2164,15 +2182,16 @@ static void GenerateAllocateObjectHelper(Assembler* assembler,
}
// Called for inline allocation of objects (any class).
void StubCodeCompiler::GenerateAllocateObjectStub() {
void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
}
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
Assembler* assembler) {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
if (!FLAG_precompiled_mode) {
__ ldr(CODE_REG,
Address(THR, target::Thread::call_to_runtime_stub_offset()));
@ -2198,7 +2217,7 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
__ LeaveStubFrame();
@ -2207,6 +2226,7 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
// Called for inline allocation of objects.
void StubCodeCompiler::GenerateAllocationStubForClass(
Assembler* assembler,
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const Code& allocate_object,
@ -2279,7 +2299,8 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
// LR : return address.
// SP : address of last argument.
// R4: arguments descriptor array.
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver.
@ -2318,7 +2339,8 @@ void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
// R5: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
Assembler* assembler) {
Register ic_reg = R5;
Register func_reg = R6;
if (FLAG_precompiled_mode) {
@ -2345,7 +2367,8 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
}
// Loads function into 'temp_reg'.
void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
if (FLAG_precompiled_mode) {
__ Breakpoint();
return;
@ -2459,6 +2482,7 @@ static void GenerateRecordEntryPoint(Assembler* assembler) {
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -2476,9 +2500,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
}
if (optimized == kOptimized) {
GenerateOptimizedUsageCounterIncrement();
GenerateOptimizedUsageCounterIncrement(assembler);
} else {
GenerateUsageCounterIncrement(/*scratch=*/R6);
GenerateUsageCounterIncrement(assembler, /*scratch=*/R6);
}
ASSERT(exactness == kIgnoreExactness); // Unimplemented.
@ -2711,63 +2735,67 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
// R5: ICData
// R6: Function
// LR: return address
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kIgnoreExactness);
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// R0: receiver
@ -2775,7 +2803,8 @@ void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
// R6: Function
// LR: return address
void StubCodeCompiler::
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
@ -2783,17 +2812,19 @@ void StubCodeCompiler::
// R5: ICData
// R6: Function
// LR: return address
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateRecordEntryPoint(assembler);
GenerateUsageCounterIncrement(/* scratch */ R6);
GenerateUsageCounterIncrement(assembler, /* scratch */ R6);
#if defined(DEBUG)
{
Label ok;
@ -2869,26 +2900,28 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ R6);
GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
Token::kILLEGAL, kUnoptimized, kStaticCall,
kIgnoreExactness);
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ R6);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// R5: ICData
// LR: return address
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ R6);
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ R6);
GenerateNArgsCheckInlineCacheStub(
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// Stub for compiling a function and jumping to the compiled code.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub() {
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
// Preserve arg desc.
__ EnterStubFrame();
__ Push(ARGS_DESC_REG); // Save arg. desc.
@ -2906,7 +2939,7 @@ void StubCodeCompiler::GenerateLazyCompileStub() {
}
// R5: Contains an ICData.
void StubCodeCompiler::GenerateICCallBreakpointStub() {
void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2924,7 +2957,8 @@ void StubCodeCompiler::GenerateICCallBreakpointStub() {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2940,7 +2974,7 @@ void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2955,7 +2989,7 @@ void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
}
// Called only from unoptimized code. All relevant registers have been saved.
void StubCodeCompiler::GenerateDebugStepCheckStub() {
void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -3185,26 +3219,26 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 3);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 5);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 7);
}
void StubCodeCompiler::GenerateGetCStackPointerStub() {
void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
__ mov(R0, CSP);
__ ret();
}
@ -3218,7 +3252,7 @@ void StubCodeCompiler::GenerateGetCStackPointerStub() {
// Does not return.
//
// Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
void StubCodeCompiler::GenerateJumpToFrameStub() {
void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
ASSERT(kExceptionObjectReg == R0);
ASSERT(kStackTraceObjectReg == R1);
__ set_lr_state(compiler::LRState::Clobbered());
@ -3271,7 +3305,7 @@ void StubCodeCompiler::GenerateJumpToFrameStub() {
//
// The arguments are stored in the Thread object.
// Does not return.
void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
WRITES_RETURN_ADDRESS_TO_LR(
__ LoadFromOffset(LR, THR, target::Thread::resume_pc_offset()));
@ -3294,7 +3328,7 @@ void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateDeoptForRewindStub() {
void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Push zap value instead of CODE_REG.
__ LoadImmediate(TMP, kZapCodeReg);
__ Push(TMP);
@ -3314,7 +3348,7 @@ void StubCodeCompiler::GenerateDeoptForRewindStub() {
// Calls to the runtime to optimize the given function.
// R6: function to be re-optimized.
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub() {
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
__ EnterStubFrame();
__ Push(ARGS_DESC_REG);
@ -3382,7 +3416,8 @@ static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
@ -3415,7 +3450,8 @@ void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
// SP + 4: left operand.
// SP + 0: right operand.
// Return Zero condition flag set if equal.
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
const Register left = R1;
const Register right = R0;
__ LoadFromOffset(left, SP, 1 * target::kWordSize);
@ -3430,7 +3466,7 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
// FUNCTION_REG: target function
// CODE_REG: target Code
// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub() {
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
__ BranchIfSmi(R0, &smi_case);
@ -3505,13 +3541,13 @@ void StubCodeCompiler::GenerateMegamorphicCallStub() {
__ b(&cid_loaded);
__ Bind(&miss);
GenerateSwitchableCallMissStub();
GenerateSwitchableCallMissStub(assembler);
}
// Input:
// R0 - receiver
// IC_DATA_REG - icdata
void StubCodeCompiler::GenerateICCallThroughCodeStub() {
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
__ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ ldr(ARGS_DESC_REG,
@ -3563,7 +3599,8 @@ void StubCodeCompiler::GenerateICCallThroughCodeStub() {
// R5: MonomorphicSmiableCall object
//
// R1: clobbered
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
Assembler* assembler) {
Label miss;
__ LoadClassIdMayBeSmi(IP0, R0);
@ -3586,7 +3623,7 @@ void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
// Called from switchable IC calls.
// R0: receiver
void StubCodeCompiler::GenerateSwitchableCallMissStub() {
void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
__ ldr(CODE_REG,
Address(THR, target::Thread::switchable_call_miss_stub_offset()));
__ EnterStubFrame();
@ -3613,7 +3650,7 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub() {
// R5: SingleTargetCache
// Passed to target:
// CODE_REG: target Code object
void StubCodeCompiler::GenerateSingleTargetCallStub() {
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
Label miss;
__ LoadClassIdMayBeSmi(R1, R0);
__ ldr(R2, FieldAddress(R5, target::SingleTargetCache::lower_limit_offset()),
@ -3668,7 +3705,8 @@ static int GetScaleFactor(intptr_t size) {
return -1;
}
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid) {
const intptr_t element_size = TypedDataElementSizeInBytes(cid);
const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
const intptr_t scale_shift = GetScaleFactor(element_size);

View file
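The ia32 hunks that follow show the other half of the pattern: file-local helpers such as GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) remain free functions taking the assembler explicitly, and the public static stubs simply forward it together with a flag. Below is a short sketch of that dispatch, again with a stand-in Assembler rather than the real VM class.

#include <cstdio>

// Stand-in assembler that only records a comment string.
class Assembler {
 public:
  void Comment(const char* text) { std::printf("; %s\n", text); }
};

#define __ assembler->

namespace {

// File-local helper shared by the normal and the array (card-marking)
// write-barrier stubs; the bool selects which variant is emitted.
void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
  __ Comment(cards ? "array write barrier (card marking path)"
                   : "write barrier");
}

}  // namespace

class StubCodeCompiler {
 public:
  static void GenerateWriteBarrierStub(Assembler* assembler) {
    GenerateWriteBarrierStubHelper(assembler, /*cards=*/false);
  }
  static void GenerateArrayWriteBarrierStub(Assembler* assembler) {
    GenerateWriteBarrierStubHelper(assembler, /*cards=*/true);
  }
};

#undef __

int main() {
  Assembler assembler;
  StubCodeCompiler::GenerateWriteBarrierStub(&assembler);
  StubCodeCompiler::GenerateArrayWriteBarrierStub(&assembler);
  return 0;
}

The two public stubs differ only in the boolean they pass; the assembler argument exists purely to thread state into the shared helper.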

@ -33,7 +33,8 @@ namespace compiler {
//
// WARNING: This might clobber all registers except for [EAX], [THR] and [FP].
// The caller should simply call LeaveFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers) {
// If the object is not remembered we call a leaf-runtime to add it to the
// remembered set.
Label done;
@ -60,7 +61,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
// ECX : address of the runtime function to call.
// EDX : number of arguments to the call.
// Must preserve callee saved registers EDI and EBX.
void StubCodeCompiler::GenerateCallToRuntimeStub() {
void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
const intptr_t thread_offset = target::NativeArguments::thread_offset();
const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
const intptr_t argv_offset = target::NativeArguments::argv_offset();
@ -136,7 +137,7 @@ void StubCodeCompiler::GenerateCallToRuntimeStub() {
__ ret();
}
void StubCodeCompiler::GenerateEnterSafepointStub() {
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
__ pushal();
__ subl(SPREG, Immediate(8));
__ movsd(Address(SPREG, 0), XMM0);
@ -178,12 +179,13 @@ static void GenerateExitSafepointStubCommon(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateExitSafepointStub() {
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
GenerateExitSafepointStubCommon(
assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
Assembler* assembler) {
GenerateExitSafepointStubCommon(
assembler,
kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@ -198,7 +200,8 @@ void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
// On exit:
// Stack: preserved
// EBX: clobbered (even though it's normally callee-saved)
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
Assembler* assembler) {
__ popl(EBX);
__ movl(ECX, compiler::Immediate(target::Thread::exit_through_ffi()));
@ -211,6 +214,7 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
}
void StubCodeCompiler::GenerateJITCallbackTrampolines(
Assembler* assembler,
intptr_t next_callback_id) {
Label done, ret_4;
@ -342,6 +346,7 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
}
void StubCodeCompiler::GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
@ -351,6 +356,7 @@ void StubCodeCompiler::GenerateSharedStubGeneric(
}
void StubCodeCompiler::GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
@ -360,17 +366,20 @@ void StubCodeCompiler::GenerateSharedStub(
__ Breakpoint();
}
void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
bool with_fpu_regs) {
// Only used in AOT.
__ Breakpoint();
}
void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
bool with_fpu_regs) {
// Only used in AOT.
__ Breakpoint();
}
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
Assembler* assembler) {
// Only used in AOT.
__ Breakpoint();
}
@ -463,14 +472,14 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -483,7 +492,7 @@ void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
// EAX : address of first argument in argument array.
// ECX : address of the native function to call.
// EDX : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -492,7 +501,7 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
// Input parameters:
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushl(Immediate(0)); // Setup space on stack for return value.
@ -508,7 +517,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub() {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub() {
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -543,7 +552,8 @@ void StubCodeCompiler::GenerateFixCallersTargetStub() {
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
Assembler* assembler) {
__ EnterStubFrame();
__ pushl(Immediate(0)); // Setup space on stack for return value.
__ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0);
@ -556,7 +566,8 @@ void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
Assembler* assembler) {
__ EnterStubFrame();
// Preserve type arguments register.
__ pushl(AllocateObjectABI::kTypeArgumentsReg);
@ -752,7 +763,8 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// EAX: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
Assembler* assembler) {
// Return address for "call" to deopt stub.
__ pushl(Immediate(kZapReturnAddress));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn);
@ -761,14 +773,15 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
// EAX: exception, must be preserved
// EDX: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
Assembler* assembler) {
// Return address for "call" to deopt stub.
__ pushl(Immediate(kZapReturnAddress));
GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow);
__ ret();
}
void StubCodeCompiler::GenerateDeoptimizeStub() {
void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
__ ret();
}
@ -819,7 +832,8 @@ static void GenerateDispatcherCode(Assembler* assembler,
GenerateNoSuchMethodDispatcherCode(assembler);
}
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
GenerateNoSuchMethodDispatcherCode(assembler);
}
@ -831,7 +845,7 @@ void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
// AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
// EBX, EDI
void StubCodeCompiler::GenerateAllocateArrayStub() {
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
@ -984,7 +998,7 @@ void StubCodeCompiler::GenerateAllocateArrayStub() {
// ESP + 12 : arguments array.
// ESP + 16 : current thread.
// Uses EAX, EDX, ECX, EDI as temporary registers.
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
const intptr_t kTargetCodeOffset = 2 * target::kWordSize;
const intptr_t kArgumentsDescOffset = 3 * target::kWordSize;
const intptr_t kArgumentsOffset = 4 * target::kWordSize;
@ -1193,7 +1207,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// EAX: newly allocated Context object.
// Clobbered:
// EBX, EDX
void StubCodeCompiler::GenerateAllocateContextStub() {
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1243,7 +1257,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
// EAX: new object
// Restore the frame pointer.
@ -1259,7 +1273,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
// EAX: newly allocated Context object.
// Clobbered:
// EBX, ECX, EDX
void StubCodeCompiler::GenerateCloneContextStub() {
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1317,7 +1331,7 @@ void StubCodeCompiler::GenerateCloneContextStub() {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
// EAX: new object
// Restore the frame pointer.
@ -1325,7 +1339,7 @@ void StubCodeCompiler::GenerateCloneContextStub() {
__ ret();
}
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@ -1353,7 +1367,8 @@ void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
COMPILE_ASSERT(kWriteBarrierObjectReg == EDX);
COMPILE_ASSERT(kWriteBarrierValueReg == EBX);
COMPILE_ASSERT(kWriteBarrierSlotReg == EDI);
static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
static void GenerateWriteBarrierStubHelper(Assembler* assembler,
bool cards) {
// Save values being destroyed.
__ pushl(EAX);
__ pushl(ECX);
@ -1509,23 +1524,24 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
}
}
void StubCodeCompiler::GenerateWriteBarrierStub() {
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(assembler, true);
}
void StubCodeCompiler::GenerateAllocateObjectStub() {
void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
__ int3();
}
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
Assembler* assembler) {
__ int3();
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
__ int3();
}
@ -1538,6 +1554,7 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
// Returns patch_code_pc offset where patching code for disabling the stub
// has been generated (similar to regularly generated Dart code).
void StubCodeCompiler::GenerateAllocationStubForClass(
Assembler* assembler,
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const Code& allocate_object,
@ -1665,7 +1682,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
if (AllocateObjectInstr::WillAllocateNewOrRemembered(cls)) {
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
}
// AllocateObjectABI::kResultReg: new object
@ -1682,7 +1699,8 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
// ESP + 4 : address of last argument.
// EDX : arguments descriptor array.
// Uses EAX, EBX, EDI as temporary registers.
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver.
@ -1720,7 +1738,8 @@ void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
Assembler* assembler) {
Register ic_reg = ECX;
Register func_reg = EAX;
if (FLAG_trace_optimized_ic_calls) {
@ -1740,7 +1759,8 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
}
// Loads function into 'temp_reg'.
void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
if (FLAG_optimization_counter_threshold >= 0) {
Register func_reg = temp_reg;
ASSERT(func_reg != IC_DATA_REG);
@ -1836,22 +1856,24 @@ static void EmitFastSmiOp(Assembler* assembler,
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
Optimized optimized,
CallType type,
Exactness exactness) {
GenerateNArgsCheckInlineCacheStubForEntryKind(num_args, handle_ic_miss, kind,
optimized, type, exactness,
CodeEntryKind::kNormal);
GenerateNArgsCheckInlineCacheStubForEntryKind(
assembler, num_args, handle_ic_miss, kind, optimized, type, exactness,
CodeEntryKind::kNormal);
__ BindUncheckedEntryPoint();
GenerateNArgsCheckInlineCacheStubForEntryKind(num_args, handle_ic_miss, kind,
optimized, type, exactness,
CodeEntryKind::kUnchecked);
GenerateNArgsCheckInlineCacheStubForEntryKind(
assembler, num_args, handle_ic_miss, kind, optimized, type, exactness,
CodeEntryKind::kUnchecked);
}
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -1860,9 +1882,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
Exactness exactness,
CodeEntryKind entry_kind) {
if (optimized == kOptimized) {
GenerateOptimizedUsageCounterIncrement();
GenerateOptimizedUsageCounterIncrement(assembler);
} else {
GenerateUsageCounterIncrement(/* scratch */ EAX);
GenerateUsageCounterIncrement(assembler, /* scratch */ EAX);
}
ASSERT(exactness == kIgnoreExactness); // Unimplemented.
@ -2043,71 +2065,77 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
// ECX: ICData
// EAX: Function
// ESP[0]: return address
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kIgnoreExactness);
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// EBX: receiver
@ -2115,7 +2143,8 @@ void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
// EAX: Function
// ESP[0]: return address
void StubCodeCompiler::
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
@ -2123,19 +2152,19 @@ void StubCodeCompiler::
// ECX: ICData
// EAX: Function
// ESP[0]: return address
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// ECX: ICData
// ESP[0]: return address
static void GenerateZeroArgsUnoptimizedStaticCallForEntryKind(
StubCodeCompiler* stub_code_compiler,
Assembler* assembler,
CodeEntryKind entry_kind) {
stub_code_compiler->GenerateUsageCounterIncrement(/* scratch */ EAX);
auto* const assembler = stub_code_compiler->assembler;
StubCodeCompiler::GenerateUsageCounterIncrement(assembler, /* scratch */ EAX);
#if defined(DEBUG)
{
@ -2197,34 +2226,37 @@ static void GenerateZeroArgsUnoptimizedStaticCallForEntryKind(
#endif
}
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
GenerateZeroArgsUnoptimizedStaticCallForEntryKind(this,
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateZeroArgsUnoptimizedStaticCallForEntryKind(assembler,
CodeEntryKind::kNormal);
__ BindUncheckedEntryPoint();
GenerateZeroArgsUnoptimizedStaticCallForEntryKind(this,
GenerateZeroArgsUnoptimizedStaticCallForEntryKind(assembler,
CodeEntryKind::kUnchecked);
}
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// ECX: ICData
// ESP[0]: return address
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// Stub for compiling a function and jumping to the compiled code.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub() {
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushl(FUNCTION_REG); // Pass function.
@ -2237,7 +2269,7 @@ void StubCodeCompiler::GenerateLazyCompileStub() {
}
// ECX: Contains an ICData.
void StubCodeCompiler::GenerateICCallBreakpointStub() {
void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2255,7 +2287,8 @@ void StubCodeCompiler::GenerateICCallBreakpointStub() {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2271,7 +2304,7 @@ void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2288,7 +2321,7 @@ void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
}
// Called only from unoptimized code.
void StubCodeCompiler::GenerateDebugStepCheckStub() {
void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2535,29 +2568,29 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 3);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 5);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 7);
}
// Return the current stack pointer address, used to do stack alignment checks.
// TOS + 0: return address
// Result in EAX.
void StubCodeCompiler::GenerateGetCStackPointerStub() {
void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
__ leal(EAX, Address(ESP, target::kWordSize));
__ ret();
}
@ -2569,7 +2602,7 @@ void StubCodeCompiler::GenerateGetCStackPointerStub() {
// TOS + 3: frame_pointer
// TOS + 4: thread
// No Result.
void StubCodeCompiler::GenerateJumpToFrameStub() {
void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
__ movl(THR, Address(ESP, 4 * target::kWordSize)); // Load target thread.
__ movl(EBP,
Address(ESP, 3 * target::kWordSize)); // Load target frame_pointer.
@ -2608,7 +2641,7 @@ void StubCodeCompiler::GenerateJumpToFrameStub() {
//
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
ASSERT(kExceptionObjectReg == EAX);
ASSERT(kStackTraceObjectReg == EDX);
__ movl(EBX, Address(THR, target::Thread::resume_pc_offset()));
@ -2632,7 +2665,7 @@ void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateDeoptForRewindStub() {
void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Push the deopt pc.
__ pushl(Address(THR, target::Thread::resume_pc_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@ -2647,7 +2680,7 @@ void StubCodeCompiler::GenerateDeoptForRewindStub() {
// Calls to the runtime to optimize the given function.
// EBX: function to be reoptimized.
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub() {
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ movl(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
__ pushl(ARGS_DESC_REG);
@ -2723,7 +2756,8 @@ static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
@ -2756,7 +2790,8 @@ void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
const Register left = EAX;
const Register right = EDX;
const Register temp = ECX;
@ -2773,7 +2808,7 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
// EBX: target entry point
// FUNCTION_REG: target function
// ARGS_DESC_REG: argument descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub() {
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
// Check if object (in tmp) is a Smi.
@ -2845,20 +2880,21 @@ void StubCodeCompiler::GenerateMegamorphicCallStub() {
__ Bind(&miss);
__ popl(EBX); // restore receiver
GenerateSwitchableCallMissStub();
GenerateSwitchableCallMissStub(assembler);
}
void StubCodeCompiler::GenerateICCallThroughCodeStub() {
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
__ int3(); // AOT only.
}
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
Assembler* assembler) {
__ int3(); // AOT only.
}
// Called from switchable IC calls.
// EBX: receiver
void StubCodeCompiler::GenerateSwitchableCallMissStub() {
void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
__ movl(CODE_REG,
Address(THR, target::Thread::switchable_call_miss_stub_offset()));
__ EnterStubFrame();
@ -2880,7 +2916,7 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub() {
__ jmp(EAX);
}
void StubCodeCompiler::GenerateSingleTargetCallStub() {
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
__ int3(); // AOT only.
}
@ -2901,7 +2937,8 @@ static ScaleFactor GetScaleFactor(intptr_t size) {
return static_cast<ScaleFactor>(0);
}
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid) {
const intptr_t element_size = TypedDataElementSizeInBytes(cid);
const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
ScaleFactor scale_factor = GetScaleFactor(element_size);

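The hunks above follow one mechanical pattern: each Generate...Stub definition regains an explicit Assembler* assembler first parameter, and call sites such as EnsureIsNewOrRemembered and GenerateUsageCounterIncrement forward assembler by hand instead of relying on a field. Below is a minimal, self-contained sketch of that reverted shape; Assembler, GenerateExampleStub, and the __ shorthand are simplified stand-ins written for illustration (the generator is shown as a static member), not the real VM classes.

#include <iostream>

namespace sketch {

// Toy stand-in for the VM assembler: it only records what would be emitted.
class Assembler {
 public:
  void EnterStubFrame() { std::cout << "EnterStubFrame\n"; }
  void Ret() { std::cout << "Ret\n"; }
};

// Shorthand used throughout the generators above; here it simply forwards to
// the explicit parameter.
#define __ assembler->

class StubCodeCompiler {
 public:
  // Reverted shape: a static member that takes the assembler as its first
  // argument, so helpers receive it explicitly rather than via a field.
  static void GenerateExampleStub(Assembler* assembler) {
    __ EnterStubFrame();
    __ Ret();
  }
};

#undef __

}  // namespace sketch

int main() {
  sketch::Assembler assembler;
  sketch::StubCodeCompiler::GenerateExampleStub(&assembler);
  return 0;
}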

@ -34,7 +34,8 @@ namespace compiler {
//
// WARNING: This might clobber all registers except for [A0], [THR] and [FP].
// The caller should simply call LeaveStubFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers) {
// If the object is not remembered we call a leaf-runtime to add it to the
// remembered set.
Label done;
@ -59,7 +60,7 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
// SP + 8*T4 : address of return value.
// T5 : address of the runtime function to call.
// T4 : number of arguments to the call.
void StubCodeCompiler::GenerateCallToRuntimeStub() {
void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
const intptr_t thread_offset = target::NativeArguments::thread_offset();
const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
const intptr_t argv_offset = target::NativeArguments::argv_offset();
@ -159,6 +160,7 @@ void StubCodeCompiler::GenerateCallToRuntimeStub() {
}
void StubCodeCompiler::GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
@ -186,6 +188,7 @@ void StubCodeCompiler::GenerateSharedStubGeneric(
}
void StubCodeCompiler::GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
@ -204,12 +207,12 @@ void StubCodeCompiler::GenerateSharedStub(
SharedSlowPathStubABI::kResultReg)));
}
};
GenerateSharedStubGeneric(save_fpu_registers,
GenerateSharedStubGeneric(assembler, save_fpu_registers,
self_code_stub_offset_from_thread, allow_return,
perform_runtime_call);
}
void StubCodeCompiler::GenerateEnterSafepointStub() {
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
@ -250,12 +253,13 @@ static void GenerateExitSafepointStubCommon(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateExitSafepointStub() {
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
GenerateExitSafepointStubCommon(
assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
Assembler* assembler) {
GenerateExitSafepointStubCommon(
assembler,
kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@ -270,7 +274,8 @@ void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
// On exit:
// S3: clobbered, although normally callee-saved
// Stack: preserved, CSP == SP
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
Assembler* assembler) {
COMPILE_ASSERT(IsAbiPreservedRegister(S3));
__ mv(S3, RA);
__ LoadImmediate(T1, target::Thread::exit_through_ffi());
@ -294,6 +299,7 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
#if !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateJITCallbackTrampolines(
Assembler* assembler,
intptr_t next_callback_id) {
#if defined(USING_SIMULATOR)
// TODO(37299): FFI is not supported in SIMRISCV32/64.
@ -415,6 +421,7 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// T1: The extracted method.
// T4: The type_arguments_field_offset (or 0)
void StubCodeCompiler::GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic) {
@ -506,7 +513,8 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
__ Ret();
}
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
Assembler* assembler) {
__ EnterStubFrame();
__ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
__ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@ -515,7 +523,8 @@ void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
__ Breakpoint();
}
void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
// If the generated code has unboxed index/length we need to box them before
// calling the runtime entry.
@ -560,21 +569,22 @@ void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
};
GenerateSharedStubGeneric(
/*save_fpu_registers=*/with_fpu_regs,
assembler, /*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
: target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false, perform_runtime_call);
}
void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
__ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
__ Breakpoint();
};
GenerateSharedStubGeneric(
/*save_fpu_registers=*/with_fpu_regs,
assembler, /*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
: target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@ -674,14 +684,14 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -694,7 +704,7 @@ void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
// R5 : address of the native function to call.
// R2 : address of first argument in argument array.
// R1 : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -703,7 +713,7 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
// Input parameters:
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@ -725,7 +735,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub() {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub() {
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -776,7 +786,8 @@ void StubCodeCompiler::GenerateFixCallersTargetStub() {
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in is not correct - it points to the code object
// that needs to be replaced.
@ -797,7 +808,8 @@ void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in is not correct - it points to the code object
// that needs to be replaced.
@ -1017,7 +1029,8 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// A0: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, kZapCodeReg);
__ PushRegister(TMP);
@ -1031,7 +1044,8 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
// A0: exception, must be preserved
// A1: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ LoadImmediate(TMP, kZapCodeReg);
__ PushRegister(TMP);
@ -1043,7 +1057,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
__ ret();
}
void StubCodeCompiler::GenerateDeoptimizeStub() {
void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
__ PushRegister(CODE_REG);
__ lx(CODE_REG, Address(THR, target::Thread::deoptimize_stub_offset()));
GenerateDeoptimizationSequence(assembler, kEagerDeopt);
@ -1100,7 +1114,8 @@ static void GenerateDispatcherCode(Assembler* assembler,
// Input:
// ARGS_DESC_REG - arguments descriptor
// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
GenerateNoSuchMethodDispatcherBody(assembler);
}
@ -1113,7 +1128,7 @@ void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
// AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
// T3, T4, T5
void StubCodeCompiler::GenerateAllocateArrayStub() {
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
@ -1263,7 +1278,8 @@ void StubCodeCompiler::GenerateAllocateArrayStub() {
__ ret();
}
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
// For test purposes, call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1275,13 +1291,15 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
}
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
&kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
}
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
// For test purposes, call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1294,7 +1312,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(
/*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
@ -1308,7 +1326,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
// A2 : arguments array.
// A3 : current thread.
// Beware! TMP == A3
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ Comment("InvokeDartCodeStub");
__ EnterFrame(1 * target::kWordSize);
@ -1511,7 +1529,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// T1: number of context variables.
// Output:
// A0: new allocated Context object.
void StubCodeCompiler::GenerateAllocateContextStub() {
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1560,7 +1578,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
// A0: new object
// Restore the frame pointer.
@ -1573,7 +1591,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
// T5: context variable to clone.
// Output:
// A0: new allocated Context object.
void StubCodeCompiler::GenerateCloneContextStub() {
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1632,14 +1650,14 @@ void StubCodeCompiler::GenerateCloneContextStub() {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
// A0: new object
__ LeaveStubFrame();
__ ret();
}
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@ -1845,11 +1863,11 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler,
}
}
void StubCodeCompiler::GenerateWriteBarrierStub() {
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(assembler, true);
}
@ -1953,15 +1971,16 @@ static void GenerateAllocateObjectHelper(Assembler* assembler,
}
// Called for inline allocation of objects (any class).
void StubCodeCompiler::GenerateAllocateObjectStub() {
void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
}
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
Assembler* assembler) {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
if (!FLAG_precompiled_mode) {
__ lx(CODE_REG,
Address(THR, target::Thread::call_to_runtime_stub_offset()));
@ -1986,7 +2005,7 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
__ LeaveStubFrame();
@ -1995,6 +2014,7 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
// Called for inline allocation of objects.
void StubCodeCompiler::GenerateAllocationStubForClass(
Assembler* assembler,
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const Code& allocate_object,
@ -2067,7 +2087,8 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
// RA : return address.
// SP : address of last argument.
// S4: arguments descriptor array.
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver.
@ -2105,7 +2126,8 @@ void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
// S5: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
Assembler* assembler) {
if (FLAG_precompiled_mode) {
__ Breakpoint();
return;
@ -2121,7 +2143,8 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
}
// Loads function into 'func_reg'.
void StubCodeCompiler::GenerateUsageCounterIncrement(Register func_reg) {
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register func_reg) {
if (FLAG_precompiled_mode) {
__ trap();
return;
@ -2241,6 +2264,7 @@ static void GenerateRecordEntryPoint(Assembler* assembler) {
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -2259,9 +2283,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
}
if (optimized == kOptimized) {
GenerateOptimizedUsageCounterIncrement();
GenerateOptimizedUsageCounterIncrement(assembler);
} else {
GenerateUsageCounterIncrement(/*scratch=*/T0);
GenerateUsageCounterIncrement(assembler, /*scratch=*/T0);
}
ASSERT(exactness == kIgnoreExactness); // Unimplemented.
@ -2484,63 +2508,67 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// A0: receiver
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// A0: receiver
// S5: ICData
// A6: Function
// RA: return address
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kIgnoreExactness);
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// A0: receiver
@ -2548,7 +2576,8 @@ void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
// A6: Function
// RA: return address
void StubCodeCompiler::
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
__ Stop("Unimplemented");
}
@ -2556,17 +2585,19 @@ void StubCodeCompiler::
// S5: ICData
// A6: Function
// RA: return address
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateRecordEntryPoint(assembler);
GenerateUsageCounterIncrement(/* scratch */ T0);
GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
#if defined(DEBUG)
{
@ -2642,26 +2673,28 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ T0);
GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
Token::kILLEGAL, kUnoptimized, kStaticCall,
kIgnoreExactness);
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// S5: ICData
// RA: return address
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
GenerateUsageCounterIncrement(/* scratch */ T0);
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateUsageCounterIncrement(assembler, /* scratch */ T0);
GenerateNArgsCheckInlineCacheStub(
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// Stub for compiling a function and jumping to the compiled code.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub() {
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
// Preserve arg desc.
__ EnterStubFrame();
// Save arguments descriptor and pass function.
@ -2680,7 +2713,7 @@ void StubCodeCompiler::GenerateLazyCompileStub() {
// A0: Receiver
// S5: ICData
void StubCodeCompiler::GenerateICCallBreakpointStub() {
void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2700,7 +2733,8 @@ void StubCodeCompiler::GenerateICCallBreakpointStub() {
}
// S5: ICData
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2717,7 +2751,7 @@ void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2733,7 +2767,7 @@ void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
}
// Called only from unoptimized code. All relevant registers have been saved.
void StubCodeCompiler::GenerateDebugStepCheckStub() {
void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2962,26 +2996,26 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 3);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 5);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 7);
}
void StubCodeCompiler::GenerateGetCStackPointerStub() {
void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
__ mv(A0, SP);
__ ret();
}
@ -2995,7 +3029,7 @@ void StubCodeCompiler::GenerateGetCStackPointerStub() {
// Does not return.
//
// Notice: We need to keep this in sync with `Simulator::JumpToFrame()`.
void StubCodeCompiler::GenerateJumpToFrameStub() {
void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
ASSERT(kExceptionObjectReg == A0);
ASSERT(kStackTraceObjectReg == A1);
__ mv(CALLEE_SAVED_TEMP, A0); // Program counter.
@ -3044,7 +3078,7 @@ void StubCodeCompiler::GenerateJumpToFrameStub() {
//
// The arguments are stored in the Thread object.
// Does not return.
void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
// Exception object.
ASSERT(kExceptionObjectReg == A0);
__ LoadFromOffset(A0, THR, target::Thread::active_exception_offset());
@ -3062,7 +3096,7 @@ void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateDeoptForRewindStub() {
void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Push zap value instead of CODE_REG.
__ LoadImmediate(TMP, kZapCodeReg);
__ PushRegister(TMP);
@ -3081,7 +3115,7 @@ void StubCodeCompiler::GenerateDeoptForRewindStub() {
// Calls to the runtime to optimize the given function.
// A0: function to be re-optimized.
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub() {
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
__ EnterStubFrame();
@ -3169,7 +3203,8 @@ static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
// SP + 4: left operand.
// SP + 0: right operand.
// Return TMP set to 0 if equal.
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
@ -3202,7 +3237,8 @@ void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
// SP + 4: left operand.
// SP + 0: right operand.
// Return TMP set to 0 if equal.
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
const Register left = A0;
const Register right = A1;
__ LoadFromOffset(left, SP, 1 * target::kWordSize);
@ -3218,7 +3254,7 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
// FUNCTION_REG: target function
// CODE_REG: target Code
// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub() {
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
__ BranchIfSmi(A0, &smi_case);
@ -3290,13 +3326,13 @@ void StubCodeCompiler::GenerateMegamorphicCallStub() {
__ j(&cid_loaded);
__ Bind(&miss);
GenerateSwitchableCallMissStub();
GenerateSwitchableCallMissStub(assembler);
}
// Input:
// A0 - receiver
// IC_DATA_REG - icdata
void StubCodeCompiler::GenerateICCallThroughCodeStub() {
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
__ lx(T1, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ lx(ARGS_DESC_REG,
@ -3347,7 +3383,8 @@ void StubCodeCompiler::GenerateICCallThroughCodeStub() {
// S5: MonomorphicSmiableCall object
//
// T1,T2: clobbered
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
Assembler* assembler) {
Label miss;
__ LoadClassIdMayBeSmi(T1, A0);
@ -3368,7 +3405,7 @@ void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
// Called from switchable IC calls.
// A0: receiver
void StubCodeCompiler::GenerateSwitchableCallMissStub() {
void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
__ lx(CODE_REG,
Address(THR, target::Thread::switchable_call_miss_stub_offset()));
__ EnterStubFrame();
@ -3393,7 +3430,7 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub() {
// S5: SingleTargetCache
// Passed to target:
// CODE_REG: target Code object
void StubCodeCompiler::GenerateSingleTargetCallStub() {
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
Label miss;
__ LoadClassIdMayBeSmi(A1, A0);
__ lhu(T2, FieldAddress(S5, target::SingleTargetCache::lower_limit_offset()));
@ -3441,7 +3478,8 @@ static int GetScaleFactor(intptr_t size) {
return -1;
}
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid) {
const intptr_t element_size = TypedDataElementSizeInBytes(cid);
const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
const intptr_t scale_shift = GetScaleFactor(element_size);

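Several hunks above (GenerateSharedStubGeneric, GenerateSharedStub, GenerateRangeError, GenerateWriteError) hand the stub-specific body to a shared generator as a perform_runtime_call lambda, now alongside the explicit assembler argument. The sketch below illustrates only that callback shape under simplified assumptions; the Assembler class, the printed strings, and the flag handling are toy stand-ins, not the real VM API.

#include <functional>
#include <iostream>

namespace sketch {

// Toy stand-in for the VM assembler.
class Assembler {
 public:
  void EnterStubFrame() { std::cout << "EnterStubFrame\n"; }
  void LeaveStubFrame() { std::cout << "LeaveStubFrame\n"; }
  void Ret() { std::cout << "Ret\n"; }
};

#define __ assembler->

// Shared generator: sets up the frame, defers the stub-specific work to the
// supplied callback, and only emits the return path when allow_return is set.
void GenerateSharedStubGeneric(Assembler* assembler,
                               bool save_fpu_registers,
                               bool allow_return,
                               std::function<void()> perform_runtime_call) {
  __ EnterStubFrame();
  if (save_fpu_registers) {
    std::cout << "save FPU registers\n";
  }
  perform_runtime_call();
  if (allow_return) {
    __ LeaveStubFrame();
    __ Ret();
  }
}

#undef __

}  // namespace sketch

int main() {
  sketch::Assembler assembler;
  auto perform_runtime_call = [&]() { std::cout << "CallRuntime\n"; };
  sketch::GenerateSharedStubGeneric(&assembler, /*save_fpu_registers=*/true,
                                    /*allow_return=*/true,
                                    perform_runtime_call);
  return 0;
}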

@ -37,7 +37,8 @@ namespace compiler {
//
// WARNING: This might clobber all registers except for [RAX], [THR] and [FP].
// The caller should simply call LeaveStubFrame() and return.
void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
void StubCodeCompiler::EnsureIsNewOrRemembered(Assembler* assembler,
bool preserve_registers) {
// If the object is not remembered we call a leaf-runtime to add it to the
// remembered set.
Label done;
@ -67,7 +68,8 @@ void StubCodeCompiler::EnsureIsNewOrRemembered(bool preserve_registers) {
// The callback [fun] may be invoked with a modified [RSP] because a [jmp_buf]
// structure is allocated on the stack (as well as the saved old
// [Thread::tsan_utils_->setjmp_buffer_]).
static void WithExceptionCatchingTrampoline(std::function<void()> fun) {
static void WithExceptionCatchingTrampoline(Assembler* assembler,
std::function<void()> fun) {
#if defined(USING_THREAD_SANITIZER) && !defined(USING_SIMULATOR)
const Register kTsanUtilsReg = RAX;
@ -157,7 +159,7 @@ static void WithExceptionCatchingTrampoline(std::function<void()> fun) {
// RBX : address of the runtime function to call.
// R10 : number of arguments to the call.
// Must preserve callee saved registers R12 and R13.
void StubCodeCompiler::GenerateCallToRuntimeStub() {
void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
const intptr_t thread_offset = target::NativeArguments::thread_offset();
const intptr_t argc_tag_offset = target::NativeArguments::argc_tag_offset();
const intptr_t argv_offset = target::NativeArguments::argv_offset();
@ -190,7 +192,7 @@ void StubCodeCompiler::GenerateCallToRuntimeStub() {
// Mark that the thread is executing VM code.
__ movq(Assembler::VMTagAddress(), RBX);
WithExceptionCatchingTrampoline([&]() {
WithExceptionCatchingTrampoline(assembler, [&]() {
// Reserve space for arguments and align frame before entering C++ world.
__ subq(RSP, Immediate(target::NativeArguments::StructSize()));
if (OS::ActivationFrameAlignment() > 1) {
@ -251,6 +253,7 @@ void StubCodeCompiler::GenerateCallToRuntimeStub() {
}
void StubCodeCompiler::GenerateSharedStubGeneric(
Assembler* assembler,
bool save_fpu_registers,
intptr_t self_code_stub_offset_from_thread,
bool allow_return,
@ -288,6 +291,7 @@ void StubCodeCompiler::GenerateSharedStubGeneric(
}
void StubCodeCompiler::GenerateSharedStub(
Assembler* assembler,
bool save_fpu_registers,
const RuntimeEntry* target,
intptr_t self_code_stub_offset_from_thread,
@ -306,12 +310,12 @@ void StubCodeCompiler::GenerateSharedStub(
RAX);
}
};
GenerateSharedStubGeneric(save_fpu_registers,
GenerateSharedStubGeneric(assembler, save_fpu_registers,
self_code_stub_offset_from_thread, allow_return,
perform_runtime_call);
}
void StubCodeCompiler::GenerateEnterSafepointStub() {
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers);
@ -349,12 +353,13 @@ static void GenerateExitSafepointStubCommon(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateExitSafepointStub() {
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
GenerateExitSafepointStubCommon(
assembler, kExitSafepointRuntimeEntry.OffsetFromThread());
}
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub(
Assembler* assembler) {
GenerateExitSafepointStubCommon(
assembler,
kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
@ -369,7 +374,8 @@ void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
// On exit:
// Stack pointer lowered by shadow space
// RBX, R12 clobbered
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
Assembler* assembler) {
__ movq(R12, compiler::Immediate(target::Thread::exit_through_ffi()));
__ TransitionGeneratedToNative(RBX, FPREG, R12,
/*enter_safepoint=*/true);
@ -390,6 +396,7 @@ static const RegisterSet kArgumentRegisterSet(
CallingConventions::kFpuArgumentRegisters);
void StubCodeCompiler::GenerateJITCallbackTrampolines(
Assembler* assembler,
intptr_t next_callback_id) {
Label done;
@ -504,6 +511,7 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// RBX: The extracted method.
// RDX: The type_arguments_field_offset (or 0)
void StubCodeCompiler::GenerateBuildMethodExtractorStub(
Assembler* assembler,
const Code& closure_allocation_stub,
const Code& context_allocation_stub,
bool generic) {
@ -594,7 +602,8 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
__ Ret();
}
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
void StubCodeCompiler::GenerateDispatchTableNullErrorStub(
Assembler* assembler) {
__ EnterStubFrame();
__ SmiTag(DispatchTableNullErrorABI::kClassIdReg);
__ PushRegister(DispatchTableNullErrorABI::kClassIdReg);
@ -603,7 +612,8 @@ void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
__ Breakpoint();
}
void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
void StubCodeCompiler::GenerateRangeError(Assembler* assembler,
bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
// If the generated code has unboxed index/length we need to box them before
// calling the runtime entry.
@ -654,21 +664,22 @@ void StubCodeCompiler::GenerateRangeError(bool with_fpu_regs) {
};
GenerateSharedStubGeneric(
/*save_fpu_registers=*/with_fpu_regs,
assembler, /*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::range_error_shared_with_fpu_regs_stub_offset()
: target::Thread::range_error_shared_without_fpu_regs_stub_offset(),
/*allow_return=*/false, perform_runtime_call);
}
void StubCodeCompiler::GenerateWriteError(bool with_fpu_regs) {
void StubCodeCompiler::GenerateWriteError(Assembler* assembler,
bool with_fpu_regs) {
auto perform_runtime_call = [&]() {
__ CallRuntime(kWriteErrorRuntimeEntry, /*argument_count=*/0);
__ Breakpoint();
};
GenerateSharedStubGeneric(
/*save_fpu_registers=*/with_fpu_regs,
assembler, /*save_fpu_registers=*/with_fpu_regs,
with_fpu_regs
? target::Thread::write_error_shared_with_fpu_regs_stub_offset()
: target::Thread::write_error_shared_without_fpu_regs_stub_offset(),
@ -718,7 +729,7 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
// Mark that the thread is executing native code.
__ movq(Assembler::VMTagAddress(), RBX);
WithExceptionCatchingTrampoline([&]() {
WithExceptionCatchingTrampoline(assembler, [&]() {
// Reserve space for the native arguments structure passed on the stack (the
// outgoing pointer parameter to the native arguments structure is passed in
// RDI) and align frame before entering the C++ world.
@ -770,14 +781,14 @@ static void GenerateCallNativeWithWrapperStub(Assembler* assembler,
__ ret();
}
void StubCodeCompiler::GenerateCallNoScopeNativeStub() {
void StubCodeCompiler::GenerateCallNoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
target::Thread::no_scope_native_wrapper_entry_point_offset()));
}
void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
void StubCodeCompiler::GenerateCallAutoScopeNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -790,7 +801,7 @@ void StubCodeCompiler::GenerateCallAutoScopeNativeStub() {
// RAX : address of first argument in argument array.
// RBX : address of the native function to call.
// R10 : argc_tag including number of arguments and function kind.
void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
GenerateCallNativeWithWrapperStub(
assembler,
Address(THR,
@ -799,7 +810,7 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub() {
// Input parameters:
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub() {
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
// Setup space on stack for return value.
@ -817,7 +828,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub() {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub() {
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -863,7 +874,8 @@ void StubCodeCompiler::GenerateFixCallersTargetStub() {
// Called from object allocate instruction when the allocation stub has been
// disabled.
void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
void StubCodeCompiler::GenerateFixAllocationStubTargetStub(
Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
@ -882,7 +894,8 @@ void StubCodeCompiler::GenerateFixAllocationStubTargetStub() {
// Called from object allocate instruction when the allocation stub for a
// generic class has been disabled.
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub() {
void StubCodeCompiler::GenerateFixParameterizedAllocationStubTargetStub(
Assembler* assembler) {
// Load code pointer to this stub from the thread:
// The one that is passed in, is not correct - it points to the code object
// that needs to be replaced.
@ -1100,7 +1113,8 @@ static void GenerateDeoptimizationSequence(Assembler* assembler,
}
// RAX: result, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub(
Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ pushq(Immediate(kZapCodeReg));
// Return address for "call" to deopt stub.
@ -1113,7 +1127,8 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromReturnStub() {
// RAX: exception, must be preserved
// RDX: stacktrace, must be preserved
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub(
Assembler* assembler) {
// Push zap value instead of CODE_REG for lazy deopt.
__ pushq(Immediate(kZapCodeReg));
// Return address for "call" to deopt stub.
@ -1124,7 +1139,7 @@ void StubCodeCompiler::GenerateDeoptimizeLazyFromThrowStub() {
__ ret();
}
void StubCodeCompiler::GenerateDeoptimizeStub() {
void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
__ popq(TMP);
__ pushq(CODE_REG);
__ pushq(TMP);
@ -1189,7 +1204,8 @@ static void GenerateDispatcherCode(Assembler* assembler,
// Input:
// IC_DATA_REG - icdata/megamorphic_cache
// RDX - receiver
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
__ EnterStubFrame();
__ movq(ARGS_DESC_REG,
@ -1209,7 +1225,7 @@ void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub() {
// AllocateArrayABI::kResultReg: newly allocated array.
// Clobbered:
// RCX, RDI, R12
void StubCodeCompiler::GenerateAllocateArrayStub() {
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
@ -1339,13 +1355,14 @@ void StubCodeCompiler::GenerateAllocateArrayStub() {
// Write-barrier elimination might be enabled for this array (depending on the
// array length). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered();
EnsureIsNewOrRemembered(assembler);
__ LeaveStubFrame();
__ ret();
}
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub(
Assembler* assembler) {
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1357,13 +1374,15 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithFPURegsStub() {
}
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(/*save_fpu_registers=*/true, &kAllocateMintRuntimeEntry,
GenerateSharedStub(assembler, /*save_fpu_registers=*/true,
&kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_with_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
}
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub(
Assembler* assembler) {
// For test purpose call allocation stub without inline allocation attempt.
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1376,7 +1395,7 @@ void StubCodeCompiler::GenerateAllocateMintSharedWithoutFPURegsStub() {
COMPILE_ASSERT(AllocateMintABI::kResultReg ==
SharedSlowPathStubABI::kResultReg);
GenerateSharedStub(
/*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
assembler, /*save_fpu_registers=*/false, &kAllocateMintRuntimeEntry,
target::Thread::allocate_mint_without_fpu_regs_stub_offset(),
/*allow_return=*/true,
/*store_runtime_result_in_result_register=*/true);
@ -1393,7 +1412,7 @@ static const RegisterSet kCalleeSavedRegisterSet(
// RSI : arguments descriptor array.
// RDX : arguments array.
// RCX : current thread.
void StubCodeCompiler::GenerateInvokeDartCodeStub() {
void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ EnterFrame(0);
const Register kTargetReg = CallingConventions::kArg1Reg;
@ -1624,7 +1643,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// RAX: new allocated Context object.
// Clobbered:
// R9, R13
void StubCodeCompiler::GenerateAllocateContextStub() {
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
__ LoadObject(R9, NullObject());
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1677,7 +1696,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
// RAX: new object
// Restore the frame pointer.
@ -1693,7 +1712,7 @@ void StubCodeCompiler::GenerateAllocateContextStub() {
// RAX: new allocated Context object.
// Clobbered:
// R10, R13
void StubCodeCompiler::GenerateCloneContextStub() {
void StubCodeCompiler::GenerateCloneContextStub(Assembler* assembler) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
@ -1750,7 +1769,7 @@ void StubCodeCompiler::GenerateCloneContextStub() {
// Write-barrier elimination might be enabled for this context (depending on
// the size). To be sure we will check if the allocated object is in old
// space and if so call a leaf runtime to add it to the remembered set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
// RAX: new object
// Restore the frame pointer.
@ -1759,7 +1778,7 @@ void StubCodeCompiler::GenerateCloneContextStub() {
__ ret();
}
void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
void StubCodeCompiler::GenerateWriteBarrierWrappersStub(Assembler* assembler) {
for (intptr_t i = 0; i < kNumberOfCpuRegisters; ++i) {
if ((kDartAvailableCpuRegs & (1 << i)) == 0) continue;
@ -1786,7 +1805,8 @@ void StubCodeCompiler::GenerateWriteBarrierWrappersStub() {
COMPILE_ASSERT(kWriteBarrierObjectReg == RDX);
COMPILE_ASSERT(kWriteBarrierValueReg == RAX);
COMPILE_ASSERT(kWriteBarrierSlotReg == R13);
static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
static void GenerateWriteBarrierStubHelper(Assembler* assembler,
bool cards) {
Label add_to_mark_stack, remember_card, lost_race;
__ testq(RAX, Immediate(1 << target::ObjectAlignment::kNewObjectBitPosition));
__ j(ZERO, &add_to_mark_stack);
@ -1936,11 +1956,11 @@ static void GenerateWriteBarrierStubHelper(Assembler* assembler, bool cards) {
}
}
void StubCodeCompiler::GenerateWriteBarrierStub() {
void StubCodeCompiler::GenerateWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(assembler, false);
}
void StubCodeCompiler::GenerateArrayWriteBarrierStub() {
void StubCodeCompiler::GenerateArrayWriteBarrierStub(Assembler* assembler) {
GenerateWriteBarrierStubHelper(assembler, true);
}
@ -2046,15 +2066,16 @@ static void GenerateAllocateObjectHelper(Assembler* assembler,
}
// Called for inline allocation of objects (any class).
void StubCodeCompiler::GenerateAllocateObjectStub() {
void StubCodeCompiler::GenerateAllocateObjectStub(Assembler* assembler) {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/false);
}
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub() {
void StubCodeCompiler::GenerateAllocateObjectParameterizedStub(
Assembler* assembler) {
GenerateAllocateObjectHelper(assembler, /*is_cls_parameterized=*/true);
}
void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
void StubCodeCompiler::GenerateAllocateObjectSlowStub(Assembler* assembler) {
if (!FLAG_precompiled_mode) {
__ movq(CODE_REG,
Address(THR, target::Thread::call_to_runtime_stub_offset()));
@ -2086,7 +2107,7 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
// Write-barrier elimination is enabled for [cls] and we therefore need to
// ensure that the object is in new-space or has remembered bit set.
EnsureIsNewOrRemembered(/*preserve_registers=*/false);
EnsureIsNewOrRemembered(assembler, /*preserve_registers=*/false);
// AllocateObjectABI::kResultReg: new object
// Restore the frame pointer.
@ -2097,6 +2118,7 @@ void StubCodeCompiler::GenerateAllocateObjectSlowStub() {
// Called for inline allocation of objects.
void StubCodeCompiler::GenerateAllocationStubForClass(
Assembler* assembler,
UnresolvedPcRelativeCalls* unresolved_calls,
const Class& cls,
const Code& allocate_object,
@ -2165,7 +2187,8 @@ void StubCodeCompiler::GenerateAllocationStubForClass(
// RSP : points to return address.
// RSP + 8 : address of last argument.
// R10 : arguments descriptor array.
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub(
Assembler* assembler) {
__ EnterStubFrame();
// Load the receiver.
@ -2206,7 +2229,8 @@ void StubCodeCompiler::GenerateCallClosureNoSuchMethodStub() {
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
Assembler* assembler) {
if (FLAG_precompiled_mode) {
__ Breakpoint();
return;
@ -2230,7 +2254,8 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement() {
}
// Loads function into 'temp_reg', preserves IC_DATA_REG.
void StubCodeCompiler::GenerateUsageCounterIncrement(Register temp_reg) {
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
if (FLAG_precompiled_mode) {
__ Breakpoint();
return;
@ -2347,6 +2372,7 @@ static void GenerateRecordEntryPoint(Assembler* assembler) {
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
Assembler* assembler,
intptr_t num_args,
const RuntimeEntry& handle_ic_miss,
Token::Kind kind,
@ -2364,9 +2390,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
}
if (optimized == kOptimized) {
GenerateOptimizedUsageCounterIncrement();
GenerateOptimizedUsageCounterIncrement(assembler);
} else {
GenerateUsageCounterIncrement(/* scratch */ RCX);
GenerateUsageCounterIncrement(assembler, /* scratch */ RCX);
}
ASSERT(num_args == 1 || num_args == 2);
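
The comment block above this stub describes the lookup it performs: walk the ICData entries, compare the recorded class id(s) against the receiver, jump to the cached target on a match, and fall through to the miss handler otherwise. Here is a rough C++ sketch of that control flow, using an invented ICEntry layout rather than the VM's real ICData format.

#include <cstdint>
#include <cstdio>
#include <vector>

struct ICEntry {
  intptr_t receiver_cid;   // class id recorded for this entry
  void (*target)();        // cached call target
};

static void FooTarget() { std::puts("hit: jump to cached target"); }
static void MissHandler() { std::puts("miss: call runtime, update the IC"); }

static void DispatchOneArg(const std::vector<ICEntry>& ic,
                           intptr_t receiver_cid) {
  for (const ICEntry& entry : ic) {   // linear scan over IC entries
    if (entry.receiver_cid == receiver_cid) {
      entry.target();                 // match found -> jump to target
      return;
    }
  }
  MissHandler();                      // match not found -> IC miss
}

int main() {
  std::vector<ICEntry> ic = {{42, &FooTarget}};
  DispatchOneArg(ic, 42);  // hit
  DispatchOneArg(ic, 7);   // miss
  return 0;
}
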
@ -2622,65 +2648,69 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub() {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub() {
void StubCodeCompiler::GenerateOneArgCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kCheckExactness);
}
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub() {
void StubCodeCompiler::GenerateTwoArgsCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateSmiAddInlineCacheStub() {
void StubCodeCompiler::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateSmiLessInlineCacheStub() {
void StubCodeCompiler::GenerateSmiLessInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kLT,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub() {
void StubCodeCompiler::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ, kUnoptimized,
kInstanceCall, kIgnoreExactness);
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ,
kUnoptimized, kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
// RBX: ICData
// RDI: Function
// RSP[0]: return address
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kIgnoreExactness);
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// RDX: receiver
@ -2688,27 +2718,30 @@ void StubCodeCompiler::GenerateOneArgOptimizedCheckInlineCacheStub() {
// RDI: Function
// RSP[0]: return address
void StubCodeCompiler::
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub() {
GenerateOneArgOptimizedCheckInlineCacheWithExactnessCheckStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL, kOptimized,
kInstanceCall, kCheckExactness);
assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kCheckExactness);
}
// RDX: receiver
// RBX: ICData
// RDI: Function
// RSP[0]: return address
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub() {
void StubCodeCompiler::GenerateTwoArgsOptimizedCheckInlineCacheStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kOptimized, kInstanceCall, kIgnoreExactness);
}
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateRecordEntryPoint(assembler);
GenerateUsageCounterIncrement(/* scratch */ RCX);
GenerateUsageCounterIncrement(assembler, /* scratch */ RCX);
#if defined(DEBUG)
{
Label ok;
@ -2785,24 +2818,26 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub() {
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub() {
GenerateNArgsCheckInlineCacheStub(1, kStaticCallMissHandlerOneArgRuntimeEntry,
Token::kILLEGAL, kUnoptimized, kStaticCall,
kIgnoreExactness);
void StubCodeCompiler::GenerateOneArgUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// RBX: ICData
// RSP[0]: return address
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub() {
void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
Assembler* assembler) {
GenerateNArgsCheckInlineCacheStub(
2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL,
kUnoptimized, kStaticCall, kIgnoreExactness);
}
// Stub for compiling a function and jumping to the compiled code.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub() {
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushq(FUNCTION_REG); // Pass function.
@ -2820,7 +2855,7 @@ void StubCodeCompiler::GenerateLazyCompileStub() {
// RBX: Contains an ICData.
// TOS(0): return address (Dart code).
void StubCodeCompiler::GenerateICCallBreakpointStub() {
void StubCodeCompiler::GenerateICCallBreakpointStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2839,7 +2874,8 @@ void StubCodeCompiler::GenerateICCallBreakpointStub() {
#endif // defined(PRODUCT)
}
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub(
Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2857,7 +2893,7 @@ void StubCodeCompiler::GenerateUnoptStaticCallBreakpointStub() {
}
// TOS(0): return address (Dart code).
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
void StubCodeCompiler::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -2873,7 +2909,7 @@ void StubCodeCompiler::GenerateRuntimeCallBreakpointStub() {
}
// Called only from unoptimized code.
void StubCodeCompiler::GenerateDebugStepCheckStub() {
void StubCodeCompiler::GenerateDebugStepCheckStub(Assembler* assembler) {
#if defined(PRODUCT)
__ Stop("No debugging in PRODUCT mode");
#else
@ -3099,22 +3135,22 @@ static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype1TestCacheStub() {
void StubCodeCompiler::GenerateSubtype1TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 1);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype3TestCacheStub() {
void StubCodeCompiler::GenerateSubtype3TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 3);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype5TestCacheStub() {
void StubCodeCompiler::GenerateSubtype5TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 5);
}
// See comment on [GenerateSubtypeNTestCacheStub].
void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
void StubCodeCompiler::GenerateSubtype7TestCacheStub(Assembler* assembler) {
GenerateSubtypeNTestCacheStub(assembler, 7);
}
@ -3122,7 +3158,7 @@ void StubCodeCompiler::GenerateSubtype7TestCacheStub() {
// checks.
// TOS + 0: return address
// Result in RAX.
void StubCodeCompiler::GenerateGetCStackPointerStub() {
void StubCodeCompiler::GenerateGetCStackPointerStub(Assembler* assembler) {
__ leaq(RAX, Address(RSP, target::kWordSize));
__ ret();
}
@ -3134,7 +3170,7 @@ void StubCodeCompiler::GenerateGetCStackPointerStub() {
// Arg3: frame_pointer
// Arg4: thread
// No Result.
void StubCodeCompiler::GenerateJumpToFrameStub() {
void StubCodeCompiler::GenerateJumpToFrameStub(Assembler* assembler) {
__ movq(THR, CallingConventions::kArg4Reg);
__ movq(RBP, CallingConventions::kArg3Reg);
__ movq(RSP, CallingConventions::kArg2Reg);
@ -3175,7 +3211,7 @@ void StubCodeCompiler::GenerateJumpToFrameStub() {
//
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
void StubCodeCompiler::GenerateRunExceptionHandlerStub(Assembler* assembler) {
ASSERT(kExceptionObjectReg == RAX);
ASSERT(kStackTraceObjectReg == RDX);
__ movq(CallingConventions::kArg1Reg,
@ -3202,7 +3238,7 @@ void StubCodeCompiler::GenerateRunExceptionHandlerStub() {
// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCodeCompiler::GenerateDeoptForRewindStub() {
void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Push zap value instead of CODE_REG.
__ pushq(Immediate(kZapCodeReg));
@ -3223,7 +3259,7 @@ void StubCodeCompiler::GenerateDeoptForRewindStub() {
// Calls to the runtime to optimize the given function.
// RDI: function to be reoptimized.
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub() {
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ movq(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
__ pushq(ARGS_DESC_REG); // Preserve args descriptor.
@ -3287,7 +3323,8 @@ static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
#if !defined(PRODUCT)
// Check single stepping.
Label stepping, done_stepping;
@ -3321,7 +3358,8 @@ void StubCodeCompiler::GenerateUnoptimizedIdenticalWithNumberCheckStub() {
// TOS + 1: right argument.
// TOS + 2: left argument.
// Returns ZF set.
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
Assembler* assembler) {
const Register left = RAX;
const Register right = RDX;
@ -3338,7 +3376,7 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub() {
// FUNCTION_REG: target function
// CODE_REG: target Code
// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub() {
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
__ testq(RDX, Immediate(kSmiTagMask));
@ -3413,13 +3451,13 @@ void StubCodeCompiler::GenerateMegamorphicCallStub() {
__ jmp(&cid_loaded);
__ Bind(&miss);
GenerateSwitchableCallMissStub();
GenerateSwitchableCallMissStub(assembler);
}
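
The "Jump if receiver is a smi" test at the top of this stub relies on the VM's small-integer tagging: a word whose low tag bit is clear is a Smi, anything else is a heap object whose class id must be loaded. A hedged sketch of that tagging scheme follows, with illustrative constants; the real masks and shifts live in the VM headers.

#include <cstdint>
#include <cstdio>

// Illustrative constants: a 1-bit tag where 0 marks a small integer.
constexpr uintptr_t kSmiTagMask = 1;
constexpr int kSmiTagShift = 1;

static bool IsSmi(uintptr_t tagged) { return (tagged & kSmiTagMask) == 0; }
static uintptr_t TagSmi(intptr_t value) {
  return static_cast<uintptr_t>(value) << kSmiTagShift;
}
static intptr_t UntagSmi(uintptr_t tagged) {
  return static_cast<intptr_t>(tagged) >> kSmiTagShift;
}

int main() {
  uintptr_t tagged = TagSmi(21);
  std::printf("IsSmi=%d value=%ld\n", IsSmi(tagged) ? 1 : 0,
              static_cast<long>(UntagSmi(tagged)));
  return 0;
}
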
// Input:
// IC_DATA_REG - icdata
// RDX - receiver object
void StubCodeCompiler::GenerateICCallThroughCodeStub() {
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
__ movq(R13, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ movq(ARGS_DESC_REG,
@ -3465,7 +3503,8 @@ void StubCodeCompiler::GenerateICCallThroughCodeStub() {
__ jmp(RCX);
}
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub(
Assembler* assembler) {
Label have_cid, miss;
__ movq(RAX, Immediate(kSmiCid));
@ -3488,7 +3527,7 @@ void StubCodeCompiler::GenerateMonomorphicSmiableCheckStub() {
// Called from switchable IC calls.
// RDX: receiver
void StubCodeCompiler::GenerateSwitchableCallMissStub() {
void StubCodeCompiler::GenerateSwitchableCallMissStub(Assembler* assembler) {
__ movq(CODE_REG,
Address(THR, target::Thread::switchable_call_miss_stub_offset()));
__ EnterStubFrame();
@ -3515,7 +3554,7 @@ void StubCodeCompiler::GenerateSwitchableCallMissStub() {
// RBX: SingleTargetCache
// Passed to target::
// CODE_REG: target Code object
void StubCodeCompiler::GenerateSingleTargetCallStub() {
void StubCodeCompiler::GenerateSingleTargetCallStub(Assembler* assembler) {
Label miss;
__ LoadClassIdMayBeSmi(RAX, RDX);
__ movzxw(R9,
@ -3569,7 +3608,8 @@ static ScaleFactor GetScaleFactor(intptr_t size) {
return static_cast<ScaleFactor>(0);
}
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(intptr_t cid) {
void StubCodeCompiler::GenerateAllocateTypedDataArrayStub(Assembler* assembler,
intptr_t cid) {
const intptr_t element_size = TypedDataElementSizeInBytes(cid);
const intptr_t max_len = TypedDataMaxNewSpaceElements(cid);
ScaleFactor scale_factor = GetScaleFactor(element_size);



@ -60,8 +60,8 @@ void NativeCallbackTrampolines::AllocateTrampoline() {
trampoline_pages_.Add(memory);
compiler::Assembler assembler(/*object_pool_builder=*/nullptr);
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
stubCodeCompiler.GenerateJITCallbackTrampolines(next_callback_id_);
compiler::StubCodeCompiler::GenerateJITCallbackTrampolines(
&assembler, next_callback_id_);
MemoryRegion region(memory->address(), memory->size());
assembler.FinalizeInstructions(region);


@ -8193,16 +8193,10 @@ static CodePtr CreateInvokeInstantiateTypeArgumentsStub(Thread* thread) {
zone, Function::New(signature, symbol, UntaggedFunction::kRegularFunction,
false, false, false, false, false, klass,
TokenPosition::kNoSource));
compiler::ObjectPoolBuilder pool_builder;
SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
compiler::Assembler assembler(&pool_builder);
GenerateInvokeInstantiateTAVStub(&assembler);
const Code& invoke_instantiate_tav = Code::Handle(
Code::FinalizeCodeAndNotify("InstantiateTAV", nullptr, &assembler,
Code::PoolAttachment::kNotAttachPool,
/*optimized=*/false));
const auto& invoke_instantiate_tav =
Code::Handle(zone, StubCode::Generate("InstantiateTAV", &pool_builder,
&GenerateInvokeInstantiateTAVStub));
const auto& pool =
ObjectPool::Handle(zone, ObjectPool::NewFromBuilder(pool_builder));
invoke_instantiate_tav.set_object_pool(pool.ptr());


@ -29,7 +29,7 @@ StubCode::StubCodeEntry StubCode::entries_[kNumStubEntries] = {
#define STUB_CODE_DECLARE(name) {nullptr, #name},
#else
#define STUB_CODE_DECLARE(name) \
{nullptr, #name, &compiler::StubCodeCompiler::Generate##name##Stub},
{nullptr, #name, compiler::StubCodeCompiler::Generate##name##Stub},
#endif
VM_STUB_CODE_LIST(STUB_CODE_DECLARE)
#undef STUB_CODE_DECLARE
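
The STUB_CODE_DECLARE hunk above is the usual X-macro pattern: VM_STUB_CODE_LIST applies the macro to every stub name, and the hunk swaps a &-qualified pointer-to-member for a plain static function name, which decays to an ordinary function pointer. A small self-contained sketch of the pattern, with made-up stub names and a toy Assembler:

#include <cstdio>

struct Assembler {};

static void GenerateFooStub(Assembler*) { std::puts("generate Foo"); }
static void GenerateBarStub(Assembler*) { std::puts("generate Bar"); }

// X-macro list: V is applied to every stub name.
#define EXAMPLE_STUB_LIST(V) \
  V(Foo)                     \
  V(Bar)

struct StubEntry {
  const char* name;
  void (*generator)(Assembler* assembler);
};

// Token pasting turns each name into {"Foo", GenerateFooStub}, ...
#define DECLARE_ENTRY(name) {#name, Generate##name##Stub},
static StubEntry entries[] = {EXAMPLE_STUB_LIST(DECLARE_ENTRY)};
#undef DECLARE_ENTRY

int main() {
  Assembler assembler;
  for (const StubEntry& e : entries) {
    std::printf("%s: ", e.name);
    e.generator(&assembler);
  }
  return 0;
}
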
@ -91,15 +91,15 @@ void StubCode::Init() {
#undef STUB_CODE_GENERATE
#undef STUB_CODE_SET_OBJECT_POOL
CodePtr StubCode::Generate(const char* name,
compiler::ObjectPoolBuilder* object_pool_builder,
void (compiler::StubCodeCompiler::*GenerateStub)()) {
CodePtr StubCode::Generate(
const char* name,
compiler::ObjectPoolBuilder* object_pool_builder,
void (*GenerateStub)(compiler::Assembler* assembler)) {
auto thread = Thread::Current();
SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
compiler::Assembler assembler(object_pool_builder);
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
(stubCodeCompiler.*GenerateStub)();
GenerateStub(&assembler);
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
name, nullptr, &assembler, Code::PoolAttachment::kNotAttachPool,
/*optimized=*/false));
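
The hunk above changes how StubCode::Generate invokes a generator: one side holds a pointer-to-member-function and calls it with the .* operator on a StubCodeCompiler instance, the other holds an ordinary function pointer and passes the assembler explicitly. A minimal sketch of the two call styles, with illustrative names only:

#include <cstdio>

struct Assembler {};

// Style 1: a static/free generator taking the assembler explicitly.
static void GenerateFooStub(Assembler* assembler) {
  (void)assembler;
  std::puts("free function generator");
}

// Style 2: an instance method; the assembler is a field and the generator is
// reached through a pointer-to-member-function.
struct StubCodeCompiler {
  explicit StubCodeCompiler(Assembler* a) : assembler(a) {}
  Assembler* assembler;
  void GenerateFooStub() { std::puts("member function generator"); }
};

int main() {
  Assembler assembler;

  // Plain function pointer, as stored in StubCodeEntry in style 1.
  void (*generate)(Assembler*) = GenerateFooStub;
  generate(&assembler);

  // Pointer-to-member, as in style 2; note the .* call syntax.
  void (StubCodeCompiler::*member_generate)() =
      &StubCodeCompiler::GenerateFooStub;
  StubCodeCompiler compiler(&assembler);
  (compiler.*member_generate)();
  return 0;
}
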
@ -221,9 +221,8 @@ CodePtr StubCode::GetAllocationStubForClass(const Class& cls) {
compiler::Assembler assembler(wrapper);
compiler::UnresolvedPcRelativeCalls unresolved_calls;
const char* name = cls.ToCString();
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
stubCodeCompiler.GenerateAllocationStubForClass(
&unresolved_calls, cls, allocate_object_stub,
compiler::StubCodeCompiler::GenerateAllocationStubForClass(
&assembler, &unresolved_calls, cls, allocate_object_stub,
allocate_object_parametrized_stub);
const auto& static_calls_table =
@ -317,9 +316,8 @@ CodePtr StubCode::GetBuildMethodExtractorStub(compiler::ObjectPoolBuilder* pool,
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(pool != nullptr ? pool : &object_pool_builder);
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
stubCodeCompiler.GenerateBuildMethodExtractorStub(
closure_allocation_stub, context_allocation_stub, generic);
compiler::StubCodeCompiler::GenerateBuildMethodExtractorStub(
&assembler, closure_allocation_stub, context_allocation_stub, generic);
const char* name = generic ? "BuildGenericMethodExtractor"
: "BuildNonGenericMethodExtractor";


@ -83,7 +83,7 @@ class StubCode : public AllStatic {
// code executable area.
static CodePtr Generate(const char* name,
compiler::ObjectPoolBuilder* object_pool_builder,
void (compiler::StubCodeCompiler::*GenerateStub)());
void (*GenerateStub)(compiler::Assembler* assembler));
#endif // !defined(DART_PRECOMPILED_RUNTIME)
static const Code& UnoptimizedStaticCallEntry(intptr_t num_args_tested);
@ -104,7 +104,7 @@ class StubCode : public AllStatic {
compiler::ObjectPoolBuilder* opw) { \
return StubCode::Generate( \
"_iso_stub_" #name, opw, \
&compiler::StubCodeCompiler::Generate##name##Stub); \
compiler::StubCodeCompiler::Generate##name##Stub); \
}
VM_STUB_CODE_LIST(GENERATE_STUB);
#undef GENERATE_STUB
@ -127,7 +127,7 @@ class StubCode : public AllStatic {
Code* code;
const char* name;
#if !defined(DART_PRECOMPILED_RUNTIME)
void (compiler::StubCodeCompiler::*generator)();
void (*generator)(compiler::Assembler* assembler);
#endif
};
static StubCodeEntry entries_[kNumStubEntries];


@ -479,15 +479,10 @@ class TTSTestState : public ValueObject {
zone, Function::New(
signature, symbol, UntaggedFunction::kRegularFunction, false,
false, false, false, false, klass, TokenPosition::kNoSource));
compiler::ObjectPoolBuilder pool_builder;
SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
compiler::Assembler assembler(&pool_builder);
GenerateInvokeTTSStub(&assembler);
const Code& invoke_tts = Code::Handle(Code::FinalizeCodeAndNotify(
"InvokeTTS", nullptr, &assembler, Code::PoolAttachment::kNotAttachPool,
/*optimized=*/false));
const auto& invoke_tts = Code::Handle(
zone,
StubCode::Generate("InvokeTTS", &pool_builder, &GenerateInvokeTTSStub));
const auto& pool =
ObjectPool::Handle(zone, ObjectPool::NewFromBuilder(pool_builder));
invoke_tts.set_object_pool(pool.ptr());