[vm] Introduce FUNCTION_REG constant

This refactoring introduces a FUNCTION_REG constant for the register that
holds the target function object when calling Dart functions in JIT mode.
The register is similar to CODE_REG and ARGS_DESC_REG, which are also part
of the Dart calling conventions.

Hardcoded registers are replaced with the new constant where appropriate.
In addition, ARGS_DESC_REG and IC_DATA_REG now replace hardcoded registers
in more places.

TEST=ci (pure refactoring)

Change-Id: I9e71022d7bca8d4e555b9e4f22558f388073495f
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/243681
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Alexander Markov <alexmarkov@google.com>
Authored by Alexander Markov on 2022-05-04 22:59:24 +00:00; committed by Commit Bot
parent 71b41199a5
commit c3e0d770dd
25 changed files with 492 additions and 415 deletions
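
For orientation before the per-file diffs: the block below is a minimal
sketch of what the new constants resolve to on each architecture, inferred
from the hardcoded registers the hunks below replace. It is illustrative
only; the authoritative definitions live in the VM's per-architecture
constants headers (assumed to be runtime/vm/constants_<arch>.h), not in
this commit.

// Sketch only: assignments inferred from the registers replaced below;
// see runtime/vm/constants_<arch>.h for the real definitions.
#if defined(TARGET_ARCH_ARM)
const Register FUNCTION_REG = R0;   // Target function object.
const Register ARGS_DESC_REG = R4;  // Arguments descriptor array.
const Register IC_DATA_REG = R9;    // ICData / MegamorphicCache.
#elif defined(TARGET_ARCH_ARM64)
const Register FUNCTION_REG = R0;
const Register ARGS_DESC_REG = R4;
const Register IC_DATA_REG = R5;
#elif defined(TARGET_ARCH_IA32)
const Register FUNCTION_REG = EAX;
const Register ARGS_DESC_REG = EDX;
const Register IC_DATA_REG = ECX;
#elif defined(TARGET_ARCH_X64)
const Register FUNCTION_REG = RAX;
const Register ARGS_DESC_REG = R10;
const Register IC_DATA_REG = RBX;
#elif defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
const Register FUNCTION_REG = T0;
const Register ARGS_DESC_REG = S4;
const Register IC_DATA_REG = S5;
#endif

Note that the ClosureCallInstr hunks keep the AOT input pinned to the
original register (R0/RAX/T0, a closure with a cached entry point) and use
FUNCTION_REG only for the JIT path, where the input is the Function object.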

@ -1863,16 +1863,16 @@ void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
__ LoadClassId(R1, R1);
__ AddImmediate(R1, -kOneByteStringCid);
__ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2));
__ ldr(R0, FieldAddress(R1, target::RegExp::function_offset(kOneByteStringCid,
sticky)));
__ ldr(FUNCTION_REG, FieldAddress(R1, target::RegExp::function_offset(
kOneByteStringCid, sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
// in R0, the argument descriptor in R4, and IC-Data in R9.
__ eor(R9, R9, Operand(R9));
// Tail-call the function.
__ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
__ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
__ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,

@ -2105,7 +2105,8 @@ void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
#else
__ add(R1, R2, Operand(R1, LSL, target::kWordSizeLog2 - 1));
#endif
__ LoadCompressed(R0, FieldAddress(R1, target::RegExp::function_offset(
__ LoadCompressed(FUNCTION_REG,
FieldAddress(R1, target::RegExp::function_offset(
kOneByteStringCid, sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
@ -2113,9 +2114,10 @@ void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
__ eor(R5, R5, Operand(R5));
// Tail-call the function.
__ LoadCompressed(CODE_REG,
FieldAddress(R0, target::Function::code_offset()));
__ ldr(R1, FieldAddress(R0, target::Function::entry_point_offset()));
__ LoadCompressed(
CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ ldr(R1,
FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ br(R1);
}

@ -1888,16 +1888,16 @@ void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
__ movl(EDI, Address(ESP, kStringParamOffset));
__ LoadClassId(EDI, EDI);
__ SubImmediate(EDI, Immediate(kOneByteStringCid));
__ movl(EAX, FieldAddress(
EBX, EDI, TIMES_4,
target::RegExp::function_offset(kOneByteStringCid, sticky)));
__ movl(FUNCTION_REG, FieldAddress(EBX, EDI, TIMES_4,
target::RegExp::function_offset(
kOneByteStringCid, sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
// in EAX, the argument descriptor in EDX, and IC-Data in ECX.
__ xorl(ECX, ECX);
// Tail-call the function.
__ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
__ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
void AsmIntrinsifier::UserTag_defaultTag(Assembler* assembler,

@ -1770,16 +1770,16 @@ void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
__ AddImmediate(T1, -kOneByteStringCid);
__ slli(T1, T1, target::kWordSizeLog2);
__ add(T1, T1, T2);
__ lx(T0, FieldAddress(T1, target::RegExp::function_offset(kOneByteStringCid,
sticky)));
__ lx(FUNCTION_REG, FieldAddress(T1, target::RegExp::function_offset(
kOneByteStringCid, sticky)));
// Registers are now set up for the lazy compile stub. It expects the function
// in T0, the argument descriptor in S4, and IC-Data in S5.
__ li(S5, 0);
// Tail-call the function.
__ lx(CODE_REG, FieldAddress(T0, target::Function::code_offset()));
__ lx(T1, FieldAddress(T0, target::Function::entry_point_offset()));
__ lx(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ lx(T1, FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ jr(T1);
}

@ -1976,11 +1976,11 @@ void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
__ LoadClassId(RDI, RDI);
__ SubImmediate(RDI, Immediate(kOneByteStringCid));
#if !defined(DART_COMPRESSED_POINTERS)
__ movq(RAX, FieldAddress(
RBX, RDI, TIMES_8,
target::RegExp::function_offset(kOneByteStringCid, sticky)));
__ movq(FUNCTION_REG, FieldAddress(RBX, RDI, TIMES_8,
target::RegExp::function_offset(
kOneByteStringCid, sticky)));
#else
__ LoadCompressed(RAX, FieldAddress(RBX, RDI, TIMES_4,
__ LoadCompressed(FUNCTION_REG, FieldAddress(RBX, RDI, TIMES_4,
target::RegExp::function_offset(
kOneByteStringCid, sticky)));
#endif
@ -1990,9 +1990,10 @@ void AsmIntrinsifier::IntrinsifyRegExpExecuteMatch(Assembler* assembler,
__ xorq(RCX, RCX);
// Tail-call the function.
__ LoadCompressed(CODE_REG,
FieldAddress(RAX, target::Function::code_offset()));
__ movq(RDI, FieldAddress(RAX, target::Function::entry_point_offset()));
__ LoadCompressed(
CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ movq(RDI,
FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ jmp(RDI);
}

@ -522,7 +522,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(
__ LoadObject(R8, parsed_function().function());
__ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
__ LoadUniqueObject(R9, ic_data);
__ LoadUniqueObject(IC_DATA_REG, ic_data);
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
@ -539,7 +539,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
entry_kind == Code::EntryKind::kUnchecked);
ASSERT(Array::Handle(zone(), ic_data.arguments_descriptor()).Length() > 0);
__ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
__ LoadUniqueObject(R9, ic_data);
__ LoadUniqueObject(IC_DATA_REG, ic_data);
__ LoadUniqueObject(CODE_REG, stub);
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
@ -573,10 +573,10 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
// The AOT runtime will replace the slot in the object pool with the
// entrypoint address - see app_snapshot.cc.
CLOBBERS_LR(__ LoadUniqueObject(LR, StubCode::MegamorphicCall()));
__ LoadUniqueObject(R9, cache);
__ LoadUniqueObject(IC_DATA_REG, cache);
CLOBBERS_LR(__ blx(LR));
} else {
__ LoadUniqueObject(R9, cache);
__ LoadUniqueObject(IC_DATA_REG, cache);
__ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
__ Call(compiler::FieldAddress(
CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
@ -672,10 +672,10 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.PrologueNeedsArgumentsDescriptor()) {
__ LoadObject(R4, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
} else {
if (!FLAG_precompiled_mode) {
__ LoadImmediate(R4, 0); // GC safe smi zero because of stub.
__ LoadImmediate(ARGS_DESC_REG, 0); // GC safe smi zero because of stub.
}
}
// Do not use the code from the function, but let the code be patched so that
@ -815,7 +815,7 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
// Load receiver into R0.
__ LoadFromOffset(
R0, SP, (count_without_type_args - 1) * compiler::target::kWordSize);
__ LoadObject(R4, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,

@ -505,7 +505,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(
__ LoadObject(R6, parsed_function().function());
__ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
__ LoadUniqueObject(R5, ic_data);
__ LoadUniqueObject(IC_DATA_REG, ic_data);
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
@ -529,7 +529,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
const intptr_t stub_index =
op.AddObject(stub, ObjectPool::Patchability::kPatchable);
ASSERT((ic_data_index + 1) == stub_index);
__ LoadDoubleWordFromPoolIndex(R5, CODE_REG, ic_data_index);
__ LoadDoubleWordFromPoolIndex(IC_DATA_REG, CODE_REG, ic_data_index);
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
@ -567,9 +567,9 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
if (FLAG_precompiled_mode) {
// The AOT runtime will replace the slot in the object pool with the
// entrypoint address - see app_snapshot.cc.
CLOBBERS_LR(__ LoadDoubleWordFromPoolIndex(R5, LR, data_index));
CLOBBERS_LR(__ LoadDoubleWordFromPoolIndex(IC_DATA_REG, LR, data_index));
} else {
__ LoadDoubleWordFromPoolIndex(R5, CODE_REG, data_index);
__ LoadDoubleWordFromPoolIndex(IC_DATA_REG, CODE_REG, data_index);
CLOBBERS_LR(__ ldr(LR, compiler::FieldAddress(
CODE_REG, Code::entry_point_offset(
Code::EntryKind::kMonomorphic))));
@ -673,10 +673,10 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.PrologueNeedsArgumentsDescriptor()) {
__ LoadObject(R4, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
} else {
if (!FLAG_precompiled_mode) {
__ LoadImmediate(R4, 0); // GC safe smi zero because of stub.
__ LoadImmediate(ARGS_DESC_REG, 0); // GC safe smi zero because of stub.
}
}
// Do not use the code from the function, but let the code be patched so that
@ -823,7 +823,7 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
__ Comment("EmitTestAndCall");
// Load receiver into R0.
__ LoadFromOffset(R0, SP, (count_without_type_args - 1) * kWordSize);
__ LoadObject(R4, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,

@ -555,7 +555,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(
// Load receiver into EBX.
__ movl(EBX, compiler::Address(
ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
__ LoadObject(ECX, ic_data);
__ LoadObject(IC_DATA_REG, ic_data);
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
@ -574,7 +574,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
// Load receiver into EBX.
__ movl(EBX, compiler::Address(
ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
__ LoadObject(ECX, ic_data, true);
__ LoadObject(IC_DATA_REG, ic_data, true);
__ LoadObject(CODE_REG, stub, true);
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
@ -602,7 +602,7 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
__ Comment("MegamorphicCall");
// Load receiver into EBX.
__ movl(EBX, compiler::Address(ESP, (args_desc.Count() - 1) * kWordSize));
__ LoadObject(ECX, cache, true);
__ LoadObject(IC_DATA_REG, cache, true);
__ LoadObject(CODE_REG, StubCode::MegamorphicCall(), true);
__ call(compiler::FieldAddress(
CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
@ -643,9 +643,9 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
if (function.PrologueNeedsArgumentsDescriptor()) {
__ LoadObject(EDX, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
} else {
__ xorl(EDX, EDX); // GC safe smi zero because of stub.
__ xorl(ARGS_DESC_REG, ARGS_DESC_REG); // GC safe smi zero because of stub.
}
// Do not use the code from the function, but let the code be patched so that
// we can record the outgoing edges to other code.
@ -810,7 +810,7 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
// Load receiver into EAX.
__ movl(EAX,
compiler::Address(ESP, (count_without_type_args - 1) * kWordSize));
__ LoadObject(EDX, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,

@ -525,7 +525,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(
// Load receiver into RDX.
__ movq(RDX, compiler::Address(
RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
__ LoadUniqueObject(RBX, ic_data);
__ LoadUniqueObject(IC_DATA_REG, ic_data);
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs(), RCX);
@ -544,7 +544,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
// Load receiver into RDX.
__ movq(RDX, compiler::Address(
RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
__ LoadUniqueObject(RBX, ic_data);
__ LoadUniqueObject(IC_DATA_REG, ic_data);
__ LoadUniqueObject(CODE_REG, stub);
const intptr_t entry_point_offset =
entry_kind == Code::EntryKind::kNormal
@ -577,10 +577,10 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
// The AOT runtime will replace the slot in the object pool with the
// entrypoint address - see app_snapshot.cc.
__ LoadUniqueObject(RCX, StubCode::MegamorphicCall());
__ LoadUniqueObject(RBX, cache);
__ LoadUniqueObject(IC_DATA_REG, cache);
__ call(RCX);
} else {
__ LoadUniqueObject(RBX, cache);
__ LoadUniqueObject(IC_DATA_REG, cache);
__ LoadUniqueObject(CODE_REG, StubCode::MegamorphicCall());
__ call(compiler::FieldAddress(
CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
@ -656,10 +656,11 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
ASSERT(CanCallDart());
ASSERT(!function.IsClosureFunction());
if (function.PrologueNeedsArgumentsDescriptor()) {
__ LoadObject(R10, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
} else {
if (!FLAG_precompiled_mode) {
__ xorl(R10, R10); // GC safe smi zero because of stub.
__ xorl(ARGS_DESC_REG,
ARGS_DESC_REG); // GC safe smi zero because of stub.
}
}
// Do not use the code from the function, but let the code be patched so that
@ -792,7 +793,7 @@ void FlowGraphCompiler::EmitTestAndCallLoadReceiver(
// Load receiver into RAX.
__ movq(RAX,
compiler::Address(RSP, (count_without_type_args - 1) * kWordSize));
__ LoadObject(R10, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
}
void FlowGraphCompiler::EmitTestAndCallSmiBranch(compiler::Label* label,

@ -593,36 +593,40 @@ LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(R0)); // Function.
summary->set_in(
0, Location::RegisterLocation(FLAG_precompiled_mode ? R0 : FUNCTION_REG));
return MakeCallSummary(zone, this, summary);
}
void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Load arguments descriptor in R4.
// Load arguments descriptor in ARGS_DESC_REG.
const intptr_t argument_count = ArgumentCount(); // Includes type args.
const Array& arguments_descriptor =
Array::ZoneHandle(Z, GetArgumentsDescriptor());
__ LoadObject(R4, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
ASSERT(locs()->in(0).reg() == R0);
if (FLAG_precompiled_mode) {
ASSERT(locs()->in(0).reg() == R0);
// R0: Closure with a cached entry point.
__ ldr(R2, compiler::FieldAddress(
R0, compiler::target::Closure::entry_point_offset()));
} else {
// R0: Function.
__ ldr(CODE_REG, compiler::FieldAddress(
R0, compiler::target::Function::code_offset()));
ASSERT(locs()->in(0).reg() == FUNCTION_REG);
// FUNCTION_REG: Function.
__ ldr(CODE_REG,
compiler::FieldAddress(FUNCTION_REG,
compiler::target::Function::code_offset()));
// Closure functions only have one entry point.
__ ldr(R2, compiler::FieldAddress(
R0, compiler::target::Function::entry_point_offset()));
__ ldr(R2,
compiler::FieldAddress(
FUNCTION_REG, compiler::target::Function::entry_point_offset()));
}
// R4: Arguments descriptor array.
// ARGS_DESC_REG: Arguments descriptor array.
// R2: instructions entry point.
if (!FLAG_precompiled_mode) {
// R9: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
__ LoadImmediate(R9, 0);
__ LoadImmediate(IC_DATA_REG, 0);
}
__ blx(R2);
compiler->EmitCallsiteMetadata(source(), deopt_id(),

@ -517,36 +517,38 @@ LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(R0)); // Function.
summary->set_in(
0, Location::RegisterLocation(FLAG_precompiled_mode ? R0 : FUNCTION_REG));
return MakeCallSummary(zone, this, summary);
}
void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Load arguments descriptor in R4.
// Load arguments descriptor in ARGS_DESC_REG.
const intptr_t argument_count = ArgumentCount(); // Includes type args.
const Array& arguments_descriptor =
Array::ZoneHandle(Z, GetArgumentsDescriptor());
__ LoadObject(R4, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
ASSERT(locs()->in(0).reg() == R0);
if (FLAG_precompiled_mode) {
ASSERT(locs()->in(0).reg() == R0);
// R0: Closure with a cached entry point.
__ LoadFieldFromOffset(R2, R0,
compiler::target::Closure::entry_point_offset());
} else {
// R0: Function.
__ LoadCompressedFieldFromOffset(CODE_REG, R0,
ASSERT(locs()->in(0).reg() == FUNCTION_REG);
// FUNCTION_REG: Function.
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
compiler::target::Function::code_offset());
// Closure functions only have one entry point.
__ LoadFieldFromOffset(R2, R0,
__ LoadFieldFromOffset(R2, FUNCTION_REG,
compiler::target::Function::entry_point_offset());
}
// R4: Arguments descriptor array.
// ARGS_DESC_REG: Arguments descriptor array.
// R2: instructions entry point.
if (!FLAG_precompiled_mode) {
// R5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
__ LoadImmediate(R5, 0);
__ LoadImmediate(IC_DATA_REG, 0);
}
__ blr(R2);
compiler->EmitCallsiteMetadata(source(), deopt_id(),

@ -6580,7 +6580,7 @@ LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(EAX)); // Function.
summary->set_in(0, Location::RegisterLocation(FUNCTION_REG)); // Function.
summary->set_out(0, Location::RegisterLocation(EAX));
return summary;
}
@ -6590,16 +6590,17 @@ void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t argument_count = ArgumentCount(); // Includes type args.
const Array& arguments_descriptor =
Array::ZoneHandle(Z, GetArgumentsDescriptor());
__ LoadObject(EDX, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
// EBX: Code (compiled code or lazy compile stub).
ASSERT(locs()->in(0).reg() == EAX);
__ movl(EBX, compiler::FieldAddress(EAX, Function::entry_point_offset()));
ASSERT(locs()->in(0).reg() == FUNCTION_REG);
__ movl(EBX,
compiler::FieldAddress(FUNCTION_REG, Function::entry_point_offset()));
// EAX: Function.
// EDX: Arguments descriptor array.
// FUNCTION_REG: Function.
// ARGS_DESC_REG: Arguments descriptor array.
// ECX: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
__ xorl(ECX, ECX);
__ xorl(IC_DATA_REG, IC_DATA_REG);
__ call(EBX);
compiler->EmitCallsiteMetadata(source(), deopt_id(),
UntaggedPcDescriptors::kOther, locs(), env());

@ -570,33 +570,35 @@ LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(T0)); // Function.
summary->set_in(
0, Location::RegisterLocation(FLAG_precompiled_mode ? T0 : FUNCTION_REG));
return MakeCallSummary(zone, this, summary);
}
void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Load arguments descriptor in S4.
// Load arguments descriptor in ARGS_DESC_REG.
const intptr_t argument_count = ArgumentCount(); // Includes type args.
const Array& arguments_descriptor =
Array::ZoneHandle(Z, GetArgumentsDescriptor());
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
ASSERT(locs()->in(0).reg() == T0);
if (FLAG_precompiled_mode) {
ASSERT(locs()->in(0).reg() == T0);
// T0: Closure with a cached entry point.
__ LoadFieldFromOffset(A1, T0,
compiler::target::Closure::entry_point_offset());
} else {
// T0: Function.
__ LoadCompressedFieldFromOffset(CODE_REG, T0,
ASSERT(locs()->in(0).reg() == FUNCTION_REG);
// FUNCTION_REG: Function.
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
compiler::target::Function::code_offset());
// Closure functions only have one entry point.
__ LoadFieldFromOffset(A1, T0,
__ LoadFieldFromOffset(A1, FUNCTION_REG,
compiler::target::Function::entry_point_offset());
}
// T0: Function (argument to lazy compile stub)
// S4: Arguments descriptor array.
// FUNCTION_REG: Function (argument to lazy compile stub)
// ARGS_DESC_REG: Arguments descriptor array.
// A1: instructions entry point.
if (!FLAG_precompiled_mode) {
// S5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).

@ -6916,37 +6916,40 @@ LocationSummary* ClosureCallInstr::MakeLocationSummary(Zone* zone,
const intptr_t kNumTemps = 0;
LocationSummary* summary = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kCall);
summary->set_in(0, Location::RegisterLocation(RAX)); // Function.
summary->set_in(0, Location::RegisterLocation(
FLAG_precompiled_mode ? RAX : FUNCTION_REG));
return MakeCallSummary(zone, this, summary);
}
void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Arguments descriptor is expected in R10.
// Arguments descriptor is expected in ARGS_DESC_REG.
const intptr_t argument_count = ArgumentCount(); // Includes type args.
const Array& arguments_descriptor =
Array::ZoneHandle(Z, GetArgumentsDescriptor());
__ LoadObject(R10, arguments_descriptor);
__ LoadObject(ARGS_DESC_REG, arguments_descriptor);
ASSERT(locs()->in(0).reg() == RAX);
if (FLAG_precompiled_mode) {
ASSERT(locs()->in(0).reg() == RAX);
// RAX: Closure with cached entry point.
__ movq(RCX, compiler::FieldAddress(
RAX, compiler::target::Closure::entry_point_offset()));
} else {
// RAX: Function.
ASSERT(locs()->in(0).reg() == FUNCTION_REG);
// FUNCTION_REG: Function.
__ LoadCompressed(
CODE_REG,
compiler::FieldAddress(RAX, compiler::target::Function::code_offset()));
CODE_REG, compiler::FieldAddress(
FUNCTION_REG, compiler::target::Function::code_offset()));
// Closure functions only have one entry point.
__ movq(RCX, compiler::FieldAddress(
RAX, compiler::target::Function::entry_point_offset()));
FUNCTION_REG,
compiler::target::Function::entry_point_offset()));
}
// R10: Arguments descriptor array.
// ARGS_DESC_REG: Arguments descriptor array.
// RCX: instructions entry point.
if (!FLAG_precompiled_mode) {
// RBX: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
__ xorq(RBX, RBX);
__ xorq(IC_DATA_REG, IC_DATA_REG);
}
__ call(RCX);
compiler->EmitCallsiteMetadata(source(), deopt_id(),

@ -51,7 +51,6 @@ void StubCodeCompiler::GenerateInitStaticFieldStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInitLateStaticFieldStub(Assembler* assembler,
bool is_final) {
const Register kResultReg = InitStaticFieldABI::kResultReg;
const Register kFunctionReg = InitLateStaticFieldInternalRegs::kFunctionReg;
const Register kFieldReg = InitStaticFieldABI::kFieldReg;
const Register kAddressReg = InitLateStaticFieldInternalRegs::kAddressReg;
const Register kScratchReg = InitLateStaticFieldInternalRegs::kScratchReg;
@ -61,14 +60,14 @@ void StubCodeCompiler::GenerateInitLateStaticFieldStub(Assembler* assembler,
__ Comment("Calling initializer function");
__ PushRegister(kFieldReg);
__ LoadCompressedFieldFromOffset(
kFunctionReg, kFieldReg, target::Field::initializer_function_offset());
FUNCTION_REG, kFieldReg, target::Field::initializer_function_offset());
if (!FLAG_precompiled_mode) {
__ LoadCompressedFieldFromOffset(CODE_REG, kFunctionReg,
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
// Load a GC-safe value for the arguments descriptor (unused but tagged).
__ LoadImmediate(ARGS_DESC_REG, 0);
}
__ Call(FieldAddress(kFunctionReg, target::Function::entry_point_offset()));
__ Call(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ MoveRegister(kResultReg, CallingConventions::kReturnReg);
__ PopRegister(kFieldReg);
__ LoadStaticFieldAddress(kAddressReg, kFieldReg, kScratchReg);
@ -123,7 +122,6 @@ void StubCodeCompiler::GenerateInitInstanceFieldStub(Assembler* assembler) {
void StubCodeCompiler::GenerateInitLateInstanceFieldStub(Assembler* assembler,
bool is_final) {
const Register kFunctionReg = InitLateInstanceFieldInternalRegs::kFunctionReg;
const Register kInstanceReg = InitInstanceFieldABI::kInstanceReg;
const Register kFieldReg = InitInstanceFieldABI::kFieldReg;
const Register kAddressReg = InitLateInstanceFieldInternalRegs::kAddressReg;
@ -139,15 +137,15 @@ void StubCodeCompiler::GenerateInitLateInstanceFieldStub(Assembler* assembler,
"Result is a return value from initializer");
__ LoadCompressedFieldFromOffset(
kFunctionReg, InitInstanceFieldABI::kFieldReg,
FUNCTION_REG, InitInstanceFieldABI::kFieldReg,
target::Field::initializer_function_offset());
if (!FLAG_precompiled_mode) {
__ LoadCompressedFieldFromOffset(CODE_REG, kFunctionReg,
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
// Load a GC-safe value for the arguments descriptor (unused but tagged).
__ LoadImmediate(ARGS_DESC_REG, 0);
}
__ Call(FieldAddress(kFunctionReg, target::Function::entry_point_offset()));
__ Call(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ Drop(1); // Drop argument.
__ PopRegisterPair(kInstanceReg, kFieldReg);

@ -645,17 +645,17 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
}
// Input parameters:
// R4: arguments descriptor array.
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
// Setup space on stack for return value and preserve arguments descriptor.
__ LoadImmediate(R0, 0);
__ PushList((1 << R0) | (1 << R4));
__ PushList((1 << R0) | (1 << ARGS_DESC_REG));
__ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
// Get Code object result and restore arguments descriptor array.
__ PopList((1 << R0) | (1 << R4));
__ PopList((1 << R0) | (1 << ARGS_DESC_REG));
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
@ -665,7 +665,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// R4: arguments descriptor array.
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -680,10 +680,10 @@ void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
__ EnterStubFrame();
// Setup space on stack for return value and preserve arguments descriptor.
__ LoadImmediate(R0, 0);
__ PushList((1 << R0) | (1 << R4));
__ PushList((1 << R0) | (1 << ARGS_DESC_REG));
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
// Get Code object result and restore arguments descriptor array.
__ PopList((1 << R0) | (1 << R4));
__ PopList((1 << R0) | (1 << ARGS_DESC_REG));
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
@ -958,27 +958,29 @@ void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
__ Ret();
}
// R9: ICData/MegamorphicCache
// IC_DATA_REG: ICData/MegamorphicCache
static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
__ EnterStubFrame();
__ ldr(R4,
FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
__ ldr(ARGS_DESC_REG,
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
// Load the receiver.
__ ldr(R2, FieldAddress(R4, target::ArgumentsDescriptor::size_offset()));
__ ldr(R2, FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::size_offset()));
__ add(IP, FP, Operand(R2, LSL, 1)); // R2 is Smi.
__ ldr(R8, Address(IP, target::frame_layout.param_end_from_fp *
target::kWordSize));
__ LoadImmediate(IP, 0);
__ Push(IP); // Result slot.
__ Push(R8); // Receiver.
__ Push(R9); // ICData/MegamorphicCache.
__ Push(R4); // Arguments descriptor.
__ Push(IC_DATA_REG); // ICData/MegamorphicCache.
__ Push(ARGS_DESC_REG); // Arguments descriptor.
// Adjust arguments count.
__ ldr(R3,
FieldAddress(R4, target::ArgumentsDescriptor::type_args_len_offset()));
__ ldr(R3, FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::type_args_len_offset()));
__ cmp(R3, Operand(0));
__ AddImmediate(R2, R2, target::ToRawSmi(1),
NE); // Include the type arguments.
@ -1005,8 +1007,8 @@ static void GenerateDispatcherCode(Assembler* assembler,
}
// Input:
// R4 - arguments descriptor
// R9 - icdata/megamorphic_cache
// ARGS_DESC_REG - arguments descriptor
// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
GenerateNoSuchMethodDispatcherBody(assembler);
@ -2011,11 +2013,10 @@ void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
return;
}
if (FLAG_optimization_counter_threshold >= 0) {
Register ic_reg = R9;
Register func_reg = temp_reg;
ASSERT(temp_reg == R8);
__ Comment("Increment function counter");
__ ldr(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
__ ldr(func_reg, FieldAddress(IC_DATA_REG, target::ICData::owner_offset()));
__ ldr(TMP,
FieldAddress(func_reg, target::Function::usage_counter_offset()));
__ add(TMP, TMP, Operand(1));
@ -2185,22 +2186,26 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
if (type == kInstanceCall) {
__ LoadTaggedClassIdMayBeSmi(R0, R0);
__ ldr(R4, FieldAddress(
R9, target::CallSiteData::arguments_descriptor_offset()));
__ ldr(
ARGS_DESC_REG,
FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
if (num_args == 2) {
__ ldr(R1, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
__ ldr(R1, FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::count_offset()));
__ sub(R1, R1, Operand(target::ToRawSmi(2)));
__ ldr(R1, Address(SP, R1, LSL, 1)); // R1 (argument_count - 2) is Smi.
__ LoadTaggedClassIdMayBeSmi(R1, R1);
}
} else {
// Load arguments descriptor into R4.
__ ldr(R4, FieldAddress(
R9, target::CallSiteData::arguments_descriptor_offset()));
__ ldr(
ARGS_DESC_REG,
FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
// Get the receiver's class ID (first read number of arguments from
// arguments descriptor array and then access the receiver from the stack).
__ ldr(R1, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
__ ldr(R1, FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::count_offset()));
__ sub(R1, R1, Operand(target::ToRawSmi(1)));
// R1: argument_count - 1 (smi).
@ -2255,7 +2260,8 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ Bind(&miss);
__ Comment("IC miss");
// Compute address of arguments.
__ ldr(R1, FieldAddress(R4, target::ArgumentsDescriptor::count_offset()));
__ ldr(R1, FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::count_offset()));
__ sub(R1, R1, Operand(target::ToRawSmi(1)));
// R1: argument_count - 1 (smi).
__ add(R1, SP, Operand(R1, LSL, 1)); // R1 is Smi.
@ -2266,7 +2272,7 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ LoadImmediate(R0, 0);
// Preserve IC data object and arguments descriptor array and
// setup space on stack for result (target code object).
RegList regs = (1 << R0) | (1 << R4) | (1 << R9);
RegList regs = (1 << R0) | (1 << ARGS_DESC_REG) | (1 << R9);
if (save_entry_point) {
__ SmiTag(R3);
regs |= 1 << R3;
@ -2284,6 +2290,7 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ Drop(num_args + 1);
// Pop returned function object into R0.
// Restore arguments descriptor array and IC data array.
COMPILE_ASSERT(FUNCTION_REG == R0);
__ PopList(regs);
if (save_entry_point) {
__ SmiUntag(R3);
@ -2303,7 +2310,7 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
target::ICData::TargetIndexFor(num_args) * target::kWordSize;
const intptr_t count_offset =
target::ICData::CountIndexFor(num_args) * target::kWordSize;
__ LoadFromOffset(R0, R8, kIcDataOffset + target_offset);
__ LoadFromOffset(FUNCTION_REG, R8, kIcDataOffset + target_offset);
if (FLAG_optimization_counter_threshold >= 0) {
__ Comment("Update caller's counter");
@ -2316,12 +2323,13 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ Comment("Call target");
__ Bind(&call_target_function);
// R0: target function.
__ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
__ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
if (save_entry_point) {
__ Branch(Address(R0, R3));
__ Branch(Address(FUNCTION_REG, R3));
} else {
__ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
__ Branch(
FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
#if !defined(PRODUCT)
@ -2488,14 +2496,14 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
}
// Load arguments descriptor into R4.
__ ldr(R4,
__ ldr(ARGS_DESC_REG,
FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
// Get function and call it, if possible.
__ LoadFromOffset(R0, R8, target_offset);
__ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
__ LoadFromOffset(FUNCTION_REG, R8, target_offset);
__ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ Branch(Address(R0, R3));
__ Branch(Address(FUNCTION_REG, R3));
#if !defined(PRODUCT)
__ Bind(&stepping);
@ -2532,17 +2540,19 @@ void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
}
// Stub for compiling a function and jumping to the compiled code.
// R4: Arguments descriptor.
// R0: Function.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
__ PushList((1 << R0) | (1 << R4)); // Preserve arg desc, pass function.
// Preserve arg desc, pass function.
COMPILE_ASSERT(FUNCTION_REG < ARGS_DESC_REG);
__ PushList((1 << FUNCTION_REG) | (1 << ARGS_DESC_REG));
__ CallRuntime(kCompileFunctionRuntimeEntry, 1);
__ PopList((1 << R0) | (1 << R4));
__ PopList((1 << FUNCTION_REG) | (1 << ARGS_DESC_REG));
__ LeaveStubFrame();
__ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
__ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
__ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
// R9: Contains an ICData.
@ -2977,21 +2987,21 @@ void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Calls to the runtime to optimize the given function.
// R8: function to be reoptimized.
// R4: argument descriptor (preserved).
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ ldr(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
__ Push(R4);
__ Push(ARGS_DESC_REG);
__ LoadImmediate(IP, 0);
__ Push(IP); // Setup space on stack for return value.
__ Push(R8);
__ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
__ Pop(R0); // Discard argument.
__ Pop(R0); // Get Function object
__ Pop(R4); // Restore argument descriptor.
__ Pop(FUNCTION_REG); // Get Function object
__ Pop(ARGS_DESC_REG); // Restore argument descriptor.
__ LeaveStubFrame();
__ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
__ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
__ ldr(CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ bkpt(0);
}
@ -3107,16 +3117,18 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
// Called from megamorphic calls.
// R0: receiver
// R9: MegamorphicCache (preserved)
// IC_DATA_REG: MegamorphicCache (preserved)
// Passed to target:
// R0: function
// R4: arguments descriptor
// FUNCTION_REG: target function
// ARGS_DESC_REG: arguments descriptor
// CODE_REG: target Code
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
__ LoadTaggedClassIdMayBeSmi(R8, R0);
// R8: receiver cid as Smi.
__ ldr(R2, FieldAddress(R9, target::MegamorphicCache::buckets_offset()));
__ ldr(R1, FieldAddress(R9, target::MegamorphicCache::mask_offset()));
__ ldr(R2,
FieldAddress(IC_DATA_REG, target::MegamorphicCache::buckets_offset()));
__ ldr(R1,
FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
// R2: cache buckets array.
// R1: mask as a smi.
@ -3143,13 +3155,15 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
__ ldr(R0, FieldAddress(IP, base + target::kWordSize));
__ ldr(FUNCTION_REG, FieldAddress(IP, base + target::kWordSize));
if (!FLAG_precompiled_mode) {
__ ldr(CODE_REG, FieldAddress(R0, target::Function::code_offset()));
__ ldr(CODE_REG,
FieldAddress(FUNCTION_REG, target::Function::code_offset()));
}
__ ldr(ARGS_DESC_REG,
FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
__ Branch(FieldAddress(R0, target::Function::entry_point_offset()));
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
__ Branch(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
// Probe failed, check if it is a miss.
__ Bind(&probe_failed);
@ -3168,9 +3182,9 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
__ ldr(R8, FieldAddress(R9, target::ICData::entries_offset()));
__ ldr(R4,
FieldAddress(R9, target::CallSiteData::arguments_descriptor_offset()));
__ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ ldr(R4, FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
__ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
// R8: first IC entry
__ LoadTaggedClassIdMayBeSmi(R1, R0);

@ -878,18 +878,18 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
}
// Input parameters:
// R4: arguments descriptor array.
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
// Setup space on stack for return value and preserve arguments descriptor.
__ Push(R4);
__ Push(ARGS_DESC_REG);
__ Push(ZR);
__ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
// Get Code object result and restore arguments descriptor array.
__ Pop(CODE_REG);
__ Pop(R4);
__ Pop(ARGS_DESC_REG);
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
@ -899,7 +899,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// R4: arguments descriptor array.
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -913,12 +913,12 @@ void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
// calling into the runtime.
__ EnterStubFrame();
// Setup space on stack for return value and preserve arguments descriptor.
__ Push(R4);
__ Push(ARGS_DESC_REG);
__ Push(ZR);
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
// Get Code object result and restore arguments descriptor array.
__ Pop(CODE_REG);
__ Pop(R4);
__ Pop(ARGS_DESC_REG);
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
@ -1204,27 +1204,28 @@ void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
__ ret();
}
// R5: ICData/MegamorphicCache
// IC_DATA_REG: ICData/MegamorphicCache
static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
__ EnterStubFrame();
__ ldr(R4,
FieldAddress(R5, target::CallSiteData::arguments_descriptor_offset()));
__ ldr(ARGS_DESC_REG,
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
// Load the receiver.
__ LoadCompressedSmiFieldFromOffset(
R2, R4, target::ArgumentsDescriptor::size_offset());
R2, ARGS_DESC_REG, target::ArgumentsDescriptor::size_offset());
__ add(TMP, FP, Operand(R2, LSL, target::kWordSizeLog2 - 1)); // R2 is Smi.
__ LoadFromOffset(R6, TMP,
target::frame_layout.param_end_from_fp * target::kWordSize);
__ Push(ZR); // Result slot.
__ Push(R6); // Receiver.
__ Push(R5); // ICData/MegamorphicCache.
__ Push(R4); // Arguments descriptor.
__ Push(IC_DATA_REG); // ICData/MegamorphicCache.
__ Push(ARGS_DESC_REG); // Arguments descriptor.
// Adjust arguments count.
__ LoadCompressedSmiFieldFromOffset(
R3, R4, target::ArgumentsDescriptor::type_args_len_offset());
R3, ARGS_DESC_REG, target::ArgumentsDescriptor::type_args_len_offset());
__ AddImmediate(TMP, R2, 1, kObjectBytes); // Include the type arguments.
__ cmp(R3, Operand(0), kObjectBytes);
// R2 <- (R3 == 0) ? R2 : TMP + 1 (R2 : R2 + 2).
@ -1252,8 +1253,8 @@ static void GenerateDispatcherCode(Assembler* assembler,
}
// Input:
// R4 - arguments descriptor
// R5 - icdata/megamorphic_cache
// ARGS_DESC_REG - arguments descriptor
// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
GenerateNoSuchMethodDispatcherBody(assembler);
@ -2325,11 +2326,11 @@ void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
return;
}
if (FLAG_optimization_counter_threshold >= 0) {
Register ic_reg = R5;
Register func_reg = temp_reg;
ASSERT(temp_reg == R6);
__ Comment("Increment function counter");
__ LoadFieldFromOffset(func_reg, ic_reg, target::ICData::owner_offset());
__ LoadFieldFromOffset(func_reg, IC_DATA_REG,
target::ICData::owner_offset());
__ LoadFieldFromOffset(
R7, func_reg, target::Function::usage_counter_offset(), kFourBytes);
__ AddImmediate(R7, 1);
@ -2503,11 +2504,11 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
if (type == kInstanceCall) {
__ LoadTaggedClassIdMayBeSmi(R0, R0);
__ LoadFieldFromOffset(R4, R5,
__ LoadFieldFromOffset(ARGS_DESC_REG, R5,
target::CallSiteData::arguments_descriptor_offset());
if (num_args == 2) {
__ LoadCompressedSmiFieldFromOffset(
R7, R4, target::ArgumentsDescriptor::count_offset());
R7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
__ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode.
__ sub(R7, R7, Operand(2));
// R1 <- [SP + (R1 << 3)]
@ -2515,12 +2516,12 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ LoadTaggedClassIdMayBeSmi(R1, R1);
}
} else {
__ LoadFieldFromOffset(R4, R5,
__ LoadFieldFromOffset(ARGS_DESC_REG, R5,
target::CallSiteData::arguments_descriptor_offset());
// Get the receiver's class ID (first read number of arguments from
// arguments descriptor array and then access the receiver from the stack).
__ LoadCompressedSmiFieldFromOffset(
R7, R4, target::ArgumentsDescriptor::count_offset());
R7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
__ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode.
__ sub(R7, R7, Operand(1));
// R0 <- [SP + (R7 << 3)]
@ -2577,7 +2578,7 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
// Compute address of arguments.
__ LoadCompressedSmiFieldFromOffset(
R7, R4, target::ArgumentsDescriptor::count_offset());
R7, ARGS_DESC_REG, target::ArgumentsDescriptor::count_offset());
__ SmiUntag(R7); // Untag so we can use the LSL 3 addressing mode.
__ sub(R7, R7, Operand(1));
// R7: argument_count - 1 (untagged).
@ -2589,7 +2590,7 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ EnterStubFrame();
// Preserve IC data object and arguments descriptor array and
// setup space on stack for result (target code object).
__ Push(R4); // Preserve arguments descriptor array.
__ Push(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ Push(R5); // Preserve IC Data.
if (save_entry_point) {
__ SmiTag(R8);
@ -2609,13 +2610,13 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ Drop(num_args + 1);
// Pop returned function object into R0.
// Restore arguments descriptor array and IC data array.
__ Pop(R0); // Pop returned function object into R0.
__ Pop(FUNCTION_REG); // Pop returned function object into R0.
if (save_entry_point) {
__ Pop(R8);
__ SmiUntag(R8);
}
__ Pop(R5); // Restore IC Data.
__ Pop(R4); // Restore arguments descriptor array.
__ Pop(ARGS_DESC_REG); // Restore arguments descriptor array.
__ RestoreCodePointer();
__ LeaveStubFrame();
Label call_target_function;
@ -2632,7 +2633,7 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
target::ICData::TargetIndexFor(num_args) * target::kCompressedWordSize;
const intptr_t count_offset =
target::ICData::CountIndexFor(num_args) * target::kCompressedWordSize;
__ LoadCompressedFromOffset(R0, R6, target_offset);
__ LoadCompressedFromOffset(FUNCTION_REG, R6, target_offset);
if (FLAG_optimization_counter_threshold >= 0) {
// Update counter, ignore overflow.
@ -2644,13 +2645,14 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ Comment("Call target");
__ Bind(&call_target_function);
// R0: target function.
__ LoadCompressedFieldFromOffset(CODE_REG, R0,
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
if (save_entry_point) {
__ add(R2, R0, Operand(R8));
__ add(R2, FUNCTION_REG, Operand(R8));
__ ldr(R2, Address(R2, 0));
} else {
__ LoadFieldFromOffset(R2, R0, target::Function::entry_point_offset());
__ LoadFieldFromOffset(R2, FUNCTION_REG,
target::Function::entry_point_offset());
}
__ br(R2);
@ -2821,14 +2823,14 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
}
// Load arguments descriptor into R4.
__ LoadFieldFromOffset(R4, R5,
__ LoadFieldFromOffset(ARGS_DESC_REG, R5,
target::CallSiteData::arguments_descriptor_offset());
// Get function and call it, if possible.
__ LoadCompressedFromOffset(R0, R6, target_offset);
__ LoadCompressedFieldFromOffset(CODE_REG, R0,
__ LoadCompressedFromOffset(FUNCTION_REG, R6, target_offset);
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
__ add(R2, R0, Operand(R8));
__ add(R2, FUNCTION_REG, Operand(R8));
__ ldr(R2, Address(R2, 0));
__ br(R2);
@ -2869,21 +2871,22 @@ void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
}
// Stub for compiling a function and jumping to the compiled code.
// R4: Arguments descriptor.
// R0: Function.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
// Preserve arg desc.
__ EnterStubFrame();
__ Push(R4); // Save arg. desc.
__ Push(R0); // Pass function.
__ Push(ARGS_DESC_REG); // Save arg. desc.
__ Push(FUNCTION_REG); // Pass function.
__ CallRuntime(kCompileFunctionRuntimeEntry, 1);
__ Pop(R0); // Restore argument.
__ Pop(R4); // Restore arg desc.
__ Pop(FUNCTION_REG); // Restore function.
__ Pop(ARGS_DESC_REG); // Restore arg desc.
__ LeaveStubFrame();
__ LoadCompressedFieldFromOffset(CODE_REG, R0,
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
__ LoadFieldFromOffset(R2, R0, target::Function::entry_point_offset());
__ LoadFieldFromOffset(R2, FUNCTION_REG,
target::Function::entry_point_offset());
__ br(R2);
}
@ -3308,21 +3311,22 @@ void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Calls to the runtime to optimize the given function.
// R6: function to be re-optimized.
// R4: argument descriptor (preserved).
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
__ EnterStubFrame();
__ Push(R4);
__ Push(ARGS_DESC_REG);
// Setup space on stack for the return value.
__ Push(ZR);
__ Push(R6);
__ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
__ Pop(R0); // Discard argument.
__ Pop(R0); // Get Function object
__ Pop(R4); // Restore argument descriptor.
__ LoadCompressedFieldFromOffset(CODE_REG, R0,
__ Pop(FUNCTION_REG); // Get Function object
__ Pop(ARGS_DESC_REG); // Restore argument descriptor.
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
__ LoadFieldFromOffset(R1, R0, target::Function::entry_point_offset());
__ LoadFieldFromOffset(R1, FUNCTION_REG,
target::Function::entry_point_offset());
__ LeaveStubFrame();
__ br(R1);
__ brk(0);
@ -3421,12 +3425,11 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
// Called from megamorphic call sites.
// R0: receiver (passed to target)
// R5: MegamorphicCache (preserved)
// IC_DATA_REG: MegamorphicCache (preserved)
// Passed to target:
// R0: receiver
// FUNCTION_REG: target function
// CODE_REG: target Code
// R4: arguments descriptor
// R5: MegamorphicCache
// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
@ -3437,8 +3440,10 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
Label cid_loaded;
__ Bind(&cid_loaded);
__ ldr(R2, FieldAddress(R5, target::MegamorphicCache::buckets_offset()));
__ ldr(R1, FieldAddress(R5, target::MegamorphicCache::mask_offset()));
__ ldr(R2,
FieldAddress(IC_DATA_REG, target::MegamorphicCache::buckets_offset()));
__ ldr(R1,
FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
// R2: cache buckets array.
// R1: mask as a smi.
@ -3471,13 +3476,16 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
__ LoadCompressed(
R0, FieldAddress(TMP, base + target::kCompressedWordSize, kObjectBytes));
__ ldr(R1, FieldAddress(R0, target::Function::entry_point_offset()));
FUNCTION_REG,
FieldAddress(TMP, base + target::kCompressedWordSize, kObjectBytes));
__ ldr(R1,
FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ ldr(ARGS_DESC_REG,
FieldAddress(R5, target::CallSiteData::arguments_descriptor_offset()));
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
if (!FLAG_precompiled_mode) {
__ LoadCompressed(CODE_REG,
FieldAddress(R0, target::Function::code_offset()));
__ LoadCompressed(
CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
}
__ br(R1);
@ -3503,12 +3511,13 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Input:
// R0 - receiver
// R5 - icdata
// IC_DATA_REG - icdata
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
__ ldr(R8, FieldAddress(R5, target::ICData::entries_offset()));
__ ldr(R4,
FieldAddress(R5, target::CallSiteData::arguments_descriptor_offset()));
__ ldr(R8, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ ldr(ARGS_DESC_REG,
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
__ AddImmediate(R8, target::Array::data_offset() - kHeapObjectTag);
// R8: first IC entry
__ LoadTaggedClassIdMayBeSmi(R1, R0);

@ -494,14 +494,14 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
}
// Input parameters:
// EDX: arguments descriptor array.
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushl(EDX); // Preserve arguments descriptor array.
__ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushl(Immediate(0)); // Setup space on stack for return value.
__ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
__ popl(EAX); // Get Code object result.
__ popl(EDX); // Restore arguments descriptor array.
__ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
// Remove the stub frame as we are about to jump to the dart function.
__ LeaveFrame();
@ -510,18 +510,18 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// EDX: arguments descriptor array.
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
// This was a static call.
__ EnterStubFrame();
__ pushl(EDX); // Preserve arguments descriptor array.
__ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushl(Immediate(0)); // Setup space on stack for return value.
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
__ popl(EAX); // Get Code object.
__ popl(EDX); // Restore arguments descriptor array.
__ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
__ movl(EAX, FieldAddress(EAX, target::Code::entry_point_offset()));
__ LeaveFrame();
__ jmp(EAX);
@ -1701,11 +1701,11 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
if (FLAG_optimization_counter_threshold >= 0) {
Register ic_reg = ECX;
Register func_reg = temp_reg;
ASSERT(ic_reg != func_reg);
ASSERT(func_reg != IC_DATA_REG);
__ Comment("Increment function counter");
__ movl(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
__ movl(func_reg,
FieldAddress(IC_DATA_REG, target::ICData::owner_offset()));
__ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
}
@ -1862,8 +1862,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
__ Comment("Extract ICData initial values and receiver cid");
// ECX: IC data object (preserved).
// Load arguments descriptor into EDX.
__ movl(EDX, FieldAddress(
ECX, target::CallSiteData::arguments_descriptor_offset()));
__ movl(
ARGS_DESC_REG,
FieldAddress(ECX, target::CallSiteData::arguments_descriptor_offset()));
// Loop that checks if there is an IC data match.
Label loop, found, miss;
// ECX: IC data object (preserved).
@ -1876,7 +1877,8 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
// last time we need the argument descriptor, and we reuse EAX for the
// class IDs from the IC descriptor. In the 2-argument case we preserve
// the argument descriptor in EAX.
__ movl(EAX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
__ movl(EAX, FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::count_offset()));
if (num_args == 1) {
// Load receiver into EDI.
__ movl(EDI,
@ -1937,12 +1939,13 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
__ Comment("IC miss");
// Compute address of arguments (first read number of arguments from
// arguments descriptor array and then compute address on the stack).
__ movl(EAX, FieldAddress(EDX, target::ArgumentsDescriptor::count_offset()));
__ movl(EAX, FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::count_offset()));
__ leal(EAX, Address(ESP, EAX, TIMES_2, 0)); // EAX is Smi.
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
__ pushl(EDX); // Preserve arguments descriptor array.
__ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushl(ECX); // Preserve IC data object.
__ pushl(Immediate(0)); // Result slot.
// Push call arguments.
@ -1956,9 +1959,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
for (intptr_t i = 0; i < num_args + 1; i++) {
__ popl(EAX);
}
__ popl(EAX); // Pop returned function object into EAX.
__ popl(FUNCTION_REG); // Pop returned function object into FUNCTION_REG.
__ popl(ECX); // Restore IC data array.
__ popl(EDX); // Restore arguments descriptor array.
__ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
__ LeaveFrame();
Label call_target_function;
if (!FLAG_lazy_dispatchers) {
@ -1976,11 +1979,12 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStubForEntryKind(
__ addl(Address(EBX, count_offset), Immediate(target::ToRawSmi(1)));
}
__ movl(EAX, Address(EBX, target_offset));
__ movl(FUNCTION_REG, Address(EBX, target_offset));
__ Bind(&call_target_function);
__ Comment("Call target");
// FUNCTION_REG: Target function.
__ jmp(FieldAddress(EAX, target::Function::entry_point_offset(entry_kind)));
__ jmp(FieldAddress(FUNCTION_REG,
target::Function::entry_point_offset(entry_kind)));
#if !defined(PRODUCT)
if (optimized == kUnoptimized) {
@ -2141,12 +2145,14 @@ static void GenerateZeroArgsUnoptimizedStaticCallForEntryKind(
}
// Load arguments descriptor into ARGS_DESC_REG (EDX).
__ movl(EDX, FieldAddress(
ECX, target::CallSiteData::arguments_descriptor_offset()));
__ movl(
ARGS_DESC_REG,
FieldAddress(ECX, target::CallSiteData::arguments_descriptor_offset()));
// Get function and call it, if possible.
__ movl(EAX, Address(EBX, target_offset));
__ jmp(FieldAddress(EAX, target::Function::entry_point_offset(entry_kind)));
__ movl(FUNCTION_REG, Address(EBX, target_offset));
__ jmp(FieldAddress(FUNCTION_REG,
target::Function::entry_point_offset(entry_kind)));
#if !defined(PRODUCT)
__ Bind(&stepping);
@ -2187,18 +2193,18 @@ void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
}
// Stub for compiling a function and jumping to the compiled code.
// EDX: Arguments descriptor.
// EAX: Function.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushl(EDX); // Preserve arguments descriptor array.
__ pushl(EAX); // Pass function.
__ pushl(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushl(FUNCTION_REG); // Pass function.
__ CallRuntime(kCompileFunctionRuntimeEntry, 1);
__ popl(EAX); // Restore function.
__ popl(EDX); // Restore arguments descriptor array.
__ popl(FUNCTION_REG); // Restore function.
__ popl(ARGS_DESC_REG); // Restore arguments descriptor array.
__ LeaveFrame();
__ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
__ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
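The lazy-compile contract is the same on every architecture touched here: FUNCTION_REG carries the uncompiled function, ARGS_DESC_REG the arguments descriptor, and the stub tail-jumps to the freshly compiled entry point. A hedged C++ rendering of that flow, with illustrative types and a stand-in for the runtime entry:

#include <cstdint>

// Sketch only: models GenerateLazyCompileStub's behavior.
struct Code { uintptr_t entry_point; };
struct Function { Code* code; };

void CompileFunction(Function* f);  // kCompileFunctionRuntimeEntry stand-in.

uintptr_t LazyCompile(Function* function) {
  // The stub preserves FUNCTION_REG and ARGS_DESC_REG across the
  // runtime call by pushing them before and popping them after.
  CompileFunction(function);
  // CODE_REG is reloaded from the function, then control transfers to
  // the new code with FUNCTION_REG and ARGS_DESC_REG intact.
  return function->code->entry_point;  // Tail-jump target.
}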
// ECX: Contains an ICData.
@ -2612,20 +2618,21 @@ void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Calls to the runtime to optimize the given function.
// EBX: function to be reoptimized.
// EDX: argument descriptor (preserved).
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ movl(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
__ pushl(EDX);
__ pushl(ARGS_DESC_REG);
__ pushl(Immediate(0)); // Setup space on stack for return value.
__ pushl(EBX);
__ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
__ popl(EAX); // Discard argument.
__ popl(EAX); // Get Function object
__ popl(EDX); // Restore argument descriptor.
__ popl(FUNCTION_REG); // Get Function object.
__ popl(ARGS_DESC_REG); // Restore argument descriptor.
__ LeaveFrame();
__ movl(CODE_REG, FieldAddress(EAX, target::Function::code_offset()));
__ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
__ movl(CODE_REG,
FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ int3();
}
@ -2735,10 +2742,11 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
// Called from megamorphic calls.
// EBX: receiver (passed to target)
// ECX: target::MegamorphicCache (preserved)
// IC_DATA_REG: target::MegamorphicCache (preserved)
// Passed to target:
// EBX: target entry point
// EDX: argument descriptor
// FUNCTION_REG: target function
// ARGS_DESC_REG: argument descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
@ -2753,8 +2761,10 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
Label cid_loaded;
__ Bind(&cid_loaded);
__ pushl(EBX); // save receiver
__ movl(EBX, FieldAddress(ECX, target::MegamorphicCache::mask_offset()));
__ movl(EDI, FieldAddress(ECX, target::MegamorphicCache::buckets_offset()));
__ movl(EBX,
FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
__ movl(EDI, FieldAddress(IC_DATA_REG,
target::MegamorphicCache::buckets_offset()));
// EDI: cache buckets array.
// EBX: mask as a smi.
@ -2783,11 +2793,13 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
__ movl(EAX, FieldAddress(EDI, EDX, TIMES_4, base + target::kWordSize));
__ movl(EDX, FieldAddress(
ECX, target::CallSiteData::arguments_descriptor_offset()));
__ movl(FUNCTION_REG,
FieldAddress(EDI, EDX, TIMES_4, base + target::kWordSize));
__ movl(ARGS_DESC_REG,
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
__ popl(EBX); // restore receiver
__ jmp(FieldAddress(EAX, target::Function::entry_point_offset()));
__ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ Bind(&probe_failed);
// Probe failed, check if it is a miss.
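The probe loop elided from this diff is an open-addressed hash lookup: mask the receiver's class id into the bucket array, compare against the stored cid, and advance linearly on mismatch. A hedged sketch of that lookup, with simplified field names:

#include <cstdint>

// Sketch only; the MegamorphicCache layout is simplified.
struct Function;
struct Entry { intptr_t cid; Function* target; };
struct MegamorphicCache {
  Entry* buckets;
  intptr_t mask;  // Table size minus one; the size is a power of two.
};
const intptr_t kIllegalCid = 0;  // Illustrative sentinel value.

Function* MegamorphicLookup(MegamorphicCache* cache, intptr_t receiver_cid) {
  intptr_t probe = receiver_cid & cache->mask;
  while (true) {
    Entry& e = cache->buckets[probe];
    // Either the receiver's cid or the illegal cid ends the probe; in
    // the latter case the target is the miss handler, which is itself
    // invocable as a normal Dart function.
    if (e.cid == receiver_cid || e.cid == kIllegalCid) return e.target;
    probe = (probe + 1) & cache->mask;  // Linear probing.
  }
}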


@ -697,17 +697,19 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
}
// Input parameters:
// S4: arguments descriptor array.
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
__ subi(SP, SP, 2 * target::kWordSize);
__ sx(S4, Address(SP, 1 * target::kWordSize)); // Preserve args descriptor.
__ sx(ARGS_DESC_REG,
Address(SP, 1 * target::kWordSize)); // Preserve args descriptor.
__ sx(ZR, Address(SP, 0 * target::kWordSize)); // Result slot.
__ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
__ lx(CODE_REG, Address(SP, 0 * target::kWordSize)); // Result.
__ lx(S4, Address(SP, 1 * target::kWordSize)); // Restore args descriptor.
__ lx(ARGS_DESC_REG,
Address(SP, 1 * target::kWordSize)); // Restore args descriptor.
__ addi(SP, SP, 2 * target::kWordSize);
__ LeaveStubFrame();
// Jump to the dart function.
@ -717,7 +719,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// S4: arguments descriptor array.
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -731,11 +733,11 @@ void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
// calling into the runtime.
__ EnterStubFrame();
// Setup space on stack for return value and preserve arguments descriptor.
__ PushRegistersInOrder({S4, ZR});
__ PushRegistersInOrder({ARGS_DESC_REG, ZR});
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
// Get Code object result and restore arguments descriptor array.
__ PopRegister(CODE_REG);
__ PopRegister(S4);
__ PopRegister(ARGS_DESC_REG);
// Remove the stub frame.
__ LeaveStubFrame();
// Jump to the dart function.
@ -1021,27 +1023,28 @@ void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
__ ret();
}
// S5: ICData/MegamorphicCache
// IC_DATA_REG: ICData/MegamorphicCache
static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler) {
__ EnterStubFrame();
__ lx(S4,
FieldAddress(S5, target::CallSiteData::arguments_descriptor_offset()));
__ lx(ARGS_DESC_REG,
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
// Load the receiver.
__ LoadCompressedSmiFieldFromOffset(
T2, S4, target::ArgumentsDescriptor::size_offset());
T2, ARGS_DESC_REG, target::ArgumentsDescriptor::size_offset());
__ slli(TMP, T2, target::kWordSizeLog2 - 1); // T2 is Smi.
__ add(TMP, TMP, FP);
__ LoadFromOffset(A0, TMP,
target::frame_layout.param_end_from_fp * target::kWordSize);
// Push: result slot, receiver, ICData/MegamorphicCache,
// arguments descriptor.
__ PushRegistersInOrder({ZR, A0, S5, S4});
__ PushRegistersInOrder({ZR, A0, IC_DATA_REG, ARGS_DESC_REG});
// Adjust arguments count.
__ LoadCompressedSmiFieldFromOffset(
T3, S4, target::ArgumentsDescriptor::type_args_len_offset());
T3, ARGS_DESC_REG, target::ArgumentsDescriptor::type_args_len_offset());
Label args_count_ok;
__ beqz(T3, &args_count_ok, Assembler::kNearJump);
// Include the type arguments.
@ -1069,8 +1072,8 @@ static void GenerateDispatcherCode(Assembler* assembler,
}
// Input:
// S4 - arguments descriptor
// S5 - icdata/megamorphic_cache
// ARGS_DESC_REG - arguments descriptor
// IC_DATA_REG - icdata/megamorphic_cache
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
GenerateNoSuchMethodDispatcherBody(assembler);
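The dispatcher body gathers everything NoSuchMethod needs and hands it to the runtime, bumping the argument count by one when type arguments are present since they occupy an extra slot below the receiver. A hedged C++ sketch, with illustrative types and a stand-in runtime helper:

#include <cstdint>

// Sketch only: models GenerateNoSuchMethodDispatcherBody.
struct Object;
struct ArgumentsDescriptor {
  intptr_t size;           // Argument count, including the receiver.
  intptr_t type_args_len;  // Nonzero when type arguments were passed.
};

Object* NoSuchMethodFromCallStub(Object* receiver,
                                 Object* cache,  // ICData/MegamorphicCache.
                                 ArgumentsDescriptor* desc,
                                 intptr_t args_count);

Object* Dispatch(Object* cache, ArgumentsDescriptor* desc, Object** params) {
  // The receiver sits 'size' slots into the caller's parameter area
  // (frame layout simplified here).
  Object* receiver = params[desc->size];
  intptr_t count = desc->size;
  if (desc->type_args_len != 0) {
    count++;  // Include the type arguments vector.
  }
  return NoSuchMethodFromCallStub(receiver, cache, desc, count);
}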
@ -2398,7 +2401,7 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ Drop(num_args + 1);
// Pop returned function object into FUNCTION_REG.
// Restore arguments descriptor array and IC data array.
__ PopRegister(T0); // Pop returned function object into T0.
__ PopRegister(FUNCTION_REG); // Pop returned function object into FUNCTION_REG.
if (save_entry_point) {
__ PopRegister(T6);
__ SmiUntag(T6);
@ -2421,7 +2424,7 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
target::ICData::TargetIndexFor(num_args) * target::kCompressedWordSize;
const intptr_t count_offset =
target::ICData::CountIndexFor(num_args) * target::kCompressedWordSize;
__ LoadCompressedFromOffset(T0, A1, target_offset);
__ LoadCompressedFromOffset(FUNCTION_REG, A1, target_offset);
if (FLAG_optimization_counter_threshold >= 0) {
// Update counter, ignore overflow.
@ -2433,15 +2436,16 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ Comment("Call target");
__ Bind(&call_target_function);
// FUNCTION_REG: target function.
__ LoadCompressedFieldFromOffset(CODE_REG, T0,
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
if (save_entry_point) {
__ add(A7, T0, T6);
__ add(A7, FUNCTION_REG, T6);
__ lx(A7, Address(A7, 0));
} else {
__ LoadFieldFromOffset(A7, T0, target::Function::entry_point_offset());
__ LoadFieldFromOffset(A7, FUNCTION_REG,
target::Function::entry_point_offset());
}
__ jr(A7); // T0: Function, argument to lazy compile stub.
__ jr(A7); // FUNCTION_REG: Function, argument to lazy compile stub.
#if !defined(PRODUCT)
if (optimized == kUnoptimized) {
@ -2614,12 +2618,12 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
target::CallSiteData::arguments_descriptor_offset());
// Get function and call it, if possible.
__ LoadCompressedFromOffset(T0, A0, target_offset);
__ LoadCompressedFieldFromOffset(CODE_REG, T0,
__ LoadCompressedFromOffset(FUNCTION_REG, A0, target_offset);
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
__ add(A0, T0, T6);
__ add(A0, FUNCTION_REG, T6);
__ lx(TMP, Address(A0, 0));
__ jr(TMP); // T0: Function, argument to lazy compile stub.
__ jr(TMP); // FUNCTION_REG: Function, argument to lazy compile stub.
#if !defined(PRODUCT)
__ Bind(&stepping);
@ -2658,21 +2662,22 @@ void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
}
// Stub for compiling a function and jumping to the compiled code.
// S4: Arguments descriptor.
// T0: Function.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
// Preserve arg desc.
__ EnterStubFrame();
// Save arguments descriptor and pass function.
__ PushRegistersInOrder({ARGS_DESC_REG, T0});
__ PushRegistersInOrder({ARGS_DESC_REG, FUNCTION_REG});
__ CallRuntime(kCompileFunctionRuntimeEntry, 1);
__ PopRegister(T0); // Restore argument.
__ PopRegister(FUNCTION_REG); // Restore function.
__ PopRegister(ARGS_DESC_REG); // Restore arg desc.
__ LeaveStubFrame();
__ LoadCompressedFieldFromOffset(CODE_REG, T0,
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
__ LoadFieldFromOffset(TMP, T0, target::Function::entry_point_offset());
__ LoadFieldFromOffset(TMP, FUNCTION_REG,
target::Function::entry_point_offset());
__ jr(TMP);
}
@ -3077,23 +3082,26 @@ void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Calls to the runtime to optimize the given function.
// A0: function to be re-optimized.
// S4: argument descriptor (preserved).
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ LoadFromOffset(CODE_REG, THR, target::Thread::optimize_stub_offset());
__ EnterStubFrame();
__ subi(SP, SP, 3 * target::kWordSize);
__ sx(S4, Address(SP, 2 * target::kWordSize)); // Preserves args descriptor.
__ sx(ARGS_DESC_REG,
Address(SP, 2 * target::kWordSize)); // Preserves args descriptor.
__ sx(ZR, Address(SP, 1 * target::kWordSize)); // Result slot.
__ sx(A0, Address(SP, 0 * target::kWordSize)); // Function argument.
__ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
__ lx(T0, Address(SP, 1 * target::kWordSize)); // Function result.
__ lx(S4, Address(SP, 2 * target::kWordSize)); // Restore args descriptor.
__ lx(FUNCTION_REG, Address(SP, 1 * target::kWordSize)); // Function result.
__ lx(ARGS_DESC_REG,
Address(SP, 2 * target::kWordSize)); // Restore args descriptor.
__ addi(SP, SP, 3 * target::kWordSize);
__ LoadCompressedFieldFromOffset(CODE_REG, T0,
__ LoadCompressedFieldFromOffset(CODE_REG, FUNCTION_REG,
target::Function::code_offset());
__ LoadFieldFromOffset(A1, T0, target::Function::entry_point_offset());
__ LoadFieldFromOffset(A1, FUNCTION_REG,
target::Function::entry_point_offset());
__ LeaveStubFrame();
__ jr(A1);
__ ebreak();
@ -3209,12 +3217,11 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
// Called from megamorphic call sites.
// A0: receiver (passed to target)
// S5: MegamorphicCache (preserved)
// IC_DATA_REG: MegamorphicCache (preserved)
// Passed to target:
// A0: receiver
// FUNCTION_REG: target function
// CODE_REG: target Code
// S4: arguments descriptor
// S5: MegamorphicCache
// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
@ -3225,8 +3232,9 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
Label cid_loaded;
__ Bind(&cid_loaded);
__ lx(T2, FieldAddress(S5, target::MegamorphicCache::buckets_offset()));
__ lx(T1, FieldAddress(S5, target::MegamorphicCache::mask_offset()));
__ lx(T2,
FieldAddress(IC_DATA_REG, target::MegamorphicCache::buckets_offset()));
__ lx(T1, FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
// T2: cache buckets array.
// T1: mask as a smi.
@ -3259,13 +3267,15 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
__ LoadCompressed(T0, FieldAddress(TMP, base + target::kCompressedWordSize));
__ lx(A1, FieldAddress(T0, target::Function::entry_point_offset()));
__ LoadCompressed(FUNCTION_REG,
FieldAddress(TMP, base + target::kCompressedWordSize));
__ lx(A1, FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ lx(ARGS_DESC_REG,
FieldAddress(S5, target::CallSiteData::arguments_descriptor_offset()));
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
if (!FLAG_precompiled_mode) {
__ LoadCompressed(CODE_REG,
FieldAddress(T0, target::Function::code_offset()));
__ LoadCompressed(
CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
}
__ jr(A1); // FUNCTION_REG: Function, argument to lazy compile stub.
@ -3290,12 +3300,13 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Input:
// A0 - receiver
// S5 - icdata
// IC_DATA_REG - icdata
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
__ lx(T1, FieldAddress(S5, target::ICData::entries_offset()));
__ lx(S4,
FieldAddress(S5, target::CallSiteData::arguments_descriptor_offset()));
__ lx(T1, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ lx(ARGS_DESC_REG,
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
__ AddImmediate(T1, target::Array::data_offset() - kHeapObjectTag);
// T1: first IC entry
__ LoadTaggedClassIdMayBeSmi(A1, A0);
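Each IC entry in that array stores the checked class id(s) followed by the target function and a call count, which is why the stubs above derive target_offset and count_offset from target::ICData::TargetIndexFor and CountIndexFor. A hedged C++ sketch of the one-argument scan, with a simplified, untagged layout:

#include <cstdint>

// Sketch only: a one-argument IC entry as [cid, target, count]; real
// entries live in a Smi-tagged Array and may carry an exactness slot.
struct Function;
struct ICEntry { intptr_t cid; Function* target; intptr_t count; };
const intptr_t kIllegalCid = 0;  // Illustrative array terminator.

Function* ICLookup(ICEntry* entries, intptr_t receiver_cid) {
  for (ICEntry* e = entries; e->cid != kIllegalCid; e++) {
    if (e->cid == receiver_cid) {
      e->count++;  // Update counter, ignore overflow.
      return e->target;
    }
  }
  return nullptr;  // Miss: fall back to the runtime.
}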


@ -794,15 +794,15 @@ void StubCodeCompiler::GenerateCallBootstrapNativeStub(Assembler* assembler) {
}
// Input parameters:
// R10: arguments descriptor array.
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushq(R10); // Preserve arguments descriptor array.
__ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
// Setup space on stack for return value.
__ pushq(Immediate(0));
__ CallRuntime(kPatchStaticCallRuntimeEntry, 0);
__ popq(CODE_REG); // Get Code object result.
__ popq(R10); // Restore arguments descriptor array.
__ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
// Remove the stub frame as we are about to jump to the dart function.
__ LeaveStubFrame();
@ -812,7 +812,7 @@ void StubCodeCompiler::GenerateCallStaticFunctionStub(Assembler* assembler) {
// Called from a static call only when an invalid code has been entered
// (invalid because its function was optimized or deoptimized).
// R10: arguments descriptor array.
// ARGS_DESC_REG: arguments descriptor array.
void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
Label monomorphic;
__ BranchOnMonomorphicCheckedEntryJIT(&monomorphic);
@ -824,12 +824,12 @@ void StubCodeCompiler::GenerateFixCallersTargetStub(Assembler* assembler) {
__ movq(CODE_REG,
Address(THR, target::Thread::fix_callers_target_code_offset()));
__ EnterStubFrame();
__ pushq(R10); // Preserve arguments descriptor array.
__ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
// Setup space on stack for return value.
__ pushq(Immediate(0));
__ CallRuntime(kFixCallersTargetRuntimeEntry, 0);
__ popq(CODE_REG); // Get Code object.
__ popq(R10); // Restore arguments descriptor array.
__ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
__ movq(RAX, FieldAddress(CODE_REG, target::Code::entry_point_offset()));
__ LeaveStubFrame();
__ jmp(RAX);
@ -1112,18 +1112,18 @@ void StubCodeCompiler::GenerateDeoptimizeStub(Assembler* assembler) {
}
// Input:
// RBX - icdata/megamorphic_cache
// IC_DATA_REG - icdata/megamorphic_cache
// RDI - arguments descriptor size
static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler,
Register receiver_reg) {
__ pushq(Immediate(0)); // Setup space on stack for result.
__ pushq(receiver_reg); // Receiver.
__ pushq(RBX); // ICData/MegamorphicCache.
__ pushq(R10); // Arguments descriptor array.
__ pushq(IC_DATA_REG); // ICData/MegamorphicCache.
__ pushq(ARGS_DESC_REG); // Arguments descriptor array.
// Adjust arguments count.
__ OBJ(cmp)(
FieldAddress(R10, target::ArgumentsDescriptor::type_args_len_offset()),
__ OBJ(cmp)(FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::type_args_len_offset()),
Immediate(0));
__ OBJ(mov)(R10, RDI);
Label args_count_ok;
@ -1143,8 +1143,8 @@ static void GenerateNoSuchMethodDispatcherBody(Assembler* assembler,
}
// Input:
// RBX - icdata/megamorphic_cache
// R10 - argument descriptor
// IC_DATA_REG - icdata/megamorphic_cache
// ARGS_DESC_REG - argument descriptor
static void GenerateDispatcherCode(Assembler* assembler,
Label* call_target_function) {
__ Comment("NoSuchMethodDispatch");
@ -1155,8 +1155,8 @@ static void GenerateDispatcherCode(Assembler* assembler,
__ EnterStubFrame();
// Load the receiver.
__ OBJ(mov)(RDI,
FieldAddress(R10, target::ArgumentsDescriptor::size_offset()));
__ OBJ(mov)(RDI, FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::size_offset()));
__ movq(RAX,
Address(RBP, RDI, TIMES_HALF_WORD_SIZE,
target::frame_layout.param_end_from_fp * target::kWordSize));
@ -1165,16 +1165,17 @@ static void GenerateDispatcherCode(Assembler* assembler,
}
// Input:
// RBX - icdata/megamorphic_cache
// IC_DATA_REG - icdata/megamorphic_cache
// RDX - receiver
void StubCodeCompiler::GenerateNoSuchMethodDispatcherStub(
Assembler* assembler) {
__ EnterStubFrame();
__ movq(R10, FieldAddress(
RBX, target::CallSiteData::arguments_descriptor_offset()));
__ OBJ(mov)(RDI,
FieldAddress(R10, target::ArgumentsDescriptor::size_offset()));
__ movq(ARGS_DESC_REG,
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
__ OBJ(mov)(RDI, FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::size_offset()));
GenerateNoSuchMethodDispatcherBody(assembler, /*receiver_reg=*/RDX);
}
@ -2221,7 +2222,7 @@ void StubCodeCompiler::GenerateOptimizedUsageCounterIncrement(
__ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
// Loads function into 'temp_reg', preserves 'ic_reg'.
// Loads function into 'temp_reg', preserves IC_DATA_REG.
void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
Register temp_reg) {
if (FLAG_precompiled_mode) {
@ -2229,11 +2230,11 @@ void StubCodeCompiler::GenerateUsageCounterIncrement(Assembler* assembler,
return;
}
if (FLAG_optimization_counter_threshold >= 0) {
Register ic_reg = RBX;
Register func_reg = temp_reg;
ASSERT(ic_reg != func_reg);
ASSERT(func_reg != IC_DATA_REG);
__ Comment("Increment function counter");
__ movq(func_reg, FieldAddress(ic_reg, target::ICData::owner_offset()));
__ movq(func_reg,
FieldAddress(IC_DATA_REG, target::ICData::owner_offset()));
__ incl(FieldAddress(func_reg, target::Function::usage_counter_offset()));
}
}
@ -2405,19 +2406,22 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
if (type == kInstanceCall) {
__ LoadTaggedClassIdMayBeSmi(RAX, RDX);
__ movq(R10, FieldAddress(
RBX, target::CallSiteData::arguments_descriptor_offset()));
__ movq(
ARGS_DESC_REG,
FieldAddress(RBX, target::CallSiteData::arguments_descriptor_offset()));
if (num_args == 2) {
__ OBJ(mov)(
RCX, FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
__ OBJ(mov)(RCX,
FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::count_offset()));
__ movq(R9, Address(RSP, RCX, TIMES_4, -target::kWordSize));
__ LoadTaggedClassIdMayBeSmi(RCX, R9);
}
} else {
__ movq(R10, FieldAddress(
RBX, target::CallSiteData::arguments_descriptor_offset()));
__ OBJ(mov)(RCX,
FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
__ movq(
ARGS_DESC_REG,
FieldAddress(RBX, target::CallSiteData::arguments_descriptor_offset()));
__ OBJ(mov)(RCX, FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::count_offset()));
__ movq(RDX, Address(RSP, RCX, TIMES_4, 0));
__ LoadTaggedClassIdMayBeSmi(RAX, RDX);
if (num_args == 2) {
@ -2474,15 +2478,15 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ Comment("IC miss");
// Compute address of arguments (first read number of arguments from
// arguments descriptor array and then compute address on the stack).
__ OBJ(mov)(RAX,
FieldAddress(R10, target::ArgumentsDescriptor::count_offset()));
__ OBJ(mov)(RAX, FieldAddress(ARGS_DESC_REG,
target::ArgumentsDescriptor::count_offset()));
__ leaq(RAX, Address(RSP, RAX, TIMES_4, 0)); // RAX is Smi.
__ EnterStubFrame();
if (save_entry_point) {
__ SmiTag(R8); // Entry-point offset is not Smi.
__ pushq(R8); // Preserve entry point.
}
__ pushq(R10); // Preserve arguments descriptor array.
__ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushq(RBX); // Preserve IC data object.
__ pushq(Immediate(0)); // Result slot.
// Push call arguments.
@ -2496,9 +2500,9 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
for (intptr_t i = 0; i < num_args + 1; i++) {
__ popq(RAX);
}
__ popq(RAX); // Pop returned function object into RAX.
__ popq(FUNCTION_REG); // Pop returned function object into FUNCTION_REG.
__ popq(RBX); // Restore IC data array.
__ popq(R10); // Restore arguments descriptor array.
__ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
if (save_entry_point) {
__ popq(R8); // Restore entry point.
__ SmiUntag(R8); // Entry-point offset is not Smi.
@ -2547,7 +2551,7 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
StaticTypeExactnessState::NotExact().Encode())));
__ Bind(&exactness_ok);
}
__ LoadCompressed(RAX, Address(R13, target_offset));
__ LoadCompressed(FUNCTION_REG, Address(R13, target_offset));
if (FLAG_optimization_counter_threshold >= 0) {
__ Comment("Update ICData counter");
@ -2558,13 +2562,13 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ Comment("Call target (via specified entry point)");
__ Bind(&call_target_function);
// FUNCTION_REG: Target function.
__ LoadCompressed(CODE_REG,
FieldAddress(RAX, target::Function::code_offset()));
__ LoadCompressed(
CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
if (save_entry_point) {
__ addq(R8, RAX);
__ jmp(Address(R8, 0));
} else {
__ jmp(FieldAddress(RAX, target::Function::entry_point_offset()));
__ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
}
if (exactness == kCheckExactness) {
@ -2575,11 +2579,11 @@ void StubCodeCompiler::GenerateNArgsCheckInlineCacheStub(
__ addq(Address(R13, count_offset), Immediate(target::ToRawSmi(1)));
}
__ Comment("Call target (via unchecked entry point)");
__ LoadCompressed(RAX, Address(R13, target_offset));
__ LoadCompressed(CODE_REG,
FieldAddress(RAX, target::Function::code_offset()));
__ jmp(FieldAddress(
RAX, target::Function::entry_point_offset(CodeEntryKind::kUnchecked)));
__ LoadCompressed(FUNCTION_REG, Address(R13, target_offset));
__ LoadCompressed(
CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ jmp(FieldAddress(FUNCTION_REG, target::Function::entry_point_offset(
CodeEntryKind::kUnchecked)));
}
#if !defined(PRODUCT)
@ -2753,15 +2757,16 @@ void StubCodeCompiler::GenerateZeroArgsUnoptimizedStaticCallStub(
}
// Load arguments descriptor into ARGS_DESC_REG (R10).
__ movq(R10, FieldAddress(
RBX, target::CallSiteData::arguments_descriptor_offset()));
__ movq(
ARGS_DESC_REG,
FieldAddress(RBX, target::CallSiteData::arguments_descriptor_offset()));
// Get function and call it, if possible.
__ LoadCompressed(RAX, Address(R12, target_offset));
__ LoadCompressed(CODE_REG,
FieldAddress(RAX, target::Function::code_offset()));
__ LoadCompressed(FUNCTION_REG, Address(R12, target_offset));
__ LoadCompressed(
CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ addq(R8, RAX);
__ addq(R8, FUNCTION_REG);
__ jmp(Address(R8, 0));
#if !defined(PRODUCT)
@ -2799,20 +2804,21 @@ void StubCodeCompiler::GenerateTwoArgsUnoptimizedStaticCallStub(
}
// Stub for compiling a function and jumping to the compiled code.
// R10: Arguments descriptor.
// RAX: Function.
// ARGS_DESC_REG: Arguments descriptor.
// FUNCTION_REG: Function.
void StubCodeCompiler::GenerateLazyCompileStub(Assembler* assembler) {
__ EnterStubFrame();
__ pushq(R10); // Preserve arguments descriptor array.
__ pushq(RAX); // Pass function.
__ pushq(ARGS_DESC_REG); // Preserve arguments descriptor array.
__ pushq(FUNCTION_REG); // Pass function.
__ CallRuntime(kCompileFunctionRuntimeEntry, 1);
__ popq(RAX); // Restore function.
__ popq(R10); // Restore arguments descriptor array.
__ popq(FUNCTION_REG); // Restore function.
__ popq(ARGS_DESC_REG); // Restore arguments descriptor array.
__ LeaveStubFrame();
__ LoadCompressed(CODE_REG,
FieldAddress(RAX, target::Function::code_offset()));
__ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
__ LoadCompressed(
CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ movq(RCX,
FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ jmp(RCX);
}
@ -3221,21 +3227,22 @@ void StubCodeCompiler::GenerateDeoptForRewindStub(Assembler* assembler) {
// Calls to the runtime to optimize the given function.
// RDI: function to be reoptimized.
// R10: argument descriptor (preserved).
// ARGS_DESC_REG: argument descriptor (preserved).
void StubCodeCompiler::GenerateOptimizeFunctionStub(Assembler* assembler) {
__ movq(CODE_REG, Address(THR, target::Thread::optimize_stub_offset()));
__ EnterStubFrame();
__ pushq(R10); // Preserve args descriptor.
__ pushq(ARGS_DESC_REG); // Preserve args descriptor.
__ pushq(Immediate(0)); // Result slot.
__ pushq(RDI); // Arg0: function to optimize
__ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
__ popq(RAX); // Discard argument.
__ popq(RAX); // Get Code object.
__ popq(R10); // Restore argument descriptor.
__ popq(FUNCTION_REG); // Get Function object.
__ popq(ARGS_DESC_REG); // Restore argument descriptor.
__ LeaveStubFrame();
__ LoadCompressed(CODE_REG,
FieldAddress(RAX, target::Function::code_offset()));
__ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
__ LoadCompressed(
CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
__ movq(RCX,
FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
__ jmp(RCX);
__ int3();
}
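The reoptimize path mirrors lazy compilation, but passes the function as an explicit runtime argument and receives it back through the result slot, which is why FUNCTION_REG is popped directly from that slot above. A hedged sketch with a stand-in runtime entry:

#include <cstdint>

// Sketch only: models GenerateOptimizeFunctionStub's flow.
struct Code { uintptr_t entry_point; };
struct Function { Code* code; };

// Stands in for kOptimizeInvokedFunctionRuntimeEntry: compiles an
// optimized version and returns the (same) function object.
Function* OptimizeInvokedFunction(Function* f);

uintptr_t Reoptimize(Function* function) {
  // Stub stack layout: saved ARGS_DESC_REG, result slot, function
  // argument; the result slot is popped into FUNCTION_REG.
  Function* optimized = OptimizeInvokedFunction(function);
  // CODE_REG is reloaded before the jump so the callee finds its code.
  return optimized->code->entry_point;
}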
@ -3333,10 +3340,11 @@ void StubCodeCompiler::GenerateOptimizedIdenticalWithNumberCheckStub(
// Called from megamorphic calls.
// RDX: receiver (passed to target)
// RBX: target::MegamorphicCache (preserved)
// IC_DATA_REG: target::MegamorphicCache (preserved)
// Passed to target:
// FUNCTION_REG: target function
// CODE_REG: target Code
// R10: arguments descriptor
// ARGS_DESC_REG: arguments descriptor
void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// Jump if receiver is a smi.
Label smi_case;
@ -3349,8 +3357,10 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
Label cid_loaded;
__ Bind(&cid_loaded);
__ movq(R9, FieldAddress(RBX, target::MegamorphicCache::mask_offset()));
__ movq(RDI, FieldAddress(RBX, target::MegamorphicCache::buckets_offset()));
__ movq(R9,
FieldAddress(IC_DATA_REG, target::MegamorphicCache::mask_offset()));
__ movq(RDI, FieldAddress(IC_DATA_REG,
target::MegamorphicCache::buckets_offset()));
// R9: mask as a smi.
// RDI: cache buckets array.
@ -3379,14 +3389,17 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
// proper target for the given name and arguments descriptor. If the
// illegal class id was found, the target is a cache miss handler that can
// be invoked as a normal Dart function.
__ LoadCompressed(RAX, FieldAddress(RDI, RCX, TIMES_COMPRESSED_WORD_SIZE,
__ LoadCompressed(FUNCTION_REG,
FieldAddress(RDI, RCX, TIMES_COMPRESSED_WORD_SIZE,
base + target::kCompressedWordSize));
__ movq(R10, FieldAddress(
RBX, target::CallSiteData::arguments_descriptor_offset()));
__ movq(RCX, FieldAddress(RAX, target::Function::entry_point_offset()));
__ movq(ARGS_DESC_REG,
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
__ movq(RCX,
FieldAddress(FUNCTION_REG, target::Function::entry_point_offset()));
if (!FLAG_precompiled_mode) {
__ LoadCompressed(CODE_REG,
FieldAddress(RAX, target::Function::code_offset()));
__ LoadCompressed(
CODE_REG, FieldAddress(FUNCTION_REG, target::Function::code_offset()));
}
__ jmp(RCX);
@ -3411,13 +3424,14 @@ void StubCodeCompiler::GenerateMegamorphicCallStub(Assembler* assembler) {
}
// Input:
// RBX - icdata
// IC_DATA_REG - icdata
// RDX - receiver object
void StubCodeCompiler::GenerateICCallThroughCodeStub(Assembler* assembler) {
Label loop, found, miss;
__ movq(R13, FieldAddress(RBX, target::ICData::entries_offset()));
__ movq(R10, FieldAddress(
RBX, target::CallSiteData::arguments_descriptor_offset()));
__ movq(R13, FieldAddress(IC_DATA_REG, target::ICData::entries_offset()));
__ movq(ARGS_DESC_REG,
FieldAddress(IC_DATA_REG,
target::CallSiteData::arguments_descriptor_offset()));
__ leaq(R13, FieldAddress(R13, target::Array::data_offset()));
// R13: first IC entry
__ LoadTaggedClassIdMayBeSmi(RAX, RDX);


@ -316,8 +316,11 @@ const Register PP = R5; // Caches object pool pointer in generated code.
const Register DISPATCH_TABLE_REG = NOTFP; // Dispatch table register.
const Register SPREG = SP; // Stack pointer register.
const Register FPREG = FP; // Frame pointer register.
const Register IC_DATA_REG = R9; // ICData/MegamorphicCache register.
const Register ARGS_DESC_REG = R4;
const Register CODE_REG = R6;
// Set when calling Dart functions in JIT mode, used by LazyCompileStub.
const Register FUNCTION_REG = R0;
const Register THR = R10; // Caches current thread in generated code.
const Register CALLEE_SAVED_TEMP = R8;
@ -432,7 +435,6 @@ struct InitStaticFieldABI {
// Registers used inside the implementation of InitLateStaticFieldStub.
struct InitLateStaticFieldInternalRegs {
static const Register kFunctionReg = R0;
static const Register kAddressReg = R3;
static const Register kScratchReg = R4;
};
@ -446,7 +448,6 @@ struct InitInstanceFieldABI {
// Registers used inside the implementation of InitLateInstanceFieldStub.
struct InitLateInstanceFieldInternalRegs {
static const Register kFunctionReg = R0;
static const Register kAddressReg = R3;
static const Register kScratchReg = R4;
};


@ -143,8 +143,11 @@ const Register TMP2 = R17;
const Register PP = R27; // Caches object pool pointer in generated code.
const Register DISPATCH_TABLE_REG = R21; // Dispatch table register.
const Register CODE_REG = R24;
// Set when calling Dart functions in JIT mode, used by LazyCompileStub.
const Register FUNCTION_REG = R0;
const Register FPREG = FP; // Frame pointer register.
const Register SPREG = R15; // Stack pointer register.
const Register IC_DATA_REG = R5; // ICData/MegamorphicCache register.
const Register ARGS_DESC_REG = R4; // Arguments descriptor register.
const Register THR = R26; // Caches current thread in generated code.
const Register CALLEE_SAVED_TEMP = R19;
@ -266,7 +269,6 @@ struct InitStaticFieldABI {
// Registers used inside the implementation of InitLateStaticFieldStub.
struct InitLateStaticFieldInternalRegs {
static const Register kFunctionReg = R0;
static const Register kAddressReg = R3;
static const Register kScratchReg = R4;
};
@ -280,7 +282,6 @@ struct InitInstanceFieldABI {
// Registers used inside the implementation of InitLateInstanceFieldStub.
struct InitLateInstanceFieldInternalRegs {
static const Register kFunctionReg = R0;
static const Register kAddressReg = R3;
static const Register kScratchReg = R4;
};


@ -82,9 +82,12 @@ extern const char* const fpu_reg_names[kNumberOfXmmRegisters];
const Register TMP = kNoRegister; // No scratch register used by assembler.
const Register TMP2 = kNoRegister; // No second assembler scratch register.
const Register CODE_REG = EDI;
// Set when calling Dart functions in JIT mode, used by LazyCompileStub.
const Register FUNCTION_REG = EAX;
const Register PP = kNoRegister; // No object pool pointer.
const Register SPREG = ESP; // Stack pointer register.
const Register FPREG = EBP; // Frame pointer register.
const Register IC_DATA_REG = ECX; // ICData/MegamorphicCache register.
const Register ARGS_DESC_REG = EDX; // Arguments descriptor register.
const Register THR = ESI; // Caches current thread in generated code.
const Register CALLEE_SAVED_TEMP = EBX;
@ -166,7 +169,6 @@ struct InitStaticFieldABI {
// Registers used inside the implementation of InitLateStaticFieldStub.
struct InitLateStaticFieldInternalRegs {
static const Register kFunctionReg = EAX;
static const Register kAddressReg = ECX;
static const Register kScratchReg = EDI;
};
@ -180,7 +182,6 @@ struct InitInstanceFieldABI {
// Registers used inside the implementation of InitLateInstanceFieldStub.
struct InitLateInstanceFieldInternalRegs {
static const Register kFunctionReg = EAX;
static const Register kAddressReg = ECX;
static const Register kScratchReg = EDI;
};


@ -153,6 +153,8 @@ constexpr Register FAR_TMP = S8;
constexpr Register PP = A5; // Caches object pool pointer in generated code.
constexpr Register DISPATCH_TABLE_REG = S9; // Dispatch table register.
constexpr Register CODE_REG = A2;
// Set when calling Dart functions in JIT mode, used by LazyCompileStub.
constexpr Register FUNCTION_REG = T0;
constexpr Register FPREG = FP; // Frame pointer register.
constexpr Register SPREG = SP; // Stack pointer register.
constexpr Register IC_DATA_REG = S5; // ICData/MegamorphicCache register.
@ -277,7 +279,6 @@ struct InitStaticFieldABI {
// Registers used inside the implementation of InitLateStaticFieldStub.
struct InitLateStaticFieldInternalRegs {
static const Register kFunctionReg = T0;
static const Register kAddressReg = T3;
static const Register kScratchReg = T4;
};
@ -291,8 +292,6 @@ struct InitInstanceFieldABI {
// Registers used inside the implementation of InitLateInstanceFieldStub.
struct InitLateInstanceFieldInternalRegs {
static constexpr Register kFunctionReg =
T0; // Must agree with lazy compile stub.
static constexpr Register kAddressReg = T3;
static constexpr Register kScratchReg = T4;
};


@ -120,8 +120,11 @@ const Register TMP2 = kNoRegister; // No second assembler scratch register.
const Register PP = R15;
const Register SPREG = RSP; // Stack pointer register.
const Register FPREG = RBP; // Frame pointer register.
const Register IC_DATA_REG = RBX; // ICData/MegamorphicCache register.
const Register ARGS_DESC_REG = R10; // Arguments descriptor register.
const Register CODE_REG = R12;
// Set when calling Dart functions in JIT mode, used by LazyCompileStub.
const Register FUNCTION_REG = RAX;
const Register THR = R14; // Caches current thread in generated code.
const Register CALLEE_SAVED_TEMP = RBX;
@ -235,7 +238,6 @@ struct InitStaticFieldABI {
// Registers used inside the implementation of InitLateStaticFieldStub.
struct InitLateStaticFieldInternalRegs {
static const Register kFunctionReg = RAX;
static const Register kAddressReg = RCX;
static const Register kScratchReg = RSI;
};
@ -249,7 +251,6 @@ struct InitInstanceFieldABI {
// Registers used inside the implementation of InitLateInstanceFieldStub.
struct InitLateInstanceFieldInternalRegs {
static const Register kFunctionReg = RAX;
static const Register kAddressReg = RCX;
static const Register kScratchReg = RSI;
};
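Collected from the definitions and stub comments in this change, the JIT Dart call registers line up per architecture as follows:

           FUNCTION_REG  ARGS_DESC_REG  IC_DATA_REG  CODE_REG
arm        R0            R4             R9           R6
arm64      R0            R4             R5           R24
ia32       EAX           EDX            ECX          EDI
riscv      T0            S4             S5           A2
x64        RAX           R10            RBX          R12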