[vm/ffi] Replicate transition to/from native code around FFI calls.

Change-Id: I49be874b47b63a0863ed58d26f417cb957b89380
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/98463
Commit-Queue: Samir Jindel <sjindel@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
This commit is contained in:
Samir Jindel 2019-04-11 11:59:54 +00:00 committed by commit-bot@chromium.org
parent 460d00616c
commit a7a87df513
36 changed files with 678 additions and 143 deletions

View file

@ -8,6 +8,7 @@
#include <stdlib.h>
#include <sys/types.h>
#include "platform/assert.h"
#include "platform/globals.h"
#if defined(HOST_OS_WINDOWS)
#include <psapi.h>
@ -469,6 +470,36 @@ DART_EXPORT void* LargePointer() {
return *reinterpret_cast<void**>(&origin);
}
// Allocates 'count'-many Mint boxes, to stress-test GC during an FFI call.
DART_EXPORT void AllocateMints(uint64_t count) {
  Dart_EnterScope();
  // 0x8000000000000001 does not fit in a Smi, so every iteration allocates a
  // heap Mint box. The handles are dropped when the scope exits.
  uint64_t remaining = count;
  while (remaining != 0) {
    Dart_NewInteger(0x8000000000000001);
    --remaining;
  }
  Dart_ExitScope();
}
// Calls a Dart function to allocate 'count' objects.
// Used for stress-testing GC when re-entering the API.
DART_EXPORT void AllocateThroughDart(uint64_t count) {
  Dart_EnterScope();
  Dart_Handle root = Dart_RootLibrary();
  Dart_Handle arguments[1] = {Dart_NewIntegerFromUint64(count)};
  Dart_Handle result = Dart_Invoke(
      root, Dart_NewStringFromCString("testAllocationsInDartHelper"), 1,
      arguments);
  if (Dart_IsError(result)) {
    // 'error' is only written on success of Dart_StringToCString; initialize
    // it so the fprintf below never dereferences an indeterminate pointer if
    // the error message itself cannot be converted.
    const char* error = "<unknown error>";
    Dart_StringToCString(Dart_ToString(result), &error);
    fprintf(stderr, "Could not call 'testAllocationsInDartHelper': %s\n",
            error);
    Dart_DumpNativeStackTrace(nullptr);
    Dart_PrepareToAbort();
    abort();
  }
  Dart_ExitScope();
}
#if !defined(_WIN32)
DART_EXPORT int RedirectStderr() {
char filename[256];

View file

@ -3372,4 +3372,10 @@ DART_EXPORT bool Dart_IsPrecompiledRuntime();
*/
DART_EXPORT void Dart_DumpNativeStackTrace(void* context);
/**
 * Indicates that the process is about to abort, and the Dart VM should not
 * attempt to clean up resources.
 */
DART_EXPORT void Dart_PrepareToAbort();
#endif /* INCLUDE_DART_API_H_ */ /* NOLINT */

View file

@ -547,6 +547,90 @@ void Assembler::strex(Register rd, Register rt, Register rn, Condition cond) {
Emit(encoding);
}
// Switches the thread from generated Dart code to native code and enters the
// safepoint, recording 'destination_address' as the VM tag.
//
// 'addr' and 'state' are scratch registers; TMP is also clobbered on the
// ldrex/strex path.
void Assembler::TransitionGeneratedToNative(Register destination_address,
                                            Register addr,
                                            Register state) {
  // Save exit frame information to enable stack walking.
  StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());

  // Mark that the thread is executing native code.
  StoreToOffset(kWord, destination_address, THR, Thread::vm_tag_offset());
  LoadImmediate(state, compiler::target::Thread::native_execution_state());
  StoreToOffset(kWord, state, THR, Thread::execution_state_offset());

  if (TargetCPUFeatures::arm_version() == ARMv5TE) {
    // ARMv5TE has no ldrex/strex, so always go through the stub.
    EnterSafepointSlowly();
  } else {
    Label slow_path, done, retry;
    // Compute the address of Thread::safepoint_state_.
    LoadImmediate(addr, compiler::target::Thread::safepoint_state_offset());
    add(addr, THR, Operand(addr));

    // Try to CAS safepoint_state from 'unacquired' to 'acquired' with an
    // ldrex/strex loop; if a different value is observed, call the
    // enter-safepoint stub instead.
    Bind(&retry);
    ldrex(state, addr);
    cmp(state, Operand(Thread::safepoint_state_unacquired()));
    b(&slow_path, NE);

    mov(state, Operand(Thread::safepoint_state_acquired()));
    strex(TMP, state, addr);
    cmp(TMP, Operand(0));  // 0 means strex was successful.
    b(&done, EQ);
    b(&retry);  // Exclusive store lost its reservation; retry the CAS.

    Bind(&slow_path);
    EnterSafepointSlowly();

    Bind(&done);
  }
}
// Slow path of TransitionGeneratedToNative: tail of the safepoint-entry
// sequence performed by calling the enter-safepoint stub cached in the
// Thread object. Clobbers TMP.
void Assembler::EnterSafepointSlowly() {
  ldr(TMP,
      Address(THR, compiler::target::Thread::enter_safepoint_stub_offset()));
  ldr(TMP, FieldAddress(TMP, compiler::target::Code::entry_point_offset()));
  blx(TMP);
}
// Leaves the safepoint and switches the thread back from native code to
// generated Dart code (restores the VM tag, execution state, and clears the
// exit frame info).
//
// 'addr' and 'state' are scratch registers; TMP is also clobbered on the
// ldrex/strex path.
void Assembler::TransitionNativeToGenerated(Register addr, Register state) {
  if (TargetCPUFeatures::arm_version() == ARMv5TE) {
    // ARMv5TE has no ldrex/strex, so always go through the stub.
    ExitSafepointSlowly();
  } else {
    Label slow_path, done, retry;
    // Compute the address of Thread::safepoint_state_.
    LoadImmediate(addr, compiler::target::Thread::safepoint_state_offset());
    add(addr, THR, Operand(addr));

    // Try to CAS safepoint_state from 'acquired' back to 'unacquired'; if a
    // different value is observed, call the exit-safepoint stub instead.
    Bind(&retry);
    ldrex(state, addr);
    cmp(state, Operand(Thread::safepoint_state_acquired()));
    b(&slow_path, NE);

    mov(state, Operand(Thread::safepoint_state_unacquired()));
    strex(TMP, state, addr);
    cmp(TMP, Operand(0));  // 0 means strex was successful.
    b(&done, EQ);
    b(&retry);  // Exclusive store lost its reservation; retry the CAS.

    Bind(&slow_path);
    ExitSafepointSlowly();

    Bind(&done);
  }

  // Mark that the thread is executing Dart code.
  LoadImmediate(state, compiler::target::Thread::vm_tag_compiled_id());
  StoreToOffset(kWord, state, THR, Thread::vm_tag_offset());
  LoadImmediate(state, compiler::target::Thread::generated_execution_state());
  StoreToOffset(kWord, state, THR, Thread::execution_state_offset());

  // Reset exit frame information in Isolate structure.
  LoadImmediate(state, 0);
  StoreToOffset(kWord, state, THR, Thread::top_exit_frame_info_offset());
}
// Slow path of TransitionNativeToGenerated: leaves the safepoint by calling
// the exit-safepoint stub cached in the Thread object. Clobbers TMP.
void Assembler::ExitSafepointSlowly() {
  ldr(TMP,
      Address(THR, compiler::target::Thread::exit_safepoint_stub_offset()));
  ldr(TMP, FieldAddress(TMP, compiler::target::Code::entry_point_offset()));
  blx(TMP);
}
void Assembler::clrex() {
ASSERT(TargetCPUFeatures::arm_version() != ARMv5TE);
int32_t encoding = (kSpecialCondition << kConditionShift) | B26 | B24 | B22 |

View file

@ -510,6 +510,13 @@ class Assembler : public AssemblerBase {
void ldrex(Register rd, Register rn, Condition cond = AL);
void strex(Register rd, Register rt, Register rn, Condition cond = AL);
// Requires two temporary registers 'scratch0' and 'scratch1' (in addition to
// TMP).
void TransitionGeneratedToNative(Register destination_address,
Register scratch0,
Register scratch1);
void TransitionNativeToGenerated(Register scratch0, Register scratch1);
// Miscellaneous instructions.
void clrex();
void nop(Condition cond = AL);
@ -1291,6 +1298,9 @@ class Assembler : public AssemblerBase {
CanBeSmi can_be_smi,
BarrierFilterMode barrier_filter_mode);
void EnterSafepointSlowly();
void ExitSafepointSlowly();
friend class dart::FlowGraphCompiler;
std::function<void(Condition, Register)>
generate_invoke_write_barrier_wrapper_;

View file

@ -1305,6 +1305,76 @@ void Assembler::LeaveDartFrame(RestorePP restore_pp) {
LeaveFrame();
}
// Switches the thread from generated Dart code to native code and enters the
// safepoint, recording 'destination' as the VM tag.
//
// 'state' is a scratch register; TMP and TMP2 are also clobbered.
void Assembler::TransitionGeneratedToNative(Register destination,
                                            Register state) {
  Register addr = TMP2;

  // Save exit frame information to enable stack walking.
  StoreToOffset(FPREG, THR,
                compiler::target::Thread::top_exit_frame_info_offset());

  // Mark that the thread is executing native code.
  StoreToOffset(destination, THR, compiler::target::Thread::vm_tag_offset());
  LoadImmediate(state, Thread::native_execution_state());
  StoreToOffset(state, THR, compiler::target::Thread::execution_state_offset());

  // Try to CAS Thread::safepoint_state_ from 'unacquired' to 'acquired' with
  // an ldxr/stxr loop; if a different value is observed, call the
  // enter-safepoint stub instead.
  Label slow_path, done, retry;
  movz(addr, Immediate(compiler::target::Thread::safepoint_state_offset()), 0);
  add(addr, THR, Operand(addr));

  Bind(&retry);
  ldxr(state, addr);
  cmp(state, Operand(Thread::safepoint_state_unacquired()));
  b(&slow_path, NE);

  movz(state, Immediate(Thread::safepoint_state_acquired()), 0);
  stxr(TMP, state, addr);  // TMP receives the store-exclusive status flag.
  cbz(&done, TMP);         // 0 means stxr was successful.
  b(&retry);               // Lost the reservation; retry the CAS.

  Bind(&slow_path);
  ldr(addr,
      Address(THR, compiler::target::Thread::enter_safepoint_stub_offset()));
  ldr(addr, FieldAddress(addr, compiler::target::Code::entry_point_offset()));
  blr(addr);

  Bind(&done);
}
// Leaves the safepoint and switches the thread back from native code to
// generated Dart code (restores the VM tag, execution state, and clears the
// exit frame info).
//
// 'state' is a scratch register; TMP and TMP2 are also clobbered.
void Assembler::TransitionNativeToGenerated(Register state) {
  Register addr = TMP2;

  // Try to CAS Thread::safepoint_state_ from 'acquired' back to 'unacquired'
  // with an ldxr/stxr loop; if a different value is observed, call the
  // exit-safepoint stub instead.
  Label slow_path, done, retry;
  movz(addr, Immediate(compiler::target::Thread::safepoint_state_offset()), 0);
  add(addr, THR, Operand(addr));

  Bind(&retry);
  ldxr(state, addr);
  cmp(state, Operand(Thread::safepoint_state_acquired()));
  b(&slow_path, NE);

  movz(state, Immediate(Thread::safepoint_state_unacquired()), 0);
  stxr(TMP, state, addr);  // TMP receives the store-exclusive status flag.
  cbz(&done, TMP);         // 0 means stxr was successful.
  b(&retry);               // Lost the reservation; retry the CAS.

  Bind(&slow_path);
  ldr(addr,
      Address(THR, compiler::target::Thread::exit_safepoint_stub_offset()));
  // BUG FIX: the entry point must be loaded from the Code object just placed
  // in 'addr'. The previous code used FieldAddress(TMP, ...), but TMP holds
  // the stxr status flag at this point, not the Code pointer, so the call
  // target was read from a bogus base. (Mirrors the enter path in
  // TransitionGeneratedToNative, which correctly uses 'addr'.)
  ldr(addr, FieldAddress(addr, compiler::target::Code::entry_point_offset()));
  blr(addr);

  Bind(&done);

  // Mark that the thread is executing Dart code.
  LoadImmediate(state, compiler::target::Thread::vm_tag_compiled_id());
  StoreToOffset(state, THR, compiler::target::Thread::vm_tag_offset());
  LoadImmediate(state, compiler::target::Thread::generated_execution_state());
  StoreToOffset(state, THR, compiler::target::Thread::execution_state_offset());

  // Reset exit frame information in Isolate structure.
  StoreToOffset(ZR, THR,
                compiler::target::Thread::top_exit_frame_info_offset());
}
void Assembler::EnterCallRuntimeFrame(intptr_t frame_size) {
Comment("EnterCallRuntimeFrame");
EnterStubFrame();

View file

@ -1261,11 +1261,9 @@ class Assembler : public AssemblerBase {
ldr(reg, Address(SP, 1 * target::kWordSize, Address::PostIndex));
}
void PushPair(Register low, Register high) {
ASSERT((low != PP) && (high != PP));
stp(low, high, Address(SP, -2 * target::kWordSize, Address::PairPreIndex));
}
void PopPair(Register low, Register high) {
ASSERT((low != PP) && (high != PP));
ldp(low, high, Address(SP, 2 * target::kWordSize, Address::PairPostIndex));
}
void PushFloat(VRegister reg) {
@ -1529,6 +1527,12 @@ class Assembler : public AssemblerBase {
void LeaveFrame();
void Ret() { ret(LR); }
// These require that CSP and SP are equal and aligned.
// These require a scratch register (in addition to TMP/TMP2).
void TransitionGeneratedToNative(Register destination_address,
Register scratch);
void TransitionNativeToGenerated(Register scratch);
void CheckCodePointer();
void RestoreCodePointer();

View file

@ -2078,6 +2078,72 @@ void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
}
}
// Switches the thread from generated Dart code to native code and enters the
// safepoint, recording 'destination_address' as the VM tag.
//
// 'scratch' is clobbered; EAX is preserved via push/pop.
void Assembler::TransitionGeneratedToNative(Register destination_address,
                                            Register scratch) {
  // Save exit frame information to enable stack walking.
  movl(Address(THR, Thread::top_exit_frame_info_offset()), FPREG);

  // Mark that the thread is executing native code.
  movl(VMTagAddress(), destination_address);
  movl(Address(THR, Thread::execution_state_offset()),
       Immediate(compiler::target::Thread::native_execution_state()));

  // Compare and swap the value at Thread::safepoint_state from 'unacquired'
  // to 'acquired'. cmpxchg requires its expected value in EAX, so EAX is
  // preserved around the CAS and the observed old value is moved to
  // 'scratch' for the check below.
  pushl(EAX);
  movl(EAX, Immediate(Thread::safepoint_state_unacquired()));
  movl(scratch, Immediate(Thread::safepoint_state_acquired()));
  LockCmpxchgl(Address(THR, Thread::safepoint_state_offset()), scratch);
  movl(scratch, EAX);  // cmpxchg leaves the observed old value in EAX.
  popl(EAX);

  // If the old value was 'unacquired' the CAS succeeded; otherwise call the
  // enter-safepoint stub.
  cmpl(scratch, Immediate(Thread::safepoint_state_unacquired()));
  Label done;
  j(EQUAL, &done);

  movl(scratch,
       Address(THR, compiler::target::Thread::enter_safepoint_stub_offset()));
  movl(scratch,
       FieldAddress(scratch, compiler::target::Code::entry_point_offset()));
  call(scratch);

  Bind(&done);
}
// Leaves the safepoint and switches the thread back from native code to
// generated Dart code (restores the VM tag, execution state, and clears the
// exit frame info).
//
// 'scratch' is clobbered; EAX is preserved via push/pop.
void Assembler::TransitionNativeToGenerated(Register scratch) {
  // Compare and swap the value at Thread::safepoint_state from 'acquired'
  // back to 'unacquired'. cmpxchg requires its expected value in EAX, so EAX
  // is preserved around the CAS and the observed old value is moved to
  // 'scratch' for the check below.
  pushl(EAX);
  movl(EAX, Immediate(compiler::target::Thread::safepoint_state_acquired()));
  movl(scratch,
       Immediate(compiler::target::Thread::safepoint_state_unacquired()));
  LockCmpxchgl(Address(THR, compiler::target::Thread::safepoint_state_offset()),
               scratch);
  movl(scratch, EAX);  // cmpxchg leaves the observed old value in EAX.
  popl(EAX);

  // If the old value was 'acquired' the CAS succeeded; otherwise call the
  // exit-safepoint stub.
  cmpl(scratch, Immediate(Thread::safepoint_state_acquired()));
  Label done;
  j(EQUAL, &done);

  movl(scratch,
       Address(THR, compiler::target::Thread::exit_safepoint_stub_offset()));
  movl(scratch,
       FieldAddress(scratch, compiler::target::Code::entry_point_offset()));
  call(scratch);

  Bind(&done);

  // Mark that the thread is executing Dart code.
  movl(Assembler::VMTagAddress(),
       Immediate(compiler::target::Thread::vm_tag_compiled_id()));
  movl(Address(THR, Thread::execution_state_offset()),
       Immediate(compiler::target::Thread::generated_execution_state()));

  // Reset exit frame information in Isolate structure.
  movl(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
}
static const intptr_t kNumberOfVolatileCpuRegisters = 3;
static const Register volatile_cpu_registers[kNumberOfVolatileCpuRegisters] = {
EAX, ECX, EDX};

View file

@ -645,6 +645,12 @@ class Assembler : public AssemblerBase {
void LeaveFrame();
void ReserveAlignedFrameSpace(intptr_t frame_space);
// Require a temporary register 'tmp'.
// Clobber all non-CPU registers (e.g. XMM registers and the "FPU stack").
void TransitionGeneratedToNative(Register destination_address,
Register scratch);
void TransitionNativeToGenerated(Register scratch);
// Create a frame for calling into runtime that preserves all volatile
// registers. Frame's RSP is guaranteed to be correctly aligned and
// frame_space bytes are reserved under it.

View file

@ -164,6 +164,63 @@ void Assembler::setcc(Condition condition, ByteRegister dst) {
EmitUint8(0xC0 + (dst & 0x07));
}
// Switches the thread from generated Dart code to native code and enters the
// safepoint, recording 'destination_address' as the VM tag.
//
// TMP is clobbered; RAX is preserved via push/pop.
void Assembler::TransitionGeneratedToNative(Register destination_address) {
  // Save exit frame information to enable stack walking.
  movq(Address(THR, Thread::top_exit_frame_info_offset()), FPREG);

  // Mark that the thread is executing native code.
  movq(Assembler::VMTagAddress(), destination_address);
  movq(Address(THR, compiler::target::Thread::execution_state_offset()),
       Immediate(compiler::target::Thread::native_execution_state()));

  // Compare and swap the value at Thread::safepoint_state from unacquired to
  // acquired. If the CAS fails, go to a slow-path stub. cmpxchg requires its
  // expected value in RAX, so RAX is preserved around the CAS.
  Label done;
  pushq(RAX);
  movq(RAX, Immediate(Thread::safepoint_state_unacquired()));
  movq(TMP, Immediate(Thread::safepoint_state_acquired()));
  LockCmpxchgq(Address(THR, Thread::safepoint_state_offset()), TMP);
  movq(TMP, RAX);  // cmpxchg leaves the observed old value in RAX.
  popq(RAX);

  // If the old value was 'unacquired' the CAS succeeded; otherwise call the
  // enter-safepoint stub.
  cmpq(TMP, Immediate(Thread::safepoint_state_unacquired()));
  j(EQUAL, &done);

  movq(TMP,
       Address(THR, compiler::target::Thread::enter_safepoint_stub_offset()));
  movq(TMP, FieldAddress(TMP, compiler::target::Code::entry_point_offset()));
  CallCFunction(TMP);

  Bind(&done);
}
// Leaves the safepoint and switches the thread back from native code to
// generated Dart code (restores the VM tag, execution state, and clears the
// exit frame info).
//
// TMP is clobbered; RAX is preserved via push/pop.
void Assembler::TransitionNativeToGenerated() {
  // Compare and swap the value at Thread::safepoint_state from 'acquired'
  // back to 'unacquired'. cmpxchg requires its expected value in RAX, so RAX
  // is preserved around the CAS.
  Label done;
  pushq(RAX);
  movq(RAX, Immediate(Thread::safepoint_state_acquired()));
  movq(TMP, Immediate(Thread::safepoint_state_unacquired()));
  LockCmpxchgq(Address(THR, Thread::safepoint_state_offset()), TMP);
  movq(TMP, RAX);  // cmpxchg leaves the observed old value in RAX.
  popq(RAX);

  // If the old value was 'acquired' the CAS succeeded; otherwise call the
  // exit-safepoint stub.
  cmpq(TMP, Immediate(Thread::safepoint_state_acquired()));
  j(EQUAL, &done);

  movq(TMP,
       Address(THR, compiler::target::Thread::exit_safepoint_stub_offset()));
  movq(TMP, FieldAddress(TMP, compiler::target::Code::entry_point_offset()));
  CallCFunction(TMP);

  Bind(&done);

  // Mark that the thread is executing Dart code.
  movq(Assembler::VMTagAddress(),
       Immediate(compiler::target::Thread::vm_tag_compiled_id()));
  movq(Address(THR, Thread::execution_state_offset()),
       Immediate(compiler::target::Thread::generated_execution_state()));

  // Reset exit frame information in Isolate structure.
  movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
}
void Assembler::EmitQ(int reg,
const Address& address,
int opcode,

View file

@ -306,6 +306,9 @@ class Assembler : public AssemblerBase {
void setcc(Condition condition, ByteRegister dst);
void TransitionGeneratedToNative(Register destination_address);
void TransitionNativeToGenerated();
// Register-register, register-address and address-register instructions.
#define RR(width, name, ...) \
void name(Register dst, Register src) { Emit##width(dst, src, __VA_ARGS__); }

View file

@ -5233,7 +5233,8 @@ LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
ASSERT(((1 << CallingConventions::kFirstCalleeSavedCpuReg) &
CallingConventions::kArgumentRegisters) == 0);
#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_IA32)
#if defined(TARGET_ARCH_ARM64) || defined(TARGET_ARCH_IA32) || \
defined(TARGET_ARCH_ARM)
constexpr intptr_t kNumTemps = 2;
#else
constexpr intptr_t kNumTemps = 1;
@ -5248,7 +5249,8 @@ LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
CallingConventions::kFirstNonArgumentRegister));
summary->set_temp(0, Location::RegisterLocation(
CallingConventions::kSecondNonArgumentRegister));
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM64)
#if defined(TARGET_ARCH_IA32) || defined(TARGET_ARCH_ARM64) || \
defined(TARGET_ARCH_ARM)
summary->set_temp(1, Location::RegisterLocation(
CallingConventions::kFirstCalleeSavedCpuReg));
#endif

View file

@ -998,10 +998,6 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ set_constant_pool_allowed(false);
__ EnterDartFrame(0, /*load_pool_pointer=*/false);
// Save exit frame information to enable stack walking as we are about
// to transition to Dart VM C++ code.
__ StoreToOffset(kWord, FP, THR, Thread::top_exit_frame_info_offset());
// Reserve space for arguments and align frame before entering C++ world.
__ ReserveAlignedFrameSpace(compiler::ffi::NumStackSlots(arg_locations_) *
kWordSize);
@ -1047,35 +1043,24 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
// Mark that the thread is executing VM code.
__ StoreToOffset(kWord, R9, THR, Thread::vm_tag_offset());
// We need to copy the return address up into the dummy stack frame so the
// stack walker will know which safepoint to use.
constexpr intptr_t kCallSequenceLength = 16;
const intptr_t call_sequence_start = __ CodeSize();
__ mov(TMP, Operand(PC));
__ str(TMP, Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize));
// For historical reasons, the PC on ARM points 8 bytes past the current
// instruction.
__ add(TMP, TMP, Operand(kCallSequenceLength - 8));
__ str(TMP, Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize));
__ blx(branch);
ASSERT(__ CodeSize() - call_sequence_start == kCallSequenceLength);
// instruction. Therefore we emit the metadata here, 8 bytes (2 instructions)
// after the original mov.
compiler->EmitCallsiteMetadata(TokenPosition::kNoSource, DeoptId::kNone,
RawPcDescriptors::Kind::kOther, locs());
// Mark that the thread is executing Dart code.
__ LoadImmediate(R2, VMTag::kDartCompiledTagId);
__ StoreToOffset(kWord, R2, THR, Thread::vm_tag_offset());
// Update information in the thread object and enter a safepoint.
__ TransitionGeneratedToNative(branch, saved_fp, locs()->temp(1).reg());
// Reset exit frame information in Isolate structure.
__ LoadImmediate(R2, 0);
__ StoreToOffset(kWord, R2, THR, Thread::top_exit_frame_info_offset());
__ blx(branch);
// Update information in the thread object and leave the safepoint.
__ TransitionNativeToGenerated(saved_fp, locs()->temp(1).reg());
// Restore the global object pool after returning from runtime (old space is
// moving, so the GOP could have been relocated).

View file

@ -887,10 +887,8 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ set_constant_pool_allowed(false);
__ EnterDartFrame(0, PP);
// Save exit frame information to enable stack walking as we are about
// to transition to Dart VM C++ code.
__ StoreToOffset(FPREG, THR,
compiler::target::Thread::top_exit_frame_info_offset());
// Save the stack limit address.
__ PushRegister(CSP);
// Make space for arguments and align the frame.
__ ReserveAlignedFrameSpace(compiler::ffi::NumStackSlots(arg_locations_) *
@ -916,43 +914,29 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
// Mark that the thread is executing VM code.
__ StoreToOffset(branch, THR, compiler::target::Thread::vm_tag_offset());
// We need to copy the return address up into the dummy stack frame so the
// We need to copy a dummy return address up into the dummy stack frame so the
// stack walker will know which safepoint to use.
const intptr_t call_sequence_start = __ CodeSize();
// 5 instructions, 4 bytes each.
constexpr intptr_t kCallSequenceLength = 5 * 4;
__ adr(temp, Immediate(kCallSequenceLength));
__ StoreToOffset(temp, FPREG, kSavedCallerPcSlotFromFp * kWordSize);
// We are entering runtime code, so the C stack pointer must be restored from
// the stack limit to the top of the stack. We cache the stack limit address
// in a callee-saved register.
__ mov(temp, CSP);
__ mov(CSP, SP);
__ blr(branch);
ASSERT(__ CodeSize() - call_sequence_start == kCallSequenceLength);
// Restore the Dart stack pointer and the saved C stack pointer.
__ mov(SP, CSP);
__ mov(CSP, temp);
__ adr(temp, Immediate(0));
compiler->EmitCallsiteMetadata(token_pos(), DeoptId::kNone,
RawPcDescriptors::Kind::kOther, locs());
// Mark that the thread is executing Dart code.
__ LoadImmediate(temp, VMTag::kDartCompiledTagId);
__ StoreToOffset(temp, THR, compiler::target::Thread::vm_tag_offset());
__ StoreToOffset(temp, FPREG, kSavedCallerPcSlotFromFp * kWordSize);
// Reset exit frame information in Isolate structure.
__ StoreToOffset(ZR, THR,
compiler::target::Thread::top_exit_frame_info_offset());
// We are entering runtime code, so the C stack pointer must be restored from
// the stack limit to the top of the stack.
__ mov(CSP, SP);
// Update information in the thread object and enter a safepoint.
__ TransitionGeneratedToNative(branch, temp);
__ blr(branch);
// Update information in the thread object and leave the safepoint.
__ TransitionNativeToGenerated(temp);
// Restore the Dart stack pointer and the saved C stack pointer.
__ mov(SP, CSP);
__ LoadFromOffset(CSP, FPREG, kFirstLocalSlotFromFp * kWordSize);
// Refresh write barrier mask.
__ ldr(BARRIER_MASK,

View file

@ -846,9 +846,9 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register saved_fp = locs()->temp(0).reg();
Register saved_fp = locs()->temp(0).reg(); // volatile
Register branch = locs()->in(TargetAddressIndex()).reg();
Register tmp = locs()->temp(1).reg();
Register tmp = locs()->temp(1).reg(); // callee-saved
// Save frame pointer because we're going to update it when we enter the exit
// frame.
@ -861,10 +861,6 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadObject(CODE_REG, Object::null_object());
__ EnterDartFrame(compiler::ffi::NumStackSlots(arg_locations_) * kWordSize);
// Save exit frame information to enable stack walking as we are about
// to transition to Dart VM C++ code.
__ movl(Address(THR, Thread::top_exit_frame_info_offset()), FPREG);
// Align frame before entering C++ world.
if (OS::ActivationFrameAlignment() > 1) {
__ andl(SPREG, Immediate(~(OS::ActivationFrameAlignment() - 1)));
@ -911,33 +907,34 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
// Mark that the thread is executing VM code.
__ movl(Assembler::VMTagAddress(), branch);
// We need to copy the return address up into the dummy stack frame so the
// We need to copy a dummy return address up into the dummy stack frame so the
// stack walker will know which safepoint to use. Unlike X64, there's no
// PC-relative 'leaq' available, so we have do a trick with 'call'.
constexpr intptr_t kCallSequenceLength = 6;
Label get_pc;
__ call(&get_pc);
__ Bind(&get_pc);
const intptr_t call_sequence_start = __ CodeSize();
__ popl(tmp);
__ movl(Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), tmp);
__ call(branch);
ASSERT(__ CodeSize() - call_sequence_start == kCallSequenceLength);
compiler->EmitCallsiteMetadata(TokenPosition::kNoSource, DeoptId::kNone,
RawPcDescriptors::Kind::kOther, locs());
__ Bind(&get_pc);
__ popl(tmp);
__ movl(Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), tmp);
__ TransitionGeneratedToNative(branch, tmp);
__ call(branch);
// The x86 calling convention requires floating point values to be returned on
// the "floating-point stack" (aka. register ST0). We don't use the
// floating-point stack in Dart, so we need to move the return value back into
// an XMM register.
if (representation() == kUnboxedDouble || representation() == kUnboxedFloat) {
__ subl(SPREG, Immediate(8));
__ fstpl(Address(SPREG, 0));
__ TransitionNativeToGenerated(tmp);
__ fldl(Address(SPREG, 0));
__ addl(SPREG, Immediate(8));
} else {
__ TransitionNativeToGenerated(tmp);
}
if (representation() == kUnboxedDouble) {
__ subl(SPREG, Immediate(8));
__ fstpl(Address(SPREG, 0));
@ -948,12 +945,6 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ movss(XMM0, Address(SPREG, 0));
}
// Mark that the thread is executing Dart code.
__ movl(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Reset exit frame information in Isolate structure.
__ movl(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
// Leave dummy exit frame.
__ LeaveFrame();

View file

@ -897,10 +897,6 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ EnterDartFrame(compiler::ffi::NumStackSlots(arg_locations_) * kWordSize,
PP);
// Save exit frame information to enable stack walking as we are about to
// transition to Dart VM C++ code.
__ movq(Address(THR, Thread::top_exit_frame_info_offset()), FPREG);
// Align frame before entering C++ world.
if (OS::ActivationFrameAlignment() > 1) {
__ andq(SPREG, Immediate(~(OS::ActivationFrameAlignment() - 1)));
@ -927,35 +923,22 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
// Mark that the thread is executing VM code.
__ movq(Assembler::VMTagAddress(), target_address);
// We need to copy the return address up into the dummy stack frame so the
// stack walker will know which safepoint to use.
#if defined(TARGET_OS_WINDOWS)
constexpr intptr_t kCallSequenceLength = 10;
#else
constexpr intptr_t kCallSequenceLength = 6;
#endif
// RIP points to the *next* instruction, so 'AddressRIPRelative' loads the
// address of the following 'movq'.
__ leaq(TMP, Address::AddressRIPRelative(kCallSequenceLength));
const intptr_t call_sequence_start = __ CodeSize();
__ movq(Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), TMP);
__ CallCFunction(target_address);
ASSERT(__ CodeSize() - call_sequence_start == kCallSequenceLength);
// We need to copy a dummy return address up into the dummy stack frame so the
// stack walker will know which safepoint to use. RIP points to the *next*
// instruction, so 'AddressRIPRelative' loads the address of the following
// 'movq'.
__ leaq(TMP, Address::AddressRIPRelative(0));
compiler->EmitCallsiteMetadata(TokenPosition::kNoSource, DeoptId::kNone,
RawPcDescriptors::Kind::kOther, locs());
__ movq(Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), TMP);
// Mark that the thread is executing Dart code.
__ movq(Assembler::VMTagAddress(), Immediate(VMTag::kDartCompiledTagId));
// Update information in the thread object and enter a safepoint.
__ TransitionGeneratedToNative(target_address);
// Reset exit frame information in Isolate structure.
__ movq(Address(THR, Thread::top_exit_frame_info_offset()), Immediate(0));
__ CallCFunction(target_address);
// Update information in the thread object and leave the safepoint.
__ TransitionNativeToGenerated();
// Although PP is a callee-saved register, it may have been moved by the GC.
__ LeaveDartFrame(compiler::kRestoreCallerPP);

View file

@ -548,6 +548,25 @@ class RegisterSet : public ValueObject {
}
}
// Adds all registers which don't have a special purpose (e.g. FP, SP, PC,
// CSP, etc.), plus every FPU register. Used e.g. by the safepoint stubs to
// save/restore the full register state around a runtime call.
void AddAllGeneralRegisters() {
  for (intptr_t i = kNumberOfCpuRegisters - 1; i >= 0; --i) {
    Register reg = static_cast<Register>(i);
    if (reg == FPREG || reg == SPREG) continue;
#if defined(TARGET_ARCH_ARM)
    if (reg == PC) continue;
#elif defined(TARGET_ARCH_ARM64)
    if (reg == R31) continue;  // R31 encodes SP/ZR, not a general register.
#endif
    Add(Location::RegisterLocation(reg));
  }
  for (intptr_t i = kNumberOfFpuRegisters - 1; i >= 0; --i) {
    Add(Location::FpuRegisterLocation(static_cast<FpuRegister>(i)));
  }
}
void Add(Location loc, Representation rep = kTagged) {
if (loc.IsRegister()) {
cpu_registers_.Add(loc.reg());

View file

@ -214,6 +214,10 @@ void BailoutWithBranchOffsetError() {
Thread::Current()->long_jump_base()->Jump(1, Object::branch_offset_error());
}
// Returns the offset within the Thread object at which this runtime entry's
// function pointer is cached (forwards to the VM-side Thread).
word RuntimeEntry::OffsetFromThread() const {
  return dart::Thread::OffsetFromThread(runtime_entry_);
}
namespace target {
const word kPageSize = dart::kPageSize;
@ -495,6 +499,7 @@ word Array::header_size() {
V(Thread, top_offset) \
V(Thread, top_resource_offset) \
V(Thread, vm_tag_offset) \
V(Thread, safepoint_state_offset) \
V(TimelineStream, enabled_offset) \
V(TwoByteString, data_offset) \
V(Type, arguments_offset) \
@ -513,6 +518,14 @@ word Array::header_size() {
CLASS_NAME_LIST(DEFINE_FORWARDER)
#undef DEFINE_FORWARDER
// compiler::target::Thread forwarders for the safepoint-state constants,
// letting target-independent compiler code read them without including VM
// headers directly.
uword Thread::safepoint_state_unacquired() {
  return dart::Thread::safepoint_state_unacquired();
}
uword Thread::safepoint_state_acquired() {
  return dart::Thread::safepoint_state_acquired();
}
const word HeapPage::kBytesPerCardLog2 = dart::HeapPage::kBytesPerCardLog2;
const word String::kHashBits = dart::String::kHashBits;
@ -641,6 +654,30 @@ word Thread::deoptimize_stub_offset() {
return dart::Thread::deoptimize_stub_offset();
}
// compiler::target::Thread forwarders needed by the FFI safepoint
// transitions: stub offsets, execution-state constants, and the compiled
// VM tag.

// Offset of the cached enter-safepoint stub Code in the Thread object.
word Thread::enter_safepoint_stub_offset() {
  return dart::Thread::enter_safepoint_stub_offset();
}

// Offset of the cached exit-safepoint stub Code in the Thread object.
word Thread::exit_safepoint_stub_offset() {
  return dart::Thread::exit_safepoint_stub_offset();
}

// Offset of the thread's execution-state field.
word Thread::execution_state_offset() {
  return dart::Thread::execution_state_offset();
}

// Execution-state value while running native code.
uword Thread::native_execution_state() {
  return dart::Thread::ExecutionState::kThreadInNative;
}

// Execution-state value while running generated (Dart) code.
uword Thread::generated_execution_state() {
  return dart::Thread::ExecutionState::kThreadInGenerated;
}

// VM tag marking the thread as executing compiled Dart code.
uword Thread::vm_tag_compiled_id() {
  return dart::VMTag::kDartCompiledTagId;
}
#endif // !defined(TARGET_ARCH_DBC)
#define DECLARE_CONSTANT_OFFSET_GETTER(name) \

View file

@ -218,6 +218,8 @@ class RuntimeEntry : public ValueObject {
call_(runtime_entry_, assembler, argument_count);
}
word OffsetFromThread() const;
protected:
RuntimeEntry(const dart::RuntimeEntry* runtime_entry,
RuntimeEntryCallInternal call)
@ -571,6 +573,15 @@ class Thread : public AllStatic {
static word array_write_barrier_entry_point_offset();
static word write_barrier_entry_point_offset();
static word vm_tag_offset();
static uword vm_tag_compiled_id();
static word safepoint_state_offset();
static uword safepoint_state_unacquired();
static uword safepoint_state_acquired();
static word execution_state_offset();
static uword native_execution_state();
static uword generated_execution_state();
#if !defined(TARGET_ARCH_DBC)
static word write_barrier_code_offset();
@ -593,6 +604,8 @@ class Thread : public AllStatic {
static word lazy_deopt_from_return_stub_offset();
static word lazy_deopt_from_throw_stub_offset();
static word deoptimize_stub_offset();
static word enter_safepoint_stub_offset();
static word exit_safepoint_stub_offset();
#endif // !defined(TARGET_ARCH_DBC)
static word no_scope_native_wrapper_entry_point_offset();

View file

@ -239,6 +239,26 @@ void StubCodeCompiler::GenerateBuildMethodExtractorStub(
__ Ret();
}
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers);
__ ldr(R0, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
__ blx(R0);
__ PopRegisters(all_registers);
__ Ret();
}
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers);
__ ldr(R0, Address(THR, kExitSafepointRuntimeEntry.OffsetFromThread()));
__ blx(R0);
__ PopRegisters(all_registers);
__ Ret();
}
void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(

View file

@ -175,6 +175,36 @@ void StubCodeCompiler::GenerateSharedStub(
__ ret(LR);
}
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers);
__ mov(CallingConventions::kFirstCalleeSavedCpuReg, SP);
__ ReserveAlignedFrameSpace(0);
__ mov(CSP, SP);
__ ldr(R0, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
__ blr(R0);
__ mov(SP, CallingConventions::kFirstCalleeSavedCpuReg);
__ PopRegisters(all_registers);
__ mov(CSP, SP);
__ Ret();
}
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers);
__ mov(CallingConventions::kFirstCalleeSavedCpuReg, SP);
__ ReserveAlignedFrameSpace(0);
__ mov(CSP, SP);
__ ldr(R0, Address(THR, kExitSafepointRuntimeEntry.OffsetFromThread()));
__ blr(R0);
__ mov(SP, CallingConventions::kFirstCalleeSavedCpuReg);
__ PopRegisters(all_registers);
__ mov(CSP, SP);
__ Ret();
}
// R1: The extracted method.
// R4: The type_arguments_field_offset (or 0)
void StubCodeCompiler::GenerateBuildMethodExtractorStub(

View file

@ -107,6 +107,22 @@ void StubCodeCompiler::GenerateCallToRuntimeStub(Assembler* assembler) {
__ ret();
}
// Enters a GC safepoint on the transition from generated (Dart) code to
// native code around an FFI call.  PUSHAL/POPAL preserve all general-purpose
// registers across the call.
// NOTE(review): unlike the x64 variant, no floating-point state is saved
// here — confirm generated code keeps no live FPU/XMM values across this
// stub on ia32.
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
__ pushal();
// Address of the EnterSafepoint leaf runtime entry, cached on the Thread.
__ movl(EAX, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
__ call(EAX);
__ popal();
__ ret();
}
// Exits the GC safepoint on the transition back from native code to
// generated (Dart) code.  Mirrors GenerateEnterSafepointStub: all
// general-purpose registers are preserved via PUSHAL/POPAL.
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
__ pushal();
// Address of the ExitSafepoint leaf runtime entry, cached on the Thread.
__ movl(EAX, Address(THR, kExitSafepointRuntimeEntry.OffsetFromThread()));
__ call(EAX);
__ popal();
__ ret();
}
void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
__ Breakpoint();

View file

@ -2,10 +2,12 @@
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/compiler/runtime_api.h"
#include "vm/globals.h"
#define SHOULD_NOT_INCLUDE_RUNTIME
#include "vm/compiler/backend/locations.h"
#include "vm/compiler/stub_code_compiler.h"
#if defined(TARGET_ARCH_X64) && !defined(DART_PRECOMPILED_RUNTIME)
@ -166,6 +168,28 @@ void StubCodeCompiler::GenerateSharedStub(
__ ret();
}
// Enters a GC safepoint on the transition from generated (Dart) code to
// native code around an FFI call.  Both CPU and FPU registers are preserved,
// since the stub can be reached from arbitrary generated code.
void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers.cpu_registers(),
all_registers.fpu_registers());
// Address of the EnterSafepoint leaf runtime entry, cached on the Thread.
__ movq(RAX, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
// CallCFunction rather than a plain call — presumably to establish the C
// ABI stack alignment; confirm in assembler_x64.
__ CallCFunction(RAX);
__ PopRegisters(all_registers.cpu_registers(), all_registers.fpu_registers());
__ ret();
}
// Exits the GC safepoint on the transition back from native code to
// generated (Dart) code.  Mirrors GenerateEnterSafepointStub: all CPU and
// FPU registers are preserved across the C call.
void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
RegisterSet all_registers;
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers.cpu_registers(),
all_registers.fpu_registers());
// Address of the ExitSafepoint leaf runtime entry, cached on the Thread.
__ movq(RAX, Address(THR, kExitSafepointRuntimeEntry.OffsetFromThread()));
__ CallCFunction(RAX);
__ PopRegisters(all_registers.cpu_registers(), all_registers.fpu_registers());
__ ret();
}
// RBX: The extracted method.
// RDX: The type_arguments_field_offset (or 0)
void StubCodeCompiler::GenerateBuildMethodExtractorStub(

View file

@ -224,7 +224,7 @@ class CallingConventions {
static const intptr_t kNumArgRegs = 6;
static const XmmRegister FpuArgumentRegisters[];
static const intptr_t kXmmArgumentRegisters = R(XMM0) | R(XMM1) | R(XMM2) |
static const intptr_t kFpuArgumentRegisters = R(XMM0) | R(XMM1) | R(XMM2) |
R(XMM3) | R(XMM4) | R(XMM5) |
R(XMM6) | R(XMM7);
static const intptr_t kNumFpuArgRegs = 8;

View file

@ -6341,4 +6341,8 @@ DART_EXPORT void Dart_DumpNativeStackTrace(void* context) {
#endif
}
// Embedder API entry point: signals that the process is about to abort()
// so the VM should skip cleanup.  Thin forwarder to the OS layer.
DART_EXPORT void Dart_PrepareToAbort() {
OS::PrepareToAbort();
}
} // namespace dart

View file

@ -122,6 +122,9 @@ class OS {
// Cleanup the OS class.
static void Cleanup();
// Only implemented on Windows, prevents cleanup code from running.
static void PrepareToAbort();
DART_NORETURN static void Abort();
DART_NORETURN static void Exit(int code);

View file

@ -337,6 +337,8 @@ void OS::Init() {}
void OS::Cleanup() {}
void OS::PrepareToAbort() {}
// Terminates the process immediately without running VM cleanup.
void OS::Abort() {
abort();
}

View file

@ -258,6 +258,8 @@ void OS::Init() {
void OS::Cleanup() {}
void OS::PrepareToAbort() {}
// Terminates the process immediately without running VM cleanup.
void OS::Abort() {
abort();
}

View file

@ -659,6 +659,8 @@ void OS::Init() {}
void OS::Cleanup() {}
void OS::PrepareToAbort() {}
// Terminates the process immediately without running VM cleanup.
void OS::Abort() {
abort();
}

View file

@ -323,6 +323,8 @@ void OS::Init() {
void OS::Cleanup() {}
void OS::PrepareToAbort() {}
// Terminates the process immediately without running VM cleanup.
void OS::Abort() {
abort();
}

View file

@ -336,9 +336,13 @@ void OS::Cleanup() {
// ThreadLocalData::Cleanup();
}
void OS::Abort() {
// Disables Windows TLS-destructor cleanup before the process aborts, since
// abort() will not unwind cleanly and the destructors must not run.
void OS::PrepareToAbort() {
// TODO(zra): Remove once VM shuts down cleanly.
private_flag_windows_run_tls_destructors = false;
}
// Suppresses cleanup (see PrepareToAbort) and terminates the process.
void OS::Abort() {
PrepareToAbort();
abort();
}

View file

@ -2739,4 +2739,20 @@ RawObject* RuntimeEntry::InterpretCall(RawFunction* function,
#endif // defined(DART_PRECOMPILED_RUNTIME)
}
// Leaf runtime entry reached from the EnterSafepoint stub when generated
// code transitions into native code.  The NativeArguments parameter is
// unused; it exists only to match the leaf runtime entry signature.
extern "C" void DFLRT_EnterSafepoint(NativeArguments __unusable_) {
Thread* thread = Thread::Current();
// An exit frame must already be set up and the thread already marked as
// executing native code before the safepoint is entered.
ASSERT(thread->top_exit_frame_info() != 0);
ASSERT(thread->execution_state() == Thread::kThreadInNative);
thread->EnterSafepoint();
}
DEFINE_RAW_LEAF_RUNTIME_ENTRY(EnterSafepoint, 0, false, &DFLRT_EnterSafepoint);
// Leaf runtime entry reached from the ExitSafepoint stub when native code
// transitions back into generated code.  The NativeArguments parameter is
// unused; it exists only to match the leaf runtime entry signature.
extern "C" void DFLRT_ExitSafepoint(NativeArguments __unusable_) {
Thread* thread = Thread::Current();
// The thread is still marked kThreadInNative while leaving the safepoint;
// the execution-state transition happens in the surrounding call sequence.
ASSERT(thread->top_exit_frame_info() != 0);
ASSERT(thread->execution_state() == Thread::kThreadInNative);
thread->ExitSafepoint();
}
DEFINE_RAW_LEAF_RUNTIME_ENTRY(ExitSafepoint, 0, false, &DFLRT_ExitSafepoint);
} // namespace dart

View file

@ -77,7 +77,10 @@ namespace dart {
V(double, LibcAsin, double) \
V(double, LibcAtan, double) \
V(double, LibcAtan2, double, double) \
V(RawBool*, CaseInsensitiveCompareUC16, RawString*, RawSmi*, RawSmi*, RawSmi*)
V(RawBool*, CaseInsensitiveCompareUC16, RawString*, RawSmi*, RawSmi*, \
RawSmi*) \
V(void, EnterSafepoint) \
V(void, ExitSafepoint)
} // namespace dart

View file

@ -75,7 +75,9 @@ namespace dart {
V(StackOverflowSharedWithFPURegs) \
V(StackOverflowSharedWithoutFPURegs) \
V(OneArgCheckInlineCacheWithExactnessCheck) \
V(OneArgOptimizedCheckInlineCacheWithExactnessCheck)
V(OneArgOptimizedCheckInlineCacheWithExactnessCheck) \
V(EnterSafepoint) \
V(ExitSafepoint)
#else
#define VM_STUB_CODE_LIST(V) \

View file

@ -76,6 +76,8 @@ Thread::Thread(Isolate* isolate)
active_stacktrace_(Object::null()),
global_object_pool_(ObjectPool::null()),
resume_pc_(0),
execution_state_(kThreadInNative),
safepoint_state_(0),
task_kind_(kUnknownTask),
dart_stream_(NULL),
thread_lock_(new Monitor()),
@ -97,10 +99,9 @@ Thread::Thread(Isolate* isolate)
pending_functions_(GrowableObjectArray::null()),
sticky_error_(Error::null()),
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_INITIALIZERS)
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_INIT) safepoint_state_(0),
execution_state_(kThreadInNative),
REUSABLE_HANDLE_LIST(REUSABLE_HANDLE_SCOPE_INIT)
#if defined(USING_SAFE_STACK)
saved_safestack_limit_(0),
saved_safestack_limit_(0),
#endif
#if !defined(DART_PRECOMPILED_RUNTIME)
interpreter_(nullptr),

View file

@ -121,7 +121,9 @@ class Zone;
StubCode::DeoptimizeLazyFromThrow().raw(), NULL) \
V(RawCode*, slow_type_test_stub_, StubCode::SlowTypeTest().raw(), NULL) \
V(RawCode*, lazy_specialize_type_test_stub_, \
StubCode::LazySpecializeTypeTest().raw(), NULL)
StubCode::LazySpecializeTypeTest().raw(), NULL) \
V(RawCode*, enter_safepoint_stub_, StubCode::EnterSafepoint().raw(), NULL) \
V(RawCode*, exit_safepoint_stub_, StubCode::ExitSafepoint().raw(), NULL)
#endif
@ -307,6 +309,10 @@ class Thread : public ThreadState {
}
#endif
// Offset of safepoint_state_ within Thread, so compiled stubs can access
// the field directly off the THR register.
static intptr_t safepoint_state_offset() {
return OFFSET_OF(Thread, safepoint_state_);
}
TaskKind task_kind() const { return task_kind_; }
// Retrieves and clears the stack overflow flags. These are set by
@ -673,8 +679,8 @@ class Thread : public ThreadState {
do {
old_state = safepoint_state_;
new_state = SafepointRequestedField::update(value, old_state);
} while (AtomicOperations::CompareAndSwapUint32(
&safepoint_state_, old_state, new_state) != old_state);
} while (AtomicOperations::CompareAndSwapWord(&safepoint_state_, old_state,
new_state) != old_state);
return old_state;
}
static bool IsBlockedForSafepoint(uint32_t state) {
@ -706,7 +712,10 @@ class Thread : public ThreadState {
return static_cast<ExecutionState>(execution_state_);
}
void set_execution_state(ExecutionState state) {
execution_state_ = static_cast<uint32_t>(state);
execution_state_ = static_cast<uword>(state);
}
// Offset of execution_state_ within Thread, so compiled stubs can access
// the field directly off the THR register.
static intptr_t execution_state_offset() {
return OFFSET_OF(Thread, execution_state_);
}
virtual bool MayAllocateHandles() {
@ -714,10 +723,13 @@ class Thread : public ThreadState {
(execution_state() == kThreadInGenerated);
}
static uword safepoint_state_unacquired() { return SetAtSafepoint(false, 0); }
static uword safepoint_state_acquired() { return SetAtSafepoint(true, 0); }
bool TryEnterSafepoint() {
uint32_t new_state = SetAtSafepoint(true, 0);
if (AtomicOperations::CompareAndSwapUint32(&safepoint_state_, 0,
new_state) != 0) {
if (AtomicOperations::CompareAndSwapWord(&safepoint_state_, 0, new_state) !=
0) {
return false;
}
return true;
@ -735,8 +747,8 @@ class Thread : public ThreadState {
bool TryExitSafepoint() {
uint32_t old_state = SetAtSafepoint(true, 0);
if (AtomicOperations::CompareAndSwapUint32(&safepoint_state_, old_state,
0) != old_state) {
if (AtomicOperations::CompareAndSwapWord(&safepoint_state_, old_state, 0) !=
old_state) {
return false;
}
return true;
@ -838,6 +850,8 @@ class Thread : public ThreadState {
RawObject* active_stacktrace_;
RawObjectPool* global_object_pool_;
uword resume_pc_;
uword execution_state_;
uword safepoint_state_;
// ---- End accessed from generated code. ----
@ -889,8 +903,6 @@ class Thread : public ThreadState {
class SafepointRequestedField : public BitField<uint32_t, bool, 1, 1> {};
class BlockedForSafepointField : public BitField<uint32_t, bool, 2, 1> {};
class BypassSafepointsField : public BitField<uint32_t, bool, 3, 1> {};
uint32_t safepoint_state_;
uint32_t execution_state_;
#if defined(USING_SAFE_STACK)
uword saved_safestack_limit_;

View file

@ -18,15 +18,21 @@ import 'dylib_utils.dart';
import "package:expect/expect.dart";
import 'gc_helper.dart';
test(GCWatcher watcher, void Function() testee,
{bool mustTriggerGC: true}) async {
test(GCWatcher watcher, Function testee,
{bool mustTriggerGC: true, bool batched: false}) async {
// Warmup.
for (int i = 0; i < 1000; ++i) {
testee();
batched ? testee(1) : testee();
}
int size = await watcher.size();
for (int i = 0; i < 1000000; ++i) {
testee();
for (int i = 0; i < 1000000;) {
if (batched) {
testee(1000);
i += 1000;
} else {
testee();
i++;
}
}
int new_size = await watcher.size();
if (mustTriggerGC) {
@ -44,6 +50,8 @@ main() async {
await test(watcher, testBoxInt32, mustTriggerGC: false);
await test(watcher, testBoxDouble);
await test(watcher, testBoxPointer);
await test(watcher, testAllocateMints, batched: true);
await test(watcher, testAllocationsInDart, batched: true);
} finally {
watcher.dispose();
}
@ -56,9 +64,11 @@ typedef NativeNullaryOp64 = ffi.Int64 Function();
typedef NativeNullaryOp32 = ffi.Int32 Function();
typedef NativeNullaryOpDouble = ffi.Double Function();
typedef NativeNullaryOpPtr = ffi.Pointer<ffi.Void> Function();
typedef NativeUnaryOp = ffi.Void Function(ffi.Uint64);
typedef NullaryOp = int Function();
typedef NullaryOpDbl = double Function();
typedef NullaryOpPtr = ffi.Pointer<ffi.Void> Function();
typedef UnaryOp = void Function(int);
//// These functions return values that require boxing into different types.
@ -100,3 +110,34 @@ void testBoxPointer() {
}
}
}
// Native 'AllocateMints' allocates 'batchSize'-many Mint boxes through the
// Dart API (see ffi_test_functions.cc).
final allocateMint =
ffiTestFunctions.lookupFunction<NativeUnaryOp, UnaryOp>("AllocateMints");
// Test GC in the FFI call path by calling a C function which allocates through
// the Dart API.
void testAllocateMints(int batchSize) {
allocateMint(batchSize);
}
// Trivial wrapper object allocated in a loop to create GC pressure.
class C {
final int i;
C(this.i);
}
// Global target for the allocations below; overwritten on every iteration,
// making each previous object garbage.
C c = null;
// Allocates 'count' objects, storing each into the global 'c'.  Invoked
// from native code ('AllocateThroughDart') via Dart_Invoke; the entry-point
// pragma keeps this function reachable by name from the embedder API.
@pragma("vm:entry-point", "call")
void testAllocationsInDartHelper(int count) {
int allocated = 0;
while (allocated < count) {
c = C(allocated);
allocated += 1;
}
}
// Native 'AllocateThroughDart' re-enters Dart via Dart_Invoke of
// 'testAllocationsInDartHelper' to allocate 'batchSize' objects.
final allocateThroughDart = ffiTestFunctions
.lookupFunction<NativeUnaryOp, UnaryOp>("AllocateThroughDart");
// Test GC in the FFI call path by calling a C function which allocates by
// calling back into Dart ('testAllocationsInDartHelper').
void testAllocationsInDart(int batchSize) {
allocateThroughDart(batchSize);
}