[vm/ffi] FFI callbacks on X64.

For context on the design, see go/dart-ffi-callbacks
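As a concrete illustration (not part of this CL), the new callback support is exercised roughly as follows from Dart. The library path and typedef names below are hypothetical; TestSimpleAddition is one of the C hooks added in this change, and at the time of this CL fromFunction takes only the target function (newer SDKs also require an exceptionalReturn argument for integer results):

import 'dart:ffi';

// C view of the callback: int32_t (*)(int32_t, int32_t).
typedef NativeAddOp = Int32 Function(Int32, Int32);
// C test hook: int TestSimpleAddition(int (*add)(int, int)).
typedef TestFnNative = Int32 Function(Pointer<NativeFunction<NativeAddOp>>);
typedef TestFn = int Function(Pointer<NativeFunction<NativeAddOp>>);

// Callback targets must be static or top-level functions.
int add(int a, int b) => a + b;

void main() {
  // Path is illustrative; the test library is built from ffi_test_functions.cc.
  final testLib = DynamicLibrary.open('ffi_test_functions.so');
  final test =
      testLib.lookupFunction<TestFnNative, TestFn>('TestSimpleAddition');
  // Compiles a native trampoline for 'add' (X64-only as of this CL).
  final callback = Pointer.fromFunction<NativeAddOp>(add);
  test(callback);
}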

Change-Id: I2482e3c932e73f9a4c00fa7e218ff85f9328fc51
Cq-Include-Trybots: luci.dart.try:vm-kernel-linux-debug-simdbc64-try, vm-kernel-linux-release-simdbc64-try, vm-kernel-mac-debug-simdbc64-try, vm-kernel-mac-release-simdbc64-try, vm-kernel-reload-mac-debug-simdbc64-try, vm-kernel-reload-mac-release-simdbc64-try, vm-kernel-linux-debug-ia32-try, vm-dartkb-linux-debug-simarm64-try
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/100240
Commit-Queue: Samir Jindel <sjindel@google.com>
Reviewed-by: Daco Harkes <dacoharkes@google.com>
Samir Jindel 2019-05-18 06:15:46 +00:00 committed by commit-bot@chromium.org
parent 9e30fbaea4
commit be209f7846
44 changed files with 1483 additions and 194 deletions

View file

@ -275,9 +275,10 @@ class _FfiUseSiteTransformer extends FfiTransformer {
}
bool _isStatic(Expression node) {
if (node is StaticGet) {
return node.target is Procedure;
}
return node is ConstantExpression;
}
}
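For illustration, a sketch of what this check accepts (example names hypothetical): the argument to fromFunction must be a static tearoff (a StaticGet of a Procedure) or a constant expression, so closures and instance tearoffs are rejected. Newer SDKs also require an exceptionalReturn value for integer results.

import 'dart:ffi';

typedef NativeAddOp = Int32 Function(Int32, Int32);

int topLevelAdd(int a, int b) => a + b;

class Adder {
  static int staticAdd(int a, int b) => a + b;
  int instanceAdd(int a, int b) => a + b;
}

void examples() {
  Pointer.fromFunction<NativeAddOp>(topLevelAdd); // OK: static tearoff.
  Pointer.fromFunction<NativeAddOp>(Adder.staticAdd); // OK: static tearoff.
  final closure = (int a, int b) => a + b;
  // Pointer.fromFunction<NativeAddOp>(closure); // Rejected: not static.
  // Pointer.fromFunction<NativeAddOp>(Adder().instanceAdd); // Rejected.
}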

View file

@ -7,15 +7,19 @@
#include <stddef.h>
#include <stdlib.h>
#include <sys/types.h>
#include <csignal>
#include "platform/assert.h"
#include "platform/globals.h"
#include "vm/os_thread.h"
#if defined(HOST_OS_WINDOWS)
#include <psapi.h>
#else
#include <unistd.h>
#endif
#include <setjmp.h>
#include <signal.h>
#include <iostream>
#include <limits>
@ -23,6 +27,9 @@
namespace dart {
////////////////////////////////////////////////////////////////////////////////
// Tests for Dart -> native calls.
// Sums two ints and adds 42.
// Simple function to test trampolines.
// Also used for testing argument exception on passing null instead of a Dart
@ -449,7 +456,8 @@ DART_EXPORT float InventFloatValue() {
return retval;
}
////////////////////////////////////////////////////////////////////////////////
// Functions for stress-testing.
DART_EXPORT int64_t MinInt64() {
return 0x8000000000000000;
@ -511,4 +519,213 @@ DART_EXPORT int RedirectStderr() {
}
#endif
////////////////////////////////////////////////////////////////////////////////
// Tests for callbacks.
#define CHECK(X) \
if (!(X)) { \
fprintf(stderr, "%s\n", "Check failed: " #X); \
return 1; \
}
#define CHECK_EQ(X, Y) CHECK((X) == (Y))
// Sanity test.
DART_EXPORT int TestSimpleAddition(int (*add)(int, int)) {
CHECK_EQ(add(10, 20), 30);
return 0;
}
//// The following tests are copied from above, with the roles of the Dart and
//// C++ code reversed.
DART_EXPORT int TestIntComputation(
int64_t (*fn)(int8_t, int16_t, int32_t, int64_t)) {
CHECK_EQ(fn(125, 250, 500, 1000), 625);
CHECK_EQ(0x7FFFFFFFFFFFFFFFLL, fn(0, 0, 0, 0x7FFFFFFFFFFFFFFFLL));
CHECK_EQ(((int64_t)-0x8000000000000000LL),
fn(0, 0, 0, -0x8000000000000000LL));
return 0;
}
DART_EXPORT int TestUintComputation(
uint64_t (*fn)(uint8_t, uint16_t, uint32_t, uint64_t)) {
CHECK_EQ(0x7FFFFFFFFFFFFFFFLL, fn(0, 0, 0, 0x7FFFFFFFFFFFFFFFLL));
CHECK_EQ(-0x8000000000000000LL, fn(0, 0, 0, -0x8000000000000000LL));
CHECK_EQ(-1, (int64_t)fn(0, 0, 0, -1));
return 0;
}
DART_EXPORT int TestSimpleMultiply(double (*fn)(double)) {
CHECK_EQ(fn(2.0), 2.0 * 1.337);
return 0;
}
DART_EXPORT int TestSimpleMultiplyFloat(float (*fn)(float)) {
CHECK(std::abs(fn(2.0) - 2.0 * 1.337) < 0.001);
return 0;
}
DART_EXPORT int TestManyInts(intptr_t (*fn)(intptr_t,
intptr_t,
intptr_t,
intptr_t,
intptr_t,
intptr_t,
intptr_t,
intptr_t,
intptr_t,
intptr_t)) {
CHECK_EQ(55, fn(1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
return 0;
}
DART_EXPORT int TestManyDoubles(double (*fn)(double,
double,
double,
double,
double,
double,
double,
double,
double,
double)) {
CHECK_EQ(55, fn(1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
return 0;
}
DART_EXPORT int TestManyArgs(double (*fn)(intptr_t a,
float b,
intptr_t c,
double d,
intptr_t e,
float f,
intptr_t g,
double h,
intptr_t i,
float j,
intptr_t k,
double l,
intptr_t m,
float n,
intptr_t o,
double p,
intptr_t q,
float r,
intptr_t s,
double t)) {
CHECK(210.0 == fn(1, 2.0, 3, 4.0, 5, 6.0, 7, 8.0, 9, 10.0, 11, 12.0, 13, 14.0,
15, 16.0, 17, 18.0, 19, 20.0));
return 0;
}
DART_EXPORT int TestStore(int64_t* (*fn)(int64_t* a)) {
int64_t p[2] = {42, 1000};
int64_t* result = fn(p);
CHECK_EQ(*result, 1337);
CHECK_EQ(p[1], 1337);
CHECK_EQ(result, p + 1);
return 0;
}
DART_EXPORT int TestReturnNull(int32_t fn()) {
CHECK_EQ(fn(), 0);
return 0;
}
DART_EXPORT int TestNullPointers(int64_t* (*fn)(int64_t* ptr)) {
CHECK_EQ(fn(nullptr), nullptr);
int64_t p[2] = {0};
CHECK_EQ(fn(p), p + 1);
return 0;
}
struct CallbackTestData {
int success;
void (*callback)();
};
#if defined(TARGET_OS_LINUX) && !defined(PRODUCT)
thread_local sigjmp_buf buf;
void CallbackTestSignalHandler(int) {
siglongjmp(buf, 1);
}
int ExpectAbort(void (*fn)()) {
fprintf(stderr, "**** EXPECT STACKTRACE TO FOLLOW. THIS IS OK. ****\n");
struct sigaction old_action;
int result = __sigsetjmp(buf, /*savesigs=*/1);
if (result == 0) {
// Install signal handler.
struct sigaction handler;
handler.sa_handler = CallbackTestSignalHandler;
sigemptyset(&handler.sa_mask);
handler.sa_flags = 0;
sigaction(SIGABRT, &handler, &old_action);
fn();
} else {
// Caught the setjmp.
sigaction(SIGABRT, &old_action, NULL);
exit(0);
}
fprintf(stderr, "Expected abort!!!\n");
exit(1);
}
void* TestCallbackOnThreadOutsideIsolate(void* parameter) {
CallbackTestData* data = reinterpret_cast<CallbackTestData*>(parameter);
data->success = ExpectAbort(data->callback);
return NULL;
}
int TestCallbackOtherThreadHelper(void* (*tester)(void*), void (*fn)()) {
CallbackTestData data = {1, fn};
pthread_attr_t attr;
int result = pthread_attr_init(&attr);
CHECK_EQ(result, 0);
pthread_t tid;
result = pthread_create(&tid, &attr, tester, &data);
CHECK_EQ(result, 0);
result = pthread_attr_destroy(&attr);
CHECK_EQ(result, 0);
void* retval;
result = pthread_join(tid, &retval);
// Doesn't actually return because the other thread will exit when the test is
// finished.
UNREACHABLE();
}
// Run a callback on another thread and verify that it triggers SIGABRT.
DART_EXPORT int TestCallbackWrongThread(void (*fn)()) {
return TestCallbackOtherThreadHelper(&TestCallbackOnThreadOutsideIsolate, fn);
}
// Verify that we get SIGABRT when invoking a native callback outside an
// isolate.
DART_EXPORT int TestCallbackOutsideIsolate(void (*fn)()) {
Dart_Isolate current = Dart_CurrentIsolate();
Dart_ExitIsolate();
CallbackTestData data = {1, fn};
TestCallbackOnThreadOutsideIsolate(&data);
Dart_EnterIsolate(current);
return data.success;
}
DART_EXPORT int TestCallbackWrongIsolate(void (*fn)()) {
return ExpectAbort(fn);
}
#endif // defined(TARGET_OS_LINUX) && !defined(PRODUCT)
} // namespace dart
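A hypothetical Dart driver for TestCallbackWrongThread above would look as follows: the callback pointer is minted inside an isolate, and the C helper invokes it from a fresh pthread with no current isolate, which is expected to abort rather than run Dart code.

import 'dart:ffi';

typedef NativeCallback = Void Function();
typedef TestNative = Int32 Function(Pointer<NativeFunction<NativeCallback>>);
typedef Test = int Function(Pointer<NativeFunction<NativeCallback>>);

void noop() {}

void runWrongThreadTest(DynamicLibrary testLib) {
  final test =
      testLib.lookupFunction<TestNative, Test>('TestCallbackWrongThread');
  // The C side invokes the callback on a foreign thread and expects SIGABRT.
  test(Pointer.fromFunction<NativeCallback>(noop));
}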

View file

@ -9,6 +9,7 @@
#include "vm/class_finalizer.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/ffi.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/exceptions.h"
#include "vm/log.h"
#include "vm/native_arguments.h"
@ -546,53 +547,68 @@ DEFINE_NATIVE_ENTRY(Ffi_asFunction, 1, 1) {
return raw_closure;
}
// Generates assembly to trampoline from native code into Dart.
static uword CompileNativeCallback(const Function& c_signature,
const Function& dart_target) {
#if defined(DART_PRECOMPILED_RUNTIME) || defined(DART_PRECOMPILER)
UNREACHABLE();
#elif !defined(TARGET_ARCH_X64)
// https://github.com/dart-lang/sdk/issues/35774
// https://github.com/dart-lang/sdk/issues/35760 Arm32 && Android
// https://github.com/dart-lang/sdk/issues/35772 Arm64
// https://github.com/dart-lang/sdk/issues/35773 DBC
// FFI is supported, but callbacks are not.
Exceptions::ThrowUnsupportedError(
"FFI callbacks are currently supported on 64-bit Intel only.");
#else
Thread* const thread = Thread::Current();
const int32_t callback_id = thread->AllocateFfiCallbackId();
// Create a new Function named 'FfiCallback' and stick it in the 'dart:ffi'
// library. Note that these functions will never be invoked by Dart, so it
// doesn't matter that they all have the same name.
Zone* const Z = thread->zone();
const String& name =
String::ZoneHandle(Symbols::New(Thread::Current(), "FfiCallback"));
const Library& lib = Library::Handle(Library::FfiLibrary());
const Class& owner_class = Class::Handle(lib.toplevel_class());
const Function& function =
Function::Handle(Z, Function::New(name, RawFunction::kFfiTrampoline,
/*is_static=*/true,
/*is_const=*/false,
/*is_abstract=*/false,
/*is_external=*/false,
/*is_native=*/false, owner_class,
TokenPosition::kMinSource));
function.set_is_debuggable(false);
// Set callback-specific fields which the flow-graph builder needs to generate
// the body.
function.SetFfiCSignature(c_signature);
function.SetFfiCallbackId(callback_id);
function.SetFfiCallbackTarget(dart_target);
// We compile the callback immediately because we need to return a pointer to
// the entry-point. Native calls do not use patching like Dart calls, so we
// cannot compile it lazily.
const Object& result =
Object::Handle(Z, Compiler::CompileOptimizedFunction(thread, function));
if (result.IsError()) {
Exceptions::PropagateError(Error::Cast(result));
}
ASSERT(result.IsCode());
const Code& code = Code::Cast(result);
thread->SetFfiCallbackCode(callback_id, code);
return code.EntryPoint();
#endif
}
DEFINE_NATIVE_ENTRY(Ffi_fromFunction, 1, 1) {
GET_NATIVE_TYPE_ARGUMENT(type_arg, arguments->NativeTypeArgAt(0));
GET_NON_NULL_NATIVE_ARGUMENT(Closure, closure, arguments->NativeArgAt(0));
const Function& native_signature =
Function::Handle(((Type&)type_arg).signature());
Function& func = Function::Handle(closure.function());
TypeArguments& type_args = TypeArguments::Handle(zone);
type_args = TypeArguments::New(1);
type_args.SetTypeAt(Pointer::kNativeTypeArgPos, type_arg);
@ -608,9 +624,19 @@ DEFINE_NATIVE_ENTRY(Ffi_fromFunction, 1, 1) {
ClassFinalizer::FinalizeType(Class::Handle(), native_function_type);
native_function_type ^= native_function_type.Canonicalize();
// The FE verifies that the target of a 'fromFunction' is a static method, so
// the value we see here must be a static tearoff. See ffi_use_sites.dart for
// details.
//
// TODO(36748): Define hot-reload semantics of native callbacks. We may need
// to look up the target by name.
ASSERT(func.IsImplicitClosureFunction());
func = func.parent_function();
ASSERT(func.is_static());
const uword address = CompileNativeCallback(native_signature, func);
const Pointer& result = Pointer::Handle(Pointer::New(
native_function_type, Integer::Handle(zone, Integer::New(address))));
return result.raw();
@ -682,7 +708,7 @@ uint64_t* FfiMarshalledArguments::New(
} else if (loc.IsFpuRegister()) {
descr.SetFpuRegister(loc.fpu_reg(), arg_value);
} else {
ASSERT(loc.IsStackSlot() || loc.IsDoubleStackSlot());
ASSERT(loc.stack_index() < num_stack_slots);
descr.SetStackSlotValue(loc.stack_index(), arg_value);
}

View file

@ -53,3 +53,21 @@ class Pointer<T extends NativeType> {
@patch
void free() native "Ffi_free";
}
// This method gets called when an exception bubbles up to the native -> Dart
// boundary from an FFI native callback. Since native code does not have any
// concept of exceptions, the exception cannot be propagated any further.
// Instead, print a warning with the exception and return 0/0.0 from the
// callback.
//
// TODO(36856): Iron out the story behind exceptions.
@pragma("vm:entry-point")
void _handleExposedException(dynamic exception, dynamic stackTrace) {
print(
"==================== UNHANDLED EXCEPTION FROM FFI CALLBACK ====================");
print(
""" ** Native callbacks should not throw exceptions because they cannot be
propagated into native code. **""");
print("EXCEPTION: $exception");
print(stackTrace);
}
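A hedged example of the path handled above: a callback that throws. The trampoline generated in this CL catches the exception, calls _handleExposedException to print the warning, and hands the exceptional return value (0 here) back to the native caller.

import 'dart:ffi';

typedef NativeAddOp = Int32 Function(Int32, Int32);

int throwingAdd(int a, int b) {
  // Never propagates into native code; the C caller observes a return of 0.
  throw StateError('boom');
}

final badCallback = Pointer.fromFunction<NativeAddOp>(throwingAdd);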

View file

@ -2085,9 +2085,10 @@ void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
}
void Assembler::TransitionGeneratedToNative(Register destination_address,
Register new_exit_frame,
Register scratch) {
// Save exit frame information to enable stack walking.
movl(Address(THR, Thread::top_exit_frame_info_offset()), new_exit_frame);
// Mark that the thread is executing native code.
movl(VMTagAddress(), destination_address);

View file

@ -649,6 +649,7 @@ class Assembler : public AssemblerBase {
// Require a temporary register 'tmp'.
// Clobber all non-CPU registers (e.g. XMM registers and the "FPU stack").
void TransitionGeneratedToNative(Register destination_address,
Register new_exit_frame,
Register scratch);
void TransitionNativeToGenerated(Register scratch);

View file

@ -164,9 +164,10 @@ void Assembler::setcc(Condition condition, ByteRegister dst) {
EmitUint8(0xC0 + (dst & 0x07));
}
void Assembler::TransitionGeneratedToNative(Register destination_address,
Register new_exit_frame) {
// Save exit frame information to enable stack walking.
movq(Address(THR, Thread::top_exit_frame_info_offset()), new_exit_frame);
movq(Assembler::VMTagAddress(), destination_address);
movq(Address(THR, compiler::target::Thread::execution_state_offset()),
@ -1517,6 +1518,18 @@ void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
}
}
void Assembler::EmitEntryFrameVerification() {
#if defined(DEBUG)
Label ok;
leaq(RAX, Address(RBP, target::frame_layout.exit_link_slot_from_entry_fp *
target::kWordSize));
cmpq(RAX, RSP);
j(EQUAL, &ok);
Stop("target::frame_layout.exit_link_slot_from_entry_fp mismatch");
Bind(&ok);
#endif
}
void Assembler::PushRegisters(intptr_t cpu_register_set,
intptr_t xmm_register_set) {
const intptr_t xmm_regs_count = RegisterSet::RegisterCount(xmm_register_set);

View file

@ -306,7 +306,8 @@ class Assembler : public AssemblerBase {
void setcc(Condition condition, ByteRegister dst);
void TransitionGeneratedToNative(Register destination_address,
Register new_exit_frame);
void TransitionNativeToGenerated();
// Register-register, register-address and address-register instructions.
@ -777,6 +778,13 @@ class Assembler : public AssemblerBase {
void LeaveFrame();
void ReserveAlignedFrameSpace(intptr_t frame_space);
// In debug mode, generates code to verify that:
// FP + kExitLinkSlotFromFp == SP
//
// Triggers breakpoint otherwise.
// Clobbers RAX.
void EmitEntryFrameVerification();
// Create a frame for calling into runtime that preserves all volatile
// registers. Frame's RSP is guaranteed to be correctly aligned and
// frame_space bytes are reserved under it.

View file

@ -142,6 +142,10 @@ void ConstantPropagator::VisitFunctionEntry(FunctionEntryInstr* block) {
}
}
void ConstantPropagator::VisitNativeEntry(NativeEntryInstr* block) {
VisitFunctionEntry(block);
}
void ConstantPropagator::VisitOsrEntry(OsrEntryInstr* block) {
for (auto def : *block->initial_definitions()) {
def->Accept(this);
@ -192,6 +196,10 @@ void ConstantPropagator::VisitReturn(ReturnInstr* instr) {
// Nothing to do.
}
void ConstantPropagator::VisitNativeReturn(NativeReturnInstr* instr) {
// Nothing to do.
}
void ConstantPropagator::VisitThrow(ThrowInstr* instr) {
// Nothing to do.
}
@ -365,6 +373,10 @@ void ConstantPropagator::VisitParameter(ParameterInstr* instr) {
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitNativeParameter(NativeParameterInstr* instr) {
SetValue(instr, non_constant_);
}
void ConstantPropagator::VisitPushArgument(PushArgumentInstr* instr) {
if (SetValue(instr, instr->value()->definition()->constant_value())) {
// The worklist implementation breaks down around push arguments,

View file

@ -1365,7 +1365,7 @@ void FlowGraphCompiler::GenerateStaticCall(intptr_t deopt_id,
: ic_data.arguments_descriptor());
ASSERT(ArgumentsDescriptor(arguments_descriptor).TypeArgsLen() ==
args_info.type_args_len);
if (is_optimizing() && !ForcedOptimization()) {
EmitOptimizedStaticCall(function, arguments_descriptor,
args_info.count_with_type_args, deopt_id, token_pos,
locs, entry_kind);

View file

@ -409,6 +409,11 @@ class FlowGraphCompiler : public ValueObject {
return block_order_;
}
// If 'ForcedOptimization()' returns 'true', we are compiling in optimized
// mode for a function which cannot deoptimize. Certain optimizations, e.g.
// speculative optimizations and call patching, are disabled.
bool ForcedOptimization() const { return function().ForceOptimize(); }
const FlowGraph& flow_graph() const { return flow_graph_; }
BlockEntryInstr* current_block() const { return current_block_; }

View file

@ -27,6 +27,7 @@
#include "vm/regexp_assembler_ir.h"
#include "vm/resolver.h"
#include "vm/scopes.h"
#include "vm/stack_frame.h"
#include "vm/stub_code.h"
#include "vm/symbols.h"
#include "vm/type_testing_stubs.h"
@ -3825,7 +3826,9 @@ void FunctionEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ nop();
}
#endif
if (tag() == Instruction::kFunctionEntry) {
__ Bind(compiler->GetJumpLabel(this));
}
// In the AOT compiler we want to reduce code size, so generate no
// fall-through code in [FlowGraphCompiler::CompileGraph()].
@ -3881,6 +3884,21 @@ void FunctionEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
}
LocationSummary* NativeEntryInstr::MakeLocationSummary(Zone* zone,
bool optimizing) const {
UNREACHABLE();
}
#if !defined(TARGET_ARCH_X64)
void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
#endif
LocationSummary* OsrEntryInstr::MakeLocationSummary(Zone* zone,
bool optimizing) const {
UNREACHABLE();
@ -3986,6 +4004,43 @@ void ParameterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
UNREACHABLE();
}
void NativeParameterInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
#if !defined(TARGET_ARCH_DBC)
// The native entry frame has size -kExitLinkSlotFromFp. In order to access
// the top of stack from above the entry frame, we add a constant to account
// for the two frame pointers and return address of the entry frame.
constexpr intptr_t kEntryFramePadding = 3;
FrameRebase rebase(/*old_base=*/SPREG, /*new_base=*/FPREG,
-kExitLinkSlotFromEntryFp + kEntryFramePadding);
const Location dst = locs()->out(0);
const Location src = rebase.Rebase(loc_);
NoTemporaryAllocator no_temp;
compiler->EmitMove(dst, src, &no_temp);
#else
UNREACHABLE();
#endif
}
LocationSummary* NativeParameterInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
#if !defined(TARGET_ARCH_DBC)
ASSERT(opt);
Location input = Location::Any();
if (representation() == kUnboxedInt64 && compiler::target::kWordSize < 8) {
input = Location::Pair(Location::RequiresRegister(),
Location::RequiresFpuRegister());
} else {
input = RegisterKindForResult() == Location::kRegister
? Location::RequiresRegister()
: Location::RequiresFpuRegister();
}
return LocationSummary::Make(zone, /*num_inputs=*/0, input,
LocationSummary::kNoCall);
#else
UNREACHABLE();
#endif
}
bool ParallelMoveInstr::IsRedundant() const {
for (intptr_t i = 0; i < moves_.length(); i++) {
if (!moves_[i]->IsRedundant()) {
@ -5374,6 +5429,16 @@ Location FfiCallInstr::UnallocateStackSlots(Location in, bool is_atomic) {
}
}
LocationSummary* NativeReturnInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
LocationSummary* locs = new (zone)
LocationSummary(zone, kNumInputs, kNumTemps, LocationSummary::kNoCall);
locs->set_in(0, result_location_);
return locs;
}
#undef Z
#else
@ -5396,6 +5461,11 @@ LocationSummary* FfiCallInstr::MakeLocationSummary(Zone* zone,
return summary;
}
LocationSummary* NativeReturnInstr::MakeLocationSummary(Zone* zone,
bool opt) const {
UNREACHABLE();
}
#endif // !defined(TARGET_ARCH_DBC)
Representation FfiCallInstr::representation() const {

View file

@ -336,18 +336,21 @@ struct InstrAttrs {
M(JoinEntry, kNoGC) \
M(TargetEntry, kNoGC) \
M(FunctionEntry, kNoGC) \
M(NativeEntry, kNoGC) \
M(OsrEntry, kNoGC) \
M(IndirectEntry, kNoGC) \
M(CatchBlockEntry, kNoGC) \
M(Phi, kNoGC) \
M(Redefinition, kNoGC) \
M(Parameter, kNoGC) \
M(NativeParameter, kNoGC) \
M(LoadIndexedUnsafe, kNoGC) \
M(StoreIndexedUnsafe, kNoGC) \
M(TailCall, kNoGC) \
M(ParallelMove, kNoGC) \
M(PushArgument, kNoGC) \
M(Return, kNoGC) \
M(NativeReturn, kNoGC) \
M(Throw, kNoGC) \
M(ReThrow, kNoGC) \
M(Stop, _) \
@ -916,6 +919,28 @@ class Instruction : public ZoneAllocated {
virtual bool UseSharedSlowPathStub(bool is_optimizing) const { return false; }
// 'RegisterKindForResult()' returns the register kind necessary to hold the
// result.
//
// This is not virtual because instructions should override representation()
// instead.
Location::Kind RegisterKindForResult() const {
const Representation rep = representation();
#if !defined(TARGET_ARCH_DBC)
if ((rep == kUnboxedFloat) || (rep == kUnboxedDouble) ||
(rep == kUnboxedFloat32x4) || (rep == kUnboxedInt32x4) ||
(rep == kUnboxedFloat64x2)) {
return Location::kFpuRegister;
}
#else
// DBC supports only unboxed doubles and does not have distinguished FPU
// registers.
ASSERT((rep != kUnboxedFloat32x4) && (rep != kUnboxedInt32x4) &&
(rep != kUnboxedFloat64x2));
#endif
return Location::kRegister;
}
protected:
// GetDeoptId and/or CopyDeoptIdFrom.
friend class CallSiteInliner;
@ -1614,6 +1639,33 @@ class FunctionEntryInstr : public BlockEntryWithInitialDefs {
DISALLOW_COPY_AND_ASSIGN(FunctionEntryInstr);
};
// Represents entry into a function from native code.
//
// Native entries are not allowed to have regular parameters. They should use
// NativeParameter instead (which doesn't count as an initial definition).
class NativeEntryInstr : public FunctionEntryInstr {
public:
NativeEntryInstr(const ZoneGrowableArray<Location>* argument_locations,
GraphEntryInstr* graph_entry,
intptr_t block_id,
intptr_t try_index,
intptr_t deopt_id,
intptr_t callback_id)
: FunctionEntryInstr(graph_entry, block_id, try_index, deopt_id),
callback_id_(callback_id),
argument_locations_(argument_locations) {}
DECLARE_INSTRUCTION(NativeEntry)
PRINT_TO_SUPPORT
private:
void SaveArgument(FlowGraphCompiler* compiler, Location loc) const;
const intptr_t callback_id_;
const ZoneGrowableArray<Location>* const argument_locations_;
};
// Represents an OSR entrypoint to a function.
//
// The OSR entry has its own initial definitions.
@ -2193,6 +2245,57 @@ class ParameterInstr : public Definition {
DISALLOW_COPY_AND_ASSIGN(ParameterInstr);
};
// Native parameters are not treated as initial definitions because they cannot
// be inlined and are only usable in optimized code. The location must be a
// stack location relative to the stack pointer (SPREG) after
// register-based arguments have been saved on entry to a native call. See
// NativeEntryInstr::EmitNativeCode for more details.
//
// TODO(33549): Unify with ParameterInstr.
class NativeParameterInstr : public Definition {
public:
NativeParameterInstr(Location loc, Representation representation)
: loc_(loc), representation_(representation) {
if (loc.IsPairLocation()) {
for (intptr_t i : {0, 1}) {
ASSERT(loc_.Component(i).HasStackIndex() &&
loc_.Component(i).base_reg() == SPREG);
}
} else {
ASSERT(loc_.HasStackIndex() && loc_.base_reg() == SPREG);
}
}
DECLARE_INSTRUCTION(NativeParameter)
virtual Representation representation() const { return representation_; }
intptr_t InputCount() const { return 0; }
Value* InputAt(intptr_t i) const {
UNREACHABLE();
return NULL;
}
virtual bool ComputeCanDeoptimize() const { return false; }
virtual bool HasUnknownSideEffects() const { return false; }
// TODO(sjindel): We can make this more precise.
virtual CompileType ComputeType() const { return CompileType::Dynamic(); }
virtual bool MayThrow() const { return false; }
PRINT_OPERANDS_TO_SUPPORT
private:
virtual void RawSetInputAt(intptr_t i, Value* value) { UNREACHABLE(); }
const Location loc_;
const Representation representation_;
DISALLOW_COPY_AND_ASSIGN(NativeParameterInstr);
};
// Stores a tagged pointer to a slot accessible from a fixed register. It has
// the form:
//
@ -2256,8 +2359,11 @@ class StoreIndexedUnsafeInstr : public TemplateInstruction<2, NoThrow> {
// the frame. This is asserted via `inliner.cc::CalleeGraphValidator`.
class LoadIndexedUnsafeInstr : public TemplateDefinition<1, NoThrow> {
public:
LoadIndexedUnsafeInstr(Value* index,
intptr_t offset,
CompileType result_type,
Representation representation = kTagged)
: offset_(offset), representation_(representation) {
UpdateType(result_type);
SetInputAt(0, index);
}
@ -2268,7 +2374,6 @@ class LoadIndexedUnsafeInstr : public TemplateDefinition<1, NoThrow> {
ASSERT(index == 0);
return kTagged;
}
virtual Representation representation() const { return representation_; }
virtual bool ComputeCanDeoptimize() const { return false; }
virtual bool HasUnknownSideEffects() const { return false; }
@ -2284,6 +2389,7 @@ class LoadIndexedUnsafeInstr : public TemplateDefinition<1, NoThrow> {
private:
const intptr_t offset_;
const Representation representation_;
DISALLOW_COPY_AND_ASSIGN(LoadIndexedUnsafeInstr);
};
@ -2393,6 +2499,40 @@ class ReturnInstr : public TemplateInstruction<1, NoThrow> {
DISALLOW_COPY_AND_ASSIGN(ReturnInstr);
};
// Represents a return from a Dart function into native code.
class NativeReturnInstr : public ReturnInstr {
public:
NativeReturnInstr(TokenPosition token_pos,
Value* value,
Representation rep,
Location result_location,
intptr_t deopt_id)
: ReturnInstr(token_pos, value, deopt_id),
result_representation_(rep),
result_location_(result_location) {}
DECLARE_INSTRUCTION(NativeReturn)
PRINT_OPERANDS_TO_SUPPORT
virtual Representation RequiredInputRepresentation(intptr_t idx) const {
ASSERT(idx == 0);
return result_representation_;
}
virtual bool CanBecomeDeoptimizationTarget() const {
// Unlike ReturnInstr, NativeReturnInstr cannot be inlined (because it's
// returning into native code).
return false;
}
private:
const Representation result_representation_;
const Location result_location_;
DISALLOW_COPY_AND_ASSIGN(NativeReturnInstr);
};
class ThrowInstr : public TemplateInstruction<0, Throws> {
public:
explicit ThrowInstr(TokenPosition token_pos, intptr_t deopt_id)

View file

@ -886,7 +886,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ popl(tmp);
__ movl(Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), tmp);
__ TransitionGeneratedToNative(branch, FPREG, tmp);
__ call(branch);
// The x86 calling convention requires floating point values to be returned on

View file

@ -524,12 +524,10 @@ void ClosureCallInstr::PrintOperandsTo(BufferFormatter* f) const {
void FfiCallInstr::PrintOperandsTo(BufferFormatter* f) const {
f->Print(" pointer=");
InputAt(TargetAddressIndex())->PrintTo(f);
f->Print(" signature=%s",
Type::Handle(signature_.SignatureType()).ToCString());
for (intptr_t i = 0, n = InputCount(); i < n - 1; ++i) {
f->Print(", ");
InputAt(i)->PrintTo(f);
f->Print(" (at %s) ", arg_locations_[i].ToCString());
f->Print(" (@%s)", arg_locations_[i].ToCString());
}
}
@ -1063,6 +1061,24 @@ void FunctionEntryInstr::PrintTo(BufferFormatter* f) const {
BlockEntryWithInitialDefs::PrintInitialDefinitionsTo(f);
}
void NativeEntryInstr::PrintTo(BufferFormatter* f) const {
f->Print("B%" Pd "[native function entry]:%" Pd, block_id(), GetDeoptId());
if (HasParallelMove()) {
f->Print("\n");
parallel_move()->PrintTo(f);
}
BlockEntryWithInitialDefs::PrintInitialDefinitionsTo(f);
}
void NativeReturnInstr::PrintOperandsTo(BufferFormatter* f) const {
value()->PrintTo(f);
}
void NativeParameterInstr::PrintOperandsTo(BufferFormatter* f) const {
f->Print("%s as %s", loc_.ToCString(),
RepresentationToCString(representation_));
}
void CatchBlockEntryInstr::PrintTo(BufferFormatter* f) const {
f->Print("B%" Pd "[target catch try_idx %" Pd " catch_try_idx %" Pd "]",
block_id(), try_index(), catch_try_index());

View file

@ -139,6 +139,43 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ set_constant_pool_allowed(true);
}
void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LeaveDartFrame();
// Pop dummy return address.
__ popq(TMP);
// Anything besides the return register.
const Register vm_tag_reg = RBX, old_exit_frame_reg = RCX;
__ popq(old_exit_frame_reg);
// Restore top_resource.
__ popq(TMP);
__ movq(Address(THR, compiler::target::Thread::top_resource_offset()), TMP);
__ popq(vm_tag_reg);
// TransitionGeneratedToNative will reset the exit frame info to
// old_exit_frame_reg *before* entering the safepoint.
__ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg);
// Restore C++ ABI callee-saved registers.
__ PopRegisters(CallingConventions::kCalleeSaveCpuRegisters,
CallingConventions::kCalleeSaveXmmRegisters);
// Leave the entry frame.
__ LeaveFrame();
// Leave the dummy frame holding the pushed arguments.
__ LeaveFrame();
__ ret();
// For following blocks.
__ set_constant_pool_allowed(true);
}
static Condition NegateCondition(Condition condition) {
switch (condition) {
case EQUAL:
@ -921,7 +958,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ movq(Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), TMP);
// Update information in the thread object and enter a safepoint.
__ TransitionGeneratedToNative(target_address, FPREG);
__ CallCFunction(target_address);
@ -943,6 +980,130 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ popq(TMP);
}
void NativeEntryInstr::SaveArgument(FlowGraphCompiler* compiler,
Location loc) const {
ASSERT(!loc.IsPairLocation());
if (loc.HasStackIndex()) return;
if (loc.IsRegister()) {
__ pushq(loc.reg());
} else if (loc.IsFpuRegister()) {
__ movq(TMP, loc.fpu_reg());
__ pushq(TMP);
} else {
UNREACHABLE();
}
}
void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (FLAG_precompiled_mode) {
UNREACHABLE();
}
__ Bind(compiler->GetJumpLabel(this));
// Create a dummy frame holding the pushed arguments. This simplifies
// NativeReturnInstr::EmitNativeCode.
__ EnterFrame(0);
// Save the argument registers, in reverse order.
for (intptr_t i = argument_locations_->length(); i-- > 0;) {
SaveArgument(compiler, argument_locations_->At(i));
}
// Enter the entry frame.
__ EnterFrame(0);
// Save a space for the code object.
__ PushImmediate(Immediate(0));
// InvokeDartCodeStub saves the arguments descriptor here. We don't have one,
// but we need to follow the same frame layout for the stack walker.
__ PushImmediate(Immediate(0));
// Save ABI callee-saved registers.
__ PushRegisters(CallingConventions::kCalleeSaveCpuRegisters,
CallingConventions::kCalleeSaveXmmRegisters);
// Load the thread object.
// TODO(35765): Fix linking issue on AOT.
// TODO(35934): Exclude native callbacks from snapshots.
//
// Create another frame to align the frame before continuing in "native" code.
{
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(0);
__ movq(
RAX,
Immediate(reinterpret_cast<int64_t>(DLRT_GetThreadForNativeCallback)));
__ call(RAX);
__ movq(THR, RAX);
__ LeaveFrame();
}
// Save the current VMTag on the stack.
__ movq(RAX, Assembler::VMTagAddress());
__ pushq(RAX);
// Save top resource.
__ pushq(Address(THR, compiler::target::Thread::top_resource_offset()));
__ movq(Address(THR, compiler::target::Thread::top_resource_offset()),
Immediate(0));
// Save top exit frame info. Stack walker expects it to be here.
__ pushq(
Address(THR, compiler::target::Thread::top_exit_frame_info_offset()));
// In debug mode, verify that we've pushed the top exit frame info at the
// correct offset from FP.
__ EmitEntryFrameVerification();
// TransitionNativeToGenerated will reset top exit frame info to 0 *after*
// leaving the safepoint.
__ TransitionNativeToGenerated();
// Now that the safepoint has ended, we can touch Dart objects without
// handles.
// Otherwise we'll clobber the argument sent from the caller.
COMPILE_ASSERT(RAX != CallingConventions::kArg1Reg);
__ movq(CallingConventions::kArg1Reg, Immediate(callback_id_));
__ movq(RAX, Address(THR, compiler::target::Thread::
verify_callback_isolate_entry_point_offset()));
__ call(RAX);
// Load the code object.
__ movq(RAX, Address(THR, compiler::target::Thread::callback_code_offset()));
__ movq(RAX, FieldAddress(
RAX, compiler::target::GrowableObjectArray::data_offset()));
__ movq(CODE_REG,
FieldAddress(RAX, compiler::target::Array::data_offset() +
callback_id_ * compiler::target::kWordSize));
// Put the code object in the reserved slot.
__ movq(Address(FPREG, kPcMarkerSlotFromFp * compiler::target::kWordSize),
CODE_REG);
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
__ movq(PP, Address(THR,
compiler::target::Thread::global_object_pool_offset()));
} else {
__ xorq(PP, PP); // GC-safe value into PP.
}
// Push a dummy return address which suggests that we are inside of
// InvokeDartCodeStub. This is how the stack walker detects an entry frame.
__ movq(
RAX,
Address(THR, compiler::target::Thread::invoke_dart_code_stub_offset()));
__ pushq(FieldAddress(RAX, compiler::target::Code::entry_point_offset()));
// Continue with Dart frame setup.
FunctionEntryInstr::EmitNativeCode(compiler);
}
static bool CanBeImmediateIndex(Value* index, intptr_t cid) {
if (!index->definition()->IsConstant()) return false;
const Object& constant = index->definition()->AsConstant()->value();
@ -1665,10 +1826,10 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label ok, fail_label;
Label* deopt = NULL;
if (compiler->is_optimizing()) {
deopt = compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField);
}
Label* fail = (deopt != NULL) ? deopt : &fail_label;

View file

@ -824,25 +824,6 @@ static Location::Kind RegisterKindFromPolicy(Location loc) {
}
}
//
// When describing shape of live ranges in comments below we are going to use
// the following notation:
@ -991,11 +972,11 @@ void FlowGraphAllocator::ConnectIncomingPhiMoves(JoinEntryInstr* join) {
// All phi resolution moves are connected. Phi's live range is
// complete.
AssignSafepoints(phi, range);
CompleteRange(range, phi->RegisterKindForResult());
if (is_pair_phi) {
LiveRange* second_range = GetLiveRange(ToSecondPairVreg(vreg));
AssignSafepoints(phi, second_range);
CompleteRange(second_range, phi->RegisterKindForResult());
}
move_idx += is_pair_phi ? 2 : 1;
@ -1303,7 +1284,7 @@ void FlowGraphAllocator::ProcessOneOutput(BlockEntryInstr* block,
}
AssignSafepoints(def, range);
CompleteRange(range, def->RegisterKindForResult());
}
// Create and update live ranges corresponding to instruction's inputs,

View file

@ -9,6 +9,8 @@
#include "platform/globals.h"
#include "vm/compiler/backend/locations.h"
#include "vm/compiler/runtime_api.h"
#include "vm/growable_array.h"
#include "vm/stack_frame.h"
namespace dart {
@ -182,7 +184,7 @@ template <class CallingConventions,
class Location,
class Register,
class FpuRegister>
class ArgumentAllocator : public ValueObject {
public:
Location AllocateArgument(Representation rep) {
switch (rep) {
@ -207,9 +209,13 @@ class ArgumentFrameState : public ValueObject {
}
// Argument must be spilled.
if (rep == kUnboxedInt64 && compiler::target::kWordSize == 4) {
return AllocateAlignedStackSlots(rep);
} else if (rep == kUnboxedDouble) {
// By convention, we always use DoubleStackSlot for doubles, even on
// 64-bit systems.
ASSERT(!CallingConventions::kAlignArguments);
return AllocateDoubleStackSlot();
} else {
return AllocateStackSlot();
}
@ -221,6 +227,13 @@ class ArgumentFrameState : public ValueObject {
CallingConventions::kStackPointerRegister);
}
Location AllocateDoubleStackSlot() {
const Location result = Location::DoubleStackSlot(
stack_height_in_slots, CallingConventions::kStackPointerRegister);
stack_height_in_slots += 8 / compiler::target::kWordSize;
return result;
}
// Allocates a pair of stack slots where the first stack slot is aligned to an
// 8-byte boundary, if necessary.
Location AllocateAlignedStackSlots(Representation rep) {
@ -287,6 +300,68 @@ class ArgumentFrameState : public ValueObject {
intptr_t stack_height_in_slots = 0;
};
ZoneGrowableArray<Location>*
CallbackArgumentTranslator::TranslateArgumentLocations(
const ZoneGrowableArray<Location>& arg_locs) {
auto& pushed_locs = *(new ZoneGrowableArray<Location>(arg_locs.length()));
CallbackArgumentTranslator translator;
for (intptr_t i = 0, n = arg_locs.length(); i < n; i++) {
translator.AllocateArgument(arg_locs[i]);
}
for (intptr_t i = 0, n = arg_locs.length(); i < n; ++i) {
pushed_locs.Add(translator.TranslateArgument(arg_locs[i]));
}
return &pushed_locs;
}
void CallbackArgumentTranslator::AllocateArgument(Location arg) {
if (arg.IsPairLocation()) {
AllocateArgument(arg.Component(0));
AllocateArgument(arg.Component(1));
return;
}
if (arg.HasStackIndex()) return;
ASSERT(arg.IsRegister() || arg.IsFpuRegister());
if (arg.IsRegister()) {
argument_slots_required_++;
} else {
argument_slots_required_ += 8 / compiler::target::kWordSize;
}
}
Location CallbackArgumentTranslator::TranslateArgument(Location arg) {
if (arg.IsPairLocation()) {
const Location low = TranslateArgument(arg.Component(0));
const Location high = TranslateArgument(arg.Component(1));
return Location::Pair(low, high);
}
if (arg.HasStackIndex()) {
// Add extra slots after the saved arguments for the return address and
// frame pointer of the dummy arguments frame, which will be between the
// saved argument registers and stack arguments. Also add slots for the
// shadow space if present (factored into
// kCallbackSlotsBeforeSavedArguments).
FrameRebase rebase(
/*old_base=*/SPREG, /*new_base=*/SPREG,
/*stack_delta=*/argument_slots_required_ +
kCallbackSlotsBeforeSavedArguments);
return rebase.Rebase(arg);
}
if (arg.IsRegister()) {
return Location::StackSlot(argument_slots_used_++, SPREG);
}
ASSERT(arg.IsFpuRegister());
const Location result =
Location::DoubleStackSlot(argument_slots_used_, SPREG);
argument_slots_used_ += 8 / compiler::target::kWordSize;
return result;
}
// Takes a list of argument representations, and converts it to a list of
// argument locations based on calling convention.
template <class CallingConventions,
@ -299,7 +374,7 @@ ZoneGrowableArray<Location>* ArgumentLocationsBase(
auto result = new ZoneGrowableArray<Location>(num_arguments);
// Loop through all arguments and assign a register or a stack location.
ArgumentAllocator<CallingConventions, Location, Register, FpuRegister>
frame_state;
for (intptr_t i = 0; i < num_arguments; i++) {
Representation rep = arg_reps[i];

View file

@ -108,6 +108,31 @@ class FfiSignatureDescriptor : public ValueObject {
#endif // defined(TARGET_ARCH_DBC)
// This class translates the ABI location of arguments into the locations they
// will inhabit after entry-frame setup in the invocation of a native callback.
//
// Native -> Dart callbacks must push all the arguments before executing any
// Dart code because reading the Thread from TLS requires calling a native
// stub, and the argument registers are volatile on all ABIs we support.
//
// To avoid complicating initial definitions, all callback arguments are read
// off the stack from their pushed locations, so this class updates the argument
// positions to account for this.
//
// See 'NativeEntryInstr::EmitNativeCode' for details.
class CallbackArgumentTranslator : public ValueObject {
public:
static ZoneGrowableArray<Location>* TranslateArgumentLocations(
const ZoneGrowableArray<Location>& arg_locs);
private:
void AllocateArgument(Location arg);
Location TranslateArgument(Location arg);
intptr_t argument_slots_used_ = 0;
intptr_t argument_slots_required_ = 0;
};
} // namespace ffi
} // namespace compiler

View file

@ -16,6 +16,7 @@
#include "vm/compiler/jit/compiler.h"
#include "vm/kernel_loader.h"
#include "vm/longjump.h"
#include "vm/native_entry.h"
#include "vm/object_store.h"
#include "vm/report.h"
#include "vm/resolver.h"
@ -2440,6 +2441,31 @@ Fragment FlowGraphBuilder::FfiUnboxedExtend(Representation representation,
return Fragment(extend);
}
Fragment FlowGraphBuilder::FfiExceptionalReturnValue(
const AbstractType& result_type,
Representation representation) {
ASSERT(optimizing_);
Object& result = Object::ZoneHandle(Z, Object::null());
if (representation == kUnboxedFloat || representation == kUnboxedDouble) {
result = Double::New(0.0, Heap::kOld);
} else {
result = Integer::New(0, Heap::kOld);
}
Fragment code;
code += Constant(result);
code += UnboxTruncate(representation);
return code;
}
#if !defined(TARGET_ARCH_DBC)
Fragment FlowGraphBuilder::NativeReturn(Representation result) {
auto* instr = new (Z)
NativeReturnInstr(TokenPosition::kNoSource, Pop(), result,
compiler::ffi::ResultLocation(result), DeoptId::kNone);
return Fragment(instr);
}
#endif
Fragment FlowGraphBuilder::FfiPointerFromAddress(const Type& result_type) {
Fragment test;
TargetEntryInstr* null_entry;
@ -2498,8 +2524,69 @@ Fragment FlowGraphBuilder::BitCast(Representation from, Representation to) {
return Fragment(instr);
}
Fragment FlowGraphBuilder::FfiConvertArgumentToDart(
const AbstractType& ffi_type,
const Representation native_representation) {
Fragment body;
if (compiler::ffi::NativeTypeIsPointer(ffi_type)) {
body += Box(kUnboxedFfiIntPtr);
body += FfiPointerFromAddress(Type::Cast(ffi_type));
} else if (compiler::ffi::NativeTypeIsVoid(ffi_type)) {
body += Drop();
body += NullConstant();
} else {
const Representation from_rep = native_representation;
const Representation to_rep = compiler::ffi::TypeRepresentation(ffi_type);
if (from_rep != to_rep) {
body += BitCast(from_rep, to_rep);
} else {
body += FfiUnboxedExtend(from_rep, ffi_type);
}
body += Box(to_rep);
}
return body;
}
Fragment FlowGraphBuilder::FfiConvertArgumentToNative(
const Function& function,
const AbstractType& ffi_type,
const Representation native_representation) {
Fragment body;
// Check for 'null'. Only ffi.Pointers are allowed to be null.
if (!compiler::ffi::NativeTypeIsPointer(ffi_type)) {
body += LoadLocal(MakeTemporary());
body <<=
new (Z) CheckNullInstr(Pop(), String::ZoneHandle(Z, function.name()),
GetNextDeoptId(), TokenPosition::kNoSource);
}
if (compiler::ffi::NativeTypeIsPointer(ffi_type)) {
body += LoadAddressFromFfiPointer();
body += UnboxTruncate(kUnboxedFfiIntPtr);
} else {
Representation from_rep = compiler::ffi::TypeRepresentation(ffi_type);
body += UnboxTruncate(from_rep);
Representation to_rep = native_representation;
if (from_rep != to_rep) {
body += BitCast(from_rep, to_rep);
} else {
body += FfiUnboxedExtend(from_rep, ffi_type);
}
}
return body;
}
FlowGraph* FlowGraphBuilder::BuildGraphOfFfiTrampoline(
const Function& function) {
if (function.FfiCallbackTarget() != Function::null()) {
return BuildGraphOfFfiCallback(function);
} else {
return BuildGraphOfFfiNative(function);
}
}
FlowGraph* FlowGraphBuilder::BuildGraphOfFfiNative(const Function& function) {
graph_entry_ =
new (Z) GraphEntryInstr(*parsed_function_, Compiler::kNoOSRDeoptId);
@ -2532,29 +2619,7 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfFfiTrampoline(
for (intptr_t pos = 1; pos < function.num_fixed_parameters(); pos++) {
body += LoadLocal(parsed_function_->ParameterVariable(pos));
ffi_type = signature.ParameterTypeAt(pos);
body += FfiConvertArgumentToNative(function, ffi_type, arg_reps[pos - 1]);
}
// Push the function pointer, which is stored (boxed) in the first slot of the
@ -2569,34 +2634,109 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfFfiTrampoline(
body += FfiCall(signature, arg_reps, arg_locs, arg_host_locs);
ffi_type = signature.result_type();
#if !defined(TARGET_ARCH_DBC)
const Representation from_rep =
compiler::ffi::ResultRepresentation(signature);
#else
const Representation from_rep =
compiler::ffi::ResultHostRepresentation(signature);
#endif // !defined(TARGET_ARCH_DBC)
body += FfiConvertArgumentToDart(ffi_type, from_rep);
body += Return(TokenPosition::kNoSource);
return new (Z) FlowGraph(*parsed_function_, graph_entry_, last_used_block_id_,
prologue_info);
}
FlowGraph* FlowGraphBuilder::BuildGraphOfFfiCallback(const Function& function) {
#if !defined(TARGET_ARCH_DBC)
const Function& signature = Function::ZoneHandle(Z, function.FfiCSignature());
const auto& arg_reps = *compiler::ffi::ArgumentRepresentations(signature);
const auto& arg_locs = *compiler::ffi::ArgumentLocations(arg_reps);
const auto& callback_locs =
*compiler::ffi::CallbackArgumentTranslator::TranslateArgumentLocations(
arg_locs);
graph_entry_ =
new (Z) GraphEntryInstr(*parsed_function_, Compiler::kNoOSRDeoptId);
auto* const native_entry = new (Z) NativeEntryInstr(
&arg_locs, graph_entry_, AllocateBlockId(), CurrentTryIndex(),
GetNextDeoptId(), function.FfiCallbackId());
graph_entry_->set_normal_entry(native_entry);
Fragment function_body(native_entry);
function_body += CheckStackOverflowInPrologue(function.token_pos());
// Wrap the entire method in a big try/catch. This is important to ensure that
// the VM does not crash if the callback throws an exception.
const intptr_t try_handler_index = AllocateTryIndex();
Fragment body = TryCatch(try_handler_index);
++try_depth_;
// Box and push the arguments.
AbstractType& ffi_type = AbstractType::Handle(Z);
for (intptr_t i = 0, n = callback_locs.length(); i < n; ++i) {
ffi_type = signature.ParameterTypeAt(i + 1);
auto* parameter =
new (Z) NativeParameterInstr(callback_locs[i], arg_reps[i]);
Push(parameter);
body <<= parameter;
body += FfiConvertArgumentToDart(ffi_type, arg_reps[i]);
body += PushArgument();
}
// Call the target.
//
// TODO(36748): Determine the hot-reload semantics of callbacks and update the
// rebind-rule accordingly.
body += StaticCall(TokenPosition::kNoSource,
Function::ZoneHandle(Z, function.FfiCallbackTarget()),
callback_locs.length(), Array::empty_array(),
ICData::kNoRebind);
ffi_type = signature.result_type();
const Representation result_rep =
compiler::ffi::ResultRepresentation(signature);
body += FfiConvertArgumentToNative(function, ffi_type, result_rep);
body += NativeReturn(result_rep);
--try_depth_;
function_body += body;
++catch_depth_;
Fragment catch_body =
CatchBlockEntry(Array::empty_array(), try_handler_index,
/*needs_stacktrace=*/true, /*is_synthesized=*/true);
catch_body += LoadLocal(CurrentException());
catch_body += PushArgument();
catch_body += LoadLocal(CurrentStackTrace());
catch_body += PushArgument();
// Find '_handleExposedException(e, st)' from ffi_patch.dart and call it.
const Library& ffi_lib =
Library::Handle(Z, Library::LookupLibrary(thread_, Symbols::DartFfi()));
const Function& handler = Function::ZoneHandle(
Z, ffi_lib.LookupFunctionAllowPrivate(Symbols::HandleExposedException()));
ASSERT(!handler.IsNull());
catch_body += StaticCall(TokenPosition::kNoSource, handler, /*num_args=*/2,
/*arg_names=*/Array::empty_array(), ICData::kStatic);
catch_body += Drop();
catch_body += FfiExceptionalReturnValue(ffi_type, result_rep);
catch_body += NativeReturn(result_rep);
--catch_depth_;
PrologueInfo prologue_info(-1, -1);
return new (Z) FlowGraph(*parsed_function_, graph_entry_, last_used_block_id_,
prologue_info);
#else
UNREACHABLE();
#endif
}
void FlowGraphBuilder::SetCurrentTryCatchBlock(TryCatchBlock* try_catch_block) {
try_catch_block_ = try_catch_block;
SetCurrentTryIndex(try_catch_block == nullptr ? kInvalidTryIndex

View file

@ -101,6 +101,8 @@ class FlowGraphBuilder : public BaseFlowGraphBuilder {
FlowGraph* BuildGraphOfNoSuchMethodDispatcher(const Function& function);
FlowGraph* BuildGraphOfInvokeFieldDispatcher(const Function& function);
FlowGraph* BuildGraphOfFfiTrampoline(const Function& function);
FlowGraph* BuildGraphOfFfiCallback(const Function& function);
FlowGraph* BuildGraphOfFfiNative(const Function& function);
Fragment NativeFunctionBody(const Function& function,
LocalVariable* first_parameter);
@ -236,6 +238,26 @@ class FlowGraphBuilder : public BaseFlowGraphBuilder {
// the pointer.
Fragment FfiPointerFromAddress(const Type& result_type);
// Pushes an (unboxed) bogus value returned when a native -> Dart callback
// throws an exception.
Fragment FfiExceptionalReturnValue(const AbstractType& result_type,
const Representation target);
// Pops a Dart object and pushes the unboxed native version, according to the
// semantics of FFI argument translation.
Fragment FfiConvertArgumentToNative(
const Function& function,
const AbstractType& ffi_type,
const Representation native_representation);
// Reverse of 'FfiConvertArgumentToNative'.
Fragment FfiConvertArgumentToDart(const AbstractType& ffi_type,
const Representation native_representation);
// Return from a native -> Dart callback. Can only be used in conjunction with
// NativeEntry and NativeParameter.
Fragment NativeReturn(Representation result);
// Bit-wise cast between representations.
// Pops the input and pushes the converted result.
// Currently only works with equal sizes and floating point <-> integer.

View file

@ -390,6 +390,17 @@ ScopeBuildingResult* ScopeBuilder::BuildScopes() {
: Object::dynamic_type().raw()));
scope_->InsertParameterAt(i, variable);
}
// Callbacks need try/catch variables.
if (function.IsFfiTrampoline() &&
function.FfiCallbackTarget() != Function::null()) {
++depth_.try_;
AddTryVariables();
--depth_.try_;
++depth_.catch_;
AddCatchVariables();
FinalizeCatchVariables();
--depth_.catch_;
}
break;
case RawFunction::kSignatureFunction:
case RawFunction::kIrregexpFunction:

View file

@ -500,6 +500,7 @@ word Array::header_size() {
V(Thread, top_resource_offset) \
V(Thread, vm_tag_offset) \
V(Thread, safepoint_state_offset) \
V(Thread, callback_code_offset) \
V(TimelineStream, enabled_offset) \
V(TwoByteString, data_offset) \
V(Type, arguments_offset) \
@ -582,6 +583,10 @@ word Thread::write_barrier_entry_point_offset() {
word Thread::array_write_barrier_entry_point_offset() {
return dart::Thread::array_write_barrier_entry_point_offset();
}
word Thread::verify_callback_isolate_entry_point_offset() {
return dart::Thread::verify_callback_entry_offset();
}
#endif // !defined(TARGET_ARCH_DBC)
#if defined(TARGET_ARCH_ARM) || defined(TARGET_ARCH_ARM64) || \

View file

@ -572,6 +572,7 @@ class Thread : public AllStatic {
static word write_barrier_wrappers_thread_offset(intptr_t regno);
static word array_write_barrier_entry_point_offset();
static word write_barrier_entry_point_offset();
static word verify_callback_isolate_entry_point_offset();
static word vm_tag_offset();
static uword vm_tag_compiled_id();
@ -583,6 +584,8 @@ class Thread : public AllStatic {
static uword native_execution_state();
static uword generated_execution_state();
static word callback_code_offset();
#if !defined(TARGET_ARCH_DBC)
static word write_barrier_code_offset();
static word array_write_barrier_code_offset();

View file

@ -290,6 +290,10 @@ void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
__ Ret();
}
void StubCodeCompiler::GenerateVerifyCallbackStub(Assembler* assembler) {
__ Breakpoint();
}
void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
GenerateSharedStub(

View file

@ -235,6 +235,10 @@ void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
__ Ret();
}
void StubCodeCompiler::GenerateVerifyCallbackStub(Assembler* assembler) {
__ Breakpoint();
}
// R1: The extracted method.
// R4: The type_arguments_field_offset (or 0)
void StubCodeCompiler::GenerateBuildMethodExtractorStub(

View file

@ -154,6 +154,10 @@ void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
__ ret();
}
void StubCodeCompiler::GenerateVerifyCallbackStub(Assembler* assembler) {
__ Breakpoint();
}
void StubCodeCompiler::GenerateNullErrorSharedWithoutFPURegsStub(
Assembler* assembler) {
__ Breakpoint();

View file

@ -204,8 +204,13 @@ void StubCodeCompiler::GenerateEnterSafepointStub(Assembler* assembler) {
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers.cpu_registers(),
all_registers.fpu_registers());
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(0);
__ movq(RAX, Address(THR, kEnterSafepointRuntimeEntry.OffsetFromThread()));
__ CallCFunction(RAX);
__ LeaveFrame();
__ PopRegisters(all_registers.cpu_registers(), all_registers.fpu_registers());
__ ret();
}
@ -215,12 +220,31 @@ void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
all_registers.AddAllGeneralRegisters();
__ PushRegisters(all_registers.cpu_registers(),
all_registers.fpu_registers());
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(0);
__ movq(RAX, Address(THR, kExitSafepointRuntimeEntry.OffsetFromThread()));
__ CallCFunction(RAX);
__ LeaveFrame();
__ PopRegisters(all_registers.cpu_registers(), all_registers.fpu_registers());
__ ret();
}
void StubCodeCompiler::GenerateVerifyCallbackStub(Assembler* assembler) {
// SP points to return address, which needs to be the second argument to
// VerifyCallbackIsolate.
__ movq(CallingConventions::kArg2Reg, Address(SPREG, 0));
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(0);
__ movq(RAX,
Address(THR, kVerifyCallbackIsolateRuntimeEntry.OffsetFromThread()));
__ CallCFunction(RAX);
__ LeaveFrame();
__ ret();
}
// RBX: The extracted method.
// RDX: The type_arguments_field_offset (or 0)
void StubCodeCompiler::GenerateBuildMethodExtractorStub(
@ -1086,19 +1110,8 @@ void StubCodeCompiler::GenerateInvokeDartCodeStub(Assembler* assembler) {
__ pushq(RAX);
// The constant target::frame_layout.exit_link_slot_from_entry_fp must be kept
// in sync with the code below.
#if defined(DEBUG)
{
Label ok;
__ leaq(RAX,
Address(RBP, target::frame_layout.exit_link_slot_from_entry_fp *
target::kWordSize));
__ cmpq(RAX, RSP);
__ j(EQUAL, &ok);
__ Stop("target::frame_layout.exit_link_slot_from_entry_fp mismatch");
__ Bind(&ok);
}
#endif
// in sync with the code above.
__ EmitEntryFrameVerification();
__ movq(Address(THR, target::Thread::top_exit_frame_info_offset()),
Immediate(0));

View file
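
For orientation: the new VerifyCallback stub is a leaf call into the runtime. It relies on the callback's entry code having already placed the callback id in CallingConventions::kArg1Reg, and it fetches its own return address from the top of the stack to serve as the second argument. A C rendering of its behavior (a sketch only; the frame setup and register moves above are the real mechanism):

#include <cstdint>

typedef uintptr_t uword;

// The leaf runtime entry the stub targets (defined later in this CL).
extern "C" void DLRT_VerifyCallbackIsolate(int32_t callback_id,
                                           uword return_address);

// Sketch: 'callback_id' arrives in kArg1Reg; 'return_address' is what the
// stub loads from [SP] before setting up an aligned C frame.
extern "C" void VerifyCallbackStubSketch(int32_t callback_id,
                                         uword return_address) {
  // FATALs unless the return address lies inside the code registered for
  // this callback id on the current isolate.
  DLRT_VerifyCallbackIsolate(callback_id, return_address);
}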

@ -995,6 +995,12 @@ void Exceptions::ThrowRangeError(const char* argument_name,
Exceptions::ThrowByType(Exceptions::kRange, args);
}
void Exceptions::ThrowUnsupportedError(const char* msg) {
const Array& args = Array::Handle(Array::New(1));
args.SetAt(0, String::Handle(String::New(msg)));
Exceptions::ThrowByType(Exceptions::kUnsupported, args);
}
void Exceptions::ThrowRangeErrorMsg(const char* msg) {
const Array& args = Array::Handle(Array::New(1));
args.SetAt(0, String::Handle(String::New(msg)));

View file

@ -84,6 +84,7 @@ class Exceptions : AllStatic {
intptr_t expected_from,
intptr_t expected_to);
DART_NORETURN static void ThrowRangeErrorMsg(const char* msg);
DART_NORETURN static void ThrowUnsupportedError(const char* msg);
DART_NORETURN static void ThrowCompileTimeError(const LanguageError& error);
// Returns a RawInstance if the exception is successfully created,

View file
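
A hypothetical call site for the new helper, just to show its shape (the guard and message here are invented for illustration, not taken from this CL):

#include "vm/exceptions.h"

// Illustrative only.
void EnsureCallbacksSupported(bool arch_supports_callbacks) {
  if (!arch_supports_callbacks) {
    // Throws a Dart UnsupportedError with the given message; never returns.
    dart::Exceptions::ThrowUnsupportedError(
        "FFI callbacks are not yet supported on this architecture.");
  }
}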

@ -6065,6 +6065,34 @@ RawFunction* Function::FfiCSignature() const {
return FfiTrampolineData::Cast(obj).c_signature();
}
int32_t Function::FfiCallbackId() const {
ASSERT(IsFfiTrampoline());
const Object& obj = Object::Handle(raw_ptr()->data_);
ASSERT(!obj.IsNull());
return FfiTrampolineData::Cast(obj).callback_id();
}
void Function::SetFfiCallbackId(int32_t value) const {
ASSERT(IsFfiTrampoline());
const Object& obj = Object::Handle(raw_ptr()->data_);
ASSERT(!obj.IsNull());
FfiTrampolineData::Cast(obj).set_callback_id(value);
}
RawFunction* Function::FfiCallbackTarget() const {
ASSERT(IsFfiTrampoline());
const Object& obj = Object::Handle(raw_ptr()->data_);
ASSERT(!obj.IsNull());
return FfiTrampolineData::Cast(obj).callback_target();
}
void Function::SetFfiCallbackTarget(const Function& target) const {
ASSERT(IsFfiTrampoline());
const Object& obj = Object::Handle(raw_ptr()->data_);
ASSERT(!obj.IsNull());
FfiTrampolineData::Cast(obj).set_callback_target(target);
}
RawType* Function::SignatureType() const {
Type& type = Type::Handle(ExistingSignatureType());
if (type.IsNull()) {
@ -8218,12 +8246,22 @@ void FfiTrampolineData::set_c_signature(const Function& value) const {
StorePointer(&raw_ptr()->c_signature_, value.raw());
}
void FfiTrampolineData::set_callback_target(const Function& value) const {
StorePointer(&raw_ptr()->callback_target_, value.raw());
}
void FfiTrampolineData::set_callback_id(int32_t callback_id) const {
StoreNonPointer(&raw_ptr()->callback_id_, callback_id);
}
RawFfiTrampolineData* FfiTrampolineData::New() {
ASSERT(Object::ffi_trampoline_data_class() != Class::null());
RawObject* raw =
Object::Allocate(FfiTrampolineData::kClassId,
FfiTrampolineData::InstanceSize(), Heap::kOld);
return reinterpret_cast<RawFfiTrampolineData*>(raw);
RawFfiTrampolineData* data = reinterpret_cast<RawFfiTrampolineData*>(raw);
data->ptr()->callback_id_ = -1;
return data;
}
const char* FfiTrampolineData::ToCString() const {

View file

@ -2005,6 +2005,20 @@ class Function : public Object {
// Can only be used on FFI trampolines.
RawFunction* FfiCSignature() const;
// Can only be called on FFI trampolines.
// -1 for Dart -> native calls.
int32_t FfiCallbackId() const;
// Can only be called on FFI trampolines.
void SetFfiCallbackId(int32_t value) const;
// Can only be called on FFI trampolines.
// Null for Dart -> native calls.
RawFunction* FfiCallbackTarget() const;
// Can only be called on FFI trampolines.
void SetFfiCallbackTarget(const Function& target) const;
// Return a new function with instantiated result and parameter types.
RawFunction* InstantiateSignatureFrom(
const TypeArguments& instantiator_type_arguments,
@ -3135,6 +3149,12 @@ class FfiTrampolineData : public Object {
RawFunction* c_signature() const { return raw_ptr()->c_signature_; }
void set_c_signature(const Function& value) const;
RawFunction* callback_target() const { return raw_ptr()->callback_target_; }
void set_callback_target(const Function& value) const;
int32_t callback_id() const { return raw_ptr()->callback_id_; }
void set_callback_id(int32_t value) const;
static RawFfiTrampolineData* New();
FINAL_HEAP_OBJECT_IMPLEMENTATION(FfiTrampolineData, Object);
@ -5005,7 +5025,7 @@ class Code : public Object {
return ContainsInstructionAt(raw(), addr);
}
static bool ContainsInstructionAt(RawCode* code, uword addr) {
static bool ContainsInstructionAt(const RawCode* code, uword addr) {
return Instructions::ContainsPc(code->ptr()->instructions_, addr);
}
@ -8272,6 +8292,14 @@ class GrowableObjectArray : public Instance {
static RawGrowableObjectArray* New(const Array& array,
Heap::Space space = Heap::kNew);
static RawSmi* NoSafepointLength(const RawGrowableObjectArray* array) {
return array->ptr()->length_;
}
static RawArray* NoSafepointData(const RawGrowableObjectArray* array) {
return array->ptr()->data_;
}
private:
RawArray* DataArray() const { return data()->ptr(); }
RawObject** ObjectAddr(intptr_t index) const {

View file

@ -1013,7 +1013,19 @@ class RawFfiTrampolineData : public RawObject {
VISIT_FROM(RawObject*, signature_type_);
RawType* signature_type_;
RawFunction* c_signature_;
VISIT_TO(RawObject*, c_signature_);
// Target Dart method for callbacks, otherwise null.
RawFunction* callback_target_;
VISIT_TO(RawObject*, callback_target_);
// Callback id for callbacks, otherwise -1 (set in FfiTrampolineData::New).
//
// The callback ids are used so that native callbacks can look up their own
// code objects, since native code doesn't pass code objects into function
// calls. The callback id is also used for verifying that callbacks are
// called on the correct isolate. See DLRT_VerifyCallbackIsolate for details.
int32_t callback_id_;
};
class RawField : public RawObject {

View file
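
The rationale above can be made concrete with a toy model of the check the id enables (names and types here are invented; the real implementation is Thread::VerifyCallbackIsolate further down):

#include <cstdint>
#include <vector>

// Toy model: per isolate, a table from callback id to the [start, end)
// address range of the code compiled for that callback.
struct CodeRange {
  uintptr_t start;
  uintptr_t end;
};

bool VerifyCallbackIdSketch(const std::vector<CodeRange>& callback_code,
                            int32_t callback_id, uintptr_t return_address) {
  if (callback_id < 0 ||
      callback_id >= static_cast<int32_t>(callback_code.size())) {
    return false;  // Id was allocated by a different isolate.
  }
  const CodeRange& code = callback_code[callback_id];
  // The caller's return address must land inside this callback's own code;
  // otherwise the callback is being invoked on the wrong isolate.
  return return_address >= code.start && return_address < code.end;
}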

@ -26,6 +26,7 @@
#include "vm/service_isolate.h"
#include "vm/stack_frame.h"
#include "vm/symbols.h"
#include "vm/thread.h"
#include "vm/thread_registry.h"
#include "vm/type_testing_stubs.h"
@ -2765,4 +2766,29 @@ extern "C" void DFLRT_ExitSafepoint(NativeArguments __unusable_) {
}
DEFINE_RAW_LEAF_RUNTIME_ENTRY(ExitSafepoint, 0, false, &DFLRT_ExitSafepoint);
// Not registered as a runtime entry because we can't use Thread to look it up.
extern "C" Thread* DLRT_GetThreadForNativeCallback() {
Thread* const thread = Thread::Current();
if (thread == nullptr) {
FATAL("Cannot invoke native callback outside an isolate.");
}
if (thread->no_callback_scope_depth() != 0) {
FATAL("Cannot invoke native callback when API callbacks are prohibited.");
}
if (!thread->IsMutatorThread()) {
FATAL("Native callbacks must be invoked on the mutator thread.");
}
return thread;
}
extern "C" void DLRT_VerifyCallbackIsolate(int32_t callback_id,
uword return_address) {
Thread::Current()->VerifyCallbackIsolate(callback_id, return_address);
}
DEFINE_RAW_LEAF_RUNTIME_ENTRY(
VerifyCallbackIsolate,
1,
false /* is_float */,
reinterpret_cast<RuntimeFunction>(&DLRT_VerifyCallbackIsolate));
} // namespace dart

View file
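
Taken together, the two DLRT entries above suggest the order of operations in a native -> Dart callback prologue. A hedged sketch (the sequencing is inferred from this CL; the driver function is illustrative):

#include <cstdint>

#include "vm/thread.h"

extern "C" dart::Thread* DLRT_GetThreadForNativeCallback();
extern "C" void DLRT_VerifyCallbackIsolate(int32_t callback_id,
                                           uintptr_t return_address);

// Illustrative prologue for a native -> Dart callback (not VM source).
void CallbackPrologueSketch(int32_t callback_id, uintptr_t entry) {
  // 1. Recover the current Thread; FATALs outside an isolate, off the
  //    mutator thread, or while API callbacks are prohibited.
  dart::Thread* thread = DLRT_GetThreadForNativeCallback();
  // 2. Check that the callback id belongs to this isolate and that 'entry'
  //    points into its registered Code.
  DLRT_VerifyCallbackIsolate(callback_id, entry);
  // 3. Only then transition into generated code and invoke the Dart target.
  (void)thread;
}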

@ -145,6 +145,9 @@ class RuntimeEntry : public BaseRuntimeEntry {
RUNTIME_ENTRY_LIST(DECLARE_RUNTIME_ENTRY)
LEAF_RUNTIME_ENTRY_LIST(DECLARE_LEAF_RUNTIME_ENTRY)
// Expected to be called inside a safepoint.
extern "C" Thread* DLRT_GetThreadForNativeCallback();
const char* DeoptReasonToCString(ICData::DeoptReasonId deopt_reason);
void DeoptimizeAt(const Code& optimized_code, StackFrame* frame);

View file

@ -84,7 +84,8 @@ namespace dart {
V(RawBool*, CaseInsensitiveCompareUTF16, RawString*, RawSmi*, RawSmi*, \
RawSmi*) \
V(void, EnterSafepoint) \
V(void, ExitSafepoint)
V(void, ExitSafepoint) \
V(void, VerifyCallbackIsolate, int32_t, uword)
} // namespace dart

View file

@ -471,6 +471,13 @@ DART_FORCE_INLINE static uword LocalVarAddress(uword fp, intptr_t index) {
return fp + LocalVarIndex(0, index) * kWordSize;
}
#if !defined(TARGET_ARCH_X64)
// For FFI native -> Dart callbacks, the number of stack slots between arguments
// passed on stack and arguments saved in callback prologue. This placeholder
// covers architectures that don't support callbacks yet.
constexpr intptr_t kCallbackSlotsBeforeSavedArguments = -1;
#endif
} // namespace dart
#endif // RUNTIME_VM_STACK_FRAME_H_

View file

@ -5,6 +5,8 @@
#ifndef RUNTIME_VM_STACK_FRAME_X64_H_
#define RUNTIME_VM_STACK_FRAME_X64_H_
#include "vm/constants_x64.h"
namespace dart {
/* X64 Dart Frame Layout
@ -52,6 +54,13 @@ static const int kExitLinkSlotFromEntryFp = -32;
static const int kExitLinkSlotFromEntryFp = -10;
#endif // defined(_WIN64)
// For FFI native -> Dart callbacks, the number of stack slots between arguments
// passed on stack and arguments saved in callback prologue. 2 = return address
// (1) + saved frame pointer (1). Also add slots for the shadow space, if
// present.
constexpr intptr_t kCallbackSlotsBeforeSavedArguments =
    2 + CallingConventions::kShadowSpaceBytes / kWordSize;
} // namespace dart
#endif // RUNTIME_VM_STACK_FRAME_X64_H_

View file
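
To make the constant concrete (assuming 8-byte stack slots, with shadow-space bytes converted to slots as above):

// Worked example, assuming kWordSize == 8:
//   System V AMD64: kShadowSpaceBytes == 0  -> 2 + 0 / 8  == 2 slots
//                   (return address + saved RBP)
//   Windows x64:    kShadowSpaceBytes == 32 -> 2 + 32 / 8 == 6 slots
//                   (plus four shadow-space slots)
static_assert(2 + 0 / 8 == 2, "System V slot count");
static_assert(2 + 32 / 8 == 6, "Win64 slot count");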

@ -77,7 +77,8 @@ namespace dart {
V(OneArgCheckInlineCacheWithExactnessCheck) \
V(OneArgOptimizedCheckInlineCacheWithExactnessCheck) \
V(EnterSafepoint) \
V(ExitSafepoint)
V(ExitSafepoint) \
V(VerifyCallback)
#else
#define VM_STUB_CODE_LIST(V) \

View file

@ -179,6 +179,7 @@ class ObjectPointerVisitor;
V(GetterPrefix, "get:") \
V(GreaterEqualOperator, ">=") \
V(GrowRegExpStack, "_growRegExpStack") \
V(HandleExposedException, "_handleExposedException") \
V(HaveSameRuntimeType, "_haveSameRuntimeType") \
V(Hide, "hide") \
V(ICData, "ICData") \

View file

@ -6,6 +6,7 @@
#include "vm/dart_api_state.h"
#include "vm/growable_array.h"
#include "vm/heap/safepoint.h"
#include "vm/isolate.h"
#include "vm/json_stream.h"
#include "vm/lockers.h"
@ -76,6 +77,7 @@ Thread::Thread(Isolate* isolate)
resume_pc_(0),
execution_state_(kThreadInNative),
safepoint_state_(0),
ffi_callback_code_(GrowableObjectArray::null()),
task_kind_(kUnknownTask),
dart_stream_(NULL),
thread_lock_(),
@ -668,6 +670,7 @@ void Thread::VisitObjectPointers(ObjectPointerVisitor* visitor,
visitor->VisitPointer(reinterpret_cast<RawObject**>(&active_stacktrace_));
visitor->VisitPointer(reinterpret_cast<RawObject**>(&sticky_error_));
visitor->VisitPointer(reinterpret_cast<RawObject**>(&async_stack_trace_));
visitor->VisitPointer(reinterpret_cast<RawObject**>(&ffi_callback_code_));
#if !defined(DART_PRECOMPILED_RUNTIME)
if (interpreter() != NULL) {
@ -936,4 +939,45 @@ DisableThreadInterruptsScope::~DisableThreadInterruptsScope() {
}
}
const intptr_t kInitialCallbackIdsReserved = 1024;
int32_t Thread::AllocateFfiCallbackId() {
Zone* Z = isolate()->current_zone();
if (ffi_callback_code_ == GrowableObjectArray::null()) {
ffi_callback_code_ = GrowableObjectArray::New(kInitialCallbackIdsReserved);
}
const auto& array = GrowableObjectArray::Handle(Z, ffi_callback_code_);
array.Add(Code::Handle(Z, Code::null()));
return array.Length() - 1;
}
void Thread::SetFfiCallbackCode(int32_t callback_id, const Code& code) {
Zone* Z = isolate()->current_zone();
const auto& array = GrowableObjectArray::Handle(Z, ffi_callback_code_);
array.SetAt(callback_id, code);
}
void Thread::VerifyCallbackIsolate(int32_t callback_id, uword entry) {
NoSafepointScope _;
const RawGrowableObjectArray* const array = ffi_callback_code_;
if (array == GrowableObjectArray::null()) {
FATAL("Cannot invoke callback on incorrect isolate.");
}
const RawSmi* const length_smi =
GrowableObjectArray::NoSafepointLength(array);
const intptr_t length = Smi::Value(length_smi);
if (callback_id < 0 || callback_id >= length) {
FATAL("Cannot invoke callback on incorrect isolate.");
}
RawObject** const code_array =
Array::DataOf(GrowableObjectArray::NoSafepointData(array));
const RawCode* const code = Code::RawCast(code_array[callback_id]);
if (!Code::ContainsInstructionAt(code, entry)) {
FATAL("Cannot invoke callback on incorrect isolate.");
}
}
} // namespace dart

View file
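
The three new Thread methods form a reserve / fill / check pattern. A sketch of the expected call sites (the driver function is invented; only the Thread and Function methods are from this CL):

#include <cstdint>

#include "vm/object.h"  // dart::Code, dart::Function
#include "vm/thread.h"  // dart::Thread

// Illustrative only: how a trampoline builder would drive the bookkeeping.
void RegisterCallbackSketch(dart::Thread* thread,
                            const dart::Function& trampoline,
                            const dart::Code& code) {
  // 1. Reserve: append a null slot to ffi_callback_code_ and get its index.
  const int32_t id = thread->AllocateFfiCallbackId();
  // 2. Record the id on the trampoline so generated code can embed it.
  trampoline.SetFfiCallbackId(id);
  // 3. Fill: once the callback's Code exists, store it in the reserved slot.
  thread->SetFfiCallbackCode(id, code);
  // 4. Check: on entry from native code, VerifyCallbackIsolate(id, pc)
  //    FATALs unless 'pc' lies within the Code stored under 'id'.
}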

@ -168,8 +168,8 @@ class Zone;
V(uword, monomorphic_miss_entry_, StubCode::MonomorphicMiss().EntryPoint(), \
0) \
V(uword, optimize_entry_, StubCode::OptimizeFunction().EntryPoint(), 0) \
V(uword, deoptimize_entry_, StubCode::Deoptimize().EntryPoint(), 0)
V(uword, deoptimize_entry_, StubCode::Deoptimize().EntryPoint(), 0) \
V(uword, verify_callback_entry_, StubCode::VerifyCallback().EntryPoint(), 0)
#endif
#define CACHED_ADDRESSES_LIST(V) \
@ -315,6 +315,10 @@ class Thread : public ThreadState {
return OFFSET_OF(Thread, safepoint_state_);
}
static intptr_t callback_code_offset() {
return OFFSET_OF(Thread, ffi_callback_code_);
}
TaskKind task_kind() const { return task_kind_; }
// Retrieves and clears the stack overflow flags. These are set by
@ -768,6 +772,13 @@ class Thread : public ThreadState {
}
}
int32_t AllocateFfiCallbackId();
void SetFfiCallbackCode(int32_t callback_id, const Code& code);
// Ensure that 'entry' points within the code of the callback identified by
// 'callback_id'. Aborts otherwise.
void VerifyCallbackIsolate(int32_t callback_id, uword entry);
Thread* next() const { return next_; }
// Visit all object pointers.
@ -861,6 +872,7 @@ class Thread : public ThreadState {
uword resume_pc_;
uword execution_state_;
uword safepoint_state_;
RawGrowableObjectArray* ffi_callback_code_;
// ---- End accessed from generated code. ----

View file

@ -25,6 +25,9 @@ negative_function_test: Skip
[ $arch == x64 || $arch == arm64 || $arch == simdbc64 ]
enable_structs_test: SkipByDesign # Tests that structs don't work on 32-bit systems.
[ $arch != x64 ]
function_callbacks_test: Skip # Issue 35761
[ $runtime == dart_precompiled ]
*: Skip # AOT is not yet supported: dartbug.com/35765

View file

@ -8,71 +8,192 @@
library FfiTest;
import 'dart:ffi' as ffi;
import 'dart:io';
import 'dart:ffi';
import 'dart:isolate';
import 'dylib_utils.dart';
import "package:expect/expect.dart";
import 'coordinate.dart';
typedef NativeCallbackTest = Int32 Function(Pointer);
typedef NativeCallbackTestFn = int Function(Pointer);
typedef NativeCoordinateOp = Coordinate Function(Coordinate);
final DynamicLibrary testLibrary = dlopenPlatformSpecific("ffi_test_functions");
typedef CoordinateTrice = Coordinate Function(
ffi.Pointer<ffi.NativeFunction<NativeCoordinateOp>>, Coordinate);
class Test {
final String name;
final Pointer callback;
final bool skip;
void main() {
testFunctionWithFunctionPointer();
testNativeFunctionWithFunctionPointer();
Test(this.name, this.callback, {bool skipIf: false}) : skip = skipIf;
void run() {
if (skip) return;
final NativeCallbackTestFn tester = testLibrary
.lookupFunction<NativeCallbackTest, NativeCallbackTestFn>("Test$name");
final int testCode = tester(callback);
if (testCode != 0) {
Expect.fail("Test $name failed.");
}
}
}
ffi.DynamicLibrary ffiTestFunctions =
dlopenPlatformSpecific("ffi_test_functions");
typedef SimpleAdditionType = Int32 Function(Int32, Int32);
int simpleAddition(int x, int y) => x + y;
/// pass a pointer to a c function as an argument to a c function
void testFunctionWithFunctionPointer() {
ffi.Pointer<ffi.NativeFunction<NativeCoordinateOp>>
transposeCoordinatePointer =
ffiTestFunctions.lookup("TransposeCoordinate");
typedef IntComputationType = Int64 Function(Int8, Int16, Int32, Int64);
int intComputation(int a, int b, int c, int d) => d - c + b - a;
ffi.Pointer<ffi.NativeFunction<CoordinateTrice>> p2 =
ffiTestFunctions.lookup("CoordinateUnOpTrice");
CoordinateTrice coordinateUnOpTrice = p2.asFunction();
typedef UintComputationType = Uint64 Function(Uint8, Uint16, Uint32, Uint64);
int uintComputation(int a, int b, int c, int d) => d - c + b - a;
Coordinate c1 = Coordinate(10.0, 20.0, null);
c1.next = c1;
typedef SimpleMultiplyType = Double Function(Double);
double simpleMultiply(double x) => x * 1.337;
Coordinate result = coordinateUnOpTrice(transposeCoordinatePointer, c1);
typedef SimpleMultiplyFloatType = Float Function(Float);
double simpleMultiplyFloat(double x) => x * 1.337;
print(result.runtimeType);
print(result.x);
print(result.y);
c1.free();
typedef ManyIntsType = IntPtr Function(IntPtr, IntPtr, IntPtr, IntPtr, IntPtr,
IntPtr, IntPtr, IntPtr, IntPtr, IntPtr);
int manyInts(
int a, int b, int c, int d, int e, int f, int g, int h, int i, int j) {
return a + b + c + d + e + f + g + h + i + j;
}
typedef BinaryOp = int Function(int, int);
typedef NativeIntptrBinOp = ffi.IntPtr Function(ffi.IntPtr, ffi.IntPtr);
typedef NativeIntptrBinOpLookup
= ffi.Pointer<ffi.NativeFunction<NativeIntptrBinOp>> Function();
void testNativeFunctionWithFunctionPointer() {
ffi.Pointer<ffi.NativeFunction<NativeIntptrBinOpLookup>> p1 =
ffiTestFunctions.lookup("IntptrAdditionClosure");
NativeIntptrBinOpLookup intptrAdditionClosure = p1.asFunction();
ffi.Pointer<ffi.NativeFunction<NativeIntptrBinOp>> intptrAdditionPointer =
intptrAdditionClosure();
BinaryOp intptrAddition = intptrAdditionPointer.asFunction();
Expect.equals(37, intptrAddition(10, 27));
typedef ManyDoublesType = Double Function(Double, Double, Double, Double,
Double, Double, Double, Double, Double, Double);
double manyDoubles(double a, double b, double c, double d, double e, double f,
double g, double h, double i, double j) {
return a + b + c + d + e + f + g + h + i + j;
}
int myPlus(int a, int b) => a + b;
typedef ManyArgsType = Double Function(
IntPtr,
Float,
IntPtr,
Double,
IntPtr,
Float,
IntPtr,
Double,
IntPtr,
Float,
IntPtr,
Double,
IntPtr,
Float,
IntPtr,
Double,
IntPtr,
Float,
IntPtr,
Double);
double manyArgs(
int _1,
double _2,
int _3,
double _4,
int _5,
double _6,
int _7,
double _8,
int _9,
double _10,
int _11,
double _12,
int _13,
double _14,
int _15,
double _16,
int _17,
double _18,
int _19,
double _20) {
return _1 +
_2 +
_3 +
_4 +
_5 +
_6 +
_7 +
_8 +
_9 +
_10 +
_11 +
_12 +
_13 +
_14 +
_15 +
_16 +
_17 +
_18 +
_19 +
_20;
}
typedef NativeApplyTo42And74Type = ffi.IntPtr Function(
ffi.Pointer<ffi.NativeFunction<NativeIntptrBinOp>>);
typedef StoreType = Pointer<Int64> Function(Pointer<Int64>);
Pointer<Int64> store(Pointer<Int64> ptr) => ptr.elementAt(1)..store(1337);
typedef ApplyTo42And74Type = int Function(
ffi.Pointer<ffi.NativeFunction<NativeIntptrBinOp>>);
typedef NullPointersType = Pointer<Int64> Function(Pointer<Int64>);
Pointer<Int64> nullPointers(Pointer<Int64> ptr) => ptr?.elementAt(1);
typedef ReturnNullType = Int32 Function();
int returnNull() {
print('Expect "unhandled exception" error message to follow.');
return null;
}
typedef ReturnVoid = Void Function();
void returnVoid() {}
final List<Test> testcases = [
Test("SimpleAddition", fromFunction<SimpleAdditionType>(simpleAddition)),
Test("IntComputation", fromFunction<IntComputationType>(intComputation)),
Test("UintComputation", fromFunction<UintComputationType>(uintComputation)),
Test("SimpleMultiply", fromFunction<SimpleMultiplyType>(simpleMultiply)),
Test("SimpleMultiplyFloat",
fromFunction<SimpleMultiplyFloatType>(simpleMultiplyFloat)),
Test("ManyInts", fromFunction<ManyIntsType>(manyInts)),
Test("ManyDoubles", fromFunction<ManyDoublesType>(manyDoubles)),
Test("ManyArgs", fromFunction<ManyArgsType>(manyArgs)),
Test("Store", fromFunction<StoreType>(store)),
Test("NullPointers", fromFunction<NullPointersType>(nullPointers)),
Test("ReturnNull", fromFunction<ReturnNullType>(returnNull)),
];
testCallbackWrongThread() =>
Test("CallbackWrongThread", fromFunction<ReturnVoid>(returnVoid)).run();
testCallbackOutsideIsolate() =>
Test("CallbackOutsideIsolate", fromFunction<ReturnVoid>(returnVoid)).run();
isolateHelper(int callbackPointer) {
final Pointer<Void> ptr = fromAddress(callbackPointer);
final NativeCallbackTestFn tester =
testLibrary.lookupFunction<NativeCallbackTest, NativeCallbackTestFn>(
"TestCallbackWrongIsolate");
Expect.equals(0, tester(ptr));
}
testCallbackWrongIsolate() async {
final int callbackPointer = fromFunction<ReturnVoid>(returnVoid).address;
final ReceivePort exitPort = ReceivePort();
await Isolate.spawn(isolateHelper, callbackPointer,
errorsAreFatal: true, onExit: exitPort.sendPort);
await exitPort.first;
}
void main() async {
testcases.forEach((t) => t.run());
// These tests terminate the process after successful completion, so we have
// to run them separately.
//
// Since they use signal handlers, they only run on Linux.
if (Platform.isLinux && !const bool.fromEnvironment("dart.vm.product")) {
testCallbackWrongThread(); //# 01: ok
testCallbackOutsideIsolate(); //# 02: ok
await testCallbackWrongIsolate(); //# 03: ok
}
}