[vm/ffi] Ensure there's a single Function object per ffi-callback + exceptional-return combination

Right now, each `Pointer.fromFunction()` invocation leads to the creation
of a new ffi trampoline function and its accompanying JITed code. In AOT we
have exactly one ffi trampoline per target/native-signature/exceptional-return
combination.
=> This CL ensures we have only one such function in JIT as well.
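
To illustrate, here is a minimal Dart sketch of the resulting behavior (it
merely mirrors the new ffi_callback_unique_test added below; the callback
name is made up for the example):

import 'dart:ffi';

int callback() => 42;

void main() {
  // Same target and same exceptional return: both call sites now resolve
  // to the single deduped ffi trampoline, so the pointers compare equal.
  final p1 = Pointer.fromFunction<Int Function()>(callback, 0);
  final p2 = Pointer.fromFunction<Int Function()>(callback, 0);
  print(p1.address == p2.address); // true

  // A different exceptional return yields a different trampoline.
  final p3 = Pointer.fromFunction<Int Function()>(callback, 1);
  print(p1.address != p3.address); // true
}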

Furthermore, each `Pointer.fromFunction()` currently performs two
runtime calls in JIT: one to create a `Function` object, the other to
JIT that function & register callback metadata.
=> This CL ensures we no longer perform a runtime call to obtain the
   function; instead it is created at compile time (as in AOT).
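
As a sketch of what this means at a call site (illustrative only; `add` is
a made-up example target):

import 'dart:ffi';

int add(int a, int b) => a + b;

void main() {
  // The ffi trampoline Function for this call site is now created while
  // the surrounding code is compiled (as it already was in AOT). The
  // remaining runtime entry, Ffi_pointerFromFunction, only ensures the
  // trampoline has code and returns its entry point.
  final ptr = Pointer.fromFunction<Int32 Function(Int32, Int32)>(add, 0);
  print(ptr.address != 0); // true
}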

Furthermore, we now eagerly assign a callback id to each unique/deduped ffi
trampoline callback. Only when the application requests a pointer do we
populate the metadata on the `Thread` object.

This CL doesn't (yet) change the fact that in JIT mode we have
isolate-specific jit trampolines (which will now call the shared ffi
trampoline functions).

We also avoid baking C++ runtime function pointers into generated
code. As a result we can now preserve ffi trampolines across AppJIT
serialization.

As a nice side-effect, we remove 100 lines of code.

TEST=ffi{,_2}/ffi_callback_unique_test

Issue https://github.com/dart-lang/sdk/issues/50611

Change-Id: I458831a47b041a088086f28f825de2a3849f6adc
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/273420
Reviewed-by: Daco Harkes <dacoharkes@google.com>
Commit-Queue: Martin Kustermann <kustermann@google.com>
Martin Kustermann, 2022-12-07 20:47:13 +00:00 (committed by Commit Queue)
parent 2f864c0a5d
commit 4be2981c2d
22 changed files with 287 additions and 310 deletions


@@ -74,88 +74,35 @@ DEFINE_NATIVE_ENTRY(Ffi_asFunctionInternal, 2, 2) {
CLASS_LIST_FFI_NUMERIC_FIXED_SIZE(DEFINE_NATIVE_ENTRY_AS_EXTERNAL_TYPED_DATA)
#undef DEFINE_NATIVE_ENTRY_AS_EXTERNAL_TYPED_DATA
DEFINE_NATIVE_ENTRY(Ffi_nativeCallbackFunction, 1, 2) {
#if defined(DART_PRECOMPILED_RUNTIME) || defined(DART_PRECOMPILER)
// Calls to this function are removed by the flow-graph builder in AOT.
// See StreamingFlowGraphBuilder::BuildFfiNativeCallbackFunction().
UNREACHABLE();
#else
GET_NATIVE_TYPE_ARGUMENT(type_arg, arguments->NativeTypeArgAt(0));
GET_NON_NULL_NATIVE_ARGUMENT(Closure, closure, arguments->NativeArgAt(0));
GET_NON_NULL_NATIVE_ARGUMENT(Instance, exceptional_return,
arguments->NativeArgAt(1));
ASSERT(type_arg.IsInstantiated() && type_arg.IsFunctionType());
const FunctionType& native_signature = FunctionType::Cast(type_arg);
Function& func = Function::Handle(zone, closure.function());
// The FE verifies that the target of a 'fromFunction' is a static method, so
// the value we see here must be a static tearoff. See ffi_use_sites.dart for
// details.
//
// TODO(36748): Define hot-reload semantics of native callbacks. We may need
// to look up the target by name.
ASSERT(func.IsImplicitClosureFunction());
func = func.parent_function();
ASSERT(func.is_static());
// AbiSpecificTypes can have an incomplete mapping.
const char* error = nullptr;
compiler::ffi::NativeFunctionTypeFromFunctionType(zone, native_signature,
&error);
if (error != nullptr) {
Exceptions::ThrowCompileTimeError(LanguageError::Handle(
zone, LanguageError::New(String::Handle(zone, String::New(error)))));
}
// We are returning an object which is not an Instance here. This is only OK
// because we know that the result will be passed directly to
// _pointerFromFunction and will not leak out into user code.
arguments->SetReturn(
Function::Handle(zone, compiler::ffi::NativeCallbackFunction(
native_signature, func, exceptional_return,
/*register_function=*/false)));
// Because we have already set the return value.
return Object::sentinel().ptr();
#endif
}
DEFINE_NATIVE_ENTRY(Ffi_pointerFromFunction, 1, 1) {
const Function& function =
Function::CheckedHandle(zone, arguments->NativeArg0());
Code& code = Code::Handle(zone);
#if defined(DART_PRECOMPILED_RUNTIME)
code = function.CurrentCode();
#else
// We compile the callback immediately because we need to return a pointer to
// the entry-point. Native calls do not use patching like Dart calls, so we
// cannot compile it lazily.
const Object& result = Object::Handle(
zone, Compiler::CompileOptimizedFunction(thread, function));
if (result.IsError()) {
Exceptions::PropagateError(Error::Cast(result));
}
ASSERT(result.IsCode());
code ^= result.ptr();
#endif
const auto& function = Function::CheckedHandle(zone, arguments->NativeArg0());
const auto& code =
Code::Handle(zone, FLAG_precompiled_mode ? function.CurrentCode()
: function.EnsureHasCode());
ASSERT(!code.IsNull());
thread->SetFfiCallbackCode(function.FfiCallbackId(), code);
#ifdef TARGET_ARCH_IA32
#if defined(TARGET_ARCH_IA32)
// On ia32, store the stack delta that we need to use when returning.
const intptr_t stack_return_delta =
function.FfiCSignatureReturnsStruct() && CallingConventions::kUsesRet4
? compiler::target::kWordSize
: 0;
thread->SetFfiCallbackStackReturn(function.FfiCallbackId(),
stack_return_delta);
#else
const intptr_t stack_return_delta = 0;
#endif
thread->SetFfiCallbackCode(function, code, stack_return_delta);
uword entry_point = code.EntryPoint();
// In JIT we use one more indirection:
// * AOT: Native -> Ffi Trampoline -> Dart function
// * JIT: Native -> Jit trampoline -> Ffi Trampoline -> Dart function
//
// We do that since ffi trampoline code lives in Dart heap. During GC we can
// flip page protections from RX to RW to GC JITed code. During that time
// machine code on such pages cannot be executed. Native code therefore has to
// perform the safepoint transition before executing code in Dart heap (which
// is why we use the jit trampoline).
#if !defined(DART_PRECOMPILED_RUNTIME)
if (NativeCallbackTrampolines::Enabled()) {
entry_point = isolate->native_callback_trampolines()->TrampolineForId(


@@ -1382,13 +1382,7 @@ class FfiTrampolineDataSerializationCluster : public SerializationCluster {
FfiTrampolineDataPtr const data = objects_[i];
AutoTraceObject(data);
WriteFromTo(data);
if (s->kind() == Snapshot::kFullAOT) {
s->Write<int32_t>(data->untag()->callback_id_);
} else {
// FFI callbacks can only be written to AOT snapshots.
ASSERT(data->untag()->callback_target() == Object::null());
}
s->Write<int32_t>(data->untag()->callback_id_);
}
}
@@ -1416,8 +1410,7 @@ class FfiTrampolineDataDeserializationCluster : public DeserializationCluster {
Deserializer::InitializeHeader(data, kFfiTrampolineDataCid,
FfiTrampolineData::InstanceSize());
d.ReadFromTo(data);
data->untag()->callback_id_ =
d_->kind() == Snapshot::kFullAOT ? d.Read<int32_t>() : -1;
data->untag()->callback_id_ = d.Read<int32_t>();
}
}
};


@@ -390,7 +390,6 @@ namespace dart {
V(Ffi_address, 1) \
V(Ffi_fromAddress, 1) \
V(Ffi_asFunctionInternal, 2) \
V(Ffi_nativeCallbackFunction, 2) \
V(Ffi_pointerFromFunction, 1) \
V(Ffi_dl_open, 1) \
V(Ffi_dl_lookup, 2) \


@@ -1642,26 +1642,12 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ PushNativeCalleeSavedRegisters();
// Load the thread object. If we were called by a trampoline, the thread is
// already loaded.
if (FLAG_precompiled_mode) {
// Load the thread object. If we were called by a JIT trampoline, the thread
// is already loaded.
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
if (!NativeCallbackTrampolines::Enabled()) {
compiler->LoadBSSEntry(BSS::Relocation::DRT_GetThreadForNativeCallback, R1,
R0);
} else if (!NativeCallbackTrampolines::Enabled()) {
// In JIT mode, we can just paste the address of the runtime entry into the
// generated code directly. This is not a problem since we don't save
// callbacks into JIT snapshots.
ASSERT(kWordSize == compiler::target::kWordSize);
__ LoadImmediate(
R1, static_cast<compiler::target::uword>(
reinterpret_cast<uword>(DLRT_GetThreadForNativeCallback)));
}
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
// Load the thread object. If we were called by a trampoline, the thread is
// already loaded.
if (!NativeCallbackTrampolines::Enabled()) {
// Create another frame to align the frame before continuing in "native"
// code.
__ EnterFrame(1 << FP, 0);


@@ -1527,22 +1527,12 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ PushNativeCalleeSavedRegisters();
// Load the thread object. If we were called by a trampoline, the thread is
// already loaded.
if (FLAG_precompiled_mode) {
// Load the thread object. If we were called by a JIT trampoline, the thread
// is already loaded.
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
if (!NativeCallbackTrampolines::Enabled()) {
compiler->LoadBSSEntry(BSS::Relocation::DRT_GetThreadForNativeCallback, R1,
R0);
} else if (!NativeCallbackTrampolines::Enabled()) {
// In JIT mode, we can just paste the address of the runtime entry into the
// generated code directly. This is not a problem since we don't save
// callbacks into JIT snapshots.
__ LoadImmediate(
R1, reinterpret_cast<int64_t>(DLRT_GetThreadForNativeCallback));
}
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
if (!NativeCallbackTrampolines::Enabled()) {
// Create another frame to align the frame before continuing in "native"
// code.
__ EnterFrame(0);


@@ -1207,23 +1207,8 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
// Load the thread object.
//
// Create another frame to align the frame before continuing in "native" code.
// If we were called by a trampoline, it has already loaded the thread.
ASSERT(!FLAG_precompiled_mode); // No relocation for AOT linking.
if (!NativeCallbackTrampolines::Enabled()) {
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(compiler::target::kWordSize);
__ movl(compiler::Address(SPREG, 0), compiler::Immediate(callback_id));
__ movl(EAX, compiler::Immediate(reinterpret_cast<intptr_t>(
DLRT_GetThreadForNativeCallback)));
__ call(EAX);
__ movl(THR, EAX);
__ LeaveFrame();
}
// The thread object was already loaded by a JIT trampoline.
ASSERT(NativeCallbackTrampolines::Enabled());
// Save the current VMTag on the stack.
__ movl(ECX, compiler::Assembler::VMTagAddress());


@@ -925,8 +925,7 @@ const Function& FlowGraphDeserializer::ReadTrait<const Function&>::Read(
const Instance& exceptional_return = d->Read<const Instance&>();
return Function::ZoneHandle(
zone, compiler::ffi::NativeCallbackFunction(
c_signature, callback_target, exceptional_return,
/*register_function=*/true));
c_signature, callback_target, exceptional_return));
} else {
const String& name = d->Read<const String&>();
const FunctionType& signature = d->Read<const FunctionType&>();


@@ -1429,23 +1429,12 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Save ABI callee-saved registers.
__ PushRegisters(kCalleeSaveRegistersSet);
// Load the address of DLRT_GetThreadForNativeCallback without using Thread.
if (FLAG_precompiled_mode) {
// Load the thread object. If we were called by a JIT trampoline, the thread
// is already loaded.
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
if (!NativeCallbackTrampolines::Enabled()) {
compiler->LoadBSSEntry(BSS::Relocation::DRT_GetThreadForNativeCallback, RAX,
RCX);
} else if (!NativeCallbackTrampolines::Enabled()) {
// In JIT mode, we can just paste the address of the runtime entry into the
// generated code directly. This is not a problem since we don't save
// callbacks into JIT snapshots.
__ movq(RAX, compiler::Immediate(reinterpret_cast<intptr_t>(
DLRT_GetThreadForNativeCallback)));
}
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
// Create another frame to align the frame before continuing in "native" code.
// If we were called by a trampoline, it has already loaded the thread.
if (!NativeCallbackTrampolines::Enabled()) {
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(0);


@@ -17,8 +17,7 @@ namespace ffi {
FunctionPtr NativeCallbackFunction(const FunctionType& c_signature,
const Function& dart_target,
const Instance& exceptional_return,
bool register_function) {
const Instance& exceptional_return) {
Thread* const thread = Thread::Current();
Zone* const zone = thread->zone();
Function& function = Function::Handle(zone);
@@ -74,16 +73,31 @@ FunctionPtr NativeCallbackFunction(const FunctionType& c_signature,
signature ^= ClassFinalizer::FinalizeType(signature);
function.SetSignature(signature);
if (register_function) {
ObjectStore* object_store = thread->isolate_group()->object_store();
{
// Ensure only one thread updates the cache of deduped ffi trampoline
// functions.
auto isolate_group = thread->isolate_group();
SafepointWriteRwLocker ml(thread, isolate_group->program_lock());
auto object_store = isolate_group->object_store();
if (object_store->ffi_callback_functions() == Array::null()) {
FfiCallbackFunctionSet set(
HashTables::New<FfiCallbackFunctionSet>(/*initial_capacity=*/4));
object_store->set_ffi_callback_functions(set.Release());
}
FfiCallbackFunctionSet set(object_store->ffi_callback_functions());
const intptr_t entry_count_before = set.NumOccupied();
function ^= set.InsertOrGet(function);
const intptr_t entry_count_after = set.NumOccupied();
object_store->set_ffi_callback_functions(set.Release());
if (entry_count_before != entry_count_after) {
function.AssignFfiCallbackId(entry_count_before);
} else {
ASSERT(function.FfiCallbackId() != -1);
}
}
return function.ptr();


@@ -21,8 +21,7 @@ namespace ffi {
FunctionPtr NativeCallbackFunction(const FunctionType& c_signature,
const Function& dart_target,
const Instance& exceptional_return,
bool register_function);
const Instance& exceptional_return);
} // namespace ffi


@@ -3391,10 +3391,7 @@ Fragment StreamingFlowGraphBuilder::BuildStaticInvocation(TokenPosition* p) {
case MethodRecognizer::kFfiAsFunctionInternal:
return BuildFfiAsFunctionInternal();
case MethodRecognizer::kFfiNativeCallbackFunction:
if (CompilerState::Current().is_aot()) {
return BuildFfiNativeCallbackFunction();
}
break;
return BuildFfiNativeCallbackFunction();
case MethodRecognizer::kFfiLoadAbiSpecificInt:
return BuildLoadAbiSpecificInt(/*at_index=*/false);
case MethodRecognizer::kFfiLoadAbiSpecificIntAtIndex:
@@ -6381,10 +6378,9 @@ Fragment StreamingFlowGraphBuilder::BuildFfiNativeCallbackFunction() {
compiler::ffi::NativeFunctionTypeFromFunctionType(zone_, native_sig, &error);
ReportIfNotNull(error);
const Function& result = Function::ZoneHandle(
Z,
compiler::ffi::NativeCallbackFunction(
native_sig, target, exceptional_return, /*register_function=*/true));
const Function& result =
Function::ZoneHandle(Z, compiler::ffi::NativeCallbackFunction(
native_sig, target, exceptional_return));
code += Constant(result);
return code;


@@ -974,6 +974,7 @@ bool FlowGraphBuilder::IsRecognizedMethodForFlowGraph(
case MethodRecognizer::kFfiLoadDouble:
case MethodRecognizer::kFfiLoadDoubleUnaligned:
case MethodRecognizer::kFfiLoadPointer:
case MethodRecognizer::kFfiNativeCallbackFunction:
case MethodRecognizer::kFfiStoreInt8:
case MethodRecognizer::kFfiStoreInt16:
case MethodRecognizer::kFfiStoreInt32:
@@ -1279,6 +1280,14 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
ASSERT_EQUAL(function.NumParameters(), 0);
body += IntConstant(static_cast<int64_t>(compiler::ffi::TargetAbi()));
break;
case MethodRecognizer::kFfiNativeCallbackFunction: {
const auto& error = String::ZoneHandle(
Z, Symbols::New(thread_,
"This function should be handled on call site."));
body += Constant(error);
body += ThrowException(TokenPosition::kNoSource);
break;
}
case MethodRecognizer::kFfiLoadInt8:
case MethodRecognizer::kFfiLoadInt16:
case MethodRecognizer::kFfiLoadInt32:


@@ -8054,22 +8054,27 @@ bool Function::FfiCSignatureReturnsStruct() const {
int32_t Function::FfiCallbackId() const {
ASSERT(IsFfiTrampoline());
if (FfiCallbackTarget() == Object::null()) {
return -1;
}
const Object& obj = Object::Handle(data());
ASSERT(FfiCallbackTarget() != Object::null());
const auto& obj = Object::Handle(data());
ASSERT(!obj.IsNull());
const FfiTrampolineData& trampoline_data = FfiTrampolineData::Cast(obj);
int32_t callback_id = trampoline_data.callback_id();
#if defined(DART_PRECOMPILED_RUNTIME)
ASSERT(callback_id >= 0);
#else
if (callback_id < 0) {
callback_id = Thread::Current()->AllocateFfiCallbackId();
trampoline_data.set_callback_id(callback_id);
}
#endif
return callback_id;
const auto& trampoline_data = FfiTrampolineData::Cast(obj);
ASSERT(trampoline_data.callback_id() != -1);
return trampoline_data.callback_id();
}
void Function::AssignFfiCallbackId(int32_t callback_id) const {
ASSERT(IsFfiTrampoline());
ASSERT(FfiCallbackTarget() != Object::null());
const auto& obj = Object::Handle(data());
ASSERT(!obj.IsNull());
const auto& trampoline_data = FfiTrampolineData::Cast(obj);
ASSERT(trampoline_data.callback_id() == -1);
trampoline_data.set_callback_id(callback_id);
}
bool Function::FfiIsLeaf() const {
@@ -25911,6 +25916,19 @@ TypedDataPtr TypedData::New(intptr_t class_id,
return result.ptr();
}
TypedDataPtr TypedData::Grow(const TypedData& current,
intptr_t len,
Heap::Space space) {
ASSERT(len > current.Length());
const auto& new_td =
TypedData::Handle(TypedData::New(current.GetClassId(), len, space));
{
NoSafepointScope no_safepoint_scope;
memcpy(new_td.DataAddr(0), current.DataAddr(0), current.LengthInBytes());
}
return new_td.ptr();
}
const char* TypedData::ToCString() const {
const Class& cls = Class::Handle(clazz());
return cls.ScrubbedNameCString();


@@ -2769,6 +2769,9 @@ class Function : public Object {
// -1 for Dart -> native calls.
int32_t FfiCallbackId() const;
// Should be called when ffi trampoline function object is created.
void AssignFfiCallbackId(int32_t callback_id) const;
// Can only be called on FFI trampolines.
bool FfiIsLeaf() const;
@@ -11227,6 +11230,10 @@ class TypedData : public TypedDataBase {
intptr_t len,
Heap::Space space = Heap::kNew);
static TypedDataPtr Grow(const TypedData& current,
intptr_t len,
Heap::Space space = Heap::kNew);
static void Copy(const TypedDataBase& dst,
intptr_t dst_offset_in_bytes,
const TypedDataBase& src,


@@ -268,7 +268,9 @@ class ObjectPointerVisitor;
RW(Code, nullable_type_parameter_tts_stub) \
RW(Code, type_parameter_tts_stub) \
RW(Code, unreachable_tts_stub) \
RW(Array, ffi_callback_functions) \
RW(Code, slow_tts_stub) \
/* Roots for JIT/AOT snapshots are up until here (see to_snapshot() below)*/ \
RW(Code, await_stub) \
RW(Code, clone_suspend_state_stub) \
RW(Code, init_async_stub) \
@@ -285,7 +287,6 @@
RW(GrowableObjectArray, instructions_tables) \
RW(Array, obfuscation_map) \
RW(Array, loading_unit_uris) \
RW(Array, ffi_callback_functions) \
RW(Class, ffi_pointer_class) \
RW(Class, ffi_native_type_class) \
// Please remember the last entry must be referred in the 'to' function below.


@@ -2310,6 +2310,37 @@ ISOLATE_UNIT_TEST_CASE(GrowableObjectArray) {
EXPECT_EQ(1, new_array.Length());
}
ISOLATE_UNIT_TEST_CASE(TypedData_Grow) {
const intptr_t kSmallSize = 42;
const intptr_t kLargeSize = 1000;
Random random(42);
for (classid_t cid = kTypedDataInt8ArrayCid; cid < kByteDataViewCid;
cid += 4) {
ASSERT(IsTypedDataClassId(cid));
const auto& small = TypedData::Handle(TypedData::New(cid, kSmallSize));
EXPECT_EQ(small.LengthInBytes(), kSmallSize * small.ElementSizeInBytes());
for (intptr_t i = 0; i < TypedData::ElementSizeFor(cid) * kSmallSize; i++) {
small.SetUint8(i, static_cast<uint8_t>(random.NextUInt64() & 0xff));
}
const auto& big = TypedData::Handle(TypedData::Grow(small, kLargeSize));
EXPECT_EQ(small.GetClassId(), big.GetClassId());
EXPECT_EQ(big.LengthInBytes(), kLargeSize * big.ElementSizeInBytes());
for (intptr_t i = 0; i < TypedData::ElementSizeFor(cid) * kSmallSize; i++) {
EXPECT_EQ(small.GetUint8(i), big.GetUint8(i));
}
for (intptr_t i = TypedData::ElementSizeFor(cid) * kSmallSize;
i < TypedData::ElementSizeFor(cid) * kLargeSize; i++) {
EXPECT_EQ(0, big.GetUint8(i));
}
}
}
ISOLATE_UNIT_TEST_CASE(InternalTypedData) {
uint8_t data[] = {253, 254, 255, 0, 1, 2, 3, 4};
intptr_t data_length = ARRAY_SIZE(data);


@@ -1037,95 +1037,84 @@ DisableThreadInterruptsScope::~DisableThreadInterruptsScope() {
}
}
const intptr_t kInitialCallbackIdsReserved = 16;
int32_t Thread::AllocateFfiCallbackId() {
Zone* Z = Thread::Current()->zone();
if (ffi_callback_code_ == GrowableObjectArray::null()) {
ffi_callback_code_ = GrowableObjectArray::New(kInitialCallbackIdsReserved);
}
const auto& array = GrowableObjectArray::Handle(Z, ffi_callback_code_);
array.Add(Code::Handle(Z, Code::null()));
const int32_t id = array.Length() - 1;
// Allocate a native callback trampoline if necessary.
#if !defined(DART_PRECOMPILED_RUNTIME)
if (NativeCallbackTrampolines::Enabled()) {
auto* const tramps = isolate()->native_callback_trampolines();
ASSERT(tramps->next_callback_id() == id);
tramps->AllocateTrampoline();
}
#endif
return id;
}
void Thread::SetFfiCallbackCode(int32_t callback_id, const Code& code) {
Zone* Z = Thread::Current()->zone();
/// In AOT the callback ID might have been allocated during compilation but
/// 'ffi_callback_code_' is initialized to empty again when the program
/// starts. Therefore we may need to initialize or expand it to accomodate
/// the callback ID.
void Thread::EnsureFfiCallbackMetadata(intptr_t callback_id) {
static constexpr intptr_t kInitialCallbackIdsReserved = 16;
if (ffi_callback_code_ == GrowableObjectArray::null()) {
ffi_callback_code_ = GrowableObjectArray::New(kInitialCallbackIdsReserved);
}
const auto& array = GrowableObjectArray::Handle(Z, ffi_callback_code_);
if (callback_id >= array.Length()) {
const int32_t capacity = array.Capacity();
if (callback_id >= capacity) {
// Ensure both that we grow enough and follow an exponential growth strategy.
const int32_t new_capacity =
Utils::Maximum(callback_id + 1, capacity * 2);
array.Grow(new_capacity);
}
array.SetLength(callback_id + 1);
}
array.SetAt(callback_id, code);
}
void Thread::SetFfiCallbackStackReturn(int32_t callback_id,
intptr_t stack_return_delta) {
#if defined(TARGET_ARCH_IA32)
#else
UNREACHABLE();
#endif
Zone* Z = Thread::Current()->zone();
/// In AOT the callback ID might have been allocated during compilation but
/// 'ffi_callback_code_' is initialized to empty again when the program
/// starts. Therefore we may need to initialize or expand it to accommodate
/// the callback ID.
if (ffi_callback_stack_return_ == TypedData::null()) {
ffi_callback_stack_return_ = TypedData::New(
kTypedDataInt8ArrayCid, kInitialCallbackIdsReserved, Heap::kOld);
}
#endif // defined(TARGET_ARCH_IA32)
auto& array = TypedData::Handle(Z, ffi_callback_stack_return_);
const auto& code_array =
GrowableObjectArray::Handle(zone(), ffi_callback_code_);
#if !defined(DART_PRECOMPILED_RUNTIME)
auto* const tramps = isolate()->native_callback_trampolines();
#if defined(TARGET_ARCH_IA32)
auto& stack_array = TypedData::Handle(zone(), ffi_callback_stack_return_);
#endif
#endif
if (callback_id >= array.Length()) {
const int32_t capacity = array.Length();
if (callback_id >= capacity) {
// Ensure both that we grow enough and follow an exponential growth strategy.
const int32_t new_capacity =
Utils::Maximum(callback_id + 1, capacity * 2);
const auto& new_array = TypedData::Handle(
Z, TypedData::New(kTypedDataUint8ArrayCid, new_capacity, Heap::kOld));
for (intptr_t i = 0; i < capacity; i++) {
new_array.SetUint8(i, array.GetUint8(i));
}
array ^= new_array.ptr();
ffi_callback_stack_return_ = new_array.ptr();
#if !defined(DART_PRECOMPILED_RUNTIME)
// Verify invariants of the 3 arrays hold.
ASSERT(code_array.Length() == tramps->next_callback_id());
#if defined(TARGET_ARCH_IA32)
ASSERT(code_array.Length() <= stack_array.Length());
#endif
#endif // !defined(DART_PRECOMPILED_RUNTIME)
if (code_array.Length() <= callback_id) {
// Ensure we've enough space in the 3 arrays.
while (!(callback_id < code_array.Length())) {
code_array.Add(Code::null_object());
#if !defined(DART_PRECOMPILED_RUNTIME)
tramps->AllocateTrampoline();
#endif
}
#if defined(TARGET_ARCH_IA32)
if (callback_id >= stack_array.Length()) {
const int32_t capacity = stack_array.Length();
if (callback_id >= capacity) {
// Ensure both that we grow enough and follow an exponential growth strategy.
const int32_t new_capacity =
Utils::Maximum(callback_id + 1, capacity * 2);
stack_array = TypedData::Grow(stack_array, new_capacity);
ffi_callback_stack_return_ = stack_array.ptr();
}
}
#endif // defined(TARGET_ARCH_IA32)
}
ASSERT(callback_id < array.Length());
array.SetUint8(callback_id, stack_return_delta);
#if !defined(DART_PRECOMPILED_RUNTIME)
// Verify invariants of the 3 arrays (still) hold.
ASSERT(code_array.Length() == tramps->next_callback_id());
#if defined(TARGET_ARCH_IA32)
ASSERT(code_array.Length() <= stack_array.Length());
#endif
#endif
ASSERT(callback_id < code_array.Length());
}
void Thread::SetFfiCallbackCode(const Function& ffi_trampoline,
const Code& code,
intptr_t stack_return_delta) {
const intptr_t callback_id = ffi_trampoline.FfiCallbackId();
EnsureFfiCallbackMetadata(callback_id);
const auto& code_array =
GrowableObjectArray::Handle(zone(), ffi_callback_code_);
code_array.SetAt(callback_id, code);
#if defined(TARGET_ARCH_IA32)
const auto& stack_delta_array =
TypedData::Handle(zone(), ffi_callback_stack_return_);
stack_delta_array.SetUint8(callback_id, stack_return_delta);
#endif // defined(TARGET_ARCH_IA32)
}
void Thread::VerifyCallbackIsolate(int32_t callback_id, uword entry) {


@@ -1072,20 +1072,13 @@ class Thread : public ThreadState {
}
}
int32_t AllocateFfiCallbackId();
// Store 'code' for the native callback identified by 'callback_id'.
//
// Expands the callback code array as necessary to accommodate the callback
// ID.
void SetFfiCallbackCode(int32_t callback_id, const Code& code);
// Store 'stack_return' for the native callback identified by 'callback_id'.
//
// Expands the callback stack return array as necessary to accommodate the
// callback ID.
void SetFfiCallbackStackReturn(int32_t callback_id,
intptr_t stack_return_delta);
void SetFfiCallbackCode(const Function& ffi_trampoline,
const Code& code,
intptr_t stack_return_delta);
// Ensure that 'callback_id' refers to a valid callback in this isolate.
//
@@ -1351,6 +1344,10 @@
void FinishEntering(TaskKind kind);
void PrepareLeaving();
// Ensures that we have allocated necessary thread-local data structures for
// [callback_id].
void EnsureFfiCallbackMetadata(intptr_t callback_id);
static void SetCurrent(Thread* current) { OSThread::SetCurrentTLS(current); }
#define REUSABLE_FRIEND_DECLARATION(name) \


@@ -30,7 +30,6 @@ void main() {
testInlineArray();
testInlineArray2();
testAsFunction();
testFromFunction();
}
void testSizeOf() {
@@ -135,26 +134,3 @@ void testAsFunction() {
.asFunction<IncompleteArrayStruct Function()>();
});
}
int myIncr(int a) => a + 1;
IncompleteArrayStruct myIncompleteReturn() =>
nullptr.cast<IncompleteArrayStruct>().ref;
int myIncompleteArg(IncompleteArrayStruct a) => 5;
void testFromFunction() {
Expect.throws(() {
Pointer.fromFunction<Incomplete Function(Int32)>(myIncr, 3);
});
Expect.throws(() {
Pointer.fromFunction<Int32 Function(Incomplete)>(myIncr, 3);
});
Expect.throws(() {
Pointer.fromFunction<IncompleteArrayStruct Function()>(myIncompleteReturn);
});
Expect.throws(() {
Pointer.fromFunction<Int32 Function(IncompleteArrayStruct)>(
myIncompleteArg, 3);
});
}


@@ -0,0 +1,38 @@
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
import 'dart:ffi';
import 'package:expect/expect.dart';
void main() {
final a = <int>[];
final b = <int>[];
for (int i = 0; i < 10; ++i) {
// Several pointers for same call site.
a.add(
Pointer.fromFunction<Int Function()>(nativeToDartCallback, 0).address);
b.add(
Pointer.fromFunction<Int Function()>(nativeToDartCallback, 1).address);
}
// Another pointer from a different call site.
a.add(Pointer.fromFunction<Int Function()>(nativeToDartCallback, 0).address);
b.add(Pointer.fromFunction<Int Function()>(nativeToDartCallback, 1).address);
ensureEqualEntries(a);
ensureEqualEntries(b);
// The two functions have different exceptional return values and should
// therefore have different ffi trampolines.
Expect.notEquals(a.first, b.first);
}
void ensureEqualEntries(List<int> entries) {
final first = entries.first;
for (int i = 1; i < entries.length; ++i) {
Expect.equals(first, entries[i]);
}
}
int nativeToDartCallback() => 42;


@@ -32,7 +32,6 @@ void main() {
testInlineArray();
testInlineArray2();
testAsFunction();
testFromFunction();
}
void testSizeOf() {
@@ -137,26 +136,3 @@ void testAsFunction() {
.asFunction<IncompleteArrayStruct Function()>();
});
}
int myIncr(int a) => a + 1;
IncompleteArrayStruct myIncompleteReturn() =>
nullptr.cast<IncompleteArrayStruct>().ref;
int myIncompleteArg(IncompleteArrayStruct a) => 5;
void testFromFunction() {
Expect.throws(() {
Pointer.fromFunction<Incomplete Function(Int32)>(myIncr, 3);
});
Expect.throws(() {
Pointer.fromFunction<Int32 Function(Incomplete)>(myIncr, 3);
});
Expect.throws(() {
Pointer.fromFunction<IncompleteArrayStruct Function()>(myIncompleteReturn);
});
Expect.throws(() {
Pointer.fromFunction<Int32 Function(IncompleteArrayStruct)>(
myIncompleteArg, 3);
});
}


@@ -0,0 +1,38 @@
// Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
import 'dart:ffi';
import 'package:expect/expect.dart';
void main() {
final a = <int>[];
final b = <int>[];
for (int i = 0; i < 10; ++i) {
// Several pointers for same call site.
a.add(
Pointer.fromFunction<Int Function()>(nativeToDartCallback, 0).address);
b.add(
Pointer.fromFunction<Int Function()>(nativeToDartCallback, 1).address);
}
// Another pointer from a different call site.
a.add(Pointer.fromFunction<Int Function()>(nativeToDartCallback, 0).address);
b.add(Pointer.fromFunction<Int Function()>(nativeToDartCallback, 1).address);
ensureEqualEntries(a);
ensureEqualEntries(b);
// The two functions have different exceptional return values and should
// therefore have different ffi trampolines.
Expect.notEquals(a.first, b.first);
}
void ensureEqualEntries(List<int> entries) {
final first = entries.first;
for (int i = 1; i < entries.length; ++i) {
Expect.equals(first, entries[i]);
}
}
int nativeToDartCallback() => 42;