Reland "[vm] Migrate FFI callbacks to the new metadata system."

This reverts https://dart-review.googlesource.com/c/sdk/+/306674

Patchset 1 is a pure rollback of the rollback.
Patchset 2 is https://dart-review.googlesource.com/c/sdk/+/306316
Patchset 4+ is the forward fix for the Fuchsia issues.

The Fuchsia bug that we're fixing (or working around) is that
VirtualMemory::DuplicateRX doesn't work on Fuchsia. A proper fix will
require special-casing it, as on macOS. In the meantime we can avoid
using this function by allowing only one page of trampolines on Fuchsia.
Unfortunately, when I removed the BSS stuff from the original CL, it
became necessary to duplicate even the first page, so I've had to add
that stuff back just for Fuchsia.
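
For context, VirtualMemory::DuplicateRX presumably maps already-written
trampoline memory a second time with read-execute permissions. Below is a
minimal dual-mapping sketch of that idea, assuming Linux (memfd_create) and
POSIX mmap rather than anything the VM actually does:

#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>

int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  // memfd_create is Linux-specific; other platforms need their own kernel
  // primitive to get a second mapping of the same memory.
  const int fd = memfd_create("rx_dup_sketch", 0);
  if (fd < 0 || ftruncate(fd, page) != 0) return 1;
  // Two views of the same physical pages: one writable, one executable.
  void* rw = mmap(nullptr, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  void* rx = mmap(nullptr, page, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
  if (rw == MAP_FAILED || rx == MAP_FAILED) return 1;
  // Writes through the RW view become visible in the RX view without ever
  // making the executable mapping writable.
  memcpy(rw, "\xc3", 1);  // x86 'ret', purely illustrative.
  printf("rw view: %p, rx view: %p\n", rw, rx);
  return 0;
}

Platforms without such a primitive need their own special case, which is
roughly what the message above alludes to for macOS and Fuchsia.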

Change-Id: Id42de78ee5de126bcc83bfa4148f6efb4045f976
Bug: https://github.com/dart-lang/sdk/issues/52579
Bug: https://buganizer.corp.google.com/issues/284959841
Fixes: https://github.com/dart-lang/sdk/issues/52581
TEST=CI, especially vm-fuchsia-release-x64-try
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/306676
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Commit-Queue: Liam Appelbe <liama@google.com>
Reviewed-by: Daco Harkes <dacoharkes@google.com>
Liam Appelbe 2023-06-06 02:07:58 +00:00 committed by Commit Queue
parent 369a8312aa
commit 07f587504b
45 changed files with 1557 additions and 976 deletions


@ -13,6 +13,7 @@
#include "vm/class_id.h"
#include "vm/compiler/ffi/native_type.h"
#include "vm/exceptions.h"
#include "vm/ffi_callback_metadata.h"
#include "vm/flags.h"
#include "vm/heap/gc_shared.h"
#include "vm/log.h"
@ -39,31 +40,8 @@ DEFINE_NATIVE_ENTRY(Ffi_asFunctionInternal, 2, 2) {
DEFINE_NATIVE_ENTRY(Ffi_pointerFromFunction, 1, 1) {
const auto& function = Function::CheckedHandle(zone, arguments->NativeArg0());
const auto& code =
Code::Handle(zone, FLAG_precompiled_mode ? function.CurrentCode()
: function.EnsureHasCode());
ASSERT(!code.IsNull());
uword entry_point = code.EntryPoint();
// In JIT we use one more indirection:
// * AOT: Native -> Ffi Trampoline -> Dart function
// * JIT: Native -> Jit trampoline -> Ffi Trampoline -> Dart function
//
// We do that since ffi trampoline code lives in Dart heap. During GC we can
// flip page protections from RX to RW to GC JITed code. During that time
// machine code on such pages cannot be executed. Native code therefore has to
// perform the safepoint transition before executing code in Dart heap (which
// is why we use the jit trampoline).
#if !defined(DART_PRECOMPILED_RUNTIME)
if (NativeCallbackTrampolines::Enabled()) {
entry_point =
isolate->group()->native_callback_trampolines()->TrampolineForId(
function.FfiCallbackId());
}
#endif
return Pointer::New(entry_point);
void* pointer = isolate->CreateSyncFfiCallback(zone, function);
return Pointer::New(reinterpret_cast<uword>(pointer));
}
DEFINE_NATIVE_ENTRY(DartNativeApiFunctionPointer, 0, 1) {


@ -271,6 +271,17 @@ void Expect::Null(const T p) {
} \
} while (false)
#define ASSERT_LESS_OR_EQUAL(actual, expected) \
do { \
if ((actual) > (expected)) { \
const std::string actual_str = std::to_string(actual); \
const std::string expected_str = std::to_string(expected); \
dart::Assert(__FILE__, __LINE__) \
.Fail("expected \"%s\" = %s >= actual \"%s\" = %s", #expected, \
expected_str.c_str(), #actual, actual_str.c_str()); \
} \
} while (false)
#define ASSERT_IMPLIES(antecedent, consequent) \
do { \
if (antecedent) { \
@ -299,6 +310,10 @@ void Expect::Null(const T p) {
do { \
} while (false && ((expected) != (actual)))
#define ASSERT_LESS_OR_EQUAL(expected, actual) \
do { \
} while (false && ((actual) > (expected)))
#define ASSERT_IMPLIES(antecedent, consequent) \
do { \
} while (false && (!(antecedent) || (consequent)))
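
The release-mode variants above keep the comparison inside a short-circuited
false && so it is never evaluated but still compiled, catching ill-typed
operands even when assertions are disabled. A standalone sketch of that
pattern, using an illustrative macro name rather than the SDK's:

#include <cstdio>

// Same shape as the release-mode ASSERT_LESS_OR_EQUAL above: the comparison
// after `false &&` is never evaluated at runtime, but it is still compiled,
// so ill-typed operands are caught even in release builds.
#define CHECK_LESS_OR_EQUAL(actual, expected) \
  do {                                        \
  } while (false && ((actual) > (expected)))

int main() {
  const int used = 3;
  const int capacity = 4;
  CHECK_LESS_OR_EQUAL(used, capacity);  // emits no runtime check
  printf("used=%d capacity=%d\n", used, capacity);
  return 0;
}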


@ -44,7 +44,6 @@ void StackResource::Init(ThreadState* thread) {
// We can only have longjumps and exceptions when there is a current
// thread and isolate. If there is no current thread, we don't need to
// protect this case.
// TODO(23807): Eliminate this special case.
if (thread != nullptr) {
ASSERT(Thread::Current() == thread);
thread_ = thread;


@ -6470,21 +6470,6 @@ class ProgramDeserializationRoots : public DeserializationRoots {
unit.set_base_objects(refs);
}
#if !defined(DART_PRECOMPILED_RUNTIME)
// The JIT trampolines are allocated at runtime and not part of the
// snapshot. So if the snapshot contains ffi-callback-trampolines we'll
// have to allocate corresponding JIT trampolines.
auto* const tramps = isolate_group->native_callback_trampolines();
const auto& ffi_callback_code =
GrowableObjectArray::Handle(object_store->ffi_callback_code());
if (!ffi_callback_code.IsNull()) {
const intptr_t len = ffi_callback_code.Length();
while (tramps->next_callback_id() < len) {
tramps->AllocateTrampoline();
}
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
// Setup native resolver for bootstrap impl.
Bootstrap::SetupNativeResolver();
}


@ -38,12 +38,10 @@ void BSS::Initialize(Thread* current, uword* bss_start, bool vm) {
instructions - dso_base, bss_start);
}
if (!vm) {
// Fill values at isolate-only indices.
InitializeBSSEntry(Relocation::DRT_GetThreadForNativeCallback,
reinterpret_cast<uword>(DLRT_GetThreadForNativeCallback),
bss_start);
}
// TODO(52579): Remove.
InitializeBSSEntry(Relocation::DRT_GetFfiCallbackMetadata,
reinterpret_cast<uword>(DLRT_GetFfiCallbackMetadata),
bss_start);
}
} // namespace dart


@ -17,16 +17,18 @@ class BSS : public AllStatic {
// stored at the index.
enum class Relocation : intptr_t {
InstructionsRelocatedAddress,
// End of shared entries.
DRT_GetThreadForNativeCallback,
// End of isolate-only entries.
DRT_GetFfiCallbackMetadata, // TODO(52579): Remove.
EndOfVmEntries,
// We don't have any isolate group specific entries at the moment.
EndOfIsolateGroupEntries = EndOfVmEntries,
};
static constexpr intptr_t kVmEntryCount =
static_cast<intptr_t>(Relocation::InstructionsRelocatedAddress) + 1;
static_cast<intptr_t>(Relocation::EndOfVmEntries);
static constexpr intptr_t kIsolateEntryCount =
static_cast<intptr_t>(Relocation::DRT_GetThreadForNativeCallback) + 1;
static constexpr intptr_t kIsolateGroupEntryCount =
static_cast<intptr_t>(Relocation::EndOfIsolateGroupEntries);
static constexpr intptr_t RelocationIndex(Relocation reloc) {
return static_cast<intptr_t>(reloc);


@ -1646,11 +1646,10 @@ void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Pop(vm_tag_reg);
// If we were called by a trampoline, it will enter the safepoint on our
// behalf.
__ TransitionGeneratedToNative(
vm_tag_reg, old_exit_frame_reg, old_exit_through_ffi_reg, tmp,
/*enter_safepoint=*/!NativeCallbackTrampolines::Enabled());
// The trampoline that called us will enter the safepoint on our behalf.
__ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
old_exit_through_ffi_reg, tmp,
/*enter_safepoint=*/false);
__ PopNativeCalleeSavedRegisters();
@ -1697,24 +1696,6 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ PushNativeCalleeSavedRegisters();
// Load the thread object. If we were called by a JIT trampoline, the thread
// is already loaded.
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
if (!NativeCallbackTrampolines::Enabled()) {
compiler->LoadBSSEntry(BSS::Relocation::DRT_GetThreadForNativeCallback, R1,
R0);
// Create another frame to align the frame before continuing in "native"
// code.
__ EnterFrame(1 << FP, 0);
__ ReserveAlignedFrameSpace(0);
__ LoadImmediate(R0, callback_id);
__ blx(R1);
__ mov(THR, compiler::Operand(R0));
__ LeaveFrame(1 << FP);
}
// Save the current VMTag on the stack.
__ LoadFromOffset(R0, THR, compiler::target::Thread::vm_tag_offset());
__ Push(R0);
@ -1739,8 +1720,7 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ EmitEntryFrameVerification(R0);
// Either DLRT_GetThreadForNativeCallback or the callback trampoline (caller)
// will leave the safepoint for us.
// The callback trampoline (caller) has already left the safepoint for us.
__ TransitionNativeToGenerated(/*scratch0=*/R0, /*scratch1=*/R1,
/*exit_safepoint=*/false);
@ -1748,6 +1728,8 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// handles.
// Load the code object.
const Function& target_function = marshaller_.dart_signature();
const intptr_t callback_id = target_function.FfiCallbackId();
__ LoadFromOffset(R0, THR, compiler::target::Thread::isolate_group_offset());
__ LoadFromOffset(R0, R0,
compiler::target::IsolateGroup::object_store_offset());


@ -1519,13 +1519,11 @@ void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ StoreToOffset(tmp, THR, compiler::target::Thread::top_resource_offset());
// Reset the exit frame info to old_exit_frame_reg *before* entering the
// safepoint.
//
// If we were called by a trampoline, it will enter the safepoint on our
// safepoint. The trampoline that called us will enter the safepoint on our
// behalf.
__ TransitionGeneratedToNative(
vm_tag_reg, old_exit_frame_reg, old_exit_through_ffi_reg,
/*enter_safepoint=*/!NativeCallbackTrampolines::Enabled());
__ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
old_exit_through_ffi_reg,
/*enter_safepoint=*/false);
__ PopNativeCalleeSavedRegisters();
@ -1573,24 +1571,6 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ PushNativeCalleeSavedRegisters();
// Load the thread object. If we were called by a JIT trampoline, the thread
// is already loaded.
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
if (!NativeCallbackTrampolines::Enabled()) {
compiler->LoadBSSEntry(BSS::Relocation::DRT_GetThreadForNativeCallback, R1,
R0);
// Create another frame to align the frame before continuing in "native"
// code.
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(0);
__ LoadImmediate(R0, callback_id);
__ blr(R1);
__ mov(THR, R0);
__ LeaveFrame();
}
// Now that we have THR, we can set CSP.
__ SetupCSPFromThread(THR);
@ -1627,14 +1607,15 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// correct offset from FP.
__ EmitEntryFrameVerification();
// Either DLRT_GetThreadForNativeCallback or the callback trampoline (caller)
// will leave the safepoint for us.
// The callback trampoline (caller) has already left the safepoint for us.
__ TransitionNativeToGenerated(R0, /*exit_safepoint=*/false);
// Now that the safepoint has ended, we can touch Dart objects without
// handles.
// Load the code object.
const Function& target_function = marshaller_.dart_signature();
const intptr_t callback_id = target_function.FfiCallbackId();
__ LoadFromOffset(R0, THR, compiler::target::Thread::isolate_group_offset());
__ LoadFromOffset(R0, R0,
compiler::target::IsolateGroup::object_store_offset());


@ -366,14 +366,12 @@ void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ movl(old_exit_through_ffi_reg, vm_tag_reg);
__ popl(vm_tag_reg);
// This will reset the exit frame info to old_exit_frame_reg *before* entering
// the safepoint.
//
// If we were called by a trampoline, it will enter the safepoint on our
// Reset the exit frame info to old_exit_frame_reg *before* entering the
// safepoint. The trampoline that called us will enter the safepoint on our
// behalf.
__ TransitionGeneratedToNative(
vm_tag_reg, old_exit_frame_reg, old_exit_through_ffi_reg,
/*enter_safepoint=*/!NativeCallbackTrampolines::Enabled());
__ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
old_exit_through_ffi_reg,
/*enter_safepoint=*/false);
// Move XMM0 into ST0 if needed.
if (return_in_st0) {
@ -1253,11 +1251,6 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ pushl(ESI);
__ pushl(EDI);
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
// The thread object was already loaded by a JIT trampoline.
ASSERT(NativeCallbackTrampolines::Enabled());
// Save the current VMTag on the stack.
__ movl(ECX, compiler::Assembler::VMTagAddress());
__ pushl(ECX);
@ -1280,13 +1273,14 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// correct offset from FP.
__ EmitEntryFrameVerification();
// Either DLRT_GetThreadForNativeCallback or the callback trampoline (caller)
// will leave the safepoint for us.
// The callback trampoline (caller) has already left the safepoint for us.
__ TransitionNativeToGenerated(EAX, /*exit_safepoint=*/false);
// Now that the safepoint has ended, we can hold Dart objects with bare hands.
// Load the code object.
const Function& target_function = marshaller_.dart_signature();
const intptr_t callback_id = target_function.FfiCallbackId();
__ movl(EAX, compiler::Address(
THR, compiler::target::Thread::isolate_group_offset()));
__ movl(EAX, compiler::Address(


@ -1693,13 +1693,11 @@ void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ StoreToOffset(tmp, THR, compiler::target::Thread::top_resource_offset());
// Reset the exit frame info to old_exit_frame_reg *before* entering the
// safepoint.
//
// If we were called by a trampoline, it will enter the safepoint on our
// safepoint. The trampoline that called us will enter the safepoint on our
// behalf.
__ TransitionGeneratedToNative(
vm_tag_reg, old_exit_frame_reg, old_exit_through_ffi_reg,
/*enter_safepoint=*/!NativeCallbackTrampolines::Enabled());
__ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
old_exit_through_ffi_reg,
/*enter_safepoint=*/false);
__ PopNativeCalleeSavedRegisters();
@ -1738,34 +1736,6 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ PushNativeCalleeSavedRegisters();
// Load the thread object. If we were called by a trampoline, the thread is
// already loaded.
if (FLAG_precompiled_mode) {
compiler->LoadBSSEntry(BSS::Relocation::DRT_GetThreadForNativeCallback, A1,
A0);
} else if (!NativeCallbackTrampolines::Enabled()) {
// In JIT mode, we can just paste the address of the runtime entry into the
// generated code directly. This is not a problem since we don't save
// callbacks into JIT snapshots.
__ LoadImmediate(
A1, reinterpret_cast<int64_t>(DLRT_GetThreadForNativeCallback));
}
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
if (!NativeCallbackTrampolines::Enabled()) {
// Create another frame to align the frame before continuing in "native"
// code.
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(0);
__ LoadImmediate(A0, callback_id);
__ jalr(A1);
__ mv(THR, A0);
__ LeaveFrame();
}
#if defined(USING_SHADOW_CALL_STACK)
#error Unimplemented
#endif
@ -1795,14 +1765,15 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// correct offset from FP.
__ EmitEntryFrameVerification();
// Either DLRT_GetThreadForNativeCallback or the callback trampoline (caller)
// will leave the safepoint for us.
// The callback trampoline (caller) has already left the safepoint for us.
__ TransitionNativeToGenerated(A0, /*exit_safepoint=*/false);
// Now that the safepoint has ended, we can touch Dart objects without
// handles.
// Load the code object.
const Function& target_function = marshaller_.dart_signature();
const intptr_t callback_id = target_function.FfiCallbackId();
__ LoadFromOffset(A0, THR, compiler::target::Thread::isolate_group_offset());
__ LoadFromOffset(A0, A0,
compiler::target::IsolateGroup::object_store_offset());


@ -482,11 +482,10 @@ void NativeReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ popq(vm_tag_reg);
// If we were called by a trampoline, it will enter the safepoint on our
// behalf.
__ TransitionGeneratedToNative(
vm_tag_reg, old_exit_frame_reg, old_exit_through_ffi_reg,
/*enter_safepoint=*/!NativeCallbackTrampolines::Enabled());
// The trampoline that called us will enter the safepoint on our behalf.
__ TransitionGeneratedToNative(vm_tag_reg, old_exit_frame_reg,
old_exit_through_ffi_reg,
/*enter_safepoint=*/false);
// Restore C++ ABI callee-saved registers.
__ PopRegisters(kCalleeSaveRegistersSet);
@ -1492,23 +1491,6 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// Save ABI callee-saved registers.
__ PushRegisters(kCalleeSaveRegistersSet);
// Load the thread object. If we were called by a JIT trampoline, the thread
// is already loaded.
const intptr_t callback_id = marshaller_.dart_signature().FfiCallbackId();
if (!NativeCallbackTrampolines::Enabled()) {
compiler->LoadBSSEntry(BSS::Relocation::DRT_GetThreadForNativeCallback, RAX,
RCX);
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(0);
COMPILE_ASSERT(RAX != CallingConventions::kArg1Reg);
__ movq(CallingConventions::kArg1Reg, compiler::Immediate(callback_id));
__ CallCFunction(RAX);
__ movq(THR, RAX);
__ LeaveFrame();
}
// Save the current VMTag on the stack.
__ movq(RAX, compiler::Assembler::VMTagAddress());
__ pushq(RAX);
@ -1531,11 +1513,12 @@ void NativeEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// correct offset from FP.
__ EmitEntryFrameVerification();
// Either DLRT_GetThreadForNativeCallback or the callback trampoline (caller)
// will leave the safepoint for us.
// The callback trampoline (caller) has already left the safepoint for us.
__ TransitionNativeToGenerated(/*exit_safepoint=*/false);
// Load the code object.
const Function& target_function = marshaller_.dart_signature();
const intptr_t callback_id = target_function.FfiCallbackId();
__ movq(RAX, compiler::Address(
THR, compiler::target::Thread::isolate_group_offset()));
__ movq(RAX, compiler::Address(


@ -116,52 +116,13 @@ static void EnsureFfiCallbackMetadata(Thread* thread, intptr_t callback_id) {
GrowableObjectArray::New(kInitialCallbackIdsReserved, Heap::kOld);
object_store->set_ffi_callback_code(code_array);
}
#if defined(TARGET_ARCH_IA32)
auto& stack_array =
TypedData::Handle(zone, object_store->ffi_callback_stack_return());
if (stack_array.IsNull()) {
stack_array = TypedData::New(kTypedDataInt8ArrayCid,
kInitialCallbackIdsReserved, Heap::kOld);
object_store->set_ffi_callback_stack_return(stack_array);
}
ASSERT(code_array.Length() <= stack_array.Length());
#endif
#if !defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_PRECOMPILER)
auto* const tramps = thread->isolate_group()->native_callback_trampolines();
ASSERT(code_array.Length() == tramps->next_callback_id());
#endif
if (code_array.Length() <= callback_id) {
// Ensure we've enough space in the arrays.
while (!(callback_id < code_array.Length())) {
code_array.Add(Code::null_object());
#if !defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_PRECOMPILER)
tramps->AllocateTrampoline();
#endif
}
#if defined(TARGET_ARCH_IA32)
if (callback_id >= stack_array.Length()) {
const int32_t capacity = stack_array.Length();
if (callback_id >= capacity) {
// Ensure both that we grow enough and an exponential growth strategy.
const int32_t new_capacity =
Utils::Maximum(callback_id + 1, capacity * 2);
stack_array = TypedData::Grow(stack_array, new_capacity);
object_store->set_ffi_callback_stack_return(stack_array);
}
}
#endif // defined(TARGET_ARCH_IA32)
}
#if !defined(DART_PRECOMPILED_RUNTIME) && !defined(DART_PRECOMPILER)
// Verify invariants of the 3 arrays (still) hold.
ASSERT(code_array.Length() == tramps->next_callback_id());
#if defined(TARGET_ARCH_IA32)
ASSERT(code_array.Length() <= stack_array.Length());
#endif
#endif
ASSERT(callback_id < code_array.Length());
}
@ -177,19 +138,6 @@ void SetFfiCallbackCode(Thread* thread,
const auto& code_array =
GrowableObjectArray::Handle(zone, object_store->ffi_callback_code());
code_array.SetAt(callback_id, code);
#if defined(TARGET_ARCH_IA32)
// On ia32, store the stack delta that we need to use when returning.
const intptr_t stack_return_delta =
ffi_trampoline.FfiCSignatureReturnsStruct() &&
CallingConventions::kUsesRet4
? compiler::target::kWordSize
: 0;
const auto& stack_delta_array =
TypedData::Handle(zone, object_store->ffi_callback_stack_return());
stack_delta_array.SetUint8(callback_id, stack_return_delta);
#endif // defined(TARGET_ARCH_IA32)
}
} // namespace ffi


@ -12,6 +12,7 @@
#include "vm/compiler/ffi/native_location.h"
#include "vm/compiler/ffi/native_type.h"
#include "vm/exceptions.h"
#include "vm/ffi_callback_metadata.h"
#include "vm/log.h"
#include "vm/object_store.h"
#include "vm/raw_object.h"
@ -637,13 +638,11 @@ class CallbackArgumentTranslator : public ValueObject {
// shadow space if present (factored into
// kCallbackSlotsBeforeSavedArguments).
//
// Finally, if we are using NativeCallbackTrampolines, factor in the extra
// stack space corresponding to those trampolines' frames (above the entry
// frame).
intptr_t stack_delta = kCallbackSlotsBeforeSavedArguments;
if (NativeCallbackTrampolines::Enabled()) {
stack_delta += StubCodeCompiler::kNativeCallbackTrampolineStackDelta;
}
// Finally, for NativeCallbackTrampolines, factor in the extra stack space
// corresponding to those trampolines' frames (above the entry frame).
const intptr_t stack_delta =
kCallbackSlotsBeforeSavedArguments +
FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta;
FrameRebase rebase(
zone,
/*old_base=*/SPREG, /*new_base=*/SPREG,


@ -1317,7 +1317,6 @@ class ObjectStore : public AllStatic {
static word type_type_offset();
static word ffi_callback_code_offset();
static word ffi_callback_stack_return_offset();
static word suspend_state_await_offset();
static word suspend_state_await_with_type_check_offset();


@ -260,8 +260,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x88;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x284;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x288;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x228;
static constexpr dart::compiler::target::word
@ -968,8 +966,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -1676,8 +1672,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x88;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x284;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x288;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x228;
static constexpr dart::compiler::target::word
@ -2383,8 +2377,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -3095,8 +3087,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -3804,8 +3794,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -4513,8 +4501,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x88;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x284;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x288;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x228;
static constexpr dart::compiler::target::word
@ -5222,8 +5208,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -5925,8 +5909,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x88;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x284;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x288;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x228;
static constexpr dart::compiler::target::word
@ -6625,8 +6607,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -7325,8 +7305,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x88;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x284;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x288;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x228;
static constexpr dart::compiler::target::word
@ -8024,8 +8002,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -8728,8 +8704,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -9429,8 +9403,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -10130,8 +10102,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x88;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x284;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x288;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x228;
static constexpr dart::compiler::target::word
@ -10831,8 +10801,6 @@ static constexpr dart::compiler::target::word ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -11568,8 +11536,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x88;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x284;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x288;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x228;
static constexpr dart::compiler::target::word
@ -12351,8 +12317,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -13137,8 +13101,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -13922,8 +13884,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -14707,8 +14667,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -15494,8 +15452,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x88;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x284;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x288;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x228;
static constexpr dart::compiler::target::word
@ -16278,8 +16234,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -17055,8 +17009,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x88;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x284;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x288;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x228;
static constexpr dart::compiler::target::word
@ -17829,8 +17781,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -18606,8 +18556,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -19382,8 +19330,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -20158,8 +20104,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word
@ -20936,8 +20880,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x88;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x284;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x288;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x228;
static constexpr dart::compiler::target::word
@ -21711,8 +21653,6 @@ static constexpr dart::compiler::target::word AOT_ObjectStore_type_type_offset =
0x110;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_code_offset = 0x508;
static constexpr dart::compiler::target::word
AOT_ObjectStore_ffi_callback_stack_return_offset = 0x510;
static constexpr dart::compiler::target::word
AOT_ObjectStore_suspend_state_await_offset = 0x450;
static constexpr dart::compiler::target::word


@ -197,7 +197,6 @@
FIELD(ObjectStore, string_type_offset) \
FIELD(ObjectStore, type_type_offset) \
FIELD(ObjectStore, ffi_callback_code_offset) \
FIELD(ObjectStore, ffi_callback_stack_return_offset) \
FIELD(ObjectStore, suspend_state_await_offset) \
FIELD(ObjectStore, suspend_state_await_with_type_check_offset) \
FIELD(ObjectStore, suspend_state_handle_exception_offset) \


@ -13,6 +13,7 @@
#include "vm/compiler/stub_code_compiler.h"
#include "vm/code_descriptors.h"
#include "vm/compiler/api/type_check_mode.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
@ -2545,6 +2546,17 @@ void StubCodeCompiler::GenerateCloneSuspendStateStub() {
__ Ret();
}
void StubCodeCompiler::InsertBSSRelocation(BSS::Relocation reloc) {
ASSERT(pc_descriptors_list_ != nullptr);
const intptr_t pc_offset = assembler->InsertAlignedRelocation(reloc);
pc_descriptors_list_->AddDescriptor(
UntaggedPcDescriptors::kBSSRelocation, pc_offset,
/*deopt_id=*/DeoptId::kNone,
/*root_pos=*/TokenPosition::kNoSource,
/*try_index=*/-1,
/*yield_index=*/UntaggedPcDescriptors::kInvalidYieldIndex);
}
} // namespace compiler
} // namespace dart


@ -22,6 +22,7 @@ namespace dart {
// Forward declarations.
class Code;
class DescriptorList;
namespace compiler {
@ -50,7 +51,8 @@ using UnresolvedPcRelativeCalls = GrowableArray<UnresolvedPcRelativeCall*>;
class StubCodeCompiler {
public:
explicit StubCodeCompiler(Assembler* assembler_) : assembler(assembler_) {}
StubCodeCompiler(Assembler* assembler_, DescriptorList* pc_descriptors_list)
: assembler(assembler_), pc_descriptors_list_(pc_descriptors_list) {}
Assembler* assembler;
@ -104,44 +106,6 @@ class StubCodeCompiler {
void GenerateUsageCounterIncrement(Register temp_reg);
void GenerateOptimizedUsageCounterIncrement();
#if defined(TARGET_ARCH_X64)
static constexpr intptr_t kNativeCallbackTrampolineSize = 10;
#if defined(DART_COMPRESSED_POINTERS)
static constexpr intptr_t kNativeCallbackSharedStubSize = 236;
#else
static constexpr intptr_t kNativeCallbackSharedStubSize = 228;
#endif
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 2;
#elif defined(TARGET_ARCH_IA32)
static constexpr intptr_t kNativeCallbackTrampolineSize = 10;
static constexpr intptr_t kNativeCallbackSharedStubSize = 152;
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 4;
#elif defined(TARGET_ARCH_ARM)
static constexpr intptr_t kNativeCallbackTrampolineSize = 12;
static constexpr intptr_t kNativeCallbackSharedStubSize = 148;
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 4;
#elif defined(TARGET_ARCH_ARM64)
static constexpr intptr_t kNativeCallbackTrampolineSize = 12;
#if defined(DART_COMPRESSED_POINTERS)
static constexpr intptr_t kNativeCallbackSharedStubSize = 268;
#else
static constexpr intptr_t kNativeCallbackSharedStubSize = 244;
#endif
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 2;
#elif defined(TARGET_ARCH_RISCV32)
static constexpr intptr_t kNativeCallbackTrampolineSize = 8;
static constexpr intptr_t kNativeCallbackSharedStubSize = 238;
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 2;
#elif defined(TARGET_ARCH_RISCV64)
static constexpr intptr_t kNativeCallbackTrampolineSize = 8;
static constexpr intptr_t kNativeCallbackSharedStubSize = 210;
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 2;
#else
#error What architecture?
#endif
void GenerateJITCallbackTrampolines(intptr_t next_callback_id);
// Calculates the offset (in words) from FP to the provided [cpu_register].
//
// Assumes
@ -200,6 +164,16 @@ class StubCodeCompiler {
intptr_t return_function_offset_in_object_store,
intptr_t return_stub_offset_in_thread);
void GenerateLoadBSSEntry(BSS::Relocation relocation,
Register dst,
Register tmp);
void InsertBSSRelocation(BSS::Relocation reloc);
void GenerateLoadFfiCallbackMetadataRuntimeFunction(uword function_index,
Register dst);
DescriptorList* pc_descriptors_list_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(StubCodeCompiler);
};


@ -21,6 +21,7 @@
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/constants.h"
#include "vm/ffi_callback_metadata.h"
#include "vm/instructions.h"
#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"
@ -372,45 +373,84 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
__ bx(R4);
}
#if !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateJITCallbackTrampolines(
intptr_t next_callback_id) {
#if defined(USING_SIMULATOR)
// TODO(37299): FFI is not support in SIMARM.
void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
Register dst,
Register tmp) {
compiler::Label skip_reloc;
__ b(&skip_reloc);
InsertBSSRelocation(relocation);
__ Bind(&skip_reloc);
// For historical reasons, the PC on ARM points 8 bytes (two instructions)
// past the current instruction.
__ sub(tmp, PC,
compiler::Operand(Instr::kPCReadOffset + compiler::target::kWordSize));
// tmp holds the address of the relocation.
__ ldr(dst, compiler::Address(tmp));
// dst holds the relocation itself: tmp - bss_start.
// tmp = tmp + (bss_start - tmp) = bss_start
__ add(tmp, tmp, compiler::Operand(dst));
// tmp holds the start of the BSS section.
// Load the "get-thread" routine: *bss_start.
__ ldr(dst, compiler::Address(tmp));
}
void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
uword function_index,
Register dst) {
// Keep in sync with FfiCallbackMetadata::EnsureFirstTrampolinePageLocked.
// Note: If the stub was aligned, this could be a single PC relative load.
// Load a pointer to the beginning of the stub into dst.
const intptr_t code_size = __ CodeSize();
__ SubImmediate(dst, PC, Instr::kPCReadOffset + code_size);
// Round dst down to the page size.
__ AndImmediate(dst, dst, FfiCallbackMetadata::kPageMask);
// Load the function from the function table.
__ LoadFromOffset(
dst,
Address(dst, FfiCallbackMetadata::RuntimeFunctionOffset(function_index)));
}
void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
#if defined(USING_SIMULATOR) && !defined(DART_PRECOMPILER)
// TODO(37299): FFI is not supported in SIMARM.
__ Breakpoint();
#else
Label done;
Label body;
// TMP is volatile and not used for passing any arguments.
COMPILE_ASSERT(!IsCalleeSavedRegister(TMP) && !IsArgumentRegister(TMP));
for (intptr_t i = 0;
i < NativeCallbackTrampolines::NumCallbackTrampolinesPerPage(); ++i) {
// We don't use LoadImmediate because we need the trampoline size to be
// fixed independently of the callback ID.
//
// PC points two instructions ahead of the current one -- directly where we
// store the callback ID.
__ ldr(TMP, Address(PC, 0));
__ b(&done);
__ Emit(next_callback_id + i);
for (intptr_t i = 0; i < FfiCallbackMetadata::NumCallbackTrampolinesPerPage();
++i) {
// The FfiCallbackMetadata table is keyed by the trampoline entry point. So
// look up the current PC, then jump to the shared section. The PC is offset
// by Instr::kPCReadOffset, which is subtracted below.
__ mov(TMP, Operand(PC));
__ b(&body);
}
ASSERT(__ CodeSize() ==
kNativeCallbackTrampolineSize *
NativeCallbackTrampolines::NumCallbackTrampolinesPerPage());
FfiCallbackMetadata::kNativeCallbackTrampolineSize *
FfiCallbackMetadata::NumCallbackTrampolinesPerPage());
__ Bind(&done);
__ Bind(&body);
const intptr_t shared_stub_start = __ CodeSize();
// Save THR (callee-saved), R4 & R5 (temporaries, callee-saved), and LR.
COMPILE_ASSERT(StubCodeCompiler::kNativeCallbackTrampolineStackDelta == 4);
COMPILE_ASSERT(FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta == 4);
SPILLS_LR_TO_FRAME(
__ PushList((1 << LR) | (1 << THR) | (1 << R4) | (1 << R5)));
// Don't rely on TMP being preserved by assembler macros anymore.
__ mov(R4, Operand(TMP));
// The PC is in TMP, but is offset by kPCReadOffset. To get the actual
// trampoline entry point we need to subtract that.
__ sub(R4, TMP, Operand(Instr::kPCReadOffset));
COMPILE_ASSERT(IsCalleeSavedRegister(R4));
COMPILE_ASSERT(!IsArgumentRegister(THR));
@ -421,52 +461,56 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// Load the thread, verify the callback ID and exit the safepoint.
//
// We exit the safepoint inside DLRT_GetThreadForNativeCallbackTrampoline
// in order to safe code size on this shared stub.
// We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
// code size on this shared stub.
{
__ mov(R0, Operand(R4));
// We also need to look up the entry point for the trampoline. This is
// returned using a pointer passed to the second arg of the C function
// below. We aim that pointer at a reserved stack slot.
__ sub(SP, SP, Operand(compiler::target::kWordSize));
__ mov(R1, Operand(SP));
// We also need to know if this is a sync or async callback. This is also
// returned by pointer.
__ sub(SP, SP, Operand(compiler::target::kWordSize));
__ mov(R2, Operand(SP));
__ EnterFrame(1 << FP, 0);
__ ReserveAlignedFrameSpace(0);
__ mov(R0, Operand(R4));
#if defined(DART_TARGET_OS_FUCHSIA)
// TODO(52579): Remove.
if (FLAG_precompiled_mode) {
GenerateLoadBSSEntry(BSS::Relocation::DRT_GetFfiCallbackMetadata, R4,
TMP);
} else {
Label call;
__ ldr(R4, Address(PC, 0));
__ b(&call);
__ Emit(reinterpret_cast<intptr_t>(&DLRT_GetFfiCallbackMetadata));
__ Bind(&call);
}
#else
GenerateLoadFfiCallbackMetadataRuntimeFunction(
FfiCallbackMetadata::kGetFfiCallbackMetadata, R4);
#endif // defined(DART_TARGET_OS_FUCHSIA)
// Since DLRT_GetThreadForNativeCallbackTrampoline can theoretically be
// loaded anywhere, we use the same trick as before to ensure a predictable
// instruction sequence.
Label call;
__ ldr(R1, Address(PC, 0));
__ b(&call);
__ Emit(
reinterpret_cast<intptr_t>(&DLRT_GetThreadForNativeCallbackTrampoline));
__ Bind(&call);
__ blx(R1);
__ blx(R4);
__ mov(THR, Operand(R0));
__ LeaveFrame(1 << FP);
// The trampoline type is at the top of the stack. Pop it into R4.
__ Pop(R4);
// Entry point is now at the top of the stack. Pop it into R5.
__ Pop(R5);
}
__ PopRegisters(argument_registers);
COMPILE_ASSERT(!IsArgumentRegister(R8));
// Load the code object.
__ LoadFromOffset(R5, THR, compiler::target::Thread::isolate_group_offset());
__ LoadFromOffset(R5, R5,
compiler::target::IsolateGroup::object_store_offset());
__ LoadFromOffset(R5, R5,
compiler::target::ObjectStore::ffi_callback_code_offset());
__ LoadFieldFromOffset(R5, R5,
compiler::target::GrowableObjectArray::data_offset());
__ ldr(R5, __ ElementAddressForRegIndex(
/*is_load=*/true,
/*external=*/false,
/*array_cid=*/kArrayCid,
/*index_scale, smi-tagged=*/compiler::target::kWordSize * 2,
/*index_unboxed=*/false,
/*array=*/R5,
/*index=*/R4));
__ LoadFieldFromOffset(R5, R5, compiler::target::Code::entry_point_offset());
// On entry to the function, there will be four extra slots on the stack:
// saved THR, R4, R5 and the return address. The target will know to skip
// them.
@ -478,17 +522,17 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// Returns.
__ PopList((1 << PC) | (1 << THR) | (1 << R4) | (1 << R5));
ASSERT((__ CodeSize() - shared_stub_start) == kNativeCallbackSharedStubSize);
ASSERT(__ CodeSize() <= VirtualMemory::PageSize());
ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
FfiCallbackMetadata::kNativeCallbackSharedStubSize);
ASSERT_LESS_OR_EQUAL(__ CodeSize(), FfiCallbackMetadata::kPageSize);
#if defined(DEBUG)
while (__ CodeSize() < VirtualMemory::PageSize()) {
while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
__ Breakpoint();
}
#endif
#endif
}
#endif // !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateDispatchTableNullErrorStub() {
__ EnterStubFrame();
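
The trampoline loops above emit a page of identical, fixed-size trampolines,
each of which hands its own entry PC to the shared section, and the new
metadata table is keyed by that entry point. One plausible way such a key
maps back to a per-trampoline slot, using illustrative constants rather than
the SDK's actual values:

#include <cstdint>
#include <cstdio>

// Illustrative constants; the real ones live in FfiCallbackMetadata.
constexpr uintptr_t kPageSize = 4096;
constexpr uintptr_t kPageMask = ~(kPageSize - 1);
constexpr uintptr_t kTrampolineSize = 8;  // every trampoline is the same size

// Because all trampolines on a page are identical and fixed-size, the entry
// PC alone is enough to identify which callback slot was invoked.
uintptr_t TrampolineIndex(uintptr_t entry_pc) {
  const uintptr_t page_start = entry_pc & kPageMask;
  return (entry_pc - page_start) / kTrampolineSize;
}

int main() {
  const uintptr_t page = 0x40000000u;  // pretend page of trampolines
  printf("slot of the third trampoline: %ju\n",
         static_cast<uintmax_t>(TrampolineIndex(page + 2 * kTrampolineSize)));
  return 0;
}

The actual lookup lives in FfiCallbackMetadata and, as the stubs above show,
also returns the target entry point and the trampoline type.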


@ -20,6 +20,7 @@
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/constants.h"
#include "vm/ffi_callback_metadata.h"
#include "vm/instructions.h"
#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"
@ -425,44 +426,75 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
__ ret(R19);
}
#if !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateJITCallbackTrampolines(
intptr_t next_callback_id) {
#if !defined(HOST_ARCH_ARM64)
// TODO(37299): FFI is not support in SIMARM64.
void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
Register dst,
Register tmp) {
compiler::Label skip_reloc;
__ b(&skip_reloc);
InsertBSSRelocation(relocation);
__ Bind(&skip_reloc);
__ adr(tmp, compiler::Immediate(-compiler::target::kWordSize));
// tmp holds the address of the relocation.
__ ldr(dst, compiler::Address(tmp));
// dst holds the relocation itself: tmp - bss_start.
// tmp = tmp + (bss_start - tmp) = bss_start
__ add(tmp, tmp, compiler::Operand(dst));
// tmp holds the start of the BSS section.
// Load the "get-thread" routine: *bss_start.
__ ldr(dst, compiler::Address(tmp));
}
void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
uword function_index,
Register dst) {
// Keep in sync with FfiCallbackMetadata::EnsureFirstTrampolinePageLocked.
// Note: If the stub was aligned, this could be a single PC relative load.
// Load a pointer to the beginning of the stub into dst.
const intptr_t code_size = __ CodeSize();
__ adr(dst, Immediate(-code_size));
// Round dst down to the page size.
__ andi(dst, dst, Immediate(FfiCallbackMetadata::kPageMask));
// Load the function from the function table.
__ LoadFromOffset(
dst,
Address(dst, FfiCallbackMetadata::RuntimeFunctionOffset(function_index)));
}
void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
#if defined(USING_SIMULATOR) && !defined(DART_PRECOMPILER)
// TODO(37299): FFI is not supported in SIMARM64.
__ Breakpoint();
#else
Label done;
Label body;
// R9 is volatile and not used for passing any arguments.
COMPILE_ASSERT(!IsCalleeSavedRegister(R9) && !IsArgumentRegister(R9));
for (intptr_t i = 0;
i < NativeCallbackTrampolines::NumCallbackTrampolinesPerPage(); ++i) {
// We don't use LoadImmediate because we need the trampoline size to be
// fixed independently of the callback ID.
//
// Instead we paste the callback ID directly in the code load it
// PC-relative.
__ ldr(R9, compiler::Address::PC(2 * Instr::kInstrSize));
__ b(&done);
__ Emit(next_callback_id + i);
for (intptr_t i = 0; i < FfiCallbackMetadata::NumCallbackTrampolinesPerPage();
++i) {
// The FfiCallbackMetadata table is keyed by the trampoline entry point. So
// look up the current PC, then jump to the shared section.
__ adr(R9, Immediate(0));
__ b(&body);
}
ASSERT(__ CodeSize() ==
kNativeCallbackTrampolineSize *
NativeCallbackTrampolines::NumCallbackTrampolinesPerPage());
ASSERT_EQUAL(__ CodeSize(),
FfiCallbackMetadata::kNativeCallbackTrampolineSize *
FfiCallbackMetadata::NumCallbackTrampolinesPerPage());
__ Bind(&done);
__ Bind(&body);
const intptr_t shared_stub_start = __ CodeSize();
// The load of the callback ID might have incorrect higher-order bits, since
// we only emit a 32-bit callback ID.
__ uxtw(R9, R9);
// Save THR (callee-saved) and LR on the real C stack (CSP). Keeps it
// aligned.
COMPILE_ASSERT(StubCodeCompiler::kNativeCallbackTrampolineStackDelta == 2);
COMPILE_ASSERT(FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta == 2);
SPILLS_LR_TO_FRAME(__ stp(
THR, LR, Address(CSP, -2 * target::kWordSize, Address::PairPreIndex)));
@ -473,80 +505,68 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
all_registers.Add(Location::RegisterLocation(
CallingConventions::kPointerToReturnStructRegisterCall));
// The call below might clobber R9 (volatile, holding callback_id).
all_registers.Add(Location::RegisterLocation(R9));
// Load the thread, verify the callback ID and exit the safepoint.
//
// We exit the safepoint inside DLRT_GetThreadForNativeCallbackTrampoline
// in order to safe code size on this shared stub.
// We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
// code size on this shared stub.
{
__ mov(SP, CSP);
__ EnterFrame(0);
__ PushRegisters(all_registers);
__ mov(R0, R9);
// We also need to look up the entry point for the trampoline. This is
// returned using a pointer passed to the second arg of the C function
// below. We aim that pointer at a reserved stack slot.
__ AddImmediate(SP, SP, -compiler::target::kWordSize);
__ mov(R1, SP);
// We also need to know if this is a sync or async callback. This is also
// returned by pointer.
__ AddImmediate(SP, SP, -compiler::target::kWordSize);
__ mov(R2, SP);
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(0);
__ mov(CSP, SP);
// Since DLRT_GetThreadForNativeCallbackTrampoline can theoretically be
// loaded anywhere, we use the same trick as before to ensure a predictable
// instruction sequence.
Label call;
__ mov(R0, R9);
__ ldr(R1, compiler::Address::PC(2 * Instr::kInstrSize));
__ b(&call);
#if defined(DART_TARGET_OS_FUCHSIA)
// TODO(52579): Remove.
if (FLAG_precompiled_mode) {
GenerateLoadBSSEntry(BSS::Relocation::DRT_GetFfiCallbackMetadata, R4, R9);
} else {
Label call;
__ ldr(R4, compiler::Address::PC(2 * Instr::kInstrSize));
__ b(&call);
__ Emit64(reinterpret_cast<int64_t>(&DLRT_GetFfiCallbackMetadata));
__ Bind(&call);
}
#else
GenerateLoadFfiCallbackMetadataRuntimeFunction(
FfiCallbackMetadata::kGetFfiCallbackMetadata, R4);
#endif // defined(DART_TARGET_OS_FUCHSIA)
__ Emit64(
reinterpret_cast<int64_t>(&DLRT_GetThreadForNativeCallbackTrampoline));
__ Bind(&call);
__ blr(R1);
__ blr(R4);
__ mov(THR, R0);
__ LeaveFrame();
// The trampoline type is at the top of the stack. Pop it into R9.
__ Pop(R9);
// Entry point is now at the top of the stack. Pop it into R10.
COMPILE_ASSERT(!IsCalleeSavedRegister(R10) && !IsArgumentRegister(R10));
__ Pop(R10);
__ PopRegisters(all_registers);
__ LeaveFrame();
__ mov(CSP, SP);
}
COMPILE_ASSERT(!IsCalleeSavedRegister(R10) && !IsArgumentRegister(R10));
// Load the code object.
__ LoadFromOffset(R10, THR, compiler::target::Thread::isolate_group_offset());
__ LoadFromOffset(R10, R10,
compiler::target::IsolateGroup::object_store_offset());
__ LoadFromOffset(R10, R10,
compiler::target::ObjectStore::ffi_callback_code_offset());
#if defined(DART_COMPRESSED_POINTERS)
// Partially setup HEAP_BITS for LoadCompressed[FieldFromOffset].
ASSERT(IsAbiPreservedRegister(HEAP_BITS)); // Need to save and restore.
__ Push(HEAP_BITS);
__ ldr(HEAP_BITS, compiler::Address(THR, target::Thread::heap_base_offset()));
__ LsrImmediate(HEAP_BITS, HEAP_BITS, 32);
#endif
__ LoadCompressedFieldFromOffset(
R10, R10, compiler::target::GrowableObjectArray::data_offset());
__ LoadCompressed(
R10,
__ ElementAddressForRegIndex(
/*external=*/false,
/*array_cid=*/kArrayCid,
/*index_scale, smi-tagged=*/compiler::target::kCompressedWordSize * 2,
/*index_unboxed=*/false,
/*array=*/R10,
/*index=*/R9,
/*temp=*/TMP));
#if defined(DART_COMPRESSED_POINTERS)
__ Pop(HEAP_BITS);
#endif
__ LoadFieldFromOffset(R10, R10,
compiler::target::Code::entry_point_offset());
// Clobbers all volatile registers, including the callback ID in R9.
// Resets CSP and SP, important for EnterSafepoint below.
__ blr(R10);
@ -560,17 +580,17 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
__ ret();
ASSERT((__ CodeSize() - shared_stub_start) == kNativeCallbackSharedStubSize);
ASSERT(__ CodeSize() <= VirtualMemory::PageSize());
ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
FfiCallbackMetadata::kNativeCallbackSharedStubSize);
ASSERT_LESS_OR_EQUAL(__ CodeSize(), FfiCallbackMetadata::kPageSize);
#if defined(DEBUG)
while (__ CodeSize() < VirtualMemory::PageSize()) {
while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
__ Breakpoint();
}
#endif
#endif // !defined(HOST_ARCH_ARM64)
}
#endif // !defined(DART_PRECOMPILER)
// R1: The extracted method.
// R4: The type_arguments_field_offset (or 0)


@ -19,6 +19,7 @@
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/constants.h"
#include "vm/ffi_callback_metadata.h"
#include "vm/instructions.h"
#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"
@ -189,6 +190,13 @@ void StubCodeCompiler::GenerateExitSafepointIgnoreUnwindInProgressStub() {
kExitSafepointIgnoreUnwindInProgressRuntimeEntry.OffsetFromThread());
}
void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
Register dst,
Register tmp) {
// Only used in AOT.
__ Breakpoint();
}
// Calls a native function inside a safepoint.
//
// On entry:
@ -210,131 +218,108 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
__ jmp(EBX);
}
void StubCodeCompiler::GenerateJITCallbackTrampolines(
intptr_t next_callback_id) {
Label done, ret_4;
void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
Label ret_4;
// EAX is volatile and doesn't hold any arguments.
COMPILE_ASSERT(!IsArgumentRegister(EAX) && !IsCalleeSavedRegister(EAX));
for (intptr_t i = 0;
i < NativeCallbackTrampolines::NumCallbackTrampolinesPerPage(); ++i) {
__ movl(EAX, compiler::Immediate(next_callback_id + i));
__ jmp(&done);
Label body, load_tramp_addr;
const intptr_t kCallLength = 5;
for (intptr_t i = 0; i < FfiCallbackMetadata::NumCallbackTrampolinesPerPage();
++i) {
// The FfiCallbackMetadata table is keyed by the trampoline entry point. So
// look up the current PC, then jump to the shared section. There's no easy
// way to get the PC in ia32 so we have to do a call, grab the return address
// from the stack, then return here (mismatched call/ret causes problems),
// then jump to the shared section.
const intptr_t size_before = __ CodeSize();
__ call(&load_tramp_addr);
const intptr_t size_after = __ CodeSize();
ASSERT_EQUAL(size_after - size_before, kCallLength);
__ jmp(&body);
}
ASSERT(__ CodeSize() ==
kNativeCallbackTrampolineSize *
NativeCallbackTrampolines::NumCallbackTrampolinesPerPage());
__ Bind(&done);
ASSERT_EQUAL(__ CodeSize(),
FfiCallbackMetadata::kNativeCallbackTrampolineSize *
FfiCallbackMetadata::NumCallbackTrampolinesPerPage());
const intptr_t shared_stub_start = __ CodeSize();
__ Bind(&load_tramp_addr);
// Load the return address into EAX, and subtract the size of the call
// instruction. This is our original trampoline address.
__ movl(EAX, Address(SPREG, 0));
__ subl(EAX, Immediate(kCallLength));
__ ret();
__ Bind(&body);
// Save THR and EBX which are callee-saved.
__ pushl(THR);
__ pushl(EBX);
// We need the callback ID after the call for return stack.
__ pushl(EAX);
// THR & return address
COMPILE_ASSERT(StubCodeCompiler::kNativeCallbackTrampolineStackDelta == 4);
COMPILE_ASSERT(FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta == 4);
// Load the thread, verify the callback ID and exit the safepoint.
//
// We exit the safepoint inside DLRT_GetThreadForNativeCallbackTrampoline
// in order to save code size on this shared stub.
// We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
// code size on this shared stub.
{
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(compiler::target::kWordSize);
// entry_point, trampoline_type, &trampoline_type, &entry_point, trampoline
// ^------ GetFfiCallbackMetadata args ------^
__ ReserveAlignedFrameSpace(5 * target::kWordSize);
__ movl(compiler::Address(SPREG, 0), EAX);
__ movl(EAX, compiler::Immediate(reinterpret_cast<int64_t>(
DLRT_GetThreadForNativeCallbackTrampoline)));
// Trampoline arg.
__ movl(Address(SPREG, 0 * target::kWordSize), EAX);
// Pointer to trampoline type stack slot.
__ movl(EAX, SPREG);
__ addl(EAX, Immediate(3 * target::kWordSize));
__ movl(Address(SPREG, 2 * target::kWordSize), EAX);
// Pointer to entry point stack slot.
__ addl(EAX, Immediate(target::kWordSize));
__ movl(Address(SPREG, 1 * target::kWordSize), EAX);
__ movl(EAX,
Immediate(reinterpret_cast<int64_t>(DLRT_GetFfiCallbackMetadata)));
__ call(EAX);
__ movl(THR, EAX);
__ movl(EAX, compiler::Address(SPREG, 0));
// Save the trampoline type in EBX, and the entry point in ECX.
__ movl(EBX, Address(SPREG, 3 * target::kWordSize));
__ movl(ECX, Address(SPREG, 4 * target::kWordSize));
__ LeaveFrame();
// Save the trampoline type to the stack, because we'll need it after the
// call to decide whether to ret() or ret(4).
__ pushl(EBX);
}
COMPILE_ASSERT(!IsCalleeSavedRegister(ECX) && !IsArgumentRegister(ECX));
COMPILE_ASSERT(ECX != THR);
// Load the target from the thread.
__ movl(ECX, compiler::Address(
THR, compiler::target::Thread::isolate_group_offset()));
__ movl(ECX, compiler::Address(
ECX, compiler::target::IsolateGroup::object_store_offset()));
__ movl(ECX,
compiler::Address(
ECX, compiler::target::ObjectStore::ffi_callback_code_offset()));
__ movl(ECX, compiler::FieldAddress(
ECX, compiler::target::GrowableObjectArray::data_offset()));
__ movl(ECX, __ ElementAddressForRegIndex(
/*external=*/false,
/*array_cid=*/kArrayCid,
/*index, smi-tagged=*/compiler::target::kWordSize * 2,
/*index_unboxed=*/false,
/*array=*/ECX,
/*index=*/EAX));
__ movl(ECX, compiler::FieldAddress(
ECX, compiler::target::Code::entry_point_offset()));
// On entry to the function, there will be two extra slots on the stack:
// the saved THR and the return address. The target will know to skip them.
__ call(ECX);
// Register state:
// - callee saved registers (should be restored)
// - EBX available as scratch because we restore it later.
// - ESI(THR) contains thread
// - EDI
// - return registers (should not be touched)
// - EAX
// - EDX
// - available scratch registers
// - ECX free
// Load the return stack delta from the thread.
__ movl(ECX, compiler::Address(
THR, compiler::target::Thread::isolate_group_offset()));
__ movl(ECX, compiler::Address(
ECX, compiler::target::IsolateGroup::object_store_offset()));
__ movl(
ECX,
compiler::Address(
ECX,
compiler::target::ObjectStore::ffi_callback_stack_return_offset()));
__ popl(EBX); // Compiler callback id.
__ movzxb(EBX, __ ElementAddressForRegIndex(
/*external=*/false,
/*array_cid=*/kTypedDataUint8ArrayCid,
/*index=*/1,
/*index_unboxed=*/false,
/*array=*/ECX,
/*index=*/EBX));
#if defined(DEBUG)
// Stack delta should be either 0 or 4.
Label check_done;
__ BranchIfZero(EBX, &check_done);
__ CompareImmediate(EBX, compiler::target::kWordSize);
__ BranchIf(EQUAL, &check_done);
__ Breakpoint();
__ Bind(&check_done);
#endif
// Takes care to not clobber *any* registers (besides scratch).
__ EnterFullSafepoint(/*scratch=*/ECX);
// Pop the trampoline type into ECX.
__ popl(ECX);
// Restore callee-saved registers.
__ movl(ECX, EBX);
__ popl(EBX);
__ popl(THR);
__ cmpl(ECX, compiler::Immediate(Smi::RawValue(0)));
__ j(NOT_EQUAL, &ret_4, compiler::Assembler::kNearJump);
__ cmpl(ECX, Immediate(static_cast<uword>(
FfiCallbackMetadata::TrampolineType::kSync)));
__ j(NOT_EQUAL, &ret_4, Assembler::kNearJump);
__ ret();
__ Bind(&ret_4);
@ -342,11 +327,12 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// 'kNativeCallbackSharedStubSize' is an upper bound because the exact
// instruction size can vary slightly based on OS calling conventions.
ASSERT((__ CodeSize() - shared_stub_start) <= kNativeCallbackSharedStubSize);
ASSERT(__ CodeSize() <= VirtualMemory::PageSize());
ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
FfiCallbackMetadata::kNativeCallbackSharedStubSize);
ASSERT_LESS_OR_EQUAL(__ CodeSize(), FfiCallbackMetadata::kPageSize);
#if defined(DEBUG)
while (__ CodeSize() < VirtualMemory::PageSize()) {
while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
__ Breakpoint();
}
#endif

View file

@ -20,6 +20,7 @@
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/backend/locations.h"
#include "vm/constants.h"
#include "vm/ffi_callback_metadata.h"
#include "vm/instructions.h"
#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"
@ -292,37 +293,76 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
__ jr(S3);
}
#if !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateJITCallbackTrampolines(
intptr_t next_callback_id) {
#if defined(USING_SIMULATOR)
// TODO(37299): FFI is not support in SIMRISCV32/64.
void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
Register dst,
Register tmp) {
compiler::Label skip_reloc;
__ j(&skip_reloc, compiler::Assembler::kNearJump);
InsertBSSRelocation(relocation);
__ Bind(&skip_reloc);
__ auipc(tmp, 0);
__ addi(tmp, tmp, -compiler::target::kWordSize);
// tmp holds the address of the relocation.
__ lx(dst, compiler::Address(tmp));
// dst holds the relocation itself: tmp - bss_start.
// tmp = tmp + (bss_start - tmp) = bss_start
__ add(tmp, tmp, dst);
// tmp holds the start of the BSS section.
// Load the "get-thread" routine: *bss_start.
__ lx(dst, compiler::Address(tmp));
}
void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
uword function_index,
Register dst) {
// Keep in sync with FfiCallbackMetadata::EnsureFirstTrampolinePageLocked.
// Note: If the stub was aligned, this could be a single PC relative load.
// Load a pointer to the beginning of the stub into dst.
const intptr_t code_size = __ CodeSize();
__ auipc(dst, 0);
__ AddImmediate(dst, -code_size);
// Round dst down to the page size.
__ AndImmediate(dst, FfiCallbackMetadata::kPageMask);
// Load the function from the function table.
__ LoadFromOffset(
dst,
Address(dst, FfiCallbackMetadata::RuntimeFunctionOffset(function_index)));
}
void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
#if defined(USING_SIMULATOR) && !defined(DART_PRECOMPILER)
// TODO(37299): FFI is not supported in SIMRISCV32/64.
__ ebreak();
#else
Label loaded_callback_id_hi;
Label body;
// T1 is volatile and not used for passing any arguments.
COMPILE_ASSERT(!IsCalleeSavedRegister(T1) && !IsArgumentRegister(T1));
for (intptr_t i = 0;
i < NativeCallbackTrampolines::NumCallbackTrampolinesPerPage(); ++i) {
// We don't use LoadImmediate because we need the trampoline size to be
// fixed independently of the callback ID.
// lui has 20 bits of range.
__ lui_fixed(T1, (next_callback_id + i) << 12);
__ j(&loaded_callback_id_hi);
for (intptr_t i = 0; i < FfiCallbackMetadata::NumCallbackTrampolinesPerPage();
++i) {
// The FfiCallbackMetadata table is keyed by the trampoline entry point. So
// look up the current PC, then jump to the shared section.
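// auipc with a zero immediate materializes the address of the auipc
// instruction itself, i.e. this trampoline's entry point, into T1.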
__ auipc(T1, 0);
__ j(&body);
}
ASSERT(__ CodeSize() ==
kNativeCallbackTrampolineSize *
NativeCallbackTrampolines::NumCallbackTrampolinesPerPage());
ASSERT_EQUAL(__ CodeSize(),
FfiCallbackMetadata::kNativeCallbackTrampolineSize *
FfiCallbackMetadata::NumCallbackTrampolinesPerPage());
const intptr_t shared_stub_start = __ CodeSize();
__ Bind(&loaded_callback_id_hi);
__ srai(T1, T1, 12);
__ Bind(&body);
// Save THR (callee-saved) and RA. Keeps stack aligned.
COMPILE_ASSERT(StubCodeCompiler::kNativeCallbackTrampolineStackDelta == 2);
COMPILE_ASSERT(FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta == 2);
__ PushRegisterPair(RA, THR);
COMPILE_ASSERT(!IsArgumentRegister(THR));
@ -334,37 +374,51 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// Load the thread, verify the callback ID and exit the safepoint.
//
// We exit the safepoint inside DLRT_GetThreadForNativeCallbackTrampoline
// in order to safe code size on this shared stub.
// We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
// code size on this shared stub.
{
__ PushRegisters(all_registers);
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(0);
// Reserve one slot for the entry point and one for the tramp abi.
__ ReserveAlignedFrameSpace(2 * target::kWordSize);
// Since DLRT_GetThreadForNativeCallbackTrampoline can theoretically be
// loaded anywhere, we use the same trick as before to ensure a predictable
// instruction sequence.
// Since DLRT_GetFfiCallbackMetadata can theoretically be loaded anywhere,
// we use the same trick as before to ensure a predictable instruction
// sequence.
Label call;
__ mv(A0, T1);
__ mv(A0, T1); // trampoline
__ mv(A1, SPREG); // out_entry_point
__ addi(A2, SPREG, target::kWordSize); // out_trampoline_type
const intptr_t kPCRelativeLoadOffset = 12;
intptr_t start = __ CodeSize();
__ auipc(T1, 0);
__ lx(T1, Address(T1, kPCRelativeLoadOffset));
__ j(&call);
#if defined(DART_TARGET_OS_FUCHSIA)
// TODO(52579): Remove.
if (FLAG_precompiled_mode) {
GenerateLoadBSSEntry(BSS::Relocation::DRT_GetFfiCallbackMetadata, T1, T2);
} else {
const intptr_t kPCRelativeLoadOffset = 12;
intptr_t start = __ CodeSize();
__ auipc(T1, 0);
__ lx(T1, Address(T1, kPCRelativeLoadOffset));
__ j(&call);
ASSERT_EQUAL(__ CodeSize() - start, kPCRelativeLoadOffset);
ASSERT_EQUAL(__ CodeSize() - start, kPCRelativeLoadOffset);
#if XLEN == 32
__ Emit32(
reinterpret_cast<int32_t>(&DLRT_GetThreadForNativeCallbackTrampoline));
__ Emit32(reinterpret_cast<int32_t>(&DLRT_GetFfiCallbackMetadata));
#else
__ Emit64(
reinterpret_cast<int64_t>(&DLRT_GetThreadForNativeCallbackTrampoline));
__ Emit64(reinterpret_cast<int64_t>(&DLRT_GetFfiCallbackMetadata));
#endif
}
#else
GenerateLoadFfiCallbackMetadataRuntimeFunction(
FfiCallbackMetadata::kGetFfiCallbackMetadata, T1);
#endif // defined(DART_TARGET_OS_FUCHSIA)
__ Bind(&call);
__ jalr(T1);
__ mv(THR, A0);
__ lx(T2, Address(SPREG, 0)); // entry_point
__ lx(T3, Address(SPREG, target::kWordSize)); // trampoline_type
__ LeaveFrame();
@ -374,26 +428,6 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
COMPILE_ASSERT(!IsCalleeSavedRegister(T2) && !IsArgumentRegister(T2));
COMPILE_ASSERT(!IsCalleeSavedRegister(T3) && !IsArgumentRegister(T3));
// Load the code object.
__ LoadFromOffset(T2, THR, compiler::target::Thread::isolate_group_offset());
__ LoadFromOffset(T2, T2,
compiler::target::IsolateGroup::object_store_offset());
__ LoadFromOffset(T2, T2,
compiler::target::ObjectStore::ffi_callback_code_offset());
__ LoadCompressedFieldFromOffset(
T2, T2, compiler::target::GrowableObjectArray::data_offset());
__ LoadCompressed(
T2,
__ ElementAddressForRegIndex(
/*external=*/false,
/*array_cid=*/kArrayCid,
/*index_scale, smi-tagged=*/compiler::target::kCompressedWordSize * 2,
/*index_unboxed=*/false,
/*array=*/T2,
/*index=*/T1,
/*temp=*/T3));
__ LoadFieldFromOffset(T2, T2, compiler::target::Code::entry_point_offset());
// Clobbers all volatile registers, including the callback ID in T1.
__ jalr(T2);
@ -403,18 +437,17 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
__ PopRegisterPair(RA, THR);
__ ret();
ASSERT_EQUAL((__ CodeSize() - shared_stub_start),
kNativeCallbackSharedStubSize);
ASSERT(__ CodeSize() <= VirtualMemory::PageSize());
ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
FfiCallbackMetadata::kNativeCallbackSharedStubSize);
ASSERT_LESS_OR_EQUAL(__ CodeSize(), FfiCallbackMetadata::kPageSize);
#if defined(DEBUG)
while (__ CodeSize() < VirtualMemory::PageSize()) {
while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
__ ebreak();
}
#endif
#endif
}
#endif // !defined(DART_PRECOMPILER)
// T1: The extracted method.
// T4: The type_arguments_field_offset (or 0)

View file

@ -23,6 +23,7 @@
#include "vm/compiler/api/type_check_mode.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/constants.h"
#include "vm/ffi_callback_metadata.h"
#include "vm/instructions.h"
#include "vm/static_type_exactness_state.h"
#include "vm/tags.h"
@ -385,29 +386,80 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub() {
__ ret();
}
#if !defined(DART_PRECOMPILER)
void StubCodeCompiler::GenerateLoadBSSEntry(BSS::Relocation relocation,
Register dst,
Register tmp) {
compiler::Label skip_reloc;
__ jmp(&skip_reloc);
InsertBSSRelocation(relocation);
const intptr_t reloc_end = __ CodeSize();
__ Bind(&skip_reloc);
const intptr_t kLeaqLength = 7;
__ leaq(dst, compiler::Address::AddressRIPRelative(
-kLeaqLength - compiler::target::kWordSize));
ASSERT((__ CodeSize() - reloc_end) == kLeaqLength);
// dst holds the address of the relocation.
__ movq(tmp, compiler::Address(dst, 0));
// tmp holds the relocation itself: dst - bss_start.
// dst = dst + (bss_start - dst) = bss_start
__ addq(dst, tmp);
// dst holds the start of the BSS section.
// Load the routine.
__ movq(dst, compiler::Address(dst, 0));
}
void StubCodeCompiler::GenerateLoadFfiCallbackMetadataRuntimeFunction(
uword function_index,
Register dst) {
// Keep in sync with FfiCallbackMetadata::EnsureFirstTrampolinePageLocked.
// Note: If the stub was aligned, this could be a single PC relative load.
// Load a pointer to the beginning of the stub into dst.
const intptr_t kLeaqLength = 7;
const intptr_t code_size = __ CodeSize();
__ leaq(dst, Address::AddressRIPRelative(-kLeaqLength - code_size));
// Round dst down to the page size.
__ andq(dst, Immediate(FfiCallbackMetadata::kPageMask));
// Load the function from the function table.
__ LoadFromOffset(
dst,
Address(dst, FfiCallbackMetadata::RuntimeFunctionOffset(function_index)));
}
static const RegisterSet kArgumentRegisterSet(
CallingConventions::kArgumentRegisters,
CallingConventions::kFpuArgumentRegisters);
void StubCodeCompiler::GenerateJITCallbackTrampolines(
intptr_t next_callback_id) {
Label done;
void StubCodeCompiler::GenerateFfiCallbackTrampolineStub() {
// RAX is volatile and not used for passing any arguments.
COMPILE_ASSERT(!IsCalleeSavedRegister(RAX) && !IsArgumentRegister(RAX));
for (intptr_t i = 0;
i < NativeCallbackTrampolines::NumCallbackTrampolinesPerPage(); ++i) {
__ movq(RAX, compiler::Immediate(next_callback_id + i));
__ jmp(&done);
Label body;
for (intptr_t i = 0; i < FfiCallbackMetadata::NumCallbackTrampolinesPerPage();
++i) {
// The FfiCallbackMetadata table is keyed by the trampoline entry point. So
// look up the current PC, then jump to the shared section. RIP gives us the
// address of the next instruction, so to get the true entry point, we have
// to subtract the size of the leaq instruction.
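// For example (illustrative numbers only): for a trampoline starting at
// 0x2000, RIP after the 7-byte leaq is 0x2007, so a -kLeaqLength displacement
// leaves 0x2000, the metadata key, in RAX.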
const intptr_t kLeaqLength = 7;
const intptr_t size_before = __ CodeSize();
__ leaq(RAX, Address::AddressRIPRelative(-kLeaqLength));
const intptr_t size_after = __ CodeSize();
ASSERT_EQUAL(size_after - size_before, kLeaqLength);
__ jmp(&body);
}
ASSERT_EQUAL(__ CodeSize(),
kNativeCallbackTrampolineSize *
NativeCallbackTrampolines::NumCallbackTrampolinesPerPage());
FfiCallbackMetadata::kNativeCallbackTrampolineSize *
FfiCallbackMetadata::NumCallbackTrampolinesPerPage());
__ Bind(&done);
__ Bind(&body);
const intptr_t shared_stub_start = __ CodeSize();
@ -415,38 +467,64 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
__ pushq(THR);
// 2 = THR & return address
COMPILE_ASSERT(2 == StubCodeCompiler::kNativeCallbackTrampolineStackDelta);
// Save the callback ID.
__ pushq(RAX);
COMPILE_ASSERT(2 == FfiCallbackMetadata::kNativeCallbackTrampolineStackDelta);
// Save all registers which might hold arguments.
__ PushRegisters(kArgumentRegisterSet);
// Load the thread, verify the callback ID and exit the safepoint.
//
// We exit the safepoint inside DLRT_GetThreadForNativeCallbackTrampoline
// in order to save code size on this shared stub.
// We exit the safepoint inside DLRT_GetFfiCallbackMetadata in order to save
// code size on this shared stub.
{
COMPILE_ASSERT(RAX != CallingConventions::kArg1Reg);
__ movq(CallingConventions::kArg1Reg, RAX);
// We also need to look up the entry point for the trampoline. This is
// returned using a pointer passed to the second arg of the C function
// below. We aim that pointer at a reserved stack slot.
COMPILE_ASSERT(RAX != CallingConventions::kArg2Reg);
__ pushq(Immediate(0)); // Reserve a stack slot for the entry point.
__ movq(CallingConventions::kArg2Reg, RSP);
// We also need to know if this is a sync or async callback. This is also
// returned by pointer.
COMPILE_ASSERT(RAX != CallingConventions::kArg3Reg);
__ pushq(Immediate(0)); // Reserve a stack slot for the trampoline type.
__ movq(CallingConventions::kArg3Reg, RSP);
#if defined(DART_TARGET_OS_FUCHSIA)
// TODO(52579): Remove.
if (FLAG_precompiled_mode) {
GenerateLoadBSSEntry(BSS::Relocation::DRT_GetFfiCallbackMetadata, RAX,
TMP);
} else {
__ movq(RAX, Immediate(
reinterpret_cast<int64_t>(DLRT_GetFfiCallbackMetadata)));
}
#else
GenerateLoadFfiCallbackMetadataRuntimeFunction(
FfiCallbackMetadata::kGetFfiCallbackMetadata, RAX);
#endif // defined(DART_TARGET_OS_FUCHSIA)
__ EnterFrame(0);
__ ReserveAlignedFrameSpace(0);
COMPILE_ASSERT(RAX != CallingConventions::kArg1Reg);
__ movq(CallingConventions::kArg1Reg, RAX);
__ movq(RAX, compiler::Immediate(reinterpret_cast<int64_t>(
DLRT_GetThreadForNativeCallbackTrampoline)));
__ CallCFunction(RAX);
__ movq(THR, RAX);
__ LeaveFrame();
// The trampoline type is at the top of the stack. Pop it into RAX.
__ popq(RAX);
// Entry point is now at the top of the stack. Pop it into TMP.
__ popq(TMP);
}
// Restore the arguments.
__ PopRegisters(kArgumentRegisterSet);
// Restore the callback ID.
__ popq(RAX);
// Current state:
//
// Stack:
@ -454,34 +532,9 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// <return address>
// <saved THR>
//
// Registers: Like entry, except RAX == callback_id and THR == thread
// Registers: Like entry, except TMP == target, RAX == abi, and THR == thread
// All argument registers are untouched.
COMPILE_ASSERT(!IsCalleeSavedRegister(TMP) && !IsArgumentRegister(TMP));
// Load the target from the thread.
__ movq(TMP, compiler::Address(
THR, compiler::target::Thread::isolate_group_offset()));
__ movq(TMP, compiler::Address(
TMP, compiler::target::IsolateGroup::object_store_offset()));
__ movq(TMP,
compiler::Address(
TMP, compiler::target::ObjectStore::ffi_callback_code_offset()));
__ LoadCompressed(
TMP, compiler::FieldAddress(
TMP, compiler::target::GrowableObjectArray::data_offset()));
__ LoadCompressed(
TMP,
__ ElementAddressForRegIndex(
/*external=*/false,
/*array_cid=*/kArrayCid,
/*index_scale, smi-tagged=*/compiler::target::kCompressedWordSize * 2,
/*index_unboxed=*/false,
/*array=*/TMP,
/*index=*/RAX));
__ movq(TMP, compiler::FieldAddress(
TMP, compiler::target::Code::entry_point_offset()));
// On entry to the function, there will be two extra slots on the stack:
// the saved THR and the return address. The target will know to skip them.
__ call(TMP);
@ -496,16 +549,16 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
// 'kNativeCallbackSharedStubSize' is an upper bound because the exact
// instruction size can vary slightly based on OS calling conventions.
ASSERT((__ CodeSize() - shared_stub_start) <= kNativeCallbackSharedStubSize);
ASSERT(__ CodeSize() <= VirtualMemory::PageSize());
ASSERT_LESS_OR_EQUAL(__ CodeSize() - shared_stub_start,
FfiCallbackMetadata::kNativeCallbackSharedStubSize);
ASSERT_LESS_OR_EQUAL(__ CodeSize(), FfiCallbackMetadata::kPageSize);
#if defined(DEBUG)
while (__ CodeSize() < VirtualMemory::PageSize()) {
while (__ CodeSize() < FfiCallbackMetadata::kPageSize) {
__ Breakpoint();
}
#endif
}
#endif // !defined(DART_PRECOMPILER)
// RBX: The extracted method.
// RDX: The type_arguments_field_offset (or 0)

View file

@ -20,6 +20,7 @@
#if defined(DART_PRECOMPILED_RUNTIME) && defined(DART_TARGET_OS_LINUX)
#include "vm/elf.h"
#endif
#include "vm/ffi_callback_metadata.h"
#include "vm/flags.h"
#include "vm/handles.h"
#include "vm/heap/become.h"
@ -341,6 +342,7 @@ char* Dart::DartInit(const Dart_InitializeParams* params) {
StoreBuffer::Init();
MarkingStack::Init();
TargetCPUFeatures::Init();
FfiCallbackMetadata::Init();
#if defined(USING_SIMULATOR)
Simulator::Init();
@ -764,6 +766,7 @@ char* Dart::Cleanup() {
SubtypeTestCache::Cleanup();
ArgumentsDescriptor::Cleanup();
OffsetsTable::Cleanup();
FfiCallbackMetadata::Cleanup();
TargetCPUFeatures::Cleanup();
MarkingStack::Cleanup();
StoreBuffer::Cleanup();

View file

@ -1192,7 +1192,7 @@ void Elf::CreateBSS() {
label = kVmBssLabel;
} else if (strcmp(portion.symbol_name,
kIsolateSnapshotInstructionsAsmSymbol) == 0) {
size = BSS::kIsolateEntryCount * compiler::target::kWordSize;
size = BSS::kIsolateGroupEntryCount * compiler::target::kWordSize;
symbol_name = kIsolateSnapshotBssAsmSymbol;
label = kIsolateBssLabel;
} else {

View file

@ -0,0 +1,289 @@
// Copyright (c) 2023, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/ffi_callback_metadata.h"
#include "vm/flag_list.h"
#include "vm/object.h"
#include "vm/runtime_entry.h"
#include "vm/stub_code.h"
namespace dart {
FfiCallbackMetadata::FfiCallbackMetadata() {}
void FfiCallbackMetadata::EnsureStubPageLocked() {
// Assumes lock_ is already locked for writing.
if (stub_page_ != nullptr) {
return;
}
// Keep in sync with GenerateLoadFfiCallbackMetadataRuntimeFunction.
// The FfiCallbackTrampoline stub is designed to take up 1 page of memory. At
// the moment it's not aligned though, so we need to do some alignment math
// here. So when we duplicate it below, we're wasting some memory because the
// stub probably straddles 2 aligned pages. It would be better to align the
// stub inside the stub code compiler, but we don't have a way of doing that
// at the moment.
// TODO(https://dartbug.com/52498): Align the stub.
// | page | page | pages |
// [ alignment ][ stub ][ alignment ][ functions ][ metadata ][ alignment ]
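// For example (illustrative addresses only): with kPageSize = 4 KB and a stub
// whose code_start is 0x401400, page_start is 0x401000, code_end_aligned is
// 0x403000, the runtime-function table occupies the first kNumRuntimeFunctions
// words at 0x403000, and the Metadata array follows immediately after it.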
ASSERT_LESS_OR_EQUAL(VirtualMemory::PageSize(), kPageSize);
const Code& trampoline_code = StubCode::FfiCallbackTrampoline();
const uword code_start = trampoline_code.EntryPoint();
const uword page_start = Utils::RoundDown(code_start, kPageSize);
const uword code_end_aligned = page_start + 2 * kPageSize;
ASSERT_LESS_OR_EQUAL(code_start + trampoline_code.Size(), code_end_aligned);
const uword functions_start = code_end_aligned;
const uword functions_size =
kNumRuntimeFunctions * compiler::target::kWordSize;
const uword metadata_start = functions_start + functions_size;
const uword metadata_size =
NumCallbackTrampolinesPerPage() * sizeof(Metadata);
const uword metadata_end = metadata_start + metadata_size;
const uword page_end = Utils::RoundUp(metadata_end, kPageSize);
stub_page_ = VirtualMemory::ForImagePage(reinterpret_cast<void*>(page_start),
code_end_aligned - page_start);
offset_of_first_trampoline_in_page_ = code_start - page_start;
offset_of_first_runtime_function_in_page_ = functions_start - page_start;
offset_of_first_metadata_in_page_ = metadata_start - page_start;
size_of_trampoline_page_ = page_end - page_start;
#if defined(DART_TARGET_OS_FUCHSIA)
// On Fuchsia we can't currently duplicate pages, so use the first page of
// trampolines. Store the stub page's metadata in a separately allocated RW
// page.
// TODO(https://dartbug.com/52579): Remove.
fuchsia_metadata_page_ = VirtualMemory::AllocateAligned(
size_of_trampoline_page_, kPageSize, /*is_executable=*/false,
/*is_compressed=*/false, "FfiCallbackMetadata::TrampolinePage");
AddAllTrampolinesToFreeListLocked(page_start);
#endif // defined(DART_TARGET_OS_FUCHSIA)
}
FfiCallbackMetadata::~FfiCallbackMetadata() {
// Unmap all the trampoline pages. 'VirtualMemory's are new-allocated.
delete stub_page_;
for (intptr_t i = 0; i < trampoline_pages_.length(); ++i) {
delete trampoline_pages_[i];
}
#if defined(DART_TARGET_OS_FUCHSIA)
// TODO(https://dartbug.com/52579): Remove.
delete fuchsia_metadata_page_;
#endif // defined(DART_TARGET_OS_FUCHSIA)
}
void FfiCallbackMetadata::Init() {
ASSERT(singleton_ == nullptr);
singleton_ = new FfiCallbackMetadata();
}
void FfiCallbackMetadata::Cleanup() {
ASSERT(singleton_ != nullptr);
delete singleton_;
singleton_ = nullptr;
}
FfiCallbackMetadata* FfiCallbackMetadata::Instance() {
ASSERT(singleton_ != nullptr);
return singleton_;
}
void FfiCallbackMetadata::FillRuntimeFunction(VirtualMemory* page,
uword index,
void* function) {
uword offset = offset_of_first_runtime_function_in_page_ +
index * compiler::target::kWordSize;
void** slot = reinterpret_cast<void**>(page->start() + offset);
*slot = function;
}
VirtualMemory* FfiCallbackMetadata::AllocateTrampolinePage() {
#if defined(DART_TARGET_OS_FUCHSIA)
return nullptr;
#else
VirtualMemory* new_page = VirtualMemory::AllocateAligned(
size_of_trampoline_page_, kPageSize, /*is_executable=*/false,
/*is_compressed=*/false, "FfiCallbackMetadata::TrampolinePage");
if (new_page == nullptr) {
return nullptr;
}
if (!stub_page_->DuplicateRX(new_page)) {
delete new_page;
return nullptr;
}
return new_page;
#endif // defined(DART_TARGET_OS_FUCHSIA)
}
void FfiCallbackMetadata::AddAllTrampolinesToFreeListLocked(uword page_start) {
// Assumes lock_ is already locked for writing.
const intptr_t trampolines_per_page = NumCallbackTrampolinesPerPage();
for (intptr_t i = 0; i < trampolines_per_page; ++i) {
const Trampoline trampoline = reinterpret_cast<Trampoline>(
page_start + offset_of_first_trampoline_in_page_ +
i * kNativeCallbackTrampolineSize);
AddToFreeListLocked(trampoline, LookupEntryLocked(trampoline));
}
}
void FfiCallbackMetadata::EnsureFreeListNotEmptyLocked() {
EnsureStubPageLocked();
// Assumes lock_ is already locked for writing.
if (free_list_head_ != nullptr) {
return;
}
VirtualMemory* new_page = AllocateTrampolinePage();
if (new_page == nullptr) {
Exceptions::ThrowOOM();
}
trampoline_pages_.Add(new_page);
// Fill in the runtime functions.
FillRuntimeFunction(new_page, kGetFfiCallbackMetadata,
reinterpret_cast<void*>(DLRT_GetFfiCallbackMetadata));
AddAllTrampolinesToFreeListLocked(new_page->start());
}
FfiCallbackMetadata::Trampoline
FfiCallbackMetadata::AllocateTrampolineLocked() {
// Assumes lock_ is already locked for writing.
EnsureFreeListNotEmptyLocked();
ASSERT(free_list_head_ != nullptr);
const Trampoline trampoline = free_list_head_;
auto* entry = LookupEntryLocked(trampoline);
free_list_head_ = entry->free_list_next_;
if (free_list_head_ == nullptr) {
ASSERT(free_list_tail_ == trampoline);
free_list_tail_ = nullptr;
}
return trampoline;
}
void FfiCallbackMetadata::AddToFreeListLocked(Trampoline trampoline,
Metadata* entry) {
// Assumes lock_ is already locked for writing.
if (free_list_tail_ == nullptr) {
ASSERT(free_list_head_ == nullptr);
free_list_head_ = free_list_tail_ = trampoline;
} else {
ASSERT(free_list_head_ != nullptr);
auto* tail = LookupEntryLocked(free_list_tail_);
ASSERT(!tail->IsLive());
ASSERT(tail->free_list_next_ == nullptr);
tail->free_list_next_ = trampoline;
free_list_tail_ = trampoline;
}
entry->target_isolate_ = nullptr;
entry->free_list_next_ = nullptr;
}
void FfiCallbackMetadata::DeleteSyncTrampolines(Trampoline* sync_list_head) {
WriteRwLocker locker(Thread::Current(), &lock_);
for (Trampoline trampoline = *sync_list_head; trampoline != nullptr;) {
auto* entry = LookupEntryLocked(trampoline);
ASSERT(entry != nullptr);
const Trampoline next_trampoline = entry->sync_list_next();
AddToFreeListLocked(trampoline, entry);
trampoline = next_trampoline;
}
*sync_list_head = nullptr;
}
FfiCallbackMetadata::Trampoline FfiCallbackMetadata::CreateFfiCallback(
Isolate* isolate,
Zone* zone,
const Function& function,
Trampoline* sync_list_head) {
const auto& code =
Code::Handle(zone, FLAG_precompiled_mode ? function.CurrentCode()
: function.EnsureHasCode());
ASSERT(!code.IsNull());
const uword target_entry_point = code.EntryPoint();
const Trampoline sync_list_next = *sync_list_head;
TrampolineType trampoline_type = TrampolineType::kSync;
#if defined(TARGET_ARCH_IA32)
// On ia32, store the stack delta that we need to use when returning.
const intptr_t stack_return_delta =
function.FfiCSignatureReturnsStruct() && CallingConventions::kUsesRet4
? compiler::target::kWordSize
: 0;
if (stack_return_delta != 0) {
ASSERT(stack_return_delta == 4);
trampoline_type = TrampolineType::kSyncStackDelta4;
}
#endif
WriteRwLocker locker(Thread::Current(), &lock_);
const Trampoline trampoline = AllocateTrampolineLocked();
*sync_list_head = trampoline;
*LookupEntryLocked(trampoline) =
Metadata(isolate, target_entry_point, sync_list_next, trampoline_type);
return trampoline;
}
FfiCallbackMetadata::Trampoline FfiCallbackMetadata::CreateSyncFfiCallback(
Isolate* isolate,
Zone* zone,
const Function& function,
Trampoline* sync_list_head) {
return CreateFfiCallback(isolate, zone, function, sync_list_head);
}
FfiCallbackMetadata::Metadata* FfiCallbackMetadata::LookupEntryLocked(
Trampoline trampoline) const {
// Assumes lock_ is already locked for reading or writing.
const uword location = reinterpret_cast<uword>(trampoline);
// The location that the trampoline would be if the code page was aligned.
const uword aligned_location = location - offset_of_first_trampoline_in_page_;
// Since the code page isn't aligned, the trampoline may actually be in the
// following page. So round down the aligned_location, not the raw location.
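// For example (illustrative values only): if the first trampoline sits 0x400
// bytes into its page and kNativeCallbackTrampolineSize is 8, a trampoline at
// page_start + 0x418 has aligned_location page_start + 0x18, so offset is
// 0x18 and index is 3.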
const uword page_start = Utils::RoundDown(aligned_location, kPageSize);
const uword offset = aligned_location - page_start;
ASSERT_EQUAL(offset % kNativeCallbackTrampolineSize, 0);
const intptr_t index = offset / kNativeCallbackTrampolineSize;
ASSERT(index < NumCallbackTrampolinesPerPage());
#if defined(DART_TARGET_OS_FUCHSIA)
// On Fuchsia the metadata page is separate from the trampoline page.
// TODO(https://dartbug.com/52579): Remove.
const uword metadata_table =
fuchsia_metadata_page_->start() + offset_of_first_metadata_in_page_;
#else
const uword metadata_table = page_start + offset_of_first_metadata_in_page_;
#endif // defined(DART_TARGET_OS_FUCHSIA)
return reinterpret_cast<Metadata*>(metadata_table) + index;
}
FfiCallbackMetadata::Metadata FfiCallbackMetadata::LookupMetadataForTrampoline(
Trampoline trampoline) const {
// Note: The locker's thread may be null because this method is explicitly
// designed to be usable outside of a VM thread.
ReadRwLocker locker(Thread::Current(), &lock_);
return *LookupEntryLocked(trampoline);
}
FfiCallbackMetadata* FfiCallbackMetadata::singleton_ = nullptr;
} // namespace dart

View file

@ -0,0 +1,239 @@
// Copyright (c) 2023, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_FFI_CALLBACK_METADATA_H_
#define RUNTIME_VM_FFI_CALLBACK_METADATA_H_
#include "platform/growable_array.h"
#include "platform/utils.h"
#include "vm/hash_map.h"
#include "vm/lockers.h"
#include "vm/virtual_memory.h"
namespace dart {
// Stores metadata related to FFI callbacks (Dart functions that are assigned a
// function pointer that can be invoked by native code). This is essentially a
// map from trampoline pointer to Metadata, plus the logic to allocate and
// memory-manage those trampolines.
//
// In the past, callbacks were primarily identified by an integer ID, but in
// this class we identify them by their trampoline pointer to solve a very
// specific issue. The trampolines are allocated in pages. On iOS in AOT mode,
// we can't create new executable memory, but we can duplicate existing memory.
// When we were using numeric IDs to identify the trampolines, each trampoline
// page was different, because the IDs were embedded in the machine code. So we
// couldn't use trampolines in AOT mode. But if we key the metadata table by the
// trampoline pointer, then the trampoline just has to look up the PC at the
// start of the trampoline function, so the machine code will always be the
// same. This means we can just duplicate the trampoline page, allowing us to
// unify the FFI callback implementation across JIT and AOT, even on iOS.
class FfiCallbackMetadata {
public:
using Trampoline = void*;
enum class TrampolineType : uint8_t {
kSync = 0,
kAsync = 1,
#if defined(TARGET_ARCH_IA32)
kSyncStackDelta4 = 2,
#endif
};
enum RuntimeFunctions {
kGetFfiCallbackMetadata,
kNumRuntimeFunctions,
};
static void Init();
static void Cleanup();
// Returns the FfiCallbackMetadata singleton.
static FfiCallbackMetadata* Instance();
// Creates a sync callback trampoline for the given function.
Trampoline CreateSyncFfiCallback(Isolate* isolate,
Zone* zone,
const Function& function,
Trampoline* sync_list_head);
// Deletes all the sync trampolines in the list.
void DeleteSyncTrampolines(Trampoline* sync_list_head);
// FFI callback metadata for any sync or async trampoline.
class Metadata {
Isolate* target_isolate_ = nullptr;
union {
// IsLive()
struct {
// Note: This is a pointer into an Instructions object. This is only
// safe because Instructions objects are never moved by the GC.
uword target_entry_point_;
Trampoline sync_list_next_;
TrampolineType trampoline_type_;
};
// !IsLive()
Trampoline free_list_next_;
};
Metadata()
: target_entry_point_(0),
sync_list_next_(nullptr),
trampoline_type_(TrampolineType::kSync) {}
Metadata(Isolate* target_isolate,
uword target_entry_point,
Trampoline sync_list_next,
TrampolineType trampoline_type)
: target_isolate_(target_isolate),
target_entry_point_(target_entry_point),
sync_list_next_(sync_list_next),
trampoline_type_(trampoline_type) {}
public:
friend class FfiCallbackMetadata;
bool operator==(const Metadata& other) const {
return target_isolate_ == other.target_isolate_ &&
target_entry_point_ == other.target_entry_point_ &&
sync_list_next_ == other.sync_list_next_ &&
trampoline_type_ == other.trampoline_type_;
}
bool operator!=(const Metadata& other) const { return !(*this == other); }
// Whether the callback is still alive.
bool IsLive() const { return target_isolate_ != 0; }
// The target isolate. The isolate that owns the callback. Sync callbacks
// must be invoked on this isolate. Async callbacks will send a message to
// this isolate.
Isolate* target_isolate() const {
ASSERT(IsLive());
return target_isolate_;
}
// The Dart entrypoint for the callback, which the trampoline invokes.
uword target_entry_point() const {
ASSERT(IsLive());
return target_entry_point_;
}
// To efficiently delete all the sync callbacks for an isolate, they are
// stored in a singly-linked list. This is the next link in that list.
Trampoline sync_list_next() const {
ASSERT(IsLive());
return sync_list_next_;
}
// Tells FfiCallbackTrampolineStub how to call into the entry point. Mostly
// it's just a flag for whether this is a sync or async callback, but on
// IA32 it also encodes whether there's a stack delta of 4 to deal with.
TrampolineType trampoline_type() const {
ASSERT(IsLive());
return trampoline_type_;
}
};
// Returns the Metadata object for the given trampoline.
Metadata LookupMetadataForTrampoline(Trampoline trampoline) const;
// The number of trampolines that can be stored on a single page.
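// For example (illustrative): for an X64 target with the default 4 KB
// kPageSize this is (4096 - 289) / 12 = 317 trampolines.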
static intptr_t NumCallbackTrampolinesPerPage() {
return (kPageSize - kNativeCallbackSharedStubSize) /
kNativeCallbackTrampolineSize;
}
// Size of the trampoline page. Ideally we'd use VirtualMemory::PageSize(),
// but that varies across machines, and we need it to be consistent between
// host and target since it affects stub code generation. So kPageSize may be
// an overestimate of the target's VirtualMemory::PageSize(), but we try to
// get it as close as possible to avoid wasting memory.
#if defined(DART_TARGET_OS_LINUX) && defined(TARGET_ARCH_ARM64)
static constexpr intptr_t kPageSize = 64 * KB;
#elif defined(DART_TARGET_OS_MACOS) && defined(TARGET_ARCH_ARM64)
static constexpr intptr_t kPageSize = 16 * KB;
#elif defined(DART_TARGET_OS_FUCHSIA)
// Fuchsia only gets one page, so make it big.
// TODO(https://dartbug.com/52579): Remove.
static constexpr intptr_t kPageSize = 64 * KB;
#else
static constexpr intptr_t kPageSize = 4 * KB;
#endif
static constexpr intptr_t kPageMask = ~(kPageSize - 1);
// Offset from the start of the trampoline code page to a specific slot in the
// runtime function table.
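// For example (illustrative): RuntimeFunctionOffset(kGetFfiCallbackMetadata)
// is 2 * kPageSize, the first word of the page that follows the two
// stub/alignment pages; each further function sits one target word later.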
static uword RuntimeFunctionOffset(uword function_index) {
return 2 * kPageSize + function_index * compiler::target::kWordSize;
}
#if defined(TARGET_ARCH_X64)
static constexpr intptr_t kNativeCallbackTrampolineSize = 12;
static constexpr intptr_t kNativeCallbackSharedStubSize = 289;
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 2;
#elif defined(TARGET_ARCH_IA32)
static constexpr intptr_t kNativeCallbackTrampolineSize = 10;
static constexpr intptr_t kNativeCallbackSharedStubSize = 142;
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 4;
#elif defined(TARGET_ARCH_ARM)
static constexpr intptr_t kNativeCallbackTrampolineSize = 8;
static constexpr intptr_t kNativeCallbackSharedStubSize = 196;
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 4;
#elif defined(TARGET_ARCH_ARM64)
static constexpr intptr_t kNativeCallbackTrampolineSize = 8;
static constexpr intptr_t kNativeCallbackSharedStubSize = 296;
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 2;
#elif defined(TARGET_ARCH_RISCV32)
static constexpr intptr_t kNativeCallbackTrampolineSize = 8;
static constexpr intptr_t kNativeCallbackSharedStubSize = 230;
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 2;
#elif defined(TARGET_ARCH_RISCV64)
static constexpr intptr_t kNativeCallbackTrampolineSize = 8;
static constexpr intptr_t kNativeCallbackSharedStubSize = 202;
static constexpr intptr_t kNativeCallbackTrampolineStackDelta = 2;
#else
#error What architecture?
#endif
private:
FfiCallbackMetadata();
~FfiCallbackMetadata();
void EnsureStubPageLocked();
void AddAllTrampolinesToFreeListLocked(uword page_start);
void AddToFreeListLocked(Trampoline trampoline, Metadata* entry);
void FillRuntimeFunction(VirtualMemory* page, uword index, void* function);
VirtualMemory* AllocateTrampolinePage();
void EnsureFreeListNotEmptyLocked();
Trampoline AllocateTrampolineLocked();
Trampoline TryAllocateFromFreeListLocked();
Trampoline CreateFfiCallback(Isolate* isolate,
Zone* zone,
const Function& function,
Trampoline* sync_list_head);
Metadata* LookupEntryLocked(Trampoline trampoline) const;
static FfiCallbackMetadata* singleton_;
mutable RwLock lock_;
VirtualMemory* stub_page_ = nullptr;
MallocGrowableArray<VirtualMemory*> trampoline_pages_;
uword offset_of_first_trampoline_in_page_ = 0;
uword offset_of_first_runtime_function_in_page_ = 0;
uword offset_of_first_metadata_in_page_ = 0;
uword size_of_trampoline_page_ = 0;
Trampoline free_list_head_ = nullptr;
Trampoline free_list_tail_ = nullptr;
#if defined(DART_TARGET_OS_FUCHSIA)
// TODO(https://dartbug.com/52579): Remove.
VirtualMemory* fuchsia_metadata_page_ = nullptr;
#endif // defined(DART_TARGET_OS_FUCHSIA)
DISALLOW_COPY_AND_ASSIGN(FfiCallbackMetadata);
};
} // namespace dart
#endif // RUNTIME_VM_FFI_CALLBACK_METADATA_H_
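For orientation, a minimal sketch of how the pieces above are meant to fit together, using only names declared in this header and on Isolate; `isolate`, `zone`, and `function` are assumed to be set up as in the unit tests that follow:
// A minimal sketch, assuming `isolate`, `zone` and `function` are prepared as
// in the FfiCallbackMetadata_CreateSyncFfiCallback test below.
void* tramp = isolate->CreateSyncFfiCallback(zone, function);
FfiCallbackMetadata::Metadata m =
    FfiCallbackMetadata::Instance()->LookupMetadataForTrampoline(tramp);
ASSERT(m.IsLive());
// At call time the shared trampoline stub performs the equivalent lookup
// (via DLRT_GetFfiCallbackMetadata) and calls m.target_entry_point() on
// behalf of m.target_isolate(). Isolate shutdown releases the trampoline
// through DeleteSyncTrampolines().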

View file

@ -0,0 +1,279 @@
// Copyright (c) 2023, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/ffi_callback_metadata.h"
#include <memory>
#include <thread> // NOLINT(build/c++11)
#include <unordered_set>
#include <vector>
#include "include/dart_api.h"
#include "platform/assert.h"
#include "vm/class_finalizer.h"
#include "vm/compiler/ffi/callback.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/message_handler.h"
#include "vm/object.h"
#include "vm/port.h"
#include "vm/symbols.h"
#include "vm/unit_test.h"
namespace dart {
FunctionPtr CreateTestFunction() {
const auto& ffi_lib = Library::Handle(Library::FfiLibrary());
const auto& ffi_void = Class::Handle(ffi_lib.LookupClass(Symbols::FfiVoid()));
const auto& ffi_void_type =
Type::Handle(Type::NewNonParameterizedType(ffi_void));
auto* thread = Thread::Current();
const char* kScriptChars =
R"(
void testFunction() {
}
)";
Dart_Handle library;
{
TransitionVMToNative transition(thread);
library = TestCase::LoadTestScript(kScriptChars, nullptr);
EXPECT_VALID(library);
}
const auto& lib =
Library::Handle(Library::RawCast(Api::UnwrapHandle(library)));
EXPECT(ClassFinalizer::ProcessPendingClasses());
const auto& cls = Class::Handle(lib.toplevel_class());
EXPECT(!cls.IsNull());
const auto& error = cls.EnsureIsFinalized(thread);
EXPECT(error == Error::null());
auto& function_name = String::Handle(String::New("testFunction"));
const auto& func = Function::Handle(cls.LookupStaticFunction(function_name));
EXPECT(!func.IsNull());
FunctionType& signature = FunctionType::Handle(FunctionType::New());
signature.set_result_type(ffi_void_type);
signature.SetIsFinalized();
signature ^= signature.Canonicalize(thread);
const auto& callback = Function::Handle(compiler::ffi::NativeCallbackFunction(
signature, func, Instance::Handle(Instance::null())));
const auto& result = Object::Handle(
thread->zone(), Compiler::CompileFunction(thread, callback));
EXPECT(!result.IsError());
return callback.ptr();
}
class FakeMessageHandler : public MessageHandler {
public:
MessageStatus HandleMessage(std::unique_ptr<Message> message) override {
return MessageHandler::kOK;
}
};
VM_UNIT_TEST_CASE(FfiCallbackMetadata_CreateSyncFfiCallback) {
auto* fcm = FfiCallbackMetadata::Instance();
FfiCallbackMetadata::Trampoline tramp1 = nullptr;
FfiCallbackMetadata::Trampoline tramp2 = nullptr;
{
TestIsolateScope isolate_scope;
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
ASSERT(isolate == isolate_scope.isolate());
TransitionNativeToVM transition(thread);
StackZone stack_zone(thread);
HandleScope handle_scope(thread);
auto* zone = thread->zone();
const auto& func = Function::Handle(CreateTestFunction());
const auto& code = Code::Handle(func.EnsureHasCode());
EXPECT(!code.IsNull());
tramp1 = isolate->CreateSyncFfiCallback(zone, func);
EXPECT_NE(tramp1, nullptr);
FfiCallbackMetadata::Metadata m1 = fcm->LookupMetadataForTrampoline(tramp1);
EXPECT(m1.IsLive());
EXPECT_EQ(m1.target_isolate(), isolate);
EXPECT_EQ(m1.target_entry_point(), code.EntryPoint());
EXPECT_EQ(static_cast<uint8_t>(m1.trampoline_type()),
static_cast<uint8_t>(FfiCallbackMetadata::TrampolineType::kSync));
EXPECT_EQ(isolate->ffi_callback_sync_list_head(), tramp1);
EXPECT_EQ(m1.sync_list_next(), nullptr);
tramp2 = isolate->CreateSyncFfiCallback(zone, func);
EXPECT_NE(tramp2, nullptr);
EXPECT_NE(tramp2, tramp1);
FfiCallbackMetadata::Metadata m2 = fcm->LookupMetadataForTrampoline(tramp2);
EXPECT(m2.IsLive());
EXPECT_EQ(m2.target_isolate(), isolate);
EXPECT_EQ(m2.target_entry_point(), code.EntryPoint());
EXPECT_EQ(static_cast<uint8_t>(m2.trampoline_type()),
static_cast<uint8_t>(FfiCallbackMetadata::TrampolineType::kSync));
EXPECT_EQ(isolate->ffi_callback_sync_list_head(), tramp2);
EXPECT_EQ(m2.sync_list_next(), tramp1);
EXPECT_EQ(m1.sync_list_next(), nullptr);
}
{
// Isolate has shut down, so all sync callbacks should be deleted.
FfiCallbackMetadata::Metadata m1 = fcm->LookupMetadataForTrampoline(tramp1);
EXPECT(!m1.IsLive());
FfiCallbackMetadata::Metadata m2 = fcm->LookupMetadataForTrampoline(tramp2);
EXPECT(!m2.IsLive());
}
}
VM_UNIT_TEST_CASE(FfiCallbackMetadata_DeleteSyncTrampolines) {
static constexpr int kIterations = 1000;
TestIsolateScope isolate_scope;
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
ASSERT(isolate == isolate_scope.isolate());
TransitionNativeToVM transition(thread);
StackZone stack_zone(thread);
HandleScope handle_scope(thread);
auto* fcm = FfiCallbackMetadata::Instance();
std::unordered_set<FfiCallbackMetadata::Trampoline> sync_tramps;
FfiCallbackMetadata::Trampoline sync_list_head = nullptr;
const auto& sync_func = Function::Handle(CreateTestFunction());
const auto& sync_code = Code::Handle(sync_func.EnsureHasCode());
EXPECT(!sync_code.IsNull());
for (int itr = 0; itr < kIterations; ++itr) {
sync_tramps.insert(fcm->CreateSyncFfiCallback(isolate, thread->zone(),
sync_func, &sync_list_head));
}
// Verify all the callbacks.
for (FfiCallbackMetadata::Trampoline tramp : sync_tramps) {
auto metadata = fcm->LookupMetadataForTrampoline(tramp);
EXPECT(metadata.IsLive());
EXPECT_EQ(metadata.target_isolate(), isolate);
EXPECT_EQ(metadata.target_entry_point(), sync_code.EntryPoint());
EXPECT_EQ(static_cast<uint8_t>(metadata.trampoline_type()),
static_cast<uint8_t>(FfiCallbackMetadata::TrampolineType::kSync));
}
// Verify the list of callbacks.
uword sync_list_length = 0;
for (FfiCallbackMetadata::Trampoline tramp = sync_list_head; tramp != 0;) {
++sync_list_length;
auto metadata = fcm->LookupMetadataForTrampoline(tramp);
EXPECT(metadata.IsLive());
EXPECT_EQ(metadata.target_isolate(), isolate);
EXPECT_EQ(sync_tramps.count(tramp), 1u);
tramp = metadata.sync_list_next();
}
EXPECT_EQ(sync_list_length, sync_tramps.size());
// Delete all callbacks and verify they're destroyed.
fcm->DeleteSyncTrampolines(&sync_list_head);
EXPECT_EQ(sync_list_head, nullptr);
for (FfiCallbackMetadata::Trampoline tramp : sync_tramps) {
EXPECT(!fcm->LookupMetadataForTrampoline(tramp).IsLive());
}
}
static void RunBigRandomMultithreadedTest(uint64_t seed) {
static constexpr int kIterations = 1000;
TestIsolateScope isolate_scope;
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
ASSERT(isolate == isolate_scope.isolate());
TransitionNativeToVM transition(thread);
StackZone stack_zone(thread);
HandleScope handle_scope(thread);
auto* fcm = FfiCallbackMetadata::Instance();
Random random(seed);
std::unordered_set<FfiCallbackMetadata::Trampoline> sync_tramps;
FfiCallbackMetadata::Trampoline sync_list_head = nullptr;
const auto& sync_func = Function::Handle(CreateTestFunction());
const auto& sync_code = Code::Handle(sync_func.EnsureHasCode());
EXPECT(!sync_code.IsNull());
for (int itr = 0; itr < kIterations; ++itr) {
// Do a random action:
// - Allocate a sync callback from one of the threads
// - Allocate an async callback from one of the threads
// - Delete an async callback
// - Delete all the sync callbacks for an isolate
// Sync callbacks. Randomly create and destroy them, but make destruction
// rare since all sync callbacks for the isolate are deleted at once.
if ((random.NextUInt32() % 100) == 0) {
// Delete.
fcm->DeleteSyncTrampolines(&sync_list_head);
// It would be nice to verify that all the sync_tramps have been deleted,
// but this is flaky because other threads can recycle these trampolines
// before we finish checking all of them.
EXPECT_EQ(sync_list_head, nullptr);
sync_tramps.clear();
} else {
// Create.
sync_tramps.insert(fcm->CreateSyncFfiCallback(
isolate, thread->zone(), sync_func, &sync_list_head));
}
// Verify all the sync callbacks.
for (FfiCallbackMetadata::Trampoline tramp : sync_tramps) {
auto metadata = fcm->LookupMetadataForTrampoline(tramp);
EXPECT(metadata.IsLive());
EXPECT_EQ(metadata.target_isolate(), isolate);
EXPECT_EQ(metadata.target_entry_point(), sync_code.EntryPoint());
EXPECT_EQ(
static_cast<uint8_t>(metadata.trampoline_type()),
static_cast<uint8_t>(FfiCallbackMetadata::TrampolineType::kSync));
}
// Verify the isolate's list of sync callbacks.
uword sync_list_length = 0;
for (FfiCallbackMetadata::Trampoline tramp = sync_list_head; tramp != 0;) {
++sync_list_length;
auto metadata = fcm->LookupMetadataForTrampoline(tramp);
EXPECT(metadata.IsLive());
EXPECT_EQ(metadata.target_isolate(), isolate);
EXPECT_EQ(sync_tramps.count(tramp), 1u);
tramp = metadata.sync_list_next();
}
EXPECT_EQ(sync_list_length, sync_tramps.size());
}
// Delete all remaining callbacks.
fcm->DeleteSyncTrampolines(&sync_list_head);
EXPECT_EQ(sync_list_head, nullptr);
}
ISOLATE_UNIT_TEST_CASE(FfiCallbackMetadata_BigRandomMultithreadedTest) {
static constexpr int kThreads = 5;
std::vector<std::thread> threads;
Random random;
for (int i = 0; i < kThreads; ++i) {
threads.push_back(
std::thread(RunBigRandomMultithreadedTest, random.NextUInt64()));
}
for (auto& thread : threads) {
thread.join();
}
}
} // namespace dart

View file

@ -1,107 +0,0 @@
// Copyright (c) 2023, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/ffi_callback_trampolines.h"
#if !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/code_comments.h"
#include "vm/code_observers.h"
#include "vm/compiler/assembler/assembler.h"
#include "vm/compiler/assembler/disassembler.h"
#include "vm/exceptions.h"
#endif // !defined(DART_PRECOMPILED_RUNTIME)
namespace dart {
#if !defined(DART_PRECOMPILED_RUNTIME)
uword NativeCallbackTrampolines::TrampolineForId(int32_t callback_id) {
#if defined(DART_PRECOMPILER)
ASSERT(!Enabled());
UNREACHABLE();
#else
const intptr_t trampolines_per_page = NumCallbackTrampolinesPerPage();
const intptr_t page_index = callback_id / trampolines_per_page;
const uword entry_point = trampoline_pages_[page_index]->start();
return entry_point +
(callback_id % trampolines_per_page) *
compiler::StubCodeCompiler::kNativeCallbackTrampolineSize;
#endif
}
void NativeCallbackTrampolines::AllocateTrampoline() {
#if defined(DART_PRECOMPILER)
ASSERT(!Enabled());
UNREACHABLE();
#else
// Callback IDs are limited to 32-bits for trampoline compactness.
if (kWordSize == 8 &&
!Utils::IsInt(32, next_callback_id_ + NumCallbackTrampolinesPerPage())) {
Exceptions::ThrowOOM();
}
if (trampolines_left_on_page_ == 0) {
// Fuchsia requires memory to be allocated with ZX_RIGHT_EXECUTE in order
// to be flipped to kReadExecute after being kReadWrite.
VirtualMemory* const memory = VirtualMemory::AllocateAligned(
/*size=*/VirtualMemory::PageSize(),
/*alignment=*/VirtualMemory::PageSize(),
/*is_executable=*/true,
/*is_compressed=*/false,
/*name=*/"Dart VM FFI callback trampolines");
memory->Protect(VirtualMemory::kReadWrite);
if (memory == nullptr) {
Exceptions::ThrowOOM();
}
trampoline_pages_.Add(memory);
compiler::Assembler assembler(/*object_pool_builder=*/nullptr);
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
stubCodeCompiler.GenerateJITCallbackTrampolines(next_callback_id_);
MemoryRegion region(memory->address(), memory->size());
assembler.FinalizeInstructions(region);
memory->Protect(VirtualMemory::kReadExecute);
#if !defined(PRODUCT)
const char* name = "FfiJitCallbackTrampolines";
if (CodeObservers::AreActive()) {
const auto& comments = CreateCommentsFrom(&assembler);
CodeObservers::NotifyAll(name,
/*base=*/memory->start(),
/*prologue_offset=*/0,
/*size=*/assembler.CodeSize(),
/*optimized=*/false, // not really relevant
&comments);
}
#endif
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_DISASSEMBLER)
if (FLAG_disassemble_stubs && FLAG_support_disassembler) {
DisassembleToStdout formatter;
THR_Print(
"Code for native callback trampolines "
"[%" Pd " -> %" Pd "]: {\n",
next_callback_id_,
next_callback_id_ + NumCallbackTrampolinesPerPage() - 1);
const auto& comments = CreateCommentsFrom(&assembler);
Disassembler::Disassemble(memory->start(),
memory->start() + assembler.CodeSize(),
&formatter, &comments);
}
#endif
trampolines_left_on_page_ = NumCallbackTrampolinesPerPage();
}
trampolines_left_on_page_--;
next_callback_id_++;
#endif // defined(DART_PRECOMPILER)
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
} // namespace dart

View file

@ -1,78 +0,0 @@
// Copyright (c) 2023, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_FFI_CALLBACK_TRAMPOLINES_H_
#define RUNTIME_VM_FFI_CALLBACK_TRAMPOLINES_H_
#include "platform/allocation.h"
#include "platform/growable_array.h"
#include "vm/flag_list.h"
#include "vm/virtual_memory.h"
#if !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/compiler/stub_code_compiler.h"
#endif // !defined(DART_PRECOMPILED_RUNTIME)
namespace dart {
#if !defined(DART_PRECOMPILED_RUNTIME)
// In JIT mode, when write-protection is enabled without dual-mapping, we cannot
// rely on Instructions generated in the Isolate's heap to be executable while
// native code is running in a safepoint. This means that native code cannot
// directly invoke FFI callback trampolines.
//
// To solve this, we create trampolines tied to consecutive sequences of
// callback IDs which leave the safepoint before invoking the FFI callback,
// and re-enter the safepoint on return from the callback.
//
// Since we can never map these trampolines RX -> RW, we eagerly generate as
// many as will fit on a single page, since pages are the smallest granularity
// of memory protection.
//
// See also:
// - StubCodeCompiler::GenerateJITCallbackTrampolines
// - {NativeEntryInstr, NativeReturnInstr}::EmitNativeCode
DECLARE_FLAG(bool, write_protect_code);
class NativeCallbackTrampolines : public ValueObject {
public:
static bool Enabled() { return !FLAG_precompiled_mode; }
static intptr_t NumCallbackTrampolinesPerPage() {
return (VirtualMemory::PageSize() -
compiler::StubCodeCompiler::kNativeCallbackSharedStubSize) /
compiler::StubCodeCompiler::kNativeCallbackTrampolineSize;
}
NativeCallbackTrampolines() {}
~NativeCallbackTrampolines() {
// Unmap all the trampoline pages. 'VirtualMemory's are new-allocated.
for (intptr_t i = 0; i < trampoline_pages_.length(); ++i) {
delete trampoline_pages_[i];
}
}
// For each callback ID, we have an entry in Thread::ffi_callback_code_ and
// a trampoline here. These arrays must be kept in sync and this method is
// exposed to assert that.
intptr_t next_callback_id() const { return next_callback_id_; }
// Allocates a callback trampoline corresponding to the callback id
// 'next_callback_id()'. Returns an entrypoint to the trampoline.
void AllocateTrampoline();
// Get the entrypoint for a previously allocated callback ID.
uword TrampolineForId(int32_t callback_id);
private:
MallocGrowableArray<VirtualMemory*> trampoline_pages_;
intptr_t trampolines_left_on_page_ = 0;
intptr_t next_callback_id_ = 0;
DISALLOW_COPY_AND_ASSIGN(NativeCallbackTrampolines);
};
#endif // !defined(DART_PRECOMPILED_RUNTIME)
} // namespace dart
#endif // RUNTIME_VM_FFI_CALLBACK_TRAMPOLINES_H_

View file

@ -1361,7 +1361,8 @@ const char* ImageWriter::Deobfuscate(Zone* zone,
void AssemblyImageWriter::WriteBss(bool vm) {
EnterSection(ProgramSection::Bss, vm, ImageWriter::kBssAlignment);
auto const entry_count = vm ? BSS::kVmEntryCount : BSS::kIsolateEntryCount;
auto const entry_count =
vm ? BSS::kVmEntryCount : BSS::kIsolateGroupEntryCount;
for (intptr_t i = 0; i < entry_count; i++) {
// All bytes in the .bss section must be zero.
WriteTargetWord(0);

View file

@ -20,6 +20,7 @@
#include "vm/debugger.h"
#include "vm/deopt_instructions.h"
#include "vm/dispatch_table.h"
#include "vm/ffi_callback_metadata.h"
#include "vm/flags.h"
#include "vm/heap/heap.h"
#include "vm/heap/pointer_block.h"
@ -335,9 +336,6 @@ IsolateGroup::IsolateGroup(std::shared_ptr<IsolateGroupSource> source,
start_time_micros_(OS::GetCurrentMonotonicMicros()),
is_system_isolate_group_(source->flags.is_system_isolate),
random_(),
#if !defined(DART_PRECOMPILED_RUNTIME)
native_callback_trampolines_(),
#endif
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
last_reload_timestamp_(OS::GetCurrentTimeMillis()),
reload_every_n_stack_overflow_checks_(FLAG_reload_every),
@ -2308,6 +2306,13 @@ void Isolate::LowLevelShutdown() {
delete message_handler();
set_message_handler(nullptr);
// Clean up any synchronous FFI callbacks registered with this isolate. Skip
// if this isolate never registered any.
if (ffi_callback_sync_list_head_ != nullptr) {
FfiCallbackMetadata::Instance()->DeleteSyncTrampolines(
&ffi_callback_sync_list_head_);
}
#if !defined(PRODUCT)
if (FLAG_dump_megamorphic_stats) {
MegamorphicCacheTable::PrintSizes(this);
@ -2501,7 +2506,7 @@ void Isolate::LowLevelCleanup(Isolate* isolate) {
// memory might have become unreachable. We should evaluate how to best
// inform the GC about this situation.
}
} // namespace dart
}
Dart_InitializeIsolateCallback Isolate::initialize_callback_ = nullptr;
Dart_IsolateGroupCreateCallback Isolate::create_group_callback_ = nullptr;
@ -3514,6 +3519,11 @@ void Isolate::WaitForOutstandingSpawns() {
}
}
void* Isolate::CreateSyncFfiCallback(Zone* zone, const Function& function) {
return FfiCallbackMetadata::Instance()->CreateSyncFfiCallback(
this, zone, function, &ffi_callback_sync_list_head_);
}
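
The synchronous-callback bookkeeping added here is an intrusive singly-linked list: CreateSyncFfiCallback() pushes new metadata onto the isolate's ffi_callback_sync_list_head_, and LowLevelShutdown() hands that head to DeleteSyncTrampolines() to free the whole chain. A minimal sketch of the pattern, using illustrative types rather than the actual FfiCallbackMetadata API:

// Illustrative stand-in for one trampoline's metadata entry.
struct CallbackEntry {
  void* target_entry_point;  // Entry point the trampoline will jump to.
  CallbackEntry* next;       // Intrusive link to this isolate's next entry.
};

// Registration: push a new entry onto the isolate's list head.
CallbackEntry* RegisterCallback(CallbackEntry** list_head, void* entry_point) {
  CallbackEntry* entry = new CallbackEntry{entry_point, *list_head};
  *list_head = entry;
  return entry;
}

// Isolate shutdown: delete every entry reachable from the head.
void DeleteAllCallbacks(CallbackEntry** list_head) {
  CallbackEntry* entry = *list_head;
  while (entry != nullptr) {
    CallbackEntry* next = entry->next;
    delete entry;
    entry = next;
  }
  *list_head = nullptr;
}

Keeping only a head pointer on the isolate means the common case, an isolate that never registers a callback, costs just the null check seen in LowLevelShutdown().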
#if !defined(PRODUCT)
void IsolateGroup::CloneClassTableForReload() {
RELEASE_ASSERT(class_table_ == heap_walk_class_table_);

View file

@ -37,10 +37,6 @@
#include "vm/token_position.h"
#include "vm/virtual_memory.h"
#if !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/ffi_callback_trampolines.h"
#endif // !defined(DART_PRECOMPILED_RUNTIME)
namespace dart {
// Forward declarations.
@ -420,12 +416,6 @@ class IsolateGroup : public IntrusiveDListEntry<IsolateGroup> {
bool is_system_isolate_group() const { return is_system_isolate_group_; }
#if !defined(DART_PRECOMPILED_RUNTIME)
NativeCallbackTrampolines* native_callback_trampolines() {
return &native_callback_trampolines_;
}
#endif
// IsolateGroup-specific flag handling.
static void FlagsInitialize(Dart_IsolateFlags* api_flags);
void FlagsCopyTo(Dart_IsolateFlags* api_flags);
@ -837,10 +827,6 @@ class IsolateGroup : public IntrusiveDListEntry<IsolateGroup> {
bool is_system_isolate_group_;
Random random_;
#if !defined(DART_PRECOMPILED_RUNTIME)
NativeCallbackTrampolines native_callback_trampolines_;
#endif
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
int64_t last_reload_timestamp_;
std::shared_ptr<IsolateGroupReloadContext> group_reload_context_;
@ -1256,6 +1242,11 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {
deopt_context_ = value;
}
void* CreateSyncFfiCallback(Zone* zone, const Function& function);
// Visible for testing.
void* ffi_callback_sync_list_head() { return ffi_callback_sync_list_head_; }
intptr_t BlockClassFinalization() {
ASSERT(defer_finalization_count_ >= 0);
return defer_finalization_count_++;
@ -1646,6 +1637,7 @@ class Isolate : public BaseIsolate, public IntrusiveDListEntry<Isolate> {
MessageHandler* message_handler_ = nullptr;
intptr_t defer_finalization_count_ = 0;
DeoptContext* deopt_context_ = nullptr;
void* ffi_callback_sync_list_head_ = nullptr;
GrowableObjectArrayPtr tag_table_;

View file

@ -207,7 +207,6 @@ class ObjectPointerVisitor;
RW(Array, unique_dynamic_targets) \
RW(GrowableObjectArray, megamorphic_cache_table) \
RW(GrowableObjectArray, ffi_callback_code) \
RW(TypedData, ffi_callback_stack_return) \
RW(Code, build_generic_method_extractor_code) \
RW(Code, build_nongeneric_method_extractor_code) \
RW(Code, dispatch_table_null_error_stub) \

View file

@ -17,6 +17,7 @@
#include "vm/debugger.h"
#include "vm/double_conversion.h"
#include "vm/exceptions.h"
#include "vm/ffi_callback_metadata.h"
#include "vm/flags.h"
#include "vm/heap/verifier.h"
#include "vm/instructions.h"
@ -3844,44 +3845,31 @@ DEFINE_RAW_LEAF_RUNTIME_ENTRY(ExitSafepointIgnoreUnwindInProgress,
false,
&DFLRT_ExitSafepointIgnoreUnwindInProgress);
// Ensure that 'callback_id' refers to a valid callback.
//
// If "entry != 0", additionally checks that entry is inside the instructions
// of this callback.
//
// Aborts if any of these conditions fails.
static void VerifyCallbackIdMetadata(Thread* thread,
int32_t callback_id,
uword entry) {
NoSafepointScope _;
#if defined(DART_HOST_OS_WINDOWS)
#pragma intrinsic(_ReturnAddress)
#endif
const GrowableObjectArrayPtr array =
thread->isolate_group()->object_store()->ffi_callback_code();
if (array == GrowableObjectArray::null()) {
FATAL("Cannot invoke callback on incorrect isolate.");
// This is called by a native callback trampoline
// (see StubCodeCompiler::GenerateFfiCallbackTrampolineStub). Not registered as
// a runtime entry because we can't use Thread to look it up.
extern "C" Thread* DLRT_GetFfiCallbackMetadata(void* trampoline,
uword* out_entry_point,
uword* out_trampoline_type) {
CHECK_STACK_ALIGNMENT;
TRACE_RUNTIME_CALL("GetFfiCallbackMetadata %p", trampoline);
auto metadata =
FfiCallbackMetadata::Instance()->LookupMetadataForTrampoline(trampoline);
if (!metadata.IsLive()) {
FATAL("Callback invoked after it has been deleted.");
}
const SmiPtr length_smi = GrowableObjectArray::NoSafepointLength(array);
const intptr_t length = Smi::Value(length_smi);
Isolate* target_isolate = metadata.target_isolate();
ASSERT(out_entry_point != nullptr);
*out_entry_point = metadata.target_entry_point();
ASSERT(out_trampoline_type != nullptr);
*out_trampoline_type = static_cast<uword>(metadata.trampoline_type());
if (callback_id < 0 || callback_id >= length) {
FATAL("Cannot invoke callback on incorrect isolate.");
}
if (entry != 0) {
CompressedObjectPtr* const code_array =
Array::DataOf(GrowableObjectArray::NoSafepointData(array));
const CodePtr code =
Code::RawCast(code_array[callback_id].Decompress(array.heap_base()));
if (!Code::ContainsInstructionAt(code, entry)) {
FATAL("Cannot invoke callback on incorrect isolate.");
}
}
}
// Not registered as a runtime entry because we can't use Thread to look it up.
static Thread* GetThreadForNativeCallback(uword callback_id,
uword return_address) {
Thread* const thread = Thread::Current();
if (thread == nullptr) {
FATAL("Cannot invoke native callback outside an isolate.");
@ -3895,6 +3883,9 @@ static Thread* GetThreadForNativeCallback(uword callback_id,
if (!thread->IsDartMutatorThread()) {
FATAL("Native callbacks must be invoked on the mutator thread.");
}
if (thread->isolate() != target_isolate) {
FATAL("Cannot invoke native callback from a different isolate.");
}
// Set the execution state to VM while waiting for the safepoint to end.
// This isn't strictly necessary but enables tests to check that we're not
@ -3902,44 +3893,15 @@ static Thread* GetThreadForNativeCallback(uword callback_id,
thread->set_execution_state(Thread::kThreadInVM);
thread->ExitSafepoint();
VerifyCallbackIdMetadata(thread, callback_id, return_address);
TRACE_RUNTIME_CALL("GetFfiCallbackMetadata thread %p", thread);
TRACE_RUNTIME_CALL("GetFfiCallbackMetadata entry_point %p",
(void*)*out_entry_point);
TRACE_RUNTIME_CALL("GetFfiCallbackMetadata trampoline_type %p",
(void*)*out_trampoline_type);
return thread;
}
#if defined(DART_HOST_OS_WINDOWS)
#pragma intrinsic(_ReturnAddress)
#endif
// This is called directly by NativeEntryInstr. At the moment we enter this
// routine, the caller is generated code in the Isolate heap. Therefore we check
// that the return address (caller) corresponds to the declared callback ID's
// code within this Isolate.
extern "C" Thread* DLRT_GetThreadForNativeCallback(uword callback_id) {
CHECK_STACK_ALIGNMENT;
TRACE_RUNTIME_CALL("GetThreadForNativeCallback %" Pd, callback_id);
#if defined(DART_HOST_OS_WINDOWS)
void* return_address = _ReturnAddress();
#else
void* return_address = __builtin_return_address(0);
#endif
Thread* return_value = GetThreadForNativeCallback(
callback_id, reinterpret_cast<uword>(return_address));
TRACE_RUNTIME_CALL("GetThreadForNativeCallback returning %p", return_value);
return return_value;
}
// This is called by a native callback trampoline
// (see StubCodeCompiler::GenerateJITCallbackTrampolines). There is no need to
// check the return address because the trampoline will use the callback ID to
// look up the generated code. We still check that the callback ID is valid for
// this isolate.
extern "C" Thread* DLRT_GetThreadForNativeCallbackTrampoline(
uword callback_id) {
CHECK_STACK_ALIGNMENT;
return GetThreadForNativeCallback(callback_id, 0);
}
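
At runtime each generated trampoline only has to identify itself: it passes its own address to DLRT_GetFfiCallbackMetadata, receives the target entry point (and trampoline type) back, and jumps there, so the lookup is keyed on the trampoline's address rather than on a callback id. A hedged C++ sketch of that control flow follows; the real trampoline is machine code emitted by GenerateFfiCallbackTrampolineStub and also handles argument preservation and the safepoint transition, both omitted here, and ExampleTrampoline with its int(int) signature is purely hypothetical:

#include <cstdint>

typedef std::uintptr_t uword;  // Matches the VM's uword.
struct Thread;                 // Opaque to this sketch.

// Declaration as in runtime_entry.h.
extern "C" Thread* DLRT_GetFfiCallbackMetadata(void* trampoline,
                                               uword* out_entry_point,
                                               uword* out_trampoline_type);

// Stand-in for one machine-code trampoline with a hypothetical signature.
extern "C" int ExampleTrampoline(int arg) {
  uword entry_point = 0;
  uword trampoline_type = 0;
  // Pass our own address so the runtime can find the metadata recorded for
  // this trampoline when the callback was created.
  DLRT_GetFfiCallbackMetadata(reinterpret_cast<void*>(&ExampleTrampoline),
                              &entry_point, &trampoline_type);
  (void)trampoline_type;  // Used by the real stub to pick the return sequence.
  // Jump to the target entry point with the original argument.
  auto target = reinterpret_cast<int (*)(int)>(entry_point);
  return target(arg);
}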
extern "C" ApiLocalScope* DLRT_EnterHandleScope(Thread* thread) {
CHECK_STACK_ALIGNMENT;
TRACE_RUNTIME_CALL("EnterHandleScope %p", thread);

View file

@ -158,8 +158,9 @@ RUNTIME_ENTRY_LIST(DECLARE_RUNTIME_ENTRY)
LEAF_RUNTIME_ENTRY_LIST(DECLARE_LEAF_RUNTIME_ENTRY)
// Expected to be called inside a safepoint.
extern "C" Thread* DLRT_GetThreadForNativeCallback(uword callback_id);
extern "C" Thread* DLRT_GetThreadForNativeCallbackTrampoline(uword callback_id);
extern "C" Thread* DLRT_GetFfiCallbackMetadata(void* trampoline,
uword* out_entry_point,
uword* out_callback_kind);
// For creating scoped handles in FFI trampolines.
extern "C" ApiLocalScope* DLRT_EnterHandleScope(Thread* thread);

View file

@ -98,11 +98,18 @@ CodePtr StubCode::Generate(const char* name,
SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
compiler::Assembler assembler(object_pool_builder);
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
Zone* zone = thread->zone();
auto* pc_descriptors_list = new (zone) DescriptorList(zone);
compiler::StubCodeCompiler stubCodeCompiler(&assembler, pc_descriptors_list);
(stubCodeCompiler.*GenerateStub)();
const Code& code = Code::Handle(Code::FinalizeCodeAndNotify(
name, nullptr, &assembler, Code::PoolAttachment::kNotAttachPool,
/*optimized=*/false));
const Code& code = Code::Handle(
zone, Code::FinalizeCodeAndNotify(name, nullptr, &assembler,
Code::PoolAttachment::kNotAttachPool,
/*optimized=*/false));
const PcDescriptors& descriptors = PcDescriptors::Handle(
zone, pc_descriptors_list->FinalizePcDescriptors(code.PayloadStart()));
code.set_pc_descriptors(descriptors);
#ifndef PRODUCT
if (FLAG_support_disassembler && FLAG_disassemble_stubs) {
Disassembler::DisassembleStub(name, code);
@ -221,7 +228,7 @@ CodePtr StubCode::GetAllocationStubForClass(const Class& cls) {
compiler::Assembler assembler(wrapper);
compiler::UnresolvedPcRelativeCalls unresolved_calls;
const char* name = cls.ToCString();
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
compiler::StubCodeCompiler stubCodeCompiler(&assembler, nullptr);
stubCodeCompiler.GenerateAllocationStubForClass(
&unresolved_calls, cls, allocate_object_stub,
allocate_object_parametrized_stub);
@ -317,7 +324,7 @@ CodePtr StubCode::GetBuildMethodExtractorStub(compiler::ObjectPoolBuilder* pool,
compiler::ObjectPoolBuilder object_pool_builder;
compiler::Assembler assembler(pool != nullptr ? pool : &object_pool_builder);
compiler::StubCodeCompiler stubCodeCompiler(&assembler);
compiler::StubCodeCompiler stubCodeCompiler(&assembler, nullptr);
stubCodeCompiler.GenerateBuildMethodExtractorStub(
closure_allocation_stub, context_allocation_stub, generic);

View file

@ -138,6 +138,7 @@ namespace dart {
V(ExitSafepoint) \
V(ExitSafepointIgnoreUnwindInProgress) \
V(CallNativeThroughSafepoint) \
V(FfiCallbackTrampoline) \
V(InitStaticField) \
V(InitLateStaticField) \
V(InitLateFinalStaticField) \

View file

@ -27,10 +27,6 @@
#include "vm/timeline.h"
#include "vm/zone.h"
#if !defined(DART_PRECOMPILED_RUNTIME)
#include "vm/ffi_callback_trampolines.h"
#endif // !defined(DART_PRECOMPILED_RUNTIME)
namespace dart {
#if !defined(PRODUCT)

View file

@ -7,6 +7,10 @@
#include "platform/assert.h"
#include "platform/utils.h"
#if defined(DART_HOST_OS_MACOS)
#include <mach/mach.h>
#endif
namespace dart {
bool VirtualMemory::InSamePage(uword address0, uword address1) {
@ -43,4 +47,49 @@ VirtualMemory* VirtualMemory::ForImagePage(void* pointer, uword size) {
return memory;
}
#if !defined(DART_TARGET_OS_FUCHSIA)
// TODO(https://dartbug.com/52579): Reenable on Fuchsia.
bool VirtualMemory::DuplicateRX(VirtualMemory* target) {
ASSERT_LESS_OR_EQUAL(size(), target->size());
#if defined(DART_HOST_OS_MACOS)
// Mac is special-cased because iOS doesn't allow allocating new executable
// memory, so the default approach below would fail. However, vm_remap lets us
// create new mappings of existing executable memory, which is effectively the
// same thing for non-writable memory.
const mach_port_t task = mach_task_self();
const vm_address_t source_address = reinterpret_cast<vm_address_t>(address());
const vm_size_t mem_size = size();
const vm_prot_t read_execute = VM_PROT_READ | VM_PROT_EXECUTE;
vm_prot_t current_protection = read_execute;
vm_prot_t max_protection = read_execute;
vm_address_t target_address =
reinterpret_cast<vm_address_t>(target->address());
kern_return_t status = vm_remap(
task, &target_address, mem_size,
/*mask=*/0,
/*flags=*/VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, task, source_address,
/*copy=*/true, &current_protection, &max_protection,
/*inheritance=*/VM_INHERIT_NONE);
if (status != KERN_SUCCESS) {
return false;
}
ASSERT(reinterpret_cast<void*>(target_address) == target->address());
ASSERT_EQUAL(current_protection & read_execute, read_execute);
ASSERT_EQUAL(max_protection & read_execute, read_execute);
return true;
#else // defined(DART_HOST_OS_MACOS)
// TODO(52497): Use dual mapping on platforms where it's supported.
// Check that target doesn't overlap with this.
ASSERT(target->start() >= end() || target->end() <= start());
memcpy(target->address(), address(), size()); // NOLINT
Protect(target->address(), size(), kReadExecute);
return true;
#endif // defined(DART_HOST_OS_MACOS)
}
#endif // !defined(DART_TARGET_OS_FUCHSIA)
} // namespace dart

View file

@ -63,6 +63,17 @@ class VirtualMemory {
bool is_compressed,
const char* name);
// Duplicates `this` memory into the `target` memory. This is designed to work
// on all platforms, including iOS, which doesn't allow creating new
// executable memory.
//
// Assumes
// * `this` has RX protection.
// * `target` has RW protection, and is at least as large as `this`.
#if !defined(DART_TARGET_OS_FUCHSIA)
bool DuplicateRX(VirtualMemory* target);
#endif // !defined(DART_TARGET_OS_FUCHSIA)
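
A brief usage sketch of this contract; 'rx' is assumed to be an existing read-execute mapping (for example a page of trampolines), and MakeExecutableCopy and the mapping name are illustrative. The unit test below exercises the same flow against a real code page:

// Sketch only: 'rx' is an existing VirtualMemory mapped read-execute.
VirtualMemory* MakeExecutableCopy(VirtualMemory* rx) {
  // The target must be writable and at least as large as the source.
  VirtualMemory* rw = VirtualMemory::AllocateAligned(
      rx->size(), VirtualMemory::PageSize(), /*is_executable=*/false,
      /*is_compressed=*/false, "DuplicateRX example");
  if (rw == nullptr || !rx->DuplicateRX(rw)) {
    delete rw;       // delete on nullptr is a no-op.
    return nullptr;  // vm_remap may fail on MacOS/iOS.
  }
  return rw;  // Now an executable copy of 'rx'.
}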
// Returns the cached page size. Use only if Init() has been called.
static intptr_t PageSize() {
ASSERT(page_size_ != 0);

View file

@ -89,4 +89,41 @@ VM_UNIT_TEST_CASE(FreeVirtualMemory) {
}
}
#if !defined(DART_TARGET_OS_FUCHSIA)
// TODO(https://dartbug.com/52579): Reenable on Fuchsia.
static int testFunction(int x) {
return x * 2;
}
NO_SANITIZE_UNDEFINED("function") // See https://dartbug.com/52440
VM_UNIT_TEST_CASE(DuplicateRXVirtualMemory) {
const uword page_size = VirtualMemory::PageSize();
const uword pointer = reinterpret_cast<uword>(&testFunction);
const uword page_start = Utils::RoundDown(pointer, page_size);
const uword offset = pointer - page_start;
// Grab 2 * page_size, in case testFunction happens to land near the end of
// the page.
VirtualMemory* vm = VirtualMemory::ForImagePage(
reinterpret_cast<void*>(page_start), 2 * page_size);
EXPECT_NE(nullptr, vm);
VirtualMemory* vm2 = VirtualMemory::AllocateAligned(
vm->size(), kPageSize, /*is_executable=*/false,
/*is_compressed=*/false, "FfiCallbackMetadata::TrampolinePage");
bool ok = vm->DuplicateRX(vm2);
EXPECT_EQ(true, ok);
auto testFunction2 = reinterpret_cast<int (*)(int)>(vm2->start() + offset);
EXPECT_NE(&testFunction, testFunction2);
EXPECT_EQ(246, testFunction2(123));
delete vm;
delete vm2;
}
#endif // !defined(DART_TARGET_OS_FUCHSIA)
} // namespace dart

View file

@ -105,8 +105,8 @@ vm_sources = [
"exceptions.h",
"experimental_features.cc",
"experimental_features.h",
"ffi_callback_trampolines.cc",
"ffi_callback_trampolines.h",
"ffi_callback_metadata.cc",
"ffi_callback_metadata.h",
"field_table.cc",
"field_table.h",
"finalizable_data.h",
@ -414,6 +414,7 @@ vm_sources_tests = [
"exceptions_test.cc",
"fixed_cache_test.cc",
"flags_test.cc",
"ffi_callback_metadata_test.cc",
"growable_array_test.cc",
"guard_field_test.cc",
"handles_test.cc",

View file

@ -16,9 +16,6 @@ void main() {
b.add(
Pointer.fromFunction<Int Function()>(nativeToDartCallback, 1).address);
}
// Another pointer from a different call site.
a.add(Pointer.fromFunction<Int Function()>(nativeToDartCallback, 0).address);
b.add(Pointer.fromFunction<Int Function()>(nativeToDartCallback, 1).address);
ensureEqualEntries(a);
ensureEqualEntries(b);

View file

@ -16,9 +16,6 @@ void main() {
b.add(
Pointer.fromFunction<Int Function()>(nativeToDartCallback, 1).address);
}
// Another pointer from a different call site.
a.add(Pointer.fromFunction<Int Function()>(nativeToDartCallback, 0).address);
b.add(Pointer.fromFunction<Int Function()>(nativeToDartCallback, 1).address);
ensureEqualEntries(a);
ensureEqualEntries(b);