[vm/ffi] Support varargs in the backend
On ARM64 macOS and iOS, when varargs are used, the first vararg blocks all remaining CPU and FPU argument registers.

On Windows x64, when varargs are used, floating-point arguments are passed _both_ in the integer and the double register. The Windows logic requires a new kind of native location: `BothNativeLocations`, which signals that a value needs to be copied to both locations before an FFI call, and can be copied from either of the two locations when getting an FFI callback.

TEST=runtime/vm/compiler/ffi/unit_tests/variadic_double/x64_win.expect

Note that integer arguments already block out the corresponding xmm registers on Windows x64.

On System V, an upper bound of the number of XMM registers used must be passed in AL. (Not reflected in the unit tests here, but it will be in the dependent CL.)

On ARM (32-bit), using varargs forces the calling convention into softfp mode, even on hardfp-capable devices.

On RISC-V, the FPU registers are blocked when using varargs.

TEST=runtime/vm/compiler/ffi/native_calling_convention_test.cc
Test outputs in: runtime/vm/compiler/ffi/unit_tests/variadic_*
Run tests with `tools/test.py ffi_unit`.

Bug: https://github.com/dart-lang/sdk/issues/38578
Change-Id: Ic568f8156c1c28ac3d6a2144805edf8caaa0169c
Cq-Include-Trybots: luci.dart.try:vm-precomp-ffi-qemu-linux-release-arm-try
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/278342
Reviewed-by: Ryan Macnak <rmacnak@google.com>
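For orientation, this is the shape of C callee whose ABI the CL models. A minimal sketch, assuming a hypothetical function (the name, the `count` parameter, and the values are illustrative, not part of this CL), mirroring the `variadic_double` unit test added below:

#include <cstdarg>
#include <cstdio>

// Hypothetical variadic callee: one fixed double followed by `count`
// variadic doubles. In C varargs, floats are promoted to double.
double sum_doubles(double first, int count, ...) {
  va_list args;
  va_start(args, count);
  double total = first;
  for (int i = 0; i < count; i++) {
    total += va_arg(args, double);
  }
  va_end(args);
  return total;
}

int main() {
  // Four variadic doubles after the fixed one, as in the unit test.
  std::printf("%f\n", sum_doubles(1.0, 4, 2.0, 3.0, 4.0, 5.0));  // 15.000000
  return 0;
}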
This commit is contained in: parent a43badab9f, commit 1854bb00c6
121 changed files with 1308 additions and 116 deletions
@@ -21,36 +21,47 @@ namespace ffi {
 const intptr_t kNoFpuRegister = -1;
 
 #if !defined(FFI_UNIT_TESTS)
-// In Soft FP, floats and doubles get passed in integer registers.
-static bool SoftFpAbi() {
+// In Soft FP and vararg calls, floats and doubles get passed in integer
+// registers.
+static bool SoftFpAbi(bool has_varargs) {
 #if defined(TARGET_ARCH_ARM)
+  if (has_varargs) {
+    return true;
+  }
   return !TargetCPUFeatures::hardfp_supported();
 #else
   return false;
 #endif
 }
 #else  // !defined(FFI_UNIT_TESTS)
-static bool SoftFpAbi() {
+static bool SoftFpAbi(bool has_varargs) {
 #if defined(TARGET_ARCH_ARM) && defined(DART_TARGET_OS_ANDROID)
   return true;
+#elif defined(TARGET_ARCH_ARM)
+  return has_varargs;
 #else
   return false;
 #endif
 }
 #endif  // !defined(FFI_UNIT_TESTS)
 
-// In Soft FP, floats are treated as 4 byte ints, and doubles as 8 byte ints.
-static const NativeType& ConvertIfSoftFp(Zone* zone, const NativeType& rep) {
-  if (SoftFpAbi() && rep.IsFloat()) {
-    ASSERT(rep.IsFloat());
-    if (rep.SizeInBytes() == 4) {
-      return *new (zone) NativePrimitiveType(kInt32);
-    }
-    if (rep.SizeInBytes() == 8) {
-      return *new (zone) NativePrimitiveType(kInt64);
-    }
+static const NativeType& ConvertFloatToInt(Zone* zone, const NativeType& type) {
+  ASSERT(type.IsFloat());
+  if (type.SizeInBytes() == 4) {
+    return *new (zone) NativePrimitiveType(kInt32);
   }
-  return rep;
+  ASSERT(type.SizeInBytes() == 8);
+  return *new (zone) NativePrimitiveType(kInt64);
+}
+
+// In Soft FP, floats are treated as 4 byte ints, and doubles as 8 byte ints.
+static const NativeType& ConvertIfSoftFp(Zone* zone,
+                                         const NativeType& type,
+                                         bool has_varargs) {
+  if (SoftFpAbi(has_varargs) && type.IsFloat()) {
+    return ConvertFloatToInt(zone, type);
+  }
+  return type;
 }
 
 // The native dual of `kUnboxedFfiIntPtr`.
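As a standalone illustration of the conversion `ConvertFloatToInt` performs at the type level (a sketch, not VM code): in soft-float passing, the bits of a float or double are reinterpreted as a same-sized integer.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Bit-level reinterpretation, the runtime analogue of the type-level
// mapping above (float -> int32, double -> int64).
static int64_t DoubleBitsAsInt64(double value) {
  int64_t bits;
  static_assert(sizeof(bits) == sizeof(value), "size mismatch");
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

int main() {
  // Prints 3ff0000000000000: the IEEE-754 encoding of 1.0.
  std::printf("%llx\n", (unsigned long long)DoubleBitsAsInt64(1.0));
  return 0;
}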
@@ -64,15 +75,65 @@ const PrimitiveType kFfiIntPtr =
 // of argument locations.
 class ArgumentAllocator : public ValueObject {
  public:
-  explicit ArgumentAllocator(Zone* zone) : zone_(zone) {}
+  explicit ArgumentAllocator(Zone* zone, bool has_varargs)
+      : has_varargs_(has_varargs), zone_(zone) {}
 
-  const NativeLocation& AllocateArgument(const NativeType& payload_type) {
-    const auto& payload_type_converted = ConvertIfSoftFp(zone_, payload_type);
+  const NativeLocation& AllocateArgumentVariadic(const NativeType& payload_type,
+                                                 bool is_first_vararg = false,
+                                                 bool is_vararg = false) {
+#if defined(TARGET_ARCH_ARM64) &&                                              \
+    (defined(DART_TARGET_OS_MACOS_IOS) || defined(DART_TARGET_OS_MACOS))
+    if (is_first_vararg) {
+      // Block all registers.
+      BlockAllFpuRegisters();
+      cpu_regs_used = CallingConventions::kNumArgRegs;
+    }
+#endif
+#if defined(TARGET_ARCH_RISCV64) || defined(TARGET_ARCH_RISCV32)
+    if (is_first_vararg) {
+      // Block all FPU registers.
+      BlockAllFpuRegisters();
+    }
+#endif
+    const auto& result = AllocateArgument(payload_type, is_vararg);
+#if defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
+    if (has_varargs_) {
+      if (result.IsRegisters()) {
+        // If an integer register is used, block the corresponding xmm
+        // register.
+        ASSERT(CallingConventions::kArgumentIntRegXorFpuReg);
+      } else if (result.IsFpuRegisters()) {
+        // If an xmm register is used, also use the corresponding integer
+        // register.
+        ASSERT(CallingConventions::kArgumentIntRegXorFpuReg);
+        const auto& fpu_reg_location = result.AsFpuRegisters();
+        const FpuRegisterKind kind = kQuadFpuReg;
+        ASSERT(fpu_reg_location.fpu_reg_kind() == kind);
+        FpuRegister fpu_register = fpu_reg_location.fpu_reg();
+        const intptr_t reg_index = fpu_register;
+        ASSERT(cpu_regs_used == reg_index + 1);
+        Register cpu_register =
+            CallingConventions::ArgumentRegisters[reg_index];
+        const auto& container_type = ConvertFloatToInt(zone_, payload_type);
+        const auto& cpu_reg_location = *new (zone_) NativeRegistersLocation(
+            zone_, payload_type, container_type, cpu_register);
+        return *new (zone_)
+            BothNativeLocations(fpu_reg_location, cpu_reg_location);
+      }
+    }
+#endif
+    return result;
+  }
+
+ private:
+  const NativeLocation& AllocateArgument(const NativeType& payload_type,
+                                         bool is_vararg = false) {
+    const auto& payload_type_converted =
+        ConvertIfSoftFp(zone_, payload_type, has_varargs_);
     if (payload_type_converted.IsFloat()) {
-      return AllocateFloat(payload_type);
+      return AllocateFloat(payload_type, is_vararg);
     }
     if (payload_type_converted.IsInt()) {
-      return AllocateInt(payload_type);
+      return AllocateInt(payload_type, is_vararg);
     }
 
     // Compounds are laid out differently per ABI, so they are implemented
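The Windows x64 rule above pairs argument slots one-to-one: slot n is both the nth integer register and XMMn. A minimal sketch of that pairing, assuming nothing beyond the documented Windows x64 integer argument registers (the table name is a hypothetical stand-in for CallingConventions::ArgumentRegisters):

#include <cstdio>

// The four Windows x64 integer argument registers, in slot order.
static const char* kIntArgRegs[] = {"rcx", "rdx", "r8", "r9"};

int main() {
  for (int i = 0; i < 4; i++) {
    // A variadic double in slot i is copied to xmm<i> *and* kIntArgRegs[i],
    // which is what a BothNativeLocations(fpu, cpu) pair expresses.
    std::printf("slot %d: xmm%d + %s\n", i, i, kIntArgRegs[i]);
  }
  return 0;
}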
@@ -82,11 +143,11 @@ class ArgumentAllocator : public ValueObject {
     // even if the parts of a compound fit in 1 cpu or fpu register it will
     // be nested in a MultipleNativeLocations.
     const NativeCompoundType& compound_type = payload_type.AsCompound();
-    return AllocateCompound(compound_type);
+    return AllocateCompound(compound_type, is_vararg);
   }
 
- private:
-  const NativeLocation& AllocateFloat(const NativeType& payload_type) {
+  const NativeLocation& AllocateFloat(const NativeType& payload_type,
+                                      bool is_vararg) {
     const auto kind = FpuRegKind(payload_type);
     const intptr_t reg_index = FirstFreeFpuRegisterIndex(kind);
     if (reg_index != kNoFpuRegister) {
@@ -116,24 +177,16 @@ class ArgumentAllocator : public ValueObject {
     // After using up F registers, start bitcasting to X registers.
     if (HasAvailableCpuRegisters(1)) {
       const Register reg = AllocateCpuRegister();
-      const auto& container_type = *new (zone_) NativePrimitiveType(kInt64);
+      const auto& container_type = ConvertFloatToInt(zone_, payload_type);
       return *new (zone_)
          NativeRegistersLocation(zone_, payload_type, container_type, reg);
     }
 #elif defined(TARGET_ARCH_RISCV32)
     // After using up F registers, start bitcasting to X register (pairs).
-    if ((payload_type.SizeInBytes() == 4) && HasAvailableCpuRegisters(1)) {
-      const Register reg = AllocateCpuRegister();
-      const auto& container_type = *new (zone_) NativePrimitiveType(kInt32);
-      return *new (zone_)
-          NativeRegistersLocation(zone_, payload_type, container_type, reg);
-    }
-    if ((payload_type.SizeInBytes() == 8) && HasAvailableCpuRegisters(2)) {
-      const Register reg1 = AllocateCpuRegister();
-      const Register reg2 = AllocateCpuRegister();
-      const auto& container_type = *new (zone_) NativePrimitiveType(kInt64);
-      return *new (zone_) NativeRegistersLocation(zone_, payload_type,
-                                                  container_type, reg1, reg2);
+    if (((payload_type.SizeInBytes() == 4) && HasAvailableCpuRegisters(1)) ||
+        ((payload_type.SizeInBytes() == 8) && HasAvailableCpuRegisters(2))) {
+      const auto& container_type = ConvertFloatToInt(zone_, payload_type);
+      return AllocateInt(payload_type, container_type, is_vararg);
     }
 #endif
 
@@ -144,18 +197,14 @@ class ArgumentAllocator : public ValueObject {
     return AllocateStack(payload_type);
   }
 
-  const NativeLocation& AllocateInt(const NativeType& payload_type) {
-    const auto& payload_type_converted = ConvertIfSoftFp(zone_, payload_type);
-
-    // Some calling conventions require the callee to make the lowest 32 bits
-    // in registers non-garbage.
-    const auto& container_type =
-        CallingConventions::kArgumentRegisterExtension == kExtendedTo4
-            ? payload_type_converted.WidenTo4Bytes(zone_)
-            : payload_type_converted;
+  const NativeLocation& AllocateInt(const NativeType& payload_type,
+                                    const NativeType& container_type,
+                                    bool is_vararg) {
     if (target::kWordSize == 4 && payload_type.SizeInBytes() == 8) {
       if (CallingConventions::kArgumentRegisterAlignment ==
-          kAlignedToWordSizeAndValueSize) {
+              kAlignedToWordSizeAndValueSize ||
+          (is_vararg && CallingConventions::kArgumentRegisterAlignmentVarArgs ==
+                            kAlignedToWordSizeAndValueSize)) {
         cpu_regs_used += cpu_regs_used % 2;
       }
       if (cpu_regs_used + 2 <= CallingConventions::kNumArgRegs) {
@@ -174,11 +223,27 @@ class ArgumentAllocator : public ValueObject {
     return AllocateStack(payload_type);
   }
 
+  // Constructs a container type.
+  const NativeLocation& AllocateInt(const NativeType& payload_type,
+                                    bool is_vararg) {
+    const auto& payload_type_converted =
+        ConvertIfSoftFp(zone_, payload_type, has_varargs_);
+
+    // Some calling conventions require the callee to make the lowest 32 bits
+    // in registers non-garbage.
+    const auto& container_type =
+        CallingConventions::kArgumentRegisterExtension == kExtendedTo4
+            ? payload_type_converted.WidenTo4Bytes(zone_)
+            : payload_type_converted;
+
+    return AllocateInt(payload_type, container_type, is_vararg);
+  }
+
 #if defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
   // If fits in two fpu and/or cpu registers, transfer in those. Otherwise,
   // transfer on stack.
-  const NativeLocation& AllocateCompound(
-      const NativeCompoundType& payload_type) {
+  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
+                                         bool is_vararg) {
     const intptr_t size = payload_type.SizeInBytes();
     if (size <= 16 && size > 0 && !payload_type.ContainsUnalignedMembers()) {
       intptr_t required_regs =
@@ -222,8 +287,8 @@ class ArgumentAllocator : public ValueObject {
   // If struct fits in a single register and size is a power of two, then
   // use a single register and sign extend.
   // Otherwise, pass a pointer to a copy.
-  const NativeLocation& AllocateCompound(
-      const NativeCompoundType& payload_type) {
+  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
+                                         bool is_vararg) {
     const NativeCompoundType& compound_type = payload_type.AsCompound();
     const intptr_t size = compound_type.SizeInBytes();
     if (size <= 8 && Utils::IsPowerOfTwo(size)) {
@@ -251,8 +316,8 @@ class ArgumentAllocator : public ValueObject {
 #endif  // defined(TARGET_ARCH_X64) && defined(DART_TARGET_OS_WINDOWS)
 
 #if defined(TARGET_ARCH_IA32)
-  const NativeLocation& AllocateCompound(
-      const NativeCompoundType& payload_type) {
+  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
+                                         bool is_vararg) {
     return AllocateStack(payload_type);
   }
 #endif  // defined(TARGET_ARCH_IA32)
@@ -260,10 +325,10 @@ class ArgumentAllocator : public ValueObject {
 #if defined(TARGET_ARCH_ARM)
   // Transfer homogeneous floats in FPU registers, and allocate the rest
   // in 4 or 8 size chunks in registers and stack.
-  const NativeLocation& AllocateCompound(
-      const NativeCompoundType& payload_type) {
+  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
+                                         bool is_vararg) {
     const auto& compound_type = payload_type.AsCompound();
-    if (compound_type.ContainsHomogeneousFloats() && !SoftFpAbi() &&
+    if (compound_type.ContainsHomogeneousFloats() && !SoftFpAbi(has_varargs_) &&
         compound_type.NumPrimitiveMembersRecursive() <= 4) {
       const auto& elem_type = compound_type.FirstPrimitiveMember();
       const intptr_t size = compound_type.SizeInBytes();
@@ -325,8 +390,8 @@ class ArgumentAllocator : public ValueObject {
   // Slightly different from Arm32. FPU registers don't alias the same way,
   // structs up to 16 bytes block remaining registers if they do not fit in
   // registers, and larger structs go on stack always.
-  const NativeLocation& AllocateCompound(
-      const NativeCompoundType& payload_type) {
+  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
+                                         bool is_vararg) {
     const auto& compound_type = payload_type.AsCompound();
     const intptr_t size = compound_type.SizeInBytes();
     if (compound_type.ContainsHomogeneousFloats() &&
@@ -352,7 +417,7 @@ class ArgumentAllocator : public ValueObject {
           MultipleNativeLocations(compound_type, multiple_locations);
     }
     BlockAllFpuRegisters();
-    return AllocateStack(payload_type);
+    return AllocateStack(payload_type, is_vararg);
   }
 
   if (size <= 16) {
@@ -392,8 +457,8 @@ class ArgumentAllocator : public ValueObject {
 #if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
   // See RISC-V ABIs Specification
   // https://github.com/riscv-non-isa/riscv-elf-psabi-doc/releases
-  const NativeLocation& AllocateCompound(
-      const NativeCompoundType& payload_type) {
+  const NativeLocation& AllocateCompound(const NativeCompoundType& payload_type,
+                                         bool is_vararg) {
     const auto& compound_type = payload_type.AsCompound();
 
     // 2.2. Hardware Floating-point Calling Convention.
@@ -492,8 +557,9 @@ class ArgumentAllocator : public ValueObject {
     return result;
   }
 
-  const NativeLocation& AllocateStack(const NativeType& payload_type) {
-    align_stack(payload_type.AlignmentInBytesStack());
+  const NativeLocation& AllocateStack(const NativeType& payload_type,
+                                      bool is_vararg = false) {
+    align_stack(payload_type.AlignmentInBytesStack(is_vararg));
     const intptr_t size = payload_type.SizeInBytes();
     // If the stack arguments are not packed, the 32 lowest bits should not
     // contain garbage.
@@ -505,7 +571,7 @@ class ArgumentAllocator : public ValueObject {
         payload_type, container_type, CallingConventions::kStackPointerRegister,
         stack_height_in_bytes);
     stack_height_in_bytes += size;
-    align_stack(payload_type.AlignmentInBytesStack());
+    align_stack(payload_type.AlignmentInBytesStack(is_vararg));
     return result;
   }
 
@@ -513,9 +579,9 @@ class ArgumentAllocator : public ValueObject {
     stack_height_in_bytes = Utils::RoundUp(stack_height_in_bytes, alignment);
   }
 
-  static int NumFpuRegisters(FpuRegisterKind kind) {
+  int NumFpuRegisters(FpuRegisterKind kind) const {
 #if defined(TARGET_ARCH_ARM)
-    if (SoftFpAbi()) return 0;
+    if (has_varargs_) return 0;
     if (kind == kSingleFpuReg) return CallingConventions::kNumSFpuArgRegs;
     if (kind == kDoubleFpuReg) return CallingConventions::kNumDFpuArgRegs;
 #endif  // defined(TARGET_ARCH_ARM)
@@ -578,6 +644,7 @@ class ArgumentAllocator : public ValueObject {
   // Every bit denotes 32 bits of FPU registers.
   intptr_t fpu_reg_parts_used = 0;
   intptr_t stack_height_in_bytes = 0;
+  const bool has_varargs_;
   Zone* zone_;
 };
 
@@ -585,26 +652,33 @@ class ArgumentAllocator : public ValueObject {
 static NativeLocations& ArgumentLocations(
     Zone* zone,
     const ZoneGrowableArray<const NativeType*>& arg_reps,
-    const NativeLocation& return_location) {
+    const NativeLocation& return_location,
+    intptr_t var_args_index) {
   intptr_t num_arguments = arg_reps.length();
   auto& result = *new (zone) NativeLocations(zone, num_arguments);
 
   // Loop through all arguments and assign a register or a stack location.
   // Allocate result pointer for composite returns first.
-  ArgumentAllocator frame_state(zone);
+  const bool has_varargs =
+      var_args_index != NativeFunctionType::kNoVariadicArguments;
+  ArgumentAllocator frame_state(zone, has_varargs);
 #if !defined(TARGET_ARCH_ARM64)
   // Arm64 allocates the pointer in R8, which is not an argument location.
   if (return_location.IsPointerToMemory()) {
     const auto& pointer_location =
         return_location.AsPointerToMemory().pointer_location();
     const auto& pointer_location_allocated =
-        frame_state.AllocateArgument(pointer_location.payload_type());
+        frame_state.AllocateArgumentVariadic(pointer_location.payload_type());
     ASSERT(pointer_location.Equals(pointer_location_allocated));
   }
 #endif
 
   for (intptr_t i = 0; i < num_arguments; i++) {
     const NativeType& rep = *arg_reps[i];
-    result.Add(&frame_state.AllocateArgument(rep));
+    const bool is_first_vararg = has_varargs && i == var_args_index;
+    const bool is_vararg = has_varargs && i >= var_args_index;
+    result.Add(
+        &frame_state.AllocateArgumentVariadic(rep, is_first_vararg, is_vararg));
   }
   return result;
 }
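A worked example of the flag computation in that loop, under the assumptions of the regress49460 test below (a binding like `int ioctl(int, unsigned long, ...)` with variadic_arguments_index == 2); a sketch, not VM code:

#include <cstdio>

int main() {
  const int num_arguments = 3;
  const int var_args_index = 2;   // First variadic argument position.
  const bool has_varargs = true;  // var_args_index != kNoVariadicArguments.
  for (int i = 0; i < num_arguments; i++) {
    const bool is_first_vararg = has_varargs && i == var_args_index;
    const bool is_vararg = has_varargs && i >= var_args_index;
    // Prints: args 0 and 1 are fixed; arg 2 is both first vararg and vararg.
    std::printf("arg %d: first_vararg=%d vararg=%d\n", i, (int)is_first_vararg,
                (int)is_vararg);
  }
  return 0;
}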
@@ -644,7 +718,8 @@ static const NativeLocation& PointerToMemoryResultLocation(
 #if defined(TARGET_ARCH_X64) && !defined(DART_TARGET_OS_WINDOWS)
 static const NativeLocation& CompoundResultLocation(
     Zone* zone,
-    const NativeCompoundType& payload_type) {
+    const NativeCompoundType& payload_type,
+    bool has_varargs) {
   const intptr_t size = payload_type.SizeInBytes();
   if (size <= 16 && size > 0 && !payload_type.ContainsUnalignedMembers()) {
     // Allocate the same as argument, but use return registers instead of
@@ -700,7 +775,8 @@ static const NativeLocation& CompoundResultLocation(
 // Otherwise, pass a pointer to memory.
 static const NativeLocation& CompoundResultLocation(
     Zone* zone,
-    const NativeCompoundType& payload_type) {
+    const NativeCompoundType& payload_type,
+    bool has_varargs) {
   const intptr_t size = payload_type.SizeInBytes();
   if (size <= 8 && size > 0 && Utils::IsPowerOfTwo(size)) {
     NativeLocations& multiple_locations = *new (zone) NativeLocations(zone, 1);
@@ -718,7 +794,8 @@ static const NativeLocation& CompoundResultLocation(
 #if defined(TARGET_ARCH_IA32) && !defined(DART_TARGET_OS_WINDOWS)
 static const NativeLocation& CompoundResultLocation(
     Zone* zone,
-    const NativeCompoundType& payload_type) {
+    const NativeCompoundType& payload_type,
+    bool has_varargs) {
   return PointerToMemoryResultLocation(zone, payload_type);
 }
 #endif  // defined(TARGET_ARCH_IA32) && !defined(DART_TARGET_OS_WINDOWS)
@@ -727,7 +804,8 @@ static const NativeLocation& CompoundResultLocation(
 // Windows uses up to two return registers, while Linux does not.
 static const NativeLocation& CompoundResultLocation(
     Zone* zone,
-    const NativeCompoundType& payload_type) {
+    const NativeCompoundType& payload_type,
+    bool has_varargs) {
   const intptr_t size = payload_type.SizeInBytes();
   if (size <= 8 && Utils::IsPowerOfTwo(size)) {
     NativeLocations& multiple_locations =
@@ -752,9 +830,10 @@ static const NativeLocation& CompoundResultLocation(
 // location passed in by pointer.
 static const NativeLocation& CompoundResultLocation(
     Zone* zone,
-    const NativeCompoundType& payload_type) {
+    const NativeCompoundType& payload_type,
+    bool has_varargs) {
   const intptr_t num_members = payload_type.NumPrimitiveMembersRecursive();
-  if (payload_type.ContainsHomogeneousFloats() && !SoftFpAbi() &&
+  if (payload_type.ContainsHomogeneousFloats() && !SoftFpAbi(has_varargs) &&
       num_members <= 4) {
     NativeLocations& multiple_locations =
         *new (zone) NativeLocations(zone, num_members);
@@ -784,9 +863,11 @@ static const NativeLocation& CompoundResultLocation(
 // otherwise a pointer to the result location is passed in.
 static const NativeLocation& CompoundResultLocation(
     Zone* zone,
-    const NativeCompoundType& payload_type) {
-  ArgumentAllocator frame_state(zone);
-  const auto& location_as_argument = frame_state.AllocateArgument(payload_type);
+    const NativeCompoundType& payload_type,
+    bool has_varargs) {
+  ArgumentAllocator frame_state(zone, has_varargs);
+  const auto& location_as_argument =
+      frame_state.AllocateArgumentVariadic(payload_type);
   if (!location_as_argument.IsStack() &&
       !location_as_argument.IsPointerToMemory()) {
     return location_as_argument;
@@ -798,11 +879,13 @@ static const NativeLocation& CompoundResultLocation(
 #if defined(TARGET_ARCH_RISCV32) || defined(TARGET_ARCH_RISCV64)
 static const NativeLocation& CompoundResultLocation(
     Zone* zone,
-    const NativeCompoundType& payload_type) {
+    const NativeCompoundType& payload_type,
+    bool has_varargs) {
   // First or first and second argument registers if it fits, otherwise a
   // pointer to the result location is passed in.
-  ArgumentAllocator frame_state(zone);
-  const auto& location_as_argument = frame_state.AllocateArgument(payload_type);
+  ArgumentAllocator frame_state(zone, has_varargs);
+  const auto& location_as_argument =
+      frame_state.AllocateArgumentVariadic(payload_type);
   if (!location_as_argument.IsStack() &&
       !location_as_argument.IsPointerToMemory()) {
     return location_as_argument;
@@ -813,8 +896,10 @@ static const NativeLocation& CompoundResultLocation(
 
 // Location for the result of a C signature function.
 static const NativeLocation& ResultLocation(Zone* zone,
-                                            const NativeType& payload_type) {
-  const auto& payload_type_converted = ConvertIfSoftFp(zone, payload_type);
+                                            const NativeType& payload_type,
+                                            bool has_varargs) {
+  const auto& payload_type_converted =
+      ConvertIfSoftFp(zone, payload_type, has_varargs);
   const auto& container_type =
       CallingConventions::kReturnRegisterExtension == kExtendedTo4
           ? payload_type_converted.WidenTo4Bytes(zone)
@@ -840,19 +925,23 @@ static const NativeLocation& ResultLocation(Zone* zone,
   // Compounds are laid out differently per ABI, so they are implemented
   // per ABI.
   const auto& compound_type = payload_type.AsCompound();
-  return CompoundResultLocation(zone, compound_type);
+  return CompoundResultLocation(zone, compound_type, has_varargs);
 }
 
 const NativeCallingConvention& NativeCallingConvention::FromSignature(
     Zone* zone,
     const NativeFunctionType& signature) {
+  const bool contains_varargs = signature.variadic_arguments_index() !=
+                                NativeFunctionType::kNoVariadicArguments;
   // With struct return values, a possible pointer to a return value can
   // occupy an argument position. Hence, allocate return value first.
-  const auto& return_location = ResultLocation(zone, signature.return_type());
+  const auto& return_location =
+      ResultLocation(zone, signature.return_type(), contains_varargs);
   const auto& argument_locations =
-      ArgumentLocations(zone, signature.argument_types(), return_location);
-  return *new (zone)
-      NativeCallingConvention(argument_locations, return_location);
+      ArgumentLocations(zone, signature.argument_types(), return_location,
+                        signature.variadic_arguments_index());
+  return *new (zone) NativeCallingConvention(argument_locations,
+                                             return_location, contains_varargs);
 }
 
 intptr_t NativeCallingConvention::StackTopInBytes() const {
@@ -36,6 +36,7 @@ class NativeCallingConvention : public ZoneAllocated {
     return argument_locations_;
   }
   const NativeLocation& return_location() const { return return_location_; }
+  bool contains_varargs() const { return contains_varargs_; }
 
   intptr_t StackTopInBytes() const;
 
@@ -48,12 +49,15 @@ class NativeCallingConvention : public ZoneAllocated {
 
  private:
   NativeCallingConvention(const NativeLocations& argument_locations,
-                          const NativeLocation& return_location)
+                          const NativeLocation& return_location,
+                          bool contains_varargs)
       : argument_locations_(argument_locations),
-        return_location_(return_location) {}
+        return_location_(return_location),
+        contains_varargs_(contains_varargs) {}
 
   const NativeLocations& argument_locations_;
   const NativeLocation& return_location_;
+  const bool contains_varargs_;
 };
 
 }  // namespace ffi
@@ -14,11 +14,7 @@ namespace ffi {
 const NativeCallingConvention& RunSignatureTest(
     dart::Zone* zone,
     const char* name,
-    const NativeTypes& argument_types,
-    const NativeType& return_type) {
-  const auto& native_signature =
-      *new (zone) NativeFunctionType(argument_types, return_type);
-
+    const NativeFunctionType& native_signature) {
   const auto& native_calling_convention =
       NativeCallingConvention::FromSignature(zone, native_signature);
 
@@ -47,6 +43,17 @@ const NativeCallingConvention& RunSignatureTest(
   return native_calling_convention;
 }
 
+const NativeCallingConvention& RunSignatureTest(
+    dart::Zone* zone,
+    const char* name,
+    const NativeTypes& argument_types,
+    const NativeType& return_type) {
+  const auto& native_signature =
+      *new (zone) NativeFunctionType(argument_types, return_type);
+
+  return RunSignatureTest(zone, name, native_signature);
+}
+
 UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_int8x10) {
   const auto& int8type = *new (Z) NativePrimitiveType(kInt8);
 
@@ -729,6 +736,176 @@ UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_regress_fuchsia105336) {
   RunSignatureTest(Z, "regress_fuchsia105336", arguments, void_type);
 }
 
+// Binding in Dart with variadic arguments:
+// `IntPtr Function(IntPtr, VarArgs<(IntPtr, IntPtr, IntPtr, IntPtr)>)`
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_variadic_int) {
+#if defined(TARGET_ARCH_IS_32_BIT)
+  const auto& intptr_type = *new (Z) NativePrimitiveType(kInt32);
+#elif defined(TARGET_ARCH_IS_64_BIT)
+  const auto& intptr_type = *new (Z) NativePrimitiveType(kInt64);
+#endif
+
+  auto& arguments = *new (Z) NativeTypes(Z, 5);
+  arguments.Add(&intptr_type);
+  arguments.Add(&intptr_type);
+  arguments.Add(&intptr_type);
+  arguments.Add(&intptr_type);
+  arguments.Add(&intptr_type);
+
+  const auto& native_signature = *new (Z) NativeFunctionType(
+      arguments, intptr_type, /*variadic_arguments_index=*/1);
+
+  RunSignatureTest(Z, "variadic_int", native_signature);
+}
+
+// Binding in Dart with variadic arguments:
+// `Double Function(Double, VarArgs<(Double, Double, Double, Double)>)`
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_variadic_double) {
+  const auto& double_type = *new (Z) NativePrimitiveType(kDouble);
+
+  auto& arguments = *new (Z) NativeTypes(Z, 5);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);
+
+  const auto& native_signature = *new (Z) NativeFunctionType(
+      arguments, double_type, /*variadic_arguments_index=*/1);
+
+  RunSignatureTest(Z, "variadic_double", native_signature);
+}
+
+// Binding in Dart with variadic arguments:
+// `Double Function(Double, VarArgs<(Struct20BytesHomogeneousFloat, Double)>)`
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_variadic_with_struct) {
+  const auto& double_type = *new (Z) NativePrimitiveType(kDouble);
+  const auto& float_type = *new (Z) NativePrimitiveType(kFloat);
+
+  auto& member_types = *new (Z) NativeTypes(Z, 5);
+  member_types.Add(&float_type);
+  member_types.Add(&float_type);
+  member_types.Add(&float_type);
+  member_types.Add(&float_type);
+  member_types.Add(&float_type);
+  const auto& struct_type = NativeStructType::FromNativeTypes(Z, member_types);
+
+  auto& arguments = *new (Z) NativeTypes(Z, 3);
+  arguments.Add(&double_type);
+  arguments.Add(&struct_type);
+  arguments.Add(&double_type);
+
+  const auto& native_signature = *new (Z) NativeFunctionType(
+      arguments, double_type, /*variadic_arguments_index=*/1);
+
+  RunSignatureTest(Z, "variadic_with_struct", native_signature);
+}
+
+// Binding in Dart with variadic arguments.
+//
+// Especially macos_arm64 is interesting due to stack alignment.
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(
+    NativeCallingConvention_variadic_with_homogenous_struct) {
+  const auto& double_type = *new (Z) NativePrimitiveType(kDouble);
+  const auto& float_type = *new (Z) NativePrimitiveType(kFloat);
+  const auto& int64_type = *new (Z) NativePrimitiveType(kInt64);
+  const auto& int32_type = *new (Z) NativePrimitiveType(kInt32);
+
+  auto& member_types = *new (Z) NativeTypes(Z, 3);
+  member_types.Add(&float_type);
+  member_types.Add(&float_type);
+  member_types.Add(&float_type);
+  const auto& struct_type = NativeStructType::FromNativeTypes(Z, member_types);
+
+  auto& arguments = *new (Z) NativeTypes(Z, 13);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);  // Exhaust FPU registers.
+  arguments.Add(&float_type);   // Misalign stack.
+  arguments.Add(
+      &struct_type);  // Homogenous struct, not aligned to wordsize on stack.
+  arguments.Add(&int64_type);  // Start varargs.
+  arguments.Add(&int32_type);  // Misalign stack again.
+  arguments.Add(
+      &struct_type);  // Homogenous struct, aligned to wordsize on stack.
+
+  const auto& native_signature = *new (Z) NativeFunctionType(
+      arguments, double_type, /*variadic_arguments_index=*/11);
+
+  RunSignatureTest(Z, "variadic_with_homogenous_struct", native_signature);
+}
+
+// Binding in Dart with variadic arguments.
+//
+// Especially linux_riscv32 is interesting due to register alignment.
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_variadic_register_alignment) {
+  const auto& double_type = *new (Z) NativePrimitiveType(kDouble);
+
+  auto& member_types = *new (Z) NativeTypes(Z, 4);
+  member_types.Add(&double_type);
+  member_types.Add(&double_type);
+  member_types.Add(&double_type);
+  member_types.Add(&double_type);
+  const auto& struct_type = NativeStructType::FromNativeTypes(Z, member_types);
+
+  auto& arguments = *new (Z) NativeTypes(Z, 13);
+  arguments.Add(&double_type);
+  arguments.Add(&double_type);  // Passed in int register pair on RISC-V 32.
+  arguments.Add(
+      &struct_type);  // Passed using single integer register on RISC-V 32.
+  arguments.Add(
+      &double_type);  // Passed in _aligned_ int register pair on RISC-V 32.
+
+  const auto& native_signature = *new (Z) NativeFunctionType(
+      arguments, double_type, /*variadic_arguments_index=*/1);
+
+  RunSignatureTest(Z, "variadic_register_alignment", native_signature);
+}
+
+// Variadic function in C:
+// `int ioctl(int, unsigned long, ...)`
+//
+// Binding in Dart with single variadic argument:
+// `Int32 Function(Int32, Int64, VarArgs<Pointer<Void>>)`
+//
+// https://github.com/dart-lang/sdk/issues/49460
+//
+// See the *.expect in ./unit_tests for this behavior.
+UNIT_TEST_CASE_WITH_ZONE(NativeCallingConvention_regress49460) {
+  const auto& int32_type = *new (Z) NativePrimitiveType(kInt32);
+  const auto& int64_type = *new (Z) NativePrimitiveType(kInt64);
+#if defined(TARGET_ARCH_IS_32_BIT)
+  const auto& intptr_type = *new (Z) NativePrimitiveType(kInt32);
+#elif defined(TARGET_ARCH_IS_64_BIT)
+  const auto& intptr_type = *new (Z) NativePrimitiveType(kInt64);
+#endif
+
+  auto& arguments = *new (Z) NativeTypes(Z, 3);
+  arguments.Add(&int32_type);
+  arguments.Add(&int64_type);
+  arguments.Add(&intptr_type);  // pointer
+
+  const auto& native_signature = *new (Z) NativeFunctionType(
+      arguments, int32_type, /*variadic_arguments_index=*/2);
+
+  RunSignatureTest(Z, "regress49460", native_signature);
+}
+
 }  // namespace ffi
 }  // namespace compiler
 }  // namespace dart
@@ -99,6 +99,11 @@ const PointerToMemoryLocation& NativeLocation::AsPointerToMemory() const {
   return static_cast<const PointerToMemoryLocation&>(*this);
 }
 
+const BothNativeLocations& NativeLocation::AsBoth() const {
+  ASSERT(IsBoth());
+  return static_cast<const BothNativeLocations&>(*this);
+}
+
 #if !defined(FFI_UNIT_TESTS)
 Location NativeRegistersLocation::AsLocation() const {
   ASSERT(IsExpressibleAsLocation());
@@ -354,6 +359,14 @@ void MultipleNativeLocations::PrintTo(BaseTextBuffer* f) const {
   PrintRepresentations(f, *this);
 }
 
+void BothNativeLocations::PrintTo(BaseTextBuffer* f) const {
+  f->Printf("B(");
+  location0_.PrintTo(f);
+  f->Printf(", ");
+  location1_.PrintTo(f);
+  f->Printf(")");
+}
+
 #if !defined(FFI_UNIT_TESTS)
 const char* NativeLocation::ToCString() const {
   return ToCString(Thread::Current()->zone());
@@ -32,6 +32,7 @@ class NativeFpuRegistersLocation;
 class NativeStackLocation;
 class MultipleNativeLocations;
 class PointerToMemoryLocation;
+class BothNativeLocations;
 
 // NativeLocation objects are used in the FFI to describe argument and return
 // value locations in all native ABIs that the FFI supports.
@@ -99,6 +100,7 @@ class NativeLocation : public ZoneAllocated {
   virtual bool IsStack() const { return false; }
   virtual bool IsMultiple() const { return false; }
   virtual bool IsPointerToMemory() const { return false; }
+  virtual bool IsBoth() const { return false; }
 
   virtual bool IsExpressibleAsLocation() const { return false; }
 #if !defined(FFI_UNIT_TESTS)
@@ -119,6 +121,7 @@ class NativeLocation : public ZoneAllocated {
   const NativeStackLocation& AsStack() const;
   const MultipleNativeLocations& AsMultiple() const;
   const PointerToMemoryLocation& AsPointerToMemory() const;
+  const BothNativeLocations& AsBoth() const;
 
   // Retrieve one part from this location when it is split into multiple parts.
   virtual NativeLocation& Split(Zone* zone,
@@ -454,6 +457,48 @@ class MultipleNativeLocations : public NativeLocation {
   DISALLOW_COPY_AND_ASSIGN(MultipleNativeLocations);
 };
 
+// The location of a value that is in two locations.
+//
+// Should only happen on win_x64 with variadic arguments.
+class BothNativeLocations : public NativeLocation {
+ public:
+  BothNativeLocations(const NativeLocation& location0,
+                      const NativeLocation& location1)
+      : NativeLocation(location0.payload_type(), location0.container_type()),
+        location0_(location0),
+        location1_(location1) {}
+  virtual ~BothNativeLocations() {}
+
+  virtual bool IsBoth() const { return true; }
+
+  virtual void PrintTo(BaseTextBuffer* f) const;
+
+  virtual NativeLocation& WithOtherNativeType(
+      Zone* zone,
+      const NativeType& new_payload_type,
+      const NativeType& new_container_type) const {
+    UNREACHABLE();
+  }
+
+  virtual intptr_t StackTopInBytes() const {
+    // Only used with registers.
+    return 0;
+  }
+
+  const NativeLocation& location(intptr_t index) const {
+    ASSERT(index == 0 || index == 1);
+    if (index == 0) {
+      return location0_;
+    }
+    return location1_;
+  }
+
+ private:
+  const NativeLocation& location0_;
+  const NativeLocation& location1_;
+  DISALLOW_COPY_AND_ASSIGN(BothNativeLocations);
+};
+
 #if !defined(FFI_UNIT_TESTS)
 // Return a memory operand for stack slot locations.
 compiler::Address NativeLocationToStackSlotAddress(
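To illustrate the "one value, two locations" contract of this class, a self-contained sketch with simplified stand-in types (not the VM's classes): FFI calls copy the value into both locations; FFI callbacks may read it back from either one, via an accessor shaped like `location(index)` above.

#include <cassert>
#include <string>

struct Loc {
  std::string name;  // Stand-in for a register location.
};

struct BothLoc {
  Loc loc0, loc1;
  // Mirrors BothNativeLocations::location(intptr_t).
  const Loc& location(int index) const {
    assert(index == 0 || index == 1);
    return index == 0 ? loc0 : loc1;
  }
};

int main() {
  // A variadic double in Windows x64 slot 1 lives in xmm1 and rdx.
  BothLoc b{{"xmm1"}, {"rdx"}};
  assert(b.location(0).name == "xmm1");  // e.g. what the caller fills first
  assert(b.location(1).name == "rdx");   // e.g. what a callback may read
  return 0;
}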
@@ -127,7 +127,7 @@ intptr_t NativePrimitiveType::SizeInBytes() const {
   return fundamental_size_in_bytes[representation_];
 }
 
-intptr_t NativePrimitiveType::AlignmentInBytesStack() const {
+intptr_t NativePrimitiveType::AlignmentInBytesStack(bool is_vararg) const {
   switch (CallingConventions::kArgumentStackAlignment) {
     case kAlignedToWordSize:
       // The default is to align stack arguments to word size.
@@ -178,6 +178,7 @@ NativeStructType& NativeStructType::FromNativeTypes(Zone* zone,
   // If this struct is passed on the stack, it should be aligned to the largest
   // alignment of its members when passing those members on the stack.
   intptr_t alignment_stack = kAtLeast1ByteAligned;
+  intptr_t alignment_stack_vararg = kAtLeast1ByteAligned;
 #if (defined(DART_TARGET_OS_MACOS_IOS) || defined(DART_TARGET_OS_MACOS)) &&   \
     defined(TARGET_ARCH_ARM64)
   // On iOS64 and MacOS arm64 stack values can be less aligned than wordSize,
@@ -193,6 +194,7 @@ NativeStructType& NativeStructType::FromNativeTypes(Zone* zone,
   if (!ContainsHomogeneousFloatsInternal(members)) {
     alignment_stack = compiler::target::kWordSize;
   }
+  alignment_stack_vararg = compiler::target::kWordSize;
 #endif
 
   auto& member_offsets =
@@ -212,11 +214,14 @@ NativeStructType& NativeStructType::FromNativeTypes(Zone* zone,
     offset += member_size;
     alignment_field = Utils::Maximum(alignment_field, member_align_field);
     alignment_stack = Utils::Maximum(alignment_stack, member_align_stack);
+    alignment_stack_vararg =
+        Utils::Maximum(alignment_stack_vararg, member_align_stack);
   }
   const intptr_t size = Utils::RoundUp(offset, alignment_field);
 
-  return *new (zone) NativeStructType(members, member_offsets, size,
-                                      alignment_field, alignment_stack);
+  return *new (zone)
+      NativeStructType(members, member_offsets, size, alignment_field,
+                       alignment_stack, alignment_stack_vararg);
 }
 
 // Keep consistent with
|
@ -738,6 +743,9 @@ void NativeFunctionType::PrintTo(BaseTextBuffer* f) const {
|
|||
if (i > 0) {
|
||||
f->AddString(", ");
|
||||
}
|
||||
if (i == variadic_arguments_index_) {
|
||||
f->AddString("varargs: ");
|
||||
}
|
||||
argument_types_[i]->PrintTo(f);
|
||||
}
|
||||
f->AddString(") => ");
|
||||
|
|
|
@@ -90,7 +90,7 @@ class NativeType : public ZoneAllocated {
   virtual intptr_t SizeInBytes() const = 0;
 
   // The alignment in bytes of this representation on the stack.
-  virtual intptr_t AlignmentInBytesStack() const = 0;
+  virtual intptr_t AlignmentInBytesStack(bool is_vararg = false) const = 0;
 
   // The alignment in bytes of this representation as member of a composite.
   virtual intptr_t AlignmentInBytesField() const = 0;
@@ -193,7 +193,7 @@ class NativePrimitiveType : public NativeType {
   virtual bool IsSigned() const;
 
   virtual intptr_t SizeInBytes() const;
-  virtual intptr_t AlignmentInBytesStack() const;
+  virtual intptr_t AlignmentInBytesStack(bool is_vararg = false) const;
   virtual intptr_t AlignmentInBytesField() const;
 
 #if !defined(DART_PRECOMPILED_RUNTIME)
@@ -246,7 +246,7 @@ class NativeArrayType : public NativeType {
   virtual intptr_t AlignmentInBytesField() const {
     return element_type_.AlignmentInBytesField();
   }
-  virtual intptr_t AlignmentInBytesStack() const {
+  virtual intptr_t AlignmentInBytesStack(bool is_vararg = false) const {
     return element_type_.AlignmentInBytesStack();
   }
 
@@ -284,7 +284,9 @@ class NativeCompoundType : public NativeType {
 
   virtual intptr_t SizeInBytes() const { return size_; }
   virtual intptr_t AlignmentInBytesField() const { return alignment_field_; }
-  virtual intptr_t AlignmentInBytesStack() const { return alignment_stack_; }
+  virtual intptr_t AlignmentInBytesStack(bool is_vararg = false) const {
+    return alignment_stack_;
+  }
 
   virtual bool Equals(const NativeType& other) const;
 
@@ -353,6 +355,13 @@ class NativeStructType : public NativeCompoundType {
 
   virtual bool IsStruct() const { return true; }
 
+  virtual intptr_t AlignmentInBytesStack(bool is_vararg = false) const {
+    if (is_vararg) {
+      return alignment_stack_vararg_;
+    }
+    return alignment_stack_;
+  }
+
 #if !defined(DART_PRECOMPILED_RUNTIME)
   virtual bool ContainsOnlyFloats(Range range) const;
 #endif  // !defined(DART_PRECOMPILED_RUNTIME)
@@ -371,10 +380,13 @@ class NativeStructType : public NativeCompoundType {
                    const ZoneGrowableArray<intptr_t>& member_offsets,
                    intptr_t size,
                    intptr_t alignment_field,
-                   intptr_t alignment_stack)
+                   intptr_t alignment_stack,
+                   intptr_t alignment_stack_vararg)
       : NativeCompoundType(members, size, alignment_field, alignment_stack),
+        alignment_stack_vararg_(alignment_stack_vararg),
         member_offsets_(member_offsets) {}
 
+  const intptr_t alignment_stack_vararg_;
   const ZoneGrowableArray<intptr_t>& member_offsets_;
 };
 
@@ -406,8 +418,11 @@ class NativeUnionType : public NativeCompoundType {
 class NativeFunctionType : public ZoneAllocated {
  public:
   NativeFunctionType(const NativeTypes& argument_types,
-                     const NativeType& return_type)
-      : argument_types_(argument_types), return_type_(return_type) {}
+                     const NativeType& return_type,
+                     intptr_t variadic_arguments_index = kNoVariadicArguments)
+      : argument_types_(argument_types),
+        return_type_(return_type),
+        variadic_arguments_index_(variadic_arguments_index) {}
 
 #if !defined(DART_PRECOMPILED_RUNTIME) && !defined(FFI_UNIT_TESTS)
   static const NativeFunctionType* FromUnboxedRepresentation(
@@ -418,6 +433,9 @@ class NativeFunctionType : public ZoneAllocated {
 
   const NativeTypes& argument_types() const { return argument_types_; }
   const NativeType& return_type() const { return return_type_; }
+  intptr_t variadic_arguments_index() const {
+    return variadic_arguments_index_;
+  }
 
   void PrintTo(BaseTextBuffer* f) const;
   const char* ToCString(Zone* zone) const;
@@ -425,9 +443,13 @@ class NativeFunctionType : public ZoneAllocated {
   const char* ToCString() const;
 #endif
 
+  static const intptr_t kNoVariadicArguments = INTPTR_MAX;
+
  private:
   const NativeTypes& argument_types_;
   const NativeType& return_type_;
+  // If no variadic arguments, then kNoVariadicArguments.
+  const intptr_t variadic_arguments_index_;
 };
 
 }  // namespace ffi
@@ -6,14 +6,14 @@ fa4 float
 fa5 float
 fa6 float
 fa7 float
-a0 int64[float]
-a1 int64[float]
-a2 int64[float]
-a3 int64[float]
-a4 int64[float]
-a5 int64[float]
-a6 int64[float]
-a7 int64[float]
+a0 int32[float]
+a1 int32[float]
+a2 int32[float]
+a3 int32[float]
+a4 int32[float]
+a5 int32[float]
+a6 int32[float]
+a7 int32[float]
 S+0 float
 S+8 float
 S+16 float

@@ -0,0 +1,5 @@
+r0 int32
+r1 int64
+r2 int64
+=>
+r0 int32

@@ -0,0 +1,5 @@
+r0 int32
+r1 int64
+r2 int64
+=>
+r0 int32

@@ -0,0 +1,5 @@
+r0 int32
+r1 int64
+S+0 int64
+=>
+r0 int32

@@ -0,0 +1,5 @@
+r0 int32
+r1 int64
+r2 int64
+=>
+r0 int32

@@ -0,0 +1,5 @@
+r0 int32
+r1 int64
+S+0 int64
+=>
+r0 int32

@@ -0,0 +1,5 @@
+r0 int32
+(r2, r3) int64
+S+0 int32
+=>
+r0 int32

@@ -0,0 +1,5 @@
+r0 int32
+(r2, r3) int64
+S+0 int32
+=>
+r0 int32

@@ -0,0 +1,5 @@
+r0 int32
+(r2, r3) int64
+S+0 int32
+=>
+r0 int32

@@ -0,0 +1,5 @@
+S+0 int32
+S+4 int64
+S+12 int32
+=>
+eax int32

@@ -0,0 +1,5 @@
+S+0 int32
+S+4 int64
+S+12 int32
+=>
+eax int32

@@ -0,0 +1,5 @@
+S+0 int32
+S+4 int64
+S+12 int32
+=>
+eax int32

@@ -0,0 +1,5 @@
+a0 int32
+(a1, a2) int64
+a3 int32
+=>
+a0 int32

@@ -0,0 +1,5 @@
+a0 int32
+a1 int64
+a2 int64
+=>
+a0 int32

@@ -0,0 +1,5 @@
+rdi int32
+rsi int64
+rdx int64
+=>
+rax int32

@@ -0,0 +1,5 @@
+rdi int32
+rsi int64
+rdx int64
+=>
+rax int32

@@ -0,0 +1,5 @@
+rdi int32
+rsi int64
+rdx int64
+=>
+rax int32

@@ -0,0 +1,5 @@
+rdi int32
+rsi int64
+rdx int64
+=>
+rax int32

@@ -0,0 +1,5 @@
+rcx int32
+rdx int64
+r8 int64
+=>
+rax int32

@@ -0,0 +1,7 @@
+v0 double
+v1 double
+v2 double
+v3 double
+v4 double
+=>
+v0 double

@@ -0,0 +1,7 @@
+v0 double
+v1 double
+v2 double
+v3 double
+v4 double
+=>
+v0 double

@@ -0,0 +1,7 @@
+v0 double
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+=>
+v0 double

@@ -0,0 +1,7 @@
+v0 double
+v1 double
+v2 double
+v3 double
+v4 double
+=>
+v0 double

@@ -0,0 +1,7 @@
+v0 double
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+=>
+v0 double

@@ -0,0 +1,7 @@
+(r0, r1) int64[double]
+(r2, r3) int64[double]
+S+0 double
+S+8 double
+S+16 double
+=>
+(r0, r1) int64[double]

@@ -0,0 +1,7 @@
+(r0, r1) int64[double]
+(r2, r3) int64[double]
+S+0 double
+S+8 double
+S+16 double
+=>
+(r0, r1) int64[double]

@@ -0,0 +1,7 @@
+(r0, r1) int64[double]
+(r2, r3) int64[double]
+S+0 double
+S+8 double
+S+16 double
+=>
+(r0, r1) int64[double]

@@ -0,0 +1,7 @@
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+S+32 double
+=>
+xmm0 double

@@ -0,0 +1,7 @@
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+S+32 double
+=>
+xmm0 double

@@ -0,0 +1,7 @@
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+S+32 double
+=>
+xmm0 double

@@ -0,0 +1,7 @@
+fa0 double
+(a0, a1) int64[double]
+(a2, a3) int64[double]
+(a4, a5) int64[double]
+(a6, a7) int64[double]
+=>
+fa0 double

@@ -0,0 +1,7 @@
+fa0 double
+a0 int64[double]
+a1 int64[double]
+a2 int64[double]
+a3 int64[double]
+=>
+fa0 double

@@ -0,0 +1,7 @@
+xmm0 double
+xmm1 double
+xmm2 double
+xmm3 double
+xmm4 double
+=>
+xmm0 double

@@ -0,0 +1,7 @@
+xmm0 double
+xmm1 double
+xmm2 double
+xmm3 double
+xmm4 double
+=>
+xmm0 double

@@ -0,0 +1,7 @@
+xmm0 double
+xmm1 double
+xmm2 double
+xmm3 double
+xmm4 double
+=>
+xmm0 double

@@ -0,0 +1,7 @@
+xmm0 double
+xmm1 double
+xmm2 double
+xmm3 double
+xmm4 double
+=>
+xmm0 double

@@ -0,0 +1,7 @@
+B(xmm0 double, rcx int64[double])
+B(xmm1 double, rdx int64[double])
+B(xmm2 double, r8 int64[double])
+B(xmm3 double, r9 int64[double])
+S+0 double
+=>
+xmm0 double

@@ -0,0 +1,7 @@
+r0 int64
+r1 int64
+r2 int64
+r3 int64
+r4 int64
+=>
+r0 int64

@@ -0,0 +1,7 @@
+r0 int64
+r1 int64
+r2 int64
+r3 int64
+r4 int64
+=>
+r0 int64

@@ -0,0 +1,7 @@
+r0 int64
+S+0 int64
+S+8 int64
+S+16 int64
+S+24 int64
+=>
+r0 int64

@@ -0,0 +1,7 @@
+r0 int64
+r1 int64
+r2 int64
+r3 int64
+r4 int64
+=>
+r0 int64

@@ -0,0 +1,7 @@
+r0 int64
+S+0 int64
+S+8 int64
+S+16 int64
+S+24 int64
+=>
+r0 int64

@@ -0,0 +1,7 @@
+r0 int32
+r1 int32
+r2 int32
+r3 int32
+S+0 int32
+=>
+r0 int32

@@ -0,0 +1,7 @@
+r0 int32
+r1 int32
+r2 int32
+r3 int32
+S+0 int32
+=>
+r0 int32

@@ -0,0 +1,7 @@
+r0 int32
+r1 int32
+r2 int32
+r3 int32
+S+0 int32
+=>
+r0 int32

@@ -0,0 +1,7 @@
+S+0 int32
+S+4 int32
+S+8 int32
+S+12 int32
+S+16 int32
+=>
+eax int32

@@ -0,0 +1,7 @@
+S+0 int32
+S+4 int32
+S+8 int32
+S+12 int32
+S+16 int32
+=>
+eax int32

@@ -0,0 +1,7 @@
+S+0 int32
+S+4 int32
+S+8 int32
+S+12 int32
+S+16 int32
+=>
+eax int32

@@ -0,0 +1,7 @@
+a0 int32
+a1 int32
+a2 int32
+a3 int32
+a4 int32
+=>
+a0 int32

@@ -0,0 +1,7 @@
+a0 int64
+a1 int64
+a2 int64
+a3 int64
+a4 int64
+=>
+a0 int64

@@ -0,0 +1,7 @@
+rdi int64
+rsi int64
+rdx int64
+rcx int64
+r8 int64
+=>
+rax int64

@@ -0,0 +1,7 @@
+rdi int64
+rsi int64
+rdx int64
+rcx int64
+r8 int64
+=>
+rax int64

@@ -0,0 +1,7 @@
+rdi int64
+rsi int64
+rdx int64
+rcx int64
+r8 int64
+=>
+rax int64

@@ -0,0 +1,7 @@
+rdi int64
+rsi int64
+rdx int64
+rcx int64
+r8 int64
+=>
+rax int64

@@ -0,0 +1,7 @@
+rcx int64
+rdx int64
+r8 int64
+r9 int64
+S+0 int64
+=>
+rax int64

@@ -0,0 +1,6 @@
+v0 double
+v1 double
+M(v2 double, v3 double, v4 double, v5 double) Struct(size: 32)
+v6 double
+=>
+v0 double

@@ -0,0 +1,6 @@
+v0 double
+v1 double
+M(v2 double, v3 double, v4 double, v5 double) Struct(size: 32)
+v6 double
+=>
+v0 double

@@ -0,0 +1,6 @@
+v0 double
+S+0 double
+S+8 Struct(size: 32)
+S+40 double
+=>
+v0 double

@@ -0,0 +1,6 @@
+v0 double
+v1 double
+M(v2 double, v3 double, v4 double, v5 double) Struct(size: 32)
+v6 double
+=>
+v0 double

@@ -0,0 +1,6 @@
+v0 double
+S+0 double
+S+8 Struct(size: 32)
+S+40 double
+=>
+v0 double

@@ -0,0 +1,6 @@
+(r0, r1) int64[double]
+(r2, r3) int64[double]
+M(S+0 int64, S+8 int64, S+16 int64, S+24 int64) Struct(size: 32)
+S+32 double
+=>
+(r0, r1) int64[double]

@@ -0,0 +1,6 @@
+(r0, r1) int64[double]
+(r2, r3) int64[double]
+M(S+0 int64, S+8 int64, S+16 int64, S+24 int64) Struct(size: 32)
+S+32 double
+=>
+(r0, r1) int64[double]

@@ -0,0 +1,6 @@
+(r0, r1) int64[double]
+(r2, r3) int64[double]
+M(S+0 int64, S+8 int64, S+16 int64, S+24 int64) Struct(size: 32)
+S+32 double
+=>
+(r0, r1) int64[double]

@@ -0,0 +1,6 @@
+S+0 double
+S+8 double
+S+16 Struct(size: 32)
+S+48 double
+=>
+xmm0 double

@@ -0,0 +1,6 @@
+S+0 double
+S+8 double
+S+16 Struct(size: 32)
+S+48 double
+=>
+xmm0 double

@@ -0,0 +1,6 @@
+S+0 double
+S+8 double
+S+16 Struct(size: 32)
+S+48 double
+=>
+xmm0 double

@@ -0,0 +1,6 @@
+fa0 double
+(a0, a1) int64[double]
+P(a2 uint32) Struct(size: 32)
+(a4, a5) int64[double]
+=>
+fa0 double

@@ -0,0 +1,6 @@
+fa0 double
+a0 int64[double]
+P(a1 int64) Struct(size: 32)
+a2 int64[double]
+=>
+fa0 double

@@ -0,0 +1,6 @@
+xmm0 double
+xmm1 double
+S+0 Struct(size: 32)
+xmm2 double
+=>
+xmm0 double

@@ -0,0 +1,6 @@
+xmm0 double
+xmm1 double
+S+0 Struct(size: 32)
+xmm2 double
+=>
+xmm0 double

@@ -0,0 +1,6 @@
+xmm0 double
+xmm1 double
+S+0 Struct(size: 32)
+xmm2 double
+=>
+xmm0 double

@@ -0,0 +1,6 @@
+xmm0 double
+xmm1 double
+S+0 Struct(size: 32)
+xmm2 double
+=>
+xmm0 double

@@ -0,0 +1,6 @@
+B(xmm0 double, rcx int64[double])
+B(xmm1 double, rdx int64[double])
+P(r8 int64) Struct(size: 32)
+B(xmm3 double, r9 int64[double])
+=>
+xmm0 double

@@ -0,0 +1,15 @@
+v0 double
+v1 double
+v2 double
+v3 double
+v4 double
+v5 double
+v6 double
+v7 double
+S+0 float
+S+8 Struct(size: 12)
+r0 int64
+r1 int32
+S+24 Struct(size: 12)
+=>
+v0 double

@@ -0,0 +1,15 @@
+v0 double
+v1 double
+v2 double
+v3 double
+v4 double
+v5 double
+v6 double
+v7 double
+S+0 float
+S+8 Struct(size: 12)
+r0 int64
+r1 int32
+S+24 Struct(size: 12)
+=>
+v0 double

@@ -0,0 +1,15 @@
+v0 double
+v1 double
+v2 double
+v3 double
+v4 double
+v5 double
+v6 double
+v7 double
+S+0 float
+S+4 Struct(size: 12)
+r0 int64
+S+16 int32
+S+24 Struct(size: 12)
+=>
+v0 double

@@ -0,0 +1,15 @@
+v0 double
+v1 double
+v2 double
+v3 double
+v4 double
+v5 double
+v6 double
+v7 double
+S+0 float
+S+8 Struct(size: 12)
+r0 int64
+r1 int32
+S+24 Struct(size: 12)
+=>
+v0 double

@@ -0,0 +1,15 @@
+v0 double
+v1 double
+v2 double
+v3 double
+v4 double
+v5 double
+v6 double
+v7 double
+S+0 float
+S+4 Struct(size: 12)
+r0 int64
+S+16 int32
+S+24 Struct(size: 12)
+=>
+v0 double

@@ -0,0 +1,15 @@
+(r0, r1) int64[double]
+(r2, r3) int64[double]
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+S+32 double
+S+40 double
+S+48 float
+M(S+52 int32, S+56 int32, S+60 int32) Struct(size: 12)
+S+64 int64
+S+72 int32
+M(S+76 int32, S+80 int32, S+84 int32) Struct(size: 12)
+=>
+(r0, r1) int64[double]

@@ -0,0 +1,15 @@
+(r0, r1) int64[double]
+(r2, r3) int64[double]
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+S+32 double
+S+40 double
+S+48 float
+M(S+52 int32, S+56 int32, S+60 int32) Struct(size: 12)
+S+64 int64
+S+72 int32
+M(S+76 int32, S+80 int32, S+84 int32) Struct(size: 12)
+=>
+(r0, r1) int64[double]

@@ -0,0 +1,15 @@
+(r0, r1) int64[double]
+(r2, r3) int64[double]
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+S+32 double
+S+40 double
+S+48 float
+M(S+52 int32, S+56 int32, S+60 int32) Struct(size: 12)
+S+64 int64
+S+72 int32
+M(S+76 int32, S+80 int32, S+84 int32) Struct(size: 12)
+=>
+(r0, r1) int64[double]

@@ -0,0 +1,15 @@
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+S+32 double
+S+40 double
+S+48 double
+S+56 double
+S+64 float
+S+68 Struct(size: 12)
+S+80 int64
+S+88 int32
+S+92 Struct(size: 12)
+=>
+xmm0 double

@@ -0,0 +1,15 @@
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+S+32 double
+S+40 double
+S+48 double
+S+56 double
+S+64 float
+S+68 Struct(size: 12)
+S+80 int64
+S+88 int32
+S+92 Struct(size: 12)
+=>
+xmm0 double

@@ -0,0 +1,15 @@
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+S+32 double
+S+40 double
+S+48 double
+S+56 double
+S+64 float
+S+68 Struct(size: 12)
+S+80 int64
+S+88 int32
+S+92 Struct(size: 12)
+=>
+xmm0 double

@@ -0,0 +1,15 @@
+fa0 double
+fa1 double
+fa2 double
+fa3 double
+fa4 double
+fa5 double
+fa6 double
+fa7 double
+a0 int32[float]
+P(a1 uint32) Struct(size: 12)
+(a2, a3) int64
+a4 int32
+P(a5 uint32) Struct(size: 12)
+=>
+fa0 double

@@ -0,0 +1,15 @@
+fa0 double
+fa1 double
+fa2 double
+fa3 double
+fa4 double
+fa5 double
+fa6 double
+fa7 double
+a0 int32[float]
+M(a1 int64, a2 int64) Struct(size: 12)
+a3 int64
+a4 int32
+M(a5 int64, a6 int64) Struct(size: 12)
+=>
+fa0 double

@@ -0,0 +1,15 @@
+xmm0 double
+xmm1 double
+xmm2 double
+xmm3 double
+xmm4 double
+xmm5 double
+xmm6 double
+xmm7 double
+S+0 float
+S+8 Struct(size: 12)
+rdi int64
+rsi int32
+S+24 Struct(size: 12)
+=>
+xmm0 double

@@ -0,0 +1,15 @@
+xmm0 double
+xmm1 double
+xmm2 double
+xmm3 double
+xmm4 double
+xmm5 double
+xmm6 double
+xmm7 double
+S+0 float
+S+8 Struct(size: 12)
+rdi int64
+rsi int32
+S+24 Struct(size: 12)
+=>
+xmm0 double

@@ -0,0 +1,15 @@
+xmm0 double
+xmm1 double
+xmm2 double
+xmm3 double
+xmm4 double
+xmm5 double
+xmm6 double
+xmm7 double
+S+0 float
+S+8 Struct(size: 12)
+rdi int64
+rsi int32
+S+24 Struct(size: 12)
+=>
+xmm0 double

@@ -0,0 +1,15 @@
+xmm0 double
+xmm1 double
+xmm2 double
+xmm3 double
+xmm4 double
+xmm5 double
+xmm6 double
+xmm7 double
+S+0 float
+S+8 Struct(size: 12)
+rdi int64
+rsi int32
+S+24 Struct(size: 12)
+=>
+xmm0 double

@@ -0,0 +1,15 @@
+B(xmm0 double, rcx int64[double])
+B(xmm1 double, rdx int64[double])
+B(xmm2 double, r8 int64[double])
+B(xmm3 double, r9 int64[double])
+S+0 double
+S+8 double
+S+16 double
+S+24 double
+S+32 float
+P(S+40 int64) Struct(size: 12)
+S+48 int64
+S+56 int32
+P(S+64 int64) Struct(size: 12)
+=>
+xmm0 double

@@ -0,0 +1,5 @@
+v0 double
+P(r0 int64) Struct(size: 20)
+v1 double
+=>
+v0 double

@@ -0,0 +1,5 @@
+v0 double
+P(r0 int64) Struct(size: 20)
+v1 double
+=>
+v0 double
Some files were not shown because too many files have changed in this diff.