LibJIT: Make X86_64Assembler::native_call take u64 instead of void*

Now that the x86-specific Assembler will be compiled on every
architecture, we can't rely on void* being the right width.
This also fixes compilation on targets where void* has a different
width than u64 (WASM in particular).
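For context, a minimal standalone sketch (not part of this patch; it uses std::bit_cast and std::uint64_t rather than AK's bit_cast and u64) of why converting inside the assembler broke on 32-bit-pointer targets: bit_cast is only well-formed when source and destination have the same size, which holds only where pointers are 8 bytes wide.

// Illustrative only: std::bit_cast<To>(from) is ill-formed unless
// sizeof(To) == sizeof(From).
#include <bit>
#include <cstdint>

std::uint64_t pointer_to_u64(void* callee)
{
    // Fine on x86_64, where sizeof(void*) == 8.
    // Fails to compile on wasm32, where sizeof(void*) == 4.
    return std::bit_cast<std::uint64_t>(callee);
}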
Nikodem Rabuliński 2023-11-05 21:16:08 +01:00 committed by Andreas Kling
parent 8aa35f4fab
commit bacbd830fe
2 changed files with 4 additions and 4 deletions


@@ -731,7 +731,7 @@ struct X86_64Assembler {
     }
 
     void native_call(
-        void* callee,
+        u64 callee,
         Vector<Operand> const& preserved_registers = {},
         Vector<Operand> const& stack_arguments = {})
     {
@@ -746,14 +746,14 @@ struct X86_64Assembler {
             push(stack_argument);
 
         // load callee into RAX
-        mov(Operand::Register(Reg::RAX), Operand::Imm(bit_cast<u64>(callee)));
+        mov(Operand::Register(Reg::RAX), Operand::Imm(callee));
 
         // call RAX
         emit8(0xff);
         emit_modrm_slash(2, Operand::Register(Reg::RAX));
 
         if (!stack_arguments.is_empty() || needs_aligning)
-            add(Operand::Register(Reg::RSP), Operand::Imm((stack_arguments.size() + (needs_aligning ? 1 : 0)) * sizeof(void*)));
+            add(Operand::Register(Reg::RSP), Operand::Imm((stack_arguments.size() + (needs_aligning ? 1 : 0)) * sizeof(u64)));
 
         for (auto const& reg : preserved_registers)
             pop(reg);
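
As an aside, here is a small standalone sketch (not LibJIT code) of what emit8(0xff) followed by emit_modrm_slash(2, RAX) produces: CALL r/m64 is opcode 0xFF with /2 in the ModRM reg field, and a register-direct RAX operand gives ModRM byte 0xD0, i.e. the two-byte sequence "ff d0".

// Standalone sketch of the ModRM byte for "call rax" (not LibJIT code).
#include <cstdint>
#include <cstdio>

int main()
{
    std::uint8_t const opcode = 0xff; // CALL r/m64
    std::uint8_t const mod = 0b11;    // register-direct addressing
    std::uint8_t const reg = 2;       // the "/2" opcode extension selecting CALL
    std::uint8_t const rm = 0;        // RAX
    std::uint8_t const modrm = (mod << 6) | (reg << 3) | rm;
    std::printf("%02x %02x\n", opcode, modrm); // prints "ff d0"
    return 0;
}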


@@ -1919,7 +1919,7 @@ void Compiler::native_call(void* function_address, Vector<Assembler::Operand> const& stack_arguments)
 {
     // NOTE: We don't preserve caller-saved registers when making a native call.
     //       This means that they may have changed after we return from the call.
-    m_assembler.native_call(function_address, { Assembler::Operand::Register(ARG0) }, stack_arguments);
+    m_assembler.native_call(bit_cast<u64>(function_address), { Assembler::Operand::Register(ARG0) }, stack_arguments);
 }
 
 OwnPtr<NativeExecutable> Compiler::compile(Bytecode::Executable& bytecode_executable)
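
After this change, the conversion to u64 happens at the call site, as Compiler::native_call now does above. A hypothetical call site might look like the following sketch (it assumes the assembler lives in namespace JIT; the helper function and variable names are made up for illustration):

// Hypothetical usage sketch; "print_hello" and "emit_hello_call" are illustrative.
extern "C" void print_hello();

void emit_hello_call(JIT::X86_64Assembler& assembler)
{
    // The caller now converts the function pointer to u64 itself.
    assembler.native_call(bit_cast<u64>(&print_hello));
}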