/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Andrzej Zaborowski
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "tcg-be-ldst.h"

/* The __ARM_ARCH define is provided by gcc 4.8.  Construct it otherwise.  */
#ifndef __ARM_ARCH
# if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \
     || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \
     || defined(__ARM_ARCH_7EM__)
#  define __ARM_ARCH 7
# elif defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \
       || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \
       || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__)
#  define __ARM_ARCH 6
# elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5E__) \
       || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \
       || defined(__ARM_ARCH_5TEJ__)
#  define __ARM_ARCH 5
# else
#  define __ARM_ARCH 4
# endif
#endif

static int arm_arch = __ARM_ARCH;

#if defined(__ARM_ARCH_5T__) \
    || defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
# define use_armv5t_instructions 1
#else
# define use_armv5t_instructions use_armv6_instructions
#endif

#define use_armv6_instructions  (__ARM_ARCH >= 6 || arm_arch >= 6)
#define use_armv7_instructions  (__ARM_ARCH >= 7 || arm_arch >= 7)

#ifndef use_idiv_instructions
bool use_idiv_instructions;
#endif
#ifdef CONFIG_GETAUXVAL
# include <sys/auxv.h>
#endif

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%r2",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%pc",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R13,
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R12,
    TCG_REG_R14,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3
};
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_R0, TCG_REG_R1
};

#define TCG_REG_TMP  TCG_REG_R12

static inline void reloc_abs32(void *code_ptr, intptr_t target)
{
    *(uint32_t *) code_ptr = target;
}

static inline void reloc_pc24(void *code_ptr, intptr_t target)
{
    uint32_t offset = ((target - ((intptr_t)code_ptr + 8)) >> 2);

    *(uint32_t *) code_ptr = ((*(uint32_t *) code_ptr) & ~0xffffff)
                             | (offset & 0xffffff);
}
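
/* Note (illustrative): the offset field of a B/BL instruction is a signed
 * word count relative to the address of the branch plus 8 (the value the
 * PC reads as on ARM), which is why the computation above adds 8 to
 * code_ptr and shifts the byte displacement right by 2.
 */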

static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    switch (type) {
    case R_ARM_ABS32:
        reloc_abs32(code_ptr, value);
        break;

    case R_ARM_CALL:
    case R_ARM_JUMP24:
    default:
        tcg_abort();

    case R_ARM_PC24:
        reloc_pc24(code_ptr, value);
        break;
    }
}

#define TCG_CT_CONST_ARM  0x100
#define TCG_CT_CONST_INV  0x200
#define TCG_CT_CONST_NEG  0x400
#define TCG_CT_CONST_ZERO 0x800
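
/* Descriptive note: these bits are set by the constraint letters parsed
 * below ('I', 'K', 'N', 'Z') and tested in tcg_target_const_match().  For
 * example, an operand constrained as "rIN" accepts a register, an
 * immediate that encode_imm() can represent, or an immediate whose
 * negation can be represented.
 */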

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'I':
        ct->ct |= TCG_CT_CONST_ARM;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_INV;
        break;
    case 'N': /* The gcc constraint letter is L, already used here.  */
        ct->ct |= TCG_CT_CONST_NEG;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;

    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        break;

    /* qemu_ld address */
    case 'l':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
#ifdef CONFIG_SOFTMMU
        /* r0-r2,lr will be overwritten when reading the tlb entry,
           so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    /* qemu_st address & data_reg */
    case 's':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, (1 << TCG_TARGET_NB_REGS) - 1);
        /* r0-r2 will be overwritten when reading the tlb entry (softmmu only)
           and r0-r1 when doing the byte swapping, so don't use these. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R1);
#if defined(CONFIG_SOFTMMU)
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R2);
#if TARGET_LONG_BITS == 64
        /* Avoid clashes with registers being used for helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#endif
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R14);
#endif
        break;

    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;

    return 0;
}

static inline uint32_t rotl(uint32_t val, int n)
{
    return (val << n) | (val >> (32 - n));
}

/* ARM immediates for ALU instructions are made of an unsigned 8-bit
   right-rotated by an even amount between 0 and 30. */
static inline int encode_imm(uint32_t imm)
{
    int shift;

    /* simple case, only lower bits */
    if ((imm & ~0xff) == 0)
        return 0;
    /* then try a simple even shift */
    shift = ctz32(imm) & ~1;
    if (((imm >> shift) & ~0xff) == 0)
        return 32 - shift;
    /* now try harder with rotations */
    if ((rotl(imm, 2) & ~0xff) == 0)
        return 2;
    if ((rotl(imm, 4) & ~0xff) == 0)
        return 4;
    if ((rotl(imm, 6) & ~0xff) == 0)
        return 6;
    /* imm can't be encoded */
    return -1;
}
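
/* Worked example (illustrative): 0xff000000 has ctz32 == 24, so the
 * "simple even shift" case returns 32 - 24 = 8, i.e. 0xff rotated right
 * by 8.  0xf000000f fails the shift test, but rotl(imm, 4) == 0xff, so
 * encode_imm() returns 4 (0xff rotated right by 4).  A value such as
 * 0x101 cannot be reduced to a single rotated byte and yields -1.
 */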

static inline int check_fit_imm(uint32_t imm)
{
    return encode_imm(imm) >= 0;
}

/* Test if a constant matches the constraint.
 * TODO: define constraints for:
 *
 * ldr/str offset:   between -0xfff and 0xfff
 * ldrh/strh offset: between -0xff and 0xff
 * mov operand2:     values represented with x << (2 * y), x < 0x100
 * add, sub, eor...: ditto
 */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ARM) && check_fit_imm(val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_INV) && check_fit_imm(~val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_NEG) && check_fit_imm(-val)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else {
        return 0;
    }
}

#define TO_CPSR (1 << 20)

typedef enum {
    ARITH_AND = 0x0 << 21,
    ARITH_EOR = 0x1 << 21,
    ARITH_SUB = 0x2 << 21,
    ARITH_RSB = 0x3 << 21,
    ARITH_ADD = 0x4 << 21,
    ARITH_ADC = 0x5 << 21,
    ARITH_SBC = 0x6 << 21,
    ARITH_RSC = 0x7 << 21,
    ARITH_TST = 0x8 << 21 | TO_CPSR,
    ARITH_CMP = 0xa << 21 | TO_CPSR,
    ARITH_CMN = 0xb << 21 | TO_CPSR,
    ARITH_ORR = 0xc << 21,
    ARITH_MOV = 0xd << 21,
    ARITH_BIC = 0xe << 21,
    ARITH_MVN = 0xf << 21,

    INSN_LDR_IMM   = 0x04100000,
    INSN_LDR_REG   = 0x06100000,
    INSN_STR_IMM   = 0x04000000,
    INSN_STR_REG   = 0x06000000,

    INSN_LDRH_IMM  = 0x005000b0,
    INSN_LDRH_REG  = 0x001000b0,
    INSN_LDRSH_IMM = 0x005000f0,
    INSN_LDRSH_REG = 0x001000f0,
    INSN_STRH_IMM  = 0x004000b0,
    INSN_STRH_REG  = 0x000000b0,

    INSN_LDRB_IMM  = 0x04500000,
    INSN_LDRB_REG  = 0x06500000,
    INSN_LDRSB_IMM = 0x005000d0,
    INSN_LDRSB_REG = 0x001000d0,
    INSN_STRB_IMM  = 0x04400000,
    INSN_STRB_REG  = 0x06400000,

    INSN_LDRD_IMM  = 0x004000d0,
    INSN_LDRD_REG  = 0x000000d0,
    INSN_STRD_IMM  = 0x004000f0,
    INSN_STRD_REG  = 0x000000f0,
} ARMInsn;

#define SHIFT_IMM_LSL(im)  (((im) << 7) | 0x00)
#define SHIFT_IMM_LSR(im)  (((im) << 7) | 0x20)
#define SHIFT_IMM_ASR(im)  (((im) << 7) | 0x40)
#define SHIFT_IMM_ROR(im)  (((im) << 7) | 0x60)
#define SHIFT_REG_LSL(rs)  (((rs) << 8) | 0x10)
#define SHIFT_REG_LSR(rs)  (((rs) << 8) | 0x30)
#define SHIFT_REG_ASR(rs)  (((rs) << 8) | 0x50)
#define SHIFT_REG_ROR(rs)  (((rs) << 8) | 0x70)

enum arm_cond_code_e {
    COND_EQ = 0x0,
    COND_NE = 0x1,
    COND_CS = 0x2,  /* Unsigned greater or equal */
    COND_CC = 0x3,  /* Unsigned less than */
    COND_MI = 0x4,  /* Negative */
    COND_PL = 0x5,  /* Zero or greater */
    COND_VS = 0x6,  /* Overflow */
    COND_VC = 0x7,  /* No overflow */
    COND_HI = 0x8,  /* Unsigned greater than */
    COND_LS = 0x9,  /* Unsigned less or equal */
    COND_GE = 0xa,
    COND_LT = 0xb,
    COND_GT = 0xc,
    COND_LE = 0xd,
    COND_AL = 0xe,
};

static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_GT,
    /* unsigned */
    [TCG_COND_LTU] = COND_CC,
    [TCG_COND_GEU] = COND_CS,
    [TCG_COND_LEU] = COND_LS,
    [TCG_COND_GTU] = COND_HI,
};
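
/* Descriptive note on the table above: signed TCG comparisons map onto
 * the ARM signed conditions (LT/GE/GT/LE) and the unsigned ones onto the
 * carry/zero based conditions (CC/CS/LS/HI); EQ/NE are shared by both.
 */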

static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
}

static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_b_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by skipping
       the corresponding bytes.  This ensures that caches and memory are
       kept coherent during retranslation. */
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0a);
}

static inline void tcg_out_bl_noaddr(TCGContext *s, int cond)
{
    /* We pay attention here to not modify the branch target by skipping
       the corresponding bytes.  This ensures that caches and memory are
       kept coherent during retranslation. */
    s->code_ptr += 3;
    tcg_out8(s, (cond << 4) | 0x0b);
}

static inline void tcg_out_bl(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0b000000 |
                    (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_blx(TCGContext *s, int cond, int rn)
{
    tcg_out32(s, (cond << 28) | 0x012fff30 | rn);
}

static inline void tcg_out_blx_imm(TCGContext *s, int32_t offset)
{
    tcg_out32(s, 0xfa000000 | ((offset & 2) << 23) |
              (((offset - 8) >> 2) & 0x00ffffff));
}

static inline void tcg_out_dat_reg(TCGContext *s,
                int cond, int opc, int rd, int rn, int rm, int shift)
{
    tcg_out32(s, (cond << 28) | (0 << 25) | opc |
                    (rn << 16) | (rd << 12) | shift | rm);
}

static inline void tcg_out_nop(TCGContext *s)
{
    if (use_armv7_instructions) {
        /* Architected nop introduced in v6k.  */
        /* ??? This is an MSR (imm) 0,0,0 insn.  Anyone know if this
           also Just So Happened to do nothing on pre-v6k so that we
           don't need to conditionalize it?  */
        tcg_out32(s, 0xe320f000);
    } else {
        /* Prior to that the assembler uses mov r0, r0.  */
        tcg_out_dat_reg(s, COND_AL, ARITH_MOV, 0, 0, 0, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
{
    /* Simple reg-reg move, optimising out the 'do nothing' case */
    if (rd != rm) {
        tcg_out_dat_reg(s, cond, ARITH_MOV, rd, 0, rm, SHIFT_IMM_LSL(0));
    }
}

static inline void tcg_out_dat_imm(TCGContext *s,
                int cond, int opc, int rd, int rn, int im)
{
    tcg_out32(s, (cond << 28) | (1 << 25) | opc |
                    (rn << 16) | (rd << 12) | im);
}

static void tcg_out_movi32(TCGContext *s, int cond, int rd, uint32_t arg)
{
    int rot, opc, rn;

    /* For armv7, make sure not to use movw+movt when mov/mvn would do.
       Speed things up by only checking when movt would be required.
       Prior to armv7, have one go at fully rotated immediates before
       doing the decomposition thing below.  */
    if (!use_armv7_instructions || (arg & 0xffff0000)) {
        rot = encode_imm(arg);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_MOV, rd, 0,
                            rotl(arg, rot) | (rot << 7));
            return;
        }
        rot = encode_imm(~arg);
        if (rot >= 0) {
            tcg_out_dat_imm(s, cond, ARITH_MVN, rd, 0,
                            rotl(~arg, rot) | (rot << 7));
            return;
        }
    }

    /* Use movw + movt.  */
    if (use_armv7_instructions) {
        /* movw */
        tcg_out32(s, (cond << 28) | 0x03000000 | (rd << 12)
                  | ((arg << 4) & 0x000f0000) | (arg & 0xfff));
        if (arg & 0xffff0000) {
            /* movt */
            tcg_out32(s, (cond << 28) | 0x03400000 | (rd << 12)
                      | ((arg >> 12) & 0x000f0000) | ((arg >> 16) & 0xfff));
        }
        return;
    }

    /* TODO: This is very suboptimal, we can easily have a constant
       pool somewhere after all the instructions.  */
    opc = ARITH_MOV;
    rn = 0;
    /* If we have lots of leading 1's, we can shorten the sequence by
       beginning with mvn and then clearing higher bits with eor.  */
    if (clz32(~arg) > clz32(arg)) {
        opc = ARITH_MVN, arg = ~arg;
    }
    do {
        int i = ctz32(arg) & ~1;
        rot = ((32 - i) << 7) & 0xf00;
        tcg_out_dat_imm(s, cond, opc, rd, rn, ((arg >> i) & 0xff) | rot);
        arg &= ~(0xff << i);

        opc = ARITH_EOR;
        rn = rd;
    } while (arg);
}
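
/* Illustrative examples of the paths above: 0x0000ffff on ARMv7 needs
 * only a movw, while 0x12345678 needs movw + movt.  Without ARMv7, a
 * constant like 0xfffeefa0 has more leading ones than zeroes, so the
 * decomposition starts from mvn and becomes
 *     mvn rd, #0x5f ; eor rd, rd, #0x11000
 * instead of a four-instruction mov/orr sequence.
 */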

static inline void tcg_out_dat_rI(TCGContext *s, int cond, int opc, TCGArg dst,
                                  TCGArg lhs, TCGArg rhs, int rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rI" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        assert(rot >= 0);
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIK(TCGContext *s, int cond, int opc, int opinv,
                            TCGReg dst, TCGReg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIK" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = ~rhs;
            rot = encode_imm(rhs);
            assert(rot >= 0);
            opc = opinv;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}

static void tcg_out_dat_rIN(TCGContext *s, int cond, int opc, int opneg,
                            TCGArg dst, TCGArg lhs, TCGArg rhs,
                            bool rhs_is_const)
{
    /* Emit either the reg,imm or reg,reg form of a data-processing insn.
     * rhs must satisfy the "rIN" constraint.
     */
    if (rhs_is_const) {
        int rot = encode_imm(rhs);
        if (rot < 0) {
            rhs = -rhs;
            rot = encode_imm(rhs);
            assert(rot >= 0);
            opc = opneg;
        }
        tcg_out_dat_imm(s, cond, opc, dst, lhs, rotl(rhs, rot) | (rot << 7));
    } else {
        tcg_out_dat_reg(s, cond, opc, dst, lhs, rhs, SHIFT_IMM_LSL(0));
    }
}
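
/* Illustrative example of the trick above: with opc/opneg of
 * ARITH_ADD/ARITH_SUB, a constant of -4 (not encodable as a rotated
 * immediate) is emitted as "sub rd, rn, #4"; with opc/opinv of
 * ARITH_AND/ARITH_BIC, a mask of 0xffffff00 is emitted as
 * "bic rd, rn, #0xff".
 */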

static inline void tcg_out_mul32(TCGContext *s, int cond, TCGReg rd,
                                 TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && d == n then UNPREDICTABLE;  */
    if (!use_armv6_instructions && rd == rn) {
        if (rd == rm) {
            /* rd == rn == rm; copy an input to tmp first.  */
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rm = rn = TCG_REG_TMP;
        } else {
            rn = rm;
            rm = rd;
        }
    }
    /* mul */
    tcg_out32(s, (cond << 28) | 0x90 | (rd << 16) | (rm << 8) | rn);
}

static inline void tcg_out_umull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* umull */
    tcg_out32(s, (cond << 28) | 0x00800090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_smull32(TCGContext *s, int cond, TCGReg rd0,
                                   TCGReg rd1, TCGReg rn, TCGReg rm)
{
    /* if ArchVersion() < 6 && (dHi == n || dLo == n) then UNPREDICTABLE;  */
    if (!use_armv6_instructions && (rd0 == rn || rd1 == rn)) {
        if (rd0 == rm || rd1 == rm) {
            tcg_out_mov_reg(s, cond, TCG_REG_TMP, rn);
            rn = TCG_REG_TMP;
        } else {
            TCGReg t = rn;
            rn = rm;
            rm = t;
        }
    }
    /* smull */
    tcg_out32(s, (cond << 28) | 0x00c00090 |
              (rd1 << 16) | (rd0 << 12) | (rm << 8) | rn);
}

static inline void tcg_out_sdiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0710f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_udiv(TCGContext *s, int cond, int rd, int rn, int rm)
{
    tcg_out32(s, 0x0730f010 | (cond << 28) | (rd << 16) | rn | (rm << 8));
}

static inline void tcg_out_ext8s(TCGContext *s, int cond,
                                 int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxtb */
        tcg_out32(s, 0x06af0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(24));
    }
}

static inline void tcg_out_ext8u(TCGContext *s, int cond,
                                 int rd, int rn)
{
    tcg_out_dat_imm(s, cond, ARITH_AND, rd, rn, 0xff);
}

static inline void tcg_out_ext16s(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* sxth */
        tcg_out32(s, 0x06bf0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_ASR(16));
    }
}

static inline void tcg_out_ext16u(TCGContext *s, int cond,
                                  int rd, int rn)
{
    if (use_armv6_instructions) {
        /* uxth */
        tcg_out32(s, 0x06ff0070 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_LSL(16));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rd, SHIFT_IMM_LSR(16));
    }
}

static inline void tcg_out_bswap16s(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* revsh */
        tcg_out32(s, 0x06ff0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_ASR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

static inline void tcg_out_bswap16(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSL(24));
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, TCG_REG_TMP, SHIFT_IMM_LSR(16));
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSR(8));
    }
}

/* swap the two low bytes assuming that the two high input bytes and the
   two high output bytes can hold any value. */
static inline void tcg_out_bswap16st(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev16 */
        tcg_out32(s, 0x06bf0fb0 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        TCG_REG_TMP, 0, rn, SHIFT_IMM_LSR(8));
        tcg_out_dat_imm(s, cond, ARITH_AND, TCG_REG_TMP, TCG_REG_TMP, 0xff);
        tcg_out_dat_reg(s, cond, ARITH_ORR,
                        rd, TCG_REG_TMP, rn, SHIFT_IMM_LSL(8));
    }
}

static inline void tcg_out_bswap32(TCGContext *s, int cond, int rd, int rn)
{
    if (use_armv6_instructions) {
        /* rev */
        tcg_out32(s, 0x06bf0f30 | (cond << 28) | (rd << 12) | rn);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        TCG_REG_TMP, rn, rn, SHIFT_IMM_ROR(16));
        tcg_out_dat_imm(s, cond, ARITH_BIC,
                        TCG_REG_TMP, TCG_REG_TMP, 0xff | 0x800);
        tcg_out_dat_reg(s, cond, ARITH_MOV,
                        rd, 0, rn, SHIFT_IMM_ROR(8));
        tcg_out_dat_reg(s, cond, ARITH_EOR,
                        rd, rd, TCG_REG_TMP, SHIFT_IMM_LSR(8));
    }
}
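
/* The pre-v6 sequence above is the classic ARM byte-reverse idiom;
 * illustrative trace for bytes A B C D in rn:
 *   eor tmp, rn, rn, ror #16   -> (A^C) (B^D) (A^C) (B^D)
 *   bic tmp, tmp, #0x00ff0000  -> (A^C)   0   (A^C) (B^D)
 *   mov rd, rn, ror #8         ->   D     A     B     C
 *   eor rd, rd, tmp, lsr #8    ->   D     C     B     A
 */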

bool tcg_target_deposit_valid(int ofs, int len)
{
    /* ??? Without bfi, we could improve over generic code by combining
       the right-shift from a non-zero ofs with the orr.  We do run into
       problems when rd == rs, and the mask generated from ofs+len doesn't
       fit into an immediate.  We would have to be careful not to pessimize
       wrt the optimizations performed on the expanded code.  */
    return use_armv7_instructions;
}

static inline void tcg_out_deposit(TCGContext *s, int cond, TCGReg rd,
                                   TCGArg a1, int ofs, int len, bool const_a1)
{
    if (const_a1) {
        /* bfi becomes bfc with rn == 15.  */
        a1 = 15;
    }
    /* bfi/bfc */
    tcg_out32(s, 0x07c00010 | (cond << 28) | (rd << 12) | a1
              | (ofs << 7) | ((ofs + len - 1) << 16));
}

/* Note that this routine is used for both LDR and LDRH formats, so we do
   not wish to include an immediate shift at this point. */
static void tcg_out_memop_r(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, TCGReg rm, bool u, bool p, bool w)
{
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24)
              | (w << 21) | (rn << 16) | (rt << 12) | rm);
}

static void tcg_out_memop_8(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                            TCGReg rn, int imm8, bool p, bool w)
{
    bool u = 1;
    if (imm8 < 0) {
        imm8 = -imm8;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | ((imm8 & 0xf0) << 4) | (imm8 & 0xf));
}

static void tcg_out_memop_12(TCGContext *s, int cond, ARMInsn opc, TCGReg rt,
                             TCGReg rn, int imm12, bool p, bool w)
{
    bool u = 1;
    if (imm12 < 0) {
        imm12 = -imm12;
        u = 0;
    }
    tcg_out32(s, (cond << 28) | opc | (u << 23) | (p << 24) | (w << 21) |
              (rn << 16) | (rt << 12) | imm12);
}
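
/* In the three emitters above the booleans map onto the ARM load/store
 * addressing-mode bits: u selects adding (1) or subtracting (0) the
 * offset, p selects pre-indexed (1) versus post-indexed (0) addressing,
 * and w requests base-register writeback.  The u=1/p=1/w=0 combination
 * used by the plain helpers below is the ordinary "ldr rt, [rn, #imm]"
 * or "[rn, rm]" offset form; the _rwb variants set w=1 for writeback.
 */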

static inline void tcg_out_ld32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st32_12(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STR_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st32_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ldrd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ldrd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRD_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_strd_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRD_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_strd_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRD_REG, rt, rn, rm, 1, 1, 0);
}

/* Register pre-increment with base writeback.  */
static inline void tcg_out_ld32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_st32_rwb(TCGContext *s, int cond, TCGReg rt,
                                    TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STR_REG, rt, rn, rm, 1, 1, 1);
}

static inline void tcg_out_ld16u_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_st16_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_STRH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16u_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st16_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld16s_8(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSH_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld16s_r(TCGContext *s, int cond, TCGReg rt,
                                   TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSH_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_LDRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_st8_12(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm12)
{
    tcg_out_memop_12(s, cond, INSN_STRB_IMM, rt, rn, imm12, 1, 0);
}

static inline void tcg_out_ld8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_st8_r(TCGContext *s, int cond, TCGReg rt,
                                 TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_STRB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld8s_8(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, int imm8)
{
    tcg_out_memop_8(s, cond, INSN_LDRSB_IMM, rt, rn, imm8, 1, 0);
}

static inline void tcg_out_ld8s_r(TCGContext *s, int cond, TCGReg rt,
                                  TCGReg rn, TCGReg rm)
{
    tcg_out_memop_r(s, cond, INSN_LDRSB_REG, rt, rn, rm, 1, 1, 0);
}

static inline void tcg_out_ld32u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_st32(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st32_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st32_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16u_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16u_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld16s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld16s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld16s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st16(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st16_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st16_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8u(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8_12(s, cond, rd, rn, offset);
}

static inline void tcg_out_ld8s(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xff || offset < -0xff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_ld8s_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_ld8s_8(s, cond, rd, rn, offset);
}

static inline void tcg_out_st8(TCGContext *s, int cond,
                int rd, int rn, int32_t offset)
{
    if (offset > 0xfff || offset < -0xfff) {
        tcg_out_movi32(s, cond, TCG_REG_TMP, offset);
        tcg_out_st8_r(s, cond, rd, rn, TCG_REG_TMP);
    } else
        tcg_out_st8_12(s, cond, rd, rn, offset);
}

/* The _goto case is normally between TBs within the same code buffer, and
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
static inline void tcg_out_goto(TCGContext *s, int cond, uint32_t addr)
{
    int32_t disp = addr - (tcg_target_long) s->code_ptr;

    if ((addr & 1) == 0 && disp - 8 < 0x01fffffd && disp - 8 > -0x01fffffd) {
        tcg_out_b(s, cond, disp);
        return;
    }

    tcg_out_movi32(s, cond, TCG_REG_TMP, addr);
    if (use_armv5t_instructions) {
        tcg_out_bx(s, cond, TCG_REG_TMP);
    } else {
        if (addr & 1) {
            tcg_abort();
        }
        tcg_out_mov_reg(s, cond, TCG_REG_PC, TCG_REG_TMP);
    }
}
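
/* Descriptive note: the displacement test above corresponds to the
 * roughly +/-32MB reach of the 24-bit word offset in B/BL; tcg_out_call()
 * below applies a similar check before falling back to loading the
 * target address into a register.
 */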

/* The call case is mostly used for helpers - so it's not unreasonable
 * for them to be beyond branch range */
static inline void tcg_out_call(TCGContext *s, uint32_t addr)
{
    int32_t val;

    val = addr - (tcg_target_long) s->code_ptr;
    if (val - 8 < 0x02000000 && val - 8 >= -0x02000000) {
        if (addr & 1) {
            /* Use BLX if the target is in Thumb mode */
            if (!use_armv5t_instructions) {
                tcg_abort();
            }
            tcg_out_blx_imm(s, val);
        } else {
            tcg_out_bl(s, COND_AL, val);
        }
    } else if (use_armv7_instructions) {
        tcg_out_movi32(s, COND_AL, TCG_REG_TMP, addr);
        tcg_out_blx(s, COND_AL, TCG_REG_TMP);
    } else {
        tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R14, TCG_REG_PC, 4);
        tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
        tcg_out32(s, addr);
    }
}

static inline void tcg_out_callr(TCGContext *s, int cond, int arg)
{
    if (use_armv5t_instructions) {
        tcg_out_blx(s, cond, arg);
    } else {
        tcg_out_dat_reg(s, cond, ARITH_MOV, TCG_REG_R14, 0,
                        TCG_REG_PC, SHIFT_IMM_LSL(0));
        tcg_out_bx(s, cond, arg);
    }
}

static inline void tcg_out_goto_label(TCGContext *s, int cond, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        tcg_out_goto(s, cond, l->u.value);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_ARM_PC24, label_index, 31337);
        tcg_out_b_noaddr(s, cond);
    }
}
#ifdef CONFIG_SOFTMMU
|
2013-07-28 00:09:47 +00:00
|
|
|
/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
|
|
|
|
* int mmu_idx, uintptr_t ra)
|
|
|
|
*/
|
|
|
|
static const void * const qemu_ld_helpers[8] = {
|
|
|
|
helper_ret_ldub_mmu,
|
|
|
|
helper_ret_lduw_mmu,
|
|
|
|
helper_ret_ldul_mmu,
|
|
|
|
helper_ret_ldq_mmu,
|
|
|
|
|
|
|
|
helper_ret_ldsb_mmu,
|
|
|
|
helper_ret_ldsw_mmu,
|
|
|
|
helper_ret_ldul_mmu,
|
|
|
|
helper_ret_ldq_mmu,
|
2011-09-18 14:55:46 +00:00
|
|
|
};
|
|
|
|
|
2013-07-28 00:09:47 +00:00
|
|
|
/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
|
|
|
|
* uintxx_t val, int mmu_idx, uintptr_t ra)
|
|
|
|
*/
|
2011-09-18 14:55:46 +00:00
|
|
|
static const void * const qemu_st_helpers[4] = {
|
2013-07-28 00:09:47 +00:00
|
|
|
helper_ret_stb_mmu,
|
|
|
|
helper_ret_stw_mmu,
|
|
|
|
helper_ret_stl_mmu,
|
|
|
|
helper_ret_stq_mmu,
|
2011-09-18 14:55:46 +00:00
|
|
|
};
|
2012-08-26 13:40:02 +00:00
|
|
|
|
|
|
|
/* Helper routines for marshalling helper function arguments into
|
|
|
|
* the correct registers and stack.
|
|
|
|
* argreg is where we want to put this argument, arg is the argument itself.
|
|
|
|
* Return value is the updated argreg ready for the next call.
|
|
|
|
 * Note that argregs 0..3 are real registers; 4 and above go on the stack.
|
|
|
|
*
|
|
|
|
* We provide routines for arguments which are: immediate, 32 bit
|
|
|
|
* value in register, 16 and 8 bit values in register (which must be zero
|
|
|
|
* extended before use) and 64 bit value in a lo:hi register pair.
|
|
|
|
*/
|
2013-03-13 00:11:40 +00:00
|
|
|
#define DEFINE_TCG_OUT_ARG(NAME, ARGTYPE, MOV_ARG, EXT_ARG) \
|
|
|
|
static TCGReg NAME(TCGContext *s, TCGReg argreg, ARGTYPE arg) \
|
|
|
|
{ \
|
|
|
|
if (argreg < 4) { \
|
|
|
|
MOV_ARG(s, COND_AL, argreg, arg); \
|
|
|
|
} else { \
|
|
|
|
int ofs = (argreg - 4) * 4; \
|
|
|
|
EXT_ARG; \
|
|
|
|
assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE); \
|
|
|
|
tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs); \
|
|
|
|
} \
|
|
|
|
return argreg + 1; \
|
|
|
|
}
|
|
|
|
|
|
|
|
DEFINE_TCG_OUT_ARG(tcg_out_arg_imm32, uint32_t, tcg_out_movi32,
|
2013-03-12 16:49:04 +00:00
|
|
|
(tcg_out_movi32(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
|
2013-03-13 00:11:40 +00:00
|
|
|
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg8, TCGReg, tcg_out_ext8u,
|
2013-03-12 16:49:04 +00:00
|
|
|
(tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
|
2013-03-13 00:11:40 +00:00
|
|
|
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg16, TCGReg, tcg_out_ext16u,
|
2013-03-12 16:49:04 +00:00
|
|
|
(tcg_out_ext16u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP))
|
2013-03-13 00:11:40 +00:00
|
|
|
DEFINE_TCG_OUT_ARG(tcg_out_arg_reg32, TCGReg, tcg_out_mov_reg, )
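
/* For reference, the tcg_out_arg_reg8 instantiation above expands to roughly
 * the following (shown as a comment, since the real definition comes from the
 * macro):
 *
 *   static TCGReg tcg_out_arg_reg8(TCGContext *s, TCGReg argreg, TCGReg arg)
 *   {
 *       if (argreg < 4) {
 *           tcg_out_ext8u(s, COND_AL, argreg, arg);
 *       } else {
 *           int ofs = (argreg - 4) * 4;
 *           (tcg_out_ext8u(s, COND_AL, TCG_REG_TMP, arg), arg = TCG_REG_TMP);
 *           assert(ofs + 4 <= TCG_STATIC_CALL_ARGS_SIZE);
 *           tcg_out_st32_12(s, COND_AL, arg, TCG_REG_CALL_STACK, ofs);
 *       }
 *       return argreg + 1;
 *   }
 */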
|
|
|
|
|
|
|
|
static TCGReg tcg_out_arg_reg64(TCGContext *s, TCGReg argreg,
|
|
|
|
TCGReg arglo, TCGReg arghi)
|
2012-08-26 13:40:02 +00:00
|
|
|
{
|
|
|
|
/* 64 bit arguments must go in even/odd register pairs
|
|
|
|
* and in 8-aligned stack slots.
|
|
|
|
*/
|
|
|
|
if (argreg & 1) {
|
|
|
|
argreg++;
|
|
|
|
}
|
2013-08-28 18:16:16 +00:00
|
|
|
if (use_armv6_instructions && argreg >= 4
|
|
|
|
&& (arglo & 1) == 0 && arghi == arglo + 1) {
|
|
|
|
tcg_out_strd_8(s, COND_AL, arglo,
|
|
|
|
TCG_REG_CALL_STACK, (argreg - 4) * 4);
|
|
|
|
return argreg + 2;
|
|
|
|
} else {
|
|
|
|
argreg = tcg_out_arg_reg32(s, argreg, arglo);
|
|
|
|
argreg = tcg_out_arg_reg32(s, argreg, arghi);
|
|
|
|
return argreg;
|
|
|
|
}
|
2012-08-26 13:40:02 +00:00
|
|
|
}
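
/* For example (a sketch of one case, no extra code involved): a 64-bit store
 * helper call with a 64-bit guest address gets marshalled as
 *   env            -> r0
 *   addr lo:hi     -> r2:r3      (r1 skipped to keep the pair even/odd)
 *   val lo:hi      -> stack slots 0-1 (8-byte aligned)
 *   mem_index      -> stack slot 2
 *   return address -> stack slot 3
 * which is the sequence built by tcg_out_qemu_st_slow_path below.
 */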
|
2008-05-19 23:59:38 +00:00
|
|
|
|
2008-05-24 20:07:07 +00:00
|
|
|
#define TLB_SHIFT (CPU_TLB_ENTRY_BITS + CPU_TLB_BITS)
|
|
|
|
|
2013-08-28 21:40:52 +00:00
|
|
|
/* We're expecting to use an 8-bit immediate and to mask. */
|
|
|
|
QEMU_BUILD_BUG_ON(CPU_TLB_BITS > 8);
|
|
|
|
|
|
|
|
/* We're expecting to use an 8-bit immediate add + 8-bit ldrd offset.
|
|
|
|
Using the offset of the second entry in the last tlb table ensures
|
|
|
|
that we can index all of the elements of the first entry. */
|
|
|
|
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table[NB_MMU_MODES - 1][1])
|
|
|
|
> 0xffff);
|
|
|
|
|
2013-08-30 15:45:53 +00:00
|
|
|
/* Load and compare a TLB entry, leaving the flags set. Returns the register
|
|
|
|
containing the addend of the tlb entry. Clobbers R0, R1, R2, TMP. */
|
2008-05-19 23:59:38 +00:00
|
|
|
|
2013-08-30 15:45:53 +00:00
|
|
|
static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
|
2013-09-03 23:16:47 +00:00
|
|
|
TCGMemOp s_bits, int mem_index, bool is_load)
|
2013-03-13 01:18:07 +00:00
|
|
|
{
|
2013-03-13 06:18:30 +00:00
|
|
|
TCGReg base = TCG_AREG0;
|
2013-08-30 15:16:00 +00:00
|
|
|
int cmp_off =
|
|
|
|
(is_load
|
|
|
|
? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
|
|
|
|
: offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
|
|
|
|
int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
|
2013-03-13 06:18:30 +00:00
|
|
|
|
2008-05-23 18:51:15 +00:00
|
|
|
/* Should generate something like the following:
|
2013-03-13 06:18:30 +00:00
|
|
|
* shr tmp, addr_reg, #TARGET_PAGE_BITS (1)
|
2013-08-30 15:16:00 +00:00
|
|
|
* add r2, env, #high
|
2013-03-13 06:18:30 +00:00
|
|
|
* and r0, tmp, #(CPU_TLB_SIZE - 1) (2)
|
|
|
|
* add r2, r2, r0, lsl #CPU_TLB_ENTRY_BITS (3)
|
2013-08-30 15:16:00 +00:00
|
|
|
* ldr r0, [r2, #cmp] (4)
|
2013-03-13 06:18:30 +00:00
|
|
|
* tst addr_reg, #s_mask
|
2013-08-30 16:48:56 +00:00
|
|
|
* ldr r1, [r2, #add] (5)
|
|
|
|
* cmpeq r0, tmp, lsl #TARGET_PAGE_BITS
|
2008-05-23 18:51:15 +00:00
|
|
|
*/
|
2013-03-12 16:49:04 +00:00
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, TCG_REG_TMP,
|
2013-03-13 01:18:07 +00:00
|
|
|
0, addrlo, SHIFT_IMM_LSR(TARGET_PAGE_BITS));
|
2013-03-13 06:18:30 +00:00
|
|
|
|
2013-08-28 21:40:52 +00:00
|
|
|
/* We checked that the offset is contained within 16 bits above. */
|
2013-08-30 15:16:00 +00:00
|
|
|
if (add_off > 0xfff || (use_armv6_instructions && cmp_off > 0xff)) {
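        /* ARM data-processing immediates are an 8-bit value rotated right by
         * twice the 4-bit rotate field; (24 << 7) sets that field to 12, i.e.
         * a rotation of 24, which places (cmp_off >> 8) at bits 8..15.  The
         * ADD below therefore folds the high byte of cmp_off into the base. */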
|
2013-03-13 06:18:30 +00:00
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
|
2013-08-30 15:16:00 +00:00
|
|
|
(24 << 7) | (cmp_off >> 8));
|
2013-03-13 06:18:30 +00:00
|
|
|
base = TCG_REG_R2;
|
2013-08-30 15:16:00 +00:00
|
|
|
add_off -= cmp_off & 0xff00;
|
|
|
|
cmp_off &= 0xff;
|
2013-03-13 06:18:30 +00:00
|
|
|
}
|
|
|
|
|
2008-05-19 23:59:38 +00:00
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_AND,
|
2013-03-12 16:49:04 +00:00
|
|
|
TCG_REG_R0, TCG_REG_TMP, CPU_TLB_SIZE - 1);
|
2013-03-13 06:18:30 +00:00
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_R2, base,
|
2010-04-09 18:52:48 +00:00
|
|
|
TCG_REG_R0, SHIFT_IMM_LSL(CPU_TLB_ENTRY_BITS));
|
2013-03-13 01:18:07 +00:00
|
|
|
|
2013-03-13 06:18:30 +00:00
|
|
|
/* Load the tlb comparator. Use ldrd if needed and available,
|
|
|
|
but due to how the pointer needs setting up, ldm isn't useful.
|
|
|
|
Base arm5 doesn't have ldrd, but armv5te does. */
|
|
|
|
if (use_armv6_instructions && TARGET_LONG_BITS == 64) {
|
2013-08-30 15:16:00 +00:00
|
|
|
tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off);
|
2013-03-13 06:18:30 +00:00
|
|
|
} else {
|
2013-08-30 15:16:00 +00:00
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_R2, cmp_off);
|
2013-03-13 06:18:30 +00:00
|
|
|
if (TARGET_LONG_BITS == 64) {
|
2013-08-30 15:16:00 +00:00
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_R1, TCG_REG_R2, cmp_off + 4);
|
2013-03-13 06:18:30 +00:00
|
|
|
}
|
2012-10-09 19:53:11 +00:00
|
|
|
}
|
2013-03-13 01:18:07 +00:00
|
|
|
|
2008-05-24 20:07:07 +00:00
|
|
|
/* Check alignment. */
|
2013-03-13 01:18:07 +00:00
|
|
|
if (s_bits) {
|
2013-03-13 06:18:30 +00:00
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_TST,
|
2013-03-13 01:18:07 +00:00
|
|
|
0, addrlo, (1 << s_bits) - 1);
|
|
|
|
}
|
|
|
|
|
2013-08-30 16:48:56 +00:00
|
|
|
/* Load the tlb addend. */
|
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_R2, TCG_REG_R2, add_off);
|
|
|
|
|
2013-03-13 06:18:30 +00:00
|
|
|
tcg_out_dat_reg(s, (s_bits ? COND_EQ : COND_AL), ARITH_CMP, 0,
|
|
|
|
TCG_REG_R0, TCG_REG_TMP, SHIFT_IMM_LSL(TARGET_PAGE_BITS));
|
|
|
|
|
2013-03-13 01:18:07 +00:00
|
|
|
if (TARGET_LONG_BITS == 64) {
|
|
|
|
tcg_out_dat_reg(s, COND_EQ, ARITH_CMP, 0,
|
|
|
|
TCG_REG_R1, addrhi, SHIFT_IMM_LSL(0));
|
|
|
|
}
|
2013-08-30 15:16:00 +00:00
|
|
|
|
2013-08-30 16:48:56 +00:00
|
|
|
return TCG_REG_R2;
|
2013-03-13 01:18:07 +00:00
|
|
|
}
|
2013-03-13 22:24:33 +00:00
|
|
|
|
|
|
|
/* Record the context of a call to the out of line helper code for the slow
|
|
|
|
path for a load or store, so that we can later generate the correct
|
|
|
|
helper code. */
|
2013-09-03 23:16:47 +00:00
|
|
|
static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
|
2013-03-13 22:24:33 +00:00
|
|
|
int data_reg, int data_reg2, int addrlo_reg,
|
|
|
|
int addrhi_reg, int mem_index,
|
|
|
|
uint8_t *raddr, uint8_t *label_ptr)
|
|
|
|
{
|
2013-10-03 19:51:24 +00:00
|
|
|
TCGLabelQemuLdst *label = new_ldst_label(s);
|
2013-03-13 22:24:33 +00:00
|
|
|
|
|
|
|
label->is_ld = is_ld;
|
|
|
|
label->opc = opc;
|
|
|
|
label->datalo_reg = data_reg;
|
|
|
|
label->datahi_reg = data_reg2;
|
|
|
|
label->addrlo_reg = addrlo_reg;
|
|
|
|
label->addrhi_reg = addrhi_reg;
|
|
|
|
label->mem_index = mem_index;
|
|
|
|
label->raddr = raddr;
|
|
|
|
label->label_ptr[0] = label_ptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
|
|
|
|
{
|
|
|
|
TCGReg argreg, data_reg, data_reg2;
|
2013-09-03 23:16:47 +00:00
|
|
|
TCGMemOp opc = lb->opc & MO_SSIZE;
|
2013-07-28 00:09:47 +00:00
|
|
|
uintptr_t func;
|
2013-03-13 22:24:33 +00:00
|
|
|
|
|
|
|
reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
|
|
|
|
|
|
|
|
argreg = tcg_out_arg_reg32(s, TCG_REG_R0, TCG_AREG0);
|
|
|
|
if (TARGET_LONG_BITS == 64) {
|
|
|
|
argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
|
|
|
|
} else {
|
|
|
|
argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
|
|
|
|
}
|
|
|
|
argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
|
2013-07-28 00:09:47 +00:00
|
|
|
argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
|
|
|
|
|
|
|
|
/* For armv6 we can use the canonical unsigned helpers and minimize
|
|
|
|
icache usage. For pre-armv6, use the signed helpers since we do
|
|
|
|
not have a single insn sign-extend. */
|
|
|
|
if (use_armv6_instructions) {
|
2013-09-03 23:16:47 +00:00
|
|
|
func = (uintptr_t)qemu_ld_helpers[opc & MO_SIZE];
|
2013-07-28 00:09:47 +00:00
|
|
|
} else {
|
|
|
|
func = (uintptr_t)qemu_ld_helpers[opc];
|
2013-09-03 23:16:47 +00:00
|
|
|
if (opc & MO_SIGN) {
|
|
|
|
opc = MO_UL;
|
2013-07-28 00:09:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
tcg_out_call(s, func);
|
2013-03-13 22:24:33 +00:00
|
|
|
|
|
|
|
data_reg = lb->datalo_reg;
|
|
|
|
data_reg2 = lb->datahi_reg;
|
2013-07-28 00:09:47 +00:00
|
|
|
switch (opc) {
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_SB:
|
2013-03-13 22:24:33 +00:00
|
|
|
tcg_out_ext8s(s, COND_AL, data_reg, TCG_REG_R0);
|
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_SW:
|
2013-03-13 22:24:33 +00:00
|
|
|
tcg_out_ext16s(s, COND_AL, data_reg, TCG_REG_R0);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
|
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_Q:
|
2013-08-30 16:12:32 +00:00
|
|
|
if (data_reg != TCG_REG_R1) {
|
|
|
|
tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
|
|
|
|
tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1);
|
|
|
|
} else if (data_reg2 != TCG_REG_R0) {
|
|
|
|
tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1);
|
|
|
|
tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_R0);
|
|
|
|
} else {
|
|
|
|
tcg_out_mov_reg(s, COND_AL, TCG_REG_TMP, TCG_REG_R0);
|
|
|
|
tcg_out_mov_reg(s, COND_AL, data_reg2, TCG_REG_R1);
|
|
|
|
tcg_out_mov_reg(s, COND_AL, data_reg, TCG_REG_TMP);
|
|
|
|
}
|
2013-03-13 22:24:33 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
tcg_out_goto(s, COND_AL, (tcg_target_long)lb->raddr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
|
|
|
|
{
|
|
|
|
TCGReg argreg, data_reg, data_reg2;
|
2013-09-03 23:16:47 +00:00
|
|
|
TCGMemOp s_bits = lb->opc & MO_SIZE;
|
2013-03-13 22:24:33 +00:00
|
|
|
|
|
|
|
reloc_pc24(lb->label_ptr[0], (tcg_target_long)s->code_ptr);
|
|
|
|
|
|
|
|
argreg = TCG_REG_R0;
|
|
|
|
argreg = tcg_out_arg_reg32(s, argreg, TCG_AREG0);
|
|
|
|
if (TARGET_LONG_BITS == 64) {
|
|
|
|
argreg = tcg_out_arg_reg64(s, argreg, lb->addrlo_reg, lb->addrhi_reg);
|
|
|
|
} else {
|
|
|
|
argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
|
|
|
|
}
|
|
|
|
|
|
|
|
data_reg = lb->datalo_reg;
|
|
|
|
data_reg2 = lb->datahi_reg;
|
2013-09-03 23:16:47 +00:00
|
|
|
switch (s_bits) {
|
|
|
|
case MO_8:
|
2013-03-13 22:24:33 +00:00
|
|
|
argreg = tcg_out_arg_reg8(s, argreg, data_reg);
|
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_16:
|
2013-03-13 22:24:33 +00:00
|
|
|
argreg = tcg_out_arg_reg16(s, argreg, data_reg);
|
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_32:
|
|
|
|
default:
|
2013-03-13 22:24:33 +00:00
|
|
|
argreg = tcg_out_arg_reg32(s, argreg, data_reg);
|
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_64:
|
2013-03-13 22:24:33 +00:00
|
|
|
argreg = tcg_out_arg_reg64(s, argreg, data_reg, data_reg2);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
|
2013-07-28 00:09:47 +00:00
|
|
|
argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
|
2013-03-13 22:24:33 +00:00
|
|
|
|
2013-07-28 00:09:47 +00:00
|
|
|
/* Tail-call to the helper, which will return to the fast path. */
|
2013-09-03 23:16:47 +00:00
|
|
|
tcg_out_goto(s, COND_AL, (tcg_target_long) qemu_st_helpers[s_bits]);
|
2013-03-13 22:24:33 +00:00
|
|
|
}
|
2013-03-13 01:18:07 +00:00
|
|
|
#endif /* SOFTMMU */
|
|
|
|
|
2013-09-03 23:16:47 +00:00
|
|
|
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGMemOp opc)
|
2013-03-13 01:18:07 +00:00
|
|
|
{
|
|
|
|
TCGReg addr_reg, data_reg, data_reg2;
|
2013-09-03 23:16:47 +00:00
|
|
|
TCGMemOp bswap = opc & MO_BSWAP;
|
|
|
|
TCGMemOp s_bits = opc & MO_SIZE;
|
2013-03-13 01:18:07 +00:00
|
|
|
#ifdef CONFIG_SOFTMMU
|
2013-09-03 23:16:47 +00:00
|
|
|
int mem_index;
|
2013-08-30 15:45:53 +00:00
|
|
|
TCGReg addr_reg2, addend;
|
2013-03-13 22:24:33 +00:00
|
|
|
uint8_t *label_ptr;
|
2013-03-13 01:18:07 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
data_reg = *args++;
|
2013-09-03 23:16:47 +00:00
|
|
|
data_reg2 = (s_bits == MO_64 ? *args++ : 0);
|
2013-03-13 01:18:07 +00:00
|
|
|
addr_reg = *args++;
|
|
|
|
#ifdef CONFIG_SOFTMMU
|
|
|
|
addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
|
|
|
|
mem_index = *args;
|
|
|
|
|
2013-08-30 15:45:53 +00:00
|
|
|
addend = tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits, mem_index, 1);
|
2013-03-13 01:18:07 +00:00
|
|
|
|
2013-07-28 00:09:47 +00:00
|
|
|
    /* This is a conditional BL only to load a pointer within this opcode into LR
|
|
|
|
for the slow path. We will not be using the value for a tail call. */
|
2013-03-13 22:24:33 +00:00
|
|
|
label_ptr = s->code_ptr;
|
2013-07-28 00:09:47 +00:00
|
|
|
tcg_out_bl_noaddr(s, COND_NE);
|
2013-03-13 22:24:33 +00:00
|
|
|
|
2013-09-03 23:16:47 +00:00
|
|
|
switch (opc & MO_SSIZE) {
|
|
|
|
case MO_UB:
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_ld8_r(s, COND_AL, data_reg, addr_reg, addend);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_SB:
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_ld8s_r(s, COND_AL, data_reg, addr_reg, addend);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_UW:
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, addend);
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
2013-03-13 22:24:33 +00:00
|
|
|
tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
|
2010-04-09 18:52:48 +00:00
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_SW:
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_ld16u_r(s, COND_AL, data_reg, addr_reg, addend);
|
2013-03-13 22:24:33 +00:00
|
|
|
tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
|
2010-04-09 18:52:48 +00:00
|
|
|
} else {
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_ld16s_r(s, COND_AL, data_reg, addr_reg, addend);
|
2010-04-09 18:52:48 +00:00
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_UL:
|
2008-05-19 23:59:38 +00:00
|
|
|
default:
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_ld32_r(s, COND_AL, data_reg, addr_reg, addend);
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
2013-03-13 22:24:33 +00:00
|
|
|
tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
|
2010-04-09 18:52:48 +00:00
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_Q:
|
2013-08-30 16:12:32 +00:00
|
|
|
{
|
|
|
|
/* Be careful not to modify data_reg and data_reg2
|
|
|
|
for the slow path below. */
|
|
|
|
TCGReg dl = (bswap ? data_reg2 : data_reg);
|
|
|
|
TCGReg dh = (bswap ? data_reg : data_reg2);
|
|
|
|
|
|
|
|
if (use_armv6_instructions && (dl & 1) == 0 && dh == dl + 1) {
|
|
|
|
tcg_out_ldrd_r(s, COND_AL, dl, addr_reg, addend);
|
|
|
|
} else if (dl != addend) {
|
|
|
|
tcg_out_ld32_rwb(s, COND_AL, dl, addend, addr_reg);
|
|
|
|
tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
|
|
|
|
} else {
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
|
|
|
|
addend, addr_reg, SHIFT_IMM_LSL(0));
|
|
|
|
tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
|
|
|
|
tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
|
|
|
|
}
|
|
|
|
if (bswap) {
|
|
|
|
tcg_out_bswap32(s, COND_AL, dh, dh);
|
|
|
|
tcg_out_bswap32(s, COND_AL, dl, dl);
|
|
|
|
}
|
2010-04-09 18:52:48 +00:00
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2013-03-13 22:24:33 +00:00
|
|
|
add_qemu_ldst_label(s, 1, opc, data_reg, data_reg2, addr_reg, addr_reg2,
|
|
|
|
mem_index, s->code_ptr, label_ptr);
|
2009-07-17 11:48:08 +00:00
|
|
|
#else /* !CONFIG_SOFTMMU */
|
|
|
|
if (GUEST_BASE) {
|
|
|
|
uint32_t offset = GUEST_BASE;
|
2013-03-13 01:18:07 +00:00
|
|
|
int i, rot;
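        /* The loop below folds GUEST_BASE into the address one ARM
         * rotated-8-bit immediate at a time: i is the lowest set (even) bit
         * position, the byte (offset >> i) & 0xff is added back rotated into
         * place via rot, and those bits are then cleared.  For instance
         * 0x12340000 is peeled off as 0x8d << 18 plus 0x1 << 28, i.e. two
         * ADD instructions. */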
|
2009-07-17 11:48:08 +00:00
|
|
|
|
|
|
|
while (offset) {
|
|
|
|
i = ctz32(offset) & ~1;
|
|
|
|
rot = ((32 - i) << 7) & 0xf00;
|
|
|
|
|
2013-03-12 16:49:04 +00:00
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_TMP, addr_reg,
|
2009-07-17 11:48:08 +00:00
|
|
|
((offset >> i) & 0xff) | rot);
|
2013-03-12 16:49:04 +00:00
|
|
|
addr_reg = TCG_REG_TMP;
|
2009-07-17 11:48:08 +00:00
|
|
|
offset &= ~(0xff << i);
|
|
|
|
}
|
|
|
|
}
|
2013-09-03 23:16:47 +00:00
|
|
|
switch (opc & MO_SSIZE) {
|
|
|
|
case MO_UB:
|
2008-05-19 23:59:38 +00:00
|
|
|
tcg_out_ld8_12(s, COND_AL, data_reg, addr_reg, 0);
|
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_SB:
|
2008-05-19 23:59:38 +00:00
|
|
|
tcg_out_ld8s_8(s, COND_AL, data_reg, addr_reg, 0);
|
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_UW:
|
2008-05-19 23:59:38 +00:00
|
|
|
tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
|
|
|
tcg_out_bswap16(s, COND_AL, data_reg, data_reg);
|
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_SW:
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
|
|
|
tcg_out_ld16u_8(s, COND_AL, data_reg, addr_reg, 0);
|
|
|
|
tcg_out_bswap16s(s, COND_AL, data_reg, data_reg);
|
|
|
|
} else {
|
|
|
|
tcg_out_ld16s_8(s, COND_AL, data_reg, addr_reg, 0);
|
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_UL:
|
2008-05-19 23:59:38 +00:00
|
|
|
default:
|
|
|
|
tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, 0);
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
|
|
|
tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
|
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_Q:
|
2013-07-27 18:42:51 +00:00
|
|
|
if (use_armv6_instructions && !bswap
|
|
|
|
&& (data_reg & 1) == 0 && data_reg2 == data_reg + 1) {
|
|
|
|
tcg_out_ldrd_8(s, COND_AL, data_reg, addr_reg, 0);
|
|
|
|
} else if (use_armv6_instructions && bswap
|
|
|
|
&& (data_reg2 & 1) == 0 && data_reg == data_reg2 + 1) {
|
|
|
|
tcg_out_ldrd_8(s, COND_AL, data_reg2, addr_reg, 0);
|
|
|
|
} else if (data_reg == addr_reg) {
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
|
|
|
|
tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
|
2009-03-10 21:43:25 +00:00
|
|
|
} else {
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_ld32_12(s, COND_AL, data_reg, addr_reg, bswap ? 4 : 0);
|
|
|
|
tcg_out_ld32_12(s, COND_AL, data_reg2, addr_reg, bswap ? 0 : 4);
|
|
|
|
}
|
|
|
|
if (bswap) {
|
|
|
|
tcg_out_bswap32(s, COND_AL, data_reg, data_reg);
|
|
|
|
tcg_out_bswap32(s, COND_AL, data_reg2, data_reg2);
|
2009-03-10 21:43:25 +00:00
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2013-09-03 23:16:47 +00:00
|
|
|
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGMemOp opc)
|
2008-05-19 23:59:38 +00:00
|
|
|
{
|
2013-03-13 01:18:07 +00:00
|
|
|
TCGReg addr_reg, data_reg, data_reg2;
|
2013-09-03 23:16:47 +00:00
|
|
|
TCGMemOp bswap = opc & MO_BSWAP;
|
|
|
|
TCGMemOp s_bits = opc & MO_SIZE;
|
2008-05-19 23:59:38 +00:00
|
|
|
#ifdef CONFIG_SOFTMMU
|
2013-09-03 23:16:47 +00:00
|
|
|
int mem_index;
|
2013-08-30 15:45:53 +00:00
|
|
|
TCGReg addr_reg2, addend;
|
2013-03-13 22:24:33 +00:00
|
|
|
uint8_t *label_ptr;
|
2008-05-19 23:59:38 +00:00
|
|
|
#endif
|
2010-04-09 18:52:48 +00:00
|
|
|
|
2013-03-13 01:18:07 +00:00
|
|
|
|
2008-05-19 23:59:38 +00:00
|
|
|
data_reg = *args++;
|
2013-09-03 23:16:47 +00:00
|
|
|
data_reg2 = (s_bits == MO_64 ? *args++ : 0);
|
2008-05-19 23:59:38 +00:00
|
|
|
addr_reg = *args++;
|
|
|
|
#ifdef CONFIG_SOFTMMU
|
2013-03-13 01:18:07 +00:00
|
|
|
addr_reg2 = (TARGET_LONG_BITS == 64 ? *args++ : 0);
|
2008-05-19 23:59:38 +00:00
|
|
|
mem_index = *args;
|
|
|
|
|
2013-08-30 15:45:53 +00:00
|
|
|
addend = tcg_out_tlb_read(s, addr_reg, addr_reg2, s_bits, mem_index, 0);
|
2008-05-19 23:59:38 +00:00
|
|
|
|
2013-09-03 23:16:47 +00:00
|
|
|
switch (s_bits) {
|
|
|
|
case MO_8:
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_st8_r(s, COND_EQ, data_reg, addr_reg, addend);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_16:
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
2013-07-28 00:09:47 +00:00
|
|
|
tcg_out_bswap16st(s, COND_EQ, TCG_REG_R0, data_reg);
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_st16_r(s, COND_EQ, TCG_REG_R0, addr_reg, addend);
|
2010-04-09 18:52:48 +00:00
|
|
|
} else {
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_st16_r(s, COND_EQ, data_reg, addr_reg, addend);
|
2010-04-09 18:52:48 +00:00
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_32:
|
2008-05-19 23:59:38 +00:00
|
|
|
default:
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
2013-07-28 00:09:47 +00:00
|
|
|
tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_st32_r(s, COND_EQ, TCG_REG_R0, addr_reg, addend);
|
2010-04-09 18:52:48 +00:00
|
|
|
} else {
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_st32_r(s, COND_EQ, data_reg, addr_reg, addend);
|
2010-04-09 18:52:48 +00:00
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_64:
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
2013-07-28 00:09:47 +00:00
|
|
|
tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg2);
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_st32_rwb(s, COND_EQ, TCG_REG_R0, addend, addr_reg);
|
2013-07-28 00:09:47 +00:00
|
|
|
tcg_out_bswap32(s, COND_EQ, TCG_REG_R0, data_reg);
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_st32_12(s, COND_EQ, TCG_REG_R0, addend, 4);
|
2013-07-27 18:42:51 +00:00
|
|
|
} else if (use_armv6_instructions
|
|
|
|
&& (data_reg & 1) == 0 && data_reg2 == data_reg + 1) {
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_strd_r(s, COND_EQ, data_reg, addr_reg, addend);
|
2010-04-09 18:52:48 +00:00
|
|
|
} else {
|
2013-08-30 15:45:53 +00:00
|
|
|
tcg_out_st32_rwb(s, COND_EQ, data_reg, addend, addr_reg);
|
|
|
|
tcg_out_st32_12(s, COND_EQ, data_reg2, addend, 4);
|
2010-04-09 18:52:48 +00:00
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2013-07-28 00:09:47 +00:00
|
|
|
/* The conditional call must come last, as we're going to return here. */
|
|
|
|
label_ptr = s->code_ptr;
|
|
|
|
tcg_out_bl_noaddr(s, COND_NE);
|
|
|
|
|
2013-03-13 22:24:33 +00:00
|
|
|
add_qemu_ldst_label(s, 0, opc, data_reg, data_reg2, addr_reg, addr_reg2,
|
|
|
|
mem_index, s->code_ptr, label_ptr);
|
2009-07-17 11:48:08 +00:00
|
|
|
#else /* !CONFIG_SOFTMMU */
|
|
|
|
if (GUEST_BASE) {
|
|
|
|
uint32_t offset = GUEST_BASE;
|
|
|
|
int i;
|
|
|
|
int rot;
|
|
|
|
|
|
|
|
while (offset) {
|
|
|
|
i = ctz32(offset) & ~1;
|
|
|
|
rot = ((32 - i) << 7) & 0xf00;
|
|
|
|
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_ADD, TCG_REG_R1, addr_reg,
|
2009-07-17 11:48:08 +00:00
|
|
|
((offset >> i) & 0xff) | rot);
|
2010-04-09 18:52:48 +00:00
|
|
|
addr_reg = TCG_REG_R1;
|
2009-07-17 11:48:08 +00:00
|
|
|
offset &= ~(0xff << i);
|
|
|
|
}
|
|
|
|
}
|
2013-09-03 23:16:47 +00:00
|
|
|
switch (s_bits) {
|
|
|
|
case MO_8:
|
2008-05-19 23:59:38 +00:00
|
|
|
tcg_out_st8_12(s, COND_AL, data_reg, addr_reg, 0);
|
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_16:
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
2012-10-09 19:53:11 +00:00
|
|
|
tcg_out_bswap16st(s, COND_AL, TCG_REG_R0, data_reg);
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addr_reg, 0);
|
|
|
|
} else {
|
|
|
|
tcg_out_st16_8(s, COND_AL, data_reg, addr_reg, 0);
|
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_32:
|
2008-05-19 23:59:38 +00:00
|
|
|
default:
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
|
|
|
tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
|
|
|
|
tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
|
|
|
|
} else {
|
|
|
|
tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
|
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2013-09-03 23:16:47 +00:00
|
|
|
case MO_64:
|
2010-04-09 18:52:48 +00:00
|
|
|
if (bswap) {
|
|
|
|
tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg2);
|
|
|
|
tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 0);
|
|
|
|
tcg_out_bswap32(s, COND_AL, TCG_REG_R0, data_reg);
|
|
|
|
tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addr_reg, 4);
|
2013-07-27 18:42:51 +00:00
|
|
|
} else if (use_armv6_instructions
|
|
|
|
&& (data_reg & 1) == 0 && data_reg2 == data_reg + 1) {
|
|
|
|
tcg_out_strd_8(s, COND_AL, data_reg, addr_reg, 0);
|
2010-04-09 18:52:48 +00:00
|
|
|
} else {
|
|
|
|
tcg_out_st32_12(s, COND_AL, data_reg, addr_reg, 0);
|
|
|
|
tcg_out_st32_12(s, COND_AL, data_reg2, addr_reg, 4);
|
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint8_t *tb_ret_addr;
|
|
|
|
|
2010-03-19 18:12:29 +00:00
|
|
|
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
|
2008-05-19 23:59:38 +00:00
|
|
|
const TCGArg *args, const int *const_args)
|
|
|
|
{
|
2013-03-12 02:51:56 +00:00
|
|
|
TCGArg a0, a1, a2, a3, a4, a5;
|
2008-05-19 23:59:38 +00:00
|
|
|
int c;
|
|
|
|
|
|
|
|
switch (opc) {
|
|
|
|
case INDEX_op_exit_tb:
|
2013-04-29 15:08:23 +00:00
|
|
|
if (use_armv7_instructions || check_fit_imm(args[0])) {
|
|
|
|
tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
|
|
|
|
tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
|
|
|
|
} else {
|
2008-12-01 02:17:12 +00:00
|
|
|
uint8_t *ld_ptr = s->code_ptr;
|
2013-04-29 15:08:23 +00:00
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
|
2008-12-01 02:17:12 +00:00
|
|
|
tcg_out_goto(s, COND_AL, (tcg_target_ulong) tb_ret_addr);
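            /* Backpatch the ldr's 12-bit offset so that it loads the constant
             * emitted just below: code_ptr - ld_ptr is the distance from the
             * ldr to the constant, and the -8 compensates for the PC+8 bias.
             * Storing one byte is enough here because the offset fits in
             * 8 bits and the host is little-endian. */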
|
2013-04-29 15:08:23 +00:00
|
|
|
*ld_ptr = (uint8_t) (s->code_ptr - ld_ptr) - 8;
|
|
|
|
tcg_out32(s, args[0]);
|
2008-12-01 02:17:12 +00:00
|
|
|
}
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_goto_tb:
|
|
|
|
if (s->tb_jmp_offset) {
|
|
|
|
/* Direct jump method */
|
2008-12-01 02:17:12 +00:00
|
|
|
#if defined(USE_DIRECT_JUMP)
|
2008-05-19 23:59:38 +00:00
|
|
|
s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
|
2011-01-06 21:43:13 +00:00
|
|
|
tcg_out_b_noaddr(s, COND_AL);
|
2008-05-19 23:59:38 +00:00
|
|
|
#else
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, -4);
|
2008-05-19 23:59:38 +00:00
|
|
|
s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
|
|
|
|
tcg_out32(s, 0);
|
|
|
|
#endif
|
|
|
|
} else {
|
|
|
|
/* Indirect jump method */
|
|
|
|
#if 1
|
|
|
|
c = (int) (s->tb_next + args[0]) - ((int) s->code_ptr + 8);
|
|
|
|
if (c > 0xfff || c < -0xfff) {
|
|
|
|
tcg_out_movi32(s, COND_AL, TCG_REG_R0,
|
|
|
|
(tcg_target_long) (s->tb_next + args[0]));
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
|
2008-05-19 23:59:38 +00:00
|
|
|
} else
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_PC, c);
|
2008-05-19 23:59:38 +00:00
|
|
|
#else
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_R0, TCG_REG_PC, 0);
|
|
|
|
tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, 0);
|
2008-05-19 23:59:38 +00:00
|
|
|
tcg_out32(s, (tcg_target_long) (s->tb_next + args[0]));
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
|
|
|
|
break;
|
|
|
|
case INDEX_op_call:
|
|
|
|
if (const_args[0])
|
2011-03-16 15:21:31 +00:00
|
|
|
tcg_out_call(s, args[0]);
|
2008-05-19 23:59:38 +00:00
|
|
|
else
|
|
|
|
tcg_out_callr(s, COND_AL, args[0]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_br:
|
|
|
|
tcg_out_goto_label(s, COND_AL, args[0]);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case INDEX_op_ld8u_i32:
|
|
|
|
tcg_out_ld8u(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_ld8s_i32:
|
|
|
|
tcg_out_ld8s(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_ld16u_i32:
|
|
|
|
tcg_out_ld16u(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_ld16s_i32:
|
|
|
|
tcg_out_ld16s(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_ld_i32:
|
|
|
|
tcg_out_ld32u(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_st8_i32:
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_st8(s, COND_AL, args[0], args[1], args[2]);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_st16_i32:
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_st16(s, COND_AL, args[0], args[1], args[2]);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_st_i32:
|
|
|
|
tcg_out_st32(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case INDEX_op_mov_i32:
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV,
|
|
|
|
args[0], 0, args[1], SHIFT_IMM_LSL(0));
|
|
|
|
break;
|
|
|
|
case INDEX_op_movi_i32:
|
|
|
|
tcg_out_movi32(s, COND_AL, args[0], args[1]);
|
|
|
|
break;
|
2012-09-26 18:48:55 +00:00
|
|
|
case INDEX_op_movcond_i32:
|
|
|
|
/* Constraints mean that v2 is always in the same register as dest,
|
|
|
|
* so we only need to do "if condition passed, move v1 to dest".
|
|
|
|
*/
|
2013-03-12 01:21:59 +00:00
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
|
|
|
|
args[1], args[2], const_args[2]);
|
|
|
|
tcg_out_dat_rIK(s, tcg_cond_to_arm_cond[args[5]], ARITH_MOV,
|
|
|
|
ARITH_MVN, args[0], 0, args[3], const_args[3]);
|
2012-09-26 18:48:55 +00:00
|
|
|
break;
|
2008-05-19 23:59:38 +00:00
|
|
|
case INDEX_op_add_i32:
|
2013-03-05 06:06:21 +00:00
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_ADD, ARITH_SUB,
|
|
|
|
args[0], args[1], args[2], const_args[2]);
|
|
|
|
break;
|
2008-05-19 23:59:38 +00:00
|
|
|
case INDEX_op_sub_i32:
|
2013-03-12 01:04:14 +00:00
|
|
|
if (const_args[1]) {
|
|
|
|
if (const_args[2]) {
|
|
|
|
tcg_out_movi32(s, COND_AL, args[0], args[1] - args[2]);
|
|
|
|
} else {
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_RSB,
|
|
|
|
args[0], args[2], args[1], 1);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_SUB, ARITH_ADD,
|
|
|
|
args[0], args[1], args[2], const_args[2]);
|
|
|
|
}
|
2013-03-05 06:06:21 +00:00
|
|
|
break;
|
2008-05-19 23:59:38 +00:00
|
|
|
case INDEX_op_and_i32:
|
2013-03-05 05:36:45 +00:00
|
|
|
tcg_out_dat_rIK(s, COND_AL, ARITH_AND, ARITH_BIC,
|
|
|
|
args[0], args[1], args[2], const_args[2]);
|
|
|
|
break;
|
2010-03-02 23:13:43 +00:00
|
|
|
case INDEX_op_andc_i32:
|
2013-03-05 05:36:45 +00:00
|
|
|
tcg_out_dat_rIK(s, COND_AL, ARITH_BIC, ARITH_AND,
|
|
|
|
args[0], args[1], args[2], const_args[2]);
|
|
|
|
break;
|
2008-05-19 23:59:38 +00:00
|
|
|
case INDEX_op_or_i32:
|
|
|
|
c = ARITH_ORR;
|
|
|
|
goto gen_arith;
|
|
|
|
case INDEX_op_xor_i32:
|
|
|
|
c = ARITH_EOR;
|
|
|
|
/* Fall through. */
|
|
|
|
gen_arith:
|
2012-09-26 18:48:54 +00:00
|
|
|
tcg_out_dat_rI(s, COND_AL, c, args[0], args[1], args[2], const_args[2]);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_add2_i32:
|
2013-03-12 02:51:56 +00:00
|
|
|
a0 = args[0], a1 = args[1], a2 = args[2];
|
|
|
|
a3 = args[3], a4 = args[4], a5 = args[5];
|
|
|
|
if (a0 == a3 || (a0 == a5 && !const_args[5])) {
|
2013-03-12 16:49:04 +00:00
|
|
|
a0 = TCG_REG_TMP;
|
2013-03-12 02:51:56 +00:00
|
|
|
}
|
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_ADD | TO_CPSR, ARITH_SUB | TO_CPSR,
|
|
|
|
a0, a2, a4, const_args[4]);
|
|
|
|
tcg_out_dat_rIK(s, COND_AL, ARITH_ADC, ARITH_SBC,
|
|
|
|
a1, a3, a5, const_args[5]);
|
|
|
|
tcg_out_mov_reg(s, COND_AL, args[0], a0);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_sub2_i32:
|
2013-03-12 02:51:56 +00:00
|
|
|
a0 = args[0], a1 = args[1], a2 = args[2];
|
|
|
|
a3 = args[3], a4 = args[4], a5 = args[5];
|
|
|
|
if ((a0 == a3 && !const_args[3]) || (a0 == a5 && !const_args[5])) {
|
2013-03-12 16:49:04 +00:00
|
|
|
a0 = TCG_REG_TMP;
|
2013-03-12 02:51:56 +00:00
|
|
|
}
|
|
|
|
if (const_args[2]) {
|
|
|
|
if (const_args[4]) {
|
|
|
|
tcg_out_movi32(s, COND_AL, a0, a4);
|
|
|
|
a4 = a0;
|
|
|
|
}
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_RSB | TO_CPSR, a0, a4, a2, 1);
|
|
|
|
} else {
|
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_SUB | TO_CPSR,
|
|
|
|
ARITH_ADD | TO_CPSR, a0, a2, a4, const_args[4]);
|
|
|
|
}
|
|
|
|
if (const_args[3]) {
|
|
|
|
if (const_args[5]) {
|
|
|
|
tcg_out_movi32(s, COND_AL, a1, a5);
|
|
|
|
a5 = a1;
|
|
|
|
}
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_RSC, a1, a5, a3, 1);
|
|
|
|
} else {
|
|
|
|
tcg_out_dat_rIK(s, COND_AL, ARITH_SBC, ARITH_ADC,
|
|
|
|
a1, a3, a5, const_args[5]);
|
|
|
|
}
|
|
|
|
tcg_out_mov_reg(s, COND_AL, args[0], a0);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2008-05-20 11:26:40 +00:00
|
|
|
case INDEX_op_neg_i32:
|
|
|
|
tcg_out_dat_imm(s, COND_AL, ARITH_RSB, args[0], args[1], 0);
|
|
|
|
break;
|
2009-08-22 11:55:06 +00:00
|
|
|
case INDEX_op_not_i32:
|
|
|
|
tcg_out_dat_reg(s, COND_AL,
|
|
|
|
ARITH_MVN, args[0], 0, args[1], SHIFT_IMM_LSL(0));
|
|
|
|
break;
|
2008-05-19 23:59:38 +00:00
|
|
|
case INDEX_op_mul_i32:
|
|
|
|
tcg_out_mul32(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_mulu2_i32:
|
|
|
|
tcg_out_umull32(s, COND_AL, args[0], args[1], args[2], args[3]);
|
|
|
|
break;
|
2013-02-20 07:51:58 +00:00
|
|
|
case INDEX_op_muls2_i32:
|
|
|
|
tcg_out_smull32(s, COND_AL, args[0], args[1], args[2], args[3]);
|
|
|
|
break;
|
2008-05-19 23:59:38 +00:00
|
|
|
/* XXX: Perhaps args[2] & 0x1f is wrong */
|
|
|
|
case INDEX_op_shl_i32:
|
|
|
|
c = const_args[2] ?
|
|
|
|
SHIFT_IMM_LSL(args[2] & 0x1f) : SHIFT_REG_LSL(args[2]);
|
|
|
|
goto gen_shift32;
|
|
|
|
case INDEX_op_shr_i32:
|
|
|
|
c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_LSR(args[2] & 0x1f) :
|
|
|
|
SHIFT_IMM_LSL(0) : SHIFT_REG_LSR(args[2]);
|
|
|
|
goto gen_shift32;
|
|
|
|
case INDEX_op_sar_i32:
|
|
|
|
c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ASR(args[2] & 0x1f) :
|
|
|
|
SHIFT_IMM_LSL(0) : SHIFT_REG_ASR(args[2]);
|
2010-04-09 18:52:48 +00:00
|
|
|
goto gen_shift32;
|
|
|
|
case INDEX_op_rotr_i32:
|
|
|
|
c = const_args[2] ? (args[2] & 0x1f) ? SHIFT_IMM_ROR(args[2] & 0x1f) :
|
|
|
|
SHIFT_IMM_LSL(0) : SHIFT_REG_ROR(args[2]);
|
2008-05-19 23:59:38 +00:00
|
|
|
/* Fall through. */
|
|
|
|
gen_shift32:
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1], c);
|
|
|
|
break;
|
|
|
|
|
2010-04-09 18:52:48 +00:00
|
|
|
case INDEX_op_rotl_i32:
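        /* There is no rotate-left shifter operand; rotate left by N is
         * rotate right by 32 - N, so the register case below first computes
         * 32 - args[2] into TMP. */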
|
|
|
|
if (const_args[2]) {
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
|
|
|
|
((0x20 - args[2]) & 0x1f) ?
|
|
|
|
SHIFT_IMM_ROR((0x20 - args[2]) & 0x1f) :
|
|
|
|
SHIFT_IMM_LSL(0));
|
|
|
|
} else {
|
2013-03-12 16:49:04 +00:00
|
|
|
            tcg_out_dat_imm(s, COND_AL, ARITH_RSB, TCG_REG_TMP, args[2], 0x20);
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, args[0], 0, args[1],
|
2013-03-12 16:49:04 +00:00
|
|
|
SHIFT_REG_ROR(TCG_REG_TMP));
|
2010-04-09 18:52:48 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2008-05-19 23:59:38 +00:00
|
|
|
case INDEX_op_brcond_i32:
|
2013-03-12 01:21:59 +00:00
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
|
2012-09-26 18:48:54 +00:00
|
|
|
args[0], args[1], const_args[1]);
|
2008-05-19 23:59:38 +00:00
|
|
|
tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[2]], args[3]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_brcond2_i32:
|
|
|
|
/* The resulting conditions are:
|
|
|
|
* TCG_COND_EQ --> a0 == a2 && a1 == a3,
|
|
|
|
* TCG_COND_NE --> (a0 != a2 && a1 == a3) || a1 != a3,
|
|
|
|
* TCG_COND_LT(U) --> (a0 < a2 && a1 == a3) || a1 < a3,
|
|
|
|
* TCG_COND_GE(U) --> (a0 >= a2 && a1 == a3) || (a1 >= a3 && a1 != a3),
|
|
|
|
* TCG_COND_LE(U) --> (a0 <= a2 && a1 == a3) || (a1 <= a3 && a1 != a3),
|
|
|
|
* TCG_COND_GT(U) --> (a0 > a2 && a1 == a3) || a1 > a3,
|
|
|
|
*/
|
2013-03-12 01:21:59 +00:00
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
|
|
|
|
args[1], args[3], const_args[3]);
|
|
|
|
tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
|
|
|
|
args[0], args[2], const_args[2]);
|
2008-05-19 23:59:38 +00:00
|
|
|
tcg_out_goto_label(s, tcg_cond_to_arm_cond[args[4]], args[5]);
|
|
|
|
break;
|
2010-03-01 21:33:48 +00:00
|
|
|
case INDEX_op_setcond_i32:
|
2013-03-12 01:21:59 +00:00
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
|
|
|
|
args[1], args[2], const_args[2]);
|
2010-03-01 21:33:48 +00:00
|
|
|
tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[3]],
|
|
|
|
ARITH_MOV, args[0], 0, 1);
|
|
|
|
tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[3])],
|
|
|
|
ARITH_MOV, args[0], 0, 0);
|
|
|
|
break;
|
2010-03-01 21:33:49 +00:00
|
|
|
case INDEX_op_setcond2_i32:
|
|
|
|
/* See brcond2_i32 comment */
|
2013-03-12 01:21:59 +00:00
|
|
|
tcg_out_dat_rIN(s, COND_AL, ARITH_CMP, ARITH_CMN, 0,
|
|
|
|
args[2], args[4], const_args[4]);
|
|
|
|
tcg_out_dat_rIN(s, COND_EQ, ARITH_CMP, ARITH_CMN, 0,
|
|
|
|
args[1], args[3], const_args[3]);
|
2010-03-01 21:33:49 +00:00
|
|
|
tcg_out_dat_imm(s, tcg_cond_to_arm_cond[args[5]],
|
|
|
|
ARITH_MOV, args[0], 0, 1);
|
|
|
|
tcg_out_dat_imm(s, tcg_cond_to_arm_cond[tcg_invert_cond(args[5])],
|
|
|
|
ARITH_MOV, args[0], 0, 0);
|
2010-03-02 21:26:04 +00:00
|
|
|
break;
|
2008-05-19 23:59:38 +00:00
|
|
|
|
|
|
|
case INDEX_op_qemu_ld8u:
|
2013-09-03 23:16:47 +00:00
|
|
|
tcg_out_qemu_ld(s, args, MO_UB);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_qemu_ld8s:
|
2013-09-03 23:16:47 +00:00
|
|
|
tcg_out_qemu_ld(s, args, MO_SB);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_qemu_ld16u:
|
2013-09-03 23:16:47 +00:00
|
|
|
tcg_out_qemu_ld(s, args, MO_TEUW);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_qemu_ld16s:
|
2013-09-03 23:16:47 +00:00
|
|
|
tcg_out_qemu_ld(s, args, MO_TESW);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2010-03-19 19:00:26 +00:00
|
|
|
case INDEX_op_qemu_ld32:
|
2013-09-03 23:16:47 +00:00
|
|
|
tcg_out_qemu_ld(s, args, MO_TEUL);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_qemu_ld64:
|
2013-09-03 23:16:47 +00:00
|
|
|
tcg_out_qemu_ld(s, args, MO_TEQ);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
2008-05-20 11:26:40 +00:00
|
|
|
|
2008-05-19 23:59:38 +00:00
|
|
|
case INDEX_op_qemu_st8:
|
2013-09-03 23:16:47 +00:00
|
|
|
tcg_out_qemu_st(s, args, MO_UB);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_qemu_st16:
|
2013-09-03 23:16:47 +00:00
|
|
|
tcg_out_qemu_st(s, args, MO_TEUW);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_qemu_st32:
|
2013-09-03 23:16:47 +00:00
|
|
|
tcg_out_qemu_st(s, args, MO_TEUL);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_qemu_st64:
|
2013-09-03 23:16:47 +00:00
|
|
|
tcg_out_qemu_st(s, args, MO_TEQ);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
|
2010-04-09 18:52:48 +00:00
|
|
|
case INDEX_op_bswap16_i32:
|
|
|
|
tcg_out_bswap16(s, COND_AL, args[0], args[1]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_bswap32_i32:
|
|
|
|
tcg_out_bswap32(s, COND_AL, args[0], args[1]);
|
|
|
|
break;
|
|
|
|
|
2008-05-19 23:59:38 +00:00
|
|
|
case INDEX_op_ext8s_i32:
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_ext8s(s, COND_AL, args[0], args[1]);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
case INDEX_op_ext16s_i32:
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_out_ext16s(s, COND_AL, args[0], args[1]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_ext16u_i32:
|
|
|
|
tcg_out_ext16u(s, COND_AL, args[0], args[1]);
|
2008-05-19 23:59:38 +00:00
|
|
|
break;
|
|
|
|
|
2013-03-05 05:12:30 +00:00
|
|
|
case INDEX_op_deposit_i32:
|
|
|
|
tcg_out_deposit(s, COND_AL, args[0], args[2],
|
|
|
|
args[3], args[4], const_args[2]);
|
|
|
|
break;
|
|
|
|
|
2013-03-12 05:11:30 +00:00
|
|
|
case INDEX_op_div_i32:
|
|
|
|
tcg_out_sdiv(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
case INDEX_op_divu_i32:
|
|
|
|
tcg_out_udiv(s, COND_AL, args[0], args[1], args[2]);
|
|
|
|
break;
|
|
|
|
|
2008-05-19 23:59:38 +00:00
|
|
|
default:
|
|
|
|
tcg_abort();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static const TCGTargetOpDef arm_op_defs[] = {
|
|
|
|
{ INDEX_op_exit_tb, { } },
|
|
|
|
{ INDEX_op_goto_tb, { } },
|
|
|
|
{ INDEX_op_call, { "ri" } },
|
|
|
|
{ INDEX_op_br, { } },
|
|
|
|
|
|
|
|
{ INDEX_op_mov_i32, { "r", "r" } },
|
|
|
|
{ INDEX_op_movi_i32, { "r" } },
|
|
|
|
|
|
|
|
{ INDEX_op_ld8u_i32, { "r", "r" } },
|
|
|
|
{ INDEX_op_ld8s_i32, { "r", "r" } },
|
|
|
|
{ INDEX_op_ld16u_i32, { "r", "r" } },
|
|
|
|
{ INDEX_op_ld16s_i32, { "r", "r" } },
|
|
|
|
{ INDEX_op_ld_i32, { "r", "r" } },
|
|
|
|
{ INDEX_op_st8_i32, { "r", "r" } },
|
|
|
|
{ INDEX_op_st16_i32, { "r", "r" } },
|
|
|
|
{ INDEX_op_st_i32, { "r", "r" } },
|
|
|
|
|
|
|
|
/* TODO: "r", "r", "ri" */
|
2013-03-05 06:06:21 +00:00
|
|
|
{ INDEX_op_add_i32, { "r", "r", "rIN" } },
|
2013-03-12 01:04:14 +00:00
|
|
|
{ INDEX_op_sub_i32, { "r", "rI", "rIN" } },
|
2008-05-19 23:59:38 +00:00
|
|
|
{ INDEX_op_mul_i32, { "r", "r", "r" } },
|
|
|
|
{ INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
|
2013-02-20 07:51:58 +00:00
|
|
|
{ INDEX_op_muls2_i32, { "r", "r", "r", "r" } },
|
2013-03-05 05:36:45 +00:00
|
|
|
{ INDEX_op_and_i32, { "r", "r", "rIK" } },
|
|
|
|
{ INDEX_op_andc_i32, { "r", "r", "rIK" } },
|
2009-07-18 12:20:30 +00:00
|
|
|
{ INDEX_op_or_i32, { "r", "r", "rI" } },
|
|
|
|
{ INDEX_op_xor_i32, { "r", "r", "rI" } },
|
2008-05-20 11:26:40 +00:00
|
|
|
{ INDEX_op_neg_i32, { "r", "r" } },
|
2009-08-22 11:55:06 +00:00
|
|
|
{ INDEX_op_not_i32, { "r", "r" } },
|
2008-05-19 23:59:38 +00:00
|
|
|
|
|
|
|
{ INDEX_op_shl_i32, { "r", "r", "ri" } },
|
|
|
|
{ INDEX_op_shr_i32, { "r", "r", "ri" } },
|
|
|
|
{ INDEX_op_sar_i32, { "r", "r", "ri" } },
|
2010-04-09 18:52:48 +00:00
|
|
|
{ INDEX_op_rotl_i32, { "r", "r", "ri" } },
|
|
|
|
{ INDEX_op_rotr_i32, { "r", "r", "ri" } },
|
2008-05-19 23:59:38 +00:00
|
|
|
|
2013-03-12 01:21:59 +00:00
|
|
|
{ INDEX_op_brcond_i32, { "r", "rIN" } },
|
|
|
|
{ INDEX_op_setcond_i32, { "r", "r", "rIN" } },
|
|
|
|
{ INDEX_op_movcond_i32, { "r", "r", "rIN", "rIK", "0" } },
|
2008-05-19 23:59:38 +00:00
|
|
|
|
2013-03-12 02:51:56 +00:00
|
|
|
{ INDEX_op_add2_i32, { "r", "r", "r", "r", "rIN", "rIK" } },
|
|
|
|
{ INDEX_op_sub2_i32, { "r", "r", "rI", "rI", "rIN", "rIK" } },
|
2013-03-12 01:21:59 +00:00
|
|
|
{ INDEX_op_brcond2_i32, { "r", "r", "rIN", "rIN" } },
|
|
|
|
{ INDEX_op_setcond2_i32, { "r", "r", "r", "rIN", "rIN" } },
|
2008-05-19 23:59:38 +00:00
|
|
|
|
2010-03-20 11:10:20 +00:00
|
|
|
#if TARGET_LONG_BITS == 32
|
2010-04-09 18:52:48 +00:00
|
|
|
{ INDEX_op_qemu_ld8u, { "r", "l" } },
|
|
|
|
{ INDEX_op_qemu_ld8s, { "r", "l" } },
|
|
|
|
{ INDEX_op_qemu_ld16u, { "r", "l" } },
|
|
|
|
{ INDEX_op_qemu_ld16s, { "r", "l" } },
|
|
|
|
{ INDEX_op_qemu_ld32, { "r", "l" } },
|
2013-08-30 16:12:32 +00:00
|
|
|
{ INDEX_op_qemu_ld64, { "r", "r", "l" } },
|
2010-04-09 18:52:48 +00:00
|
|
|
|
|
|
|
{ INDEX_op_qemu_st8, { "s", "s" } },
|
|
|
|
{ INDEX_op_qemu_st16, { "s", "s" } },
|
|
|
|
{ INDEX_op_qemu_st32, { "s", "s" } },
|
2013-04-23 15:38:50 +00:00
|
|
|
{ INDEX_op_qemu_st64, { "s", "s", "s" } },
|
2010-03-20 11:10:20 +00:00
|
|
|
#else
|
2010-04-09 18:52:48 +00:00
|
|
|
{ INDEX_op_qemu_ld8u, { "r", "l", "l" } },
|
|
|
|
{ INDEX_op_qemu_ld8s, { "r", "l", "l" } },
|
|
|
|
{ INDEX_op_qemu_ld16u, { "r", "l", "l" } },
|
|
|
|
{ INDEX_op_qemu_ld16s, { "r", "l", "l" } },
|
|
|
|
{ INDEX_op_qemu_ld32, { "r", "l", "l" } },
|
2013-08-30 16:12:32 +00:00
|
|
|
{ INDEX_op_qemu_ld64, { "r", "r", "l", "l" } },
|
2010-04-09 18:52:48 +00:00
|
|
|
|
|
|
|
{ INDEX_op_qemu_st8, { "s", "s", "s" } },
|
|
|
|
{ INDEX_op_qemu_st16, { "s", "s", "s" } },
|
|
|
|
{ INDEX_op_qemu_st32, { "s", "s", "s" } },
|
2013-04-23 15:38:50 +00:00
|
|
|
{ INDEX_op_qemu_st64, { "s", "s", "s", "s" } },
|
2010-03-20 11:10:20 +00:00
|
|
|
#endif
|
2008-05-19 23:59:38 +00:00
|
|
|
|
2010-04-09 18:52:48 +00:00
|
|
|
{ INDEX_op_bswap16_i32, { "r", "r" } },
|
|
|
|
{ INDEX_op_bswap32_i32, { "r", "r" } },
|
|
|
|
|
2008-05-19 23:59:38 +00:00
|
|
|
{ INDEX_op_ext8s_i32, { "r", "r" } },
|
|
|
|
{ INDEX_op_ext16s_i32, { "r", "r" } },
|
2010-04-09 18:52:48 +00:00
|
|
|
{ INDEX_op_ext16u_i32, { "r", "r" } },
|
2008-05-19 23:59:38 +00:00
|
|
|
|
2013-03-05 05:12:30 +00:00
|
|
|
{ INDEX_op_deposit_i32, { "r", "0", "rZ" } },
|
|
|
|
|
2013-03-12 05:11:30 +00:00
|
|
|
{ INDEX_op_div_i32, { "r", "r", "r" } },
|
|
|
|
{ INDEX_op_divu_i32, { "r", "r", "r" } },
|
|
|
|
|
2008-05-19 23:59:38 +00:00
|
|
|
{ -1 },
|
|
|
|
};
|
|
|
|
|
2010-06-03 00:26:56 +00:00
|
|
|
static void tcg_target_init(TCGContext *s)
|
2008-05-19 23:59:38 +00:00
|
|
|
{
|
2013-06-06 17:46:35 +00:00
|
|
|
#if defined(CONFIG_GETAUXVAL)
|
|
|
|
    /* Only probe for the platform and capabilities if we haven't already
|
|
|
|
determined maximum values at compile time. */
|
|
|
|
# if !defined(use_idiv_instructions)
|
2013-05-02 11:18:38 +00:00
|
|
|
{
|
|
|
|
unsigned long hwcap = getauxval(AT_HWCAP);
|
|
|
|
use_idiv_instructions = (hwcap & HWCAP_ARM_IDIVA) != 0;
|
|
|
|
}
|
2013-06-06 17:46:35 +00:00
|
|
|
# endif
|
|
|
|
if (__ARM_ARCH < 7) {
|
|
|
|
const char *pl = (const char *)getauxval(AT_PLATFORM);
|
|
|
|
if (pl != NULL && pl[0] == 'v' && pl[1] >= '4' && pl[1] <= '9') {
|
|
|
|
arm_arch = pl[1] - '0';
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* GETAUXVAL */
|
2013-05-02 11:18:38 +00:00
|
|
|
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffff);
|
2008-05-19 23:59:38 +00:00
|
|
|
tcg_regset_set32(tcg_target_call_clobber_regs, 0,
|
2010-04-09 18:52:48 +00:00
|
|
|
(1 << TCG_REG_R0) |
|
|
|
|
(1 << TCG_REG_R1) |
|
|
|
|
(1 << TCG_REG_R2) |
|
|
|
|
(1 << TCG_REG_R3) |
|
|
|
|
(1 << TCG_REG_R12) |
|
|
|
|
(1 << TCG_REG_R14));
|
2008-05-19 23:59:38 +00:00
|
|
|
|
|
|
|
tcg_regset_clear(s->reserved_regs);
|
|
|
|
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
|
2013-03-12 16:49:04 +00:00
|
|
|
tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
|
2010-04-09 18:52:48 +00:00
|
|
|
tcg_regset_set_reg(s->reserved_regs, TCG_REG_PC);
|
2008-05-19 23:59:38 +00:00
|
|
|
|
|
|
|
tcg_add_target_add_op_defs(arm_op_defs);
|
|
|
|
}
|
|
|
|
|
2011-11-09 08:03:34 +00:00
|
|
|
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
|
2013-08-21 00:07:26 +00:00
|
|
|
TCGReg arg1, intptr_t arg2)
|
2008-05-19 23:59:38 +00:00
|
|
|
{
|
|
|
|
tcg_out_ld32u(s, COND_AL, arg, arg1, arg2);
|
|
|
|
}
|
|
|
|
|
2011-11-09 08:03:34 +00:00
|
|
|
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
|
2013-08-21 00:07:26 +00:00
|
|
|
TCGReg arg1, intptr_t arg2)
|
2008-05-19 23:59:38 +00:00
|
|
|
{
|
|
|
|
tcg_out_st32(s, COND_AL, arg, arg1, arg2);
|
|
|
|
}
|
|
|
|
|
2011-11-09 08:03:34 +00:00
|
|
|
static inline void tcg_out_mov(TCGContext *s, TCGType type,
|
|
|
|
TCGReg ret, TCGReg arg)
|
2008-05-19 23:59:38 +00:00
|
|
|
{
|
|
|
|
tcg_out_dat_reg(s, COND_AL, ARITH_MOV, ret, 0, arg, SHIFT_IMM_LSL(0));
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void tcg_out_movi(TCGContext *s, TCGType type,
|
2011-11-09 08:03:34 +00:00
|
|
|
TCGReg ret, tcg_target_long arg)
|
2008-05-19 23:59:38 +00:00
|
|
|
{
|
|
|
|
tcg_out_movi32(s, COND_AL, ret, arg);
|
|
|
|
}
|
|
|
|
|
2013-06-05 14:55:33 +00:00
|
|
|
/* Compute frame size via macros, to share between tcg_target_qemu_prologue
|
|
|
|
and tcg_register_jit. */
|
|
|
|
|
|
|
|
#define PUSH_SIZE ((11 - 4 + 1 + 1) * sizeof(tcg_target_long))
|
|
|
|
|
|
|
|
#define FRAME_SIZE \
|
|
|
|
((PUSH_SIZE \
|
|
|
|
+ TCG_STATIC_CALL_ARGS_SIZE \
|
|
|
|
+ CPU_TEMP_BUF_NLONGS * sizeof(long) \
|
|
|
|
+ TCG_TARGET_STACK_ALIGN - 1) \
|
|
|
|
& -TCG_TARGET_STACK_ALIGN)
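
/* PUSH_SIZE covers the nine registers saved by the stmdb in the prologue
 * (9 * 4 = 36 bytes); FRAME_SIZE then adds the helper-argument and tcg temp
 * areas and rounds the total up to TCG_TARGET_STACK_ALIGN. */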
|
|
|
|
|
2010-06-03 00:26:56 +00:00
|
|
|
static void tcg_target_qemu_prologue(TCGContext *s)
|
2008-05-19 23:59:38 +00:00
|
|
|
{
|
2013-06-05 14:55:33 +00:00
|
|
|
int stack_addend;
|
2013-03-13 00:11:40 +00:00
|
|
|
|
|
|
|
/* Calling convention requires us to save r4-r11 and lr. */
|
|
|
|
/* stmdb sp!, { r4 - r11, lr } */
|
|
|
|
tcg_out32(s, (COND_AL << 28) | 0x092d4ff0);
|
2011-05-15 16:03:25 +00:00
|
|
|
|
2013-06-05 14:55:33 +00:00
|
|
|
/* Reserve callee argument and tcg temp space. */
|
|
|
|
stack_addend = FRAME_SIZE - PUSH_SIZE;
|
2013-03-13 00:11:40 +00:00
|
|
|
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_SUB, TCG_REG_CALL_STACK,
|
2013-06-05 14:55:33 +00:00
|
|
|
TCG_REG_CALL_STACK, stack_addend, 1);
|
2013-03-13 00:11:40 +00:00
|
|
|
tcg_set_frame(s, TCG_REG_CALL_STACK, TCG_STATIC_CALL_ARGS_SIZE,
|
|
|
|
CPU_TEMP_BUF_NLONGS * sizeof(long));
|
2010-03-05 07:35:07 +00:00
|
|
|
|
2011-05-15 16:03:25 +00:00
|
|
|
tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
|
2008-05-19 23:59:38 +00:00
|
|
|
|
2011-05-15 16:03:25 +00:00
|
|
|
tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);
|
2008-05-19 23:59:38 +00:00
|
|
|
tb_ret_addr = s->code_ptr;
|
|
|
|
|
2013-03-13 00:11:40 +00:00
|
|
|
/* Epilogue. We branch here via tb_ret_addr. */
|
|
|
|
tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
|
2013-06-05 14:55:33 +00:00
|
|
|
TCG_REG_CALL_STACK, stack_addend, 1);
|
2013-03-13 00:11:40 +00:00
|
|
|
|
|
|
|
/* ldmia sp!, { r4 - r11, pc } */
|
|
|
|
tcg_out32(s, (COND_AL << 28) | 0x08bd8ff0);
|
2008-05-19 23:59:38 +00:00
|
|
|
}
|
2013-06-05 14:55:33 +00:00
|
|
|
|
|
|
|
typedef struct {
|
|
|
|
DebugFrameCIE cie;
|
|
|
|
DebugFrameFDEHeader fde;
|
|
|
|
uint8_t fde_def_cfa[4];
|
|
|
|
uint8_t fde_reg_ofs[18];
|
|
|
|
} DebugFrame;
|
|
|
|
|
|
|
|
#define ELF_HOST_MACHINE EM_ARM
|
|
|
|
|
|
|
|
/* We're expecting a 2 byte uleb128 encoded value. */
|
|
|
|
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
|
|
|
|
|
|
|
|
static DebugFrame debug_frame = {
|
|
|
|
.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
|
|
|
|
.cie.id = -1,
|
|
|
|
.cie.version = 1,
|
|
|
|
.cie.code_align = 1,
|
|
|
|
.cie.data_align = 0x7c, /* sleb128 -4 */
|
|
|
|
.cie.return_column = 14,
|
|
|
|
|
|
|
|
/* Total FDE size does not include the "len" member. */
|
|
|
|
.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
|
|
|
|
|
|
|
|
.fde_def_cfa = {
|
|
|
|
12, 13, /* DW_CFA_def_cfa sp, ... */
|
|
|
|
(FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
|
|
|
|
(FRAME_SIZE >> 7)
|
|
|
|
},
|
|
|
|
.fde_reg_ofs = {
|
|
|
|
/* The following must match the stmdb in the prologue. */
|
|
|
|
0x8e, 1, /* DW_CFA_offset, lr, -4 */
|
|
|
|
0x8b, 2, /* DW_CFA_offset, r11, -8 */
|
|
|
|
0x8a, 3, /* DW_CFA_offset, r10, -12 */
|
|
|
|
0x89, 4, /* DW_CFA_offset, r9, -16 */
|
|
|
|
0x88, 5, /* DW_CFA_offset, r8, -20 */
|
|
|
|
0x87, 6, /* DW_CFA_offset, r7, -24 */
|
|
|
|
0x86, 7, /* DW_CFA_offset, r6, -28 */
|
|
|
|
0x85, 8, /* DW_CFA_offset, r5, -32 */
|
|
|
|
0x84, 9, /* DW_CFA_offset, r4, -36 */
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
void tcg_register_jit(void *buf, size_t buf_size)
|
|
|
|
{
|
|
|
|
debug_frame.fde.func_start = (tcg_target_long) buf;
|
|
|
|
debug_frame.fde.func_len = buf_size;
|
|
|
|
|
|
|
|
tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
|
|
|
|
}
|