diff --git a/target-alpha/translate.c b/target-alpha/translate.c
index 2849ede85c..1fd9c3be96 100644
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -2007,7 +2007,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
         REQUIRE_REG_31(rb);
         t32 = tcg_temp_new_i32();
         va = load_gpr(ctx, ra);
-        tcg_gen_trunc_i64_i32(t32, va);
+        tcg_gen_extrl_i64_i32(t32, va);
         gen_helper_memory_to_s(vc, t32);
         tcg_temp_free_i32(t32);
         break;
@@ -2027,7 +2027,7 @@ static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
         REQUIRE_REG_31(rb);
         t32 = tcg_temp_new_i32();
         va = load_gpr(ctx, ra);
-        tcg_gen_trunc_i64_i32(t32, va);
+        tcg_gen_extrl_i64_i32(t32, va);
         gen_helper_memory_to_f(vc, t32);
         tcg_temp_free_i32(t32);
         break;
diff --git a/target-arm/translate-a64.c b/target-arm/translate-a64.c
index 689f2be896..5c13e153d4 100644
--- a/target-arm/translate-a64.c
+++ b/target-arm/translate-a64.c
@@ -528,9 +528,9 @@ static inline void gen_set_NZ64(TCGv_i64 result)
     TCGv_i64 flag = tcg_temp_new_i64();
 
     tcg_gen_setcondi_i64(TCG_COND_NE, flag, result, 0);
-    tcg_gen_trunc_i64_i32(cpu_ZF, flag);
+    tcg_gen_extrl_i64_i32(cpu_ZF, flag);
     tcg_gen_shri_i64(flag, result, 32);
-    tcg_gen_trunc_i64_i32(cpu_NF, flag);
+    tcg_gen_extrl_i64_i32(cpu_NF, flag);
     tcg_temp_free_i64(flag);
 }
 
@@ -540,8 +540,8 @@ static inline void gen_logic_CC(int sf, TCGv_i64 result)
     if (sf) {
         gen_set_NZ64(result);
     } else {
-        tcg_gen_trunc_i64_i32(cpu_ZF, result);
-        tcg_gen_trunc_i64_i32(cpu_NF, result);
+        tcg_gen_extrl_i64_i32(cpu_ZF, result);
+        tcg_gen_extrl_i64_i32(cpu_NF, result);
     }
     tcg_gen_movi_i32(cpu_CF, 0);
     tcg_gen_movi_i32(cpu_VF, 0);
@@ -559,7 +559,7 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
         tcg_gen_movi_i64(tmp, 0);
         tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
 
-        tcg_gen_trunc_i64_i32(cpu_CF, flag);
+        tcg_gen_extrl_i64_i32(cpu_CF, flag);
 
         gen_set_NZ64(result);
 
@@ -568,7 +568,7 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
         tcg_gen_andc_i64(flag, flag, tmp);
         tcg_temp_free_i64(tmp);
         tcg_gen_shri_i64(flag, flag, 32);
-        tcg_gen_trunc_i64_i32(cpu_VF, flag);
+        tcg_gen_extrl_i64_i32(cpu_VF, flag);
 
         tcg_gen_mov_i64(dest, result);
         tcg_temp_free_i64(result);
@@ -580,8 +580,8 @@ static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
         TCGv_i32 tmp = tcg_temp_new_i32();
 
         tcg_gen_movi_i32(tmp, 0);
-        tcg_gen_trunc_i64_i32(t0_32, t0);
-        tcg_gen_trunc_i64_i32(t1_32, t1);
+        tcg_gen_extrl_i64_i32(t0_32, t0);
+        tcg_gen_extrl_i64_i32(t1_32, t1);
         tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
         tcg_gen_mov_i32(cpu_ZF, cpu_NF);
         tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
@@ -609,7 +609,7 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
         gen_set_NZ64(result);
 
         tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
-        tcg_gen_trunc_i64_i32(cpu_CF, flag);
+        tcg_gen_extrl_i64_i32(cpu_CF, flag);
 
         tcg_gen_xor_i64(flag, result, t0);
         tmp = tcg_temp_new_i64();
@@ -617,7 +617,7 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
         tcg_gen_and_i64(flag, flag, tmp);
         tcg_temp_free_i64(tmp);
         tcg_gen_shri_i64(flag, flag, 32);
-        tcg_gen_trunc_i64_i32(cpu_VF, flag);
+        tcg_gen_extrl_i64_i32(cpu_VF, flag);
         tcg_gen_mov_i64(dest, result);
         tcg_temp_free_i64(flag);
         tcg_temp_free_i64(result);
@@ -627,8 +627,8 @@ static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
         TCGv_i32 t1_32 = tcg_temp_new_i32();
         TCGv_i32 tmp;
 
-        tcg_gen_trunc_i64_i32(t0_32, t0);
-        tcg_gen_trunc_i64_i32(t1_32, t1);
+        tcg_gen_extrl_i64_i32(t0_32, t0);
+        tcg_gen_extrl_i64_i32(t1_32, t1);
         tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
         tcg_gen_mov_i32(cpu_ZF, cpu_NF);
         tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
@@ -670,14 +670,14 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
         tcg_gen_extu_i32_i64(cf_64, cpu_CF);
         tcg_gen_add2_i64(result, cf_64, t0, tmp, cf_64, tmp);
         tcg_gen_add2_i64(result, cf_64, result, cf_64, t1, tmp);
-        tcg_gen_trunc_i64_i32(cpu_CF, cf_64);
+        tcg_gen_extrl_i64_i32(cpu_CF, cf_64);
         gen_set_NZ64(result);
 
         tcg_gen_xor_i64(vf_64, result, t0);
         tcg_gen_xor_i64(tmp, t0, t1);
         tcg_gen_andc_i64(vf_64, vf_64, tmp);
         tcg_gen_shri_i64(vf_64, vf_64, 32);
-        tcg_gen_trunc_i64_i32(cpu_VF, vf_64);
+        tcg_gen_extrl_i64_i32(cpu_VF, vf_64);
 
         tcg_gen_mov_i64(dest, result);
 
@@ -691,8 +691,8 @@ static void gen_adc_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
         t1_32 = tcg_temp_new_i32();
         tmp = tcg_const_i32(0);
 
-        tcg_gen_trunc_i64_i32(t0_32, t0);
-        tcg_gen_trunc_i64_i32(t1_32, t1);
+        tcg_gen_extrl_i64_i32(t0_32, t0);
+        tcg_gen_extrl_i64_i32(t1_32, t1);
         tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, cpu_CF, tmp);
         tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1_32, tmp);
@@ -1301,7 +1301,7 @@ static void gen_set_nzcv(TCGv_i64 tcg_rt)
     TCGv_i32 nzcv = tcg_temp_new_i32();
 
     /* take NZCV from R[t] */
-    tcg_gen_trunc_i64_i32(nzcv, tcg_rt);
+    tcg_gen_extrl_i64_i32(nzcv, tcg_rt);
 
     /* bit 31, N */
     tcg_gen_andi_i32(cpu_NF, nzcv, (1U << 31));
@@ -3131,8 +3131,8 @@ static void shift_reg(TCGv_i64 dst, TCGv_i64 src, int sf,
             TCGv_i32 t0, t1;
             t0 = tcg_temp_new_i32();
             t1 = tcg_temp_new_i32();
-            tcg_gen_trunc_i64_i32(t0, src);
-            tcg_gen_trunc_i64_i32(t1, shift_amount);
+            tcg_gen_extrl_i64_i32(t0, src);
+            tcg_gen_extrl_i64_i32(t1, shift_amount);
             tcg_gen_rotr_i32(t0, t0, t1);
             tcg_gen_extu_i32_i64(dst, t0);
             tcg_temp_free_i32(t0);
@@ -3680,7 +3680,7 @@ static void handle_clz(DisasContext *s, unsigned int sf,
         gen_helper_clz64(tcg_rd, tcg_rn);
     } else {
         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
-        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
         gen_helper_clz(tcg_tmp32, tcg_tmp32);
         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
         tcg_temp_free_i32(tcg_tmp32);
@@ -3698,7 +3698,7 @@ static void handle_cls(DisasContext *s, unsigned int sf,
         gen_helper_cls64(tcg_rd, tcg_rn);
     } else {
         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
-        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
         gen_helper_cls32(tcg_tmp32, tcg_tmp32);
         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
         tcg_temp_free_i32(tcg_tmp32);
@@ -3716,7 +3716,7 @@ static void handle_rbit(DisasContext *s, unsigned int sf,
         gen_helper_rbit64(tcg_rd, tcg_rn);
     } else {
         TCGv_i32 tcg_tmp32 = tcg_temp_new_i32();
-        tcg_gen_trunc_i64_i32(tcg_tmp32, tcg_rn);
+        tcg_gen_extrl_i64_i32(tcg_tmp32, tcg_rn);
         gen_helper_rbit(tcg_tmp32, tcg_tmp32);
         tcg_gen_extu_i32_i64(tcg_rd, tcg_tmp32);
         tcg_temp_free_i32(tcg_tmp32);
@@ -5475,16 +5475,16 @@ static void disas_simd_across_lanes(DisasContext *s, uint32_t insn)
         assert(elements == 4);
 
         read_vec_element(s, tcg_elt, rn, 0, MO_32);
-        tcg_gen_trunc_i64_i32(tcg_elt1, tcg_elt);
+        tcg_gen_extrl_i64_i32(tcg_elt1, tcg_elt);
         read_vec_element(s, tcg_elt, rn, 1, MO_32);
-        tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
+        tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
 
         do_minmaxop(s, tcg_elt1, tcg_elt2, opcode, is_min, fpst);
 
         read_vec_element(s, tcg_elt, rn, 2, MO_32);
-        tcg_gen_trunc_i64_i32(tcg_elt2, tcg_elt);
+        tcg_gen_extrl_i64_i32(tcg_elt2, tcg_elt);
         read_vec_element(s, tcg_elt, rn, 3, MO_32);
-        tcg_gen_trunc_i64_i32(tcg_elt3, tcg_elt);
+        tcg_gen_extrl_i64_i32(tcg_elt3, tcg_elt);
 
         do_minmaxop(s, tcg_elt2, tcg_elt3, opcode, is_min, fpst);
 
@@ -7647,7 +7647,7 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
     static NeonGenNarrowFn * const xtnfns[3] = {
         gen_helper_neon_narrow_u8,
         gen_helper_neon_narrow_u16,
-        tcg_gen_trunc_i64_i32,
+        tcg_gen_extrl_i64_i32,
     };
     static NeonGenNarrowEnvFn * const sqxtunfns[3] = {
         gen_helper_neon_unarrow_sat8,
@@ -7681,10 +7681,10 @@ static void handle_2misc_narrow(DisasContext *s, bool scalar,
         } else {
             TCGv_i32 tcg_lo = tcg_temp_new_i32();
             TCGv_i32 tcg_hi = tcg_temp_new_i32();
-            tcg_gen_trunc_i64_i32(tcg_lo, tcg_op);
+            tcg_gen_extrl_i64_i32(tcg_lo, tcg_op);
             gen_helper_vfp_fcvt_f32_to_f16(tcg_lo, tcg_lo, cpu_env);
             tcg_gen_shri_i64(tcg_op, tcg_op, 32);
-            tcg_gen_trunc_i64_i32(tcg_hi, tcg_op);
+            tcg_gen_extrl_i64_i32(tcg_hi, tcg_op);
             gen_helper_vfp_fcvt_f32_to_f16(tcg_hi, tcg_hi, cpu_env);
             tcg_gen_deposit_i32(tcg_res[pass], tcg_lo, tcg_hi, 16, 16);
             tcg_temp_free_i32(tcg_lo);
@@ -8593,7 +8593,7 @@ static void handle_3rd_wide(DisasContext *s, int is_q, int is_u, int size,
 static void do_narrow_high_u32(TCGv_i32 res, TCGv_i64 in)
 {
     tcg_gen_shri_i64(in, in, 32);
-    tcg_gen_trunc_i64_i32(res, in);
+    tcg_gen_extrl_i64_i32(res, in);
 }
 
 static void do_narrow_round_high_u32(TCGv_i32 res, TCGv_i64 in)
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 69ac18c108..e27634f3c8 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -1557,7 +1557,7 @@ static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
     } else {
         tmp = tcg_temp_new_i32();
         iwmmxt_load_reg(cpu_V0, rd);
-        tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
     }
     tcg_gen_andi_i32(tmp, tmp, mask);
     tcg_gen_mov_i32(dest, tmp);
@@ -1581,9 +1581,9 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
         rdhi = (insn >> 16) & 0xf;
         if (insn & ARM_CP_RW_BIT) { /* TMRRC */
             iwmmxt_load_reg(cpu_V0, wrd);
-            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
+            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
             tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
+            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
         } else { /* TMCRR */
             tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
             iwmmxt_store_reg(cpu_V0, wrd);
@@ -1638,15 +1638,15 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
             if (insn & (1 << 22)) { /* WSTRD */
                 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
             } else { /* WSTRW wRd */
-                tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+                tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                 gen_aa32_st32(tmp, addr, get_mem_index(s));
             }
         } else {
             if (insn & (1 << 22)) { /* WSTRH */
-                tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+                tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                 gen_aa32_st16(tmp, addr, get_mem_index(s));
             } else { /* WSTRB */
-                tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+                tcg_gen_extrl_i64_i32(tmp, cpu_M0);
                 gen_aa32_st8(tmp, addr, get_mem_index(s));
             }
         }
@@ -1946,7 +1946,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
         switch ((insn >> 22) & 3) {
         case 0:
             tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
-            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
             if (insn & 8) {
                 tcg_gen_ext8s_i32(tmp, tmp);
             } else {
@@ -1955,7 +1955,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
             break;
         case 1:
             tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
-            tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
             if (insn & 8) {
                 tcg_gen_ext16s_i32(tmp, tmp);
             } else {
@@ -1964,7 +1964,7 @@ static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
             break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
-           tcg_gen_trunc_i64_i32(tmp, cpu_M0);
+           tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
@@ -2627,9 +2627,9 @@ static int disas_dsp_insn(DisasContext *s, uint32_t insn)
 
         if (insn & ARM_CP_RW_BIT) { /* MRA */
             iwmmxt_load_reg(cpu_V0, acc);
-            tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
+            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
             tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-            tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
+            tcg_gen_extrl_i64_i32(cpu_R[rdhi], cpu_V0);
             tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
         } else { /* MAR */
             tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
@@ -2951,7 +2951,7 @@ static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
             } else {
                 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
             }
-            tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
+            tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
             tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
             tcg_temp_free_i32(tcg_tmp);
             tcg_temp_free_i64(tcg_res);
@@ -4683,7 +4683,7 @@ static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
     switch (size) {
     case 0: gen_helper_neon_narrow_u8(dest, src); break;
     case 1: gen_helper_neon_narrow_u16(dest, src); break;
-    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
+    case 2: tcg_gen_extrl_i64_i32(dest, src); break;
    default: abort();
    }
 }
@@ -6254,7 +6254,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                     break;
                 case 2:
                     tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-                    tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+                    tcg_gen_extrl_i64_i32(tmp, cpu_V0);
                     break;
                 default: abort();
                 }
@@ -6269,7 +6269,7 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
                 case 2:
                     tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
                     tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
-                    tcg_gen_trunc_i64_i32(tmp, cpu_V0);
+                    tcg_gen_extrl_i64_i32(tmp, cpu_V0);
                     break;
                 default: abort();
                 }
@@ -7224,11 +7224,11 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
                     tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                 }
                 tmp = tcg_temp_new_i32();
-                tcg_gen_trunc_i64_i32(tmp, tmp64);
+                tcg_gen_extrl_i64_i32(tmp, tmp64);
                 store_reg(s, rt, tmp);
                 tcg_gen_shri_i64(tmp64, tmp64, 32);
                 tmp = tcg_temp_new_i32();
-                tcg_gen_trunc_i64_i32(tmp, tmp64);
+                tcg_gen_extrl_i64_i32(tmp, tmp64);
                 tcg_temp_free_i64(tmp64);
                 store_reg(s, rt2, tmp);
             } else {
@@ -7334,11 +7334,11 @@ static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
 {
     TCGv_i32 tmp;
     tmp = tcg_temp_new_i32();
-    tcg_gen_trunc_i64_i32(tmp, val);
+    tcg_gen_extrl_i64_i32(tmp, val);
     store_reg(s, rlow, tmp);
     tmp = tcg_temp_new_i32();
     tcg_gen_shri_i64(val, val, 32);
-    tcg_gen_trunc_i64_i32(tmp, val);
+    tcg_gen_extrl_i64_i32(tmp, val);
     store_reg(s, rhigh, tmp);
 }
 
@@ -8013,7 +8013,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
                 tmp64 = gen_muls_i64_i32(tmp, tmp2);
                 tcg_gen_shri_i64(tmp64, tmp64, 16);
                 tmp = tcg_temp_new_i32();
-                tcg_gen_trunc_i64_i32(tmp, tmp64);
+                tcg_gen_extrl_i64_i32(tmp, tmp64);
                 tcg_temp_free_i64(tmp64);
                 if ((sh & 2) == 0) {
                     tmp2 = load_reg(s, rn);
@@ -8679,7 +8679,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
                         }
                         tcg_gen_shri_i64(tmp64, tmp64, 32);
                         tmp = tcg_temp_new_i32();
-                        tcg_gen_trunc_i64_i32(tmp, tmp64);
+                        tcg_gen_extrl_i64_i32(tmp, tmp64);
                         tcg_temp_free_i64(tmp64);
                         store_reg(s, rn, tmp);
                         break;
@@ -9749,7 +9749,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
                 tmp64 = gen_muls_i64_i32(tmp, tmp2);
                 tcg_gen_shri_i64(tmp64, tmp64, 16);
                 tmp = tcg_temp_new_i32();
-                tcg_gen_trunc_i64_i32(tmp, tmp64);
+                tcg_gen_extrl_i64_i32(tmp, tmp64);
                 tcg_temp_free_i64(tmp64);
 
                 if (rs != 15) {
@@ -9773,7 +9773,7 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
                 }
                 tcg_gen_shri_i64(tmp64, tmp64, 32);
                 tmp = tcg_temp_new_i32();
-                tcg_gen_trunc_i64_i32(tmp, tmp64);
+                tcg_gen_extrl_i64_i32(tmp, tmp64);
                 tcg_temp_free_i64(tmp64);
                 break;
             case 7: /* Unsigned sum of absolute differences. */
diff --git a/target-cris/translate.c b/target-cris/translate.c
index 3e59601eb4..5699826c8b 100644
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -2604,9 +2604,9 @@ static int dec_movem_mr(CPUCRISState *env, DisasContext *dc)
     tcg_temp_free(addr);
 
     for (i = 0; i < (nr >> 1); i++) {
-        tcg_gen_trunc_i64_i32(cpu_R[i * 2], tmp[i]);
+        tcg_gen_extrl_i64_i32(cpu_R[i * 2], tmp[i]);
         tcg_gen_shri_i64(tmp[i], tmp[i], 32);
-        tcg_gen_trunc_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
+        tcg_gen_extrl_i64_i32(cpu_R[i * 2 + 1], tmp[i]);
         tcg_temp_free_i64(tmp[i]);
     }
     if (nr & 1) {
diff --git a/target-m68k/translate.c b/target-m68k/translate.c
index a57d2415c9..3cdf6652aa 100644
--- a/target-m68k/translate.c
+++ b/target-m68k/translate.c
@@ -2680,7 +2680,7 @@ DISAS_INSN(from_mac)
     if (s->env->macsr & MACSR_FI) {
         gen_helper_get_macf(rx, cpu_env, acc);
     } else if ((s->env->macsr & MACSR_OMC) == 0) {
-        tcg_gen_trunc_i64_i32(rx, acc);
+        tcg_gen_extrl_i64_i32(rx, acc);
     } else if (s->env->macsr & MACSR_SU) {
         gen_helper_get_macs(rx, acc);
     } else {
diff --git a/target-microblaze/translate.c b/target-microblaze/translate.c
index f4e969b29c..47ac18015e 100644
--- a/target-microblaze/translate.c
+++ b/target-microblaze/translate.c
@@ -598,9 +598,9 @@ static void t_gen_muls(TCGv d, TCGv d2, TCGv a, TCGv b)
     tcg_gen_ext_i32_i64(t1, b);
     tcg_gen_mul_i64(t0, t0, t1);
 
-    tcg_gen_trunc_i64_i32(d, t0);
+    tcg_gen_extrl_i64_i32(d, t0);
     tcg_gen_shri_i64(t0, t0, 32);
-    tcg_gen_trunc_i64_i32(d2, t0);
+    tcg_gen_extrl_i64_i32(d2, t0);
 
     tcg_temp_free_i64(t0);
     tcg_temp_free_i64(t1);
@@ -618,9 +618,9 @@ static void t_gen_mulu(TCGv d, TCGv d2, TCGv a, TCGv b)
     tcg_gen_extu_i32_i64(t1, b);
     tcg_gen_mul_i64(t0, t0, t1);
 
-    tcg_gen_trunc_i64_i32(d, t0);
+    tcg_gen_extrl_i64_i32(d, t0);
     tcg_gen_shri_i64(t0, t0, 32);
-    tcg_gen_trunc_i64_i32(d2, t0);
+    tcg_gen_extrl_i64_i32(d2, t0);
 
     tcg_temp_free_i64(t0);
     tcg_temp_free_i64(t1);
diff --git a/target-mips/translate.c b/target-mips/translate.c
index 98cf72de74..93cb4f2731 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -1629,7 +1629,7 @@ static void gen_load_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
     if (ctx->hflags & MIPS_HFLAG_FRE) {
         generate_exception(ctx, EXCP_RI);
     }
-    tcg_gen_trunc_i64_i32(t, fpu_f64[reg]);
+    tcg_gen_extrl_i64_i32(t, fpu_f64[reg]);
 }
 
 static void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
@@ -1649,7 +1649,7 @@ static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
     if (ctx->hflags & MIPS_HFLAG_F64) {
         TCGv_i64 t64 = tcg_temp_new_i64();
         tcg_gen_shri_i64(t64, fpu_f64[reg], 32);
-        tcg_gen_trunc_i64_i32(t, t64);
+        tcg_gen_extrl_i64_i32(t, t64);
         tcg_temp_free_i64(t64);
     } else {
         gen_load_fpr32(ctx, t, reg | 1);
diff --git a/target-openrisc/translate.c b/target-openrisc/translate.c
index a62cbf4011..aca1242bdb 100644
--- a/target-openrisc/translate.c
+++ b/target-openrisc/translate.c
@@ -279,7 +279,7 @@ static void dec_calc(DisasContext *dc, uint32_t insn)
             tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
             tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
             tcg_gen_add_i64(td, ta, tb);
-            tcg_gen_trunc_i64_i32(res, td);
+            tcg_gen_extrl_i64_i32(res, td);
             tcg_gen_shri_i64(td, td, 31);
             tcg_gen_andi_i64(td, td, 0x3);
             /* Jump to lab when no overflow. */
@@ -324,7 +324,7 @@ static void dec_calc(DisasContext *dc, uint32_t insn)
             tcg_gen_shri_i64(tcy, tcy, 10);
             tcg_gen_add_i64(td, ta, tb);
             tcg_gen_add_i64(td, td, tcy);
-            tcg_gen_trunc_i64_i32(res, td);
+            tcg_gen_extrl_i64_i32(res, td);
             tcg_gen_shri_i64(td, td, 32);
             tcg_gen_andi_i64(td, td, 0x3);
             /* Jump to lab when no overflow. */
@@ -366,7 +366,7 @@ static void dec_calc(DisasContext *dc, uint32_t insn)
             tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
             tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
             tcg_gen_sub_i64(td, ta, tb);
-            tcg_gen_trunc_i64_i32(res, td);
+            tcg_gen_extrl_i64_i32(res, td);
             tcg_gen_shri_i64(td, td, 31);
             tcg_gen_andi_i64(td, td, 0x3);
             /* Jump to lab when no overflow. */
@@ -779,9 +779,9 @@ static void dec_misc(DisasContext *dc, uint32_t insn)
             tcg_gen_ext_i32_i64(t1, dst);
             tcg_gen_concat_i32_i64(t2, maclo, machi);
             tcg_gen_add_i64(t2, t2, t1);
-            tcg_gen_trunc_i64_i32(maclo, t2);
+            tcg_gen_extrl_i64_i32(maclo, t2);
             tcg_gen_shri_i64(t2, t2, 32);
-            tcg_gen_trunc_i64_i32(machi, t2);
+            tcg_gen_extrl_i64_i32(machi, t2);
             tcg_temp_free_i32(dst);
             tcg_temp_free(ttmp);
             tcg_temp_free_i64(t1);
@@ -898,7 +898,7 @@ static void dec_misc(DisasContext *dc, uint32_t insn)
             TCGv_i32 sr_ove = tcg_temp_local_new_i32();
             tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
             tcg_gen_addi_i64(td, ta, sign_extend(I16, 16));
-            tcg_gen_trunc_i64_i32(res, td);
+            tcg_gen_extrl_i64_i32(res, td);
             tcg_gen_shri_i64(td, td, 32);
             tcg_gen_andi_i64(td, td, 0x3);
             /* Jump to lab when no overflow. */
@@ -934,7 +934,7 @@ static void dec_misc(DisasContext *dc, uint32_t insn)
             tcg_gen_extu_i32_i64(tcy, sr_cy);
             tcg_gen_addi_i64(td, ta, sign_extend(I16, 16));
             tcg_gen_add_i64(td, td, tcy);
-            tcg_gen_trunc_i64_i32(res, td);
+            tcg_gen_extrl_i64_i32(res, td);
             tcg_gen_shri_i64(td, td, 32);
             tcg_gen_andi_i64(td, td, 0x3);
             /* Jump to lab when no overflow. */
@@ -1073,9 +1073,9 @@ static void dec_mac(DisasContext *dc, uint32_t insn)
         tcg_gen_ext_i32_i64(t1, t0);
         tcg_gen_concat_i32_i64(t2, maclo, machi);
         tcg_gen_add_i64(t2, t2, t1);
-        tcg_gen_trunc_i64_i32(maclo, t2);
+        tcg_gen_extrl_i64_i32(maclo, t2);
         tcg_gen_shri_i64(t2, t2, 32);
-        tcg_gen_trunc_i64_i32(machi, t2);
+        tcg_gen_extrl_i64_i32(machi, t2);
         tcg_temp_free_i32(t0);
         tcg_temp_free_i64(t1);
         tcg_temp_free_i64(t2);
@@ -1092,9 +1092,9 @@ static void dec_mac(DisasContext *dc, uint32_t insn)
         tcg_gen_ext_i32_i64(t1, t0);
         tcg_gen_concat_i32_i64(t2, maclo, machi);
         tcg_gen_sub_i64(t2, t2, t1);
-        tcg_gen_trunc_i64_i32(maclo, t2);
+        tcg_gen_extrl_i64_i32(maclo, t2);
         tcg_gen_shri_i64(t2, t2, 32);
-        tcg_gen_trunc_i64_i32(machi, t2);
+        tcg_gen_extrl_i64_i32(machi, t2);
         tcg_temp_free_i32(t0);
         tcg_temp_free_i64(t1);
         tcg_temp_free_i64(t2);
diff --git a/target-s390x/translate.c b/target-s390x/translate.c
index c748290d5c..2bca33acca 100644
--- a/target-s390x/translate.c
+++ b/target-s390x/translate.c
@@ -811,7 +811,7 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
     case CC_OP_LTGT0_32:
         c->is_64 = false;
         c->u.s32.a = tcg_temp_new_i32();
-        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_dst);
+        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
         c->u.s32.b = tcg_const_i32(0);
         break;
     case CC_OP_LTGT_32:
@@ -819,9 +819,9 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
     case CC_OP_SUBU_32:
         c->is_64 = false;
         c->u.s32.a = tcg_temp_new_i32();
-        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_src);
+        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
         c->u.s32.b = tcg_temp_new_i32();
-        tcg_gen_trunc_i64_i32(c->u.s32.b, cc_dst);
+        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
         break;
 
     case CC_OP_LTGT0_64:
@@ -851,11 +851,11 @@ static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
         c->is_64 = false;
         c->u.s32.a = tcg_temp_new_i32();
         c->u.s32.b = tcg_temp_new_i32();
-        tcg_gen_trunc_i64_i32(c->u.s32.a, cc_vr);
+        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
         if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
             tcg_gen_movi_i32(c->u.s32.b, 0);
         } else {
-            tcg_gen_trunc_i64_i32(c->u.s32.b, cc_src);
+            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
         }
         break;
 
@@ -1532,7 +1532,7 @@ static ExitStatus op_bct32(DisasContext *s, DisasOps *o)
     store_reg32_i64(r1, t);
     c.u.s32.a = tcg_temp_new_i32();
     c.u.s32.b = tcg_const_i32(0);
-    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
+    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
     tcg_temp_free_i64(t);
 
     return help_branch(s, &c, is_imm, imm, o->in2);
@@ -1556,7 +1556,7 @@ static ExitStatus op_bcth(DisasContext *s, DisasOps *o)
     store_reg32h_i64(r1, t);
     c.u.s32.a = tcg_temp_new_i32();
     c.u.s32.b = tcg_const_i32(0);
-    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
+    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
     tcg_temp_free_i64(t);
 
     return help_branch(s, &c, 1, imm, o->in2);
@@ -1599,8 +1599,8 @@ static ExitStatus op_bx32(DisasContext *s, DisasOps *o)
     tcg_gen_add_i64(t, regs[r1], regs[r3]);
     c.u.s32.a = tcg_temp_new_i32();
     c.u.s32.b = tcg_temp_new_i32();
-    tcg_gen_trunc_i64_i32(c.u.s32.a, t);
-    tcg_gen_trunc_i64_i32(c.u.s32.b, regs[r3 | 1]);
+    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
+    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
     store_reg32_i64(r1, t);
     tcg_temp_free_i64(t);
 
@@ -1905,7 +1905,7 @@ static ExitStatus op_clm(DisasContext *s, DisasOps *o)
 {
     TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3));
     TCGv_i32 t1 = tcg_temp_new_i32();
-    tcg_gen_trunc_i64_i32(t1, o->in1);
+    tcg_gen_extrl_i64_i32(t1, o->in1);
     potential_page_fault(s);
     gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2);
     set_cc_static(s);
@@ -1977,7 +1977,7 @@ static ExitStatus op_cs(DisasContext *s, DisasOps *o)
 
     /* Store CC back to cc_op. Wait until after the store so that any
        exception gets the old cc_op value. */
-    tcg_gen_trunc_i64_i32(cc_op, cc);
+    tcg_gen_extrl_i64_i32(cc_op, cc);
     tcg_temp_free_i64(cc);
     set_cc_static(s);
     return NO_EXIT;
@@ -2027,7 +2027,7 @@ static ExitStatus op_cdsg(DisasContext *s, DisasOps *o)
     /* Save back state now that we've passed all exceptions. */
     tcg_gen_mov_i64(regs[r1], outh);
     tcg_gen_mov_i64(regs[r1 + 1], outl);
-    tcg_gen_trunc_i64_i32(cc_op, cc);
+    tcg_gen_extrl_i64_i32(cc_op, cc);
     tcg_temp_free_i64(outh);
     tcg_temp_free_i64(outl);
     tcg_temp_free_i64(cc);
@@ -2051,7 +2051,7 @@ static ExitStatus op_cvd(DisasContext *s, DisasOps *o)
 {
     TCGv_i64 t1 = tcg_temp_new_i64();
     TCGv_i32 t2 = tcg_temp_new_i32();
-    tcg_gen_trunc_i64_i32(t2, o->in1);
+    tcg_gen_extrl_i64_i32(t2, o->in1);
     gen_helper_cvd(t1, t2);
     tcg_temp_free_i32(t2);
     tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s));
@@ -3235,8 +3235,8 @@ static ExitStatus op_rll32(DisasContext *s, DisasOps *o)
     TCGv_i32 t1 = tcg_temp_new_i32();
     TCGv_i32 t2 = tcg_temp_new_i32();
     TCGv_i32 to = tcg_temp_new_i32();
-    tcg_gen_trunc_i64_i32(t1, o->in1);
-    tcg_gen_trunc_i64_i32(t2, o->in2);
+    tcg_gen_extrl_i64_i32(t1, o->in1);
+    tcg_gen_extrl_i64_i32(t2, o->in2);
     tcg_gen_rotl_i32(to, t1, t2);
     tcg_gen_extu_i32_i64(o->out, to);
     tcg_temp_free_i32(t1);
diff --git a/target-sh4/translate.c b/target-sh4/translate.c
index 3b4a1b5cea..be0cb321cf 100644
--- a/target-sh4/translate.c
+++ b/target-sh4/translate.c
@@ -288,10 +288,10 @@ static inline void gen_load_fpr64(TCGv_i64 t, int reg)
 static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
 {
     TCGv_i32 tmp = tcg_temp_new_i32();
-    tcg_gen_trunc_i64_i32(tmp, t);
+    tcg_gen_extrl_i64_i32(tmp, t);
     tcg_gen_mov_i32(cpu_fregs[reg + 1], tmp);
     tcg_gen_shri_i64(t, t, 32);
-    tcg_gen_trunc_i64_i32(tmp, t);
+    tcg_gen_extrl_i64_i32(tmp, t);
     tcg_gen_mov_i32(cpu_fregs[reg], tmp);
     tcg_temp_free_i32(tmp);
 }
diff --git a/target-sparc/translate.c b/target-sparc/translate.c
index c58dd4e95b..48fc2abe63 100644
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -164,7 +164,7 @@ static TCGv_i32 gen_load_fpr_F(DisasContext *dc, unsigned int src)
     TCGv_i64 t = tcg_temp_new_i64();
 
     tcg_gen_shri_i64(t, cpu_fpr[src / 2], 32);
-    tcg_gen_trunc_i64_i32(ret, t);
+    tcg_gen_extrl_i64_i32(ret, t);
     tcg_temp_free_i64(t);
 
     return ret;
@@ -379,8 +379,8 @@ static TCGv_i32 gen_add32_carry32(void)
 #if TARGET_LONG_BITS == 64
     cc_src1_32 = tcg_temp_new_i32();
     cc_src2_32 = tcg_temp_new_i32();
-    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_dst);
-    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src);
+    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_dst);
+    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src);
 #else
     cc_src1_32 = cpu_cc_dst;
     cc_src2_32 = cpu_cc_src;
@@ -405,8 +405,8 @@ static TCGv_i32 gen_sub32_carry32(void)
 #if TARGET_LONG_BITS == 64
     cc_src1_32 = tcg_temp_new_i32();
     cc_src2_32 = tcg_temp_new_i32();
-    tcg_gen_trunc_i64_i32(cc_src1_32, cpu_cc_src);
-    tcg_gen_trunc_i64_i32(cc_src2_32, cpu_cc_src2);
+    tcg_gen_extrl_i64_i32(cc_src1_32, cpu_cc_src);
+    tcg_gen_extrl_i64_i32(cc_src2_32, cpu_cc_src2);
 #else
     cc_src1_32 = cpu_cc_src;
     cc_src2_32 = cpu_cc_src2;
@@ -2254,11 +2254,11 @@ static void gen_fmovs(DisasContext *dc, DisasCompare *cmp, int rd, int rs)
        the later.  */
     c32 = tcg_temp_new_i32();
     if (cmp->is_bool) {
-        tcg_gen_trunc_i64_i32(c32, cmp->c1);
+        tcg_gen_extrl_i64_i32(c32, cmp->c1);
     } else {
         TCGv_i64 c64 = tcg_temp_new_i64();
         tcg_gen_setcond_i64(cmp->cond, c64, cmp->c1, cmp->c2);
-        tcg_gen_trunc_i64_i32(c32, c64);
+        tcg_gen_extrl_i64_i32(c32, c64);
         tcg_temp_free_i64(c64);
     }
 
diff --git a/target-tricore/translate.c b/target-tricore/translate.c
index 70f09300ee..f02bef41ee 100644
--- a/target-tricore/translate.c
+++ b/target-tricore/translate.c
@@ -540,14 +540,14 @@ static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
 
     tcg_gen_mul_i64(t1, t1, t3);
     tcg_gen_add_i64(t1, t2, t1);
-    tcg_gen_trunc_i64_i32(ret, t1);
+    tcg_gen_extrl_i64_i32(ret, t1);
     /* calc V
        t1 > 0x7fffffff */
     tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
     /* t1 < -0x80000000 */
     tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
     tcg_gen_or_i64(t2, t2, t3);
-    tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
     tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
     /* Calc SV bit */
     tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
@@ -621,7 +621,7 @@ gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
     /* only the add overflows, if t2 < t1
        calc V bit */
     tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1);
-    tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
     tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
     /* Calc SV bit */
     tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
@@ -1110,12 +1110,12 @@ gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
     tcg_gen_sari_i64(t2, t2, up_shift);
 
     tcg_gen_add_i64(t3, t1, t2);
-    tcg_gen_trunc_i64_i32(temp3, t3);
+    tcg_gen_extrl_i64_i32(temp3, t3);
     /* calc v bit */
     tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
     tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
     tcg_gen_or_i64(t1, t1, t2);
-    tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
     tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
     /* We produce an overflow on the host if the mul before was
        (0x80000000 * 0x80000000) << 1). If this is the
@@ -1356,14 +1356,14 @@ static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3)
 
     tcg_gen_mul_i64(t1, t1, t3);
     tcg_gen_sub_i64(t1, t2, t1);
-    tcg_gen_trunc_i64_i32(ret, t1);
+    tcg_gen_extrl_i64_i32(ret, t1);
     /* calc V
        t2 > 0x7fffffff */
     tcg_gen_setcondi_i64(TCG_COND_GT, t3, t1, 0x7fffffffLL);
     /* result < -0x80000000 */
     tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL);
     tcg_gen_or_i64(t2, t2, t3);
-    tcg_gen_trunc_i64_i32(cpu_PSW_V, t2);
+    tcg_gen_extrl_i64_i32(cpu_PSW_V, t2);
     tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
 
     /* Calc SV bit */
@@ -1445,7 +1445,7 @@ gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high,
     tcg_gen_extr_i64_i32(ret_low, ret_high, t3);
     /* calc V bit, only the sub can overflow, if t1 > t2 */
     tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2);
-    tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
     tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
     /* Calc SV bit */
     tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
@@ -1973,12 +1973,12 @@ gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n,
     tcg_gen_add_i64(t2, t2, t4);
 
     tcg_gen_sub_i64(t3, t1, t2);
-    tcg_gen_trunc_i64_i32(temp3, t3);
+    tcg_gen_extrl_i64_i32(temp3, t3);
     /* calc v bit */
     tcg_gen_setcondi_i64(TCG_COND_GT, t1, t3, 0x7fffffffLL);
     tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL);
     tcg_gen_or_i64(t1, t1, t2);
-    tcg_gen_trunc_i64_i32(cpu_PSW_V, t1);
+    tcg_gen_extrl_i64_i32(cpu_PSW_V, t1);
     tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31);
     /* Calc SV bit */
     tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V);
diff --git a/target-xtensa/translate.c b/target-xtensa/translate.c
index f2118c24c0..a29b3e61bc 100644
--- a/target-xtensa/translate.c
+++ b/target-xtensa/translate.c
@@ -1544,7 +1544,7 @@ static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
                 TCGv_i64 tmp = tcg_temp_new_i64(); \
                 tcg_gen_extu_i32_i64(tmp, reg); \
                 tcg_gen_##cmd##_i64(v, v, tmp); \
-                tcg_gen_trunc_i64_i32(cpu_R[RRR_R], v); \
+                tcg_gen_extrl_i64_i32(cpu_R[RRR_R], v); \
                 tcg_temp_free_i64(v); \
                 tcg_temp_free_i64(tmp); \
             } while (0)
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 6b59eedf74..6da083a1e9 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -694,11 +694,6 @@ static inline void tcg_gen_concat32_i64(TCGv_i64 ret, TCGv_i64 lo, TCGv_i64 hi)
     tcg_gen_deposit_i64(ret, lo, hi, 32, 32);
 }
 
-static inline void tcg_gen_trunc_i64_i32(TCGv_i32 ret, TCGv_i64 arg)
-{
-    tcg_gen_extrl_i64_i32(ret, arg);
-}
-
 /* QEMU specific operations. */
 
 #ifndef TARGET_LONG_BITS
@@ -854,7 +849,7 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
 #define tcg_gen_divu_tl tcg_gen_divu_i64
 #define tcg_gen_remu_tl tcg_gen_remu_i64
 #define tcg_gen_discard_tl tcg_gen_discard_i64
-#define tcg_gen_trunc_tl_i32 tcg_gen_trunc_i64_i32
+#define tcg_gen_trunc_tl_i32 tcg_gen_extrl_i64_i32
 #define tcg_gen_trunc_i64_tl tcg_gen_mov_i64
 #define tcg_gen_extu_i32_tl tcg_gen_extu_i32_i64
 #define tcg_gen_ext_i32_tl tcg_gen_ext_i32_i64
@@ -933,7 +928,7 @@ static inline void tcg_gen_qemu_st64(TCGv_i64 arg, TCGv addr, int mem_index)
 #define tcg_gen_remu_tl tcg_gen_remu_i32
 #define tcg_gen_discard_tl tcg_gen_discard_i32
 #define tcg_gen_trunc_tl_i32 tcg_gen_mov_i32
-#define tcg_gen_trunc_i64_tl tcg_gen_trunc_i64_i32
+#define tcg_gen_trunc_i64_tl tcg_gen_extrl_i64_i32
 #define tcg_gen_extu_i32_tl tcg_gen_mov_i32
 #define tcg_gen_ext_i32_tl tcg_gen_mov_i32
 #define tcg_gen_extu_tl_i64 tcg_gen_extu_i32_i64
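---

Reviewer note (not part of the patch itself): the inline wrapper removed from tcg/tcg-op.h above shows that the two names are drop-in equivalents, since tcg_gen_trunc_i64_i32() simply forwarded to tcg_gen_extrl_i64_i32(), which extracts the low 32 bits of an i64 into an i32. A minimal sketch of the conversion pattern applied throughout this patch; the temporaries below are illustrative only and do not come from any of the files touched here:

    TCGv_i64 val64 = tcg_temp_new_i64();
    TCGv_i32 lo32 = tcg_temp_new_i32();

    /* Old spelling, removed by this patch:
     *     tcg_gen_trunc_i64_i32(lo32, val64);
     * New spelling, same semantics: copy the low 32 bits of val64 into lo32. */
    tcg_gen_extrl_i64_i32(lo32, val64);

    tcg_temp_free_i32(lo32);
    tcg_temp_free_i64(val64);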