target/arm: [tcg] Port to init_disas_context

Incrementally paves the way towards using the generic instruction translation
loop.

Signed-off-by: Lluís Vilanova <vilanova@ac.upc.edu>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <150002316201.22386.12115078843605656029.stgit@frigg.lan>
[rth: Adjust for max_insns interface change.]
Signed-off-by: Richard Henderson <rth@twiddle.net>
Lluís Vilanova 2017-07-14 12:06:02 +03:00 committed by Richard Henderson
parent dcba3a8d44
commit 1d8a553523
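
For orientation: this patch carves the DisasContext setup out of gen_intermediate_code() and gives it the shape the generic translation loop expects of its init hook. Below is a minimal sketch of that hook table, assuming the interface added on the framework side of this series; the struct name and member list are not part of this diff.

/* Sketch only: per-target hook table used by the generic translation loop.
 * init_disas_context fills the target's DisasContext from dcbase->tb and
 * returns the (possibly reduced) instruction budget for the block. */
typedef struct TranslatorOps {
    int (*init_disas_context)(DisasContextBase *dcbase, CPUState *cpu,
                              int max_insns);
    /* further hooks (tb_start, translate_insn, tb_stop, ...) are ported
     * by later patches in the series */
} TranslatorOps;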

@@ -11824,32 +11824,12 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
     return false;
 }
 
-/* generate intermediate code for basic block 'tb'. */
-void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+static int arm_tr_init_disas_context(DisasContextBase *dcbase,
+                                     CPUState *cs, int max_insns)
 {
+    DisasContext *dc = container_of(dcbase, DisasContext, base);
     CPUARMState *env = cs->env_ptr;
     ARMCPU *cpu = arm_env_get_cpu(env);
-    DisasContext dc1, *dc = &dc1;
-    target_ulong next_page_start;
-    int max_insns;
-    bool end_of_page;
-
-    /* generate intermediate code */
-
-    /* The A64 decoder has its own top level loop, because it doesn't need
-     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
-     */
-    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
-        gen_intermediate_code_a64(&dc->base, cs, tb);
-        return;
-    }
-
-    dc->base.tb = tb;
-    dc->base.pc_first = tb->pc;
-    dc->base.pc_next = dc->base.pc_first;
-    dc->base.is_jmp = DISAS_NEXT;
-    dc->base.num_insns = 0;
-    dc->base.singlestep_enabled = cs->singlestep_enabled;
 
     dc->pc = dc->base.pc_first;
     dc->condjmp = 0;
@@ -11860,23 +11840,23 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
      */
     dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                                !arm_el_is_aa64(env, 3);
-    dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
-    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(tb->flags);
-    dc->be_data = ARM_TBFLAG_BE_DATA(tb->flags) ? MO_BE : MO_LE;
-    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
-    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
-    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(tb->flags));
+    dc->thumb = ARM_TBFLAG_THUMB(dc->base.tb->flags);
+    dc->sctlr_b = ARM_TBFLAG_SCTLR_B(dc->base.tb->flags);
+    dc->be_data = ARM_TBFLAG_BE_DATA(dc->base.tb->flags) ? MO_BE : MO_LE;
+    dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) & 0xf) << 1;
+    dc->condexec_cond = ARM_TBFLAG_CONDEXEC(dc->base.tb->flags) >> 4;
+    dc->mmu_idx = core_to_arm_mmu_idx(env, ARM_TBFLAG_MMUIDX(dc->base.tb->flags));
     dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
 #if !defined(CONFIG_USER_ONLY)
     dc->user = (dc->current_el == 0);
 #endif
-    dc->ns = ARM_TBFLAG_NS(tb->flags);
-    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
-    dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
-    dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
-    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
-    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
-    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(tb->flags);
+    dc->ns = ARM_TBFLAG_NS(dc->base.tb->flags);
+    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(dc->base.tb->flags);
+    dc->vfp_enabled = ARM_TBFLAG_VFPEN(dc->base.tb->flags);
+    dc->vec_len = ARM_TBFLAG_VECLEN(dc->base.tb->flags);
+    dc->vec_stride = ARM_TBFLAG_VECSTRIDE(dc->base.tb->flags);
+    dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(dc->base.tb->flags);
+    dc->v7m_handler_mode = ARM_TBFLAG_HANDLER(dc->base.tb->flags);
 
     dc->cp_regs = cpu->cp_regs;
     dc->features = env->features;
@@ -11895,11 +11875,12 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
      *   emit code to generate a software step exception
      *   end the TB
      */
-    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
-    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
+    dc->ss_active = ARM_TBFLAG_SS_ACTIVE(dc->base.tb->flags);
+    dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(dc->base.tb->flags);
     dc->is_ldex = false;
     dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
 
     cpu_F0s = tcg_temp_new_i32();
     cpu_F1s = tcg_temp_new_i32();
     cpu_F0d = tcg_temp_new_i64();
@@ -11908,6 +11889,36 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
     cpu_V1 = cpu_F1d;
     /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
     cpu_M0 = tcg_temp_new_i64();
+
+    return max_insns;
+}
+
+/* generate intermediate code for basic block 'tb'. */
+void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
+{
+    CPUARMState *env = cs->env_ptr;
+    DisasContext dc1, *dc = &dc1;
+    target_ulong next_page_start;
+    int max_insns;
+    bool end_of_page;
+
+    /* generate intermediate code */
+
+    /* The A64 decoder has its own top level loop, because it doesn't need
+     * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
+     */
+    if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
+        gen_intermediate_code_a64(&dc->base, cs, tb);
+        return;
+    }
+
+    dc->base.tb = tb;
+    dc->base.pc_first = dc->base.tb->pc;
+    dc->base.pc_next = dc->base.pc_first;
+    dc->base.is_jmp = DISAS_NEXT;
+    dc->base.num_insns = 0;
+    dc->base.singlestep_enabled = cs->singlestep_enabled;
+
     next_page_start = (dc->base.pc_first & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
     max_insns = tb->cflags & CF_COUNT_MASK;
     if (max_insns == 0) {
@@ -11916,6 +11927,7 @@ void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
     if (max_insns > TCG_MAX_INSNS) {
         max_insns = TCG_MAX_INSNS;
     }
 
+    max_insns = arm_tr_init_disas_context(&dc->base, cs, max_insns);
 
     gen_tb_start(tb);
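
Where this is headed: once the remaining pieces of the hand-rolled loop above are converted into hooks, gen_intermediate_code() can shrink to a table of callbacks plus a single call into the generic loop. A sketch of that end state, assuming the later patches in this series land as posted; every name except arm_tr_init_disas_context is a placeholder here.

/* Sketch only: eventual wiring once all hooks exist. */
static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    /* .tb_start, .insn_start, .breakpoint_check,
     * .translate_insn, .tb_stop, .disas_log, ... */
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb)
{
    DisasContext dc;

    translator_loop(&arm_translator_ops, &dc.base, cs, tb);
}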