target/hppa: Include PSW_P in tb flags and mmu index
Use a separate mmu index for PSW_P enabled vs disabled.  This means
we can elide the tlb flush in cpu_hppa_put_psw when PSW_P changes.
This turns out to be the majority of all tlb flushes.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent 3e01f1147a
commit bb67ec32a0

4 changed files with 29 additions and 26 deletions
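For readers skimming the diff, here is a small standalone C sketch (not part of the patch; the main() driver and the printout are illustrative only, with the macro values copied from the cpu.h hunk below) of the new index encoding: each privilege level now owns a pair of MMU indexes, and the low bit of the pair carries the PSW_P state, so both fields can be recovered from the index alone.

#include <assert.h>
#include <stdio.h>

/* Macros as introduced in the target/hppa/cpu.h hunk below. */
#define MMU_KERNEL_IDX    7
#define MMU_IDX_TO_PRIV(MIDX)       (((MIDX) - MMU_KERNEL_IDX) / 2)
#define MMU_IDX_TO_P(MIDX)          (((MIDX) - MMU_KERNEL_IDX) & 1)
#define PRIV_P_TO_MMU_IDX(PRIV, P)  ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)

int main(void)
{
    /* Walk all four privilege levels with PSW_P clear and set. */
    for (int priv = 0; priv < 4; priv++) {
        for (int p = 0; p < 2; p++) {
            int idx = PRIV_P_TO_MMU_IDX(priv, p);
            /* The encoding round-trips: priv and P are recoverable. */
            assert(MMU_IDX_TO_PRIV(idx) == priv);
            assert(MMU_IDX_TO_P(idx) == p);
            printf("priv=%d P=%d -> mmu_idx=%d\n", priv, p, idx);
        }
    }
    return 0;   /* yields indexes 7..14; MMU_PHYS_IDX (15) is untouched */
}

Because translations made with PSW_P set and clear now land in different softmmu TLBs, toggling PSW_P only changes which index subsequent accesses use, which is why the flush in cpu_hppa_put_psw can be removed below.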
target/hppa/cpu.h

@@ -30,21 +30,33 @@
    basis.  It's probably easier to fall back to a strong memory model.  */
 #define TCG_GUEST_DEFAULT_MO        TCG_MO_ALL
 
-#define MMU_KERNEL_IDX   11
-#define MMU_PL1_IDX      12
-#define MMU_PL2_IDX      13
-#define MMU_USER_IDX     14
-#define MMU_PHYS_IDX     15
+#define MMU_KERNEL_IDX    7
+#define MMU_KERNEL_P_IDX  8
+#define MMU_PL1_IDX       9
+#define MMU_PL1_P_IDX     10
+#define MMU_PL2_IDX       11
+#define MMU_PL2_P_IDX     12
+#define MMU_USER_IDX      13
+#define MMU_USER_P_IDX    14
+#define MMU_PHYS_IDX      15
 
-#define PRIV_TO_MMU_IDX(priv)    (MMU_KERNEL_IDX + (priv))
-#define MMU_IDX_TO_PRIV(mmu_idx) ((mmu_idx) - MMU_KERNEL_IDX)
+#define MMU_IDX_TO_PRIV(MIDX)       (((MIDX) - MMU_KERNEL_IDX) / 2)
+#define MMU_IDX_TO_P(MIDX)          (((MIDX) - MMU_KERNEL_IDX) & 1)
+#define PRIV_P_TO_MMU_IDX(PRIV, P)  ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX)
 
 #define TARGET_INSN_START_EXTRA_WORDS 1
 
 /* No need to flush MMU_PHYS_IDX */
 #define HPPA_MMU_FLUSH_MASK                             \
-        (1 << MMU_KERNEL_IDX | 1 << MMU_PL1_IDX |       \
-         1 << MMU_PL2_IDX    | 1 << MMU_USER_IDX)
+        (1 << MMU_KERNEL_IDX | 1 << MMU_KERNEL_P_IDX |  \
+         1 << MMU_PL1_IDX    | 1 << MMU_PL1_P_IDX    |  \
+         1 << MMU_PL2_IDX    | 1 << MMU_PL2_P_IDX    |  \
+         1 << MMU_USER_IDX   | 1 << MMU_USER_P_IDX)
+
+/* Indicies to flush for access_id changes. */
+#define HPPA_MMU_FLUSH_P_MASK \
+        (1 << MMU_KERNEL_P_IDX | 1 << MMU_PL1_P_IDX |   \
+         1 << MMU_PL2_P_IDX    | 1 << MMU_USER_P_IDX)
 
 /* Hardware exceptions, interrupts, faults, and traps.  */
 #define EXCP_HPMC                1  /* high priority machine check */
@@ -249,7 +261,7 @@ static inline int cpu_mmu_index(CPUHPPAState *env, bool ifetch)
     return MMU_USER_IDX;
 #else
     if (env->psw & (ifetch ? PSW_C : PSW_D)) {
-        return PRIV_TO_MMU_IDX(env->iaoq_f & 3);
+        return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
     }
     return MMU_PHYS_IDX;  /* mmu disabled */
 #endif
@@ -299,8 +311,8 @@ static inline void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
     *cs_base = env->iaoq_b & -4;
     flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
 #else
-    /* ??? E, T, H, L, B, P bits need to be here, when implemented.  */
-    flags |= env->psw & (PSW_W | PSW_C | PSW_D);
+    /* ??? E, T, H, L, B bits need to be here, when implemented.  */
+    flags |= env->psw & (PSW_W | PSW_C | PSW_D | PSW_P);
     flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;
 
     *pc = (env->psw & PSW_C
target/hppa/helper.c

@@ -51,7 +51,6 @@ target_ureg cpu_hppa_get_psw(CPUHPPAState *env)
 
 void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg psw)
 {
-    target_ureg old_psw = env->psw;
     target_ureg cb = 0;
 
     env->psw = psw & ~(PSW_N | PSW_V | PSW_CB);
@@ -67,13 +66,6 @@ void cpu_hppa_put_psw(CPUHPPAState *env, target_ureg psw)
     cb |= ((psw >> 9) & 1) << 8;
     cb |= ((psw >> 8) & 1) << 4;
     env->psw_cb = cb;
-
-    /* If PSW_P changes, it affects how we translate addresses.  */
-    if ((psw ^ old_psw) & PSW_P) {
-#ifndef CONFIG_USER_ONLY
-        tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
-#endif
-    }
 }
 
 void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags)
target/hppa/mem_helper.c

@@ -144,7 +144,7 @@ int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
     }
 
     /* access_id == 0 means public page and no check is performed */
-    if ((env->psw & PSW_P) && ent->access_id) {
+    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
         /* If bits [31:1] match, and bit 0 is set, suppress write.  */
         int match = ent->access_id * 2 + 1;
 
@@ -373,9 +373,7 @@ void HELPER(ptlbe)(CPUHPPAState *env)
 
 void cpu_hppa_change_prot_id(CPUHPPAState *env)
 {
-    if (env->psw & PSW_P) {
-        tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
-    }
+    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
 }
 
 void HELPER(change_prot_id)(CPUHPPAState *env)
target/hppa/translate.c

@@ -4071,8 +4071,9 @@ static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
 #else
     ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
-    ctx->mmu_idx = (ctx->tb_flags & PSW_D ?
-                    PRIV_TO_MMU_IDX(ctx->privilege) : MMU_PHYS_IDX);
+    ctx->mmu_idx = (ctx->tb_flags & PSW_D
+                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
+                    : MMU_PHYS_IDX);
 
     /* Recover the IAOQ values from the GVA + PRIV.  */
     uint64_t cs_base = ctx->base.tb->cs_base;