From f9c0322a5ff5dc28be41290a26d0133e86a6ea4c Mon Sep 17 00:00:00 2001
From: Robert Hoo <robert.hu@linux.intel.com>
Date: Wed, 21 Apr 2021 14:23:52 +0800
Subject: [PATCH] i386/cpu_dump: support AVX512 ZMM regs dump

Since commit fa4518741e (target-i386: Rename struct XMMReg to ZMMReg),
CPUX86State.xmm_regs[] has been 512 bits wide to support AVX512, and the
rest of QEMU's AVX512 register support has been in place for years.
However, x86_cpu_dump_state() still dumps only the XMM registers, no
matter whether YMM/ZMM state is enabled.

Complement this by dumping XMM, YMM or ZMM registers according to the
state components enabled in XCR0.

Signed-off-by: Robert Hoo <robert.hu@linux.intel.com>
Reviewed-by: Richard Henderson
Message-Id: <1618986232-73826-1-git-send-email-robert.hu@linux.intel.com>
Signed-off-by: Eduardo Habkost
---
 target/i386/cpu-dump.c | 63 ++++++++++++++++++++++++++++++++----------
 1 file changed, 48 insertions(+), 15 deletions(-)

diff --git a/target/i386/cpu-dump.c b/target/i386/cpu-dump.c
index aac21f1f60..02b635a52c 100644
--- a/target/i386/cpu-dump.c
+++ b/target/i386/cpu-dump.c
@@ -478,6 +478,11 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags)
     qemu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
     if (flags & CPU_DUMP_FPU) {
         int fptag;
+        const uint64_t avx512_mask = XSTATE_OPMASK_MASK | \
+                        XSTATE_ZMM_Hi256_MASK | \
+                        XSTATE_Hi16_ZMM_MASK | \
+                        XSTATE_YMM_MASK | XSTATE_SSE_MASK,
+                avx_mask = XSTATE_YMM_MASK | XSTATE_SSE_MASK;
         fptag = 0;
         for(i = 0; i < 8; i++) {
             fptag |= ((!env->fptags[i]) << i);
@@ -499,21 +504,49 @@
             else
                 qemu_fprintf(f, " ");
         }
-        if (env->hflags & HF_CS64_MASK)
-            nb = 16;
-        else
-            nb = 8;
-        for(i=0;i<nb;i++) {
-            qemu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
-                         i,
-                         env->xmm_regs[i].ZMM_L(3),
-                         env->xmm_regs[i].ZMM_L(2),
-                         env->xmm_regs[i].ZMM_L(1),
-                         env->xmm_regs[i].ZMM_L(0));
-            if ((i & 1) == 1)
-                qemu_fprintf(f, "\n");
-            else
-                qemu_fprintf(f, " ");
+
+        if ((env->xcr0 & avx512_mask) == avx512_mask) {
+            /* XSAVE enabled AVX512 */
+            for (i = 0; i < NB_OPMASK_REGS; i++) {
+                qemu_fprintf(f, "Opmask%02d=%016"PRIx64"%s", i,
+                             env->opmask_regs[i], ((i & 3) == 3) ? "\n" : " ");
+            }
+
+            nb = (env->hflags & HF_CS64_MASK) ? 32 : 8;
+            for (i = 0; i < nb; i++) {
+                qemu_fprintf(f, "ZMM%02d=%016"PRIx64" %016"PRIx64" %016"PRIx64
+                             " %016"PRIx64" %016"PRIx64" %016"PRIx64
+                             " %016"PRIx64" %016"PRIx64"\n",
+                             i,
+                             env->xmm_regs[i].ZMM_Q(7),
+                             env->xmm_regs[i].ZMM_Q(6),
+                             env->xmm_regs[i].ZMM_Q(5),
+                             env->xmm_regs[i].ZMM_Q(4),
+                             env->xmm_regs[i].ZMM_Q(3),
+                             env->xmm_regs[i].ZMM_Q(2),
+                             env->xmm_regs[i].ZMM_Q(1),
+                             env->xmm_regs[i].ZMM_Q(0));
+            }
+        } else if ((env->xcr0 & avx_mask) == avx_mask) {
+            /* XSAVE enabled AVX */
+            nb = env->hflags & HF_CS64_MASK ? 16 : 8;
+            for (i = 0; i < nb; i++) {
+                qemu_fprintf(f, "YMM%02d=%016"PRIx64" %016"PRIx64" %016"PRIx64
+                             " %016"PRIx64"\n", i,
+                             env->xmm_regs[i].ZMM_Q(3),
+                             env->xmm_regs[i].ZMM_Q(2),
+                             env->xmm_regs[i].ZMM_Q(1),
+                             env->xmm_regs[i].ZMM_Q(0));
+            }
+        } else { /* SSE and below cases */
+            nb = env->hflags & HF_CS64_MASK ? 16 : 8;
+            for (i = 0; i < nb; i++) {
+                qemu_fprintf(f, "XMM%02d=%016"PRIx64" %016"PRIx64"%s",
+                             i,
+                             env->xmm_regs[i].ZMM_Q(1),
+                             env->xmm_regs[i].ZMM_Q(0),
+                             (i & 1) ? "\n" : " ");
+            }
         }
     }
     if (flags & CPU_DUMP_CODE) {
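
For readers less familiar with XSAVE gating, the dispatch added above reduces to
checking that every XCR0 bit of a state-component group is set before the wider
registers are printed. Below is a minimal standalone C sketch of that pattern;
it is not QEMU code, and the SKETCH_* mask names are local to the example. Their
bit positions follow the architectural XCR0 layout, which is also what QEMU's
XSTATE_*_MASK constants encode.

    /* Standalone illustration of the XCR0 gating above -- not QEMU code. */
    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_SSE_MASK        (1ULL << 1)  /* XMM state                      */
    #define SKETCH_YMM_MASK        (1ULL << 2)  /* AVX: upper halves of YMM0-15   */
    #define SKETCH_OPMASK_MASK     (1ULL << 5)  /* AVX-512: opmask regs k0-k7     */
    #define SKETCH_ZMM_HI256_MASK  (1ULL << 6)  /* AVX-512: upper halves ZMM0-15  */
    #define SKETCH_HI16_ZMM_MASK   (1ULL << 7)  /* AVX-512: ZMM16-31              */

    /* Return the widest vector register width (in bits) fully enabled in XCR0. */
    static int vector_dump_width(uint64_t xcr0)
    {
        const uint64_t avx512 = SKETCH_OPMASK_MASK | SKETCH_ZMM_HI256_MASK |
                                SKETCH_HI16_ZMM_MASK | SKETCH_YMM_MASK |
                                SKETCH_SSE_MASK;
        const uint64_t avx = SKETCH_YMM_MASK | SKETCH_SSE_MASK;

        if ((xcr0 & avx512) == avx512) {
            return 512;     /* dump opmask and ZMM registers */
        } else if ((xcr0 & avx) == avx) {
            return 256;     /* dump YMM registers */
        }
        return 128;         /* SSE and below: dump XMM registers */
    }

    int main(void)
    {
        /* 0xe7: x87+SSE+AVX+opmask+ZMM_Hi256+Hi16_ZMM; 0x07: x87+SSE+AVX; 0x03: x87+SSE */
        printf("xcr0=0xe7 -> %d-bit dump\n", vector_dump_width(0xe7));
        printf("xcr0=0x07 -> %d-bit dump\n", vector_dump_width(0x07));
        printf("xcr0=0x03 -> %d-bit dump\n", vector_dump_width(0x03));
        return 0;
    }

As in the patch, a partially enabled AVX-512 state (for example, opmask set but
Hi16_ZMM clear) falls back to the narrower dump rather than printing register
halves whose state component is not enabled.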