mirror of
https://github.com/torvalds/linux
synced 2024-10-19 17:58:44 +00:00
KVM: PPC: Book3S HV P9: Read machine check registers while MSR[RI] is 0
SRR0/1, DAR, DSISR must all be protected from machine check which can clobber them. Ensure MSR[RI] is clear while they are live.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-17-npiggin@gmail.com
This commit is contained in:
parent
c00366e237
commit
6d770e3fe9
|
@ -3571,11 +3571,16 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
|
|||
mtspr(SPRN_BESCR, vcpu->arch.bescr);
|
||||
mtspr(SPRN_WORT, vcpu->arch.wort);
|
||||
mtspr(SPRN_TIDR, vcpu->arch.tid);
|
||||
mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
|
||||
mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
|
||||
mtspr(SPRN_AMR, vcpu->arch.amr);
|
||||
mtspr(SPRN_UAMOR, vcpu->arch.uamor);
|
||||
|
||||
/*
|
||||
* DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
|
||||
* clear (or hstate set appropriately to catch those registers
|
||||
* being clobbered if we take a MCE or SRESET), so those are done
|
||||
* later.
|
||||
*/
|
||||
|
||||
if (!(vcpu->arch.ctrl & 1))
|
||||
mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
|
||||
|
||||
|
@ -3618,6 +3623,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
|
|||
hvregs.vcpu_token = vcpu->vcpu_id;
|
||||
}
|
||||
hvregs.hdec_expiry = time_limit;
|
||||
mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
|
||||
mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
|
||||
trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
|
||||
__pa(&vcpu->arch.regs));
|
||||
kvmhv_restore_hv_return_state(vcpu, &hvregs);
|
||||
|
|
|
@ -122,6 +122,7 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
|
|||
s64 hdec;
|
||||
u64 tb, purr, spurr;
|
||||
u64 *exsave;
|
||||
bool ri_set;
|
||||
unsigned long msr = mfmsr();
|
||||
int trap;
|
||||
unsigned long host_hfscr = mfspr(SPRN_HFSCR);
|
||||
|
@ -192,9 +193,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
|
|||
*/
|
||||
mtspr(SPRN_HDEC, hdec);
|
||||
|
||||
mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
|
||||
mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
|
||||
|
||||
start_timing(vcpu, &vcpu->arch.rm_entry);
|
||||
|
||||
vcpu->arch.ceded = 0;
|
||||
|
@ -220,6 +218,13 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
|
|||
*/
|
||||
mtspr(SPRN_HDSISR, HDSISR_CANARY);
|
||||
|
||||
__mtmsrd(0, 1); /* clear RI */
|
||||
|
||||
mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
|
||||
mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
|
||||
mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
|
||||
mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
|
||||
|
||||
accumulate_time(vcpu, &vcpu->arch.guest_time);
|
||||
|
||||
local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_FAST;
|
||||
|
@ -237,7 +242,13 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
|
|||
|
||||
/* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
|
||||
trap = local_paca->kvm_hstate.scratch0 & ~0x2;
|
||||
|
||||
/* HSRR interrupts leave MSR[RI] unchanged, SRR interrupts clear it. */
|
||||
ri_set = false;
|
||||
if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK)) {
|
||||
if (trap != BOOK3S_INTERRUPT_SYSCALL &&
|
||||
(vcpu->arch.shregs.msr & MSR_RI))
|
||||
ri_set = true;
|
||||
exsave = local_paca->exgen;
|
||||
} else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET) {
|
||||
exsave = local_paca->exnmi;
|
||||
|
@ -247,6 +258,22 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
|
|||
|
||||
vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
|
||||
vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
|
||||
|
||||
/*
|
||||
* Only set RI after reading machine check regs (DAR, DSISR, SRR0/1)
|
||||
* and hstate scratch (which we need to move into exsave to make
|
||||
* re-entrant vs SRESET/MCE)
|
||||
*/
|
||||
if (ri_set) {
|
||||
if (unlikely(!(mfmsr() & MSR_RI))) {
|
||||
__mtmsrd(MSR_RI, 1);
|
||||
WARN_ON_ONCE(1);
|
||||
}
|
||||
} else {
|
||||
WARN_ON_ONCE(mfmsr() & MSR_RI);
|
||||
__mtmsrd(MSR_RI, 1);
|
||||
}
|
||||
|
||||
vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
|
||||
vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
|
||||
vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
|
||||
|
|
Loading…
Reference in a new issue