mirror of
https://github.com/torvalds/linux
synced 2024-10-08 20:34:15 +00:00
ARM: KVM: invalidate BTB on guest exit for Cortex-A12/A17
In order to avoid aliasing attacks against the branch predictor, let's invalidate the BTB on guest exit. This is made complicated by the fact that we cannot take a branch before invalidating the BTB. We only apply this to A12 and A17, which are the only two ARM cores on which this is useful. Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk> Boot-tested-by: Tony Lindgren <tony@atomide.com> Reviewed-by: Tony Lindgren <tony@atomide.com>
This commit is contained in:
parent
c44f366ea7
commit
3f7e8e2e1e
|
@@ -61,8 +61,6 @@ struct kvm_vcpu;
|
||||||
extern char __kvm_hyp_init[];
|
extern char __kvm_hyp_init[];
|
||||||
extern char __kvm_hyp_init_end[];
|
extern char __kvm_hyp_init_end[];
|
||||||
|
|
||||||
extern char __kvm_hyp_vector[];
|
|
||||||
|
|
||||||
extern void __kvm_flush_vm_context(void);
|
extern void __kvm_flush_vm_context(void);
|
||||||
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
|
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
|
||||||
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
|
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
|
||||||
|
|
|
@@ -297,7 +297,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
|
||||||
|
|
||||||
static inline void *kvm_get_hyp_vector(void)
|
static inline void *kvm_get_hyp_vector(void)
|
||||||
{
|
{
|
||||||
return kvm_ksym_ref(__kvm_hyp_vector);
|
switch(read_cpuid_part()) {
|
||||||
|
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
|
||||||
|
case ARM_CPU_PART_CORTEX_A12:
|
||||||
|
case ARM_CPU_PART_CORTEX_A17:
|
||||||
|
{
|
||||||
|
extern char __kvm_hyp_vector_bp_inv[];
|
||||||
|
return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
default:
|
||||||
|
{
|
||||||
|
extern char __kvm_hyp_vector[];
|
||||||
|
return kvm_ksym_ref(__kvm_hyp_vector);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int kvm_map_vectors(void)
|
static inline int kvm_map_vectors(void)
|
||||||
|
|
|
@@ -71,6 +71,66 @@ __kvm_hyp_vector:
|
||||||
W(b) hyp_irq
|
W(b) hyp_irq
|
||||||
W(b) hyp_fiq
|
W(b) hyp_fiq
|
||||||
|
|
||||||
|
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
|
||||||
|
.align 5
|
||||||
|
__kvm_hyp_vector_bp_inv:
|
||||||
|
.global __kvm_hyp_vector_bp_inv
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We encode the exception entry in the bottom 3 bits of
|
||||||
|
* SP, and we have to guarantee to be 8 bytes aligned.
|
||||||
|
*/
|
||||||
|
W(add) sp, sp, #1 /* Reset 7 */
|
||||||
|
W(add) sp, sp, #1 /* Undef 6 */
|
||||||
|
W(add) sp, sp, #1 /* Syscall 5 */
|
||||||
|
W(add) sp, sp, #1 /* Prefetch abort 4 */
|
||||||
|
W(add) sp, sp, #1 /* Data abort 3 */
|
||||||
|
W(add) sp, sp, #1 /* HVC 2 */
|
||||||
|
W(add) sp, sp, #1 /* IRQ 1 */
|
||||||
|
W(nop) /* FIQ 0 */
|
||||||
|
|
||||||
|
mcr p15, 0, r0, c7, c5, 6 /* BPIALL */
|
||||||
|
isb
|
||||||
|
|
||||||
|
#ifdef CONFIG_THUMB2_KERNEL
|
||||||
|
/*
|
||||||
|
* Yet another silly hack: Use VPIDR as a temp register.
|
||||||
|
* Thumb2 is really a pain, as SP cannot be used with most
|
||||||
|
* of the bitwise instructions. The vect_br macro ensures
|
||||||
|
* things gets cleaned-up.
|
||||||
|
*/
|
||||||
|
mcr p15, 4, r0, c0, c0, 0 /* VPIDR */
|
||||||
|
mov r0, sp
|
||||||
|
and r0, r0, #7
|
||||||
|
sub sp, sp, r0
|
||||||
|
push {r1, r2}
|
||||||
|
mov r1, r0
|
||||||
|
mrc p15, 4, r0, c0, c0, 0 /* VPIDR */
|
||||||
|
mrc p15, 0, r2, c0, c0, 0 /* MIDR */
|
||||||
|
mcr p15, 4, r2, c0, c0, 0 /* VPIDR */
|
||||||
|
#endif
|
||||||
|
|
||||||
|
.macro vect_br val, targ
|
||||||
|
ARM( eor sp, sp, #\val )
|
||||||
|
ARM( tst sp, #7 )
|
||||||
|
ARM( eorne sp, sp, #\val )
|
||||||
|
|
||||||
|
THUMB( cmp r1, #\val )
|
||||||
|
THUMB( popeq {r1, r2} )
|
||||||
|
|
||||||
|
beq \targ
|
||||||
|
.endm
|
||||||
|
|
||||||
|
vect_br 0, hyp_fiq
|
||||||
|
vect_br 1, hyp_irq
|
||||||
|
vect_br 2, hyp_hvc
|
||||||
|
vect_br 3, hyp_dabt
|
||||||
|
vect_br 4, hyp_pabt
|
||||||
|
vect_br 5, hyp_svc
|
||||||
|
vect_br 6, hyp_undef
|
||||||
|
vect_br 7, hyp_reset
|
||||||
|
#endif
|
||||||
|
|
||||||
.macro invalid_vector label, cause
|
.macro invalid_vector label, cause
|
||||||
.align
|
.align
|
||||||
\label: mov r0, #\cause
|
\label: mov r0, #\cause
|
||||||
|
@@ -149,7 +209,14 @@ hyp_hvc:
|
||||||
bx ip
|
bx ip
|
||||||
|
|
||||||
1:
|
1:
|
||||||
push {lr}
|
/*
|
||||||
|
* Pushing r2 here is just a way of keeping the stack aligned to
|
||||||
|
* 8 bytes on any path that can trigger a HYP exception. Here,
|
||||||
|
* we may well be about to jump into the guest, and the guest
|
||||||
|
* exit would otherwise be badly decoded by our fancy
|
||||||
|
* "decode-exception-without-a-branch" code...
|
||||||
|
*/
|
||||||
|
push {r2, lr}
|
||||||
|
|
||||||
mov lr, r0
|
mov lr, r0
|
||||||
mov r0, r1
|
mov r0, r1
|
||||||
|
@@ -159,7 +226,7 @@ hyp_hvc:
|
||||||
THUMB( orr lr, #1)
|
THUMB( orr lr, #1)
|
||||||
blx lr @ Call the HYP function
|
blx lr @ Call the HYP function
|
||||||
|
|
||||||
pop {lr}
|
pop {r2, lr}
|
||||||
eret
|
eret
|
||||||
|
|
||||||
guest_trap:
|
guest_trap:
|
||||||
|
|
Loading…
Reference in a new issue