Merge branch kvm-arm64/vgic-6.8 into kvmarm-master/next

* kvm-arm64/vgic-6.8:
  : .
  : Fix for the GICv4.1 vSGI pending state being set/cleared from
  : userspace, and some cleanup to the MMIO and userspace accessors
  : for the pending state.
  :
  : Also a fix for a potential UAF in the ITS translation cache.
  : .
  KVM: arm64: vgic-its: Avoid potential UAF in LPI translation cache
  KVM: arm64: vgic-v3: Reinterpret user ISPENDR writes as I{C,S}PENDR
  KVM: arm64: vgic: Use common accessor for writes to ICPENDR
  KVM: arm64: vgic: Use common accessor for writes to ISPENDR
  KVM: arm64: vgic-v4: Restore pending state on host userspace write

Signed-off-by: Marc Zyngier <maz@kernel.org>
commit f4af13bd93
Marc Zyngier, 2024-01-04 19:28:15 +00:00
3 changed files with 53 additions and 81 deletions

arch/arm64/kvm/vgic/vgic-its.c

@@ -590,7 +590,11 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
+
 	irq = __vgic_its_check_cache(dist, db, devid, eventid);
+	if (irq)
+		vgic_get_irq_kref(irq);
+
 	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
 
 	return irq;
@@ -769,6 +773,7 @@ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
 	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->pending_latch = true;
 	vgic_queue_irq_unlock(kvm, irq, flags);
+	vgic_put_irq(kvm, irq);
 
 	return 0;
 }
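
The UAF fix above follows the usual take-the-reference-while-the-lock-is-held pattern: vgic_its_check_cache() now returns the cached vgic_irq with a reference already taken under lpi_list_lock, and vgic_its_inject_cached_translation() drops it with vgic_put_irq() once the interrupt has been queued, so a concurrent invalidation of the translation cache can no longer free the interrupt between the lookup and its use. A stand-alone C sketch of the same shape, with invented names (struct obj, cache_lookup(), obj_get()/obj_put()) rather than the kernel helpers:

/*
 * Stand-alone illustration (not kernel code): the lookup takes its
 * reference while the lock protecting the cache is still held, so a
 * concurrent cache invalidation cannot free the object between
 * "found it" and "used it".
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	int payload;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *cache_slot;		/* protected by cache_lock */

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);
}

static void obj_put(struct obj *o)
{
	/* Free on the last reference, wherever it is dropped. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

/* Lookup returns its result with a reference already held. */
static struct obj *cache_lookup(void)
{
	struct obj *o;

	pthread_mutex_lock(&cache_lock);
	o = cache_slot;
	if (o)
		obj_get(o);	/* cannot disappear while the lock is held */
	pthread_mutex_unlock(&cache_lock);

	return o;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcount, 1);	/* the cache's own reference */
	o->payload = 42;
	cache_slot = o;

	struct obj *found = cache_lookup();
	if (found) {
		/* Another thread may invalidate the cache here; we are safe. */
		printf("payload = %d\n", found->payload);
		obj_put(found);		/* drop the lookup's reference */
	}

	/* Tear down: the cache drops its own reference, freeing the object. */
	cache_slot = NULL;
	obj_put(o);
	return 0;
}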

arch/arm64/kvm/vgic/vgic-mmio-v3.c

@@ -357,31 +357,13 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
 					 gpa_t addr, unsigned int len,
 					 unsigned long val)
 {
-	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
-	int i;
-	unsigned long flags;
-
-	for (i = 0; i < len * 8; i++) {
-		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-
-		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-		if (test_bit(i, &val)) {
-			/*
-			 * pending_latch is set irrespective of irq type
-			 * (level or edge) to avoid dependency that VM should
-			 * restore irq config before pending info.
-			 */
-			irq->pending_latch = true;
-			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
-		} else {
-			irq->pending_latch = false;
-			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-		}
-
-		vgic_put_irq(vcpu->kvm, irq);
-	}
-
-	return 0;
+	int ret;
+
+	ret = vgic_uaccess_write_spending(vcpu, addr, len, val);
+	if (ret)
+		return ret;
+
+	return vgic_uaccess_write_cpending(vcpu, addr, len, ~val);
 }
 
 /* We want to avoid outer shareable. */
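
The reinterpretation above turns a userspace ISPENDR write into a full snapshot of the pending bits for that register: ones are set pending through the ISPENDR path, zeroes are cleared through the ICPENDR path on ~val, so restoring a saved image also clears stale pending state instead of only ever adding to it. A stand-alone sketch of that bit-level semantics (the pending[] array and apply_*() helpers are invented for the example, not KVM code):

/*
 * Stand-alone sketch of the reinterpreted userspace ISPENDR write for
 * one 32-bit register slice: bits set in 'val' become pending, bits
 * clear in 'val' stop being pending, which is exactly what chaining
 * the set accessor with the clear accessor on ~val achieves.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool pending[32];

/* ISPENDR semantics: writing 1 sets the pending bit, 0 is ignored. */
static void apply_spending(uint32_t val)
{
	for (int i = 0; i < 32; i++)
		if (val & (1u << i))
			pending[i] = true;
}

/* ICPENDR semantics: writing 1 clears the pending bit, 0 is ignored. */
static void apply_cpending(uint32_t val)
{
	for (int i = 0; i < 32; i++)
		if (val & (1u << i))
			pending[i] = false;
}

int main(void)
{
	pending[3] = true;		/* stale state from before the restore */

	uint32_t val = 0x00000005;	/* userspace image: INTIDs 0 and 2 pending */

	/* Userspace ISPENDR write == ISPENDR(val) followed by ICPENDR(~val). */
	apply_spending(val);
	apply_cpending(~val);

	for (int i = 0; i < 4; i++)
		printf("intid %d: %s\n", i, pending[i] ? "pending" : "not pending");
	return 0;
}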

arch/arm64/kvm/vgic/vgic-mmio.c

@@ -301,9 +301,8 @@ static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
 		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
 }
 
-void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
-			      gpa_t addr, unsigned int len,
-			      unsigned long val)
+static void __set_pending(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len,
+			  unsigned long val, bool is_user)
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
@@ -312,14 +311,22 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		/* GICD_ISPENDR0 SGI bits are WI */
-		if (is_vgic_v2_sgi(vcpu, irq)) {
+		/* GICD_ISPENDR0 SGI bits are WI when written from the guest. */
+		if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
 			vgic_put_irq(vcpu->kvm, irq);
 			continue;
 		}
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
+		/*
+		 * GICv2 SGIs are terribly broken. We can't restore
+		 * the source of the interrupt, so just pick the vcpu
+		 * itself as the source...
+		 */
+		if (is_vgic_v2_sgi(vcpu, irq))
+			irq->source |= BIT(vcpu->vcpu_id);
+
 		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
 			/* HW SGI? Ask the GIC to inject it */
 			int err;
@@ -335,7 +342,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 		}
 
 		irq->pending_latch = true;
-		if (irq->hw)
+		if (irq->hw && !is_user)
 			vgic_irq_set_phys_active(irq, true);
 
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
@@ -343,33 +350,18 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 	}
 }
 
+void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
+			      gpa_t addr, unsigned int len,
+			      unsigned long val)
+{
+	__set_pending(vcpu, addr, len, val, false);
+}
+
 int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
 				gpa_t addr, unsigned int len,
 				unsigned long val)
 {
-	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
-	int i;
-	unsigned long flags;
-
-	for_each_set_bit(i, &val, len * 8) {
-		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-
-		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-		irq->pending_latch = true;
-
-		/*
-		 * GICv2 SGIs are terribly broken. We can't restore
-		 * the source of the interrupt, so just pick the vcpu
-		 * itself as the source...
-		 */
-		if (is_vgic_v2_sgi(vcpu, irq))
-			irq->source |= BIT(vcpu->vcpu_id);
-
-		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
-
-		vgic_put_irq(vcpu->kvm, irq);
-	}
-
+	__set_pending(vcpu, addr, len, val, true);
 	return 0;
 }
@@ -394,9 +386,9 @@ static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
 		vgic_irq_set_phys_active(irq, false);
 }
 
-void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
-			      gpa_t addr, unsigned int len,
-			      unsigned long val)
+static void __clear_pending(struct kvm_vcpu *vcpu,
+			    gpa_t addr, unsigned int len,
+			    unsigned long val, bool is_user)
 {
 	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
 	int i;
@@ -405,14 +397,22 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
-		/* GICD_ICPENDR0 SGI bits are WI */
-		if (is_vgic_v2_sgi(vcpu, irq)) {
+		/* GICD_ICPENDR0 SGI bits are WI when written from the guest. */
+		if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
 			vgic_put_irq(vcpu->kvm, irq);
 			continue;
 		}
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
+		/*
+		 * More fun with GICv2 SGIs! If we're clearing one of them
+		 * from userspace, which source vcpu to clear? Let's not
+		 * even think of it, and blow the whole set.
+		 */
+		if (is_vgic_v2_sgi(vcpu, irq))
+			irq->source = 0;
+
 		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
 			/* HW SGI? Ask the GIC to clear its pending bit */
 			int err;
@@ -427,7 +427,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 			continue;
 		}
 
-		if (irq->hw)
+		if (irq->hw && !is_user)
 			vgic_hw_irq_cpending(vcpu, irq);
 		else
 			irq->pending_latch = false;
@@ -437,33 +437,18 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 	}
 }
 
+void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+			      gpa_t addr, unsigned int len,
+			      unsigned long val)
+{
+	__clear_pending(vcpu, addr, len, val, false);
+}
+
 int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
 				gpa_t addr, unsigned int len,
 				unsigned long val)
 {
-	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
-	int i;
-	unsigned long flags;
-
-	for_each_set_bit(i, &val, len * 8) {
-		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-
-		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-
-		/*
-		 * More fun with GICv2 SGIs! If we're clearing one of them
-		 * from userspace, which source vcpu to clear? Let's not
-		 * even think of it, and blow the whole set.
-		 */
-		if (is_vgic_v2_sgi(vcpu, irq))
-			irq->source = 0;
-
-		irq->pending_latch = false;
-		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
-
-		vgic_put_irq(vcpu->kvm, irq);
-	}
-
+	__clear_pending(vcpu, addr, len, val, true);
 	return 0;
 }
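
The shape of the cleanup above is a single worker parameterised by an is_user flag, with the guest MMIO handler and the userspace accessor reduced to thin wrappers around it. A minimal stand-alone sketch of that structure (set_pending(), SGI_MASK and pending_bitmap are invented names, and the SGI handling is deliberately simplified to "a guest write to the SGI bits is ignored"):

/*
 * Stand-alone sketch (not KVM code) of the worker-plus-wrappers layout:
 * the flag selects the few places where guest and userspace writes
 * need to behave differently.
 */
#include <stdbool.h>
#include <stdio.h>

#define SGI_MASK 0xffffu		/* INTIDs 0-15 of the first register */

static unsigned int pending_bitmap;

/* Common worker shared by both access paths. */
static void set_pending(unsigned int mask, bool is_user)
{
	if (!is_user)
		mask &= ~SGI_MASK;	/* SGI bits are WI for the guest */

	pending_bitmap |= mask;
}

/* Guest MMIO path. */
static void mmio_write_spending(unsigned int mask)
{
	set_pending(mask, false);
}

/* Userspace (save/restore) path. */
static int uaccess_write_spending(unsigned int mask)
{
	set_pending(mask, true);
	return 0;
}

int main(void)
{
	mmio_write_spending(0x0001000fu);	/* SGI bits dropped */
	printf("after guest write:     %#x\n", pending_bitmap);

	uaccess_write_spending(0x0000000fu);	/* restore keeps them */
	printf("after userspace write: %#x\n", pending_bitmap);
	return 0;
}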