vhost: fix possible wrap in SVQ descriptor ring
QEMU invokes vhost_svq_add() when adding a guest's element
into SVQ. In vhost_svq_add(), it uses vhost_svq_available_slots()
to check whether there is enough space in SVQ for the element. If
there is, QEMU combines some out descriptors and some in descriptors
into one descriptor chain and adds it to `svq->vring.desc` via
vhost_svq_vring_write_descs().
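For orientation, here is a minimal standalone sketch of that chain-building
step. It is not the real QEMU code: vring_desc_t, iovec_t and
write_desc_chain() are hypothetical stand-ins, and the real path additionally
translates addresses through svq->iova_tree and tracks the chain layout with
svq->free_head/svq->desc_next[].

#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for the split-vring descriptor and an I/O vector.
 * The real definitions live in the virtio headers and in QEMU. */
#define VRING_DESC_F_NEXT  1
#define VRING_DESC_F_WRITE 2

typedef struct {
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;
} vring_desc_t;

typedef struct {
    uint64_t base;   /* already-translated address, for brevity */
    size_t   len;
} iovec_t;

/* Hypothetical helper: each out buffer becomes a device-readable
 * descriptor, each in buffer a device-writable one, all linked with
 * VRING_DESC_F_NEXT. It assumes the chain fits contiguously starting at
 * `head` -- exactly the guarantee the buggy slot check failed to give.
 * Returns the number of descriptors consumed. */
unsigned write_desc_chain(vring_desc_t *desc, uint16_t head,
                          const iovec_t *out_sg, unsigned out_num,
                          const iovec_t *in_sg, unsigned in_num)
{
    unsigned total = out_num + in_num;
    uint16_t i = head;

    for (unsigned n = 0; n < total; n++) {
        int is_in = n >= out_num;
        const iovec_t *sg = is_in ? &in_sg[n - out_num] : &out_sg[n];

        desc[i].addr  = sg->base;
        desc[i].len   = (uint32_t)sg->len;
        desc[i].flags = (is_in ? VRING_DESC_F_WRITE : 0) |
                        (n + 1 < total ? VRING_DESC_F_NEXT : 0);
        desc[i].next  = i + 1;  /* the real SVQ follows svq->desc_next[] */
        i++;
    }
    return total;
}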
The problem is that `svq->shadow_avail_idx - svq->shadow_used_idx`
in vhost_svq_available_slots() counts occupied elements, i.e.
descriptor chains, rather than occupied descriptors, which may cause
the SVQ descriptor ring to wrap.
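To make the mismatch concrete, here is a simplified before/after sketch of
that computation; the svq_state_t type and its field names are illustrative
stand-ins rather than QEMU's actual definitions.

#include <stdint.h>

/* Illustrative state only; the type and field names are not QEMU's. */
typedef struct {
    uint16_t vring_num;         /* total descriptors in the ring       */
    uint16_t shadow_avail_idx;  /* chains made available to the device */
    uint16_t shadow_used_idx;   /* chains already consumed             */
    uint16_t num_free;          /* free descriptors (what we need)     */
} svq_state_t;

/* Pre-patch logic: counts free chain slots, not free descriptors. */
uint16_t available_slots_buggy(const svq_state_t *s)
{
    return s->vring_num - (s->shadow_avail_idx - s->shadow_used_idx);
}

/* Post-patch logic: a counter updated by the add/get paths with the
 * actual per-chain descriptor count answers the right question. */
uint16_t available_slots_fixed(const svq_state_t *s)
{
    return s->num_free;
}

The subtraction only says how many chains are in flight; a chain may occupy
any number of descriptors, so it cannot say how many descriptors remain.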
Here is an example. In vhost_handle_guest_kick(), QEMU forwards
as many available buffers as possible to the device via virtqueue_pop()
and vhost_svq_add_element(). virtqueue_pop() returns a guest's element,
which is then added to SVQ by vhost_svq_add_element(), a wrapper
around vhost_svq_add(). If QEMU invokes virtqueue_pop() and
vhost_svq_add_element() `svq->vring.num` times,
vhost_svq_available_slots() thinks QEMU has just run out of slots and
everything should work fine. But in fact, virtqueue_pop() has returned
`svq->vring.num` elements, i.e. descriptor chains, which due to guest
memory fragmentation may contain more than `svq->vring.num` descriptors
in total, and this causes the SVQ descriptor ring to wrap.
This bug exists even before the descriptors are marked used: if the
guest memory is fragmented, SVQ must add descriptor chains, so it can
end up trying to add more descriptors than the ring can hold.
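As a standalone illustration with made-up numbers (a 256-entry ring where
every popped element is fragmented into a 2-descriptor chain; the variables
only loosely mirror the SVQ fields), the pre-patch check keeps accepting
chains well past the point where the ring is full:

#include <stdio.h>

/* Illustrative numbers only: a 256-entry ring where, due to guest memory
 * fragmentation, every popped element turns into a 2-descriptor chain. */
int main(void)
{
    const unsigned vring_num = 256;
    unsigned shadow_avail_idx = 0, shadow_used_idx = 0;
    unsigned descs_written = 0;

    for (unsigned elem = 0; elem < vring_num; elem++) {
        /* Pre-patch check: it only counts chains, so it still sees room. */
        unsigned slots = vring_num - (shadow_avail_idx - shadow_used_idx);
        if (slots == 0) {
            break;
        }
        descs_written += 2;      /* ...but each chain takes 2 descriptors. */
        shadow_avail_idx++;
        if (descs_written > vring_num) {
            printf("element %u overflows the ring: %u descriptors in a "
                   "%u-entry ring\n", elem, descs_written, vring_num);
            break;
        }
    }
    return 0;
}

With the patch, `num_free` drops by two per chain, reaches zero after 128
elements, and vhost_svq_available_slots() refuses the 129th.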
This patch solves it by adding a `num_free` field to the
VhostShadowVirtqueue structure and updating it in vhost_svq_add()
and vhost_svq_get_buf() to track the number of free descriptors.
Fixes: 100890f7ca ("vhost: Shadow virtqueue buffers forwarding")
Signed-off-by: Hawkins Jiawei <yin31149@gmail.com>
Acked-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20230509084817.3973-1-yin31149@gmail.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
parent 297e818219
commit 5d410557de
2 changed files with 7 additions and 1 deletion
hw/virtio/vhost-shadow-virtqueue.c

@@ -68,7 +68,7 @@ bool vhost_svq_valid_features(uint64_t features, Error **errp)
  */
 static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq)
 {
-    return svq->vring.num - (svq->shadow_avail_idx - svq->shadow_used_idx);
+    return svq->num_free;
 }

 /**
@@ -263,6 +263,7 @@ int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
         return -EINVAL;
     }

+    svq->num_free -= ndescs;
     svq->desc_state[qemu_head].elem = elem;
     svq->desc_state[qemu_head].ndescs = ndescs;
     vhost_svq_kick(svq);
@@ -449,6 +450,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
     last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
     svq->desc_next[last_used_chain] = svq->free_head;
     svq->free_head = used_elem.id;
+    svq->num_free += num;

     *len = used_elem.len;
     return g_steal_pointer(&svq->desc_state[used_elem.id].elem);
@@ -659,6 +661,7 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
     svq->iova_tree = iova_tree;

     svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
+    svq->num_free = svq->vring.num;
     driver_size = vhost_svq_driver_area_size(svq);
     device_size = vhost_svq_device_area_size(svq);
     svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size);

hw/virtio/vhost-shadow-virtqueue.h

@@ -107,6 +107,9 @@ typedef struct VhostShadowVirtqueue {

     /* Next head to consume from the device */
     uint16_t last_used_idx;
+
+    /* Size of SVQ vring free descriptors */
+    uint16_t num_free;
 } VhostShadowVirtqueue;

 bool vhost_svq_valid_features(uint64_t features, Error **errp);