dataplane: change vring API to use VirtQueueElement

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Author: Paolo Bonzini, 2013-12-10 13:27:00 +01:00
Committed by: Stefan Hajnoczi
parent 781c117f37
commit 8c1b566fd1
3 changed files with 72 additions and 76 deletions

View file

@@ -35,7 +35,7 @@ enum {
typedef struct {
struct iocb iocb; /* Linux AIO control block */
QEMUIOVector *inhdr; /* iovecs for virtio_blk_inhdr */
unsigned int head; /* vring descriptor index */
VirtQueueElement *elem; /* saved data from the virtqueue */
struct iovec *bounce_iov; /* used if guest buffers are unaligned */
QEMUIOVector *read_qiov; /* for read completion /w bounce buffer */
} VirtIOBlockRequest;
@@ -96,7 +96,7 @@ static void complete_request(struct iocb *iocb, ssize_t ret, void *opaque)
len = 0;
}
trace_virtio_blk_data_plane_complete_request(s, req->head, ret);
trace_virtio_blk_data_plane_complete_request(s, req->elem->index, ret);
if (req->read_qiov) {
assert(req->bounce_iov);
@@ -118,12 +118,12 @@ static void complete_request(struct iocb *iocb, ssize_t ret, void *opaque)
* written to, but for virtio-blk it seems to be the number of bytes
* transferred plus the status bytes.
*/
vring_push(&s->vring, req->head, len + sizeof(hdr));
vring_push(&s->vring, req->elem, len + sizeof(hdr));
req->elem = NULL;
s->num_reqs--;
}
static void complete_request_early(VirtIOBlockDataPlane *s, unsigned int head,
static void complete_request_early(VirtIOBlockDataPlane *s, VirtQueueElement *elem,
QEMUIOVector *inhdr, unsigned char status)
{
struct virtio_blk_inhdr hdr = {
@@ -134,26 +134,26 @@ static void complete_request_early(VirtIOBlockDataPlane *s, unsigned int head,
qemu_iovec_destroy(inhdr);
g_slice_free(QEMUIOVector, inhdr);
vring_push(&s->vring, head, sizeof(hdr));
vring_push(&s->vring, elem, sizeof(hdr));
notify_guest(s);
}
/* Get disk serial number */
static void do_get_id_cmd(VirtIOBlockDataPlane *s,
struct iovec *iov, unsigned int iov_cnt,
unsigned int head, QEMUIOVector *inhdr)
VirtQueueElement *elem, QEMUIOVector *inhdr)
{
char id[VIRTIO_BLK_ID_BYTES];
/* Serial number not NUL-terminated when shorter than buffer */
strncpy(id, s->blk->serial ? s->blk->serial : "", sizeof(id));
iov_from_buf(iov, iov_cnt, 0, id, sizeof(id));
complete_request_early(s, head, inhdr, VIRTIO_BLK_S_OK);
complete_request_early(s, elem, inhdr, VIRTIO_BLK_S_OK);
}
static int do_rdwr_cmd(VirtIOBlockDataPlane *s, bool read,
struct iovec *iov, unsigned int iov_cnt,
long long offset, unsigned int head,
struct iovec *iov, unsigned iov_cnt,
long long offset, VirtQueueElement *elem,
QEMUIOVector *inhdr)
{
struct iocb *iocb;
@@ -186,19 +186,20 @@ static int do_rdwr_cmd(VirtIOBlockDataPlane *s, bool read,
/* Fill in virtio block metadata needed for completion */
VirtIOBlockRequest *req = container_of(iocb, VirtIOBlockRequest, iocb);
req->head = head;
req->elem = elem;
req->inhdr = inhdr;
req->bounce_iov = bounce_iov;
req->read_qiov = read_qiov;
return 0;
}
static int process_request(IOQueue *ioq, struct iovec iov[],
unsigned int out_num, unsigned int in_num,
unsigned int head)
static int process_request(IOQueue *ioq, VirtQueueElement *elem)
{
VirtIOBlockDataPlane *s = container_of(ioq, VirtIOBlockDataPlane, ioqueue);
struct iovec *in_iov = &iov[out_num];
struct iovec *iov = elem->out_sg;
struct iovec *in_iov = elem->in_sg;
unsigned out_num = elem->out_num;
unsigned in_num = elem->in_num;
struct virtio_blk_outhdr outhdr;
QEMUIOVector *inhdr;
size_t in_size;
@@ -229,29 +230,29 @@ static int process_request(IOQueue *ioq, struct iovec iov[],
switch (outhdr.type) {
case VIRTIO_BLK_T_IN:
do_rdwr_cmd(s, true, in_iov, in_num, outhdr.sector * 512, head, inhdr);
do_rdwr_cmd(s, true, in_iov, in_num, outhdr.sector * 512, elem, inhdr);
return 0;
case VIRTIO_BLK_T_OUT:
do_rdwr_cmd(s, false, iov, out_num, outhdr.sector * 512, head, inhdr);
do_rdwr_cmd(s, false, iov, out_num, outhdr.sector * 512, elem, inhdr);
return 0;
case VIRTIO_BLK_T_SCSI_CMD:
/* TODO support SCSI commands */
complete_request_early(s, head, inhdr, VIRTIO_BLK_S_UNSUPP);
complete_request_early(s, elem, inhdr, VIRTIO_BLK_S_UNSUPP);
return 0;
case VIRTIO_BLK_T_FLUSH:
/* TODO fdsync not supported by Linux AIO, do it synchronously here! */
if (qemu_fdatasync(s->fd) < 0) {
complete_request_early(s, head, inhdr, VIRTIO_BLK_S_IOERR);
complete_request_early(s, elem, inhdr, VIRTIO_BLK_S_IOERR);
} else {
complete_request_early(s, head, inhdr, VIRTIO_BLK_S_OK);
complete_request_early(s, elem, inhdr, VIRTIO_BLK_S_OK);
}
return 0;
case VIRTIO_BLK_T_GET_ID:
do_get_id_cmd(s, in_iov, in_num, head, inhdr);
do_get_id_cmd(s, in_iov, in_num, elem, inhdr);
return 0;
default:
@@ -267,29 +268,8 @@ static void handle_notify(EventNotifier *e)
VirtIOBlockDataPlane *s = container_of(e, VirtIOBlockDataPlane,
host_notifier);
/* There is one array of iovecs into which all new requests are extracted
* from the vring. Requests are read from the vring and the translated
* descriptors are written to the iovecs array. The iovecs do not have to
* persist across handle_notify() calls because the kernel copies the
* iovecs on io_submit().
*
* Handling io_submit() EAGAIN may require storing the requests across
* handle_notify() calls until the kernel has sufficient resources to
* accept more I/O. This is not implemented yet.
*/
struct iovec iovec[VRING_MAX];
struct iovec *end = &iovec[VRING_MAX];
struct iovec *iov = iovec;
/* When a request is read from the vring, the index of the first descriptor
* (aka head) is returned so that the completed request can be pushed onto
* the vring later.
*
* The number of hypervisor read-only iovecs is out_num. The number of
* hypervisor write-only iovecs is in_num.
*/
int head;
unsigned int out_num = 0, in_num = 0;
VirtQueueElement *elem;
int ret;
unsigned int num_queued;
event_notifier_test_and_clear(&s->host_notifier);
@@ -298,30 +278,31 @@ static void handle_notify(EventNotifier *e)
vring_disable_notification(s->vdev, &s->vring);
for (;;) {
head = vring_pop(s->vdev, &s->vring, iov, end, &out_num, &in_num);
if (head < 0) {
ret = vring_pop(s->vdev, &s->vring, &elem);
if (ret < 0) {
assert(elem == NULL);
break; /* no more requests */
}
trace_virtio_blk_data_plane_process_request(s, out_num, in_num,
head);
trace_virtio_blk_data_plane_process_request(s, elem->out_num,
elem->in_num, elem->index);
if (process_request(&s->ioqueue, iov, out_num, in_num, head) < 0) {
if (process_request(&s->ioqueue, elem) < 0) {
vring_set_broken(&s->vring);
vring_free_element(elem);
ret = -EFAULT;
break;
}
iov += out_num + in_num;
}
if (likely(head == -EAGAIN)) { /* vring emptied */
if (likely(ret == -EAGAIN)) { /* vring emptied */
/* Re-enable guest->host notifies and stop processing the vring.
* But if the guest has snuck in more descriptors, keep processing.
*/
if (vring_enable_notification(s->vdev, &s->vring)) {
break;
}
} else { /* head == -ENOBUFS or fatal error, iovecs[] is depleted */
} else { /* ret == -ENOBUFS or fatal error, iovecs[] is depleted */
/* Since there are no iovecs[] left, stop processing for now. Do
* not re-enable guest->host notifies since the I/O completion
* handler knows to check for more vring descriptors anyway.

View file

@@ -111,29 +111,32 @@ bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
}
static int get_desc(Vring *vring,
struct iovec iov[], struct iovec *iov_end,
unsigned int *out_num, unsigned int *in_num,
static int get_desc(Vring *vring, VirtQueueElement *elem,
struct vring_desc *desc)
{
unsigned *num;
struct iovec *iov;
hwaddr *addr;
if (desc->flags & VRING_DESC_F_WRITE) {
num = in_num;
num = &elem->in_num;
iov = &elem->in_sg[*num];
addr = &elem->in_addr[*num];
} else {
num = out_num;
num = &elem->out_num;
iov = &elem->out_sg[*num];
addr = &elem->out_addr[*num];
/* If it's an output descriptor, they're all supposed
* to come before any input descriptors. */
if (unlikely(*in_num)) {
if (unlikely(elem->in_num)) {
error_report("Descriptor has out after in");
return -EFAULT;
}
}
/* Stop for now if there are not enough iovecs available. */
iov += *in_num + *out_num;
if (iov >= iov_end) {
if (*num >= VIRTQUEUE_MAX_SIZE) {
return -ENOBUFS;
}
@@ -147,14 +150,13 @@ static int get_desc(Vring *vring,
}
iov->iov_len = desc->len;
*addr = desc->addr;
*num += 1;
return 0;
}
/* This is stolen from linux/drivers/vhost/vhost.c. */
static int get_indirect(Vring *vring,
struct iovec iov[], struct iovec *iov_end,
unsigned int *out_num, unsigned int *in_num,
static int get_indirect(Vring *vring, VirtQueueElement *elem,
struct vring_desc *indirect)
{
struct vring_desc desc;
@@ -212,7 +214,7 @@ static int get_indirect(Vring *vring,
return -EFAULT;
}
ret = get_desc(vring, iov, iov_end, out_num, in_num, &desc);
ret = get_desc(vring, elem, &desc);
if (ret < 0) {
vring->broken |= (ret == -EFAULT);
return ret;
@@ -222,6 +224,11 @@ static int get_indirect(Vring *vring,
return 0;
}
/* Return a VirtQueueElement allocated by vring_pop() to the g_slice
 * allocator.
 *
 * Called by vring_push() once a completed request has been pushed onto
 * the used ring, and by error paths (vring_pop() failure, or a request
 * abandoned after vring_set_broken()) that must drop a popped element.
 * The element must not be touched after this call.
 */
void vring_free_element(VirtQueueElement *elem)
{
g_slice_free(VirtQueueElement, elem);
}
/* This looks in the virtqueue and for the first available buffer, and converts
* it to an iovec for convenient access. Since descriptors consist of some
* number of output then some number of input descriptors, it's actually two
@@ -234,12 +241,12 @@ static int get_indirect(Vring *vring,
* Stolen from linux/drivers/vhost/vhost.c.
*/
int vring_pop(VirtIODevice *vdev, Vring *vring,
struct iovec iov[], struct iovec *iov_end,
unsigned int *out_num, unsigned int *in_num)
VirtQueueElement **p_elem)
{
struct vring_desc desc;
unsigned int i, head, found = 0, num = vring->vr.num;
uint16_t avail_idx, last_avail_idx;
VirtQueueElement *elem = NULL;
int ret;
/* If there was a fatal error then refuse operation */
@@ -273,6 +280,10 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
* the index we've seen. */
head = vring->vr.avail->ring[last_avail_idx % num];
elem = g_slice_new(VirtQueueElement);
elem->index = head;
elem->in_num = elem->out_num = 0;
/* If their number is silly, that's an error. */
if (unlikely(head >= num)) {
error_report("Guest says index %u > %u is available", head, num);
@@ -284,9 +295,6 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
vring_avail_event(&vring->vr) = vring->vr.avail->idx;
}
/* When we start there are none of either input nor output. */
*out_num = *in_num = 0;
i = head;
do {
if (unlikely(i >= num)) {
@@ -306,14 +314,14 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
barrier();
if (desc.flags & VRING_DESC_F_INDIRECT) {
int ret = get_indirect(vring, iov, iov_end, out_num, in_num, &desc);
int ret = get_indirect(vring, elem, &desc);
if (ret < 0) {
goto out;
}
continue;
}
ret = get_desc(vring, iov, iov_end, out_num, in_num, &desc);
ret = get_desc(vring, elem, &desc);
if (ret < 0) {
goto out;
}
@@ -323,6 +331,7 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
/* On success, increment avail index. */
vring->last_avail_idx++;
*p_elem = elem;
return head;
out:
@ -330,6 +339,10 @@ out:
if (ret == -EFAULT) {
vring->broken = true;
}
if (elem) {
vring_free_element(elem);
}
*p_elem = NULL;
return ret;
}
@ -337,11 +350,14 @@ out:
*
* Stolen from linux/drivers/vhost/vhost.c.
*/
void vring_push(Vring *vring, unsigned int head, int len)
void vring_push(Vring *vring, VirtQueueElement *elem, int len)
{
struct vring_used_elem *used;
unsigned int head = elem->index;
uint16_t new;
vring_free_element(elem);
/* Don't touch vring if a fatal error occurred */
if (vring->broken) {
return;

View file

@@ -54,9 +54,8 @@ void vring_teardown(Vring *vring, VirtIODevice *vdev, int n);
void vring_disable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_enable_notification(VirtIODevice *vdev, Vring *vring);
bool vring_should_notify(VirtIODevice *vdev, Vring *vring);
int vring_pop(VirtIODevice *vdev, Vring *vring,
struct iovec iov[], struct iovec *iov_end,
unsigned int *out_num, unsigned int *in_num);
void vring_push(Vring *vring, unsigned int head, int len);
int vring_pop(VirtIODevice *vdev, Vring *vring, VirtQueueElement **elem);
void vring_push(Vring *vring, VirtQueueElement *elem, int len);
void vring_free_element(VirtQueueElement *elem);
#endif /* VRING_H */