Mirror of https://gitlab.com/qemu-project/qemu, synced 2024-11-05 20:35:44 +00:00
Commit 40f4a21895
The following scenario leads to an assertion failure in
qio_channel_yield():

1. Request coroutine calls qio_channel_yield() successfully when sending
   would block on the socket. It is now yielded.
2. nbd_read_reply_entry() calls nbd_recv_coroutines_enter_all() because
   nbd_receive_reply() failed.
3. Request coroutine is entered and returns from qio_channel_yield().
   Note that the socket fd handler has not fired yet, so
   ioc->write_coroutine is still set.
4. Request coroutine attempts to send the request body with nbd_rwv(),
   but the socket would still block. qio_channel_yield() is called again
   and assert(!ioc->write_coroutine) is hit.

The problem is that nbd_read_reply_entry() does not distinguish between
request coroutines that are waiting to receive a reply and those that
are not.

This patch adds a per-request bool "receiving" flag so that
nbd_read_reply_entry() can avoid spurious aio_wake() calls.

Reported-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <20170822125113.5025-1-stefanha@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Tested-by: Eric Blake <eblake@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
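The header below only adds the per-request "receiving" flag; the wakeup logic itself lives in block/nbd-client.c and is not shown on this page. As a rough sketch of how such a flag can gate the wakeup, built on the NBDClientRequest/NBDClientSession types declared in the header below and QEMU's aio_co_wake() helper (the loop body is illustrative, not a verbatim copy of the patch):

/*
 * Sketch (not the literal patch): wake only request coroutines that are
 * actually parked waiting for a reply.  A coroutine that is yielded in
 * qio_channel_yield() because a send would block has receiving == false
 * and is left alone, so ioc->write_coroutine stays consistent.
 */
static void nbd_recv_coroutines_enter_all(NBDClientSession *s)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        NBDClientRequest *req = &s->requests[i];

        /* Only requests blocked in "receive reply" are safe to re-enter */
        if (req->coroutine && req->receiving) {
            aio_co_wake(req->coroutine);
        }
    }
}

With this guard, step 3 of the failure scenario above no longer happens for a coroutine that was yielded on a blocked send, so the assert(!ioc->write_coroutine) in step 4 can no longer be reached via this path.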
63 lines
2 KiB
C
#ifndef NBD_CLIENT_H
#define NBD_CLIENT_H

#include "qemu-common.h"
#include "block/nbd.h"
#include "block/block_int.h"
#include "io/channel-socket.h"

/* #define DEBUG_NBD */

#if defined(DEBUG_NBD)
#define logout(fmt, ...) \
    fprintf(stderr, "nbd\t%-24s" fmt, __func__, ##__VA_ARGS__)
#else
#define logout(fmt, ...) ((void)0)
#endif

#define MAX_NBD_REQUESTS 16

typedef struct {
    Coroutine *coroutine;
    bool receiving;         /* waiting for read_reply_co? */
} NBDClientRequest;

typedef struct NBDClientSession {
    QIOChannelSocket *sioc; /* The master data channel */
    QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
    NBDExportInfo info;

    CoMutex send_mutex;
    CoQueue free_sema;
    Coroutine *read_reply_co;
    int in_flight;

    NBDClientRequest requests[MAX_NBD_REQUESTS];
    NBDReply reply;
    bool quit;
} NBDClientSession;

NBDClientSession *nbd_get_client_session(BlockDriverState *bs);

int nbd_client_init(BlockDriverState *bs,
                    QIOChannelSocket *sock,
                    const char *export_name,
                    QCryptoTLSCreds *tlscreds,
                    const char *hostname,
                    Error **errp);
void nbd_client_close(BlockDriverState *bs);

int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes);
int nbd_client_co_flush(BlockDriverState *bs);
int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, QEMUIOVector *qiov, int flags);
int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                int bytes, BdrvRequestFlags flags);
int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
                         uint64_t bytes, QEMUIOVector *qiov, int flags);

void nbd_client_detach_aio_context(BlockDriverState *bs);
void nbd_client_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context);

#endif /* NBD_CLIENT_H */
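On the request side, the receiving flag only needs to be true for the window in which a request coroutine is actually parked waiting for nbd_read_reply_entry() to hand it a reply. A minimal sketch of that pattern; the helper name nbd_co_wait_for_reply and the explicit slot index are hypothetical, shown only to illustrate how the field above is meant to be used:

/*
 * Hypothetical helper: park the request coroutine for slot i until the
 * reply-reading coroutine (or an error path) wakes it up again.
 */
static void coroutine_fn nbd_co_wait_for_reply(NBDClientSession *s, int i)
{
    s->requests[i].receiving = true;   /* now eligible for aio_co_wake() */
    qemu_coroutine_yield();            /* woken by nbd_read_reply_entry() */
    s->requests[i].receiving = false;  /* no longer waiting for a reply */
}

Outside that window the coroutine may still yield, for example inside qio_channel_yield() while sending the request, but with receiving == false it will not be spuriously re-entered by the reply coroutine.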