Mirror of https://gitlab.com/qemu-project/qemu (synced 2024-11-05 20:35:44 +00:00)
nbd patches for 2021-09-27

- Vladimir Sementsov-Ogievskiy: Rework coroutines of qemu NBD client to
  improve reconnect support
- Eric Blake: Relax server in regards to NBD_OPT_LIST_META_CONTEXT
- Vladimir Sementsov-Ogievskiy: Plumb up 64-bit bulk-zeroing support in
  block layer, in preparation for future NBD spec extensions
- Nir Soffer: Default to writeback cache in qemu-nbd

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEccLMIrHEYCkn0vOqp6FrSiUnQ2oFAmFU1a4ACgkQp6FrSiUn
Q2obEggAq4KgnBILBip6TsFc9p7yzVpK9qWzri0vhJHOI7p+dE5Bxqt8sLBEdMQP
OiV5WE/ZRHogGULXXJChBmUTK2/z0U57AXVvbCpKWgr48kSj818dk8uhuUjBeWaM
5Qc8PsH+/Rij5WRlVmTePu7QMwXp1h3+gkw3fmLlHRkvzO4MmOBn1lrOqnCxSGbo
xnFvfdeplNiexmpImdO6QSaHfDsmnUqOFpBPZUsKXfdHOiRqg2I3eU1ibv8eQH7B
oTtBxWI0KHc6kWaJUWqZbEe4ChJTHvAsVdZxmB+il1diIl46lq+s/Zn7nT+0HXTD
pS/Fws9rrGKS/7PnGeFOfskVVCRnIw==
=VGqR
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/ericb/tags/pull-nbd-2021-09-27-v2' into staging

nbd patches for 2021-09-27

- Vladimir Sementsov-Ogievskiy: Rework coroutines of qemu NBD client to
  improve reconnect support
- Eric Blake: Relax server in regards to NBD_OPT_LIST_META_CONTEXT
- Vladimir Sementsov-Ogievskiy: Plumb up 64-bit bulk-zeroing support in
  block layer, in preparation for future NBD spec extensions
- Nir Soffer: Default to writeback cache in qemu-nbd

# gpg: Signature made Wed 29 Sep 2021 22:07:58 BST
# gpg:                using RSA key 71C2CC22B1C4602927D2F3AAA7A16B4A2527436A
# gpg: Good signature from "Eric Blake <eblake@redhat.com>" [full]
# gpg:                 aka "Eric Blake (Free Software Programmer) <ebb9@byu.net>" [full]
# gpg:                 aka "[jpeg image of size 6874]" [full]
# Primary key fingerprint: 71C2 CC22 B1C4 6029 27D2 F3AA A7A1 6B4A 2527 436A

* remotes/ericb/tags/pull-nbd-2021-09-27-v2:
  block/nbd: check that received handle is valid
  block/nbd: drop connection_co
  block/nbd: refactor nbd_recv_coroutines_wake_all()
  block/nbd: move nbd_recv_coroutines_wake_all() up
  block/nbd: nbd_channel_error() shutdown channel unconditionally
  nbd/client-connection: nbd_co_establish_connection(): fix non set errp
  nbd/server: Allow LIST_META_CONTEXT without STRUCTURED_REPLY
  block/io: allow 64bit discard requests
  block: use int64_t instead of int in driver discard handlers
  block: make BlockLimits::max_pdiscard 64bit
  block/io: allow 64bit write-zeroes requests
  block: use int64_t instead of int in driver write_zeroes handlers
  block: make BlockLimits::max_pwrite_zeroes 64bit
  block: use int64_t instead of uint64_t in copy_range driver handlers
  block: use int64_t instead of uint64_t in driver write handlers
  block: use int64_t instead of uint64_t in driver read handlers
  qcow2: check request on vmstate save/load path
  block/io: bring request check to bdrv_co_(read,write)v_vmstate
  qemu-nbd: Change default cache mode to writeback

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit 98850d84f7: 45 changed files with 583 additions and 594 deletions.
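Most of the diff below is mechanical: driver read, write, write-zeroes and discard callbacks move from uint64_t (or plain int) offset/bytes parameters to int64_t, the flags parameter becomes BdrvRequestFlags, and the BlockLimits fields max_pdiscard and max_pwrite_zeroes become 64-bit. As a rough, stand-alone illustration of why the limit type matters (this is not QEMU code; MIN_NON_ZERO merely mirrors the macro block/io.c already uses, and the 16 GiB request is an arbitrary example), clamping a driver limit to INT_MAX forces a large discard to be split, while a 64-bit limit passes it through whole:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Same behaviour as QEMU's MIN_NON_ZERO: 0 means "no limit from this side". */
#define MIN_NON_ZERO(a, b) \
    ((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

int main(void)
{
    int64_t max_pdiscard = 0;        /* driver reports no specific limit */
    int64_t request = 16LL << 30;    /* one hypothetical 16 GiB discard */

    /* Old behaviour: the block layer clamped limits to INT_MAX (~2 GiB). */
    int64_t old_limit = MIN_NON_ZERO(max_pdiscard, (int64_t)INT_MAX);
    /* New behaviour: limits stay 64-bit, so the request is not fragmented. */
    int64_t new_limit = MIN_NON_ZERO(max_pdiscard, INT64_MAX);

    printf("old limit: %d fragments\n",
           (int)(request / old_limit + (request % old_limit != 0)));
    printf("new limit: %d fragments\n",
           (int)(request / new_limit + (request % new_limit != 0)));
    return 0;
}

The block/io.c hunks below make exactly that clamp change, replacing INT_MAX with INT64_MAX for max_pdiscard and max_pwrite_zeroes.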
block/blkdebug.c:

@@ -631,8 +631,8 @@
 static int coroutine_fn
-blkdebug_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                   QEMUIOVector *qiov, int flags)
+blkdebug_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                   QEMUIOVector *qiov, BdrvRequestFlags flags)
@@ -652,8 +652,8 @@
 static int coroutine_fn
-blkdebug_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                    QEMUIOVector *qiov, int flags)
+blkdebug_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                    QEMUIOVector *qiov, BdrvRequestFlags flags)
@@ -684,7 +684,7 @@
 static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs,
-                                                  int64_t offset, int bytes,
+                                                  int64_t offset, int64_t bytes,
                                                   BdrvRequestFlags flags)
@@ -717,7 +717,7 @@
 static int coroutine_fn blkdebug_co_pdiscard(BlockDriverState *bs,
-                                             int64_t offset, int bytes)
+                                             int64_t offset, int64_t bytes)
block/blklogwrites.c:

@@ -301,8 +301,8 @@
 static int coroutine_fn
-blk_log_writes_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                         QEMUIOVector *qiov, int flags)
+blk_log_writes_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                         QEMUIOVector *qiov, BdrvRequestFlags flags)
@@ -460,16 +460,16 @@
 static int coroutine_fn
-blk_log_writes_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                          QEMUIOVector *qiov, int flags)
+blk_log_writes_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                          QEMUIOVector *qiov, BdrvRequestFlags flags)
 static int coroutine_fn
-blk_log_writes_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
-                                BdrvRequestFlags flags)
+blk_log_writes_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
+                                int64_t bytes, BdrvRequestFlags flags)
@@ -484,9 +484,9 @@
 static int coroutine_fn
-blk_log_writes_co_pdiscard(BlockDriverState *bs, int64_t offset, int count)
+blk_log_writes_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
 {
-    return blk_log_writes_co_log(bs, offset, count, NULL, 0,
+    return blk_log_writes_co_log(bs, offset, bytes, NULL, 0,
                                  blk_log_writes_co_do_file_pdiscard,
                                  LOG_DISCARD_FLAG, false);
 }
block/blkreplay.c:

@@ -72,7 +72,7 @@
 static int coroutine_fn blkreplay_co_preadv(BlockDriverState *bs,
-    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
+    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
@@ -83,7 +83,7 @@
 static int coroutine_fn blkreplay_co_pwritev(BlockDriverState *bs,
-    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
+    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
@@ -94,7 +94,7 @@
 static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs,
-    int64_t offset, int bytes, BdrvRequestFlags flags)
+    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
@@ -105,7 +105,7 @@
 static int coroutine_fn blkreplay_co_pdiscard(BlockDriverState *bs,
-    int64_t offset, int bytes)
+    int64_t offset, int64_t bytes)
block/blkverify.c:

@@ -221,8 +221,8 @@
 static int coroutine_fn
-blkverify_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                    QEMUIOVector *qiov, int flags)
+blkverify_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                    QEMUIOVector *qiov, BdrvRequestFlags flags)
@@ -250,8 +250,8 @@
 static int coroutine_fn
-blkverify_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                     QEMUIOVector *qiov, int flags)
+blkverify_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                     QEMUIOVector *qiov, BdrvRequestFlags flags)
block/bochs.c:

@@ -238,8 +238,8 @@
 static int coroutine_fn
-bochs_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                QEMUIOVector *qiov, int flags)
+bochs_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                QEMUIOVector *qiov, BdrvRequestFlags flags)
block/cloop.c:

@@ -245,8 +245,8 @@
 static int coroutine_fn
-cloop_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                QEMUIOVector *qiov, int flags)
+cloop_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                QEMUIOVector *qiov, BdrvRequestFlags flags)
block/commit.c:

@@ -207,7 +207,7 @@
 static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs,
-    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
+    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
block/copy-before-write.c:

@@ -40,8 +40,8 @@
 static coroutine_fn int cbw_co_preadv(
-    BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-    QEMUIOVector *qiov, int flags)
+    BlockDriverState *bs, int64_t offset, int64_t bytes,
+    QEMUIOVector *qiov, BdrvRequestFlags flags)
@@ -64,7 +64,7 @@
 static int coroutine_fn cbw_co_pdiscard(BlockDriverState *bs,
-                                        int64_t offset, int bytes)
+                                        int64_t offset, int64_t bytes)
@@ -75,7 +75,7 @@
 static int coroutine_fn cbw_co_pwrite_zeroes(BlockDriverState *bs,
-    int64_t offset, int bytes, BdrvRequestFlags flags)
+    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
@@ -86,9 +86,10 @@
 static coroutine_fn int cbw_co_pwritev(BlockDriverState *bs,
-                                       uint64_t offset,
-                                       uint64_t bytes,
-                                       QEMUIOVector *qiov, int flags)
+                                       int64_t offset,
+                                       int64_t bytes,
+                                       QEMUIOVector *qiov,
+                                       BdrvRequestFlags flags)
block/copy-on-read.c:

@@ -128,10 +128,10 @@
 static int coroutine_fn cor_co_preadv_part(BlockDriverState *bs,
-                                           uint64_t offset, uint64_t bytes,
+                                           int64_t offset, int64_t bytes,
                                            QEMUIOVector *qiov,
                                            size_t qiov_offset,
-                                           int flags)
+                                           BdrvRequestFlags flags)
@@ -181,10 +181,11 @@
 static int coroutine_fn cor_co_pwritev_part(BlockDriverState *bs,
-                                            uint64_t offset,
-                                            uint64_t bytes,
+                                            int64_t offset,
+                                            int64_t bytes,
                                             QEMUIOVector *qiov,
-                                            size_t qiov_offset, int flags)
+                                            size_t qiov_offset,
+                                            BdrvRequestFlags flags)
@@ -192,7 +193,7 @@
 static int coroutine_fn cor_co_pwrite_zeroes(BlockDriverState *bs,
-                                             int64_t offset, int bytes,
+                                             int64_t offset, int64_t bytes,
                                              BdrvRequestFlags flags)
@@ -200,15 +201,15 @@
 static int coroutine_fn cor_co_pdiscard(BlockDriverState *bs,
-                                        int64_t offset, int bytes)
+                                        int64_t offset, int64_t bytes)
 static int coroutine_fn cor_co_pwritev_compressed(BlockDriverState *bs,
-                                                  uint64_t offset,
-                                                  uint64_t bytes,
+                                                  int64_t offset,
+                                                  int64_t bytes,
                                                   QEMUIOVector *qiov)
block/crypto.c:

@@ -397,8 +397,8 @@
 static coroutine_fn int
-block_crypto_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                       QEMUIOVector *qiov, int flags)
+block_crypto_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                       QEMUIOVector *qiov, BdrvRequestFlags flags)
@@ -460,8 +460,8 @@
 static coroutine_fn int
-block_crypto_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-                        QEMUIOVector *qiov, int flags)
+block_crypto_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                        QEMUIOVector *qiov, BdrvRequestFlags flags)
block/curl.c:

@@ -896,7 +896,8 @@
 static int coroutine_fn curl_co_preadv(BlockDriverState *bs,
-        uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
+        int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+        BdrvRequestFlags flags)
block/dmg.c:

@@ -689,8 +689,8 @@
 static int coroutine_fn
-dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-              QEMUIOVector *qiov, int flags)
+dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+              QEMUIOVector *qiov, BdrvRequestFlags flags)
block/file-posix.c:

@@ -2077,16 +2077,16 @@
-static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset,
-                                      uint64_t bytes, QEMUIOVector *qiov,
-                                      int flags)
+static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
+                                      int64_t bytes, QEMUIOVector *qiov,
+                                      BdrvRequestFlags flags)
-static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset,
-                                       uint64_t bytes, QEMUIOVector *qiov,
-                                       int flags)
+static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
+                                       int64_t bytes, QEMUIOVector *qiov,
+                                       BdrvRequestFlags flags)
@@ -2942,7 +2942,8 @@
 static coroutine_fn int
-raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev)
+raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes,
+                bool blkdev)
@@ -2966,13 +2967,13 @@
 static coroutine_fn int
-raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
+raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
 static int coroutine_fn
-raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes,
+raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
                      BdrvRequestFlags flags, bool blkdev)
@@ -3040,7 +3041,7 @@
 static int coroutine_fn raw_co_pwrite_zeroes(
     BlockDriverState *bs, int64_t offset,
-    int bytes, BdrvRequestFlags flags)
+    int64_t bytes, BdrvRequestFlags flags)
@@ -3203,8 +3204,8 @@
 static int coroutine_fn raw_co_copy_range_from(
-    BlockDriverState *bs, BdrvChild *src, uint64_t src_offset,
-    BdrvChild *dst, uint64_t dst_offset, uint64_t bytes,
+    BlockDriverState *bs, BdrvChild *src, int64_t src_offset,
+    BdrvChild *dst, int64_t dst_offset, int64_t bytes,
     BdrvRequestFlags read_flags, BdrvRequestFlags write_flags)
@@ -3213,10 +3214,10 @@
 static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs,
                                              BdrvChild *src,
-                                             uint64_t src_offset,
+                                             int64_t src_offset,
                                              BdrvChild *dst,
-                                             uint64_t dst_offset,
-                                             uint64_t bytes,
+                                             int64_t dst_offset,
+                                             int64_t bytes,
                                              BdrvRequestFlags read_flags,
                                              BdrvRequestFlags write_flags)
@@ -3591,7 +3592,7 @@
 static coroutine_fn int
-hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
+hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
@@ -3605,7 +3606,7 @@
 static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs,
-    int64_t offset, int bytes, BdrvRequestFlags flags)
+    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
block/file-win32.c:

@@ -440,8 +440,8 @@
 static BlockAIOCB *raw_aio_preadv(BlockDriverState *bs,
-                                  uint64_t offset, uint64_t bytes,
-                                  QEMUIOVector *qiov, int flags,
+                                  int64_t offset, int64_t bytes,
+                                  QEMUIOVector *qiov, BdrvRequestFlags flags,
                                   BlockCompletionFunc *cb, void *opaque)
@@ -455,8 +455,8 @@
 static BlockAIOCB *raw_aio_pwritev(BlockDriverState *bs,
-                                   uint64_t offset, uint64_t bytes,
-                                   QEMUIOVector *qiov, int flags,
+                                   int64_t offset, int64_t bytes,
+                                   QEMUIOVector *qiov, BdrvRequestFlags flags,
                                    BlockCompletionFunc *cb, void *opaque)
block/filter-compress.c:

@@ -63,10 +63,10 @@
 static int coroutine_fn compress_co_preadv_part(BlockDriverState *bs,
-                                                uint64_t offset, uint64_t bytes,
+                                                int64_t offset, int64_t bytes,
                                                 QEMUIOVector *qiov,
                                                 size_t qiov_offset,
-                                                int flags)
+                                                BdrvRequestFlags flags)
@@ -74,10 +74,11 @@
 static int coroutine_fn compress_co_pwritev_part(BlockDriverState *bs,
-                                                 uint64_t offset,
-                                                 uint64_t bytes,
+                                                 int64_t offset,
+                                                 int64_t bytes,
                                                  QEMUIOVector *qiov,
-                                                 size_t qiov_offset, int flags)
+                                                 size_t qiov_offset,
+                                                 BdrvRequestFlags flags)
@@ -85,7 +86,7 @@
 static int coroutine_fn compress_co_pwrite_zeroes(BlockDriverState *bs,
-                                                  int64_t offset, int bytes,
+                                                  int64_t offset, int64_t bytes,
                                                   BdrvRequestFlags flags)
@@ -93,7 +94,7 @@
 static int coroutine_fn compress_co_pdiscard(BlockDriverState *bs,
-                                             int64_t offset, int bytes)
+                                             int64_t offset, int64_t bytes)
block/gluster.c:

@@ -891,6 +891,7 @@
 static void qemu_gluster_refresh_limits(BlockDriverState *bs, Error **errp)
 {
     bs->bl.max_transfer = GLUSTER_MAX_TRANSFER;
+    bs->bl.max_pdiscard = SIZE_MAX;
 }
@@ -1003,19 +1004,19 @@
 static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
                                                       int64_t offset,
-                                                      int size,
+                                                      int64_t bytes,
                                                       BdrvRequestFlags flags)
-    acb.size = size;
+    acb.size = bytes;
-    ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
+    ret = glfs_zerofill_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb);
@@ -1297,18 +1298,20 @@
 static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
-                                                 int64_t offset, int size)
+                                                 int64_t offset, int64_t bytes)
 {
+    assert(bytes <= SIZE_MAX); /* rely on max_pdiscard */
+
-    ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
+    ret = glfs_discard_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb);
block/io.c (44 changed lines):

@@ -956,9 +956,9 @@
-static int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
-                                   QEMUIOVector *qiov, size_t qiov_offset,
-                                   Error **errp)
+int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
+                            QEMUIOVector *qiov, size_t qiov_offset,
+                            Error **errp)
@@ -1230,7 +1230,8 @@
 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
                                             int64_t offset, int64_t bytes,
                                             QEMUIOVector *qiov,
-                                            size_t qiov_offset, int flags)
+                                            size_t qiov_offset,
+                                            BdrvRequestFlags flags)
@@ -1868,7 +1869,8 @@ bdrv_co_do_pwrite_zeroes
-    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
+    int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
+                                            INT64_MAX);
@@ -2073,7 +2075,8 @@
 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
     BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
-    int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
+    int64_t align, QEMUIOVector *qiov, size_t qiov_offset,
+    BdrvRequestFlags flags)
@@ -2246,7 +2249,11 @@ bdrv_co_pwritev_part
-    ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
+    if (flags & BDRV_REQ_ZERO_WRITE) {
+        ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
+    } else {
+        ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
+    }
     if (ret < 0) {
         return ret;
     }
@@ -2810,7 +2817,12 @@ bdrv_co_readv_vmstate
-    int ret = -ENOTSUP;
+    int ret;
+
+    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
+    if (ret < 0) {
+        return ret;
+    }
@@ -2822,6 +2834,8 @@
     } else if (child_bs) {
         ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
+    } else {
+        ret = -ENOTSUP;
     }
@@ -2834,7 +2848,12 @@ bdrv_co_writev_vmstate
-    int ret = -ENOTSUP;
+    int ret;
+
+    ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
+    if (ret < 0) {
+        return ret;
+    }
@@ -2846,6 +2865,8 @@
     } else if (child_bs) {
         ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
+    } else {
+        ret = -ENOTSUP;
     }
@@ -3035,7 +3056,8 @@ bdrv_co_pdiscard
-    int max_pdiscard, ret;
+    int ret;
+    int64_t max_pdiscard;
@@ -3082,7 +3104,7 @@
-    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
+    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
                                    align);
     assert(max_pdiscard >= bs->bl.request_alignment);
block/iscsi.c:

@@ -427,14 +427,14 @@
-static bool is_byte_request_lun_aligned(int64_t offset, int count,
+static bool is_byte_request_lun_aligned(int64_t offset, int64_t bytes,
                                         IscsiLun *iscsilun)
 {
-    if (offset % iscsilun->block_size || count % iscsilun->block_size) {
+    if (offset % iscsilun->block_size || bytes % iscsilun->block_size) {
         error_report("iSCSI misaligned request: "
                      "iscsilun->block_size %u, offset %" PRIi64
-                     ", count %d",
-                     iscsilun->block_size, offset, count);
+                     ", bytes %" PRIi64,
+                     iscsilun->block_size, offset, bytes);
@@ -1138,7 +1138,8 @@
 static int
-coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
+coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset,
+                               int64_t bytes)
@@ -1154,6 +1155,12 @@
+    /*
+     * We don't want to overflow list.num which is uint32_t.
+     * We rely on our max_pdiscard.
+     */
+    assert(bytes / iscsilun->block_size <= UINT32_MAX);
+
     list.lba = offset / iscsilun->block_size;
     list.num = bytes / iscsilun->block_size;
@@ -1202,12 +1209,12 @@
 static int
 coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
-                                    int bytes, BdrvRequestFlags flags)
+                                    int64_t bytes, BdrvRequestFlags flags)
 {
-    uint32_t nb_blocks;
+    uint64_t nb_blocks;
@@ -1247,11 +1254,21 @@
     if (use_16_for_ws) {
+        /*
+         * iscsi_writesame16_task num_blocks argument is uint32_t. We rely here
+         * on our max_pwrite_zeroes limit.
+         */
+        assert(nb_blocks <= UINT32_MAX);
         iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba,
     } else {
+        /*
+         * iscsi_writesame10_task num_blocks argument is uint16_t. We rely here
+         * on our max_pwrite_zeroes limit.
+         */
+        assert(nb_blocks <= UINT16_MAX);
         iTask.task = iscsi_writesame10_task(iscsilun->iscsi, iscsilun->lun, lba,
@@ -2061,20 +2078,19 @@ iscsi_refresh_limits
     if (iscsilun->lbp.lbpu) {
-        if (iscsilun->bl.max_unmap < 0xffffffff / block_size) {
-            bs->bl.max_pdiscard =
-                iscsilun->bl.max_unmap * iscsilun->block_size;
-        }
+        bs->bl.max_pdiscard =
+            MIN_NON_ZERO(iscsilun->bl.max_unmap * iscsilun->block_size,
+                         (uint64_t)UINT32_MAX * iscsilun->block_size);
         bs->bl.pdiscard_alignment =
             iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
     } else {
         bs->bl.pdiscard_alignment = iscsilun->block_size;
     }
-    if (iscsilun->bl.max_ws_len < 0xffffffff / block_size) {
-        bs->bl.max_pwrite_zeroes =
-            iscsilun->bl.max_ws_len * iscsilun->block_size;
-    }
+    bs->bl.max_pwrite_zeroes =
+        MIN_NON_ZERO(iscsilun->bl.max_ws_len * iscsilun->block_size,
+                     max_xfer_len * iscsilun->block_size);
@@ -2169,10 +2185,10 @@
 static int coroutine_fn iscsi_co_copy_range_from(BlockDriverState *bs,
                                                  BdrvChild *src,
-                                                 uint64_t src_offset,
+                                                 int64_t src_offset,
                                                  BdrvChild *dst,
-                                                 uint64_t dst_offset,
-                                                 uint64_t bytes,
+                                                 int64_t dst_offset,
+                                                 int64_t bytes,
                                                  BdrvRequestFlags read_flags,
                                                  BdrvRequestFlags write_flags)
@@ -2310,10 +2326,10 @@
 static int coroutine_fn iscsi_co_copy_range_to(BlockDriverState *bs,
                                                BdrvChild *src,
-                                               uint64_t src_offset,
+                                               int64_t src_offset,
                                                BdrvChild *dst,
-                                               uint64_t dst_offset,
-                                               uint64_t bytes,
+                                               int64_t dst_offset,
+                                               int64_t bytes,
                                                BdrvRequestFlags read_flags,
                                                BdrvRequestFlags write_flags)
block/mirror.c:

@@ -1402,7 +1402,7 @@
 static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
-    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
+    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
@@ -1456,7 +1456,7 @@
 static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
-    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
+    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
@@ -1501,14 +1501,14 @@
 static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
-    int64_t offset, int bytes, BdrvRequestFlags flags)
+    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
 static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
-    int64_t offset, int bytes)
+    int64_t offset, int64_t bytes)
block/nbd.c (439 changed lines):
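The block/nbd.c changes below replace the long-running connection_co receiver coroutine with a per-request nbd_receive_replies() path, and the "check that received handle is valid" patch makes the client verify that a reply's handle maps to a request slot that can still expect a reply before waking it. A minimal, hypothetical sketch of that validity check follows; only MAX_NBD_REQUESTS and the reply_possible flag are taken from the diff, while the Request struct, the in_use field and the handle_is_valid() helper are illustrative, and the real handle encodes more than just the slot index:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NBD_REQUESTS 16

typedef struct {
    bool in_use;          /* slot owns an outstanding request */
    bool reply_possible;  /* request sent; reply header not yet consumed */
} Request;

/* Map a reply handle back to a slot and decide whether it may be woken. */
static bool handle_is_valid(const Request *reqs, uint64_t handle)
{
    uint64_t index = handle;  /* simplified: real handles carry extra bits */

    if (index >= MAX_NBD_REQUESTS) {
        return false;  /* out of range: treat as a protocol error */
    }
    return reqs[index].in_use && reqs[index].reply_possible;
}

int main(void)
{
    Request reqs[MAX_NBD_REQUESTS] = {
        [2] = { .in_use = true, .reply_possible = true },
    };

    printf("handle 2:  %s\n", handle_is_valid(reqs, 2) ? "valid" : "invalid");
    printf("handle 99: %s\n", handle_is_valid(reqs, 99) ? "valid" : "invalid");
    return 0;
}

In the actual diff the equivalent check lives in nbd_receive_replies(), which reports -EINVAL through nbd_channel_error() when the handle does not pass it.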
@ -57,7 +57,8 @@
|
|||
typedef struct {
|
||||
Coroutine *coroutine;
|
||||
uint64_t offset; /* original offset of the request */
|
||||
bool receiving; /* waiting for connection_co? */
|
||||
bool receiving; /* sleeping in the yield in nbd_receive_replies */
|
||||
bool reply_possible; /* reply header not yet received */
|
||||
} NBDClientRequest;
|
||||
|
||||
typedef enum NBDClientState {
|
||||
|
@ -73,14 +74,10 @@ typedef struct BDRVNBDState {
|
|||
|
||||
CoMutex send_mutex;
|
||||
CoQueue free_sema;
|
||||
Coroutine *connection_co;
|
||||
Coroutine *teardown_co;
|
||||
QemuCoSleep reconnect_sleep;
|
||||
bool drained;
|
||||
bool wait_drained_end;
|
||||
|
||||
CoMutex receive_mutex;
|
||||
int in_flight;
|
||||
NBDClientState state;
|
||||
bool wait_in_flight;
|
||||
|
||||
QEMUTimer *reconnect_delay_timer;
|
||||
|
||||
|
@ -127,33 +124,44 @@ static bool nbd_client_connected(BDRVNBDState *s)
|
|||
return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED;
|
||||
}
|
||||
|
||||
static bool nbd_recv_coroutine_wake_one(NBDClientRequest *req)
|
||||
{
|
||||
if (req->receiving) {
|
||||
req->receiving = false;
|
||||
aio_co_wake(req->coroutine);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void nbd_recv_coroutines_wake(BDRVNBDState *s, bool all)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
|
||||
if (nbd_recv_coroutine_wake_one(&s->requests[i]) && !all) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void nbd_channel_error(BDRVNBDState *s, int ret)
|
||||
{
|
||||
if (nbd_client_connected(s)) {
|
||||
qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
|
||||
}
|
||||
|
||||
if (ret == -EIO) {
|
||||
if (nbd_client_connected(s)) {
|
||||
s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
|
||||
NBD_CLIENT_CONNECTING_NOWAIT;
|
||||
}
|
||||
} else {
|
||||
if (nbd_client_connected(s)) {
|
||||
qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
|
||||
}
|
||||
s->state = NBD_CLIENT_QUIT;
|
||||
}
|
||||
}
|
||||
|
||||
static void nbd_recv_coroutines_wake_all(BDRVNBDState *s)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < MAX_NBD_REQUESTS; i++) {
|
||||
NBDClientRequest *req = &s->requests[i];
|
||||
|
||||
if (req->coroutine && req->receiving) {
|
||||
req->receiving = false;
|
||||
aio_co_wake(req->coroutine);
|
||||
}
|
||||
}
|
||||
nbd_recv_coroutines_wake(s, true);
|
||||
}
|
||||
|
||||
static void reconnect_delay_timer_del(BDRVNBDState *s)
|
||||
|
@ -170,6 +178,7 @@ static void reconnect_delay_timer_cb(void *opaque)
|
|||
|
||||
if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
|
||||
s->state = NBD_CLIENT_CONNECTING_NOWAIT;
|
||||
nbd_co_establish_connection_cancel(s->conn);
|
||||
while (qemu_co_enter_next(&s->free_sema, NULL)) {
|
||||
/* Resume all queued requests */
|
||||
}
|
||||
|
@ -192,113 +201,21 @@ static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
|
|||
timer_mod(s->reconnect_delay_timer, expire_time_ns);
|
||||
}
|
||||
|
||||
static void nbd_client_detach_aio_context(BlockDriverState *bs)
|
||||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
|
||||
/* Timer is deleted in nbd_client_co_drain_begin() */
|
||||
assert(!s->reconnect_delay_timer);
|
||||
/*
|
||||
* If reconnect is in progress we may have no ->ioc. It will be
|
||||
* re-instantiated in the proper aio context once the connection is
|
||||
* reestablished.
|
||||
*/
|
||||
if (s->ioc) {
|
||||
qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
|
||||
}
|
||||
}
|
||||
|
||||
static void nbd_client_attach_aio_context_bh(void *opaque)
|
||||
{
|
||||
BlockDriverState *bs = opaque;
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
|
||||
if (s->connection_co) {
|
||||
/*
|
||||
* The node is still drained, so we know the coroutine has yielded in
|
||||
* nbd_read_eof(), the only place where bs->in_flight can reach 0, or
|
||||
* it is entered for the first time. Both places are safe for entering
|
||||
* the coroutine.
|
||||
*/
|
||||
qemu_aio_coroutine_enter(bs->aio_context, s->connection_co);
|
||||
}
|
||||
bdrv_dec_in_flight(bs);
|
||||
}
|
||||
|
||||
static void nbd_client_attach_aio_context(BlockDriverState *bs,
|
||||
AioContext *new_context)
|
||||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
|
||||
/*
|
||||
* s->connection_co is either yielded from nbd_receive_reply or from
|
||||
* nbd_co_reconnect_loop()
|
||||
*/
|
||||
if (nbd_client_connected(s)) {
|
||||
qio_channel_attach_aio_context(QIO_CHANNEL(s->ioc), new_context);
|
||||
}
|
||||
|
||||
bdrv_inc_in_flight(bs);
|
||||
|
||||
/*
|
||||
* Need to wait here for the BH to run because the BH must run while the
|
||||
* node is still drained.
|
||||
*/
|
||||
aio_wait_bh_oneshot(new_context, nbd_client_attach_aio_context_bh, bs);
|
||||
}
|
||||
|
||||
static void coroutine_fn nbd_client_co_drain_begin(BlockDriverState *bs)
|
||||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
|
||||
s->drained = true;
|
||||
qemu_co_sleep_wake(&s->reconnect_sleep);
|
||||
|
||||
nbd_co_establish_connection_cancel(s->conn);
|
||||
|
||||
reconnect_delay_timer_del(s);
|
||||
|
||||
if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
|
||||
s->state = NBD_CLIENT_CONNECTING_NOWAIT;
|
||||
qemu_co_queue_restart_all(&s->free_sema);
|
||||
}
|
||||
}
|
||||
|
||||
static void coroutine_fn nbd_client_co_drain_end(BlockDriverState *bs)
|
||||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
|
||||
s->drained = false;
|
||||
if (s->wait_drained_end) {
|
||||
s->wait_drained_end = false;
|
||||
aio_co_wake(s->connection_co);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void nbd_teardown_connection(BlockDriverState *bs)
|
||||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
|
||||
assert(!s->in_flight);
|
||||
|
||||
if (s->ioc) {
|
||||
/* finish any pending coroutines */
|
||||
qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
|
||||
yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
|
||||
nbd_yank, s->bs);
|
||||
object_unref(OBJECT(s->ioc));
|
||||
s->ioc = NULL;
|
||||
}
|
||||
|
||||
s->state = NBD_CLIENT_QUIT;
|
||||
if (s->connection_co) {
|
||||
qemu_co_sleep_wake(&s->reconnect_sleep);
|
||||
nbd_co_establish_connection_cancel(s->conn);
|
||||
}
|
||||
if (qemu_in_coroutine()) {
|
||||
s->teardown_co = qemu_coroutine_self();
|
||||
/* connection_co resumes us when it terminates */
|
||||
qemu_coroutine_yield();
|
||||
s->teardown_co = NULL;
|
||||
} else {
|
||||
BDRV_POLL_WHILE(bs, s->connection_co);
|
||||
}
|
||||
assert(!s->connection_co);
|
||||
}
|
||||
|
||||
static bool nbd_client_connecting(BDRVNBDState *s)
|
||||
|
@ -363,10 +280,11 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
|
|||
{
|
||||
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
|
||||
int ret;
|
||||
bool blocking = nbd_client_connecting_wait(s);
|
||||
|
||||
assert(!s->ioc);
|
||||
|
||||
s->ioc = nbd_co_establish_connection(s->conn, &s->info, true, errp);
|
||||
s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
|
||||
if (!s->ioc) {
|
||||
return -ECONNREFUSED;
|
||||
}
|
||||
|
@ -402,29 +320,22 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* called under s->send_mutex */
|
||||
static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
|
||||
{
|
||||
if (!nbd_client_connecting(s)) {
|
||||
return;
|
||||
}
|
||||
assert(nbd_client_connecting(s));
|
||||
assert(s->in_flight == 0);
|
||||
|
||||
/* Wait for completion of all in-flight requests */
|
||||
|
||||
qemu_co_mutex_lock(&s->send_mutex);
|
||||
|
||||
while (s->in_flight > 0) {
|
||||
qemu_co_mutex_unlock(&s->send_mutex);
|
||||
nbd_recv_coroutines_wake_all(s);
|
||||
s->wait_in_flight = true;
|
||||
qemu_coroutine_yield();
|
||||
s->wait_in_flight = false;
|
||||
qemu_co_mutex_lock(&s->send_mutex);
|
||||
}
|
||||
|
||||
qemu_co_mutex_unlock(&s->send_mutex);
|
||||
|
||||
if (!nbd_client_connecting(s)) {
|
||||
return;
|
||||
if (nbd_client_connecting_wait(s) && s->reconnect_delay &&
|
||||
!s->reconnect_delay_timer)
|
||||
{
|
||||
/*
|
||||
* It's first reconnect attempt after switching to
|
||||
* NBD_CLIENT_CONNECTING_WAIT
|
||||
*/
|
||||
reconnect_delay_timer_init(s,
|
||||
qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
|
||||
s->reconnect_delay * NANOSECONDS_PER_SECOND);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -444,135 +355,73 @@ static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
|
|||
nbd_co_do_establish_connection(s->bs, NULL);
|
||||
}
|
||||
|
||||
static coroutine_fn void nbd_co_reconnect_loop(BDRVNBDState *s)
|
||||
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
|
||||
{
|
||||
uint64_t timeout = 1 * NANOSECONDS_PER_SECOND;
|
||||
uint64_t max_timeout = 16 * NANOSECONDS_PER_SECOND;
|
||||
int ret;
|
||||
uint64_t ind = HANDLE_TO_INDEX(s, handle), ind2;
|
||||
QEMU_LOCK_GUARD(&s->receive_mutex);
|
||||
|
||||
if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
|
||||
reconnect_delay_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
|
||||
s->reconnect_delay * NANOSECONDS_PER_SECOND);
|
||||
}
|
||||
|
||||
nbd_reconnect_attempt(s);
|
||||
|
||||
while (nbd_client_connecting(s)) {
|
||||
if (s->drained) {
|
||||
bdrv_dec_in_flight(s->bs);
|
||||
s->wait_drained_end = true;
|
||||
while (s->drained) {
|
||||
/*
|
||||
* We may be entered once from nbd_client_attach_aio_context_bh
|
||||
* and then from nbd_client_co_drain_end. So here is a loop.
|
||||
*/
|
||||
qemu_coroutine_yield();
|
||||
}
|
||||
bdrv_inc_in_flight(s->bs);
|
||||
} else {
|
||||
qemu_co_sleep_ns_wakeable(&s->reconnect_sleep,
|
||||
QEMU_CLOCK_REALTIME, timeout);
|
||||
if (s->drained) {
|
||||
continue;
|
||||
}
|
||||
if (timeout < max_timeout) {
|
||||
timeout *= 2;
|
||||
}
|
||||
}
|
||||
|
||||
nbd_reconnect_attempt(s);
|
||||
}
|
||||
|
||||
reconnect_delay_timer_del(s);
|
||||
}
|
||||
|
||||
static coroutine_fn void nbd_connection_entry(void *opaque)
|
||||
{
|
||||
BDRVNBDState *s = opaque;
|
||||
uint64_t i;
|
||||
int ret = 0;
|
||||
Error *local_err = NULL;
|
||||
|
||||
while (qatomic_load_acquire(&s->state) != NBD_CLIENT_QUIT) {
|
||||
/*
|
||||
* The NBD client can only really be considered idle when it has
|
||||
* yielded from qio_channel_readv_all_eof(), waiting for data. This is
|
||||
* the point where the additional scheduled coroutine entry happens
|
||||
* after nbd_client_attach_aio_context().
|
||||
*
|
||||
* Therefore we keep an additional in_flight reference all the time and
|
||||
* only drop it temporarily here.
|
||||
*/
|
||||
|
||||
if (nbd_client_connecting(s)) {
|
||||
nbd_co_reconnect_loop(s);
|
||||
while (true) {
|
||||
if (s->reply.handle == handle) {
|
||||
/* We are done */
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!nbd_client_connected(s)) {
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (s->reply.handle != 0) {
|
||||
/*
|
||||
* Some other request is being handled now. It should already be
|
||||
* woken by whoever set s->reply.handle (or never wait in this
|
||||
* yield). So, we should not wake it here.
|
||||
*/
|
||||
ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
|
||||
assert(!s->requests[ind2].receiving);
|
||||
|
||||
s->requests[ind].receiving = true;
|
||||
qemu_co_mutex_unlock(&s->receive_mutex);
|
||||
|
||||
qemu_coroutine_yield();
|
||||
/*
|
||||
* We may be woken for 3 reasons:
|
||||
* 1. From this function, executing in parallel coroutine, when our
|
||||
* handle is received.
|
||||
* 2. From nbd_channel_error(), when connection is lost.
|
||||
* 3. From nbd_co_receive_one_chunk(), when previous request is
|
||||
* finished and s->reply.handle set to 0.
|
||||
* Anyway, it's OK to lock the mutex and go to the next iteration.
|
||||
*/
|
||||
|
||||
qemu_co_mutex_lock(&s->receive_mutex);
|
||||
assert(!s->requests[ind].receiving);
|
||||
continue;
|
||||
}
|
||||
|
||||
/* We are under mutex and handle is 0. We have to do the dirty work. */
|
||||
assert(s->reply.handle == 0);
|
||||
ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, &local_err);
|
||||
|
||||
if (local_err) {
|
||||
trace_nbd_read_reply_entry_fail(ret, error_get_pretty(local_err));
|
||||
error_free(local_err);
|
||||
local_err = NULL;
|
||||
}
|
||||
ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
|
||||
if (ret <= 0) {
|
||||
nbd_channel_error(s, ret ? ret : -EIO);
|
||||
continue;
|
||||
ret = ret ? ret : -EIO;
|
||||
nbd_channel_error(s, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* There's no need for a mutex on the receive side, because the
|
||||
* handler acts as a synchronization point and ensures that only
|
||||
* one coroutine is called until the reply finishes.
|
||||
*/
|
||||
i = HANDLE_TO_INDEX(s, s->reply.handle);
|
||||
if (i >= MAX_NBD_REQUESTS ||
|
||||
!s->requests[i].coroutine ||
|
||||
!s->requests[i].receiving ||
|
||||
(nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
|
||||
{
|
||||
if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) {
|
||||
nbd_channel_error(s, -EINVAL);
|
||||
continue;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* We're woken up again by the request itself. Note that there
|
||||
* is no race between yielding and reentering connection_co. This
|
||||
* is because:
|
||||
*
|
||||
* - if the request runs on the same AioContext, it is only
|
||||
* entered after we yield
|
||||
*
|
||||
* - if the request runs on a different AioContext, reentering
|
||||
* connection_co happens through a bottom half, which can only
|
||||
* run after we yield.
|
||||
*/
|
||||
s->requests[i].receiving = false;
|
||||
aio_co_wake(s->requests[i].coroutine);
|
||||
qemu_coroutine_yield();
|
||||
if (s->reply.handle == handle) {
|
||||
/* We are done */
|
||||
return 0;
|
||||
}
|
||||
ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
|
||||
if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].reply_possible) {
|
||||
nbd_channel_error(s, -EINVAL);
|
||||
return -EINVAL;
|
||||
}
|
||||
nbd_recv_coroutine_wake_one(&s->requests[ind2]);
|
||||
}
|
||||
|
||||
qemu_co_queue_restart_all(&s->free_sema);
|
||||
nbd_recv_coroutines_wake_all(s);
|
||||
bdrv_dec_in_flight(s->bs);
|
||||
|
||||
s->connection_co = NULL;
|
||||
if (s->ioc) {
|
||||
qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
|
||||
yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
|
||||
nbd_yank, s->bs);
|
||||
object_unref(OBJECT(s->ioc));
|
||||
s->ioc = NULL;
|
||||
}
|
||||
|
||||
if (s->teardown_co) {
|
||||
aio_co_wake(s->teardown_co);
|
||||
}
|
||||
aio_wait_kick();
|
||||
}
|
||||
|
||||
static int nbd_co_send_request(BlockDriverState *bs,
|
||||
|
@ -583,10 +432,17 @@ static int nbd_co_send_request(BlockDriverState *bs,
|
|||
int rc, i = -1;
|
||||
|
||||
qemu_co_mutex_lock(&s->send_mutex);
|
||||
while (s->in_flight == MAX_NBD_REQUESTS || nbd_client_connecting_wait(s)) {
|
||||
|
||||
while (s->in_flight == MAX_NBD_REQUESTS ||
|
||||
(!nbd_client_connected(s) && s->in_flight > 0))
|
||||
{
|
||||
qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
|
||||
}
|
||||
|
||||
if (nbd_client_connecting(s)) {
|
||||
nbd_reconnect_attempt(s);
|
||||
}
|
||||
|
||||
if (!nbd_client_connected(s)) {
|
||||
rc = -EIO;
|
||||
goto err;
|
||||
|
@ -606,6 +462,7 @@ static int nbd_co_send_request(BlockDriverState *bs,
|
|||
s->requests[i].coroutine = qemu_coroutine_self();
|
||||
s->requests[i].offset = request->from;
|
||||
s->requests[i].receiving = false;
|
||||
s->requests[i].reply_possible = true;
|
||||
|
||||
request->handle = INDEX_TO_HANDLE(s, i);
|
||||
|
||||
|
@ -633,10 +490,6 @@ err:
|
|||
if (i != -1) {
|
||||
s->requests[i].coroutine = NULL;
|
||||
s->in_flight--;
|
||||
}
|
||||
if (s->in_flight == 0 && s->wait_in_flight) {
|
||||
aio_co_wake(s->connection_co);
|
||||
} else {
|
||||
qemu_co_queue_next(&s->free_sema);
|
||||
}
|
||||
}
|
||||
|
@ -935,10 +788,7 @@ static coroutine_fn int nbd_co_do_receive_one_chunk(
|
|||
}
|
||||
*request_ret = 0;
|
||||
|
||||
/* Wait until we're woken up by nbd_connection_entry. */
|
||||
s->requests[i].receiving = true;
|
||||
qemu_coroutine_yield();
|
||||
assert(!s->requests[i].receiving);
|
||||
nbd_receive_replies(s, handle);
|
||||
if (!nbd_client_connected(s)) {
|
||||
error_setg(errp, "Connection closed");
|
||||
return -EIO;
|
||||
|
@ -1031,14 +881,7 @@ static coroutine_fn int nbd_co_receive_one_chunk(
|
|||
}
|
||||
s->reply.handle = 0;
|
||||
|
||||
if (s->connection_co && !s->wait_in_flight) {
|
||||
/*
|
||||
* We must check s->wait_in_flight, because we may entered by
|
||||
* nbd_recv_coroutines_wake_all(), in this case we should not
|
||||
* wake connection_co here, it will woken by last request.
|
||||
*/
|
||||
aio_co_wake(s->connection_co);
|
||||
}
|
||||
nbd_recv_coroutines_wake(s, false);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -1149,11 +992,7 @@ break_loop:
|
|||
|
||||
qemu_co_mutex_lock(&s->send_mutex);
|
||||
s->in_flight--;
|
||||
if (s->in_flight == 0 && s->wait_in_flight) {
|
||||
aio_co_wake(s->connection_co);
|
||||
} else {
|
||||
qemu_co_queue_next(&s->free_sema);
|
||||
}
|
||||
qemu_co_queue_next(&s->free_sema);
|
||||
qemu_co_mutex_unlock(&s->send_mutex);
|
||||
|
||||
return false;
|
||||
|
@ -1322,8 +1161,9 @@ static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
|
|||
return ret ? ret : request_ret;
}

-static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov, int flags)
+static int nbd_client_co_preadv(BlockDriverState *bs, int64_t offset,
+int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
int ret, request_ret;
Error *local_err = NULL;

@@ -1380,8 +1220,9 @@ static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
return ret ? ret : request_ret;
}

-static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov, int flags)
+static int nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset,
+int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = {

@@ -1405,15 +1246,17 @@ static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
}

static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
-int bytes, BdrvRequestFlags flags)
+int64_t bytes, BdrvRequestFlags flags)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = {
.type = NBD_CMD_WRITE_ZEROES,
.from = offset,
-.len = bytes,
+.len = bytes, /* .len is uint32_t actually */
};

+assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */
+
assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
return -ENOTSUP;

@@ -1453,15 +1296,17 @@ static int nbd_client_co_flush(BlockDriverState *bs)
}

static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
-int bytes)
+int64_t bytes)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
NBDRequest request = {
.type = NBD_CMD_TRIM,
.from = offset,
-.len = bytes,
+.len = bytes, /* len is uint32_t */
};

+assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */
+
assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
return 0;

@@ -1969,6 +1814,7 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
s->bs = bs;
qemu_co_mutex_init(&s->send_mutex);
qemu_co_queue_init(&s->free_sema);
+qemu_co_mutex_init(&s->receive_mutex);

if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
return -EEXIST;

@@ -1983,14 +1829,13 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
s->x_dirty_bitmap, s->tlscreds);

+/* TODO: Configurable retry-until-timeout behaviour. */
s->state = NBD_CLIENT_CONNECTING_WAIT;
ret = nbd_do_establish_connection(bs, errp);
if (ret < 0) {
goto fail;
}

-s->connection_co = qemu_coroutine_create(nbd_connection_entry, s);
-bdrv_inc_in_flight(bs);
-aio_co_schedule(bdrv_get_aio_context(bs), s->connection_co);
+nbd_client_connection_enable_retry(s->conn);

return 0;

@@ -2144,6 +1989,8 @@ static void nbd_cancel_in_flight(BlockDriverState *bs)
s->state = NBD_CLIENT_CONNECTING_NOWAIT;
qemu_co_queue_restart_all(&s->free_sema);
}

+nbd_co_establish_connection_cancel(s->conn);
}

static BlockDriver bdrv_nbd = {

@@ -2164,10 +2011,6 @@ static BlockDriver bdrv_nbd = {
.bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_co_truncate = nbd_co_truncate,
.bdrv_getlength = nbd_getlength,
-.bdrv_detach_aio_context = nbd_client_detach_aio_context,
-.bdrv_attach_aio_context = nbd_client_attach_aio_context,
-.bdrv_co_drain_begin = nbd_client_co_drain_begin,
-.bdrv_co_drain_end = nbd_client_co_drain_end,
.bdrv_refresh_filename = nbd_refresh_filename,
.bdrv_co_block_status = nbd_client_co_block_status,
.bdrv_dirname = nbd_dirname,

@@ -2193,10 +2036,6 @@ static BlockDriver bdrv_nbd_tcp = {
.bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_co_truncate = nbd_co_truncate,
.bdrv_getlength = nbd_getlength,
-.bdrv_detach_aio_context = nbd_client_detach_aio_context,
-.bdrv_attach_aio_context = nbd_client_attach_aio_context,
-.bdrv_co_drain_begin = nbd_client_co_drain_begin,
-.bdrv_co_drain_end = nbd_client_co_drain_end,
.bdrv_refresh_filename = nbd_refresh_filename,
.bdrv_co_block_status = nbd_client_co_block_status,
.bdrv_dirname = nbd_dirname,

@@ -2222,10 +2061,6 @@ static BlockDriver bdrv_nbd_unix = {
.bdrv_refresh_limits = nbd_refresh_limits,
.bdrv_co_truncate = nbd_co_truncate,
.bdrv_getlength = nbd_getlength,
-.bdrv_detach_aio_context = nbd_client_detach_aio_context,
-.bdrv_attach_aio_context = nbd_client_attach_aio_context,
-.bdrv_co_drain_begin = nbd_client_co_drain_begin,
-.bdrv_co_drain_end = nbd_client_co_drain_end,
.bdrv_refresh_filename = nbd_refresh_filename,
.bdrv_co_block_status = nbd_client_co_block_status,
.bdrv_dirname = nbd_dirname,
block/nfs.c (12 changed lines)

@@ -262,9 +262,9 @@ nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data,
nfs_co_generic_bh_cb, task);
}

-static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *iov,
-int flags)
+static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, int64_t offset,
+int64_t bytes, QEMUIOVector *iov,
+BdrvRequestFlags flags)
{
NFSClient *client = bs->opaque;
NFSRPC task;

@@ -296,9 +296,9 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset,
return 0;
}

-static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *iov,
-int flags)
+static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, int64_t offset,
+int64_t bytes, QEMUIOVector *iov,
+BdrvRequestFlags flags)
{
NFSClient *client = bs->opaque;
NFSRPC task;
block/null.c (18 changed lines)

@@ -116,8 +116,9 @@ static coroutine_fn int null_co_common(BlockDriverState *bs)
}

static coroutine_fn int null_co_preadv(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
BDRVNullState *s = bs->opaque;

@@ -129,8 +130,9 @@ static coroutine_fn int null_co_preadv(BlockDriverState *bs,
}

static coroutine_fn int null_co_pwritev(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
return null_co_common(bs);
}

@@ -187,8 +189,8 @@ static inline BlockAIOCB *null_aio_common(BlockDriverState *bs,
}

static BlockAIOCB *null_aio_preadv(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags,
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb,
void *opaque)
{

@@ -202,8 +204,8 @@ static BlockAIOCB *null_aio_preadv(BlockDriverState *bs,
}

static BlockAIOCB *null_aio_pwritev(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags,
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, BdrvRequestFlags flags,
BlockCompletionFunc *cb,
void *opaque)
{
block/nvme.c (48 changed lines)

@@ -1251,15 +1251,17 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
}

static coroutine_fn int nvme_co_preadv(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
return nvme_co_prw(bs, offset, bytes, qiov, false, flags);
}

static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
return nvme_co_prw(bs, offset, bytes, qiov, true, flags);
}

@@ -1294,19 +1296,29 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)

static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset,
-int bytes,
+int64_t bytes,
BdrvRequestFlags flags)
{
BDRVNVMeState *s = bs->opaque;
NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];
NVMeRequest *req;

-uint32_t cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
+uint32_t cdw12;

if (!s->supports_write_zeroes) {
return -ENOTSUP;
}

+if (bytes == 0) {
+return 0;
+}
+
+cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF;
+/*
+* We should not lose information. pwrite_zeroes_alignment and
+* max_pwrite_zeroes guarantees it.
+*/
+assert(((cdw12 + 1) << s->blkshift) == bytes);

NvmeCmd cmd = {
.opcode = NVME_CMD_WRITE_ZEROES,
.nsid = cpu_to_le32(s->nsid),

@@ -1348,7 +1360,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,

static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
int64_t offset,
-int bytes)
+int64_t bytes)
{
BDRVNVMeState *s = bs->opaque;
NVMeQueuePair *ioq = s->queues[INDEX_IO(0)];

@@ -1375,6 +1387,14 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,

assert(s->queue_count > 1);

+/*
+* Filling the @buf requires @offset and @bytes to satisfy restrictions
+* defined in nvme_refresh_limits().
+*/
+assert(QEMU_IS_ALIGNED(bytes, 1UL << s->blkshift));
+assert(QEMU_IS_ALIGNED(offset, 1UL << s->blkshift));
+assert((bytes >> s->blkshift) <= UINT32_MAX);
+
buf = qemu_try_memalign(s->page_size, s->page_size);
if (!buf) {
return -ENOMEM;

@@ -1470,6 +1490,18 @@ static void nvme_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.opt_mem_alignment = s->page_size;
bs->bl.request_alignment = s->page_size;
bs->bl.max_transfer = s->max_transfer;
+
+/*
+* Look at nvme_co_pwrite_zeroes: after shift and decrement we should get
+* at most 0xFFFF
+*/
+bs->bl.max_pwrite_zeroes = 1ULL << (s->blkshift + 16);
+bs->bl.pwrite_zeroes_alignment = MAX(bs->bl.request_alignment,
+1UL << s->blkshift);
+
+bs->bl.max_pdiscard = (uint64_t)UINT32_MAX << s->blkshift;
+bs->bl.pdiscard_alignment = MAX(bs->bl.request_alignment,
+1UL << s->blkshift);
}

static void nvme_detach_aio_context(BlockDriverState *bs)
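The comment the series adds to nvme_refresh_limits() compresses a small arithmetic argument: NVMe Write Zeroes carries the block count minus one in the low 16 bits of CDW12, so the largest request that still round-trips through that field is 0x10000 logical blocks, i.e. 1 << (blkshift + 16) bytes. A minimal standalone sketch of that bound, assuming a 512-byte logical block size (blkshift = 9); the helper name is illustrative and not part of the QEMU tree:

#include <assert.h>
#include <stdint.h>

/* Illustrative restatement of the cap chosen by nvme_refresh_limits(). */
static uint64_t max_pwrite_zeroes_for(unsigned blkshift)
{
    /* CDW12 stores "number of logical blocks - 1" in 16 bits. */
    return 1ULL << (blkshift + 16);
}

int main(void)
{
    unsigned blkshift = 9;                               /* assumed 512-byte blocks */
    uint64_t bytes = max_pwrite_zeroes_for(blkshift);    /* 32 MiB */
    uint32_t cdw12 = ((bytes >> blkshift) - 1) & 0xFFFF; /* 0xFFFF */

    /* Same invariant nvme_co_pwrite_zeroes() asserts: nothing is truncated. */
    assert(((uint64_t)(cdw12 + 1) << blkshift) == bytes);
    return 0;
}

With blkshift = 9 the cap works out to 32 MiB, and the assert mirrors the one the hunk adds to nvme_co_pwrite_zeroes().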
@@ -227,15 +227,15 @@ static void preallocate_reopen_abort(BDRVReopenState *state)
}

static coroutine_fn int preallocate_co_preadv_part(
-BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, size_t qiov_offset, int flags)
+BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags)
{
return bdrv_co_preadv_part(bs->file, offset, bytes, qiov, qiov_offset,
flags);
}

static int coroutine_fn preallocate_co_pdiscard(BlockDriverState *bs,
-int64_t offset, int bytes)
+int64_t offset, int64_t bytes)
{
return bdrv_co_pdiscard(bs->file, offset, bytes);
}

@@ -337,7 +337,7 @@ static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset,
}

static int coroutine_fn preallocate_co_pwrite_zeroes(BlockDriverState *bs,
-int64_t offset, int bytes, BdrvRequestFlags flags)
+int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
bool want_merge_zero =
!(flags & ~(BDRV_REQ_ZERO_WRITE | BDRV_REQ_NO_FALLBACK));

@@ -349,11 +349,11 @@ static int coroutine_fn preallocate_co_pwrite_zeroes(BlockDriverState *bs,
}

static coroutine_fn int preallocate_co_pwritev_part(BlockDriverState *bs,
-uint64_t offset,
-uint64_t bytes,
+int64_t offset,
+int64_t bytes,
QEMUIOVector *qiov,
size_t qiov_offset,
-int flags)
+BdrvRequestFlags flags)
{
handle_write(bs, offset, bytes, false);
block/qcow.c (16 changed lines)

@@ -617,9 +617,9 @@ static void qcow_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.request_alignment = BDRV_SECTOR_SIZE;
}

-static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov,
-int flags)
+static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, int64_t offset,
+int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
BDRVQcowState *s = bs->opaque;
int offset_in_cluster;

@@ -714,9 +714,9 @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset,
return ret;
}

-static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov,
-int flags)
+static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, int64_t offset,
+int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
BDRVQcowState *s = bs->opaque;
int offset_in_cluster;

@@ -1047,8 +1047,8 @@ static int qcow_make_empty(BlockDriverState *bs)
/* XXX: put compressed sectors first, then all the cluster aligned
tables to avoid losing bytes in alignment */
static coroutine_fn int
-qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov)
+qcow_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov)
{
BDRVQcowState *s = bs->opaque;
z_stream strm;
@@ -505,7 +505,19 @@ static int coroutine_fn do_perform_cow_read(BlockDriverState *bs,
return -ENOMEDIUM;
}

-/* Call .bdrv_co_readv() directly instead of using the public block-layer
+/*
+* We never deal with requests that don't satisfy
+* bdrv_check_qiov_request(), and aligning requests to clusters never
+* breaks this condition. So, do some assertions before calling
+* bs->drv->bdrv_co_preadv_part() which has int64_t arguments.
+*/
+assert(src_cluster_offset <= INT64_MAX);
+assert(src_cluster_offset + offset_in_cluster <= INT64_MAX);
+assert(qiov->size <= INT64_MAX);
+bdrv_check_qiov_request(src_cluster_offset + offset_in_cluster, qiov->size,
+qiov, 0, &error_abort);
+/*
+* Call .bdrv_co_readv() directly instead of using the public block-layer
* interface. This avoids double I/O throttling and request tracking,
* which can lead to deadlock when block layer copy-on-read is enabled.
*/
@@ -2310,9 +2310,10 @@ static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task)
}

static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
+int64_t offset, int64_t bytes,
QEMUIOVector *qiov,
-size_t qiov_offset, int flags)
+size_t qiov_offset,
+BdrvRequestFlags flags)
{
BDRVQcow2State *s = bs->opaque;
int ret = 0;

@@ -2596,8 +2597,8 @@ static coroutine_fn int qcow2_co_pwritev_task_entry(AioTask *task)
}

static coroutine_fn int qcow2_co_pwritev_part(
-BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, size_t qiov_offset, int flags)
+BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags)
{
BDRVQcow2State *s = bs->opaque;
int offset_in_cluster;

@@ -3940,7 +3941,7 @@ static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
}

static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
-int64_t offset, int bytes, BdrvRequestFlags flags)
+int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
int ret;
BDRVQcow2State *s = bs->opaque;

@@ -3995,7 +3996,7 @@ static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
}

static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
-int64_t offset, int bytes)
+int64_t offset, int64_t bytes)
{
int ret;
BDRVQcow2State *s = bs->opaque;

@@ -4025,9 +4026,9 @@ static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,

static int coroutine_fn
qcow2_co_copy_range_from(BlockDriverState *bs,
-BdrvChild *src, uint64_t src_offset,
-BdrvChild *dst, uint64_t dst_offset,
-uint64_t bytes, BdrvRequestFlags read_flags,
+BdrvChild *src, int64_t src_offset,
+BdrvChild *dst, int64_t dst_offset,
+int64_t bytes, BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
BDRVQcow2State *s = bs->opaque;

@@ -4108,9 +4109,9 @@ out:

static int coroutine_fn
qcow2_co_copy_range_to(BlockDriverState *bs,
-BdrvChild *src, uint64_t src_offset,
-BdrvChild *dst, uint64_t dst_offset,
-uint64_t bytes, BdrvRequestFlags read_flags,
+BdrvChild *src, int64_t src_offset,
+BdrvChild *dst, int64_t dst_offset,
+int64_t bytes, BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
BDRVQcow2State *s = bs->opaque;

@@ -4630,7 +4631,7 @@ static coroutine_fn int qcow2_co_pwritev_compressed_task_entry(AioTask *task)
*/
static coroutine_fn int
qcow2_co_pwritev_compressed_part(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
+int64_t offset, int64_t bytes,
QEMUIOVector *qiov, size_t qiov_offset)
{
BDRVQcow2State *s = bs->opaque;

@@ -5227,24 +5228,55 @@ static int qcow2_has_zero_init(BlockDriverState *bs)
}
}

+/*
+* Check the request to vmstate. On success return
+* qcow2_vm_state_offset(bs) + @pos
+*/
+static int64_t qcow2_check_vmstate_request(BlockDriverState *bs,
+QEMUIOVector *qiov, int64_t pos)
+{
+BDRVQcow2State *s = bs->opaque;
+int64_t vmstate_offset = qcow2_vm_state_offset(s);
+int ret;
+
+/* Incoming requests must be OK */
+bdrv_check_qiov_request(pos, qiov->size, qiov, 0, &error_abort);
+
+if (INT64_MAX - pos < vmstate_offset) {
+return -EIO;
+}
+
+pos += vmstate_offset;
+ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
+if (ret < 0) {
+return ret;
+}
+
+return pos;
+}
+
static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
int64_t pos)
{
-BDRVQcow2State *s = bs->opaque;
+int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos);
+if (offset < 0) {
+return offset;
+}

BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
-return bs->drv->bdrv_co_pwritev_part(bs, qcow2_vm_state_offset(s) + pos,
-qiov->size, qiov, 0, 0);
+return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0);
}

static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
int64_t pos)
{
-BDRVQcow2State *s = bs->opaque;
+int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos);
+if (offset < 0) {
+return offset;
+}

BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
-return bs->drv->bdrv_co_preadv_part(bs, qcow2_vm_state_offset(s) + pos,
-qiov->size, qiov, 0, 0);
+return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0);
}

/*
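The new qcow2_check_vmstate_request() helper above guards the pos + vmstate_offset addition against signed overflow before re-validating the shifted request. The same guard in isolation, as a small sketch that assumes both operands are non-negative (as they are in the qcow2 caller); the function name is invented for illustration:

#include <errno.h>
#include <stdint.h>

/* Return base + pos, or -EIO if the sum would exceed INT64_MAX. */
int64_t checked_offset_add(int64_t base, int64_t pos)
{
    if (INT64_MAX - pos < base) {
        return -EIO;
    }
    return base + pos;
}

Writing the test as INT64_MAX - pos < base keeps the comparison itself overflow-free, which is why the hunk orders it that way.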
@@ -582,6 +582,7 @@ static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
BDRVQEDState *s = bs->opaque;

bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
+bs->bl.max_pwrite_zeroes = QEMU_ALIGN_DOWN(INT_MAX, s->header.cluster_size);
}

/* We have nothing to do for QED reopen, stubs just return

@@ -1397,7 +1398,7 @@ static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset,
-int bytes,
+int64_t bytes,
BdrvRequestFlags flags)
{
BDRVQEDState *s = bs->opaque;

@@ -1408,6 +1409,12 @@ static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
*/
QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes);

+/*
+* QED is not prepared for 63bit write-zero requests, so rely on
+* max_pwrite_zeroes.
+*/
+assert(bytes <= INT_MAX);
+
/* Fall back if the request is not aligned */
if (qed_offset_into_cluster(s, offset) ||
qed_offset_into_cluster(s, bytes)) {
@@ -663,8 +663,8 @@ static int read_fifo_child(QuorumAIOCB *acb)
return ret;
}

-static int quorum_co_preadv(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov, int flags)
+static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVQuorumState *s = bs->opaque;
QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);

@@ -714,8 +714,9 @@ static void write_quorum_entry(void *opaque)
}
}

-static int quorum_co_pwritev(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov, int flags)
+static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset,
+int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
BDRVQuorumState *s = bs->opaque;
QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags);

@@ -745,7 +746,7 @@ static int quorum_co_pwritev(BlockDriverState *bs, uint64_t offset,
}

static int quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
-int bytes, BdrvRequestFlags flags)
+int64_t bytes, BdrvRequestFlags flags)

{
return quorum_co_pwritev(bs, offset, bytes, NULL,
@@ -181,8 +181,8 @@ static void raw_reopen_abort(BDRVReopenState *state)
}

/* Check and adjust the offset, against 'offset' and 'size' options. */
-static inline int raw_adjust_offset(BlockDriverState *bs, uint64_t *offset,
-uint64_t bytes, bool is_write)
+static inline int raw_adjust_offset(BlockDriverState *bs, int64_t *offset,
+int64_t bytes, bool is_write)
{
BDRVRawState *s = bs->opaque;

@@ -201,9 +201,9 @@ static inline int raw_adjust_offset(BlockDriverState *bs, uint64_t *offset,
return 0;
}

-static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov,
-int flags)
+static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
+int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
int ret;

@@ -216,9 +216,9 @@ static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset,
return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
}

-static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov,
-int flags)
+static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset,
+int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
void *buf = NULL;
BlockDriver *drv;

@@ -289,12 +289,12 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs,
}

static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs,
-int64_t offset, int bytes,
+int64_t offset, int64_t bytes,
BdrvRequestFlags flags)
{
int ret;

-ret = raw_adjust_offset(bs, (uint64_t *)&offset, bytes, true);
+ret = raw_adjust_offset(bs, &offset, bytes, true);
if (ret) {
return ret;
}

@@ -302,11 +302,11 @@ static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs,
}

static int coroutine_fn raw_co_pdiscard(BlockDriverState *bs,
-int64_t offset, int bytes)
+int64_t offset, int64_t bytes)
{
int ret;

-ret = raw_adjust_offset(bs, (uint64_t *)&offset, bytes, true);
+ret = raw_adjust_offset(bs, &offset, bytes, true);
if (ret) {
return ret;
}

@@ -532,10 +532,10 @@ static int raw_probe_geometry(BlockDriverState *bs, HDGeometry *geo)

static int coroutine_fn raw_co_copy_range_from(BlockDriverState *bs,
BdrvChild *src,
-uint64_t src_offset,
+int64_t src_offset,
BdrvChild *dst,
-uint64_t dst_offset,
-uint64_t bytes,
+int64_t dst_offset,
+int64_t bytes,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{

@@ -551,10 +551,10 @@ static int coroutine_fn raw_co_copy_range_from(BlockDriverState *bs,

static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs,
BdrvChild *src,
-uint64_t src_offset,
+int64_t src_offset,
BdrvChild *dst,
-uint64_t dst_offset,
-uint64_t bytes,
+int64_t dst_offset,
+int64_t bytes,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags)
{
block/rbd.c (20 changed lines)

@@ -1164,17 +1164,17 @@ static int coroutine_fn qemu_rbd_start_co(BlockDriverState *bs,
}

static int
-coroutine_fn qemu_rbd_co_preadv(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov,
-int flags)
+coroutine_fn qemu_rbd_co_preadv(BlockDriverState *bs, int64_t offset,
+int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
return qemu_rbd_start_co(bs, offset, bytes, qiov, flags, RBD_AIO_READ);
}

static int
-coroutine_fn qemu_rbd_co_pwritev(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov,
-int flags)
+coroutine_fn qemu_rbd_co_pwritev(BlockDriverState *bs, int64_t offset,
+int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
BDRVRBDState *s = bs->opaque;
/*

@@ -1197,17 +1197,17 @@ static int coroutine_fn qemu_rbd_co_flush(BlockDriverState *bs)
}

static int coroutine_fn qemu_rbd_co_pdiscard(BlockDriverState *bs,
-int64_t offset, int count)
+int64_t offset, int64_t bytes)
{
-return qemu_rbd_start_co(bs, offset, count, NULL, 0, RBD_AIO_DISCARD);
+return qemu_rbd_start_co(bs, offset, bytes, NULL, 0, RBD_AIO_DISCARD);
}

#ifdef LIBRBD_SUPPORTS_WRITE_ZEROES
static int
coroutine_fn qemu_rbd_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
-int count, BdrvRequestFlags flags)
+int64_t bytes, BdrvRequestFlags flags)
{
-return qemu_rbd_start_co(bs, offset, count, NULL, flags,
+return qemu_rbd_start_co(bs, offset, bytes, NULL, flags,
RBD_AIO_WRITE_ZEROES);
}
#endif
@@ -112,8 +112,9 @@ static int64_t throttle_getlength(BlockDriverState *bs)
}

static int coroutine_fn throttle_co_preadv(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{

ThrottleGroupMember *tgm = bs->opaque;

@@ -123,8 +124,9 @@ static int coroutine_fn throttle_co_preadv(BlockDriverState *bs,
}

static int coroutine_fn throttle_co_pwritev(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
ThrottleGroupMember *tgm = bs->opaque;
throttle_group_co_io_limits_intercept(tgm, bytes, true);

@@ -133,7 +135,7 @@ static int coroutine_fn throttle_co_pwritev(BlockDriverState *bs,
}

static int coroutine_fn throttle_co_pwrite_zeroes(BlockDriverState *bs,
-int64_t offset, int bytes,
+int64_t offset, int64_t bytes,
BdrvRequestFlags flags)
{
ThrottleGroupMember *tgm = bs->opaque;

@@ -143,7 +145,7 @@ static int coroutine_fn throttle_co_pwrite_zeroes(BlockDriverState *bs,
}

static int coroutine_fn throttle_co_pdiscard(BlockDriverState *bs,
-int64_t offset, int bytes)
+int64_t offset, int64_t bytes)
{
ThrottleGroupMember *tgm = bs->opaque;
throttle_group_co_io_limits_intercept(tgm, bytes, true);

@@ -152,8 +154,8 @@ static int coroutine_fn throttle_co_pdiscard(BlockDriverState *bs,
}

static int coroutine_fn throttle_co_pwritev_compressed(BlockDriverState *bs,
-uint64_t offset,
-uint64_t bytes,
+int64_t offset,
+int64_t bytes,
QEMUIOVector *qiov)
{
return throttle_co_pwritev(bs, offset, bytes, qiov,
@@ -75,13 +75,13 @@ luring_resubmit_short_read(void *s, void *luringcb, int nread) "LuringState %p l

# qcow2.c
qcow2_add_task(void *co, void *bs, void *pool, const char *action, int cluster_type, uint64_t host_offset, uint64_t offset, uint64_t bytes, void *qiov, size_t qiov_offset) "co %p bs %p pool %p: %s: cluster_type %d file_cluster_offset %" PRIu64 " offset %" PRIu64 " bytes %" PRIu64 " qiov %p qiov_offset %zu"
-qcow2_writev_start_req(void *co, int64_t offset, int bytes) "co %p offset 0x%" PRIx64 " bytes %d"
+qcow2_writev_start_req(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64
qcow2_writev_done_req(void *co, int ret) "co %p ret %d"
qcow2_writev_start_part(void *co) "co %p"
qcow2_writev_done_part(void *co, int cur_bytes) "co %p cur_bytes %d"
qcow2_writev_data(void *co, uint64_t offset) "co %p offset 0x%" PRIx64
-qcow2_pwrite_zeroes_start_req(void *co, int64_t offset, int count) "co %p offset 0x%" PRIx64 " count %d"
-qcow2_pwrite_zeroes(void *co, int64_t offset, int count) "co %p offset 0x%" PRIx64 " count %d"
+qcow2_pwrite_zeroes_start_req(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64
+qcow2_pwrite_zeroes(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64
qcow2_skip_cow(void *co, uint64_t offset, int nb_clusters) "co %p offset 0x%" PRIx64 " nb_clusters %d"

# qcow2-cluster.c

@@ -152,8 +152,8 @@ nvme_write_zeroes(void *s, uint64_t offset, uint64_t bytes, int flags) "s %p off
nvme_qiov_unaligned(const void *qiov, int n, void *base, size_t size, int align) "qiov %p n %d base %p size 0x%zx align 0x%x"
nvme_prw_buffered(void *s, uint64_t offset, uint64_t bytes, int niov, int is_write) "s %p offset 0x%"PRIx64" bytes %"PRId64" niov %d is_write %d"
nvme_rw_done(void *s, int is_write, uint64_t offset, uint64_t bytes, int ret) "s %p is_write %d offset 0x%"PRIx64" bytes %"PRId64" ret %d"
-nvme_dsm(void *s, uint64_t offset, uint64_t bytes) "s %p offset 0x%"PRIx64" bytes %"PRId64""
-nvme_dsm_done(void *s, uint64_t offset, uint64_t bytes, int ret) "s %p offset 0x%"PRIx64" bytes %"PRId64" ret %d"
+nvme_dsm(void *s, int64_t offset, int64_t bytes) "s %p offset 0x%"PRIx64" bytes %"PRId64""
+nvme_dsm_done(void *s, int64_t offset, int64_t bytes, int ret) "s %p offset 0x%"PRIx64" bytes %"PRId64" ret %d"
nvme_dma_map_flush(void *s) "s %p"
nvme_free_req_queue_wait(void *s, unsigned q_index) "s %p q #%u"
nvme_create_queue_pair(unsigned q_index, void *q, size_t size, void *aio_context, int fd) "index %u q %p size %zu aioctx %p fd %d"
@@ -544,8 +544,8 @@ static int coroutine_fn vdi_co_block_status(BlockDriverState *bs,
}

static int coroutine_fn
-vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+vdi_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVVdiState *s = bs->opaque;
QEMUIOVector local_qiov;

@@ -600,8 +600,8 @@ vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
}

static int coroutine_fn
-vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+vdi_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVVdiState *s = bs->opaque;
QEMUIOVector local_qiov;
block/vmdk.c (14 changed lines)

@@ -1888,8 +1888,8 @@ static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
}

static int coroutine_fn
-vmdk_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+vmdk_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVVmdkState *s = bs->opaque;
int ret;

@@ -2068,8 +2068,8 @@ static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset,
}

static int coroutine_fn
-vmdk_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+vmdk_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, BdrvRequestFlags flags)
{
int ret;
BDRVVmdkState *s = bs->opaque;

@@ -2080,8 +2080,8 @@ vmdk_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
}

static int coroutine_fn
-vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
-uint64_t bytes, QEMUIOVector *qiov)
+vmdk_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov)
{
if (bytes == 0) {
/* The caller will write bytes 0 to signal EOF.

@@ -2109,7 +2109,7 @@ vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset,

static int coroutine_fn vmdk_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset,
-int bytes,
+int64_t bytes,
BdrvRequestFlags flags)
{
int ret;
@@ -608,8 +608,8 @@ static int vpc_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
}

static int coroutine_fn
-vpc_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+vpc_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVVPCState *s = bs->opaque;
int ret;

@@ -658,8 +658,8 @@ fail:
}

static int coroutine_fn
-vpc_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+vpc_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, BdrvRequestFlags flags)
{
BDRVVPCState *s = bs->opaque;
int64_t image_offset;
@@ -1522,8 +1522,8 @@ static int vvfat_read(BlockDriverState *bs, int64_t sector_num,
}

static int coroutine_fn
-vvfat_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+vvfat_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, BdrvRequestFlags flags)
{
int ret;
BDRVVVFATState *s = bs->opaque;

@@ -3061,8 +3061,8 @@ DLOG(checkpoint());
}

static int coroutine_fn
-vvfat_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+vvfat_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, BdrvRequestFlags flags)
{
int ret;
BDRVVVFATState *s = bs->opaque;
@@ -99,8 +99,10 @@ driver options if ``--image-opts`` is specified.

.. option:: --cache=CACHE

-The cache mode to be used with the file. See the documentation of
-the emulator's ``-drive cache=...`` option for allowed values.
+The cache mode to be used with the file. Valid values are:
+``none``, ``writeback`` (the default), ``writethrough``,
+``directsync`` and ``unsafe``. See the documentation of
+the emulator's ``-drive cache=...`` option for more info.

.. option:: -n, --nocache
@@ -94,6 +94,9 @@ typedef struct BdrvTrackedRequest {
struct BdrvTrackedRequest *waiting_for;
} BdrvTrackedRequest;

+int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, size_t qiov_offset,
+Error **errp);
int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp);

struct BlockDriver {

@@ -232,11 +235,11 @@ struct BlockDriver {

/* aio */
BlockAIOCB *(*bdrv_aio_preadv)(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
-BlockCompletionFunc *cb, void *opaque);
+int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *(*bdrv_aio_pwritev)(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags,
-BlockCompletionFunc *cb, void *opaque);
+int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs,

@@ -262,10 +265,11 @@ struct BlockDriver {
* The buffer in @qiov may point directly to guest memory.
*/
int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
+int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags);
int coroutine_fn (*bdrv_co_preadv_part)(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, size_t qiov_offset, int flags);
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags);
int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags);
/**

@@ -284,10 +288,11 @@ struct BlockDriver {
* The buffer in @qiov may point directly to guest memory.
*/
int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags);
+int64_t offset, int64_t bytes, QEMUIOVector *qiov,
+BdrvRequestFlags flags);
int coroutine_fn (*bdrv_co_pwritev_part)(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, size_t qiov_offset, int flags);
+int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
+BdrvRequestFlags flags);

/*
* Efficiently zero a region of the disk image. Typically an image format

@@ -296,9 +301,9 @@ struct BlockDriver {
* will be called instead.
*/
int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs,
-int64_t offset, int bytes, BdrvRequestFlags flags);
+int64_t offset, int64_t bytes, BdrvRequestFlags flags);
int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs,
-int64_t offset, int bytes);
+int64_t offset, int64_t bytes);

/* Map [offset, offset + nbytes) range onto a child of @bs to copy from,
* and invoke bdrv_co_copy_range_from(child, ...), or invoke

@@ -309,10 +314,10 @@ struct BlockDriver {
*/
int coroutine_fn (*bdrv_co_copy_range_from)(BlockDriverState *bs,
BdrvChild *src,
-uint64_t offset,
+int64_t offset,
BdrvChild *dst,
-uint64_t dst_offset,
-uint64_t bytes,
+int64_t dst_offset,
+int64_t bytes,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags);

@@ -326,10 +331,10 @@ struct BlockDriver {
*/
int coroutine_fn (*bdrv_co_copy_range_to)(BlockDriverState *bs,
BdrvChild *src,
-uint64_t src_offset,
+int64_t src_offset,
BdrvChild *dst,
-uint64_t dst_offset,
-uint64_t bytes,
+int64_t dst_offset,
+int64_t bytes,
BdrvRequestFlags read_flags,
BdrvRequestFlags write_flags);

@@ -434,10 +439,9 @@ struct BlockDriver {
Error **errp);

int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes, QEMUIOVector *qiov);
+int64_t offset, int64_t bytes, QEMUIOVector *qiov);
int coroutine_fn (*bdrv_co_pwritev_compressed_part)(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
-size_t qiov_offset);
+int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset);

int (*bdrv_snapshot_create)(BlockDriverState *bs,
QEMUSnapshotInfo *sn_info);

@@ -670,11 +674,12 @@ typedef struct BlockLimits {
* otherwise. */
uint32_t request_alignment;

-/* Maximum number of bytes that can be discarded at once (since it
-* is signed, it must be < 2G, if set). Must be multiple of
-* pdiscard_alignment, but need not be power of 2. May be 0 if no
-* inherent 32-bit limit */
-int32_t max_pdiscard;
+/*
+* Maximum number of bytes that can be discarded at once. Must be multiple
+* of pdiscard_alignment, but need not be power of 2. May be 0 if no
+* inherent 64-bit limit.
+*/
+int64_t max_pdiscard;

/* Optimal alignment for discard requests in bytes. A power of 2
* is best but not mandatory. Must be a multiple of

@@ -682,10 +687,11 @@ typedef struct BlockLimits {
* that is set. May be 0 if bl.request_alignment is good enough */
uint32_t pdiscard_alignment;

-/* Maximum number of bytes that can zeroized at once (since it is
-* signed, it must be < 2G, if set). Must be multiple of
-* pwrite_zeroes_alignment. May be 0 if no inherent 32-bit limit */
-int32_t max_pwrite_zeroes;
+/*
+* Maximum number of bytes that can zeroized at once. Must be multiple of
+* pwrite_zeroes_alignment. 0 means no limit.
+*/
+int64_t max_pwrite_zeroes;

/* Optimal alignment for write zeroes requests in bytes. A power
* of 2 is best but not mandatory. Must be a multiple of
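With max_pdiscard and max_pwrite_zeroes widened to int64_t, a driver's per-request caps no longer have to fit under 2 GiB; they only need to stay multiples of the matching alignment, with 0 still meaning "no inherent limit". A hedged sketch of how a driver's .bdrv_refresh_limits callback might fill the new fields for a hypothetical cluster-based format (the BDRVExampleState type and the driver itself are invented for illustration; only the BlockLimits fields and QEMU_ALIGN_DOWN come from the tree):

#include "qemu/osdep.h"
#include "block/block_int.h"

/* Hypothetical driver state, not part of QEMU. */
typedef struct BDRVExampleState {
    uint64_t cluster_size;   /* e.g. 64 KiB */
} BDRVExampleState;

static void example_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVExampleState *s = bs->opaque;

    /* Discard and write-zeroes operate on whole clusters. */
    bs->bl.pdiscard_alignment = s->cluster_size;
    bs->bl.pwrite_zeroes_alignment = s->cluster_size;

    /*
     * Being int64_t now, the caps can express limits far beyond 2 GiB,
     * as long as they remain multiples of the alignment above.
     */
    bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT64_MAX, s->cluster_size);
    bs->bl.max_pwrite_zeroes = QEMU_ALIGN_DOWN(INT64_MAX, s->cluster_size);
}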
@@ -318,6 +318,7 @@ nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
}

if (!blocking) {
+error_setg(errp, "No connection at the moment");
return NULL;
}
@@ -1434,9 +1434,7 @@ nbd_read_eof(BlockDriverState *bs, QIOChannel *ioc, void *buffer, size_t size,

len = qio_channel_readv(ioc, &iov, 1, errp);
if (len == QIO_CHANNEL_ERR_BLOCK) {
-bdrv_dec_in_flight(bs);
qio_channel_yield(ioc, G_IO_IN);
-bdrv_inc_in_flight(bs);
continue;
} else if (len < 0) {
return -EIO;
@@ -980,7 +980,7 @@ static int nbd_negotiate_meta_queries(NBDClient *client,
size_t i;
size_t count = 0;

-if (!client->structured_reply) {
+if (client->opt == NBD_OPT_SET_META_CONTEXT && !client->structured_reply) {
return nbd_opt_invalid(client, errp,
"request option '%s' when structured reply "
"is not negotiated",
@@ -135,7 +135,9 @@ static void usage(const char *name)
" 'snapshot.id=[ID],snapshot.name=[NAME]', or\n"
" '[ID_OR_NAME]'\n"
" -n, --nocache disable host cache\n"
-" --cache=MODE set cache mode (none, writeback, ...)\n"
+" --cache=MODE set cache mode used to access the disk image, the\n"
+" valid options are: 'none', 'writeback' (default),\n"
+" 'writethrough', 'directsync' and 'unsafe'\n"
" --aio=MODE set AIO mode (native, io_uring or threads)\n"
" --discard=MODE set discard mode (ignore, unmap)\n"
" --detect-zeroes=MODE set detect-zeroes mode (off, on, unmap)\n"

@@ -552,7 +554,7 @@ int main(int argc, char **argv)
bool alloc_depth = false;
const char *tlscredsid = NULL;
bool imageOpts = false;
-bool writethrough = true;
+bool writethrough = false; /* Client will flush as needed. */
bool fork_process = false;
bool list = false;
int old_stderr = -1;
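The change of the writethrough default above is what makes cache=writeback the effective qemu-nbd default: writes complete once they reach the host page cache, and the NBD client is expected to send explicit flushes when it needs durability. The table below is a sketch of how the five documented cache-mode names decompose into the three underlying knobs; it illustrates the semantics only and is not QEMU's actual parser (that is bdrv_parse_cache_mode() in block.c):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct cache_mode {
    const char *name;
    bool direct;        /* bypass the host page cache (O_DIRECT)          */
    bool no_flush;      /* ignore flush requests entirely                 */
    bool writethrough;  /* wait for data to reach the disk on every write */
};

static const struct cache_mode modes[] = {
    { "writeback",    false, false, false },  /* the new qemu-nbd default */
    { "writethrough", false, false, true  },
    { "none",         true,  false, false },
    { "directsync",   true,  false, true  },
    { "unsafe",       false, true,  false },
};

int main(void)
{
    for (size_t i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
        printf("%-12s direct=%d no_flush=%d writethrough=%d\n",
               modes[i].name, modes[i].direct, modes[i].no_flush,
               modes[i].writethrough);
    }
    return 0;
}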
@@ -65,8 +65,9 @@ static void co_reenter_bh(void *opaque)
}

static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
BDRVTestState *s = bs->opaque;

@@ -1106,8 +1107,9 @@ static void bdrv_test_top_close(BlockDriverState *bs)
}

static int coroutine_fn bdrv_test_top_co_preadv(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
BDRVTestTopState *tts = bs->opaque;
return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);

@@ -1855,10 +1857,10 @@ static void bdrv_replace_test_close(BlockDriverState *bs)
* Set .has_read to true and return success.
*/
static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs,
-uint64_t offset,
-uint64_t bytes,
+int64_t offset,
+int64_t bytes,
QEMUIOVector *qiov,
-int flags)
+BdrvRequestFlags flags)
{
BDRVReplaceTestState *s = bs->opaque;
@@ -31,15 +31,24 @@
#include "qemu/main-loop.h"
#include "iothread.h"

-static int coroutine_fn bdrv_test_co_prwv(BlockDriverState *bs,
-uint64_t offset, uint64_t bytes,
-QEMUIOVector *qiov, int flags)
+static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov,
+BdrvRequestFlags flags)
{
return 0;
}

+static int coroutine_fn bdrv_test_co_pwritev(BlockDriverState *bs,
+int64_t offset, int64_t bytes,
+QEMUIOVector *qiov,
+BdrvRequestFlags flags)
+{
+return 0;
+}
+
static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
-int64_t offset, int bytes)
+int64_t offset, int64_t bytes)
{
return 0;
}

@@ -66,8 +75,8 @@ static BlockDriver bdrv_test = {
.format_name = "test",
.instance_size = 1,

-.bdrv_co_preadv = bdrv_test_co_prwv,
-.bdrv_co_pwritev = bdrv_test_co_prwv,
+.bdrv_co_preadv = bdrv_test_co_preadv,
+.bdrv_co_pwritev = bdrv_test_co_pwritev,
.bdrv_co_pdiscard = bdrv_test_co_pdiscard,
.bdrv_co_truncate = bdrv_test_co_truncate,
.bdrv_co_block_status = bdrv_test_co_block_status,