block-copy: refactor copy_range handling
Currently we update s->use_copy_range and s->copy_size inside
block_copy_do_copy(). That is not ideal, for two reasons:

1. block_copy_do_copy() is intended to be a simple function that wraps
   the bdrv_co_<io> functions for the needs of block copy. That is why
   we do not pass a BlockCopyTask into it, and it is the wrong place to
   manipulate the generic state of the block-copy process.

2. We are going to make block-copy thread-safe, so it is better to move
   the manipulation of block-copy state to the places that will need
   critical sections anyway, rather than introduce extra synchronization
   primitives into block_copy_do_copy().

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20210528141628.44287-3-vsementsov@virtuozzo.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
This commit is contained in:
parent
8146b357d0
commit
bed9523471
1 changed file with 49 additions and 23 deletions
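The heart of the change is a pattern worth seeing in isolation: the I/O helper reports the copy-range outcome through an in-out flag, and only the caller mutates shared state. A minimal standalone C sketch of that pattern (all names and values here are illustrative, not QEMU's):

#include <stdbool.h>
#include <stdio.h>

/* Shared state, analogous to BlockCopyState (illustrative). */
typedef struct State {
    bool use_fast_path;            /* like s->use_copy_range */
} State;

/*
 * The helper stays a pure wrapper: it reports the outcome through
 * *fast_path instead of touching shared state, the way the patched
 * block_copy_do_copy() does with *copy_range.
 */
static int do_copy(bool *fast_path)
{
    if (*fast_path) {
        int ret = -1;              /* pretend the fast path failed */
        if (ret < 0) {
            *fast_path = false;    /* report failure, fall through */
        } else {
            return 0;
        }
    }
    return 0;                      /* slow read+write path */
}

/*
 * The caller owns shared state; this is the single place that will
 * need a critical section once block-copy becomes thread-safe.
 */
static void task_entry(State *s)
{
    bool fast_path = s->use_fast_path;   /* snapshot per task */
    do_copy(&fast_path);
    if (s->use_fast_path && !fast_path) {
        s->use_fast_path = false;        /* consolidated state update */
    }
}

int main(void)
{
    State s = { .use_fast_path = true };
    task_entry(&s);
    printf("fast path still enabled: %d\n", s.use_fast_path);  /* prints 0 */
    return 0;
}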
block/block-copy.c
@@ -65,6 +65,7 @@ typedef struct BlockCopyTask {
     int64_t offset;
     int64_t bytes;
     bool zeroes;
+    bool copy_range;
     QLIST_ENTRY(BlockCopyTask) list;
     CoQueue wait_queue; /* coroutines blocked on this task */
 } BlockCopyTask;
@@ -183,6 +184,7 @@ static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
         .call_state = call_state,
         .offset = offset,
         .bytes = bytes,
+        .copy_range = s->use_copy_range,
     };
     qemu_co_queue_init(&task->wait_queue);
     QLIST_INSERT_HEAD(&s->tasks, task, list);
@@ -342,11 +344,18 @@ static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
  *
  * No sync here: nor bitmap neighter intersecting requests handling, only copy.
  *
+ * @copy_range is an in-out argument: if *copy_range is false, copy_range is not
+ * done. If *copy_range is true, copy_range is attempted. If the copy_range
+ * attempt fails, the function falls back to the usual read+write and
+ * *copy_range is set to false. *copy_range and zeroes must not be true
+ * simultaneously.
+ *
  * Returns 0 on success.
  */
 static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
                                            int64_t offset, int64_t bytes,
-                                           bool zeroes, bool *error_is_read)
+                                           bool zeroes, bool *copy_range,
+                                           bool *error_is_read)
 {
     int ret;
     int64_t nbytes = MIN(offset + bytes, s->len) - offset;
@@ -359,6 +368,7 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
     assert(offset + bytes <= s->len ||
            offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
     assert(nbytes < INT_MAX);
+    assert(!(*copy_range && zeroes));
 
     if (zeroes) {
         ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
@@ -370,32 +380,15 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
         return ret;
     }
 
-    if (s->use_copy_range) {
+    if (*copy_range) {
         ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                  0, s->write_flags);
         if (ret < 0) {
             trace_block_copy_copy_range_fail(s, offset, ret);
-            s->use_copy_range = false;
-            s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
+            *copy_range = false;
             /* Fallback to read+write with allocated buffer */
         } else {
-            if (s->use_copy_range) {
-                /*
-                 * Successful copy-range. Now increase copy_size. copy_range
-                 * does not respect max_transfer (it's a TODO), so we factor
-                 * that in here.
-                 *
-                 * Note: we double-check s->use_copy_range for the case when
-                 * parallel block-copy request unsets it during previous
-                 * bdrv_co_copy_range call.
-                 */
-                s->copy_size =
-                        MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
-                            QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
-                                                                    s->target),
-                                            s->cluster_size));
-            }
-            goto out;
+            return 0;
         }
     }
 
@@ -431,14 +424,44 @@ out:
     return ret;
 }
 
+static void block_copy_handle_copy_range_result(BlockCopyState *s,
+                                                bool is_success)
+{
+    if (!s->use_copy_range) {
+        /* already disabled */
+        return;
+    }
+
+    if (is_success) {
+        /*
+         * Successful copy-range. Now increase copy_size. copy_range
+         * does not respect max_transfer (it's a TODO), so we factor
+         * that in here.
+         */
+        s->copy_size =
+                MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
+                    QEMU_ALIGN_DOWN(block_copy_max_transfer(s->source,
+                                                            s->target),
+                                    s->cluster_size));
+    } else {
+        /* Copy-range failed, disable it. */
+        s->use_copy_range = false;
+        s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
+    }
+}
+
 static coroutine_fn int block_copy_task_entry(AioTask *task)
 {
     BlockCopyTask *t = container_of(task, BlockCopyTask, task);
     bool error_is_read = false;
+    bool copy_range = t->copy_range;
     int ret;
 
     ret = block_copy_do_copy(t->s, t->offset, t->bytes, t->zeroes,
-                             &error_is_read);
+                             &copy_range, &error_is_read);
+    if (t->copy_range) {
+        block_copy_handle_copy_range_result(t->s, copy_range);
+    }
     if (ret < 0) {
         if (!t->call_state->ret) {
             t->call_state->ret = ret;
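The copy_size formula in block_copy_handle_copy_range_result() is easier to follow with numbers: take the larger of one cluster and the copy-range cap, then clamp it to the backend's max_transfer rounded down to cluster granularity. A standalone sketch with made-up values (the macros and constants are stand-ins, not QEMU's):

#include <inttypes.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* Round x down to a multiple of a, like QEMU_ALIGN_DOWN(). */
#define ALIGN_DOWN(x, a) ((x) / (a) * (a))

int main(void)
{
    int64_t cluster_size   = 64 * 1024;             /* hypothetical 64 KiB clusters */
    int64_t max_copy_range = 16 * 1024 * 1024;      /* stand-in for BLOCK_COPY_MAX_COPY_RANGE */
    int64_t max_transfer   = 3 * 1024 * 1024 + 512; /* backend limit, not cluster-aligned */

    int64_t copy_size = MIN(MAX(cluster_size, max_copy_range),
                            ALIGN_DOWN(max_transfer, cluster_size));

    /* 3 MiB + 512 B aligns down to 3 MiB, which is below the 16 MiB cap. */
    printf("copy_size = %" PRId64 "\n", copy_size);  /* prints 3145728 */
    return 0;
}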
@@ -619,7 +642,10 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
             g_free(task);
             continue;
         }
-        task->zeroes = ret & BDRV_BLOCK_ZERO;
+        if (ret & BDRV_BLOCK_ZERO) {
+            task->zeroes = true;
+            task->copy_range = false;
+        }
 
         if (s->speed) {
             if (!call_state->ignore_ratelimit) {
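The payoff of funnelling every update of s->use_copy_range and s->copy_size through one caller-side function is that the planned thread-safety work needs a critical section only there, never inside the I/O wrapper. An illustrative sketch of that end state (the mutex is an assumption about the follow-up series, not part of this commit):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical reduced state; the lock models where a later
 * thread-safety series could put its critical section. */
typedef struct State {
    pthread_mutex_t lock;
    bool use_copy_range;
} State;

/* Because every mutation of shared state now happens in one
 * caller-side function, one critical section serializes it all. */
static void handle_copy_range_result(State *s, bool is_success)
{
    pthread_mutex_lock(&s->lock);
    if (s->use_copy_range && !is_success) {
        s->use_copy_range = false;   /* first failure disables the fast path */
    }
    pthread_mutex_unlock(&s->lock);
}

int main(void)
{
    State s = { PTHREAD_MUTEX_INITIALIZER, true };
    handle_copy_range_result(&s, false);            /* a task reports failure */
    printf("use_copy_range = %d\n", s.use_copy_range);  /* prints 0 */
    return 0;
}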