Mirror of https://gitlab.com/qemu-project/qemu
stream: rework backing-file changing
Stream, in stream_prepare(), calls bdrv_change_backing_file() to change the backing file recorded in the metadata of bs. It may use either the backing-file parameter given by the user or the filename of the base taken at job start. The backing file format is determined from the base node at job finish.

There are several problems with this design; this patch solves only two of them:

1. Consider the scenario where backing-file is unset. The current concept of stream allows the base to change during the job (we don't freeze the link to the base), so we should not save the base filename at job start; let's determine the name of the base at job finish instead.

2. Using the direct base to determine the filename and format is not a good idea: the base node may be a filter, so its filename may be a JSON pseudo-filename, and its format_name is not suitable for storing into qcow2 metadata as the backing file format. Let's use unfiltered_base instead.

Signed-off-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
[vsementsov: change commit subject, change logic in stream_prepare]
Message-Id: <20201216061703.70908-10-vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
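As a reading aid, the following condensed sketch (not a literal excerpt; error handling and the final bdrv_change_backing_file() call are elided) shows what the backing-file determination in stream_prepare() amounts to once this patch is applied, consolidated from the block/stream.c hunks below:

    BlockDriverState *unfiltered_bs = bdrv_skip_filters(bs);
    BlockDriverState *base = bdrv_filter_or_cow_bs(s->above_base);
    BlockDriverState *unfiltered_base = bdrv_skip_filters(base);
    const char *base_id = NULL, *base_fmt = NULL;

    if (bdrv_cow_child(unfiltered_bs)) {
        if (unfiltered_base) {
            /* Prefer the user-supplied backing-file string; otherwise take
             * the filename of the unfiltered base now, at job finish, so a
             * base that changed during the job is reflected correctly. */
            base_id = s->backing_file_str ?: unfiltered_base->filename;
            if (unfiltered_base->drv) {
                /* Record the real format of the unfiltered base, not a
                 * filter driver's format_name. */
                base_fmt = unfiltered_base->drv->format_name;
            }
        }
        bdrv_set_backing_hd(unfiltered_bs, base, &local_err);
        /* ... then bdrv_change_backing_file(unfiltered_bs, base_id, base_fmt) ... */
    }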
parent e275458b29
commit 000e5a1cda

2 changed files with 6 additions and 11 deletions
block/stream.c

@@ -65,6 +65,7 @@ static int stream_prepare(Job *job)
     BlockDriverState *bs = blk_bs(bjob->blk);
     BlockDriverState *unfiltered_bs = bdrv_skip_filters(bs);
     BlockDriverState *base = bdrv_filter_or_cow_bs(s->above_base);
+    BlockDriverState *unfiltered_base = bdrv_skip_filters(base);
     Error *local_err = NULL;
     int ret = 0;

@@ -73,10 +74,10 @@ static int stream_prepare(Job *job)
     if (bdrv_cow_child(unfiltered_bs)) {
         const char *base_id = NULL, *base_fmt = NULL;
-        if (base) {
-            base_id = s->backing_file_str;
-            if (base->drv) {
-                base_fmt = base->drv->format_name;
+        if (unfiltered_base) {
+            base_id = s->backing_file_str ?: unfiltered_base->filename;
+            if (unfiltered_base->drv) {
+                base_fmt = unfiltered_base->drv->format_name;
             }
         }
         bdrv_set_backing_hd(unfiltered_bs, base, &local_err);

blockdev.c

@@ -2511,7 +2511,6 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
     BlockDriverState *base_bs = NULL;
     AioContext *aio_context;
     Error *local_err = NULL;
-    const char *base_name = NULL;
     int job_flags = JOB_DEFAULT;

     if (!has_on_error) {

@@ -2539,7 +2538,6 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
             goto out;
         }
         assert(bdrv_get_aio_context(base_bs) == aio_context);
-        base_name = base;
     }

     if (has_base_node) {

@@ -2554,7 +2552,6 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
         }
         assert(bdrv_get_aio_context(base_bs) == aio_context);
         bdrv_refresh_filename(base_bs);
-        base_name = base_bs->filename;
     }

     /* Check for op blockers in the whole chain between bs and base */

@@ -2574,9 +2571,6 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
         goto out;
     }

-    /* backing_file string overrides base bs filename */
-    base_name = has_backing_file ? backing_file : base_name;
-
     if (has_auto_finalize && !auto_finalize) {
         job_flags |= JOB_MANUAL_FINALIZE;
     }

@@ -2584,7 +2578,7 @@ void qmp_block_stream(bool has_job_id, const char *job_id, const char *device,
         job_flags |= JOB_MANUAL_DISMISS;
     }

-    stream_start(has_job_id ? job_id : NULL, bs, base_bs, base_name,
+    stream_start(has_job_id ? job_id : NULL, bs, base_bs, backing_file,
                  job_flags, has_speed ? speed : 0, on_error,
                  filter_node_name, &local_err);
     if (local_err) {
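Taken together, the blockdev.c hunks reduce to a simple change at the call site. The sketch below is a condensed, non-literal view that assumes the rest of qmp_block_stream() is unchanged and that backing_file is NULL when the QMP argument was not supplied:

    /* The base_name bookkeeping is gone: the optional backing-file string is
     * forwarded as-is, and stream_prepare() resolves the real name and format
     * from the unfiltered base when the job completes. */
    stream_start(has_job_id ? job_id : NULL, bs, base_bs, backing_file,
                 job_flags, has_speed ? speed : 0, on_error,
                 filter_node_name, &local_err);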