block: Mark block_job_add_bdrv() GRAPH_WRLOCK

Instead of taking the writer lock internally, require callers to already
hold it when calling block_job_add_bdrv(). These callers will typically
already hold the graph lock once the locking work is completed, which
means that they can't call functions that take it internally.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20231027155333.420094-6-kwolf@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Author: Kevin Wolf, 2023-10-27 17:53:14 +02:00
commit f3bbc53dc5, parent 03b9eaca54
8 changed files with 45 additions and 15 deletions
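From a caller's point of view, the change is that the graph writer lock is now taken around block_job_add_bdrv() instead of inside it. A minimal sketch of the resulting calling convention, distilled from the hunks below (job, bs, ret and errp are placeholders for whatever the real caller has in scope, not code from this commit):

    /* Sketch only: the caller wraps block_job_add_bdrv() in the graph writer
     * lock and must drop the lock again on every exit path, including errors. */
    bdrv_graph_wrlock(bs);
    ret = block_job_add_bdrv(&job->common, "target", bs, 0, BLK_PERM_ALL, errp);
    if (ret < 0) {
        bdrv_graph_wrunlock();
        goto fail;
    }
    bdrv_graph_wrunlock();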

block/backup.c

@@ -374,7 +374,6 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     assert(bs);
     assert(target);
     GLOBAL_STATE_CODE();
-    GRAPH_RDLOCK_GUARD_MAINLOOP();
 
     /* QMP interface protects us from these cases */
     assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
@@ -385,31 +384,33 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
         return NULL;
     }
 
+    bdrv_graph_rdlock_main_loop();
     if (!bdrv_is_inserted(bs)) {
         error_setg(errp, "Device is not inserted: %s",
                    bdrv_get_device_name(bs));
-        return NULL;
+        goto error_rdlock;
     }
 
     if (!bdrv_is_inserted(target)) {
         error_setg(errp, "Device is not inserted: %s",
                    bdrv_get_device_name(target));
-        return NULL;
+        goto error_rdlock;
     }
 
     if (compress && !bdrv_supports_compressed_writes(target)) {
         error_setg(errp, "Compression is not supported for this drive %s",
                    bdrv_get_device_name(target));
-        return NULL;
+        goto error_rdlock;
     }
 
     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
-        return NULL;
+        goto error_rdlock;
     }
 
     if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
-        return NULL;
+        goto error_rdlock;
     }
+    bdrv_graph_rdunlock_main_loop();
 
     if (perf->max_workers < 1 || perf->max_workers > INT_MAX) {
         error_setg(errp, "max-workers must be between 1 and %d", INT_MAX);
@@ -437,6 +438,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
 
     len = bdrv_getlength(bs);
     if (len < 0) {
+        GRAPH_RDLOCK_GUARD_MAINLOOP();
         error_setg_errno(errp, -len, "Unable to get length for '%s'",
                          bdrv_get_device_or_node_name(bs));
         goto error;
@@ -444,6 +446,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
 
     target_len = bdrv_getlength(target);
     if (target_len < 0) {
+        GRAPH_RDLOCK_GUARD_MAINLOOP();
         error_setg_errno(errp, -target_len, "Unable to get length for '%s'",
                          bdrv_get_device_or_node_name(bs));
         goto error;
@@ -493,8 +496,10 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     block_copy_set_speed(bcs, speed);
 
     /* Required permissions are taken by copy-before-write filter target */
+    bdrv_graph_wrlock(target);
     block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                        &error_abort);
+    bdrv_graph_wrunlock();
 
     return &job->common;
 
@@ -507,4 +512,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
     }
 
     return NULL;
+
+error_rdlock:
+    bdrv_graph_rdunlock_main_loop();
+    return NULL;
 }

block/commit.c

@@ -342,6 +342,7 @@ void commit_start(const char *job_id, BlockDriverState *bs,
      */
     iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;
 
+    bdrv_graph_wrlock(top);
     for (iter = top; iter != base; iter = bdrv_filter_or_cow_bs(iter)) {
         if (iter == filtered_base) {
             /*
@@ -354,16 +355,20 @@ void commit_start(const char *job_id, BlockDriverState *bs,
         ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                  iter_shared_perms, errp);
         if (ret < 0) {
+            bdrv_graph_wrunlock();
             goto fail;
         }
     }
 
     if (bdrv_freeze_backing_chain(commit_top_bs, base, errp) < 0) {
+        bdrv_graph_wrunlock();
         goto fail;
     }
     s->chain_frozen = true;
 
     ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
+    bdrv_graph_wrunlock();
+
     if (ret < 0) {
         goto fail;
     }

block/mirror.c

@@ -1888,11 +1888,13 @@ static BlockJob *mirror_start_job(
      */
     bdrv_disable_dirty_bitmap(s->dirty_bitmap);
 
+    bdrv_graph_wrlock(bs);
     ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                              BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                              BLK_PERM_CONSISTENT_READ,
                              errp);
     if (ret < 0) {
+        bdrv_graph_wrunlock();
         goto fail;
     }
 
@@ -1937,14 +1939,17 @@ static BlockJob *mirror_start_job(
             ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                      iter_shared_perms, errp);
             if (ret < 0) {
+                bdrv_graph_wrunlock();
                 goto fail;
             }
         }
 
         if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
+            bdrv_graph_wrunlock();
             goto fail;
         }
     }
+    bdrv_graph_wrunlock();
 
     QTAILQ_INIT(&s->ops_in_flight);
 

block/stream.c

@@ -352,8 +352,10 @@ void stream_start(const char *job_id, BlockDriverState *bs,
      * already have our own plans. Also don't allow resize as the image size is
      * queried only at the job start and then cached.
      */
+    bdrv_graph_wrlock(bs);
     if (block_job_add_bdrv(&s->common, "active node", bs, 0,
                            basic_flags | BLK_PERM_WRITE, errp)) {
+        bdrv_graph_wrunlock();
         goto fail;
     }
 
@@ -373,9 +375,11 @@ void stream_start(const char *job_id, BlockDriverState *bs,
         ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                  basic_flags, errp);
         if (ret < 0) {
+            bdrv_graph_wrunlock();
            goto fail;
         }
     }
+    bdrv_graph_wrunlock();
 
     s->base_overlay = base_overlay;
     s->above_base = above_base;

blockjob.c

@@ -248,10 +248,8 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
         }
         aio_context_acquire(ctx);
     }
-    bdrv_graph_wrlock(bs);
     c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
                                errp);
-    bdrv_graph_wrunlock();
     if (need_context_ops) {
         aio_context_release(ctx);
         if (job->job.aio_context != qemu_get_aio_context()) {
@@ -515,7 +513,8 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
     BlockJob *job;
     int ret;
     GLOBAL_STATE_CODE();
-    GRAPH_RDLOCK_GUARD_MAINLOOP();
+
+    bdrv_graph_wrlock(bs);
 
     if (job_id == NULL && !(flags & JOB_INTERNAL)) {
         job_id = bdrv_get_device_name(bs);
@@ -524,6 +523,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
     job = job_create(job_id, &driver->job_driver, txn, bdrv_get_aio_context(bs),
                      flags, cb, opaque, errp);
     if (job == NULL) {
+        bdrv_graph_wrunlock();
         return NULL;
     }
 
@@ -563,9 +563,11 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
         goto fail;
     }
 
+    bdrv_graph_wrunlock();
     return job;
 
 fail:
+    bdrv_graph_wrunlock();
     job_early_fail(&job->job);
     return NULL;
 }

include/block/blockjob.h

@@ -138,8 +138,9 @@ BlockJob *block_job_get_locked(const char *id);
  * @job. This means that all operations will be blocked on @bs while
  * @job exists.
  */
-int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
-                       uint64_t perm, uint64_t shared_perm, Error **errp);
+int GRAPH_WRLOCK
+block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
+                   uint64_t perm, uint64_t shared_perm, Error **errp);
 
 /**
  * block_job_remove_all_bdrv:

include/block/blockjob_int.h

@@ -111,10 +111,11 @@ struct BlockJobDriver {
  * This function is not part of the public job interface; it should be
  * called from a wrapper that is specific to the job type.
  */
-void *block_job_create(const char *job_id, const BlockJobDriver *driver,
-                       JobTxn *txn, BlockDriverState *bs, uint64_t perm,
-                       uint64_t shared_perm, int64_t speed, int flags,
-                       BlockCompletionFunc *cb, void *opaque, Error **errp);
+void * GRAPH_UNLOCKED
+block_job_create(const char *job_id, const BlockJobDriver *driver,
+                 JobTxn *txn, BlockDriverState *bs, uint64_t perm,
+                 uint64_t shared_perm, int64_t speed, int flags,
+                 BlockCompletionFunc *cb, void *opaque, Error **errp);
 
 /**
  * block_job_free:

tests/unit/test-bdrv-drain.c

@@ -794,7 +794,10 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
                             0, 0, NULL, NULL, &error_abort);
     tjob->bs = src;
     job = &tjob->common;
+
+    bdrv_graph_wrlock(target);
     block_job_add_bdrv(job, "target", target, 0, BLK_PERM_ALL, &error_abort);
+    bdrv_graph_wrunlock();
 
     switch (result) {
     case TEST_JOB_SUCCESS: