Merge tag 'for-upstream' of https://repo.or.cz/qemu/kevin into staging

Block layer patches

- Clean up coroutine versions of bdrv_{is_allocated,block_status}*
- Graph locking part 5 (protect children/parent links)

# -----BEGIN PGP SIGNATURE-----
#
# iQJFBAABCAAvFiEE3D3rFZqa+V09dFb+fwmycsiPL9YFAmUoHL8RHGt3b2xmQHJl
# ZGhhdC5jb20ACgkQfwmycsiPL9b4uRAAjryVAaA5jXZ3mdGB80nhGtARZlIaIVO/
# tlXk065q2Cj+98f+fBPCPWvmEz28vJwBhJUsFwpHzLZrxecBpwZp0MPAkFBNkouq
# +AiO9xyTAqccEp/dnIys4Bun9Rp0Jq9lk9y29zzEmQuK5uCB56lpx2cDn/JkzSQt
# ZFtnxxTwi3MDTNvXATub8Ia/1suui0zvESS7J/NBxQNI3cFaQszp1vMwlRIoPiWo
# 15YZFPZZQ2pvu6/1nL1Vl9OLbPAVcEGJpjHZv0XhudYOwRiDvjYnwfPL7BuwYEsU
# Dos4mZZd/KMU695s7OzlVYi1q4ATKUTUxyyylVhXZrFBXSE5ntnfoHTKHEruTyPb
# G31h5mribSTWjdvY5HewHbSSPjByAWsSQg9yzcHybhORiqGQCpcGQ8zuW7oNKMPV
# JicWdoRVY4U4hR0nRdDxz9zdpQ8QYok/ginBxFaOzrCfClUB7ZOBxwRMclIghuRH
# FV+ZJk0ylVOz2tbfNxUa3KhUgTPd8jgCHFI7xak5EBRtTJiJjE03Xag1Fdxy5/D5
# tRsBBW4sOFygAhjN/xyeaRv9L8rAv3x/akriFjPUbOMLkPcJpe/DTWsP8+5LaZF8
# GkQvjsg5UvmfcJ3LFtecXxfYH4UWhDmyAjF+BswiRqafDDi2CCUmdwDnzEPbwuWO
# x1y7cgxe9SE=
# =4d/s
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 12 Oct 2023 12:20:15 EDT
# gpg:                using RSA key DC3DEB159A9AF95D3D7456FE7F09B272C88F2FD6
# gpg:                issuer "kwolf@redhat.com"
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74  56FE 7F09 B272 C88F 2FD6

* tag 'for-upstream' of https://repo.or.cz/qemu/kevin: (26 commits)
  block: Add assertion for bdrv_graph_wrlock()
  block: Protect bs->children with graph_lock
  block: Protect bs->parents with graph_lock
  block: Mark bdrv_get_specific_info() and callers GRAPH_RDLOCK
  block: Mark bdrv_apply_auto_read_only() and callers GRAPH_RDLOCK
  block: Mark bdrv_op_is_blocked() and callers GRAPH_RDLOCK
  qcow2: Mark check_constraints_on_bitmap() GRAPH_RDLOCK
  qcow2: Mark qcow2_inactivate() and callers GRAPH_RDLOCK
  qcow2: Mark qcow2_signal_corruption() and callers GRAPH_RDLOCK
  block: Mark bdrv_amend_options() and callers GRAPH_RDLOCK
  block: Mark bdrv_get_parent_name() and callers GRAPH_RDLOCK
  block: Mark bdrv_primary_child() and callers GRAPH_RDLOCK
  block: Mark bdrv_refresh_filename() and callers GRAPH_RDLOCK
  block: Mark bdrv_get_xdbg_block_graph() and callers GRAPH_RDLOCK
  block: Take graph rdlock in parts of reopen
  block: Mark bdrv_snapshot_fallback() and callers GRAPH_RDLOCK
  block: Mark bdrv_parent_cb_resize() and callers GRAPH_RDLOCK
  block: Mark drain related functions GRAPH_RDLOCK
  block: Mark bdrv_first_blk() and bdrv_is_root_node() GRAPH_RDLOCK
  block: Take graph rdlock in bdrv_inactivate_all()
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
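
The conversions in the diff below repeat a small set of patterns: functions that dereference bs->children or bs->parents gain a GRAPH_RDLOCK (or GRAPH_WRLOCK) marker, their main-loop callers take the reader lock either with GRAPH_RDLOCK_GUARD_MAINLOOP() or with an explicit bdrv_graph_rdlock_main_loop()/bdrv_graph_rdunlock_main_loop() pair, and coroutine code is switched from bdrv_is_allocated()/bdrv_block_status() to the bdrv_co_*() variants. The fragment below is a minimal sketch of those patterns, not code taken from the patches; the example_* names are invented.

#include "qemu/osdep.h"
#include "block/block_int.h"   /* BlockDriverState, BdrvChild (QEMU-internal headers assumed) */
#include "block/graph-lock.h"  /* GRAPH_RDLOCK, GRAPH_RDLOCK_GUARD_MAINLOOP() */

/* Callee: the annotation lets the thread-safety analysis check that every
 * caller holds the graph reader lock before parent links are dereferenced. */
static void GRAPH_RDLOCK example_walk_parents(BlockDriverState *bs)
{
    BdrvChild *c;

    assert_bdrv_graph_readable();
    QLIST_FOREACH(c, &bs->parents, next_parent) {
        /* inspect c->klass, c->bs, ... */
    }
}

/* Main-loop caller: the guard holds the reader lock until the end of scope. */
static void example_query_node(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    example_walk_parents(bs);
}

/* Coroutine caller, itself marked GRAPH_RDLOCK: it calls the coroutine
 * variant directly rather than the mixed bdrv_is_allocated() entry point. */
static int coroutine_fn GRAPH_RDLOCK
example_co_first_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    int64_t pnum;

    return bdrv_co_is_allocated(bs, offset, bytes, &pnum);
}

GRAPH_UNLOCKED, used on the reopen path in the diff, is the opposite assertion: the function must be entered without the reader lock held, typically because it drains or takes the writer lock itself.
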
commit ce2f51697b (Stefan Hajnoczi, 2023-10-16 12:34:45 -04:00)
60 changed files with 842 additions and 498 deletions

block.c (120 changed lines)

@ -279,8 +279,9 @@ bool bdrv_is_read_only(BlockDriverState *bs)
return !(bs->open_flags & BDRV_O_RDWR);
}
static int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
bool ignore_allow_rdw, Error **errp)
static int GRAPH_RDLOCK
bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
bool ignore_allow_rdw, Error **errp)
{
IO_CODE();
@ -371,8 +372,9 @@ char *bdrv_get_full_backing_filename_from_filename(const char *backed,
* setting @errp. In all other cases, NULL will only be returned with
* @errp set.
*/
static char *bdrv_make_absolute_filename(BlockDriverState *relative_to,
const char *filename, Error **errp)
static char * GRAPH_RDLOCK
bdrv_make_absolute_filename(BlockDriverState *relative_to,
const char *filename, Error **errp)
{
char *dir, *full_name;
@ -1192,19 +1194,19 @@ static char *bdrv_child_get_parent_desc(BdrvChild *c)
return g_strdup_printf("node '%s'", bdrv_get_node_name(parent));
}
static void bdrv_child_cb_drained_begin(BdrvChild *child)
static void GRAPH_RDLOCK bdrv_child_cb_drained_begin(BdrvChild *child)
{
BlockDriverState *bs = child->opaque;
bdrv_do_drained_begin_quiesce(bs, NULL);
}
static bool bdrv_child_cb_drained_poll(BdrvChild *child)
static bool GRAPH_RDLOCK bdrv_child_cb_drained_poll(BdrvChild *child)
{
BlockDriverState *bs = child->opaque;
return bdrv_drain_poll(bs, NULL, false);
}
static void bdrv_child_cb_drained_end(BdrvChild *child)
static void GRAPH_RDLOCK bdrv_child_cb_drained_end(BdrvChild *child)
{
BlockDriverState *bs = child->opaque;
bdrv_drained_end(bs);
@ -1250,7 +1252,7 @@ static void bdrv_temp_snapshot_options(int *child_flags, QDict *child_options,
*child_flags &= ~BDRV_O_NATIVE_AIO;
}
static void bdrv_backing_attach(BdrvChild *c)
static void GRAPH_WRLOCK bdrv_backing_attach(BdrvChild *c)
{
BlockDriverState *parent = c->opaque;
BlockDriverState *backing_hd = c->bs;
@ -1874,7 +1876,10 @@ static int bdrv_open_common(BlockDriverState *bs, BlockBackend *file,
}
if (file != NULL) {
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(blk_bs(file));
bdrv_graph_rdunlock_main_loop();
filename = blk_bs(file)->filename;
} else {
/*
@ -1901,7 +1906,9 @@ static int bdrv_open_common(BlockDriverState *bs, BlockBackend *file,
if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, ro)) {
if (!ro && bdrv_is_whitelisted(drv, true)) {
bdrv_graph_rdlock_main_loop();
ret = bdrv_apply_auto_read_only(bs, NULL, NULL);
bdrv_graph_rdunlock_main_loop();
} else {
ret = -ENOTSUP;
}
@ -2966,6 +2973,8 @@ static void bdrv_child_free(BdrvChild *child)
{
assert(!child->bs);
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
assert(!child->next.le_prev); /* not in children list */
g_free(child->name);
@ -3644,7 +3653,10 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
implicit_backing = !strcmp(bs->auto_backing_file, bs->backing_file);
}
bdrv_graph_rdlock_main_loop();
backing_filename = bdrv_get_full_backing_filename(bs, &local_err);
bdrv_graph_rdunlock_main_loop();
if (local_err) {
ret = -EINVAL;
error_propagate(errp, local_err);
@ -3675,7 +3687,9 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
}
if (implicit_backing) {
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(backing_hd);
bdrv_graph_rdunlock_main_loop();
pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
backing_hd->filename);
}
@ -4314,8 +4328,8 @@ static int bdrv_reset_options_allowed(BlockDriverState *bs,
/*
* Returns true if @child can be reached recursively from @bs
*/
static bool bdrv_recurse_has_child(BlockDriverState *bs,
BlockDriverState *child)
static bool GRAPH_RDLOCK
bdrv_recurse_has_child(BlockDriverState *bs, BlockDriverState *child)
{
BdrvChild *c;
@ -4356,15 +4370,12 @@ static bool bdrv_recurse_has_child(BlockDriverState *bs,
*
* To be called with bs->aio_context locked.
*/
static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
BlockDriverState *bs,
QDict *options,
const BdrvChildClass *klass,
BdrvChildRole role,
bool parent_is_format,
QDict *parent_options,
int parent_flags,
bool keep_old_opts)
static BlockReopenQueue * GRAPH_RDLOCK
bdrv_reopen_queue_child(BlockReopenQueue *bs_queue, BlockDriverState *bs,
QDict *options, const BdrvChildClass *klass,
BdrvChildRole role, bool parent_is_format,
QDict *parent_options, int parent_flags,
bool keep_old_opts)
{
assert(bs != NULL);
@ -4376,6 +4387,11 @@ static BlockReopenQueue *bdrv_reopen_queue_child(BlockReopenQueue *bs_queue,
GLOBAL_STATE_CODE();
/*
* Strictly speaking, draining is illegal under GRAPH_RDLOCK. We know that
* we've been called with bdrv_graph_rdlock_main_loop(), though, so it's ok
* in practice.
*/
bdrv_drained_begin(bs);
if (bs_queue == NULL) {
@ -4517,6 +4533,7 @@ BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
QDict *options, bool keep_old_opts)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
return bdrv_reopen_queue_child(bs_queue, bs, options, NULL, 0, false,
NULL, 0, keep_old_opts);
@ -4736,9 +4753,10 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
* Callers must make sure that their AioContext locking is still correct after
* this.
*/
static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
bool is_backing, Transaction *tran,
Error **errp)
static int GRAPH_UNLOCKED
bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
bool is_backing, Transaction *tran,
Error **errp)
{
BlockDriverState *bs = reopen_state->bs;
BlockDriverState *new_child_bs;
@ -4748,6 +4766,7 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
QObject *value;
const char *str;
AioContext *ctx, *old_ctx;
bool has_child;
int ret;
GLOBAL_STATE_CODE();
@ -4767,7 +4786,13 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
new_child_bs = bdrv_lookup_bs(NULL, str, errp);
if (new_child_bs == NULL) {
return -EINVAL;
} else if (bdrv_recurse_has_child(new_child_bs, bs)) {
}
bdrv_graph_rdlock_main_loop();
has_child = bdrv_recurse_has_child(new_child_bs, bs);
bdrv_graph_rdunlock_main_loop();
if (has_child) {
error_setg(errp, "Making '%s' a %s child of '%s' would create a "
"cycle", str, child_name, bs->node_name);
return -EINVAL;
@ -4866,9 +4891,9 @@ static int bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
* After calling this function, the transaction @change_child_tran may only be
* completed while holding a writer lock for the graph.
*/
static int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue,
Transaction *change_child_tran, Error **errp)
static int GRAPH_UNLOCKED
bdrv_reopen_prepare(BDRVReopenState *reopen_state, BlockReopenQueue *queue,
Transaction *change_child_tran, Error **errp)
{
int ret = -1;
int old_flags;
@ -4930,7 +4955,10 @@ static int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
* to r/w. Attempting to set to r/w may fail if either BDRV_O_ALLOW_RDWR is
* not set, or if the BDS still has copy_on_read enabled */
read_only = !(reopen_state->flags & BDRV_O_RDWR);
bdrv_graph_rdlock_main_loop();
ret = bdrv_can_set_read_only(reopen_state->bs, read_only, true, &local_err);
bdrv_graph_rdunlock_main_loop();
if (local_err) {
error_propagate(errp, local_err);
goto error;
@ -4953,7 +4981,9 @@ static int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
if (local_err != NULL) {
error_propagate(errp, local_err);
} else {
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(reopen_state->bs);
bdrv_graph_rdunlock_main_loop();
error_setg(errp, "failed while preparing to reopen image '%s'",
reopen_state->bs->filename);
}
@ -4962,9 +4992,11 @@ static int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
} else {
/* It is currently mandatory to have a bdrv_reopen_prepare()
* handler for each supported drv. */
bdrv_graph_rdlock_main_loop();
error_setg(errp, "Block format '%s' used by node '%s' "
"does not support reopening files", drv->format_name,
bdrv_get_device_or_node_name(reopen_state->bs));
bdrv_graph_rdunlock_main_loop();
ret = -1;
goto error;
}
@ -5010,6 +5042,8 @@ static int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
if (qdict_size(reopen_state->options)) {
const QDictEntry *entry = qdict_first(reopen_state->options);
GRAPH_RDLOCK_GUARD_MAINLOOP();
do {
QObject *new = entry->value;
QObject *old = qdict_get(reopen_state->bs->options, entry->key);
@ -5083,7 +5117,7 @@ error:
* makes them final by swapping the staging BlockDriverState contents into
* the active BlockDriverState contents.
*/
static void bdrv_reopen_commit(BDRVReopenState *reopen_state)
static void GRAPH_UNLOCKED bdrv_reopen_commit(BDRVReopenState *reopen_state)
{
BlockDriver *drv;
BlockDriverState *bs;
@ -5100,6 +5134,8 @@ static void bdrv_reopen_commit(BDRVReopenState *reopen_state)
drv->bdrv_reopen_commit(reopen_state);
}
GRAPH_RDLOCK_GUARD_MAINLOOP();
/* set BDS specific flags now */
qobject_unref(bs->explicit_options);
qobject_unref(bs->options);
@ -5121,9 +5157,7 @@ static void bdrv_reopen_commit(BDRVReopenState *reopen_state)
qdict_del(bs->explicit_options, "backing");
qdict_del(bs->options, "backing");
bdrv_graph_rdlock_main_loop();
bdrv_refresh_limits(bs, NULL, NULL);
bdrv_graph_rdunlock_main_loop();
bdrv_refresh_total_sectors(bs, bs->total_sectors);
}
@ -5131,7 +5165,7 @@ static void bdrv_reopen_commit(BDRVReopenState *reopen_state)
* Abort the reopen, and delete and free the staged changes in
* reopen_state
*/
static void bdrv_reopen_abort(BDRVReopenState *reopen_state)
static void GRAPH_UNLOCKED bdrv_reopen_abort(BDRVReopenState *reopen_state)
{
BlockDriver *drv;
@ -5918,6 +5952,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
bdrv_ref(top);
bdrv_drained_begin(base);
bdrv_graph_rdlock_main_loop();
if (!top->drv || !base->drv) {
goto exit;
@ -5942,11 +5977,9 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
backing_file_str = base->filename;
}
bdrv_graph_rdlock_main_loop();
QLIST_FOREACH(c, &top->parents, next_parent) {
updated_children = g_slist_prepend(updated_children, c);
}
bdrv_graph_rdunlock_main_loop();
/*
* It seems correct to pass detach_subchain=true here, but it triggers
@ -5992,6 +6025,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
ret = 0;
exit:
bdrv_graph_rdunlock_main_loop();
bdrv_drained_end(base);
bdrv_unref(top);
return ret;
@ -6282,6 +6316,7 @@ BlockDeviceInfoList *bdrv_named_nodes_list(bool flat,
BlockDriverState *bs;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
list = NULL;
QTAILQ_FOREACH(bs, &graph_bdrv_states, node_list) {
@ -6667,7 +6702,8 @@ void coroutine_fn bdrv_co_debug_event(BlockDriverState *bs, BlkdebugEvent event)
bs->drv->bdrv_co_debug_event(bs, event);
}
static BlockDriverState *bdrv_find_debug_node(BlockDriverState *bs)
static BlockDriverState * GRAPH_RDLOCK
bdrv_find_debug_node(BlockDriverState *bs)
{
GLOBAL_STATE_CODE();
while (bs && bs->drv && !bs->drv->bdrv_debug_breakpoint) {
@ -6686,6 +6722,8 @@ int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
const char *tag)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
bs = bdrv_find_debug_node(bs);
if (bs) {
return bs->drv->bdrv_debug_breakpoint(bs, event, tag);
@ -6697,6 +6735,8 @@ int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
bs = bdrv_find_debug_node(bs);
if (bs) {
return bs->drv->bdrv_debug_remove_breakpoint(bs, tag);
@ -6708,6 +6748,8 @@ int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag)
int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
while (bs && (!bs->drv || !bs->drv->bdrv_debug_resume)) {
bs = bdrv_primary_bs(bs);
}
@ -6722,6 +6764,8 @@ int bdrv_debug_resume(BlockDriverState *bs, const char *tag)
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
while (bs && bs->drv && !bs->drv->bdrv_debug_is_suspended) {
bs = bdrv_primary_bs(bs);
}
@ -6750,6 +6794,7 @@ BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
BlockDriverState *bs_below;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!bs || !bs->drv || !backing_file) {
return NULL;
@ -6961,6 +7006,7 @@ void bdrv_activate_all(Error **errp)
BdrvNextIterator it;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
AioContext *aio_context = bdrv_get_aio_context(bs);
@ -6976,7 +7022,8 @@ void bdrv_activate_all(Error **errp)
}
}
static bool bdrv_has_bds_parent(BlockDriverState *bs, bool only_active)
static bool GRAPH_RDLOCK
bdrv_has_bds_parent(BlockDriverState *bs, bool only_active)
{
BdrvChild *parent;
GLOBAL_STATE_CODE();
@ -6993,14 +7040,13 @@ static bool bdrv_has_bds_parent(BlockDriverState *bs, bool only_active)
return false;
}
static int bdrv_inactivate_recurse(BlockDriverState *bs)
static int GRAPH_RDLOCK bdrv_inactivate_recurse(BlockDriverState *bs)
{
BdrvChild *child, *parent;
int ret;
uint64_t cumulative_perms, cumulative_shared_perms;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!bs->drv) {
return -ENOMEDIUM;
@ -7066,6 +7112,7 @@ int bdrv_inactivate_all(void)
GSList *aio_ctxs = NULL, *ctx;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
AioContext *aio_context = bdrv_get_aio_context(bs);
@ -7205,6 +7252,7 @@ bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp)
{
BdrvOpBlocker *blocker;
GLOBAL_STATE_CODE();
assert((int) op >= 0 && op < BLOCK_OP_TYPE_MAX);
if (!QLIST_EMPTY(&bs->op_blockers[op])) {
blocker = QLIST_FIRST(&bs->op_blockers[op]);


@ -374,6 +374,7 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
assert(bs);
assert(target);
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
/* QMP interface protects us from these cases */
assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);


@ -780,11 +780,12 @@ BlockDriverState *blk_bs(BlockBackend *blk)
return blk->root ? blk->root->bs : NULL;
}
static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
static BlockBackend * GRAPH_RDLOCK bdrv_first_blk(BlockDriverState *bs)
{
BdrvChild *child;
GLOBAL_STATE_CODE();
assert_bdrv_graph_readable();
QLIST_FOREACH(child, &bs->parents, next_parent) {
if (child->klass == &child_root) {
@ -812,6 +813,8 @@ bool bdrv_is_root_node(BlockDriverState *bs)
BdrvChild *c;
GLOBAL_STATE_CODE();
assert_bdrv_graph_readable();
QLIST_FOREACH(c, &bs->parents, next_parent) {
if (c->klass != &child_root) {
return false;
@ -2259,6 +2262,7 @@ void blk_activate(BlockBackend *blk, Error **errp)
if (qemu_in_coroutine()) {
bdrv_co_activate(bs, errp);
} else {
GRAPH_RDLOCK_GUARD_MAINLOOP();
bdrv_activate(bs, errp);
}
}
@ -2384,6 +2388,7 @@ bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
BlockDriverState *bs = blk_bs(blk);
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!bs) {
return false;
@ -2901,6 +2906,8 @@ const BdrvChild *blk_root(BlockBackend *blk)
int blk_make_empty(BlockBackend *blk, Error **errp)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!blk_is_available(blk)) {
error_setg(errp, "No medium inserted");
return -ENOMEDIUM;


@ -106,7 +106,9 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
int ret;
/* No write support yet */
bdrv_graph_rdlock_main_loop();
ret = bdrv_apply_auto_read_only(bs, NULL, errp);
bdrv_graph_rdunlock_main_loop();
if (ret < 0) {
return ret;
}


@ -67,7 +67,9 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
uint32_t offsets_size, max_compressed_block_size = 1, i;
int ret;
bdrv_graph_rdlock_main_loop();
ret = bdrv_apply_auto_read_only(bs, NULL, errp);
bdrv_graph_rdunlock_main_loop();
if (ret < 0) {
return ret;
}


@ -434,6 +434,7 @@ int bdrv_commit(BlockDriverState *bs)
Error *local_err = NULL;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!drv)
return -ENOMEDIUM;


@ -305,7 +305,7 @@ cbw_co_snapshot_block_status(BlockDriverState *bs,
return -EACCES;
}
ret = bdrv_block_status(child->bs, offset, cur_bytes, pnum, map, file);
ret = bdrv_co_block_status(child->bs, offset, cur_bytes, pnum, map, file);
if (child == s->target) {
/*
* We refer to s->target only for areas that we've written to it.


@ -146,11 +146,11 @@ cor_co_preadv_part(BlockDriverState *bs, int64_t offset, int64_t bytes,
local_flags = flags;
/* In case of failure, try to copy-on-read anyway */
ret = bdrv_is_allocated(bs->file->bs, offset, bytes, &n);
ret = bdrv_co_is_allocated(bs->file->bs, offset, bytes, &n);
if (ret <= 0) {
ret = bdrv_is_allocated_above(bdrv_backing_chain_next(bs->file->bs),
state->bottom_bs, true, offset,
n, &n);
ret = bdrv_co_is_allocated_above(bdrv_backing_chain_next(bs->file->bs),
state->bottom_bs, true, offset,
n, &n);
if (ret > 0 || ret < 0) {
local_flags |= BDRV_REQ_COPY_ON_READ;
}


@ -828,7 +828,7 @@ block_crypto_amend_options_generic_luks(BlockDriverState *bs,
errp);
}
static int
static int GRAPH_RDLOCK
block_crypto_amend_options_luks(BlockDriverState *bs,
QemuOpts *opts,
BlockDriverAmendStatusCB *status_cb,
@ -841,8 +841,6 @@ block_crypto_amend_options_luks(BlockDriverState *bs,
QCryptoBlockAmendOptions *amend_options = NULL;
int ret = -EINVAL;
assume_graph_lock(); /* FIXME */
assert(crypto);
assert(crypto->block);


@ -696,8 +696,10 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags,
const char *protocol_delimiter;
int ret;
bdrv_graph_rdlock_main_loop();
ret = bdrv_apply_auto_read_only(bs, "curl driver does not support writes",
errp);
bdrv_graph_rdunlock_main_loop();
if (ret < 0) {
return ret;
}


@ -452,7 +452,9 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
int64_t offset;
int ret;
bdrv_graph_rdlock_main_loop();
ret = bdrv_apply_auto_read_only(bs, NULL, errp);
bdrv_graph_rdunlock_main_loop();
if (ret < 0) {
return ret;
}


@ -83,6 +83,8 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
uint64_t perm;
int ret;
GLOBAL_STATE_CODE();
if (!id_wellformed(export->id)) {
error_setg(errp, "Invalid block export id");
return NULL;
@ -145,7 +147,9 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp)
* access since the export could be available before migration handover.
* ctx was acquired in the caller.
*/
bdrv_graph_rdlock_main_loop();
bdrv_activate(bs, NULL);
bdrv_graph_rdunlock_main_loop();
perm = BLK_PERM_CONSISTENT_READ;
if (export->writable) {


@ -863,11 +863,13 @@ static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
if (ret == -EACCES || ret == -EROFS) {
/* Try to degrade to read-only, but if it doesn't work, still use the
* normal error message. */
bdrv_graph_rdlock_main_loop();
if (bdrv_apply_auto_read_only(bs, NULL, NULL) == 0) {
open_flags = (open_flags & ~O_RDWR) | O_RDONLY;
s->fd = glfs_open(s->glfs, gconf->path, open_flags);
ret = s->fd ? 0 : -errno;
}
bdrv_graph_rdunlock_main_loop();
}
s->supports_seek_data = qemu_gluster_test_seek(s->fd);


@ -106,12 +106,13 @@ static uint32_t reader_count(void)
return rd;
}
void bdrv_graph_wrlock(BlockDriverState *bs)
void no_coroutine_fn bdrv_graph_wrlock(BlockDriverState *bs)
{
AioContext *ctx = NULL;
GLOBAL_STATE_CODE();
assert(!qatomic_read(&has_writer));
assert(!qemu_in_coroutine());
/*
* Release only non-mainloop AioContext. The mainloop often relies on the


@ -42,13 +42,18 @@
/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
static void bdrv_parent_cb_resize(BlockDriverState *bs);
static void coroutine_fn GRAPH_RDLOCK
bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int64_t bytes, BdrvRequestFlags flags);
static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
static void GRAPH_RDLOCK
bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
BdrvChild *c, *next;
IO_OR_GS_CODE();
assert_bdrv_graph_readable();
QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
if (c == ignore) {
@ -70,9 +75,12 @@ void bdrv_parent_drained_end_single(BdrvChild *c)
}
}
static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
static void GRAPH_RDLOCK
bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
BdrvChild *c;
IO_OR_GS_CODE();
assert_bdrv_graph_readable();
QLIST_FOREACH(c, &bs->parents, next_parent) {
if (c == ignore) {
@ -84,17 +92,22 @@ static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
IO_OR_GS_CODE();
if (c->klass->drained_poll) {
return c->klass->drained_poll(c);
}
return false;
}
static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
bool ignore_bds_parents)
static bool GRAPH_RDLOCK
bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
bool ignore_bds_parents)
{
BdrvChild *c, *next;
bool busy = false;
IO_OR_GS_CODE();
assert_bdrv_graph_readable();
QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
@ -114,6 +127,7 @@ void bdrv_parent_drained_begin_single(BdrvChild *c)
c->quiesced_parent = true;
if (c->klass->drained_begin) {
/* called with rdlock taken, but it doesn't really need it. */
c->klass->drained_begin(c);
}
}
@ -263,6 +277,9 @@ bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
BdrvChild *ignore_parent)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
return bdrv_drain_poll(bs, ignore_parent, false);
}
@ -362,6 +379,7 @@ static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
/* Stop things in parent-to-child order */
if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
GRAPH_RDLOCK_GUARD_MAINLOOP();
bdrv_parent_drained_begin(bs, parent);
if (bs->drv && bs->drv->bdrv_drain_begin) {
bs->drv->bdrv_drain_begin(bs);
@ -408,12 +426,16 @@ static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
bdrv_co_yield_to_drain(bs, false, parent, false);
return;
}
/* At this point, we should be always running in the main loop. */
GLOBAL_STATE_CODE();
assert(bs->quiesce_counter > 0);
GLOBAL_STATE_CODE();
/* Re-enable things in child-to-parent order */
old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
if (old_quiesce_counter == 1) {
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bs->drv && bs->drv->bdrv_drain_end) {
bs->drv->bdrv_drain_end(bs);
}
@ -437,6 +459,8 @@ void bdrv_drain(BlockDriverState *bs)
static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
BdrvChild *child, *next;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
assert(qatomic_read(&bs->in_flight) == 0);
QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
@ -450,7 +474,9 @@ static bool bdrv_drain_all_poll(void)
{
BlockDriverState *bs = NULL;
bool result = false;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
/* bdrv_drain_poll() can't make changes to the graph and we are holding the
* main AioContext lock, so iterating bdrv_next_all_states() is safe. */
@ -1223,8 +1249,8 @@ bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
ret = 1; /* "already allocated", so nothing will be copied */
pnum = MIN(align_bytes, max_transfer);
} else {
ret = bdrv_is_allocated(bs, align_offset,
MIN(align_bytes, max_transfer), &pnum);
ret = bdrv_co_is_allocated(bs, align_offset,
MIN(align_bytes, max_transfer), &pnum);
if (ret < 0) {
/*
* Safe to treat errors in querying allocation as if
@ -1371,7 +1397,7 @@ bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
/* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
flags &= ~BDRV_REQ_COPY_ON_READ;
ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
if (ret < 0) {
goto out;
}
@ -2003,7 +2029,7 @@ bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
}
}
static inline void coroutine_fn
static inline void coroutine_fn GRAPH_RDLOCK
bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
BdrvTrackedRequest *req, int ret)
{
@ -2330,6 +2356,7 @@ int bdrv_flush_all(void)
int result = 0;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
/*
* bdrv queue is managed by record/replay,
@ -2383,9 +2410,9 @@ int bdrv_flush_all(void)
* set to the host mapping and BDS corresponding to the guest offset.
*/
static int coroutine_fn GRAPH_RDLOCK
bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map, BlockDriverState **file)
bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map, BlockDriverState **file)
{
int64_t total_size;
int64_t n; /* bytes */
@ -2544,8 +2571,8 @@ bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
if (ret & BDRV_BLOCK_RAW) {
assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
ret = bdrv_co_block_status(local_file, want_zero, local_map,
*pnum, pnum, &local_map, &local_file);
ret = bdrv_co_do_block_status(local_file, want_zero, local_map,
*pnum, pnum, &local_map, &local_file);
goto out;
}
@ -2572,8 +2599,8 @@ bdrv_co_block_status(BlockDriverState *bs, bool want_zero,
int64_t file_pnum;
int ret2;
ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
*pnum, &file_pnum, NULL, NULL);
ret2 = bdrv_co_do_block_status(local_file, want_zero, local_map,
*pnum, &file_pnum, NULL, NULL);
if (ret2 >= 0) {
/* Ignore errors. This is just providing extra information, it
* is useful but not necessary.
@ -2640,7 +2667,8 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
return 0;
}
ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
ret = bdrv_co_do_block_status(bs, want_zero, offset, bytes, pnum,
map, file);
++*depth;
if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
return ret;
@ -2656,8 +2684,8 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
p = bdrv_filter_or_cow_bs(p))
{
ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
file);
ret = bdrv_co_do_block_status(p, want_zero, offset, bytes, pnum,
map, file);
++*depth;
if (ret < 0) {
return ret;
@ -2723,21 +2751,13 @@ int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
bytes, pnum, map, file, NULL);
}
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
int64_t offset, int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file)
int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset,
int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file)
{
IO_CODE();
return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
pnum, map, file, NULL);
}
int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map, BlockDriverState **file)
{
IO_CODE();
return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
offset, bytes, pnum, map, file);
return bdrv_co_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
offset, bytes, pnum, map, file);
}
/*
@ -2784,45 +2804,6 @@ int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
return !!(ret & BDRV_BLOCK_ALLOCATED);
}
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *pnum)
{
int ret;
int64_t dummy;
IO_CODE();
ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
bytes, pnum ? pnum : &dummy, NULL,
NULL, NULL);
if (ret < 0) {
return ret;
}
return !!(ret & BDRV_BLOCK_ALLOCATED);
}
/* See bdrv_is_allocated_above for documentation */
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
BlockDriverState *base,
bool include_base, int64_t offset,
int64_t bytes, int64_t *pnum)
{
int depth;
int ret;
IO_CODE();
ret = bdrv_co_common_block_status_above(top, base, include_base, false,
offset, bytes, pnum, NULL, NULL,
&depth);
if (ret < 0) {
return ret;
}
if (ret & BDRV_BLOCK_ALLOCATED) {
return depth;
}
return 0;
}
/*
* Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
*
@ -2840,18 +2821,18 @@ int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
* words, the result is not necessarily the maximum possible range);
* but 'pnum' will only be 0 when end of file is reached.
*/
int bdrv_is_allocated_above(BlockDriverState *top,
BlockDriverState *base,
bool include_base, int64_t offset,
int64_t bytes, int64_t *pnum)
int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs,
BlockDriverState *base,
bool include_base, int64_t offset,
int64_t bytes, int64_t *pnum)
{
int depth;
int ret;
IO_CODE();
ret = bdrv_common_block_status_above(top, base, include_base, false,
offset, bytes, pnum, NULL, NULL,
&depth);
ret = bdrv_co_common_block_status_above(bs, base, include_base, false,
offset, bytes, pnum, NULL, NULL,
&depth);
if (ret < 0) {
return ret;
}
@ -3551,9 +3532,13 @@ int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
bytes, read_flags, write_flags);
}
static void bdrv_parent_cb_resize(BlockDriverState *bs)
static void coroutine_fn GRAPH_RDLOCK
bdrv_parent_cb_resize(BlockDriverState *bs)
{
BdrvChild *c;
assert_bdrv_graph_readable();
QLIST_FOREACH(c, &bs->parents, next_parent) {
if (c->klass->resize) {
c->klass->resize(c);


@ -1925,7 +1925,9 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
/* Check the write protect flag of the LUN if we want to write */
if (iscsilun->type == TYPE_DISK && (flags & BDRV_O_RDWR) &&
iscsilun->write_protected) {
bdrv_graph_rdlock_main_loop();
ret = bdrv_apply_auto_read_only(bs, "LUN is write protected", errp);
bdrv_graph_rdunlock_main_loop();
if (ret < 0) {
goto out;
}


@ -559,9 +559,9 @@ static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
assert(!(offset % s->granularity));
WITH_GRAPH_RDLOCK_GUARD() {
ret = bdrv_block_status_above(source, NULL, offset,
nb_chunks * s->granularity,
&io_bytes, NULL, NULL);
ret = bdrv_co_block_status_above(source, NULL, offset,
nb_chunks * s->granularity,
&io_bytes, NULL, NULL);
}
if (ret < 0) {
io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
@ -879,8 +879,8 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
}
WITH_GRAPH_RDLOCK_GUARD() {
ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset,
bytes, &count);
ret = bdrv_co_is_allocated_above(bs, s->base_overlay, true, offset,
bytes, &count);
}
if (ret < 0) {
return ret;


@ -144,6 +144,9 @@ void hmp_drive_del(Monitor *mon, const QDict *qdict)
AioContext *aio_context;
Error *local_err = NULL;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
bs = bdrv_find_node(id);
if (bs) {
qmp_blockdev_del(id, &local_err);
@ -896,6 +899,8 @@ void hmp_info_snapshots(Monitor *mon, const QDict *qdict)
SnapshotEntry *snapshot_entry;
Error *err = NULL;
GRAPH_RDLOCK_GUARD_MAINLOOP();
bs = bdrv_all_find_vmstate_bs(NULL, false, NULL, &err);
if (!bs) {
error_report_err(err);


@ -275,7 +275,8 @@ static bool nbd_client_will_reconnect(BDRVNBDState *s)
* Return failure if the server's advertised options are incompatible with the
* client's needs.
*/
static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
static int coroutine_fn GRAPH_RDLOCK
nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
{
BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
int ret;


@ -843,7 +843,7 @@ static void nfs_refresh_filename(BlockDriverState *bs)
}
}
static char *nfs_dirname(BlockDriverState *bs, Error **errp)
static char * GRAPH_RDLOCK nfs_dirname(BlockDriverState *bs, Error **errp)
{
NFSClient *client = bs->opaque;


@ -1363,9 +1363,12 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
bitmap_new(DIV_ROUND_UP(s->header_size, s->bat_dirty_block));
/* Disable migration until bdrv_activate method is added */
bdrv_graph_rdlock_main_loop();
error_setg(&s->migration_blocker, "The Parallels format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
bdrv_graph_rdunlock_main_loop();
ret = migrate_add_blocker(s->migration_blocker, errp);
if (ret < 0) {
error_setg(errp, "Migration blocker error");


@ -169,14 +169,16 @@ void qmp_blockdev_close_tray(const char *device,
}
}
static void blockdev_remove_medium(const char *device, const char *id,
Error **errp)
static void GRAPH_UNLOCKED
blockdev_remove_medium(const char *device, const char *id, Error **errp)
{
BlockBackend *blk;
BlockDriverState *bs;
AioContext *aio_context;
bool has_attached_device;
GLOBAL_STATE_CODE();
blk = qmp_get_blk(device, id, errp);
if (!blk) {
return;
@ -205,9 +207,12 @@ static void blockdev_remove_medium(const char *device, const char *id,
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
bdrv_graph_rdlock_main_loop();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_EJECT, errp)) {
bdrv_graph_rdunlock_main_loop();
goto out;
}
bdrv_graph_rdunlock_main_loop();
blk_remove_bs(blk);
@ -279,6 +284,8 @@ static void blockdev_insert_medium(const char *device, const char *id,
BlockBackend *blk;
BlockDriverState *bs;
GRAPH_RDLOCK_GUARD_MAINLOOP();
blk = qmp_get_blk(device, id, errp);
if (!blk) {
return;


@ -225,9 +225,8 @@ int bdrv_query_snapshot_info_list(BlockDriverState *bs,
* Helper function for other query info functions. Store information about @bs
* in @info, setting @errp on error.
*/
static void bdrv_do_query_node_info(BlockDriverState *bs,
BlockNodeInfo *info,
Error **errp)
static void GRAPH_RDLOCK
bdrv_do_query_node_info(BlockDriverState *bs, BlockNodeInfo *info, Error **errp)
{
int64_t size;
const char *backing_filename;
@ -423,8 +422,8 @@ fail:
}
/* @p_info will be set only on success. */
static void bdrv_query_info(BlockBackend *blk, BlockInfo **p_info,
Error **errp)
static void GRAPH_RDLOCK
bdrv_query_info(BlockBackend *blk, BlockInfo **p_info, Error **errp)
{
BlockInfo *info = g_malloc0(sizeof(*info));
BlockDriverState *bs = blk_bs(blk);
@ -672,6 +671,8 @@ BlockInfoList *qmp_query_block(Error **errp)
BlockBackend *blk;
Error *local_err = NULL;
GRAPH_RDLOCK_GUARD_MAINLOOP();
for (blk = blk_all_next(NULL); blk; blk = blk_all_next(blk)) {
BlockInfoList *info;


@ -301,9 +301,12 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
}
/* Disable migration when qcow images are used */
bdrv_graph_rdlock_main_loop();
error_setg(&s->migration_blocker, "The qcow format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
bdrv_graph_rdunlock_main_loop();
ret = migrate_add_blocker(s->migration_blocker, errp);
if (ret < 0) {
error_free(s->migration_blocker);


@ -156,10 +156,9 @@ static int64_t get_bitmap_bytes_needed(int64_t len, uint32_t granularity)
return DIV_ROUND_UP(num_bits, 8);
}
static int check_constraints_on_bitmap(BlockDriverState *bs,
const char *name,
uint32_t granularity,
Error **errp)
static int GRAPH_RDLOCK
check_constraints_on_bitmap(BlockDriverState *bs, const char *name,
uint32_t granularity, Error **errp)
{
BDRVQcow2State *s = bs->opaque;
int granularity_bits = ctz32(granularity);
@ -204,8 +203,9 @@ static int check_constraints_on_bitmap(BlockDriverState *bs,
return 0;
}
static void clear_bitmap_table(BlockDriverState *bs, uint64_t *bitmap_table,
uint32_t bitmap_table_size)
static void GRAPH_RDLOCK
clear_bitmap_table(BlockDriverState *bs, uint64_t *bitmap_table,
uint32_t bitmap_table_size)
{
BDRVQcow2State *s = bs->opaque;
int i;
@ -259,7 +259,8 @@ fail:
return ret;
}
static int free_bitmap_clusters(BlockDriverState *bs, Qcow2BitmapTable *tb)
static int GRAPH_RDLOCK
free_bitmap_clusters(BlockDriverState *bs, Qcow2BitmapTable *tb)
{
int ret;
uint64_t *bitmap_table;
@ -730,8 +731,9 @@ out:
* Store bitmap list to qcow2 image as a bitmap directory.
* Everything is checked.
*/
static int bitmap_list_store(BlockDriverState *bs, Qcow2BitmapList *bm_list,
uint64_t *offset, uint64_t *size, bool in_place)
static int GRAPH_RDLOCK
bitmap_list_store(BlockDriverState *bs, Qcow2BitmapList *bm_list,
uint64_t *offset, uint64_t *size, bool in_place)
{
int ret;
uint8_t *dir;
@ -829,8 +831,9 @@ fail:
* Bitmap List end
*/
static int update_ext_header_and_dir_in_place(BlockDriverState *bs,
Qcow2BitmapList *bm_list)
static int GRAPH_RDLOCK
update_ext_header_and_dir_in_place(BlockDriverState *bs,
Qcow2BitmapList *bm_list)
{
BDRVQcow2State *s = bs->opaque;
int ret;
@ -877,8 +880,8 @@ static int update_ext_header_and_dir_in_place(BlockDriverState *bs,
*/
}
static int update_ext_header_and_dir(BlockDriverState *bs,
Qcow2BitmapList *bm_list)
static int GRAPH_RDLOCK
update_ext_header_and_dir(BlockDriverState *bs, Qcow2BitmapList *bm_list)
{
BDRVQcow2State *s = bs->opaque;
int ret;
@ -1271,9 +1274,9 @@ out:
/* store_bitmap_data()
* Store bitmap to image, filling bitmap table accordingly.
*/
static uint64_t *store_bitmap_data(BlockDriverState *bs,
BdrvDirtyBitmap *bitmap,
uint32_t *bitmap_table_size, Error **errp)
static uint64_t * GRAPH_RDLOCK
store_bitmap_data(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
uint32_t *bitmap_table_size, Error **errp)
{
int ret;
BDRVQcow2State *s = bs->opaque;
@ -1370,7 +1373,8 @@ fail:
* Store bm->dirty_bitmap to qcow2.
* Set bm->table_offset and bm->table_size accordingly.
*/
static int store_bitmap(BlockDriverState *bs, Qcow2Bitmap *bm, Error **errp)
static int GRAPH_RDLOCK
store_bitmap(BlockDriverState *bs, Qcow2Bitmap *bm, Error **errp)
{
int ret;
uint64_t *tb;


@ -163,7 +163,8 @@ int qcow2_cache_destroy(Qcow2Cache *c)
return 0;
}
static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
static int GRAPH_RDLOCK
qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
{
int ret;
@ -178,7 +179,8 @@ static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
return 0;
}
static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
static int GRAPH_RDLOCK
qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
{
BDRVQcow2State *s = bs->opaque;
int ret = 0;
@ -318,8 +320,9 @@ int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c)
return 0;
}
static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
uint64_t offset, void **table, bool read_from_disk)
static int GRAPH_RDLOCK
qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
void **table, bool read_from_disk)
{
BDRVQcow2State *s = bs->opaque;
int i;


@ -207,8 +207,9 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
* the cache is used; otherwise the L2 slice is loaded from the image
* file.
*/
static int l2_load(BlockDriverState *bs, uint64_t offset,
uint64_t l2_offset, uint64_t **l2_slice)
static int GRAPH_RDLOCK
l2_load(BlockDriverState *bs, uint64_t offset,
uint64_t l2_offset, uint64_t **l2_slice)
{
BDRVQcow2State *s = bs->opaque;
int start_of_slice = l2_entry_size(s) *
@ -269,7 +270,7 @@ int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
*
*/
static int l2_allocate(BlockDriverState *bs, int l1_index)
static int GRAPH_RDLOCK l2_allocate(BlockDriverState *bs, int l1_index)
{
BDRVQcow2State *s = bs->opaque;
uint64_t old_l2_offset;
@ -751,9 +752,9 @@ fail:
*
* Returns 0 on success, -errno in failure case
*/
static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
uint64_t **new_l2_slice,
int *new_l2_index)
static int GRAPH_RDLOCK
get_cluster_table(BlockDriverState *bs, uint64_t offset,
uint64_t **new_l2_slice, int *new_l2_index)
{
BDRVQcow2State *s = bs->opaque;
unsigned int l2_index;
@ -1155,11 +1156,10 @@ void coroutine_fn qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m)
*
* Returns 0 on success, -errno on failure.
*/
static int coroutine_fn calculate_l2_meta(BlockDriverState *bs,
uint64_t host_cluster_offset,
uint64_t guest_offset, unsigned bytes,
uint64_t *l2_slice, QCowL2Meta **m,
bool keep_old)
static int coroutine_fn GRAPH_RDLOCK
calculate_l2_meta(BlockDriverState *bs, uint64_t host_cluster_offset,
uint64_t guest_offset, unsigned bytes, uint64_t *l2_slice,
QCowL2Meta **m, bool keep_old)
{
BDRVQcow2State *s = bs->opaque;
int sc_index, l2_index = offset_to_l2_slice_index(s, guest_offset);
@ -1490,9 +1490,9 @@ static int coroutine_fn handle_dependencies(BlockDriverState *bs,
*
* -errno: in error cases
*/
static int coroutine_fn handle_copied(BlockDriverState *bs,
uint64_t guest_offset, uint64_t *host_offset, uint64_t *bytes,
QCowL2Meta **m)
static int coroutine_fn GRAPH_RDLOCK
handle_copied(BlockDriverState *bs, uint64_t guest_offset,
uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
BDRVQcow2State *s = bs->opaque;
int l2_index;
@ -1600,10 +1600,9 @@ out:
* function has been waiting for another request and the allocation must be
* restarted, but the whole request should not be failed.
*/
static int coroutine_fn do_alloc_cluster_offset(BlockDriverState *bs,
uint64_t guest_offset,
uint64_t *host_offset,
uint64_t *nb_clusters)
static int coroutine_fn GRAPH_RDLOCK
do_alloc_cluster_offset(BlockDriverState *bs, uint64_t guest_offset,
uint64_t *host_offset, uint64_t *nb_clusters)
{
BDRVQcow2State *s = bs->opaque;
@ -1658,9 +1657,9 @@ static int coroutine_fn do_alloc_cluster_offset(BlockDriverState *bs,
*
* -errno: in error cases
*/
static int coroutine_fn handle_alloc(BlockDriverState *bs,
uint64_t guest_offset, uint64_t *host_offset, uint64_t *bytes,
QCowL2Meta **m)
static int coroutine_fn GRAPH_RDLOCK
handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
uint64_t *host_offset, uint64_t *bytes, QCowL2Meta **m)
{
BDRVQcow2State *s = bs->opaque;
int l2_index;
@ -1898,9 +1897,9 @@ again:
* all clusters in the same L2 slice) and returns the number of discarded
* clusters.
*/
static int discard_in_l2_slice(BlockDriverState *bs, uint64_t offset,
uint64_t nb_clusters,
enum qcow2_discard_type type, bool full_discard)
static int GRAPH_RDLOCK
discard_in_l2_slice(BlockDriverState *bs, uint64_t offset, uint64_t nb_clusters,
enum qcow2_discard_type type, bool full_discard)
{
BDRVQcow2State *s = bs->opaque;
uint64_t *l2_slice;
@ -2037,7 +2036,7 @@ fail:
* all clusters in the same L2 slice) and returns the number of zeroed
* clusters.
*/
static int coroutine_fn
static int coroutine_fn GRAPH_RDLOCK
zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
uint64_t nb_clusters, int flags)
{
@ -2093,7 +2092,7 @@ zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
return nb_clusters;
}
static int coroutine_fn
static int coroutine_fn GRAPH_RDLOCK
zero_l2_subclusters(BlockDriverState *bs, uint64_t offset,
unsigned nb_subclusters)
{
@ -2231,11 +2230,12 @@ fail:
* status_cb(). l1_entries contains the total number of L1 entries and
* *visited_l1_entries counts all visited L1 entries.
*/
static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
int l1_size, int64_t *visited_l1_entries,
int64_t l1_entries,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque)
static int GRAPH_RDLOCK
expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
int l1_size, int64_t *visited_l1_entries,
int64_t l1_entries,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque)
{
BDRVQcow2State *s = bs->opaque;
bool is_active_l1 = (l1_table == s->l1_table);


@ -229,9 +229,9 @@ static void set_refcount_ro6(void *refcount_array, uint64_t index,
}
static int load_refcount_block(BlockDriverState *bs,
int64_t refcount_block_offset,
void **refcount_block)
static int GRAPH_RDLOCK
load_refcount_block(BlockDriverState *bs, int64_t refcount_block_offset,
void **refcount_block)
{
BDRVQcow2State *s = bs->opaque;
@ -302,8 +302,9 @@ static int in_same_refcount_block(BDRVQcow2State *s, uint64_t offset_a,
*
* Returns 0 on success or -errno in error case
*/
static int alloc_refcount_block(BlockDriverState *bs,
int64_t cluster_index, void **refcount_block)
static int GRAPH_RDLOCK
alloc_refcount_block(BlockDriverState *bs, int64_t cluster_index,
void **refcount_block)
{
BDRVQcow2State *s = bs->opaque;
unsigned int refcount_table_index;
@ -806,12 +807,9 @@ found:
/* XXX: cache several refcount block clusters ? */
/* @addend is the absolute value of the addend; if @decrease is set, @addend
* will be subtracted from the current refcount, otherwise it will be added */
static int update_refcount(BlockDriverState *bs,
int64_t offset,
int64_t length,
uint64_t addend,
bool decrease,
enum qcow2_discard_type type)
static int GRAPH_RDLOCK
update_refcount(BlockDriverState *bs, int64_t offset, int64_t length,
uint64_t addend, bool decrease, enum qcow2_discard_type type)
{
BDRVQcow2State *s = bs->opaque;
int64_t start, last, cluster_offset;
@ -967,8 +965,8 @@ int qcow2_update_cluster_refcount(BlockDriverState *bs,
/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size,
uint64_t max)
static int64_t GRAPH_RDLOCK
alloc_clusters_noref(BlockDriverState *bs, uint64_t size, uint64_t max)
{
BDRVQcow2State *s = bs->opaque;
uint64_t i, nb_clusters, refcount;
@ -2302,7 +2300,7 @@ calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
* Compares the actual reference count for each cluster in the image against the
* refcount as reported by the refcount structures on-disk.
*/
static void coroutine_fn
static void coroutine_fn GRAPH_RDLOCK
compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix, bool *rebuild,
int64_t *highest_cluster,
@ -3103,20 +3101,22 @@ int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
*
* @allocated should be set to true if a new cluster has been allocated.
*/
typedef int (RefblockFinishOp)(BlockDriverState *bs, uint64_t **reftable,
uint64_t reftable_index, uint64_t *reftable_size,
void *refblock, bool refblock_empty,
bool *allocated, Error **errp);
typedef int /* GRAPH_RDLOCK_PTR */
(RefblockFinishOp)(BlockDriverState *bs, uint64_t **reftable,
uint64_t reftable_index, uint64_t *reftable_size,
void *refblock, bool refblock_empty,
bool *allocated, Error **errp);
/**
* This "operation" for walk_over_reftable() allocates the refblock on disk (if
* it is not empty) and inserts its offset into the new reftable. The size of
* this new reftable is increased as required.
*/
static int alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
uint64_t reftable_index, uint64_t *reftable_size,
void *refblock, bool refblock_empty, bool *allocated,
Error **errp)
static int GRAPH_RDLOCK
alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
uint64_t reftable_index, uint64_t *reftable_size,
void *refblock, bool refblock_empty, bool *allocated,
Error **errp)
{
BDRVQcow2State *s = bs->opaque;
int64_t offset;
@ -3166,10 +3166,11 @@ static int alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
* offset specified by the new reftable's entry. It does not modify the new
* reftable or change any refcounts.
*/
static int flush_refblock(BlockDriverState *bs, uint64_t **reftable,
uint64_t reftable_index, uint64_t *reftable_size,
void *refblock, bool refblock_empty, bool *allocated,
Error **errp)
static int GRAPH_RDLOCK
flush_refblock(BlockDriverState *bs, uint64_t **reftable,
uint64_t reftable_index, uint64_t *reftable_size,
void *refblock, bool refblock_empty, bool *allocated,
Error **errp)
{
BDRVQcow2State *s = bs->opaque;
int64_t offset;
@ -3210,16 +3211,17 @@ static int flush_refblock(BlockDriverState *bs, uint64_t **reftable,
*
* @allocated is set to true if a new cluster has been allocated.
*/
static int walk_over_reftable(BlockDriverState *bs, uint64_t **new_reftable,
uint64_t *new_reftable_index,
uint64_t *new_reftable_size,
void *new_refblock, int new_refblock_size,
int new_refcount_bits,
RefblockFinishOp *operation, bool *allocated,
Qcow2SetRefcountFunc *new_set_refcount,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque, int index, int total,
Error **errp)
static int GRAPH_RDLOCK
walk_over_reftable(BlockDriverState *bs, uint64_t **new_reftable,
uint64_t *new_reftable_index,
uint64_t *new_reftable_size,
void *new_refblock, int new_refblock_size,
int new_refcount_bits,
RefblockFinishOp *operation, bool *allocated,
Qcow2SetRefcountFunc *new_set_refcount,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque, int index, int total,
Error **errp)
{
BDRVQcow2State *s = bs->opaque;
uint64_t reftable_index;
@ -3545,8 +3547,8 @@ done:
return ret;
}
static int64_t coroutine_fn get_refblock_offset(BlockDriverState *bs,
uint64_t offset)
static int64_t coroutine_fn GRAPH_RDLOCK
get_refblock_offset(BlockDriverState *bs, uint64_t offset)
{
BDRVQcow2State *s = bs->opaque;
uint32_t index = offset_to_reftable_index(s, offset);
@ -3565,7 +3567,7 @@ static int64_t coroutine_fn get_refblock_offset(BlockDriverState *bs,
return covering_refblock_offset;
}
static int coroutine_fn
static int coroutine_fn GRAPH_RDLOCK
qcow2_discard_refcount_block(BlockDriverState *bs, uint64_t discard_block_offs)
{
BDRVQcow2State *s = bs->opaque;


@ -536,7 +536,7 @@ int qcow2_mark_dirty(BlockDriverState *bs)
* function when there are no pending requests, it does not guard against
* concurrent requests dirtying the image.
*/
static int qcow2_mark_clean(BlockDriverState *bs)
static int GRAPH_RDLOCK qcow2_mark_clean(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
@ -570,7 +570,8 @@ int qcow2_mark_corrupt(BlockDriverState *bs)
* Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
* before if necessary.
*/
static int coroutine_fn qcow2_mark_consistent(BlockDriverState *bs)
static int coroutine_fn GRAPH_RDLOCK
qcow2_mark_consistent(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
@ -980,10 +981,9 @@ typedef struct Qcow2ReopenState {
QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */
} Qcow2ReopenState;
static int qcow2_update_options_prepare(BlockDriverState *bs,
Qcow2ReopenState *r,
QDict *options, int flags,
Error **errp)
static int GRAPH_RDLOCK
qcow2_update_options_prepare(BlockDriverState *bs, Qcow2ReopenState *r,
QDict *options, int flags, Error **errp)
{
BDRVQcow2State *s = bs->opaque;
QemuOpts *opts = NULL;
@ -1260,7 +1260,7 @@ static void qcow2_update_options_abort(BlockDriverState *bs,
qapi_free_QCryptoBlockOpenOptions(r->crypto_opts);
}
static int coroutine_fn
static int coroutine_fn GRAPH_RDLOCK
qcow2_update_options(BlockDriverState *bs, QDict *options, int flags,
Error **errp)
{
@ -1969,13 +1969,17 @@ static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
bs->bl.pdiscard_alignment = s->cluster_size;
}
static int qcow2_reopen_prepare(BDRVReopenState *state,
BlockReopenQueue *queue, Error **errp)
static int GRAPH_UNLOCKED
qcow2_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue,
Error **errp)
{
BDRVQcow2State *s = state->bs->opaque;
Qcow2ReopenState *r;
int ret;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
r = g_new0(Qcow2ReopenState, 1);
state->opaque = r;
@ -2038,6 +2042,8 @@ static void qcow2_reopen_commit(BDRVReopenState *state)
static void qcow2_reopen_commit_post(BDRVReopenState *state)
{
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (state->flags & BDRV_O_RDWR) {
Error *local_err = NULL;
@ -2731,7 +2737,7 @@ fail_nometa:
return ret;
}
static int qcow2_inactivate(BlockDriverState *bs)
static int GRAPH_RDLOCK qcow2_inactivate(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
int ret, result = 0;
@ -2766,7 +2772,8 @@ static int qcow2_inactivate(BlockDriverState *bs)
return result;
}
static void qcow2_do_close(BlockDriverState *bs, bool close_data_file)
static void coroutine_mixed_fn GRAPH_RDLOCK
qcow2_do_close(BlockDriverState *bs, bool close_data_file)
{
BDRVQcow2State *s = bs->opaque;
qemu_vfree(s->l1_table);
@ -2793,18 +2800,24 @@ static void qcow2_do_close(BlockDriverState *bs, bool close_data_file)
g_free(s->image_backing_format);
if (close_data_file && has_data_file(bs)) {
GLOBAL_STATE_CODE();
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_unref_child(bs, s->data_file);
bdrv_graph_wrunlock();
s->data_file = NULL;
bdrv_graph_rdlock_main_loop();
}
qcow2_refcount_close(bs);
qcow2_free_snapshots(bs);
}
static void qcow2_close(BlockDriverState *bs)
static void GRAPH_UNLOCKED qcow2_close(BlockDriverState *bs)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
qcow2_do_close(bs, true);
}
@ -3991,7 +4004,8 @@ finish:
}
static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
static bool coroutine_fn GRAPH_RDLOCK
is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
int64_t nr;
int res;
@ -4012,7 +4026,7 @@ static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
* backing file. So, we need a loop.
*/
do {
res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
res = bdrv_co_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
offset += nr;
bytes -= nr;
} while (res >= 0 && (res & BDRV_BLOCK_ZERO) && nr && bytes);
@ -4076,8 +4090,8 @@ qcow2_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
return ret;
}
static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
int64_t offset, int64_t bytes)
static int coroutine_fn GRAPH_RDLOCK
qcow2_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
int ret;
BDRVQcow2State *s = bs->opaque;
@ -4822,7 +4836,7 @@ fail:
return ret;
}
static int make_completely_empty(BlockDriverState *bs)
static int GRAPH_RDLOCK make_completely_empty(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
Error *local_err = NULL;
@ -4973,7 +4987,7 @@ fail:
return ret;
}
static int qcow2_make_empty(BlockDriverState *bs)
static int GRAPH_RDLOCK qcow2_make_empty(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
uint64_t offset, end_offset;
@ -5017,7 +5031,7 @@ static int qcow2_make_empty(BlockDriverState *bs)
return ret;
}
static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
static coroutine_fn GRAPH_RDLOCK int qcow2_co_flush_to_os(BlockDriverState *bs)
{
BDRVQcow2State *s = bs->opaque;
int ret;
@ -5366,7 +5380,7 @@ qcow2_co_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0);
}
static int qcow2_has_compressed_clusters(BlockDriverState *bs)
static int GRAPH_RDLOCK qcow2_has_compressed_clusters(BlockDriverState *bs)
{
int64_t offset = 0;
int64_t bytes = bdrv_getlength(bs);
@ -5402,9 +5416,10 @@ static int qcow2_has_compressed_clusters(BlockDriverState *bs)
* Downgrades an image's version. To achieve this, any incompatible features
* have to be removed.
*/
static int qcow2_downgrade(BlockDriverState *bs, int target_version,
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
Error **errp)
static int GRAPH_RDLOCK
qcow2_downgrade(BlockDriverState *bs, int target_version,
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
Error **errp)
{
BDRVQcow2State *s = bs->opaque;
int current_version = s->qcow_version;
@ -5512,9 +5527,10 @@ static int qcow2_downgrade(BlockDriverState *bs, int target_version,
* features of older versions, some things may have to be presented
* differently.
*/
static int qcow2_upgrade(BlockDriverState *bs, int target_version,
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
Error **errp)
static int GRAPH_RDLOCK
qcow2_upgrade(BlockDriverState *bs, int target_version,
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
Error **errp)
{
BDRVQcow2State *s = bs->opaque;
bool need_snapshot_update;
@ -5640,11 +5656,10 @@ static void qcow2_amend_helper_cb(BlockDriverState *bs,
info->original_cb_opaque);
}
static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque,
bool force,
Error **errp)
static int GRAPH_RDLOCK
qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
bool force, Error **errp)
{
BDRVQcow2State *s = bs->opaque;
int old_version = s->qcow_version, new_version = old_version;


@ -838,9 +838,10 @@ int qcow2_mark_dirty(BlockDriverState *bs);
int qcow2_mark_corrupt(BlockDriverState *bs);
int qcow2_update_header(BlockDriverState *bs);
void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
int64_t size, const char *message_format, ...)
G_GNUC_PRINTF(5, 6);
void GRAPH_RDLOCK
qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
int64_t size, const char *message_format, ...)
G_GNUC_PRINTF(5, 6);
int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
uint64_t entries, size_t entry_len,
@ -851,33 +852,41 @@ int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
int coroutine_fn GRAPH_RDLOCK qcow2_refcount_init(BlockDriverState *bs);
void qcow2_refcount_close(BlockDriverState *bs);
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
uint64_t *refcount);
int GRAPH_RDLOCK qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
uint64_t *refcount);
int qcow2_update_cluster_refcount(BlockDriverState *bs, int64_t cluster_index,
uint64_t addend, bool decrease,
enum qcow2_discard_type type);
int GRAPH_RDLOCK
qcow2_update_cluster_refcount(BlockDriverState *bs, int64_t cluster_index,
uint64_t addend, bool decrease,
enum qcow2_discard_type type);
int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t offset,
uint64_t additional_clusters, bool exact_size,
int new_refblock_index,
uint64_t new_refblock_offset);
int64_t GRAPH_RDLOCK
qcow2_refcount_area(BlockDriverState *bs, uint64_t offset,
uint64_t additional_clusters, bool exact_size,
int new_refblock_index,
uint64_t new_refblock_offset);
int64_t GRAPH_RDLOCK
qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
int64_t GRAPH_RDLOCK coroutine_fn
qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
int64_t nb_clusters);
int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size);
int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
int64_t nb_clusters);
int64_t coroutine_fn GRAPH_RDLOCK qcow2_alloc_bytes(BlockDriverState *bs, int size);
void qcow2_free_clusters(BlockDriverState *bs,
int64_t offset, int64_t size,
enum qcow2_discard_type type);
void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
enum qcow2_discard_type type);
void GRAPH_RDLOCK qcow2_free_clusters(BlockDriverState *bs,
int64_t offset, int64_t size,
enum qcow2_discard_type type);
void GRAPH_RDLOCK
qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
enum qcow2_discard_type type);
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
int64_t l1_table_offset, int l1_size, int addend);
int GRAPH_RDLOCK
qcow2_update_snapshot_refcount(BlockDriverState *bs, int64_t l1_table_offset,
int l1_size, int addend);
int qcow2_flush_caches(BlockDriverState *bs);
int qcow2_write_caches(BlockDriverState *bs);
int GRAPH_RDLOCK qcow2_flush_caches(BlockDriverState *bs);
int GRAPH_RDLOCK qcow2_write_caches(BlockDriverState *bs);
int coroutine_fn qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
BdrvCheckMode fix);
@ -885,39 +894,48 @@ void qcow2_process_discards(BlockDriverState *bs, int ret);
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
int64_t size);
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
int64_t size, bool data_file);
int GRAPH_RDLOCK
qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
int64_t size, bool data_file);
int coroutine_fn qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
int64_t offset, int64_t size);
int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque, Error **errp);
int GRAPH_RDLOCK
qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque, Error **errp);
int coroutine_fn GRAPH_RDLOCK qcow2_shrink_reftable(BlockDriverState *bs);
int64_t coroutine_fn qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
int64_t coroutine_fn GRAPH_RDLOCK
qcow2_get_last_cluster(BlockDriverState *bs, int64_t size);
int coroutine_fn GRAPH_RDLOCK
qcow2_detect_metadata_preallocation(BlockDriverState *bs);
/* qcow2-cluster.c functions */
int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
bool exact_size);
int GRAPH_RDLOCK
qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size, bool exact_size);
int coroutine_fn GRAPH_RDLOCK
qcow2_shrink_l1_table(BlockDriverState *bs, uint64_t max_size);
int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index);
int GRAPH_RDLOCK qcow2_write_l1_entry(BlockDriverState *bs, int l1_index);
int qcow2_encrypt_sectors(BDRVQcow2State *s, int64_t sector_num,
uint8_t *buf, int nb_sectors, bool enc, Error **errp);
int qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset,
QCow2SubclusterType *subcluster_type);
int coroutine_fn qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes,
uint64_t *host_offset, QCowL2Meta **m);
int GRAPH_RDLOCK
qcow2_get_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset,
QCow2SubclusterType *subcluster_type);
int coroutine_fn GRAPH_RDLOCK
qcow2_alloc_host_offset(BlockDriverState *bs, uint64_t offset,
unsigned int *bytes, uint64_t *host_offset,
QCowL2Meta **m);
int coroutine_fn GRAPH_RDLOCK
qcow2_alloc_compressed_cluster_offset(BlockDriverState *bs, uint64_t offset,
int compressed_size, uint64_t *host_offset);
@ -927,26 +945,33 @@ void qcow2_parse_compressed_l2_entry(BlockDriverState *bs, uint64_t l2_entry,
int coroutine_fn GRAPH_RDLOCK
qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m);
void coroutine_fn qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
int qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset,
uint64_t bytes, enum qcow2_discard_type type,
bool full_discard);
void coroutine_fn GRAPH_RDLOCK
qcow2_alloc_cluster_abort(BlockDriverState *bs, QCowL2Meta *m);
int GRAPH_RDLOCK
qcow2_cluster_discard(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
enum qcow2_discard_type type, bool full_discard);
int coroutine_fn GRAPH_RDLOCK
qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
int flags);
int qcow2_expand_zero_clusters(BlockDriverState *bs,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque);
int GRAPH_RDLOCK
qcow2_expand_zero_clusters(BlockDriverState *bs,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque);
/* qcow2-snapshot.c functions */
int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id);
int qcow2_snapshot_delete(BlockDriverState *bs,
const char *snapshot_id,
const char *name,
Error **errp);
int GRAPH_RDLOCK
qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
int GRAPH_RDLOCK
qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id);
int GRAPH_RDLOCK
qcow2_snapshot_delete(BlockDriverState *bs, const char *snapshot_id,
const char *name, Error **errp);
int qcow2_snapshot_list(BlockDriverState *bs, QEMUSnapshotInfo **psn_tab);
int qcow2_snapshot_load_tmp(BlockDriverState *bs,
const char *snapshot_id,
@ -956,15 +981,15 @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs,
void qcow2_free_snapshots(BlockDriverState *bs);
int coroutine_fn GRAPH_RDLOCK
qcow2_read_snapshots(BlockDriverState *bs, Error **errp);
int qcow2_write_snapshots(BlockDriverState *bs);
int GRAPH_RDLOCK qcow2_write_snapshots(BlockDriverState *bs);
int coroutine_fn GRAPH_RDLOCK
qcow2_check_read_snapshot_table(BlockDriverState *bs, BdrvCheckResult *result,
BdrvCheckMode fix);
int coroutine_fn qcow2_check_fix_snapshot_table(BlockDriverState *bs,
BdrvCheckResult *result,
BdrvCheckMode fix);
int coroutine_fn GRAPH_RDLOCK
qcow2_check_fix_snapshot_table(BlockDriverState *bs, BdrvCheckResult *result,
BdrvCheckMode fix);
/* qcow2-cache.c functions */
Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
@ -972,19 +997,23 @@ Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
int qcow2_cache_destroy(Qcow2Cache *c);
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table);
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c);
int qcow2_cache_write(BlockDriverState *bs, Qcow2Cache *c);
int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
Qcow2Cache *dependency);
int GRAPH_RDLOCK qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c);
int GRAPH_RDLOCK qcow2_cache_write(BlockDriverState *bs, Qcow2Cache *c);
int GRAPH_RDLOCK qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
Qcow2Cache *dependency);
void qcow2_cache_depends_on_flush(Qcow2Cache *c);
void qcow2_cache_clean_unused(Qcow2Cache *c);
int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c);
int GRAPH_RDLOCK qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c);
int GRAPH_RDLOCK
qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
void **table);
int GRAPH_RDLOCK
qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
void **table);
int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
void **table);
int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
void **table);
void qcow2_cache_put(Qcow2Cache *c, void **table);
void *qcow2_cache_is_table_offset(Qcow2Cache *c, uint64_t offset);
void qcow2_cache_discard(Qcow2Cache *c, void *table);
@ -998,18 +1027,22 @@ bool coroutine_fn GRAPH_RDLOCK
qcow2_load_dirty_bitmaps(BlockDriverState *bs, bool *header_updated, Error **errp);
bool qcow2_get_bitmap_info_list(BlockDriverState *bs,
Qcow2BitmapInfoList **info_list, Error **errp);
int qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp);
int GRAPH_RDLOCK qcow2_reopen_bitmaps_rw(BlockDriverState *bs, Error **errp);
int GRAPH_RDLOCK qcow2_reopen_bitmaps_ro(BlockDriverState *bs, Error **errp);
int coroutine_fn qcow2_truncate_bitmaps_check(BlockDriverState *bs, Error **errp);
bool qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs,
bool release_stored, Error **errp);
int qcow2_reopen_bitmaps_ro(BlockDriverState *bs, Error **errp);
bool coroutine_fn qcow2_co_can_store_new_dirty_bitmap(BlockDriverState *bs,
const char *name,
uint32_t granularity,
Error **errp);
int coroutine_fn qcow2_co_remove_persistent_dirty_bitmap(BlockDriverState *bs,
const char *name,
Error **errp);
bool GRAPH_RDLOCK
qcow2_store_persistent_dirty_bitmaps(BlockDriverState *bs, bool release_stored,
Error **errp);
bool coroutine_fn GRAPH_RDLOCK
qcow2_co_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
uint32_t granularity, Error **errp);
int coroutine_fn GRAPH_RDLOCK
qcow2_co_remove_persistent_dirty_bitmap(BlockDriverState *bs, const char *name,
Error **errp);
bool qcow2_supports_persistent_dirty_bitmap(BlockDriverState *bs);
uint64_t qcow2_get_persistent_dirty_bitmap_size(BlockDriverState *bs,
uint32_t cluster_size);


@ -206,7 +206,7 @@ static void quorum_report_bad(QuorumOpType type, uint64_t offset,
end_sector - start_sector);
}
static void quorum_report_failure(QuorumAIOCB *acb)
static void GRAPH_RDLOCK quorum_report_failure(QuorumAIOCB *acb)
{
const char *reference = bdrv_get_device_or_node_name(acb->bs);
int64_t start_sector = acb->offset / BDRV_SECTOR_SIZE;
@ -219,7 +219,7 @@ static void quorum_report_failure(QuorumAIOCB *acb)
static int quorum_vote_error(QuorumAIOCB *acb);
static bool quorum_has_too_much_io_failed(QuorumAIOCB *acb)
static bool GRAPH_RDLOCK quorum_has_too_much_io_failed(QuorumAIOCB *acb)
{
BDRVQuorumState *s = acb->bs->opaque;


@ -505,7 +505,9 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
BDRV_REQ_ZERO_WRITE;
if (bs->probed && !bdrv_is_read_only(bs)) {
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(bs->file->bs);
bdrv_graph_rdunlock_main_loop();
fprintf(stderr,
"WARNING: Image format was not specified for '%s' and probing "
"guessed raw.\n"


@ -1168,7 +1168,9 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags,
/* If we are using an rbd snapshot, we must be r/o, otherwise
* leave as-is */
if (s->snap != NULL) {
bdrv_graph_rdlock_main_loop();
r = bdrv_apply_auto_read_only(bs, "rbd snapshots are read-only", errp);
bdrv_graph_rdunlock_main_loop();
if (r < 0) {
goto failed_post_open;
}
@ -1208,6 +1210,8 @@ static int qemu_rbd_reopen_prepare(BDRVReopenState *state,
BDRVRBDState *s = state->bs->opaque;
int ret = 0;
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (s->snap && state->flags & BDRV_O_RDWR) {
error_setg(errp,
"Cannot change node '%s' to r/w when using RBD snapshot",


@ -276,10 +276,10 @@ replication_co_writev(BlockDriverState *bs, int64_t sector_num,
while (remaining_sectors > 0) {
int64_t count;
ret = bdrv_is_allocated_above(top->bs, base->bs, false,
sector_num * BDRV_SECTOR_SIZE,
remaining_sectors * BDRV_SECTOR_SIZE,
&count);
ret = bdrv_co_is_allocated_above(top->bs, base->bs, false,
sector_num * BDRV_SECTOR_SIZE,
remaining_sectors * BDRV_SECTOR_SIZE,
&count);
if (ret < 0) {
goto out1;
}
@ -307,13 +307,16 @@ out:
return ret;
}
static void secondary_do_checkpoint(BlockDriverState *bs, Error **errp)
static void GRAPH_UNLOCKED
secondary_do_checkpoint(BlockDriverState *bs, Error **errp)
{
BDRVReplicationState *s = bs->opaque;
BdrvChild *active_disk = bs->file;
Error *local_err = NULL;
int ret;
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!s->backup_job) {
error_setg(errp, "Backup job was cancelled unexpectedly");
return;
@ -427,7 +430,8 @@ static void backup_job_completed(void *opaque, int ret)
backup_job_cleanup(bs);
}
static bool check_top_bs(BlockDriverState *top_bs, BlockDriverState *bs)
static bool GRAPH_RDLOCK
check_top_bs(BlockDriverState *top_bs, BlockDriverState *bs)
{
BdrvChild *child;
@ -458,6 +462,8 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
Error *local_err = NULL;
BackupPerf perf = { .use_copy_range = true, .max_workers = 1 };
GLOBAL_STATE_CODE();
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
s = bs->opaque;
@ -504,12 +510,15 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
return;
}
bdrv_graph_rdlock_main_loop();
secondary_disk = hidden_disk->bs->backing;
if (!secondary_disk->bs || !bdrv_has_blk(secondary_disk->bs)) {
error_setg(errp, "The secondary disk doesn't have block backend");
bdrv_graph_rdunlock_main_loop();
aio_context_release(aio_context);
return;
}
bdrv_graph_rdunlock_main_loop();
/* verify the length */
active_length = bdrv_getlength(active_disk->bs);
@ -526,13 +535,16 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
/* Must be true, or the bdrv_getlength() calls would have failed */
assert(active_disk->bs->drv && hidden_disk->bs->drv);
bdrv_graph_rdlock_main_loop();
if (!active_disk->bs->drv->bdrv_make_empty ||
!hidden_disk->bs->drv->bdrv_make_empty) {
error_setg(errp,
"Active disk or hidden disk doesn't support make_empty");
aio_context_release(aio_context);
bdrv_graph_rdunlock_main_loop();
return;
}
bdrv_graph_rdunlock_main_loop();
/* reopen the backing file in r/w mode */
reopen_backing_file(bs, true, &local_err);
@ -566,8 +578,6 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
return;
}
bdrv_graph_wrunlock();
/* start backup job now */
error_setg(&s->blocker,
"Block device is in use by internal backup job");
@ -576,6 +586,7 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
if (!top_bs || !bdrv_is_root_node(top_bs) ||
!check_top_bs(top_bs, bs)) {
error_setg(errp, "No top_bs or it is invalid");
bdrv_graph_wrunlock();
reopen_backing_file(bs, false, NULL);
aio_context_release(aio_context);
return;
@ -583,6 +594,8 @@ static void replication_start(ReplicationState *rs, ReplicationMode mode,
bdrv_op_block_all(top_bs, s->blocker);
bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker);
bdrv_graph_wrunlock();
s->backup_job = backup_job_create(
NULL, s->secondary_disk->bs, s->hidden_disk->bs,
0, MIRROR_SYNC_MODE_NONE, NULL, 0, false, NULL,


@ -155,11 +155,15 @@ bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs,
* back if the given BDS does not support snapshots.
* Return NULL if there is no BDS to (safely) fall back to.
*/
static BdrvChild *bdrv_snapshot_fallback_child(BlockDriverState *bs)
static BdrvChild * GRAPH_RDLOCK
bdrv_snapshot_fallback_child(BlockDriverState *bs)
{
BdrvChild *fallback = bdrv_primary_child(bs);
BdrvChild *child;
GLOBAL_STATE_CODE();
assert_bdrv_graph_readable();
/* We allow fallback only to primary child */
if (!fallback) {
return NULL;
@ -182,8 +186,10 @@ static BdrvChild *bdrv_snapshot_fallback_child(BlockDriverState *bs)
return fallback;
}
static BlockDriverState *bdrv_snapshot_fallback(BlockDriverState *bs)
static BlockDriverState * GRAPH_RDLOCK
bdrv_snapshot_fallback(BlockDriverState *bs)
{
GLOBAL_STATE_CODE();
return child_bs(bdrv_snapshot_fallback_child(bs));
}
@ -254,7 +260,10 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
return ret;
}
bdrv_graph_rdlock_main_loop();
fallback = bdrv_snapshot_fallback_child(bs);
bdrv_graph_rdunlock_main_loop();
if (fallback) {
QDict *options;
QDict *file_options;
@ -302,7 +311,10 @@ int bdrv_snapshot_goto(BlockDriverState *bs,
* respective option (with the qdict_put_str() call above).
* Assert that .bdrv_open() has attached the right BDS as primary child.
*/
bdrv_graph_rdlock_main_loop();
assert(bdrv_primary_bs(bs) == fallback_bs);
bdrv_graph_rdunlock_main_loop();
bdrv_unref(fallback_bs);
return ret;
}
@ -374,10 +386,12 @@ int bdrv_snapshot_delete(BlockDriverState *bs,
int bdrv_snapshot_list(BlockDriverState *bs,
QEMUSnapshotInfo **psn_info)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
BlockDriver *drv = bs->drv;
BlockDriverState *fallback_bs = bdrv_snapshot_fallback(bs);
GLOBAL_STATE_CODE();
if (!drv) {
return -ENOMEDIUM;
}
@ -418,6 +432,7 @@ int bdrv_snapshot_load_tmp(BlockDriverState *bs,
BlockDriver *drv = bs->drv;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!drv) {
error_setg(errp, QERR_DEVICE_HAS_NO_MEDIUM, bdrv_get_device_name(bs));
@ -462,9 +477,9 @@ int bdrv_snapshot_load_tmp_by_id_or_name(BlockDriverState *bs,
}
static int bdrv_all_get_snapshot_devices(bool has_devices, strList *devices,
GList **all_bdrvs,
Error **errp)
static int GRAPH_RDLOCK
bdrv_all_get_snapshot_devices(bool has_devices, strList *devices,
GList **all_bdrvs, Error **errp)
{
g_autoptr(GList) bdrvs = NULL;
@ -496,8 +511,11 @@ static int bdrv_all_get_snapshot_devices(bool has_devices, strList *devices,
}
static bool bdrv_all_snapshots_includes_bs(BlockDriverState *bs)
static bool GRAPH_RDLOCK bdrv_all_snapshots_includes_bs(BlockDriverState *bs)
{
GLOBAL_STATE_CODE();
assert_bdrv_graph_readable();
if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
return false;
}
@ -518,6 +536,7 @@ bool bdrv_all_can_snapshot(bool has_devices, strList *devices,
GList *iterbdrvs;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return false;
@ -554,6 +573,7 @@ int bdrv_all_delete_snapshot(const char *name,
GList *iterbdrvs;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
@ -593,10 +613,15 @@ int bdrv_all_goto_snapshot(const char *name,
{
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
int ret;
GLOBAL_STATE_CODE();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
bdrv_graph_rdlock_main_loop();
ret = bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp);
bdrv_graph_rdunlock_main_loop();
if (ret < 0) {
return -1;
}
@ -605,15 +630,22 @@ int bdrv_all_goto_snapshot(const char *name,
BlockDriverState *bs = iterbdrvs->data;
AioContext *ctx = bdrv_get_aio_context(bs);
int ret = 0;
bool all_snapshots_includes_bs;
aio_context_acquire(ctx);
if (devices || bdrv_all_snapshots_includes_bs(bs)) {
bdrv_graph_rdlock_main_loop();
all_snapshots_includes_bs = bdrv_all_snapshots_includes_bs(bs);
bdrv_graph_rdunlock_main_loop();
if (devices || all_snapshots_includes_bs) {
ret = bdrv_snapshot_goto(bs, name, errp);
}
aio_context_release(ctx);
if (ret < 0) {
bdrv_graph_rdlock_main_loop();
error_prepend(errp, "Could not load snapshot '%s' on '%s': ",
name, bdrv_get_device_or_node_name(bs));
bdrv_graph_rdunlock_main_loop();
return -1;
}
@ -631,6 +663,7 @@ int bdrv_all_has_snapshot(const char *name,
GList *iterbdrvs;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
@ -673,7 +706,9 @@ int bdrv_all_create_snapshot(QEMUSnapshotInfo *sn,
{
g_autoptr(GList) bdrvs = NULL;
GList *iterbdrvs;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return -1;
@ -715,6 +750,7 @@ BlockDriverState *bdrv_all_find_vmstate_bs(const char *vmstate_bs,
GList *iterbdrvs;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bdrv_all_get_snapshot_devices(has_devices, devices, &bdrvs, errp) < 0) {
return NULL;


@ -172,7 +172,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
copy = false;
WITH_GRAPH_RDLOCK_GUARD() {
ret = bdrv_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
ret = bdrv_co_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
if (ret == 1) {
/* Allocated in the top, no need to copy. */
} else if (ret >= 0) {
@ -180,9 +180,9 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
* Copy if allocated in the intermediate images. Limit to the
* known-unallocated area [offset, offset+n*BDRV_SECTOR_SIZE).
*/
ret = bdrv_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
s->base_overlay, true,
offset, n, &n);
ret = bdrv_co_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
s->base_overlay, true,
offset, n, &n);
/* Finish early if end of backing file has been reached */
if (ret == 0 && n == 0) {
n = len - offset;


@ -492,9 +492,12 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
}
/* Disable migration when vdi images are used */
bdrv_graph_rdlock_main_loop();
error_setg(&s->migration_blocker, "The vdi format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
bdrv_graph_rdunlock_main_loop();
ret = migrate_add_blocker(s->migration_blocker, errp);
if (ret < 0) {
error_free(s->migration_blocker);


@ -1001,11 +1001,15 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
uint64_t signature;
Error *local_err = NULL;
GLOBAL_STATE_CODE();
ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
if (ret < 0) {
return ret;
}
GRAPH_RDLOCK_GUARD_MAINLOOP();
s->bat = NULL;
s->first_visible_write = true;


@ -410,8 +410,9 @@ uint32_t vhdx_checksum_calc(uint32_t crc, uint8_t *buf, size_t size,
bool vhdx_checksum_is_valid(uint8_t *buf, size_t size, int crc_offset);
int vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed,
Error **errp);
int GRAPH_RDLOCK
vhdx_parse_log(BlockDriverState *bs, BDRVVHDXState *s, bool *flushed,
Error **errp);
int coroutine_fn GRAPH_RDLOCK
vhdx_log_write_and_flush(BlockDriverState *bs, BDRVVHDXState *s,


@ -578,8 +578,8 @@ static int vmdk_add_extent(BlockDriverState *bs,
return 0;
}
static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent,
Error **errp)
static int GRAPH_RDLOCK
vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent, Error **errp)
{
int ret;
size_t l1_size;
@ -641,9 +641,9 @@ static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent,
return ret;
}
static int vmdk_open_vmfs_sparse(BlockDriverState *bs,
BdrvChild *file,
int flags, Error **errp)
static int GRAPH_RDLOCK
vmdk_open_vmfs_sparse(BlockDriverState *bs, BdrvChild *file, int flags,
Error **errp)
{
int ret;
uint32_t magic;
@ -797,9 +797,9 @@ static int check_se_sparse_volatile_header(VMDKSESparseVolatileHeader *header,
return 0;
}
static int vmdk_open_se_sparse(BlockDriverState *bs,
BdrvChild *file,
int flags, Error **errp)
static int GRAPH_RDLOCK
vmdk_open_se_sparse(BlockDriverState *bs, BdrvChild *file, int flags,
Error **errp)
{
int ret;
VMDKSESparseConstHeader const_header;
@ -913,9 +913,9 @@ static char *vmdk_read_desc(BdrvChild *file, uint64_t desc_offset, Error **errp)
return buf;
}
static int vmdk_open_vmdk4(BlockDriverState *bs,
BdrvChild *file,
int flags, QDict *options, Error **errp)
static int GRAPH_RDLOCK
vmdk_open_vmdk4(BlockDriverState *bs, BdrvChild *file, int flags,
QDict *options, Error **errp)
{
int ret;
uint32_t magic;
@ -1095,8 +1095,9 @@ static int vmdk_parse_description(const char *desc, const char *opt_name,
}
/* Open an extent file and append to bs array */
static int vmdk_open_sparse(BlockDriverState *bs, BdrvChild *file, int flags,
char *buf, QDict *options, Error **errp)
static int GRAPH_RDLOCK
vmdk_open_sparse(BlockDriverState *bs, BdrvChild *file, int flags,
char *buf, QDict *options, Error **errp)
{
uint32_t magic;
@ -1123,8 +1124,9 @@ static const char *next_line(const char *s)
return s;
}
static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
QDict *options, Error **errp)
static int GRAPH_RDLOCK
vmdk_parse_extents(const char *desc, BlockDriverState *bs, QDict *options,
Error **errp)
{
int ret;
int matches;
@ -1143,6 +1145,8 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
char extent_opt_prefix[32];
Error *local_err = NULL;
GLOBAL_STATE_CODE();
for (p = desc; *p; p = next_line(p)) {
/* parse extent line in one of below formats:
*
@ -1223,9 +1227,11 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
ret = vmdk_add_extent(bs, extent_file, true, sectors,
0, 0, 0, 0, 0, &extent, errp);
if (ret < 0) {
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
goto out;
}
extent->flat_start_offset = flat_offset << 9;
@ -1240,26 +1246,32 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
}
g_free(buf);
if (ret) {
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
goto out;
}
extent = &s->extents[s->num_extents - 1];
} else if (!strcmp(type, "SESPARSE")) {
ret = vmdk_open_se_sparse(bs, extent_file, bs->open_flags, errp);
if (ret) {
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
goto out;
}
extent = &s->extents[s->num_extents - 1];
} else {
error_setg(errp, "Unsupported extent type '%s'", type);
bdrv_graph_rdunlock_main_loop();
bdrv_graph_wrlock(NULL);
bdrv_unref_child(bs, extent_file);
bdrv_graph_wrunlock();
bdrv_graph_rdlock_main_loop();
ret = -ENOTSUP;
goto out;
}
@ -1283,8 +1295,9 @@ out:
return ret;
}
static int vmdk_open_desc_file(BlockDriverState *bs, int flags, char *buf,
QDict *options, Error **errp)
static int GRAPH_RDLOCK
vmdk_open_desc_file(BlockDriverState *bs, int flags, char *buf, QDict *options,
Error **errp)
{
int ret;
char ct[128];
@ -2900,7 +2913,7 @@ static int vmdk_has_zero_init(BlockDriverState *bs)
return 1;
}
static VmdkExtentInfo *vmdk_get_extent_info(VmdkExtent *extent)
static VmdkExtentInfo * GRAPH_RDLOCK vmdk_get_extent_info(VmdkExtent *extent)
{
VmdkExtentInfo *info = g_new0(VmdkExtentInfo, 1);
@ -2977,8 +2990,8 @@ vmdk_co_check(BlockDriverState *bs, BdrvCheckResult *result, BdrvCheckMode fix)
return ret;
}
static ImageInfoSpecific *vmdk_get_specific_info(BlockDriverState *bs,
Error **errp)
static ImageInfoSpecific * GRAPH_RDLOCK
vmdk_get_specific_info(BlockDriverState *bs, Error **errp)
{
int i;
BDRVVmdkState *s = bs->opaque;


@ -446,9 +446,12 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
}
/* Disable migration when VHD images are used */
bdrv_graph_rdlock_main_loop();
error_setg(&s->migration_blocker, "The vpc format used by node '%s' "
"does not support live migration",
bdrv_get_device_or_node_name(bs));
bdrv_graph_rdunlock_main_loop();
ret = migrate_add_blocker(s->migration_blocker, errp);
if (ret < 0) {
error_free(s->migration_blocker);


@ -1144,6 +1144,8 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags,
QemuOpts *opts;
int ret;
GRAPH_RDLOCK_GUARD_MAINLOOP();
#ifdef DEBUG
vvv = s;
#endif
@ -1480,8 +1482,8 @@ vvfat_read(BlockDriverState *bs, int64_t sector_num, uint8_t *buf, int nb_sector
if (s->qcow) {
int64_t n;
int ret;
ret = bdrv_is_allocated(s->qcow->bs, sector_num * BDRV_SECTOR_SIZE,
(nb_sectors - i) * BDRV_SECTOR_SIZE, &n);
ret = bdrv_co_is_allocated(s->qcow->bs, sector_num * BDRV_SECTOR_SIZE,
(nb_sectors - i) * BDRV_SECTOR_SIZE, &n);
if (ret < 0) {
return ret;
}
@ -1806,10 +1808,10 @@ cluster_was_modified(BDRVVVFATState *s, uint32_t cluster_num)
}
for (i = 0; !was_modified && i < s->sectors_per_cluster; i++) {
was_modified = bdrv_is_allocated(s->qcow->bs,
(cluster2sector(s, cluster_num) +
i) * BDRV_SECTOR_SIZE,
BDRV_SECTOR_SIZE, NULL);
was_modified = bdrv_co_is_allocated(s->qcow->bs,
(cluster2sector(s, cluster_num) +
i) * BDRV_SECTOR_SIZE,
BDRV_SECTOR_SIZE, NULL);
}
/*
@ -1967,9 +1969,9 @@ get_cluster_count_for_direntry(BDRVVVFATState* s, direntry_t* direntry, const ch
for (i = 0; i < s->sectors_per_cluster; i++) {
int res;
res = bdrv_is_allocated(s->qcow->bs,
(offs + i) * BDRV_SECTOR_SIZE,
BDRV_SECTOR_SIZE, NULL);
res = bdrv_co_is_allocated(s->qcow->bs,
(offs + i) * BDRV_SECTOR_SIZE,
BDRV_SECTOR_SIZE, NULL);
if (res < 0) {
return -1;
}


@ -1041,6 +1041,8 @@ static BlockDriverState *qmp_get_root_bs(const char *name, Error **errp)
BlockDriverState *bs;
AioContext *aio_context;
GRAPH_RDLOCK_GUARD_MAINLOOP();
bs = bdrv_lookup_bs(name, name, errp);
if (bs == NULL) {
return NULL;
@ -1136,6 +1138,9 @@ SnapshotInfo *qmp_blockdev_snapshot_delete_internal_sync(const char *device,
SnapshotInfo *info = NULL;
int ret;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
bs = qmp_get_root_bs(device, errp);
if (!bs) {
return NULL;
@ -1221,6 +1226,9 @@ static void internal_snapshot_action(BlockdevSnapshotInternal *internal,
AioContext *aio_context;
int ret1;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
tran_add(tran, &internal_snapshot_drv, state);
device = internal->device;
@ -1309,6 +1317,9 @@ static void internal_snapshot_abort(void *opaque)
AioContext *aio_context;
Error *local_error = NULL;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (!state->created) {
return;
}
@ -1654,6 +1665,8 @@ static void drive_backup_action(DriveBackup *backup,
bool set_backing_hd = false;
int ret;
GLOBAL_STATE_CODE();
tran_add(tran, &drive_backup_drv, state);
if (!backup->has_mode) {
@ -1683,9 +1696,12 @@ static void drive_backup_action(DriveBackup *backup,
}
/* Early check to avoid creating target */
bdrv_graph_rdlock_main_loop();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
bdrv_graph_rdunlock_main_loop();
goto out;
}
bdrv_graph_rdunlock_main_loop();
flags = bs->open_flags | BDRV_O_RDWR;
@ -1724,7 +1740,10 @@ static void drive_backup_action(DriveBackup *backup,
BlockDriverState *explicit_backing =
bdrv_skip_implicit_filters(source);
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(explicit_backing);
bdrv_graph_rdunlock_main_loop();
bdrv_img_create(backup->target, format,
explicit_backing->filename,
explicit_backing->drv->format_name, NULL,
@ -2344,10 +2363,13 @@ void coroutine_fn qmp_block_resize(const char *device, const char *node_name,
return;
}
bdrv_graph_co_rdlock();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, NULL)) {
error_setg(errp, QERR_DEVICE_IN_USE, device);
bdrv_graph_co_rdunlock();
return;
}
bdrv_graph_co_rdunlock();
blk = blk_co_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL, errp);
if (!blk) {
@ -2387,6 +2409,8 @@ void qmp_block_stream(const char *job_id, const char *device,
Error *local_err = NULL;
int job_flags = JOB_DEFAULT;
GLOBAL_STATE_CODE();
if (base && base_node) {
error_setg(errp, "'base' and 'base-node' cannot be specified "
"at the same time");
@ -2437,7 +2461,10 @@ void qmp_block_stream(const char *job_id, const char *device,
goto out;
}
assert(bdrv_get_aio_context(base_bs) == aio_context);
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(base_bs);
bdrv_graph_rdunlock_main_loop();
}
if (bottom) {
@ -2466,13 +2493,16 @@ void qmp_block_stream(const char *job_id, const char *device,
* Check for op blockers in the whole chain between bs and base (or bottom)
*/
iter_end = bottom ? bdrv_filter_or_cow_bs(bottom_bs) : base_bs;
bdrv_graph_rdlock_main_loop();
for (iter = bs; iter && iter != iter_end;
iter = bdrv_filter_or_cow_bs(iter))
{
if (bdrv_op_is_blocked(iter, BLOCK_OP_TYPE_STREAM, errp)) {
bdrv_graph_rdunlock_main_loop();
goto out;
}
}
bdrv_graph_rdunlock_main_loop();
/* if we are streaming the entire chain, the result will have no backing
* file, and specifying one is therefore an error */
@ -2835,6 +2865,8 @@ BlockDeviceInfoList *qmp_query_named_block_nodes(bool has_flat,
XDbgBlockGraph *qmp_x_debug_query_block_graph(Error **errp)
{
GRAPH_RDLOCK_GUARD_MAINLOOP();
return bdrv_get_xdbg_block_graph(errp);
}
@ -2998,9 +3030,12 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
}
/* Early check to avoid creating target */
bdrv_graph_rdlock_main_loop();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_MIRROR_SOURCE, errp)) {
bdrv_graph_rdunlock_main_loop();
return;
}
bdrv_graph_rdunlock_main_loop();
aio_context = bdrv_get_aio_context(bs);
aio_context_acquire(aio_context);
@ -3063,7 +3098,10 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp)
break;
case NEW_IMAGE_MODE_ABSOLUTE_PATHS:
/* create new image with backing file */
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(explicit_backing);
bdrv_graph_rdunlock_main_loop();
bdrv_img_create(arg->target, format,
explicit_backing->filename,
explicit_backing->drv->format_name,
@ -3383,9 +3421,12 @@ void qmp_change_backing_file(const char *device,
/* even though we are not necessarily operating on bs, we need it to
* determine if block ops are currently prohibited on the chain */
bdrv_graph_rdlock_main_loop();
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_CHANGE, errp)) {
bdrv_graph_rdunlock_main_loop();
goto out;
}
bdrv_graph_rdunlock_main_loop();
/* final sanity check */
if (!bdrv_chain_contains(bs, image_bs)) {
@ -3509,6 +3550,7 @@ void qmp_blockdev_del(const char *node_name, Error **errp)
BlockDriverState *bs;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
bs = bdrv_find_node(node_name);
if (!bs) {
@ -3636,6 +3678,8 @@ void qmp_x_blockdev_set_iothread(const char *node_name, StrOrNull *iothread,
AioContext *new_context;
BlockDriverState *bs;
GRAPH_RDLOCK_GUARD_MAINLOOP();
bs = bdrv_find_node(node_name);
if (!bs) {
error_setg(errp, "Failed to find node with node-name='%s'", node_name);


@ -485,6 +485,7 @@ void *block_job_create(const char *job_id, const BlockJobDriver *driver,
BlockJob *job;
int ret;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (job_id == NULL && !(flags & JOB_INTERNAL)) {
job_id = bdrv_get_device_name(bs);


@ -66,13 +66,16 @@
* function. The coroutine yields after scheduling the BH and is reentered when
* the wrapped function returns.
*
* A no_co_wrapper_bdrv_wrlock function is a no_co_wrapper function that
* automatically takes the graph wrlock when calling the wrapped function.
* A no_co_wrapper_bdrv_rdlock function is a no_co_wrapper function that
* automatically takes the graph rdlock when calling the wrapped function. In
* the same way, no_co_wrapper_bdrv_wrlock functions automatically take the
* graph wrlock.
*
* If the first parameter of the function is a BlockDriverState, BdrvChild or
* BlockBackend pointer, the AioContext lock for it is taken in the wrapper.
*/
#define no_co_wrapper
#define no_co_wrapper_bdrv_rdlock
#define no_co_wrapper_bdrv_wrlock
#include "block/blockjob.h"


@ -132,13 +132,13 @@ int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
Error **errp);
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
const char *backing_file);
void bdrv_refresh_filename(BlockDriverState *bs);
void GRAPH_RDLOCK bdrv_refresh_filename(BlockDriverState *bs);
void GRAPH_RDLOCK
bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp);
int bdrv_commit(BlockDriverState *bs);
int bdrv_make_empty(BdrvChild *c, Error **errp);
int GRAPH_RDLOCK bdrv_make_empty(BdrvChild *c, Error **errp);
int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file,
const char *backing_fmt, bool warn);
void bdrv_register(BlockDriver *bdrv);
@ -160,19 +160,20 @@ void bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base);
*/
typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
int64_t total_work_size, void *opaque);
int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
bool force,
Error **errp);
int GRAPH_RDLOCK
bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
bool force, Error **errp);
/* check if a named node can be replaced when doing drive-mirror */
BlockDriverState * GRAPH_RDLOCK
check_to_replace_node(BlockDriverState *parent_bs, const char *node_name,
Error **errp);
int no_coroutine_fn bdrv_activate(BlockDriverState *bs, Error **errp);
int no_coroutine_fn GRAPH_RDLOCK
bdrv_activate(BlockDriverState *bs, Error **errp);
int coroutine_fn no_co_wrapper
int coroutine_fn no_co_wrapper_bdrv_rdlock
bdrv_co_activate(BlockDriverState *bs, Error **errp);
void bdrv_activate_all(Error **errp);
@ -191,7 +192,7 @@ int bdrv_has_zero_init_1(BlockDriverState *bs);
int bdrv_has_zero_init(BlockDriverState *bs);
BlockDriverState *bdrv_find_node(const char *node_name);
BlockDeviceInfoList *bdrv_named_nodes_list(bool flat, Error **errp);
XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp);
XDbgBlockGraph * GRAPH_RDLOCK bdrv_get_xdbg_block_graph(Error **errp);
BlockDriverState *bdrv_lookup_bs(const char *device,
const char *node_name,
Error **errp);
@ -208,15 +209,18 @@ typedef struct BdrvNextIterator {
BlockDriverState *bs;
} BdrvNextIterator;
BlockDriverState *bdrv_first(BdrvNextIterator *it);
BlockDriverState *bdrv_next(BdrvNextIterator *it);
BlockDriverState * GRAPH_RDLOCK bdrv_first(BdrvNextIterator *it);
BlockDriverState * GRAPH_RDLOCK bdrv_next(BdrvNextIterator *it);
void bdrv_next_cleanup(BdrvNextIterator *it);
BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
void *opaque, bool read_only);
char *bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp);
char *bdrv_dirname(BlockDriverState *bs, Error **errp);
char * GRAPH_RDLOCK
bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp);
char * GRAPH_RDLOCK bdrv_dirname(BlockDriverState *bs, Error **errp);
void bdrv_img_create(const char *filename, const char *fmt,
const char *base_filename, const char *base_fmt,
@ -242,7 +246,9 @@ bdrv_attach_child(BlockDriverState *parent_bs,
BdrvChildRole child_role,
Error **errp);
bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
bool GRAPH_RDLOCK
bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
void bdrv_op_block_all(BlockDriverState *bs, Error *reason);


@ -127,37 +127,46 @@ int coroutine_fn GRAPH_RDLOCK bdrv_co_zone_append(BlockDriverState *bs,
BdrvRequestFlags flags);
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
int bdrv_block_status(BlockDriverState *bs, int64_t offset,
int64_t bytes, int64_t *pnum, int64_t *map,
BlockDriverState **file);
int coroutine_fn GRAPH_RDLOCK
bdrv_co_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map, BlockDriverState **file);
int co_wrapper_mixed_bdrv_rdlock
bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *pnum, int64_t *map, BlockDriverState **file);
int coroutine_fn GRAPH_RDLOCK
bdrv_co_block_status_above(BlockDriverState *bs, BlockDriverState *base,
int64_t offset, int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file);
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
int64_t offset, int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file);
int co_wrapper_mixed_bdrv_rdlock
bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
int64_t offset, int64_t bytes, int64_t *pnum,
int64_t *map, BlockDriverState **file);
int coroutine_fn GRAPH_RDLOCK
bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *pnum);
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
int64_t *pnum);
int co_wrapper_mixed_bdrv_rdlock
bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
int64_t bytes, int64_t *pnum);
int coroutine_fn GRAPH_RDLOCK
bdrv_co_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
bool include_base, int64_t offset, int64_t bytes,
int64_t *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
bool include_base, int64_t offset, int64_t bytes,
int64_t *pnum);
int co_wrapper_mixed_bdrv_rdlock
bdrv_is_allocated_above(BlockDriverState *bs, BlockDriverState *base,
bool include_base, int64_t offset,
int64_t bytes, int64_t *pnum);
int coroutine_fn GRAPH_RDLOCK
bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset, int64_t bytes);
int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
Error **errp);
int GRAPH_RDLOCK
bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
Error **errp);
bool bdrv_is_read_only(BlockDriverState *bs);
bool bdrv_is_writable(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
@ -176,8 +185,12 @@ const char *bdrv_get_format_name(BlockDriverState *bs);
bool bdrv_supports_compressed_writes(BlockDriverState *bs);
const char *bdrv_get_node_name(const BlockDriverState *bs);
const char *bdrv_get_device_name(const BlockDriverState *bs);
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
const char * GRAPH_RDLOCK
bdrv_get_device_name(const BlockDriverState *bs);
const char * GRAPH_RDLOCK
bdrv_get_device_or_node_name(const BlockDriverState *bs);
int coroutine_fn GRAPH_RDLOCK
bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
@ -185,8 +198,9 @@ bdrv_co_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
int co_wrapper_mixed_bdrv_rdlock
bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
Error **errp);
ImageInfoSpecific * GRAPH_RDLOCK
bdrv_get_specific_info(BlockDriverState *bs, Error **errp);
BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs);
void bdrv_round_to_subclusters(BlockDriverState *bs,
int64_t offset, int64_t bytes,
@ -363,7 +377,7 @@ bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
*
* Begin a quiesced section for the parent of @c.
*/
void bdrv_parent_drained_begin_single(BdrvChild *c);
void GRAPH_RDLOCK bdrv_parent_drained_begin_single(BdrvChild *c);
/**
* bdrv_parent_drained_poll_single:
@ -371,14 +385,14 @@ void bdrv_parent_drained_begin_single(BdrvChild *c);
* Returns true if there is any pending activity to cease before @c can be
* called quiesced, false otherwise.
*/
bool bdrv_parent_drained_poll_single(BdrvChild *c);
bool GRAPH_RDLOCK bdrv_parent_drained_poll_single(BdrvChild *c);
/**
* bdrv_parent_drained_end_single:
*
* End a quiesced section for the parent of @c.
*/
void bdrv_parent_drained_end_single(BdrvChild *c);
void GRAPH_RDLOCK bdrv_parent_drained_end_single(BdrvChild *c);
/**
* bdrv_drain_poll:
@ -391,8 +405,9 @@ void bdrv_parent_drained_end_single(BdrvChild *c);
*
* This is part of bdrv_drained_begin.
*/
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
bool ignore_bds_parents);
bool GRAPH_RDLOCK
bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
bool ignore_bds_parents);
/**
* bdrv_drained_begin:
@ -400,6 +415,12 @@ bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
* Begin a quiesced section for exclusive access to the BDS, by disabling
* external request sources including NBD server, block jobs, and device model.
*
* This function can only be invoked by the main loop or a coroutine
* (regardless of the AioContext where it is running).
* If the coroutine is running in an Iothread AioContext, this function will
* just schedule a BH to run in the main loop.
* However, it cannot be directly called by an Iothread.
*
* This function can be recursive.
*/
void bdrv_drained_begin(BlockDriverState *bs);
@ -416,6 +437,12 @@ void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent);
* bdrv_drained_end:
*
* End a quiescent section started by bdrv_drained_begin().
*
* This function can only be invoked by the main loop or a coroutine
* (regardless of the AioContext where it is running).
* If the coroutine is running in an Iothread AioContext, this function will
* just schedule a BH to run in the main loop.
* However, it cannot be directly called by an Iothread.
*/
void bdrv_drained_end(BlockDriverState *bs);
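
The constraint added to these comments is easy to violate from driver code: bdrv_drained_begin()/bdrv_drained_end() may be called from the main loop or from a coroutine (a coroutine running in an iothread AioContext gets the work deferred to a main-loop BH), but not directly from an iothread. A minimal conforming caller might look like the following sketch; example_quiesce() is hypothetical and assumes a main-loop caller:

static void example_quiesce(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();        /* this sketch assumes the main loop */

    bdrv_drained_begin(bs);     /* external request sources (NBD server, jobs,
                                 * device model) stop submitting I/O to bs */

    /* ... inspect or reconfigure bs while it is quiesced ... */

    bdrv_drained_end(bs);       /* resume the quiesced request sources */
}
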


@ -235,11 +235,14 @@ struct BlockDriver {
Error **errp);
/* For handling image reopen for split or non-split files. */
int (*bdrv_reopen_prepare)(BDRVReopenState *reopen_state,
BlockReopenQueue *queue, Error **errp);
void (*bdrv_reopen_commit)(BDRVReopenState *reopen_state);
void (*bdrv_reopen_commit_post)(BDRVReopenState *reopen_state);
void (*bdrv_reopen_abort)(BDRVReopenState *reopen_state);
int GRAPH_UNLOCKED_PTR (*bdrv_reopen_prepare)(
BDRVReopenState *reopen_state, BlockReopenQueue *queue, Error **errp);
void GRAPH_UNLOCKED_PTR (*bdrv_reopen_commit)(
BDRVReopenState *reopen_state);
void GRAPH_UNLOCKED_PTR (*bdrv_reopen_commit_post)(
BDRVReopenState *reopen_state);
void GRAPH_UNLOCKED_PTR (*bdrv_reopen_abort)(
BDRVReopenState *reopen_state);
void (*bdrv_join_options)(QDict *options, QDict *old_options);
int GRAPH_UNLOCKED_PTR (*bdrv_open)(
@ -256,20 +259,18 @@ struct BlockDriver {
int coroutine_fn GRAPH_UNLOCKED_PTR (*bdrv_co_create_opts)(
BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp);
int (*bdrv_amend_options)(BlockDriverState *bs,
QemuOpts *opts,
BlockDriverAmendStatusCB *status_cb,
void *cb_opaque,
bool force,
Error **errp);
int GRAPH_RDLOCK_PTR (*bdrv_amend_options)(
BlockDriverState *bs, QemuOpts *opts,
BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
bool force, Error **errp);
int (*bdrv_make_empty)(BlockDriverState *bs);
int GRAPH_RDLOCK_PTR (*bdrv_make_empty)(BlockDriverState *bs);
/*
* Refreshes the bs->exact_filename field. If that is impossible,
* bs->exact_filename has to be left empty.
*/
void (*bdrv_refresh_filename)(BlockDriverState *bs);
void GRAPH_RDLOCK_PTR (*bdrv_refresh_filename)(BlockDriverState *bs);
/*
* Gathers the open options for all children into @target.
@ -292,15 +293,15 @@ struct BlockDriver {
* block driver which implements it is probably doing something
* shady regarding its runtime option structure.
*/
void (*bdrv_gather_child_options)(BlockDriverState *bs, QDict *target,
bool backing_overridden);
void GRAPH_RDLOCK_PTR (*bdrv_gather_child_options)(
BlockDriverState *bs, QDict *target, bool backing_overridden);
/*
* Returns an allocated string which is the directory name of this BDS: It
* will be used to make relative filenames absolute by prepending this
* function's return value to them.
*/
char *(*bdrv_dirname)(BlockDriverState *bs, Error **errp);
char * GRAPH_RDLOCK_PTR (*bdrv_dirname)(BlockDriverState *bs, Error **errp);
/*
* This informs the driver that we are no longer interested in the result
@ -313,14 +314,16 @@ struct BlockDriver {
int GRAPH_RDLOCK_PTR (*bdrv_inactivate)(BlockDriverState *bs);
int (*bdrv_snapshot_create)(BlockDriverState *bs,
QEMUSnapshotInfo *sn_info);
int (*bdrv_snapshot_goto)(BlockDriverState *bs,
const char *snapshot_id);
int (*bdrv_snapshot_delete)(BlockDriverState *bs,
const char *snapshot_id,
const char *name,
Error **errp);
int GRAPH_RDLOCK_PTR (*bdrv_snapshot_create)(
BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
int GRAPH_UNLOCKED_PTR (*bdrv_snapshot_goto)(
BlockDriverState *bs, const char *snapshot_id);
int GRAPH_RDLOCK_PTR (*bdrv_snapshot_delete)(
BlockDriverState *bs, const char *snapshot_id, const char *name,
Error **errp);
int (*bdrv_snapshot_list)(BlockDriverState *bs,
QEMUSnapshotInfo **psn_info);
int (*bdrv_snapshot_load_tmp)(BlockDriverState *bs,
@ -725,8 +728,8 @@ struct BlockDriver {
int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_get_info)(
BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *(*bdrv_get_specific_info)(BlockDriverState *bs,
Error **errp);
ImageInfoSpecific * GRAPH_RDLOCK_PTR (*bdrv_get_specific_info)(
BlockDriverState *bs, Error **errp);
BlockStatsSpecific *(*bdrv_get_specific_stats)(BlockDriverState *bs);
int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_save_vmstate)(
@ -963,15 +966,15 @@ struct BdrvChildClass {
* Note that this can be nested. If drained_begin() was called twice, new
* I/O is allowed only after drained_end() was called twice, too.
*/
void (*drained_begin)(BdrvChild *child);
void (*drained_end)(BdrvChild *child);
void GRAPH_RDLOCK_PTR (*drained_begin)(BdrvChild *child);
void GRAPH_RDLOCK_PTR (*drained_end)(BdrvChild *child);
/*
* Returns whether the parent has pending requests for the child. This
* callback is polled after .drained_begin() has been called until all
* activity on the child has stopped.
*/
bool (*drained_poll)(BdrvChild *child);
bool GRAPH_RDLOCK_PTR (*drained_poll)(BdrvChild *child);
/*
* Notifies the parent that the filename of its child has changed (e.g.
@ -1039,8 +1042,8 @@ struct BdrvChild {
*/
bool quiesced_parent;
QLIST_ENTRY(BdrvChild) next;
QLIST_ENTRY(BdrvChild) next_parent;
QLIST_ENTRY(BdrvChild GRAPH_RDLOCK_PTR) next;
QLIST_ENTRY(BdrvChild GRAPH_RDLOCK_PTR) next_parent;
};
/*
@ -1173,11 +1176,11 @@ struct BlockDriverState {
* See also comment in include/block/block.h, to learn how backing and file
* are connected with BdrvChildRole.
*/
QLIST_HEAD(, BdrvChild) children;
QLIST_HEAD(, BdrvChild GRAPH_RDLOCK_PTR) children;
BdrvChild *backing;
BdrvChild *file;
QLIST_HEAD(, BdrvChild) parents;
QLIST_HEAD(, BdrvChild GRAPH_RDLOCK_PTR) parents;
QDict *options;
QDict *explicit_options;

View file

@ -99,7 +99,7 @@ BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
*/
void bdrv_wakeup(BlockDriverState *bs);
const char *bdrv_get_parent_name(const BlockDriverState *bs);
const char * GRAPH_RDLOCK bdrv_get_parent_name(const BlockDriverState *bs);
bool blk_dev_has_tray(BlockBackend *blk);
bool blk_dev_is_tray_open(BlockBackend *blk);
@ -133,7 +133,7 @@ bdrv_refresh_total_sectors(BlockDriverState *bs, int64_t hint);
BdrvChild *bdrv_cow_child(BlockDriverState *bs);
BdrvChild *bdrv_filter_child(BlockDriverState *bs);
BdrvChild *bdrv_filter_or_cow_child(BlockDriverState *bs);
BdrvChild *bdrv_primary_child(BlockDriverState *bs);
BdrvChild * GRAPH_RDLOCK bdrv_primary_child(BlockDriverState *bs);
BlockDriverState *bdrv_skip_filters(BlockDriverState *bs);
BlockDriverState *bdrv_backing_chain_next(BlockDriverState *bs);
@ -155,7 +155,8 @@ static inline BlockDriverState *bdrv_filter_or_cow_bs(BlockDriverState *bs)
return child_bs(bdrv_filter_or_cow_child(bs));
}
static inline BlockDriverState *bdrv_primary_bs(BlockDriverState *bs)
static inline BlockDriverState * GRAPH_RDLOCK
bdrv_primary_bs(BlockDriverState *bs)
{
IO_CODE();
return child_bs(bdrv_primary_child(bs));


@ -116,7 +116,8 @@ void unregister_aiocontext(AioContext *ctx);
* This function polls. Callers must not hold the lock of any AioContext other
* than the current one and the one of @bs.
*/
void bdrv_graph_wrlock(BlockDriverState *bs) TSA_ACQUIRE(graph_lock) TSA_NO_TSA;
void no_coroutine_fn TSA_ACQUIRE(graph_lock) TSA_NO_TSA
bdrv_graph_wrlock(BlockDriverState *bs);
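
The writer lock excludes all readers, so the call sites converted in this series do not take it while still holding the reader lock: they drop the main-loop rdlock, take the writer lock for the actual graph change, and then re-acquire the reader lock the caller expects (see qcow2_do_close() and vmdk_parse_extents() above). Condensed into a sketch, with example_drop_child() itself hypothetical:

static void GRAPH_RDLOCK
example_drop_child(BlockDriverState *bs, BdrvChild *child)
{
    GLOBAL_STATE_CODE();

    bdrv_graph_rdunlock_main_loop();    /* give up the reader lock first */

    bdrv_graph_wrlock(NULL);            /* bs->children/bs->parents change below */
    bdrv_unref_child(bs, child);
    bdrv_graph_wrunlock();

    bdrv_graph_rdlock_main_loop();      /* restore the locking state on return */
}
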
/*
* bdrv_graph_wrunlock:


@ -29,18 +29,17 @@
#include "block/snapshot.h"
#include "qapi/qapi-types-block-core.h"
BlockDeviceInfo *bdrv_block_device_info(BlockBackend *blk,
BlockDriverState *bs,
bool flat,
Error **errp);
int bdrv_query_snapshot_info_list(BlockDriverState *bs,
SnapshotInfoList **p_list,
Error **errp);
void bdrv_query_image_info(BlockDriverState *bs,
ImageInfo **p_info,
bool flat,
bool skip_implicit_filters,
Error **errp);
BlockDeviceInfo * GRAPH_RDLOCK
bdrv_block_device_info(BlockBackend *blk, BlockDriverState *bs,
bool flat, Error **errp);
int GRAPH_RDLOCK
bdrv_query_snapshot_info_list(BlockDriverState *bs,
SnapshotInfoList **p_list,
Error **errp);
void GRAPH_RDLOCK
bdrv_query_image_info(BlockDriverState *bs, ImageInfo **p_info, bool flat,
bool skip_implicit_filters, Error **errp);
void GRAPH_RDLOCK
bdrv_query_block_graph_info(BlockDriverState *bs, BlockGraphInfo **p_info,
Error **errp);


@ -25,6 +25,7 @@
#ifndef SNAPSHOT_H
#define SNAPSHOT_H
#include "block/graph-lock.h"
#include "qapi/qapi-builtin-types.h"
#define SNAPSHOT_OPT_BASE "snapshot."
@ -59,16 +60,19 @@ bool bdrv_snapshot_find_by_id_and_name(BlockDriverState *bs,
const char *name,
QEMUSnapshotInfo *sn_info,
Error **errp);
int bdrv_can_snapshot(BlockDriverState *bs);
int bdrv_snapshot_create(BlockDriverState *bs,
QEMUSnapshotInfo *sn_info);
int bdrv_snapshot_goto(BlockDriverState *bs,
const char *snapshot_id,
Error **errp);
int bdrv_snapshot_delete(BlockDriverState *bs,
const char *snapshot_id,
const char *name,
Error **errp);
int GRAPH_RDLOCK bdrv_can_snapshot(BlockDriverState *bs);
int GRAPH_RDLOCK
bdrv_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info);
int GRAPH_UNLOCKED
bdrv_snapshot_goto(BlockDriverState *bs, const char *snapshot_id, Error **errp);
int GRAPH_RDLOCK
bdrv_snapshot_delete(BlockDriverState *bs, const char *snapshot_id,
const char *name, Error **errp);
int bdrv_snapshot_list(BlockDriverState *bs,
QEMUSnapshotInfo **psn_info);
int bdrv_snapshot_load_tmp(BlockDriverState *bs,

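Note the split in the snapshot API: bdrv_can_snapshot(), bdrv_snapshot_create() and bdrv_snapshot_delete() become GRAPH_RDLOCK, while bdrv_snapshot_goto() is GRAPH_UNLOCKED, presumably because going to a snapshot can reopen nodes and change the graph, so the caller must not hold the reader lock across it. A hedged caller sketch (sn, local_err and ret are assumed to be declared; error handling is elided):

    /* Illustrative only: create under the rdlock, goto with no lock held. */
    bdrv_graph_rdlock_main_loop();
    ret = bdrv_snapshot_create(bs, &sn);
    bdrv_graph_rdunlock_main_loop();

    if (ret == 0) {
        /* GRAPH_UNLOCKED: no graph lock may be held here */
        ret = bdrv_snapshot_goto(bs, sn.id_str, &local_err);
    }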

@@ -59,8 +59,8 @@ BlockBackend *blk_by_public(BlockBackendPublic *public);
void blk_remove_bs(BlockBackend *blk);
int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp);
int blk_replace_bs(BlockBackend *blk, BlockDriverState *new_bs, Error **errp);
bool bdrv_has_blk(BlockDriverState *bs);
bool bdrv_is_root_node(BlockDriverState *bs);
bool GRAPH_RDLOCK bdrv_has_blk(BlockDriverState *bs);
bool GRAPH_RDLOCK bdrv_is_root_node(BlockDriverState *bs);
int GRAPH_UNLOCKED blk_set_perm(BlockBackend *blk, uint64_t perm,
uint64_t shared_perm, Error **errp);
void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm);


@@ -388,6 +388,8 @@ static int init_blk_migration(QEMUFile *f)
Error *local_err = NULL;
int ret;
GRAPH_RDLOCK_GUARD_MAINLOOP();
block_mig_state.submitted = 0;
block_mig_state.read_done = 0;
block_mig_state.transferred = 0;


@@ -794,6 +794,8 @@ static void vm_completion(ReadLineState *rs, const char *str)
BlockDriverState *bs;
BdrvNextIterator it;
GRAPH_RDLOCK_GUARD_MAINLOOP();
len = strlen(str);
readline_set_completion_index(rs, len);


@@ -3165,7 +3165,9 @@ static int get_block_status(BlockDriverState *bs, int64_t offset,
has_offset = !!(ret & BDRV_BLOCK_OFFSET_VALID);
if (file && has_offset) {
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(file);
bdrv_graph_rdunlock_main_loop();
filename = file->filename;
}
@@ -3470,7 +3472,10 @@ static int img_snapshot(int argc, char **argv)
sn.date_sec = rt / G_USEC_PER_SEC;
sn.date_nsec = (rt % G_USEC_PER_SEC) * 1000;
bdrv_graph_rdlock_main_loop();
ret = bdrv_snapshot_create(bs, &sn);
bdrv_graph_rdunlock_main_loop();
if (ret) {
error_report("Could not create snapshot '%s': %s",
snapshot_name, strerror(-ret));
@@ -3486,6 +3491,7 @@ static int img_snapshot(int argc, char **argv)
break;
case SNAPSHOT_DELETE:
bdrv_graph_rdlock_main_loop();
ret = bdrv_snapshot_find(bs, &sn, snapshot_name);
if (ret < 0) {
error_report("Could not delete snapshot '%s': snapshot not "
@@ -3499,6 +3505,7 @@ static int img_snapshot(int argc, char **argv)
ret = 1;
}
}
bdrv_graph_rdunlock_main_loop();
break;
}
@@ -3683,7 +3690,9 @@ static int img_rebase(int argc, char **argv)
qdict_put_bool(options, BDRV_OPT_FORCE_SHARE, true);
}
bdrv_graph_rdlock_main_loop();
bdrv_refresh_filename(bs);
bdrv_graph_rdunlock_main_loop();
overlay_filename = bs->exact_filename[0] ? bs->exact_filename
: bs->filename;
out_real_path =
@@ -4120,6 +4129,8 @@ static int print_amend_option_help(const char *format)
{
BlockDriver *drv;
GRAPH_RDLOCK_GUARD_MAINLOOP();
/* Find driver and parse its options */
drv = bdrv_find_format(format);
if (!drv) {
@@ -4258,9 +4269,11 @@ static int img_amend(int argc, char **argv)
goto out;
}
bdrv_graph_rdlock_main_loop();
if (!bs->drv->bdrv_amend_options) {
error_report("Format driver '%s' does not support option amendment",
fmt);
bdrv_graph_rdunlock_main_loop();
ret = -1;
goto out;
}
@@ -4280,6 +4293,7 @@
"This option is only supported for image creation\n");
}
bdrv_graph_rdunlock_main_loop();
error_report_err(err);
ret = -1;
goto out;
@@ -4289,6 +4303,8 @@
qemu_progress_print(0.f, 0);
ret = bdrv_amend_options(bs, opts, &amend_status_cb, NULL, force, &err);
qemu_progress_print(100.f, 0);
bdrv_graph_rdunlock_main_loop();
if (ret < 0) {
error_report_err(err);
goto out;

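qemu-img mostly takes the reader lock with explicit bdrv_graph_rdlock_main_loop()/bdrv_graph_rdunlock_main_loop() pairs rather than the scope guard, which means every early-exit path inside the locked region has to drop the lock before jumping out. Condensed from the amend case above, roughly (the variables and the out label are placeholders from the surrounding function):

    bdrv_graph_rdlock_main_loop();
    if (!bs->drv->bdrv_amend_options) {
        bdrv_graph_rdunlock_main_loop();    /* unlock on the error path too */
        ret = -1;
        goto out;
    }
    ret = bdrv_amend_options(bs, opts, &amend_status_cb, NULL, force, &err);
    bdrv_graph_rdunlock_main_loop();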

@@ -2037,6 +2037,9 @@ static int info_f(BlockBackend *blk, int argc, char **argv)
char s1[64], s2[64];
int ret;
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
if (bs->drv && bs->drv->format_name) {
printf("format name: %s\n", bs->drv->format_name);
}


@@ -87,8 +87,9 @@ def __init__(self, wrapper_type: str, return_type: str, name: str,
raise ValueError(f"Invalid no_co function name: {self.name}")
if not self.create_only_co:
raise ValueError(f"no_co function can't be mixed: {self.name}")
if self.graph_rdlock:
raise ValueError(f"no_co function can't be rdlock: {self.name}")
if self.graph_rdlock and self.graph_wrlock:
raise ValueError("function can't be both rdlock and wrlock: "
f"{self.name}")
self.target_name = f'{subsystem}_{subname}'
self.ctx = self.gen_ctx()
@@ -256,7 +257,10 @@ def gen_no_co_wrapper(func: FuncDecl) -> str:
graph_lock=''
graph_unlock=''
if func.graph_wrlock:
if func.graph_rdlock:
graph_lock=' bdrv_graph_rdlock_main_loop();'
graph_unlock=' bdrv_graph_rdunlock_main_loop();'
elif func.graph_wrlock:
graph_lock=' bdrv_graph_wrlock(NULL);'
graph_unlock=' bdrv_graph_wrunlock();'

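With the generator change, a no_co wrapper can now also be declared GRAPH_RDLOCK, in which case the generated bottom half takes the main-loop reader lock around the wrapped call. The generated code is roughly of this shape (the names below are invented for illustration; the real generated output differs in detail):

    /* Illustrative sketch of a generated no_co + rdlock wrapper bottom half. */
    static void bdrv_foo_bh(void *opaque)
    {
        FooState *s = opaque;   /* hypothetical per-call state struct */

        bdrv_graph_rdlock_main_loop();
        s->ret = bdrv_do_foo(s->bs);
        bdrv_graph_rdunlock_main_loop();

        aio_co_wake(s->co);     /* resume the coroutine that scheduled the BH */
    }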

@@ -1034,9 +1034,13 @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
blk_co_unref(blk);
} else {
BdrvChild *c, *next_c;
bdrv_graph_co_rdlock();
QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
bdrv_graph_co_rdunlock();
bdrv_co_unref_child(bs, c);
bdrv_graph_co_rdlock();
}
bdrv_graph_co_rdunlock();
}
dbdd->done = true;
@@ -1168,7 +1172,7 @@ struct detach_by_parent_data {
};
static struct detach_by_parent_data detach_by_parent_data;
static void detach_indirect_bh(void *opaque)
static void no_coroutine_fn detach_indirect_bh(void *opaque)
{
struct detach_by_parent_data *data = opaque;
@@ -1184,18 +1188,19 @@ static void detach_indirect_bh(void *opaque)
bdrv_graph_wrunlock();
}
static void detach_by_parent_aio_cb(void *opaque, int ret)
static void coroutine_mixed_fn detach_by_parent_aio_cb(void *opaque, int ret)
{
struct detach_by_parent_data *data = &detach_by_parent_data;
g_assert_cmpint(ret, ==, 0);
if (data->by_parent_cb) {
bdrv_inc_in_flight(data->child_b->bs);
detach_indirect_bh(data);
aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
detach_indirect_bh, &detach_by_parent_data);
}
}
static void detach_by_driver_cb_drained_begin(BdrvChild *child)
static void GRAPH_RDLOCK detach_by_driver_cb_drained_begin(BdrvChild *child)
{
struct detach_by_parent_data *data = &detach_by_parent_data;
@@ -1232,7 +1237,7 @@ static BdrvChildClass detach_by_driver_cb_class;
* state is messed up, but if it is only polled in the single
* BDRV_POLL_WHILE() at the end of the drain, this should work fine.
*/
static void test_detach_indirect(bool by_parent_cb)
static void TSA_NO_TSA test_detach_indirect(bool by_parent_cb)
{
BlockBackend *blk;
BlockDriverState *parent_a, *parent_b, *a, *b, *c;

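In the drain test, the coroutine takes the reader lock with bdrv_graph_co_rdlock() only around the list traversal and releases it again before each bdrv_co_unref_child() call, presumably because dropping the child reference modifies the graph and so must not run under the reader lock. The resulting pattern, condensed with comments added:

    bdrv_graph_co_rdlock();
    QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
        bdrv_graph_co_rdunlock();       /* release before the graph changes */
        bdrv_co_unref_child(bs, c);
        bdrv_graph_co_rdlock();         /* re-acquire to continue the walk */
    }
    bdrv_graph_co_rdunlock();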

@@ -383,6 +383,9 @@ static void test_sync_op_check(BdrvChild *c)
static void test_sync_op_activate(BdrvChild *c)
{
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
/* Early success: Image is not inactive */
bdrv_activate(c->bs, NULL);
}
@@ -468,11 +471,16 @@ static void test_sync_op(const void *opaque)
BlockDriverState *bs;
BdrvChild *c;
GLOBAL_STATE_CODE();
blk = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL);
bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
blk_insert_bs(blk, bs, &error_abort);
bdrv_graph_rdlock_main_loop();
c = QLIST_FIRST(&bs->parents);
bdrv_graph_rdunlock_main_loop();
blk_set_aio_context(blk, ctx, &error_abort);
aio_context_acquire(ctx);