bcachefs: bch2_trans_unlock() must always be followed by relock() or begin()

We're about to add new asserts for btree_trans locking consistency, and
part of that requires that we aren't using the btree_trans while it's
unlocked.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2024-04-09 23:23:08 -04:00
parent 4984faff5d
commit ca563dccb2
10 changed files with 29 additions and 4 deletions

View file

@@ -2172,6 +2172,9 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
u64 now;
int ret = 0;
if (bch2_trans_relock(trans))
bch2_trans_begin(trans);
a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
ret = PTR_ERR_OR_ZERO(a);
if (ret)

View file

@@ -1342,6 +1342,10 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
*wp_ret = wp = writepoint_find(trans, write_point.v);
ret = bch2_trans_relock(trans);
if (ret)
goto err;
/* metadata may not allocate on cache devices: */
if (wp->data_type != BCH_DATA_user)
have_cache = true;

View file

@@ -729,6 +729,8 @@ transaction_restart: \
#define for_each_btree_key_upto(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _do) \
({ \
bch2_trans_begin(trans); \
\
struct btree_iter _iter; \
bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \

View file

@@ -737,9 +737,6 @@ static void btree_update_nodes_written(struct btree_update *as)
*/
b = READ_ONCE(as->b);
if (b) {
btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
as->btree_id, b->c.level, b->key.k.p);
struct btree_path *path = trans->paths + path_idx;
/*
* @b is the node we did the final insert into:
*
@@ -763,6 +760,10 @@ static void btree_update_nodes_written(struct btree_update *as)
* have here:
*/
bch2_trans_unlock(trans);
bch2_trans_begin(trans);
btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
as->btree_id, b->c.level, b->key.k.p);
struct btree_path *path = trans->paths + path_idx;
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED);
path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);

View file

@@ -386,6 +386,8 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
while (bio_sectors(bio)) {
unsigned sectors = bio_sectors(bio);
bch2_trans_begin(trans);
bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
BTREE_ITER_slots);
ret = lockrestart_do(trans, ({

View file

@@ -1036,6 +1036,10 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
bch2_btree_iter_set_pos(&iter,
POS(iter.pos.inode, iter.pos.offset + sectors));
ret = bch2_trans_relock(trans);
if (ret)
break;
}
start = iter.pos.offset;
bch2_trans_iter_exit(trans, &iter);

View file

@@ -1248,6 +1248,10 @@ static void bch2_nocow_write(struct bch_write_op *op)
buckets.nr = 0;
ret = bch2_trans_relock(trans);
if (ret)
break;
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)

View file

@@ -158,6 +158,8 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
return ret;
bch2_trans_begin(trans);
ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),

View file

@@ -323,6 +323,8 @@ static int do_rebalance(struct moving_context *ctxt)
struct bkey_s_c k;
int ret = 0;
bch2_trans_begin(trans);
bch2_move_stats_init(&r->work_stats, "rebalance_work");
bch2_move_stats_init(&r->scan_stats, "rebalance_scan");

View file

@@ -202,7 +202,7 @@ int bch2_journal_replay(struct bch_fs *c)
struct journal *j = &c->journal;
u64 start_seq = c->journal_replay_seq_start;
u64 end_seq = c->journal_replay_seq_start;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_trans *trans = NULL;
bool immediate_flush = false;
int ret = 0;
@@ -216,6 +216,7 @@ int bch2_journal_replay(struct bch_fs *c)
BUG_ON(!atomic_read(&keys->ref));
move_gap(keys, keys->nr);
trans = bch2_trans_get(c);
/*
* First, attempt to replay keys in sorted order. This is more