diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 2f91ff67453f..f07373b78174 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -2172,6 +2172,9 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
 	u64 now;
 	int ret = 0;
 
+	if (bch2_trans_relock(trans))
+		bch2_trans_begin(trans);
+
 	a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
 	ret = PTR_ERR_OR_ZERO(a);
 	if (ret)
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index df4439a38df5..fb8825c4e7ad 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -1342,6 +1342,10 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
 
 	*wp_ret = wp = writepoint_find(trans, write_point.v);
 
+	ret = bch2_trans_relock(trans);
+	if (ret)
+		goto err;
+
 	/* metadata may not allocate on cache devices: */
 	if (wp->data_type != BCH_DATA_user)
 		have_cache = true;
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 3bef5b1af16d..0d44d613e4ac 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -729,6 +729,8 @@ transaction_restart:						\
 #define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
 				 _start, _end, _flags, _k, _do)		\
 ({									\
+	bch2_trans_begin(trans);					\
+									\
 	struct btree_iter _iter;					\
 	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
 			     (_start), (_flags));			\
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 4aa62a74a59b..35f80c97b973 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -737,9 +737,6 @@ static void btree_update_nodes_written(struct btree_update *as)
 	 */
 	b = READ_ONCE(as->b);
 	if (b) {
-		btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
-						as->btree_id, b->c.level, b->key.k.p);
-		struct btree_path *path = trans->paths + path_idx;
 		/*
 		 * @b is the node we did the final insert into:
 		 *
@@ -763,6 +760,10 @@ static void btree_update_nodes_written(struct btree_update *as)
 		 * have here:
 		 */
 		bch2_trans_unlock(trans);
+		bch2_trans_begin(trans);
+		btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
+						as->btree_id, b->c.level, b->key.k.p);
+		struct btree_path *path = trans->paths + path_idx;
 		btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
 		mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 		path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index cae4e4eb5329..e8c7c5cec03f 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -386,6 +386,8 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
 	while (bio_sectors(bio)) {
 		unsigned sectors = bio_sectors(bio);
 
+		bch2_trans_begin(trans);
+
 		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
 				     BTREE_ITER_slots);
 		ret = lockrestart_do(trans, ({
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 81cfc74828fd..841bb92e53df 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -1036,6 +1036,10 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
 
 		bch2_btree_iter_set_pos(&iter,
 			POS(iter.pos.inode, iter.pos.offset + sectors));
+
+		ret = bch2_trans_relock(trans);
+		if (ret)
+			break;
 	}
 	start = iter.pos.offset;
 	bch2_trans_iter_exit(trans, &iter);
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 217cb98ed707..73e25250de75 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -1248,6 +1248,10 @@ static void bch2_nocow_write(struct bch_write_op *op)
 
 		buckets.nr = 0;
 
+		ret = bch2_trans_relock(trans);
+		if (ret)
+			break;
+
 		k = bch2_btree_iter_peek_slot(&iter);
 		ret = bkey_err(k);
 		if (ret)
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index d0afd8bc0193..10bfb31c151b 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -158,6 +158,8 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
 	if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
 		return ret;
 
+	bch2_trans_begin(trans);
+
 	ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
 				  lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
 				  lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index c640c0f13976..cf81e5128c3a 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -323,6 +323,8 @@ static int do_rebalance(struct moving_context *ctxt)
 	struct bkey_s_c k;
 	int ret = 0;
 
+	bch2_trans_begin(trans);
+
 	bch2_move_stats_init(&r->work_stats, "rebalance_work");
 	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
 
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index cc603276f917..27ee27b285bd 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -202,7 +202,7 @@ int bch2_journal_replay(struct bch_fs *c)
 	struct journal *j = &c->journal;
 	u64 start_seq = c->journal_replay_seq_start;
 	u64 end_seq = c->journal_replay_seq_start;
-	struct btree_trans *trans = bch2_trans_get(c);
+	struct btree_trans *trans = NULL;
 	bool immediate_flush = false;
 	int ret = 0;
 
@@ -216,6 +216,7 @@ int bch2_journal_replay(struct bch_fs *c)
 	BUG_ON(!atomic_read(&keys->ref));
 
 	move_gap(keys, keys->nr);
+	trans = bch2_trans_get(c);
 
 	/*
 	 * First, attempt to replay keys in sorted order.  This is more