bcachefs: Don't use bch_write_op->cl for delivering completions

We already had op->end_io as an alternative mechanism to op->cl.parent
for delivering write completions; this switches all code paths to using
op->end_io.

Two reasons:
 - op->end_io is more efficient, due to fewer atomic ops; this completes
   the conversion that was originally only done for the direct IO path.
 - We'll be restructuring the write path to use a different mechanism for
   punting to process context, refactoring to not use op->cl will make
   that easier.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 9f311f2166 (parent af17118319)
Author: Kent Overstreet <kent.overstreet@linux.dev>
Date:   2022-10-29 02:47:33 -04:00

3 changed files with 28 additions and 45 deletions
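For context, the efficiency argument above comes down to how a finished write reaches the code waiting on it: dropping a reference on a parent closure costs atomic operations per write, while a direct end_io callback is a plain indirect call. The standalone C sketch below illustrates the two patterns; it is not bcachefs code, and every name in it is invented for illustration.

/*
 * Illustrative sketch only -- none of these types or functions are bcachefs
 * APIs; they are invented to contrast the two completion-delivery patterns
 * the commit message describes.
 */
#include <stdatomic.h>
#include <stdio.h>

struct write_op;

struct parent_closure {
	atomic_int remaining;			/* refcount, like a parent closure */
	void (*fn)(struct parent_closure *);	/* continuation to run at zero */
};

struct write_op {
	struct parent_closure *parent;		/* old style: complete via parent */
	void (*end_io)(struct write_op *);	/* new style: direct callback */
};

static void write_done(struct write_op *op)
{
	if (op->end_io) {
		/* Direct callback: one indirect call, no refcount traffic. */
		op->end_io(op);
	} else {
		/* Parent closure: atomic decrement, maybe run the parent. */
		if (atomic_fetch_sub(&op->parent->remaining, 1) == 1)
			op->parent->fn(op->parent);
	}
}

static void my_end_io(struct write_op *op)
{
	(void) op;
	printf("write completed via op->end_io\n");
}

int main(void)
{
	struct write_op op = { .end_io = my_end_io };

	/* The submitter wires up end_io before issuing the write... */
	write_done(&op);	/* ...and the completion path just calls it. */
	return 0;
}

In the diff that follows, each converted caller does exactly this wiring: it sets op->end_io to its completion handler and passes NULL as the parent closure to closure_call().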


@@ -65,7 +65,6 @@ struct quota_res {
 };
 
 struct bch_writepage_io {
-	struct closure cl;
 	struct bch_inode_info *inode;
 
 	/* must be last: */
@@ -979,18 +978,10 @@ static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs
 	};
 }
 
-static void bch2_writepage_io_free(struct closure *cl)
+static void bch2_writepage_io_done(struct bch_write_op *op)
 {
-	struct bch_writepage_io *io = container_of(cl,
-					struct bch_writepage_io, cl);
-
-	bio_put(&io->op.wbio.bio);
-}
-
-static void bch2_writepage_io_done(struct closure *cl)
-{
-	struct bch_writepage_io *io = container_of(cl,
-					struct bch_writepage_io, cl);
+	struct bch_writepage_io *io =
+		container_of(op, struct bch_writepage_io, op);
 	struct bch_fs *c = io->op.c;
 	struct bio *bio = &io->op.wbio.bio;
 	struct bvec_iter_all iter;
@@ -1054,7 +1045,7 @@ static void bch2_writepage_io_done(struct closure *cl)
 			end_page_writeback(bvec->bv_page);
 	}
 
-	closure_return_with_destructor(&io->cl, bch2_writepage_io_free);
+	bio_put(&io->op.wbio.bio);
 }
 
 static void bch2_writepage_do_io(struct bch_writepage_state *w)
@@ -1064,8 +1055,7 @@ static void bch2_writepage_do_io(struct bch_writepage_state *w)
 	down(&io->op.c->io_in_flight);
 
 	w->io = NULL;
-	closure_call(&io->op.cl, bch2_write, NULL, &io->cl);
-	continue_at(&io->cl, bch2_writepage_io_done, NULL);
+	closure_call(&io->op.cl, bch2_write, NULL, NULL);
 }
 
 /*
@@ -1087,9 +1077,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
 					   &c->writepage_bioset),
 			     struct bch_writepage_io, op.wbio.bio);
 
-	closure_init(&w->io->cl, NULL);
 	w->io->inode = inode;
 	op = &w->io->op;
 	bch2_write_op_init(op, c, w->opts);
 	op->target = w->opts.foreground_target;
@@ -1098,6 +1086,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
 	op->res.nr_replicas = nr_replicas;
 	op->write_point = writepoint_hashed(inode->ei_last_dirtied);
 	op->pos = POS(inode->v.i_ino, sector);
+	op->end_io = bch2_writepage_io_done;
 	op->wbio.bio.bi_iter.bi_sector = sector;
 	op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
 }


@@ -558,13 +558,9 @@ static void bch2_write_done(struct closure *cl)
 	bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
 
-	if (op->end_io) {
-		EBUG_ON(cl->parent);
-		closure_debug_destroy(cl);
-		op->end_io(op);
-	} else {
-		closure_return(cl);
-	}
+	EBUG_ON(cl->parent);
+	closure_debug_destroy(cl);
+	op->end_io(op);
 }
 
 /**
@@ -1357,7 +1353,6 @@ void bch2_write(struct closure *cl)
 /* Cache promotion on read */
 
 struct promote_op {
-	struct closure cl;
 	struct rcu_head rcu;
 	u64 start_time;
 
@@ -1411,10 +1406,10 @@ static void promote_free(struct bch_fs *c, struct promote_op *op)
 	kfree_rcu(op, rcu);
 }
 
-static void promote_done(struct closure *cl)
+static void promote_done(struct bch_write_op *wop)
 {
 	struct promote_op *op =
-		container_of(cl, struct promote_op, cl);
+		container_of(wop, struct promote_op, write.op);
 	struct bch_fs *c = op->write.op.c;
 
 	bch2_time_stats_update(&c->times[BCH_TIME_data_promote],
@@ -1427,7 +1422,6 @@ static void promote_done(struct closure *cl)
 static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
 {
 	struct bch_fs *c = rbio->c;
-	struct closure *cl = &op->cl;
 	struct bio *bio = &op->write.op.wbio.bio;
 
 	trace_promote(&rbio->bio);
@@ -1442,9 +1436,7 @@ static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
 	bch2_migrate_read_done(&op->write, rbio);
 
-	closure_init(cl, NULL);
-	closure_call(&op->write.op.cl, bch2_write, c->btree_update_wq, cl);
-	closure_return_with_destructor(cl, promote_done);
+	closure_call(&op->write.op.cl, bch2_write, c->btree_update_wq, NULL);
 }
 
 static struct promote_op *__promote_alloc(struct bch_fs *c,
@@ -1509,6 +1501,7 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
 			},
 			btree_id, k);
 	BUG_ON(ret);
+	op->write.op.end_io = promote_done;
 
 	return op;
 err:


@@ -324,9 +324,8 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
 	return 0;
 }
 
-static void move_free(struct closure *cl)
+static void move_free(struct moving_io *io)
 {
-	struct moving_io *io = container_of(cl, struct moving_io, cl);
 	struct moving_context *ctxt = io->write.ctxt;
 	struct bvec_iter_all iter;
 	struct bio_vec *bv;
@@ -342,28 +341,28 @@ static void move_free(struct closure *cl)
 	kfree(io);
 }
 
-static void move_write_done(struct closure *cl)
+static void move_write_done(struct bch_write_op *op)
 {
-	struct moving_io *io = container_of(cl, struct moving_io, cl);
+	struct moving_io *io = container_of(op, struct moving_io, write.op);
+	struct moving_context *ctxt = io->write.ctxt;
 
 	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
-	closure_return_with_destructor(cl, move_free);
+	move_free(io);
+	closure_put(&ctxt->cl);
 }
 
-static void move_write(struct closure *cl)
+static void move_write(struct moving_io *io)
 {
-	struct moving_io *io = container_of(cl, struct moving_io, cl);
-
 	if (unlikely(io->rbio.bio.bi_status || io->rbio.hole)) {
-		closure_return_with_destructor(cl, move_free);
+		move_free(io);
 		return;
 	}
 
-	bch2_migrate_read_done(&io->write, &io->rbio);
+	closure_get(&io->write.ctxt->cl);
 	atomic_add(io->write_sectors, &io->write.ctxt->write_sectors);
-	closure_call(&io->write.op.cl, bch2_write, NULL, cl);
-	continue_at(cl, move_write_done, NULL);
+
+	bch2_migrate_read_done(&io->write, &io->rbio);
+	closure_call(&io->write.op.cl, bch2_write, NULL, NULL);
 }
 
 static inline struct moving_io *next_pending_write(struct moving_context *ctxt)
@@ -394,7 +393,7 @@ static void do_pending_writes(struct moving_context *ctxt)
 	while ((io = next_pending_write(ctxt))) {
 		list_del(&io->list);
-		closure_call(&io->cl, move_write, NULL, &ctxt->cl);
+		move_write(io);
 	}
 }
@@ -480,6 +479,8 @@ static int bch2_move_extent(struct btree_trans *trans,
 	if (ret)
 		goto err_free_pages;
 
+	io->write.op.end_io = move_write_done;
+
 	atomic64_inc(&ctxt->stats->keys_moved);
 	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);