mirror of
https://github.com/torvalds/linux
synced 2024-10-06 19:34:19 +00:00
nvme/io_uring: use helper for polled completions
NVMe is making up issue_flags, which is a no-no in general, and to make matters worse, they are completely the wrong ones. For a pure polled request, which it does check for, we're already inside the ctx->uring_lock when the completions are run off io_do_iopoll(). Hence the correct flag would be '0' rather than IO_URING_F_UNLOCKED. Reviewed-by: Pavel Begunkov <asml.silence@gmail.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
36a005b9c6
commit
1afdb76038
|
@@ -423,13 +423,20 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
|
||||||
pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
|
pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* For iopoll, complete it directly.
|
* For iopoll, complete it directly. Note that using the uring_cmd
|
||||||
|
* helper for this is safe only because we check blk_rq_is_poll().
|
||||||
|
* As that returns false if we're NOT on a polled queue, then it's
|
||||||
|
* safe to use the polled completion helper.
|
||||||
|
*
|
||||||
* Otherwise, move the completion to task work.
|
* Otherwise, move the completion to task work.
|
||||||
*/
|
*/
|
||||||
if (blk_rq_is_poll(req))
|
if (blk_rq_is_poll(req)) {
|
||||||
nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED);
|
if (pdu->bio)
|
||||||
else
|
blk_rq_unmap_user(pdu->bio);
|
||||||
|
io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
|
||||||
|
} else {
|
||||||
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
|
io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
|
||||||
|
}
|
||||||
|
|
||||||
return RQ_END_IO_FREE;
|
return RQ_END_IO_FREE;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -69,6 +69,17 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Polled completions must ensure they are coming from a poll queue, and
|
||||||
|
* hence are completed inside the usual poll handling loops.
|
||||||
|
*/
|
||||||
|
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
|
||||||
|
ssize_t ret, ssize_t res2)
|
||||||
|
{
|
||||||
|
lockdep_assert(in_task());
|
||||||
|
io_uring_cmd_done(ioucmd, ret, res2, 0);
|
||||||
|
}
|
||||||
|
|
||||||
/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
|
/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
|
||||||
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
|
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
|
||||||
void (*task_work_cb)(struct io_uring_cmd *, unsigned))
|
void (*task_work_cb)(struct io_uring_cmd *, unsigned))
|
||||||
|
|
Loading…
Reference in a new issue