From 75d7b3aec13be557f15d099dc612dd658d670d1d Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Thu, 16 Jun 2022 10:21:58 +0100
Subject: [PATCH] io_uring: kill REQ_F_COMPLETE_INLINE

REQ_F_COMPLETE_INLINE is only needed to delay queueing into the
completion list to io_queue_sqe() as __io_req_complete() is inlined and
we don't want to bloat the kernel.

As now we complete in a more centralised fashion in io_issue_sqe() we
can get rid of the flag and queue to the list directly.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/600ba20a9338b8a39b249b23d3d177803613dde4.1655371007.git.asml.silence@gmail.com
Reviewed-by: Hao Xu
Signed-off-by: Jens Axboe
---
 io_uring/io_uring.c       | 18 +++++++-----------
 io_uring/io_uring.h       |  5 -----
 io_uring/io_uring_types.h |  3 ---
 3 files changed, 7 insertions(+), 19 deletions(-)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 72640aa55abc..541c109a9273 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -742,10 +742,7 @@ void io_req_complete_post(struct io_kiocb *req)
 
 inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
 {
-	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
-		req->flags |= REQ_F_COMPLETE_INLINE;
-	else
-		io_req_complete_post(req);
+	io_req_complete_post(req);
 }
 
 void io_req_complete_failed(struct io_kiocb *req, s32 res)
@@ -1581,9 +1578,12 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 	if (creds)
 		revert_creds(creds);
 
-	if (ret == IOU_OK)
-		__io_req_complete(req, issue_flags);
-	else if (ret != IOU_ISSUE_SKIP_COMPLETE)
+	if (ret == IOU_OK) {
+		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
+			io_req_add_compl_list(req);
+		else
+			io_req_complete_post(req);
+	} else if (ret != IOU_ISSUE_SKIP_COMPLETE)
 		return ret;
 
 	/* If the op doesn't have a file, we're not polling for it */
@@ -1749,10 +1749,6 @@ static inline void io_queue_sqe(struct io_kiocb *req)
 
 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
 
-	if (req->flags & REQ_F_COMPLETE_INLINE) {
-		io_req_add_compl_list(req);
-		return;
-	}
 	/*
 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
 	 * doesn't support non-blocking read/write attempts
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 6744ce111e38..3f06fbae0ee9 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -217,11 +217,6 @@ static inline bool io_run_task_work(void)
 	return false;
 }
 
-static inline void io_req_complete_state(struct io_kiocb *req)
-{
-	req->flags |= REQ_F_COMPLETE_INLINE;
-}
-
 static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
 {
 	if (!*locked) {
diff --git a/io_uring/io_uring_types.h b/io_uring/io_uring_types.h
index ef1cf86e8932..4576ea8cad2e 100644
--- a/io_uring/io_uring_types.h
+++ b/io_uring/io_uring_types.h
@@ -301,7 +301,6 @@ enum {
 	REQ_F_POLLED_BIT,
 	REQ_F_BUFFER_SELECTED_BIT,
 	REQ_F_BUFFER_RING_BIT,
-	REQ_F_COMPLETE_INLINE_BIT,
 	REQ_F_REISSUE_BIT,
 	REQ_F_CREDS_BIT,
 	REQ_F_REFCOUNT_BIT,
@@ -356,8 +355,6 @@ enum {
 	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
 	/* buffer selected from ring, needs commit */
 	REQ_F_BUFFER_RING	= BIT(REQ_F_BUFFER_RING_BIT),
-	/* completion is deferred through io_comp_state */
-	REQ_F_COMPLETE_INLINE	= BIT(REQ_F_COMPLETE_INLINE_BIT),
 	/* caller should reissue async */
 	REQ_F_REISSUE		= BIT(REQ_F_REISSUE_BIT),
 	/* supports async reads/writes */
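
Note (not part of the patch): the stand-alone sketch below only models the control-flow change for readers outside the kernel tree. struct req, finish_issue(), add_to_compl_list(), complete_post() and the flag value are simplified stand-ins, not the kernel's definitions; the point is that io_issue_sqe() now decides between deferred and immediate completion itself, instead of setting REQ_F_COMPLETE_INLINE for io_queue_sqe() to act on later.

/*
 * Minimal model of the new completion path, with stand-in types.
 * Build: cc -Wall -o compl_model compl_model.c
 */
#include <stdio.h>

#define IO_URING_F_COMPLETE_DEFER	(1u << 0)	/* stand-in value, not the kernel's */

struct req {
	const char *name;
};

/* models io_req_add_compl_list(): queue onto the deferred completion list */
static void add_to_compl_list(struct req *req)
{
	printf("%s: queued to deferred completion list\n", req->name);
}

/* models io_req_complete_post(): post the completion immediately */
static void complete_post(struct req *req)
{
	printf("%s: completed immediately\n", req->name);
}

/*
 * Models the new tail of io_issue_sqe() after a successful issue (IOU_OK):
 * the caller's issue_flags, not a per-request marker flag, pick the path.
 */
static void finish_issue(struct req *req, unsigned issue_flags)
{
	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
		add_to_compl_list(req);
	else
		complete_post(req);
}

int main(void)
{
	struct req inline_req = { .name = "inline submission" };
	struct req other_req = { .name = "non-deferred path" };

	/* io_queue_sqe() issues with IO_URING_F_COMPLETE_DEFER set */
	finish_issue(&inline_req, IO_URING_F_COMPLETE_DEFER);
	/* other issuers (e.g. an async worker) issue without it */
	finish_issue(&other_req, 0);
	return 0;
}

With the decision centralised this way, only io_issue_sqe() needs to look at IO_URING_F_COMPLETE_DEFER at completion time, so the per-request REQ_F_COMPLETE_INLINE marker and the extra flag test in io_queue_sqe() can both be removed, which is exactly what the patch does.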