io_uring/rsrc: consolidate node caching

We store one pre-allocated rsrc node in ->rsrc_backup_node; merge it
into the generic ->rsrc_node_cache instead.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/6d5410e51ccd29be7a716be045b51d6b371baef6.1681210788.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov, 2023-04-11 12:06:05 +01:00; committed by Jens Axboe
commit 528407b1e0, parent 786788a8cf
4 changed files with 16 additions and 12 deletions
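
For background, and not part of the patch itself: ->rsrc_node_cache is an io_alloc_cache, a small intrusive stack of io_cache_entry objects embedded in the cached structures, so the single spare node can simply live there instead of in its own dedicated pointer. The sketch below is a rough user-space analogue of that pattern under made-up names (cache_entry, alloc_cache and friends), not the kernel's alloc_cache.h API:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-ins for io_cache_entry / io_alloc_cache. */
struct cache_entry {
        struct cache_entry *next;
};

struct alloc_cache {
        struct cache_entry *head;
};

/* Analogue of io_alloc_cache_empty(): no stacked entry means no head/next. */
static bool cache_empty(const struct alloc_cache *c)
{
        return c->head == NULL;
}

/* Analogue of io_alloc_cache_put(): push a spare object onto the stack. */
static void cache_put(struct alloc_cache *c, struct cache_entry *e)
{
        e->next = c->head;
        c->head = e;
}

/* Analogue of io_alloc_cache_get(): pop a cached object, or NULL if empty. */
static struct cache_entry *cache_get(struct alloc_cache *c)
{
        struct cache_entry *e = c->head;

        if (e)
                c->head = e->next;
        return e;
}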

include/linux/io_uring_types.h

@@ -326,7 +326,6 @@ struct io_ring_ctx {
 	struct io_restriction		restrictions;

 	/* slow path rsrc auxilary data, used by update/register */
-	struct io_rsrc_node		*rsrc_backup_node;
 	struct io_mapped_ubuf		*dummy_ubuf;
 	struct io_rsrc_data		*file_data;
 	struct io_rsrc_data		*buf_data;

io_uring/alloc_cache.h

@@ -23,6 +23,11 @@ static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
 	return false;
 }

+static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache)
+{
+	return !cache->list.next;
+}
+
 static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
 {
 	if (cache->list.next) {

io_uring/io_uring.c

@@ -2852,8 +2852,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	/* there are no registered resources left, nobody uses it */
 	if (ctx->rsrc_node)
 		io_rsrc_node_destroy(ctx, ctx->rsrc_node);
-	if (ctx->rsrc_backup_node)
-		io_rsrc_node_destroy(ctx, ctx->rsrc_backup_node);
 	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));

io_uring/rsrc.c

@@ -230,7 +230,7 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 			struct io_rsrc_data *data_to_kill)
 	__must_hold(&ctx->uring_lock)
 {
-	WARN_ON_ONCE(!ctx->rsrc_backup_node);
+	WARN_ON_ONCE(io_alloc_cache_empty(&ctx->rsrc_node_cache));
 	WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);

 	if (data_to_kill) {
@@ -245,18 +245,20 @@ void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 		ctx->rsrc_node = NULL;
 	}

-	if (!ctx->rsrc_node) {
-		ctx->rsrc_node = ctx->rsrc_backup_node;
-		ctx->rsrc_backup_node = NULL;
-	}
+	if (!ctx->rsrc_node)
+		ctx->rsrc_node = io_rsrc_node_alloc(ctx);
 }

 int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
 {
-	if (ctx->rsrc_backup_node)
-		return 0;
-	ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
-	return ctx->rsrc_backup_node ? 0 : -ENOMEM;
+	if (io_alloc_cache_empty(&ctx->rsrc_node_cache)) {
+		struct io_rsrc_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+		if (!node)
+			return -ENOMEM;
+		io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache);
+	}
+	return 0;
 }

 __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
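
Taken together, the flow after this patch is: io_rsrc_node_switch_start() makes sure at least one node sits in ->rsrc_node_cache while a sleeping kzalloc(GFP_KERNEL) is still convenient, and the switch itself later obtains its node via io_rsrc_node_alloc(), which (as of the parent patch in this series) is backed by the same cache; the WARN_ON_ONCE() at the top of io_rsrc_node_switch() is there to catch callers that skipped the prepare step. A compact, self-contained user-space sketch of that prepare/consume split, using stand-in names rather than the real io_uring API:

#include <stddef.h>
#include <stdlib.h>

/* Stand-in types; the real code embeds an io_cache_entry in io_rsrc_node. */
struct entry { struct entry *next; };
struct cache { struct entry *head; };
struct node  { struct entry link; int refs; };

/* Phase 1, analogue of io_rsrc_node_switch_start(): top the cache up with
 * one zeroed node while returning -ENOMEM is still easy to handle. */
static int switch_start(struct cache *c)
{
        if (!c->head) {                                 /* cache is empty */
                struct node *n = calloc(1, sizeof(*n)); /* ~kzalloc(GFP_KERNEL) */

                if (!n)
                        return -1;                      /* -ENOMEM */
                n->link.next = c->head;                 /* ~io_alloc_cache_put() */
                c->head = &n->link;
        }
        return 0;
}

/* Phase 2, analogue of the allocation done from io_rsrc_node_switch():
 * pop the pre-stocked node; with phase 1 done and nothing else draining
 * the cache, this cannot come back empty. */
static struct node *switch_consume(struct cache *c)
{
        struct entry *e = c->head;

        if (!e)
                return NULL;            /* corresponds to the WARN_ON_ONCE() case */
        c->head = e->next;
        return (struct node *)((char *)e - offsetof(struct node, link));
}

The point of the split is that the consume side runs where an allocation failure would be awkward to unwind, so all failure handling is pushed into switch_start().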