io_uring: move fdinfo helpers to its own file

This also means moving a bit more of the fixed file handling to the
filetable side, which makes sense separately too.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Jens Axboe
Date:   2022-05-25 10:40:19 -06:00
Parent: e5550a1447
Commit: a4ad4f748e

6 changed files with 230 additions and 208 deletions
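These helpers back the ring's show_fdinfo hook, so their output is what appears under /proc/<pid>/fdinfo/<fd> for an io_uring file descriptor. As a quick way to see that output, here is a minimal userspace sketch (no error handling, /dev/null as a stand-in fd; a real ring fd additionally shows the SqMask/SqHead/CqTail/SqThread/UserFiles/UserBufs fields printed by fdinfo.c below):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64], buf[4096];
	ssize_t n;
	int fd, info;

	fd = open("/dev/null", O_RDONLY);	/* stand-in for an io_uring fd */
	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
	info = open(path, O_RDONLY);
	while ((n = read(info, buf, sizeof(buf))) > 0)
		write(STDOUT_FILENO, buf, (size_t)n);
	close(info);
	close(fd);
	return 0;
}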

io_uring/Makefile

@@ -6,5 +6,5 @@ obj-$(CONFIG_IO_URING) += io_uring.o xattr.o nop.o fs.o splice.o \
sync.o advise.o filetable.o \
openclose.o uring_cmd.o epoll.o \
statx.o net.o msg_ring.o timeout.o \
-					sqpoll.o
+					sqpoll.o fdinfo.o
obj-$(CONFIG_IO_WQ) += io-wq.o

io_uring/fdinfo.c (new file, 191 lines)

@@ -0,0 +1,191 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/io_uring.h>
#include <uapi/linux/io_uring.h>
#include "io_uring_types.h"
#include "io_uring.h"
#include "sqpoll.h"
#include "fdinfo.h"
#ifdef CONFIG_PROC_FS
static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
const struct cred *cred)
{
struct user_namespace *uns = seq_user_ns(m);
struct group_info *gi;
kernel_cap_t cap;
unsigned __capi;
int g;
seq_printf(m, "%5d\n", id);
seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
seq_puts(m, "\n\tGroups:\t");
gi = cred->group_info;
for (g = 0; g < gi->ngroups; g++) {
seq_put_decimal_ull(m, g ? " " : "",
from_kgid_munged(uns, gi->gid[g]));
}
seq_puts(m, "\n\tCapEff:\t");
cap = cred->cap_effective;
CAP_FOR_EACH_U32(__capi)
seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
seq_putc(m, '\n');
return 0;
}
static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
struct seq_file *m)
{
struct io_sq_data *sq = NULL;
struct io_overflow_cqe *ocqe;
struct io_rings *r = ctx->rings;
unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
unsigned int sq_head = READ_ONCE(r->sq.head);
unsigned int sq_tail = READ_ONCE(r->sq.tail);
unsigned int cq_head = READ_ONCE(r->cq.head);
unsigned int cq_tail = READ_ONCE(r->cq.tail);
unsigned int cq_shift = 0;
unsigned int sq_entries, cq_entries;
bool has_lock;
bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
unsigned int i;
if (is_cqe32)
cq_shift = 1;
/*
* we may get imprecise sqe and cqe info if uring is actively running
* since we get cached_sq_head and cached_cq_tail without uring_lock
* and sq_tail and cq_head are changed by userspace. But it's ok since
* we usually use these info when it is stuck.
*/
seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
seq_printf(m, "SqHead:\t%u\n", sq_head);
seq_printf(m, "SqTail:\t%u\n", sq_tail);
seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
seq_printf(m, "CqHead:\t%u\n", cq_head);
seq_printf(m, "CqTail:\t%u\n", cq_tail);
seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
for (i = 0; i < sq_entries; i++) {
unsigned int entry = i + sq_head;
unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
struct io_uring_sqe *sqe;
if (sq_idx > sq_mask)
continue;
sqe = &ctx->sq_sqes[sq_idx];
seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
sq_idx, sqe->opcode, sqe->fd, sqe->flags,
sqe->user_data);
}
seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
for (i = 0; i < cq_entries; i++) {
unsigned int entry = i + cq_head;
struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];
if (!is_cqe32) {
seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
entry & cq_mask, cqe->user_data, cqe->res,
cqe->flags);
} else {
seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x, "
"extra1:%llu, extra2:%llu\n",
entry & cq_mask, cqe->user_data, cqe->res,
cqe->flags, cqe->big_cqe[0], cqe->big_cqe[1]);
}
}
/*
* Avoid ABBA deadlock between the seq lock and the io_uring mutex,
* since fdinfo case grabs it in the opposite direction of normal use
* cases. If we fail to get the lock, we just don't iterate any
* structures that could be going away outside the io_uring mutex.
*/
has_lock = mutex_trylock(&ctx->uring_lock);
if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
sq = ctx->sq_data;
if (!sq->thread)
sq = NULL;
}
seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
struct file *f = io_file_from_index(&ctx->file_table, i);
if (f)
seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
else
seq_printf(m, "%5u: <none>\n", i);
}
seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
struct io_mapped_ubuf *buf = ctx->user_bufs[i];
unsigned int len = buf->ubuf_end - buf->ubuf;
seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
}
if (has_lock && !xa_empty(&ctx->personalities)) {
unsigned long index;
const struct cred *cred;
seq_printf(m, "Personalities:\n");
xa_for_each(&ctx->personalities, index, cred)
io_uring_show_cred(m, index, cred);
}
if (has_lock)
mutex_unlock(&ctx->uring_lock);
seq_puts(m, "PollList:\n");
spin_lock(&ctx->completion_lock);
for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
struct hlist_head *list = &ctx->cancel_hash[i];
struct io_kiocb *req;
hlist_for_each_entry(req, list, hash_node)
seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
task_work_pending(req->task));
}
seq_puts(m, "CqOverflowList:\n");
list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
struct io_uring_cqe *cqe = &ocqe->cqe;
seq_printf(m, " user_data=%llu, res=%d, flags=%x\n",
cqe->user_data, cqe->res, cqe->flags);
}
spin_unlock(&ctx->completion_lock);
}
__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
struct io_ring_ctx *ctx = f->private_data;
if (percpu_ref_tryget(&ctx->refs)) {
__io_uring_show_fdinfo(ctx, m);
percpu_ref_put(&ctx->refs);
}
}
#endif
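One detail in the CQE dump above is cq_shift: with IORING_SETUP_CQE32 each completion is 32 bytes, i.e. two struct io_uring_cqe slots, so the ring index is doubled before indexing r->cqes[]. A standalone illustration of that arithmetic, using a made-up ring size:

#include <stdio.h>

int main(void)
{
	unsigned int cq_entries = 8;		/* hypothetical CQ ring size */
	unsigned int cq_mask = cq_entries - 1;
	unsigned int cq_shift = 1;		/* IORING_SETUP_CQE32: two slots per CQE */
	unsigned int entry;

	for (entry = 0; entry < 4; entry++)
		printf("CQE %u -> cqes[%u]\n", entry,
		       (entry & cq_mask) << cq_shift);	/* 0, 2, 4, 6 */
	return 0;
}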

io_uring/fdinfo.h (new file, 3 lines)

@@ -0,0 +1,3 @@
// SPDX-License-Identifier: GPL-2.0
void io_uring_show_fdinfo(struct seq_file *m, struct file *f);
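The header exposes only this one entry point; the hookup into the ring's file_operations is not part of this diff, but presumably stays in io_uring.c along the lines of the sketch below (only .release and .mmap are confirmed by the hunk further down; the rest is assumed):

/* sketch, not taken from this commit */
static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.mmap		= io_uring_mmap,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= io_uring_show_fdinfo,
#endif
	/* ... */
};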

io_uring/filetable.h

@@ -36,6 +36,8 @@ bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files);
void io_free_file_tables(struct io_file_table *table);
int io_file_bitmap_get(struct io_ring_ctx *ctx);
unsigned int io_file_get_flags(struct file *file);
static inline void io_file_bitmap_clear(struct io_file_table *table, int bit)
{
__clear_bit(bit, table->bitmap);
@@ -55,4 +57,21 @@ io_fixed_file_slot(struct io_file_table *table, unsigned i)
return &table->files[i];
}
static inline struct file *io_file_from_index(struct io_file_table *table,
int index)
{
struct io_fixed_file *slot = io_fixed_file_slot(table, index);
return (struct file *) (slot->file_ptr & FFS_MASK);
}
static inline void io_fixed_file_set(struct io_fixed_file *file_slot,
struct file *file)
{
unsigned long file_ptr = (unsigned long) file;
file_ptr |= io_file_get_flags(file);
file_slot->file_ptr = file_ptr;
}
#endif
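The io_fixed_file_set()/io_file_from_index() pair moved here relies on struct file pointers being sufficiently aligned: the io_file_get_flags() bits are packed into the low bits of the stored pointer and FFS_MASK strips them again on lookup. A self-contained userspace model of that tagging scheme (the flag values and mask below are illustrative, not the kernel's FFS_* definitions):

#include <assert.h>
#include <stdio.h>

#define MODEL_FLAG_BITS	0x7UL			/* made-up low flag bits */
#define MODEL_FFS_MASK	(~MODEL_FLAG_BITS)

struct model_file { int dummy; } __attribute__((aligned(8)));

int main(void)
{
	static struct model_file f;
	unsigned long flags = 0x3;				/* e.g. "nowait | isreg" */
	unsigned long file_ptr = (unsigned long)&f | flags;	/* io_fixed_file_set() */
	struct model_file *back =
		(struct model_file *)(file_ptr & MODEL_FFS_MASK);	/* io_file_from_index() */

	assert(back == &f);
	printf("stored flags: %#lx\n", file_ptr & MODEL_FLAG_BITS);
	return 0;
}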

io_uring/io_uring.c

@@ -93,6 +93,7 @@
#include "io_uring.h"
#include "refs.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "xattr.h"
#include "nop.h"
@@ -137,21 +138,8 @@
#define IO_TCTX_REFS_CACHE_NR (1U << 10)
struct io_mapped_ubuf {
u64 ubuf;
u64 ubuf_end;
unsigned int nr_bvecs;
unsigned long acct_pages;
struct bio_vec bvec[];
};
struct io_ring_ctx;
struct io_overflow_cqe {
struct list_head list;
struct io_uring_cqe cqe;
};
struct io_rsrc_put {
struct list_head list;
u64 tag;
@@ -2325,7 +2313,7 @@ static bool __io_file_supports_nowait(struct file *file, umode_t mode)
* any file. For now, just ensure that anything potentially problematic is done
* inline.
*/
-static unsigned int io_file_get_flags(struct file *file)
+unsigned int io_file_get_flags(struct file *file)
{
umode_t mode = file_inode(file)->i_mode;
unsigned int res = 0;
@@ -4810,22 +4798,6 @@ static void io_wq_submit_work(struct io_wq_work *work)
io_req_task_queue_fail(req, ret);
}
static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
int index)
{
struct io_fixed_file *slot = io_fixed_file_slot(&ctx->file_table, index);
return (struct file *) (slot->file_ptr & FFS_MASK);
}
static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file)
{
unsigned long file_ptr = (unsigned long) file;
file_ptr |= io_file_get_flags(file);
file_slot->file_ptr = file_ptr;
}
inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
unsigned int issue_flags)
{
@@ -5667,7 +5639,7 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
int i;
for (i = 0; i < ctx->nr_user_files; i++) {
-	struct file *file = io_file_from_index(ctx, i);
+	struct file *file = io_file_from_index(&ctx->file_table, i);
if (!file)
continue;
@@ -7777,182 +7749,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
return ret;
}
#ifdef CONFIG_PROC_FS
static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
const struct cred *cred)
{
struct user_namespace *uns = seq_user_ns(m);
struct group_info *gi;
kernel_cap_t cap;
unsigned __capi;
int g;
seq_printf(m, "%5d\n", id);
seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
seq_puts(m, "\n\tGroups:\t");
gi = cred->group_info;
for (g = 0; g < gi->ngroups; g++) {
seq_put_decimal_ull(m, g ? " " : "",
from_kgid_munged(uns, gi->gid[g]));
}
seq_puts(m, "\n\tCapEff:\t");
cap = cred->cap_effective;
CAP_FOR_EACH_U32(__capi)
seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
seq_putc(m, '\n');
return 0;
}
static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
struct seq_file *m)
{
struct io_sq_data *sq = NULL;
struct io_overflow_cqe *ocqe;
struct io_rings *r = ctx->rings;
unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
unsigned int sq_head = READ_ONCE(r->sq.head);
unsigned int sq_tail = READ_ONCE(r->sq.tail);
unsigned int cq_head = READ_ONCE(r->cq.head);
unsigned int cq_tail = READ_ONCE(r->cq.tail);
unsigned int cq_shift = 0;
unsigned int sq_entries, cq_entries;
bool has_lock;
bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
unsigned int i;
if (is_cqe32)
cq_shift = 1;
/*
* we may get imprecise sqe and cqe info if uring is actively running
* since we get cached_sq_head and cached_cq_tail without uring_lock
* and sq_tail and cq_head are changed by userspace. But it's ok since
* we usually use these info when it is stuck.
*/
seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
seq_printf(m, "SqHead:\t%u\n", sq_head);
seq_printf(m, "SqTail:\t%u\n", sq_tail);
seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
seq_printf(m, "CqHead:\t%u\n", cq_head);
seq_printf(m, "CqTail:\t%u\n", cq_tail);
seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
for (i = 0; i < sq_entries; i++) {
unsigned int entry = i + sq_head;
unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
struct io_uring_sqe *sqe;
if (sq_idx > sq_mask)
continue;
sqe = &ctx->sq_sqes[sq_idx];
seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
sq_idx, sqe->opcode, sqe->fd, sqe->flags,
sqe->user_data);
}
seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
for (i = 0; i < cq_entries; i++) {
unsigned int entry = i + cq_head;
struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];
if (!is_cqe32) {
seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
entry & cq_mask, cqe->user_data, cqe->res,
cqe->flags);
} else {
seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x, "
"extra1:%llu, extra2:%llu\n",
entry & cq_mask, cqe->user_data, cqe->res,
cqe->flags, cqe->big_cqe[0], cqe->big_cqe[1]);
}
}
/*
* Avoid ABBA deadlock between the seq lock and the io_uring mutex,
* since fdinfo case grabs it in the opposite direction of normal use
* cases. If we fail to get the lock, we just don't iterate any
* structures that could be going away outside the io_uring mutex.
*/
has_lock = mutex_trylock(&ctx->uring_lock);
if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
sq = ctx->sq_data;
if (!sq->thread)
sq = NULL;
}
seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
struct file *f = io_file_from_index(ctx, i);
if (f)
seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
else
seq_printf(m, "%5u: <none>\n", i);
}
seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
struct io_mapped_ubuf *buf = ctx->user_bufs[i];
unsigned int len = buf->ubuf_end - buf->ubuf;
seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
}
if (has_lock && !xa_empty(&ctx->personalities)) {
unsigned long index;
const struct cred *cred;
seq_printf(m, "Personalities:\n");
xa_for_each(&ctx->personalities, index, cred)
io_uring_show_cred(m, index, cred);
}
if (has_lock)
mutex_unlock(&ctx->uring_lock);
seq_puts(m, "PollList:\n");
spin_lock(&ctx->completion_lock);
for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
struct hlist_head *list = &ctx->cancel_hash[i];
struct io_kiocb *req;
hlist_for_each_entry(req, list, hash_node)
seq_printf(m, " op=%d, task_works=%d\n", req->opcode,
task_work_pending(req->task));
}
seq_puts(m, "CqOverflowList:\n");
list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
struct io_uring_cqe *cqe = &ocqe->cqe;
seq_printf(m, " user_data=%llu, res=%d, flags=%x\n",
cqe->user_data, cqe->res, cqe->flags);
}
spin_unlock(&ctx->completion_lock);
}
static __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
struct io_ring_ctx *ctx = f->private_data;
if (percpu_ref_tryget(&ctx->refs)) {
__io_uring_show_fdinfo(ctx, m);
percpu_ref_put(&ctx->refs);
}
}
#endif
static const struct file_operations io_uring_fops = {
.release = io_uring_release,
.mmap = io_uring_mmap,

io_uring/io_uring_types.h

@@ -498,4 +498,17 @@ struct io_cancel_data {
int seq;
};
struct io_overflow_cqe {
struct list_head list;
struct io_uring_cqe cqe;
};
struct io_mapped_ubuf {
u64 ubuf;
u64 ubuf_end;
unsigned int nr_bvecs;
unsigned long acct_pages;
struct bio_vec bvec[];
};
#endif
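For completeness, io_mapped_ubuf ends in a flexible array member, so a registered buffer is described by a single allocation sized for the header plus nr_bvecs trailing bio_vecs. A userspace model of that sizing (the kernel side uses its own allocators and struct_size()-style helpers, which are not shown in this diff):

#include <stdio.h>
#include <stdlib.h>

/* userspace stand-ins; struct bio_vec and the real allocation live in the kernel */
struct bio_vec_model { void *page; unsigned int len, offset; };

struct io_mapped_ubuf_model {
	unsigned long long ubuf, ubuf_end;
	unsigned int nr_bvecs;
	unsigned long acct_pages;
	struct bio_vec_model bvec[];		/* flexible array member */
};

int main(void)
{
	unsigned int nr_bvecs = 16;
	size_t sz = sizeof(struct io_mapped_ubuf_model) +
		    nr_bvecs * sizeof(struct bio_vec_model);
	struct io_mapped_ubuf_model *imu = malloc(sz);

	if (!imu)
		return 1;
	imu->nr_bvecs = nr_bvecs;
	printf("one allocation of %zu bytes covers header + %u bvecs\n",
	       sz, imu->nr_bvecs);
	free(imu);
	return 0;
}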