linux/net/netfilter/nf_flow_table_procfs.c
Vlad Buslov b038177636 netfilter: nf_flow_table: count pending offload workqueue tasks
To improve hardware offload debuggability, count pending 'add', 'del' and
'stats' flow_table offload workqueue tasks. Counters are incremented before
scheduling a new task and decremented when the workqueue handler finishes
executing. These counters allow the user to diagnose congestion on the
hardware offload workqueues, which can happen either when the CPU is starved
and workqueue jobs are executed at a lower rate than new ones are added, or
when the hardware/driver can't keep up with the rate.
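
The counting side itself lives outside the file shown below; what follows is
a minimal sketch of the pattern the message describes, using the 'add'
workqueue as the example. The NF_FLOW_TABLE_STAT_INC()/NF_FLOW_TABLE_STAT_DEC()
helpers (a possible definition is sketched after the next paragraph), the
flow_offload_work layout and the workqueue name are assumptions for
illustration, not taken from this file:

/* Sketch only: counter goes up before the task is queued, down when
 * the handler has run, so the difference is the pending-task count.
 */
static void flow_offload_queue_add(struct flow_offload_work *offload)
{
	struct net *net = read_pnet(&offload->flowtable->net);

	/* Count the task before handing it to the workqueue... */
	NF_FLOW_TABLE_STAT_INC(net, count_wq_add);
	queue_work(nf_flow_offload_add_wq, &offload->work);
}

static void flow_offload_work_add_handler(struct work_struct *work)
{
	struct flow_offload_work *offload;
	struct net *net;

	offload = container_of(work, struct flow_offload_work, work);
	net = read_pnet(&offload->flowtable->net);

	/* ...do the hardware offload work... */

	/* ...and uncount it once the handler has finished. */
	NF_FLOW_TABLE_STAT_DEC(net, count_wq_add);
}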

Implement the described counters as percpu counters inside a new struct
netns_ft, which is stored inside struct net. Expose them via a new procfs
file '/proc/net/stats/nf_flowtable' that is similar to the existing
'nf_conntrack' file.
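
The definitions implied here and by the reader below can be sketched as
follows; the field names match the st->count_wq_* and net->ft.stat accesses
in the procfs code, while the header placement and the helper macros are
assumptions:

/* One instance of these counters exists per possible CPU. */
struct nf_flow_table_stat {
	unsigned int count_wq_add;	/* pending 'add' offload tasks */
	unsigned int count_wq_del;	/* pending 'del' offload tasks */
	unsigned int count_wq_stats;	/* pending 'stats' offload tasks */
};

/* Per-netns flowtable state, embedded in struct net as net->ft. */
struct netns_ft {
	struct nf_flow_table_stat __percpu *stat;
};

/* Possible per-cpu helpers built on this_cpu_inc()/this_cpu_dec(): */
#define NF_FLOW_TABLE_STAT_INC(net, count) \
	this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC(net, count) \
	this_cpu_dec((net)->ft.stat->count)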

Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
2022-07-11 16:25:14 +02:00


// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <net/netfilter/nf_flow_table.h>

static void *nf_flow_table_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	int cpu;

	/* The first call emits the header line; afterwards *pos encodes
	 * the next CPU to visit, offset by one for the header.
	 */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(net->ft.stat, cpu);
	}

	return NULL;
}

static void *nf_flow_table_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	int cpu;

	/* Advance to the next possible CPU's counters. */
	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(net->ft.stat, cpu);
	}
	(*pos)++;
	return NULL;
}

static void nf_flow_table_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int nf_flow_table_cpu_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_flow_table_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "wq_add   wq_del   wq_stats\n");
		return 0;
	}

	/* One line of pending-task counters per possible CPU. */
	seq_printf(seq, "%8d %8d %8d\n",
		   st->count_wq_add,
		   st->count_wq_del,
		   st->count_wq_stats
		);
	return 0;
}

static const struct seq_operations nf_flow_table_cpu_seq_ops = {
	.start	= nf_flow_table_cpu_seq_start,
	.next	= nf_flow_table_cpu_seq_next,
	.stop	= nf_flow_table_cpu_seq_stop,
	.show	= nf_flow_table_cpu_seq_show,
};

int nf_flow_table_init_proc(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create_net("nf_flowtable", 0444, net->proc_net_stat,
			      &nf_flow_table_cpu_seq_ops,
			      sizeof(struct seq_net_private));
	return pde ? 0 : -ENOMEM;
}

void nf_flow_table_fini_proc(struct net *net)
{
	remove_proc_entry("nf_flowtable", net->proc_net_stat);
}
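
Neither the allocation of net->ft.stat nor the callers of
nf_flow_table_init_proc()/nf_flow_table_fini_proc() appear in this file; a
sketch of how the pernet wiring might look, assuming it lives in
nf_flow_table_core.c (placement and function names assumed):

/* Sketch: per-netns setup/teardown that allocates the per-cpu counters
 * read by the procfs code above and registers the proc entry.
 */
static int nf_flow_table_pernet_init(struct net *net)
{
	int ret;

	net->ft.stat = alloc_percpu(struct nf_flow_table_stat);
	if (!net->ft.stat)
		return -ENOMEM;

	ret = nf_flow_table_init_proc(net);
	if (ret < 0)
		free_percpu(net->ft.stat);
	return ret;
}

static void nf_flow_table_pernet_exit(struct net *net)
{
	nf_flow_table_fini_proc(net);
	free_percpu(net->ft.stat);
}

static struct pernet_operations nf_flow_table_net_ops = {
	.init = nf_flow_table_pernet_init,
	.exit = nf_flow_table_pernet_exit,
};

Reading the new file then yields the header plus one row of counters per
possible CPU, for example (values illustrative):

$ cat /proc/net/stats/nf_flowtable
wq_add   wq_del   wq_stats
       2        1        0
       0        3        0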