netdev: support dumping a single netdev in qstats

Having to filter the right ifindex in the tests is a bit tedious.
Add support for dumping qstats for a single ifindex.
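
As a usage illustration (not part of this patch), a dump filtered to a
single device could look roughly like the sketch below, using the YNL C
helpers from tools/net/ynl. The netdev-user.h names follow the code
generator's usual conventions and are assumptions, not code from this
patch; leaving the ifindex setter out keeps the old behaviour of walking
every netdev.

/* Hypothetical sketch: dump qstats for a single interface via YNL.
 * Helper names (netdev_qstats_get_req_dump_*, etc.) are assumed from
 * the YNL C code generator's naming scheme.
 */
#include <stdio.h>

#include <ynl.h>
#include "netdev-user.h"

static int qstats_dump_one(unsigned int ifindex)
{
	struct netdev_qstats_get_req_dump *req;
	struct netdev_qstats_get_list *rsp;
	struct ynl_error yerr;
	struct ynl_sock *ys;

	ys = ynl_sock_create(&ynl_netdev_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = netdev_qstats_get_req_dump_alloc();
	/* Uses the NETDEV_A_QSTATS_IFINDEX dump attribute added here */
	netdev_qstats_get_req_dump_set_ifindex(req, ifindex);

	rsp = netdev_qstats_get_dump(ys, req);
	netdev_qstats_get_req_dump_free(req);
	if (!rsp) {
		ynl_sock_destroy(ys);
		return -1;
	}

	ynl_dump_foreach(rsp, qs) {
		if (qs->_present.rx_packets)
			printf("ifindex %u: rx-packets %llu\n",
			       qs->ifindex,
			       (unsigned long long)qs->rx_packets);
	}

	netdev_qstats_get_list_free(rsp);
	ynl_sock_destroy(ys);
	return 0;
}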

Reviewed-by: David Ahern <dsahern@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20240420023543.3300306-2-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Jakub Kicinski <kuba@kernel.org>
Date:   2024-04-19 19:35:39 -07:00
Commit: ce05d0f203 (parent: 1af2dface5)
3 changed files with 41 additions and 13 deletions

Documentation/netlink/specs/netdev.yaml

@@ -486,6 +486,7 @@ operations:
       dump:
         request:
           attributes:
+            - ifindex
             - scope
         reply:
           attributes:
net/core/netdev-genl-gen.c

@@ -70,6 +70,7 @@ static const struct nla_policy netdev_napi_get_dump_nl_policy[NETDEV_A_NAPI_IFINDEX + 1] = {
 /* NETDEV_CMD_QSTATS_GET - dump */
 static const struct nla_policy netdev_qstats_get_nl_policy[NETDEV_A_QSTATS_SCOPE + 1] = {
+	[NETDEV_A_QSTATS_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
 	[NETDEV_A_QSTATS_SCOPE] = NLA_POLICY_MASK(NLA_UINT, 0x1),
 };

net/core/netdev-genl.c

@@ -639,6 +639,24 @@ netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
 	return -EMSGSIZE;
 }
 
+static int
+netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope,
+			      struct sk_buff *skb, const struct genl_info *info,
+			      struct netdev_nl_dump_ctx *ctx)
+{
+	if (!netdev->stat_ops)
+		return 0;
+
+	switch (scope) {
+	case 0:
+		return netdev_nl_stats_by_netdev(netdev, skb, info);
+	case NETDEV_QSTATS_SCOPE_QUEUE:
+		return netdev_nl_stats_by_queue(netdev, skb, info, ctx);
+	}
+
+	return -EINVAL;	/* Should not happen, per netlink policy */
+}
+
 int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
 				struct netlink_callback *cb)
 {
@@ -646,6 +664,7 @@ int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
 	const struct genl_info *info = genl_info_dump(cb);
 	struct net *net = sock_net(skb->sk);
 	struct net_device *netdev;
+	unsigned int ifindex;
 	unsigned int scope;
 	int err = 0;
@@ -653,21 +672,28 @@ int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
 	if (info->attrs[NETDEV_A_QSTATS_SCOPE])
 		scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]);
 
-	rtnl_lock();
-	for_each_netdev_dump(net, netdev, ctx->ifindex) {
-		if (!netdev->stat_ops)
-			continue;
+	ifindex = 0;
+	if (info->attrs[NETDEV_A_QSTATS_IFINDEX])
+		ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]);
 
-		switch (scope) {
-		case 0:
-			err = netdev_nl_stats_by_netdev(netdev, skb, info);
-			break;
-		case NETDEV_QSTATS_SCOPE_QUEUE:
-			err = netdev_nl_stats_by_queue(netdev, skb, info, ctx);
-			break;
+	rtnl_lock();
+	if (ifindex) {
+		netdev = __dev_get_by_index(net, ifindex);
+		if (netdev && netdev->stat_ops) {
+			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
+							    info, ctx);
+		} else {
+			NL_SET_BAD_ATTR(info->extack,
+					info->attrs[NETDEV_A_QSTATS_IFINDEX]);
+			err = netdev ? -EOPNOTSUPP : -ENODEV;
 		}
-
+	} else {
+		for_each_netdev_dump(net, netdev, ctx->ifindex) {
+			err = netdev_nl_qstats_get_dump_one(netdev, scope, skb,
+							    info, ctx);
+			if (err < 0)
+				break;
+		}
-		if (err < 0)
-			break;
 	}
 	rtnl_unlock();