net: adopt u64_stats_t in struct pcpu_sw_netstats

As explained in commit 316580b69d ("u64_stats: provide u64_stats_t type")
we should use u64_stats_t and related accessors to avoid load/store tearing.
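
A quick sketch of the resulting writer/reader pattern, for reference only; it is
not part of this patch. The names my_pcpu_stats, my_rx_update and my_fold_stats
are invented for illustration, while the u64_stats_* accessors and the syncp
begin/fetch/retry helpers are the ones used throughout the hunks below.

/* Illustrative sketch only -- my_pcpu_stats, my_rx_update and my_fold_stats
 * are invented names and do not exist in the tree.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct my_pcpu_stats {
	u64_stats_t		rx_packets;
	u64_stats_t		rx_bytes;
	struct u64_stats_sync	syncp;
};

/* Writer (e.g. softirq): bump counters through the tearing-safe accessors. */
static void my_rx_update(struct my_pcpu_stats __percpu *pstats, unsigned int len)
{
	struct my_pcpu_stats *stats = this_cpu_ptr(pstats);

	u64_stats_update_begin(&stats->syncp);
	u64_stats_inc(&stats->rx_packets);
	u64_stats_add(&stats->rx_bytes, len);
	u64_stats_update_end(&stats->syncp);
}

/* Reader: snapshot each CPU, retrying if it raced with a writer. */
static void my_fold_stats(struct my_pcpu_stats __percpu *pstats,
			  u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct my_pcpu_stats *stats = per_cpu_ptr(pstats, cpu);
		unsigned int start;
		u64 p, b;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			p = u64_stats_read(&stats->rx_packets);
			b = u64_stats_read(&stats->rx_bytes);
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		*packets += p;
		*bytes += b;
	}
}

Roughly speaking, on 64-bit kernels the syncp seqcount compiles away and the
u64_stats_* helpers reduce to single 64-bit (hence tearing-free) accesses, while
on 32-bit the seqcount lets readers detect and retry a racing writer.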

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Commit:    9962acefbc (parent eeb15885ca)
Author:    Eric Dumazet <edumazet@google.com>
Date:      2022-06-08 08:46:37 -07:00
Committer: Jakub Kicinski <kuba@kernel.org>

 9 files changed, 59 insertions(+), 55 deletions(-)

@@ -523,8 +523,8 @@ static void count_tx(struct net_device *dev, int ret, int len)
 		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 
 		u64_stats_update_begin(&stats->syncp);
-		stats->tx_packets++;
-		stats->tx_bytes += len;
+		u64_stats_inc(&stats->tx_packets);
+		u64_stats_add(&stats->tx_bytes, len);
 		u64_stats_update_end(&stats->syncp);
 	}
 }
@@ -825,8 +825,8 @@ static void count_rx(struct net_device *dev, int len)
 	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);
 
 	u64_stats_update_begin(&stats->syncp);
-	stats->rx_packets++;
-	stats->rx_bytes += len;
+	u64_stats_inc(&stats->rx_packets);
+	u64_stats_add(&stats->rx_bytes, len);
 	u64_stats_update_end(&stats->syncp);
 }
 

@@ -337,8 +337,8 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
 		skb->protocol = eth_type_trans (skb, dev->net);
 
 	flags = u64_stats_update_begin_irqsave(&stats64->syncp);
-	stats64->rx_packets++;
-	stats64->rx_bytes += skb->len;
+	u64_stats_inc(&stats64->rx_packets);
+	u64_stats_add(&stats64->rx_bytes, skb->len);
 	u64_stats_update_end_irqrestore(&stats64->syncp, flags);
 
 	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
@@ -1258,8 +1258,8 @@ static void tx_complete (struct urb *urb)
 		unsigned long flags;
 
 		flags = u64_stats_update_begin_irqsave(&stats64->syncp);
-		stats64->tx_packets += entry->packets;
-		stats64->tx_bytes += entry->length;
+		u64_stats_add(&stats64->tx_packets, entry->packets);
+		u64_stats_add(&stats64->tx_bytes, entry->length);
 		u64_stats_update_end_irqrestore(&stats64->syncp, flags);
 	} else {
 		dev->net->stats.tx_errors++;

@@ -2385,15 +2385,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 		vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
 
 	u64_stats_update_begin(&tx_stats->syncp);
-	tx_stats->tx_packets++;
-	tx_stats->tx_bytes += len;
+	u64_stats_inc(&tx_stats->tx_packets);
+	u64_stats_add(&tx_stats->tx_bytes, len);
 	u64_stats_update_end(&tx_stats->syncp);
 	vxlan_vnifilter_count(src_vxlan, vni, NULL, VXLAN_VNI_STATS_TX, len);
 
 	if (__netif_rx(skb) == NET_RX_SUCCESS) {
 		u64_stats_update_begin(&rx_stats->syncp);
-		rx_stats->rx_packets++;
-		rx_stats->rx_bytes += len;
+		u64_stats_inc(&rx_stats->rx_packets);
+		u64_stats_add(&rx_stats->rx_bytes, len);
 		u64_stats_update_end(&rx_stats->syncp);
 		vxlan_vnifilter_count(dst_vxlan, vni, NULL, VXLAN_VNI_STATS_RX,
 				      len);

@@ -2636,10 +2636,10 @@ struct packet_offload {
 
 /* often modified stats are per-CPU, other are shared (netdev->stats) */
 struct pcpu_sw_netstats {
-	u64 rx_packets;
-	u64 rx_bytes;
-	u64 tx_packets;
-	u64 tx_bytes;
+	u64_stats_t rx_packets;
+	u64_stats_t rx_bytes;
+	u64_stats_t tx_packets;
+	u64_stats_t tx_bytes;
 	struct u64_stats_sync syncp;
 } __aligned(4 * sizeof(u64));
 
@@ -2656,8 +2656,8 @@ static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int l
 	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 
 	u64_stats_update_begin(&tstats->syncp);
-	tstats->rx_bytes += len;
-	tstats->rx_packets++;
+	u64_stats_add(&tstats->rx_bytes, len);
+	u64_stats_inc(&tstats->rx_packets);
 	u64_stats_update_end(&tstats->syncp);
 }
 
@@ -2668,8 +2668,8 @@ static inline void dev_sw_netstats_tx_add(struct net_device *dev,
 	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 
 	u64_stats_update_begin(&tstats->syncp);
-	tstats->tx_bytes += len;
-	tstats->tx_packets += packets;
+	u64_stats_add(&tstats->tx_bytes, len);
+	u64_stats_add(&tstats->tx_packets, packets);
 	u64_stats_update_end(&tstats->syncp);
 }
 

@@ -456,8 +456,8 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
 		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
 
 		u64_stats_update_begin(&tstats->syncp);
-		tstats->tx_bytes += pkt_len;
-		tstats->tx_packets++;
+		u64_stats_add(&tstats->tx_bytes, pkt_len);
+		u64_stats_inc(&tstats->tx_packets);
 		u64_stats_update_end(&tstats->syncp);
 		put_cpu_ptr(tstats);
 	} else {

@@ -1770,10 +1770,10 @@ static int br_fill_linkxstats(struct sk_buff *skb,
 			if (v->vid == pvid)
 				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
 			br_vlan_get_stats(v, &stats);
-			vxi.rx_bytes = stats.rx_bytes;
-			vxi.rx_packets = stats.rx_packets;
-			vxi.tx_bytes = stats.tx_bytes;
-			vxi.tx_packets = stats.tx_packets;
+			vxi.rx_bytes = u64_stats_read(&stats.rx_bytes);
+			vxi.rx_packets = u64_stats_read(&stats.rx_packets);
+			vxi.tx_bytes = u64_stats_read(&stats.tx_bytes);
+			vxi.tx_packets = u64_stats_read(&stats.tx_packets);
 
 			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
 				goto nla_put_failure;

@@ -505,8 +505,8 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
 	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 		stats = this_cpu_ptr(v->stats);
 		u64_stats_update_begin(&stats->syncp);
-		stats->tx_bytes += skb->len;
-		stats->tx_packets++;
+		u64_stats_add(&stats->tx_bytes, skb->len);
+		u64_stats_inc(&stats->tx_packets);
 		u64_stats_update_end(&stats->syncp);
 	}
 
@@ -624,8 +624,8 @@ static bool __allowed_ingress(const struct net_bridge *br,
 	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 		stats = this_cpu_ptr(v->stats);
 		u64_stats_update_begin(&stats->syncp);
-		stats->rx_bytes += skb->len;
-		stats->rx_packets++;
+		u64_stats_add(&stats->rx_bytes, skb->len);
+		u64_stats_inc(&stats->rx_packets);
 		u64_stats_update_end(&stats->syncp);
 	}
 
@@ -1379,16 +1379,16 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
 		cpu_stats = per_cpu_ptr(v->stats, i);
 		do {
 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
-			rxpackets = cpu_stats->rx_packets;
-			rxbytes = cpu_stats->rx_bytes;
-			txbytes = cpu_stats->tx_bytes;
-			txpackets = cpu_stats->tx_packets;
+			rxpackets = u64_stats_read(&cpu_stats->rx_packets);
+			rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
+			txbytes = u64_stats_read(&cpu_stats->tx_bytes);
+			txpackets = u64_stats_read(&cpu_stats->tx_packets);
 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
 
-		stats->rx_packets += rxpackets;
-		stats->rx_bytes += rxbytes;
-		stats->tx_bytes += txbytes;
-		stats->tx_packets += txpackets;
+		u64_stats_add(&stats->rx_packets, rxpackets);
+		u64_stats_add(&stats->rx_bytes, rxbytes);
+		u64_stats_add(&stats->tx_bytes, txbytes);
+		u64_stats_add(&stats->tx_packets, txpackets);
 	}
 }
 
@@ -1779,14 +1779,18 @@ static bool br_vlan_stats_fill(struct sk_buff *skb,
 		return false;
 
 	br_vlan_get_stats(v, &stats);
-	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
+	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES,
+			      u64_stats_read(&stats.rx_bytes),
 			      BRIDGE_VLANDB_STATS_PAD) ||
 	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
-			      stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
-	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
+			      u64_stats_read(&stats.rx_packets),
+			      BRIDGE_VLANDB_STATS_PAD) ||
+	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES,
+			      u64_stats_read(&stats.tx_bytes),
 			      BRIDGE_VLANDB_STATS_PAD) ||
 	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
-			      stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
+			      u64_stats_read(&stats.tx_packets),
+			      BRIDGE_VLANDB_STATS_PAD))
 		goto out_err;
 
 	nla_nest_end(skb, nest);

@@ -10459,23 +10459,23 @@ void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
+		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
 		const struct pcpu_sw_netstats *stats;
-		struct pcpu_sw_netstats tmp;
 		unsigned int start;
 
 		stats = per_cpu_ptr(netstats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&stats->syncp);
-			tmp.rx_packets = stats->rx_packets;
-			tmp.rx_bytes = stats->rx_bytes;
-			tmp.tx_packets = stats->tx_packets;
-			tmp.tx_bytes = stats->tx_bytes;
+			rx_packets = u64_stats_read(&stats->rx_packets);
+			rx_bytes = u64_stats_read(&stats->rx_bytes);
+			tx_packets = u64_stats_read(&stats->tx_packets);
+			tx_bytes = u64_stats_read(&stats->tx_bytes);
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
-		s->rx_packets += tmp.rx_packets;
-		s->rx_bytes += tmp.rx_bytes;
-		s->tx_packets += tmp.tx_packets;
-		s->tx_bytes += tmp.tx_bytes;
+		s->rx_packets += rx_packets;
+		s->rx_bytes += rx_bytes;
+		s->tx_packets += tx_packets;
+		s->tx_bytes += tx_bytes;
 	}
 }
 EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);

@@ -935,10 +935,10 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
 		s = per_cpu_ptr(dev->tstats, i);
 		do {
 			start = u64_stats_fetch_begin_irq(&s->syncp);
-			tx_packets = s->tx_packets;
-			tx_bytes = s->tx_bytes;
-			rx_packets = s->rx_packets;
-			rx_bytes = s->rx_bytes;
+			tx_packets = u64_stats_read(&s->tx_packets);
+			tx_bytes = u64_stats_read(&s->tx_bytes);
+			rx_packets = u64_stats_read(&s->rx_packets);
+			rx_bytes = u64_stats_read(&s->rx_bytes);
 		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
 		data[0] += tx_packets;
 		data[1] += tx_bytes;