git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
net: ethernet: ti: netcp: Use u64_stats_t with u64_stats_sync properly
authorDavid Yang <mmyangfl@gmail.com>
Fri, 23 Jan 2026 16:48:36 +0000 (00:48 +0800)
committerJakub Kicinski <kuba@kernel.org>
Tue, 27 Jan 2026 03:53:41 +0000 (19:53 -0800)
On 64bit arches, struct u64_stats_sync is empty and provides no help
against load/store tearing. Convert to u64_stats_t to ensure atomic
operations.

Note that this does not mean the code is now tear-free: there are u32
counters unprotected by u64_stats or anything else.

Signed-off-by: David Yang <mmyangfl@gmail.com>
Link: https://patch.msgid.link/20260123164841.2890054-1-mmyangfl@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/ti/netcp.h
drivers/net/ethernet/ti/netcp_core.c

index b9cbd3b4a8a22a03b597fb193391685b143d8f79..9cfddaa807e2742c9d1d9f853b151f43a2a5db81 100644 (file)
@@ -65,14 +65,14 @@ struct netcp_addr {
 
 struct netcp_stats {
        struct u64_stats_sync   syncp_rx ____cacheline_aligned_in_smp;
-       u64                     rx_packets;
-       u64                     rx_bytes;
+       u64_stats_t             rx_packets;
+       u64_stats_t             rx_bytes;
        u32                     rx_errors;
        u32                     rx_dropped;
 
        struct u64_stats_sync   syncp_tx ____cacheline_aligned_in_smp;
-       u64                     tx_packets;
-       u64                     tx_bytes;
+       u64_stats_t             tx_packets;
+       u64_stats_t             tx_bytes;
        u32                     tx_errors;
        u32                     tx_dropped;
 };
index 5ed1c46bbcb100d997d459459c6e290e3834251e..eb8fc2ed05f45b5429de2aeea0204152e3c83d7b 100644 (file)
@@ -759,8 +759,8 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
        knav_pool_desc_put(netcp->rx_pool, desc);
 
        u64_stats_update_begin(&rx_stats->syncp_rx);
-       rx_stats->rx_packets++;
-       rx_stats->rx_bytes += skb->len;
+       u64_stats_inc(&rx_stats->rx_packets);
+       u64_stats_add(&rx_stats->rx_bytes, skb->len);
        u64_stats_update_end(&rx_stats->syncp_rx);
 
        /* push skb up the stack */
@@ -1045,8 +1045,8 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
                }
 
                u64_stats_update_begin(&tx_stats->syncp_tx);
-               tx_stats->tx_packets++;
-               tx_stats->tx_bytes += skb->len;
+               u64_stats_inc(&tx_stats->tx_packets);
+               u64_stats_add(&tx_stats->tx_bytes, skb->len);
                u64_stats_update_end(&tx_stats->syncp_tx);
                dev_kfree_skb(skb);
                pkts++;
@@ -1973,14 +1973,14 @@ netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
 
        do {
                start = u64_stats_fetch_begin(&p->syncp_rx);
-               rxpackets       = p->rx_packets;
-               rxbytes         = p->rx_bytes;
+               rxpackets       = u64_stats_read(&p->rx_packets);
+               rxbytes         = u64_stats_read(&p->rx_bytes);
        } while (u64_stats_fetch_retry(&p->syncp_rx, start));
 
        do {
                start = u64_stats_fetch_begin(&p->syncp_tx);
-               txpackets       = p->tx_packets;
-               txbytes         = p->tx_bytes;
+               txpackets       = u64_stats_read(&p->tx_packets);
+               txbytes         = u64_stats_read(&p->tx_bytes);
        } while (u64_stats_fetch_retry(&p->syncp_tx, start));
 
        stats->rx_packets = rxpackets;