git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
eth: fbnic: report software Rx queue stats
Author: Jakub Kicinski <kuba@kernel.org>
Tue, 11 Feb 2025 18:13:54 +0000 (10:13 -0800)
Committer: Jakub Kicinski <kuba@kernel.org>
Thu, 13 Feb 2025 00:39:05 +0000 (16:39 -0800)
Gather and report software Rx queue stats - checksum stats
and allocation failures.

Acked-by: Joe Damato <jdamato@fastly.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://patch.msgid.link/20250211181356.580800-4-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
drivers/net/ethernet/meta/fbnic/fbnic_txrx.h

index 14e7a8384bce28e1c48ca4ad07eebd0d27e332e5..ceb6d1de9bcf68caf65eaf5188102df893672006 100644 (file)
@@ -487,8 +487,9 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
        struct fbnic_net *fbn = netdev_priv(dev);
        struct fbnic_ring *rxr = fbn->rx[idx];
        struct fbnic_queue_stats *stats;
+       u64 bytes, packets, alloc_fail;
+       u64 csum_complete, csum_none;
        unsigned int start;
-       u64 bytes, packets;
 
        if (!rxr)
                return;
@@ -498,10 +499,16 @@ static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
                start = u64_stats_fetch_begin(&stats->syncp);
                bytes = stats->bytes;
                packets = stats->packets;
+               alloc_fail = stats->rx.alloc_failed;
+               csum_complete = stats->rx.csum_complete;
+               csum_none = stats->rx.csum_none;
        } while (u64_stats_fetch_retry(&stats->syncp, start));
 
        rx->bytes = bytes;
        rx->packets = packets;
+       rx->alloc_fail = alloc_fail;
+       rx->csum_complete = csum_complete;
+       rx->csum_none = csum_none;
 }
 
 static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
@@ -538,6 +545,9 @@ static void fbnic_get_base_stats(struct net_device *dev,
 
        rx->bytes = fbn->rx_stats.bytes;
        rx->packets = fbn->rx_stats.packets;
+       rx->alloc_fail = fbn->rx_stats.rx.alloc_failed;
+       rx->csum_complete = fbn->rx_stats.rx.csum_complete;
+       rx->csum_none = fbn->rx_stats.rx.csum_none;
 }
 
 static const struct netdev_stat_ops fbnic_stat_ops = {
index b60dd1c9918e4d1f333a90b300c530b44b950457..66ba36fd3c081e323c53ed99185ec95fcc3eed37 100644 (file)
@@ -198,12 +198,15 @@ fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
 }
 
 static void
-fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
+fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq,
+             u64 *csum_cmpl, u64 *csum_none)
 {
        skb_checksum_none_assert(skb);
 
-       if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+       if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+               (*csum_none)++;
                return;
+       }
 
        if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -212,6 +215,7 @@ fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
 
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = (__force __wsum)csum;
+               (*csum_cmpl)++;
        }
 }
 
@@ -661,8 +665,13 @@ static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
                struct page *page;
 
                page = page_pool_dev_alloc_pages(nv->page_pool);
-               if (!page)
+               if (!page) {
+                       u64_stats_update_begin(&bdq->stats.syncp);
+                       bdq->stats.rx.alloc_failed++;
+                       u64_stats_update_end(&bdq->stats.syncp);
+
                        break;
+               }
 
                fbnic_page_pool_init(bdq, i, page);
                fbnic_bd_prep(bdq, i, page);
@@ -875,12 +884,13 @@ static void fbnic_rx_tstamp(struct fbnic_napi_vector *nv, u64 rcd,
 
 static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
                                      u64 rcd, struct sk_buff *skb,
-                                     struct fbnic_q_triad *qt)
+                                     struct fbnic_q_triad *qt,
+                                     u64 *csum_cmpl, u64 *csum_none)
 {
        struct net_device *netdev = nv->napi.dev;
        struct fbnic_ring *rcq = &qt->cmpl;
 
-       fbnic_rx_csum(rcd, skb, rcq);
+       fbnic_rx_csum(rcd, skb, rcq, csum_cmpl, csum_none);
 
        if (netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb,
@@ -898,7 +908,8 @@ static bool fbnic_rcd_metadata_err(u64 rcd)
 static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
                           struct fbnic_q_triad *qt, int budget)
 {
-       unsigned int packets = 0, bytes = 0, dropped = 0;
+       unsigned int packets = 0, bytes = 0, dropped = 0, alloc_failed = 0;
+       u64 csum_complete = 0, csum_none = 0;
        struct fbnic_ring *rcq = &qt->cmpl;
        struct fbnic_pkt_buff *pkt;
        s32 head0 = -1, head1 = -1;
@@ -947,14 +958,22 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
 
                        /* Populate skb and invalidate XDP */
                        if (!IS_ERR_OR_NULL(skb)) {
-                               fbnic_populate_skb_fields(nv, rcd, skb, qt);
+                               fbnic_populate_skb_fields(nv, rcd, skb, qt,
+                                                         &csum_complete,
+                                                         &csum_none);
 
                                packets++;
                                bytes += skb->len;
 
                                napi_gro_receive(&nv->napi, skb);
                        } else {
+                               if (!skb)
+                                       alloc_failed++;
+
                                dropped++;
                                fbnic_put_pkt_buff(nv, pkt, 1);
                        }
 
@@ -977,6 +996,9 @@ static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
        /* Re-add ethernet header length (removed in fbnic_build_skb) */
        rcq->stats.bytes += ETH_HLEN * packets;
        rcq->stats.dropped += dropped;
+       rcq->stats.rx.alloc_failed += alloc_failed;
+       rcq->stats.rx.csum_complete += csum_complete;
+       rcq->stats.rx.csum_none += csum_none;
        u64_stats_update_end(&rcq->stats.syncp);
 
        /* Unmap and free processed buffers */
@@ -1054,6 +1076,11 @@ void fbnic_aggregate_ring_rx_counters(struct fbnic_net *fbn,
        fbn->rx_stats.bytes += stats->bytes;
        fbn->rx_stats.packets += stats->packets;
        fbn->rx_stats.dropped += stats->dropped;
+       fbn->rx_stats.rx.alloc_failed += stats->rx.alloc_failed;
+       fbn->rx_stats.rx.csum_complete += stats->rx.csum_complete;
+       fbn->rx_stats.rx.csum_none += stats->rx.csum_none;
+       /* Remember to add new stats here */
+       BUILD_BUG_ON(sizeof(fbn->tx_stats.rx) / 8 != 3);
 }
 
 void fbnic_aggregate_ring_tx_counters(struct fbnic_net *fbn,
index 6b549b0e7fa4aa288285f1f4bcfb8d9c4a658724..c9d9001042dc21c1c89d36e826a7a49272ab1805 100644 (file)
@@ -61,6 +61,11 @@ struct fbnic_queue_stats {
                        u64 ts_packets;
                        u64 ts_lost;
                } twq;
+               struct {
+                       u64 alloc_failed;
+                       u64 csum_complete;
+                       u64 csum_none;
+               } rx;
        };
        u64 dropped;
        struct u64_stats_sync syncp;