]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
net: mana: Add ethtool counters for RX CQEs in coalesced type
authorHaiyang Zhang <haiyangz@microsoft.com>
Tue, 17 Mar 2026 19:18:07 +0000 (12:18 -0700)
committerJakub Kicinski <kuba@kernel.org>
Thu, 19 Mar 2026 03:01:10 +0000 (20:01 -0700)
For RX CQEs with type CQE_RX_COALESCED_4, to measure the coalescing
efficiency, add counters to count how many of them contain 2, 3, or 4
packets respectively.
Also, add a counter for the error case of the first packet having length == 0.

Reviewed-by: Long Li <longli@microsoft.com>
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Link: https://patch.msgid.link/20260317191826.1346111-4-haiyangz@linux.microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/microsoft/mana/mana_en.c
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
include/net/mana/mana.h

index fa30046dcd3d86970bb97f923994c8f5d5c3eed7..49c65cc1697c6c7e8895f3bf1df1e7adc771dcb1 100644 (file)
@@ -2147,14 +2147,8 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
        for (i = 0; i < MANA_RXCOMP_OOB_NUM_PPI; i++) {
                old_buf = NULL;
                pktlen = oob->ppi[i].pkt_len;
-               if (pktlen == 0) {
-                       if (i == 0)
-                               netdev_err_once(
-                                       ndev,
-                                       "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
-                                       rxq->gdma_id, cq->gdma_id, rxq->rxobj);
+               if (pktlen == 0)
                        break;
-               }
 
                curr = rxq->buf_index;
                rxbuf_oob = &rxq->rx_oobs[curr];
@@ -2175,6 +2169,22 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
                if (!coalesced)
                        break;
        }
+
+       /* Collect coalesced CQE count based on packets processed.
+        * Coalesced CQEs have at least 2 packets, so index is i - 2.
+        */
+       if (i > 1) {
+               u64_stats_update_begin(&rxq->stats.syncp);
+               rxq->stats.coalesced_cqe[i - 2]++;
+               u64_stats_update_end(&rxq->stats.syncp);
+       } else if (!i && !pktlen) {
+               u64_stats_update_begin(&rxq->stats.syncp);
+               rxq->stats.pkt_len0_err++;
+               u64_stats_update_end(&rxq->stats.syncp);
+               netdev_err_once(ndev,
+                               "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
+                               rxq->gdma_id, cq->gdma_id, rxq->rxobj);
+       }
 }
 
 static void mana_poll_rx_cq(struct mana_cq *cq)
index 4b234b16e57aa0be6a54dc10252dddb9bc558b62..6a4b42fe0944528b8178a4889d682363e902019a 100644 (file)
@@ -149,7 +149,7 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 {
        struct mana_port_context *apc = netdev_priv(ndev);
        unsigned int num_queues = apc->num_queues;
-       int i;
+       int i, j;
 
        if (stringset != ETH_SS_STATS)
                return;
@@ -168,6 +168,9 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
                ethtool_sprintf(&data, "rx_%d_xdp_drop", i);
                ethtool_sprintf(&data, "rx_%d_xdp_tx", i);
                ethtool_sprintf(&data, "rx_%d_xdp_redirect", i);
+               ethtool_sprintf(&data, "rx_%d_pkt_len0_err", i);
+               for (j = 0; j < MANA_RXCOMP_OOB_NUM_PPI - 1; j++)
+                       ethtool_sprintf(&data, "rx_%d_coalesced_cqe_%d", i, j + 2);
        }
 
        for (i = 0; i < num_queues; i++) {
@@ -201,6 +204,8 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
        u64 xdp_xmit;
        u64 xdp_drop;
        u64 xdp_tx;
+       u64 pkt_len0_err;
+       u64 coalesced_cqe[MANA_RXCOMP_OOB_NUM_PPI - 1];
        u64 tso_packets;
        u64 tso_bytes;
        u64 tso_inner_packets;
@@ -209,7 +214,7 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
        u64 short_pkt_fmt;
        u64 csum_partial;
        u64 mana_map_err;
-       int q, i = 0;
+       int q, i = 0, j;
 
        if (!apc->port_is_up)
                return;
@@ -239,6 +244,9 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
                        xdp_drop = rx_stats->xdp_drop;
                        xdp_tx = rx_stats->xdp_tx;
                        xdp_redirect = rx_stats->xdp_redirect;
+                       pkt_len0_err = rx_stats->pkt_len0_err;
+                       for (j = 0; j < MANA_RXCOMP_OOB_NUM_PPI - 1; j++)
+                               coalesced_cqe[j] = rx_stats->coalesced_cqe[j];
                } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
 
                data[i++] = packets;
@@ -246,6 +254,9 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
                data[i++] = xdp_drop;
                data[i++] = xdp_tx;
                data[i++] = xdp_redirect;
+               data[i++] = pkt_len0_err;
+               for (j = 0; j < MANA_RXCOMP_OOB_NUM_PPI - 1; j++)
+                       data[i++] = coalesced_cqe[j];
        }
 
        for (q = 0; q < num_queues; q++) {
index a7f89e7ddc56fabded7fde879927df6107d0f4a5..3336688fed5e98b2ddcce4cb3be3332e5babdd86 100644 (file)
@@ -61,8 +61,11 @@ enum TRI_STATE {
 
 #define MAX_PORTS_IN_MANA_DEV 256
 
+/* Maximum number of packets per coalesced CQE */
+#define MANA_RXCOMP_OOB_NUM_PPI 4
+
 /* Update this count whenever the respective structures are changed */
-#define MANA_STATS_RX_COUNT 5
+#define MANA_STATS_RX_COUNT (6 + MANA_RXCOMP_OOB_NUM_PPI - 1)
 #define MANA_STATS_TX_COUNT 11
 
 #define MANA_RX_FRAG_ALIGNMENT 64
@@ -73,6 +76,8 @@ struct mana_stats_rx {
        u64 xdp_drop;
        u64 xdp_tx;
        u64 xdp_redirect;
+       u64 pkt_len0_err;
+       u64 coalesced_cqe[MANA_RXCOMP_OOB_NUM_PPI - 1];
        struct u64_stats_sync syncp;
 };
 
@@ -227,8 +232,6 @@ struct mana_rxcomp_perpkt_info {
        u32 pkt_hash;
 }; /* HW DATA */
 
-#define MANA_RXCOMP_OOB_NUM_PPI 4
-
 /* Receive completion OOB */
 struct mana_rxcomp_oob {
        struct mana_cqe_header cqe_hdr;