git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
bng_en: periodically fetch and accumulate hardware statistics
author: Bhargava Marreddy <bhargava.marreddy@broadcom.com>
Mon, 6 Apr 2026 18:04:17 +0000 (23:34 +0530)
committer: Jakub Kicinski <kuba@kernel.org>
Sun, 12 Apr 2026 18:09:37 +0000 (11:09 -0700)
Use the timer to schedule periodic stats collection via
the workqueue when the link is up. Fetch fresh counters from
hardware via DMA and accumulate them into 64-bit software
shadows, handling wrap-around for counters narrower than
64 bits.

Signed-off-by: Bhargava Marreddy <bhargava.marreddy@broadcom.com>
Reviewed-by: Vikas Gupta <vikas.gupta@broadcom.com>
Reviewed-by: Rahul Gupta <rahul-rg.gupta@broadcom.com>
Reviewed-by: Ajit Kumar Khaparde <ajit.khaparde@broadcom.com>
Link: https://patch.msgid.link/20260406180420.279470-8-bhargava.marreddy@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/broadcom/bnge/bnge_netdev.c
drivers/net/ethernet/broadcom/bnge/bnge_netdev.h

index 290bf1cd96c9b7c8878686333b08bfc7a8cc0a05..6f41dff3fbcba8a8718c634cbd98b56acc691bd8 100644 (file)
@@ -69,6 +69,9 @@ static int bnge_alloc_stats_mem(struct bnge_net *bn,
                if (!stats->hw_masks)
                        goto stats_mem_err;
        }
+
+       u64_stats_init(&stats->syncp);
+
        return 0;
 
 stats_mem_err:
@@ -294,9 +297,98 @@ static void bnge_timer(struct timer_list *t)
                }
        }
 
+       if (BNGE_LINK_IS_UP(bd) && bn->stats_coal_ticks)
+               bnge_queue_sp_work(bn, BNGE_PERIODIC_STATS_SP_EVENT);
+
        mod_timer(&bn->timer, jiffies + bn->current_interval);
 }
 
+/* Fold one hardware counter into its 64-bit software shadow.
+ * @hw:   latest raw value fetched from hardware (may be narrower than 64 bits)
+ * @sw:   64-bit software accumulator to update
+ * @mask: mask of the valid bits in the hardware counter
+ *
+ * The shadow keeps its accumulated high-order bits; the fresh masked
+ * hardware value replaces the low-order bits.  If the hardware value
+ * moved backwards relative to the shadow's low bits, the counter must
+ * have wrapped since the last fetch, so one full wrap period (mask + 1)
+ * is added.  Assumes at most one wrap between fetches.
+ */
+static void bnge_add_one_ctr(u64 hw, u64 *sw, u64 mask)
+{
+       u64 sw_tmp, sw_val;
+
+       hw &= mask;
+       sw_val = READ_ONCE(*sw);
+       sw_tmp = (sw_val & ~mask) | hw;
+       if (hw < (sw_val & mask))
+               sw_tmp += mask + 1;     /* counter wrapped since last read */
+       WRITE_ONCE(*sw, sw_tmp);
+}
+
+/* Accumulate an array of hardware counters into their software shadows.
+ * @hw_stats: DMA buffer of little-endian 64-bit hardware counters
+ * @sw_stats: parallel array of 64-bit software accumulators
+ * @masks:    per-counter valid-bit masks; -1ULL marks a full-width
+ *            64-bit counter that is copied directly (no wrap handling)
+ * @count:    number of counters in each array
+ * @syncp:    u64_stats sync point guarding readers of the shadows
+ *
+ * The whole batch is updated inside one u64_stats write section (with
+ * IRQs saved) so readers see a consistent snapshot.
+ */
+static void __bnge_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
+                                   int count, struct u64_stats_sync *syncp)
+{
+       unsigned long flags;
+       int i;
+
+       flags = u64_stats_update_begin_irqsave(syncp);
+       for (i = 0; i < count; i++) {
+               u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
+
+               if (masks[i] == -1ULL)
+                       WRITE_ONCE(sw_stats[i], hw);
+               else
+                       bnge_add_one_ctr(hw, &sw_stats[i], masks[i]);
+       }
+       u64_stats_update_end_irqrestore(syncp, flags);
+}
+
+/* Accumulate every counter in @stats into its software shadow.
+ * Silently skips stats blocks whose DMA buffer was never allocated.
+ * stats->len is in bytes; each counter is 8 bytes wide.
+ */
+static void bnge_accumulate_stats(struct bnge_stats_mem *stats)
+{
+       if (!stats->hw_stats)
+               return;
+
+       __bnge_accumulate_stats(stats->hw_stats, stats->sw_stats,
+                               stats->hw_masks, stats->len / 8, &stats->syncp);
+}
+
+/* Fold the freshly DMA'd hardware counters of all NQ rings, the port
+ * stats, and the extended port stats into their 64-bit software shadows.
+ * Called from the slow-path work task right after the qstats firmware
+ * commands refresh the DMA buffers.
+ */
+static void bnge_accumulate_all_stats(struct bnge_dev *bd)
+{
+       struct bnge_net *bn = netdev_priv(bd->netdev);
+       struct bnge_stats_mem *ring0_stats = NULL;
+       int i;
+
+       for (i = 0; i < bd->nq_nr_rings; i++) {
+               struct bnge_napi *bnapi = bn->bnapi[i];
+               struct bnge_nq_ring_info *nqr;
+               struct bnge_stats_mem *stats;
+
+               nqr = &bnapi->nq_ring;
+               stats = &nqr->stats;
+
+               /* All rings share ring 0's masks and counter count;
+                * only the per-ring buffers and syncp differ.
+                */
+               if (!ring0_stats)
+                       ring0_stats = &bn->bnapi[0]->nq_ring.stats;
+
+               __bnge_accumulate_stats(stats->hw_stats, stats->sw_stats,
+                                       ring0_stats->hw_masks,
+                                       ring0_stats->len / 8, &stats->syncp);
+       }
+
+       if (bn->flags & BNGE_FLAG_PORT_STATS) {
+               struct bnge_stats_mem *stats = &bn->port_stats;
+               __le64 *hw_stats = stats->hw_stats;
+               u64 *sw_stats = stats->sw_stats;
+               u64 *masks = stats->hw_masks;
+               u16 cnt;
+
+               /* The port stats block holds an rx_port_stats struct
+                * followed (at BNGE_TX_PORT_STATS_BYTE_OFFSET) by a
+                * tx_port_stats struct; accumulate each half separately.
+                */
+               cnt = sizeof(struct rx_port_stats) / 8;
+               __bnge_accumulate_stats(hw_stats, sw_stats, masks, cnt,
+                                       &stats->syncp);
+
+               hw_stats += BNGE_TX_PORT_STATS_BYTE_OFFSET / 8;
+               sw_stats += BNGE_TX_PORT_STATS_BYTE_OFFSET / 8;
+               masks += BNGE_TX_PORT_STATS_BYTE_OFFSET / 8;
+               cnt = sizeof(struct tx_port_stats) / 8;
+               __bnge_accumulate_stats(hw_stats, sw_stats, masks, cnt,
+                                       &stats->syncp);
+       }
+
+       if (bn->flags & BNGE_FLAG_PORT_STATS_EXT) {
+               bnge_accumulate_stats(&bn->rx_port_stats_ext);
+               bnge_accumulate_stats(&bn->tx_port_stats_ext);
+       }
+}
+
 static void bnge_sp_task(struct work_struct *work)
 {
        struct bnge_net *bn = container_of(work, struct bnge_net, sp_task);
@@ -309,6 +401,12 @@ static void bnge_sp_task(struct work_struct *work)
                return;
        }
 
+       if (test_and_clear_bit(BNGE_PERIODIC_STATS_SP_EVENT, &bn->sp_event)) {
+               bnge_hwrm_port_qstats(bd, 0);
+               bnge_hwrm_port_qstats_ext(bd, 0);
+               bnge_accumulate_all_stats(bd);
+       }
+
        if (test_and_clear_bit(BNGE_UPDATE_PHY_SP_EVENT, &bn->sp_event)) {
                int rc;
 
index 15ededb801f69e70d773da44e80abe2b3b7eb643..a73f51b01bc2cc9f4c2aa881cdd87120f9fd2f73 100644 (file)
@@ -278,6 +278,7 @@ enum bnge_sp_event {
        BNGE_LINK_SPEED_CHNG_SP_EVENT,
        BNGE_LINK_CFG_CHANGE_SP_EVENT,
        BNGE_UPDATE_PHY_SP_EVENT,
+       BNGE_PERIODIC_STATS_SP_EVENT,
 };
 
 struct bnge_net {