git.ipfire.org Git - thirdparty/linux.git/commitdiff
idpf: remove legacy Page Pool Ethtool stats
author Alexander Lobakin <aleksander.lobakin@intel.com>
Thu, 20 Jun 2024 13:53:44 +0000 (15:53 +0200)
committer Tony Nguyen <anthony.l.nguyen@intel.com>
Wed, 10 Jul 2024 17:44:59 +0000 (10:44 -0700)
Page Pool Ethtool stats are deprecated since the Netlink Page Pool
interface introduction.
idpf receives big changes in Rx buffer management, including &page_pool
layout, so keeping these deprecated stats only does harm, not to mention
that CONFIG_IDPF selects CONFIG_PAGE_POOL_STATS unconditionally,
while the latter is often turned off for better performance.
Remove all the references to PP stats from the Ethtool code. The stats
are still available in full via the generic Netlink interface.

Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/idpf/Kconfig
drivers/net/ethernet/intel/idpf/idpf_ethtool.c

index 638484c5723c954d37d3c3107950f2d8ec2e5396..1f071143d9922f80ce0c0900c9bd7b596499b92d 100644 (file)
@@ -7,7 +7,6 @@ config IDPF
        select DIMLIB
        select LIBETH
        select PAGE_POOL
-       select PAGE_POOL_STATS
        help
          This driver supports Intel(R) Infrastructure Data Path Function
          devices.
index e933fed16c7ea5d2055e483cfe7b6bfeeada0ea5..3806ddd3ce4ab95a4d52dbabaad34957c775d935 100644 (file)
@@ -565,8 +565,6 @@ static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
        for (i = 0; i < vport_config->max_q.max_rxq; i++)
                idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
                                       "rx", i);
-
-       page_pool_ethtool_stats_get_strings(data);
 }
 
 /**
@@ -600,7 +598,6 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_vport_config *vport_config;
        u16 max_txq, max_rxq;
-       unsigned int size;
 
        if (sset != ETH_SS_STATS)
                return -EINVAL;
@@ -619,11 +616,8 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
        max_txq = vport_config->max_q.max_txq;
        max_rxq = vport_config->max_q.max_rxq;
 
-       size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
+       return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
               (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
-       size += page_pool_ethtool_stats_get_count();
-
-       return size;
 }
 
 /**
@@ -876,7 +870,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
 {
        struct idpf_netdev_priv *np = netdev_priv(netdev);
        struct idpf_vport_config *vport_config;
-       struct page_pool_stats pp_stats = { };
        struct idpf_vport *vport;
        unsigned int total = 0;
        unsigned int i, j;
@@ -946,32 +939,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
                                idpf_add_empty_queue_stats(&data, qtype);
                        else
                                idpf_add_queue_stats(&data, rxq, qtype);
-
-                       /* In splitq mode, don't get page pool stats here since
-                        * the pools are attached to the buffer queues
-                        */
-                       if (is_splitq)
-                               continue;
-
-                       if (rxq)
-                               page_pool_get_stats(rxq->pp, &pp_stats);
-               }
-       }
-
-       for (i = 0; i < vport->num_rxq_grp; i++) {
-               for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
-                       struct idpf_buf_queue *rxbufq =
-                               &vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
-
-                       page_pool_get_stats(rxbufq->pp, &pp_stats);
                }
        }
 
        for (; total < vport_config->max_q.max_rxq; total++)
                idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
 
-       page_pool_ethtool_stats_get(data, &pp_stats);
-
        rcu_read_unlock();
 
        idpf_vport_ctrl_unlock(netdev);