From: Joe Damato
Date: Tue, 28 May 2024 18:11:38 +0000 (+0000)
Subject: net/mlx4: support per-queue statistics via netlink
X-Git-Tag: v6.11-rc1~163^2~288^2
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a5602c6edf7cbf5d69a32f089fb3938f42a3ff03;p=thirdparty%2Flinux.git

net/mlx4: support per-queue statistics via netlink

Make mlx4 compatible with the newly added netlink queue stats API.

Signed-off-by: Joe Damato
Tested-by: Martin Karsten
Reviewed-by: Tariq Toukan
Reviewed-by: Jacob Keller
Link: https://lore.kernel.org/r/20240528181139.515070-4-jdamato@fastly.com
Signed-off-by: Jakub Kicinski
---

diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4d2f8c4583469..281b34af0bb48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -43,6 +43,7 @@
 #include
 #include
 #include
+#include <net/netdev_queues.h>
 #include
 #include

@@ -3100,6 +3101,77 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
        last_i += NUM_PHY_STATS;
 }

+static void mlx4_get_queue_stats_rx(struct net_device *dev, int i,
+                                   struct netdev_queue_stats_rx *stats)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       const struct mlx4_en_rx_ring *ring;
+
+       spin_lock_bh(&priv->stats_lock);
+
+       if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+               goto out_unlock;
+
+       ring = priv->rx_ring[i];
+       stats->packets = READ_ONCE(ring->packets);
+       stats->bytes = READ_ONCE(ring->bytes);
+       stats->alloc_fail = READ_ONCE(ring->alloc_fail);
+
+out_unlock:
+       spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_get_queue_stats_tx(struct net_device *dev, int i,
+                                   struct netdev_queue_stats_tx *stats)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       const struct mlx4_en_tx_ring *ring;
+
+       spin_lock_bh(&priv->stats_lock);
+
+       if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+               goto out_unlock;
+
+       ring = priv->tx_ring[TX][i];
+       stats->packets = READ_ONCE(ring->packets);
+       stats->bytes = READ_ONCE(ring->bytes);
+
+out_unlock:
+       spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_get_base_stats(struct net_device *dev,
+                               struct netdev_queue_stats_rx *rx,
+                               struct netdev_queue_stats_tx *tx)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       spin_lock_bh(&priv->stats_lock);
+
+       if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+               goto out_unlock;
+
+       if (priv->rx_ring_num) {
+               rx->packets = 0;
+               rx->bytes = 0;
+               rx->alloc_fail = 0;
+       }
+
+       if (priv->tx_ring_num[TX]) {
+               tx->packets = 0;
+               tx->bytes = 0;
+       }
+
+out_unlock:
+       spin_unlock_bh(&priv->stats_lock);
+}
+
+static const struct netdev_stat_ops mlx4_stat_ops = {
+       .get_queue_stats_rx = mlx4_get_queue_stats_rx,
+       .get_queue_stats_tx = mlx4_get_queue_stats_tx,
+       .get_base_stats = mlx4_get_base_stats,
+};
+
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        struct mlx4_en_port_profile *prof)
 {
@@ -3263,6 +3335,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

+       dev->stat_ops = &mlx4_stat_ops;
        dev->ethtool_ops = &mlx4_en_ethtool_ops;

        /*
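
For reference, the per-queue counters wired up here can be read from userspace by
dumping the netdev netlink family's qstats-get op. The sketch below uses the
in-tree YNL Python library (tools/net/ynl/lib) and is only illustrative: the spec
path, the "scope" attribute spelling, and the working directory are assumptions
based on recent kernel trees, not part of this patch.

    # Illustrative only: dump per-queue rx/tx stats via the netdev netlink
    # family with the in-tree YNL Python library. Intended to be run from
    # tools/net/ynl/ in a kernel source tree; adjust the spec path as needed.
    from lib import YnlFamily

    ynl = YnlFamily("Documentation/netlink/specs/netdev.yaml")

    # "scope": "queue" asks for one entry per rx/tx queue rather than the
    # per-interface base stats; each entry carries ifindex, queue type/id and
    # the packet/byte counters the driver's netdev_stat_ops filled in.
    for entry in ynl.dump("qstats-get", {"scope": "queue"}):
        print(entry)

With this patch applied, mlx4 queues should show up in such a dump with the
packets, bytes and (for rx) alloc-fail counters taken from the driver's rings.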