From 1e86ace4c140fd5a693e266c9b23409358f25381 Mon Sep 17 00:00:00 2001
From: Saeed Mahameed
Date: Mon, 19 Nov 2018 10:52:31 -0800
Subject: [PATCH] net/mlx5: EQ, Use the right place to store/read IRQ affinity
 hint

Currently the CPU affinity hint mask for completion EQs is stored at
and read from the wrong place. Since both the store and the read use
the same index, there is no functional issue, but the internal
irq_info entries for completion EQs start at offset
MLX5_EQ_VEC_COMP_BASE in the irq_info array. This patch changes the
code to use the correct offset to store and read the IRQ affinity
hint.

Signed-off-by: Saeed Mahameed
Reviewed-by: Leon Romanovsky
Reviewed-by: Tariq Toukan
Signed-off-by: Leon Romanovsky
---
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c |  2 +-
 drivers/net/ethernet/mellanox/mlx5/core/main.c    | 14 ++++++++------
 include/linux/mlx5/driver.h                       |  2 +-
 3 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1243edbedc9e9..2839c30dd3a0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1760,7 +1760,7 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
 
 static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
 {
-	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+	return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask);
 }
 
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 28132c7dc05f2..d5cea0a36e6a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -640,18 +640,19 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
 static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 {
 	struct mlx5_priv *priv = &mdev->priv;
-	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+	int irq = pci_irq_vector(mdev->pdev, vecidx);
 
-	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+	if (!zalloc_cpumask_var(&priv->irq_info[vecidx].mask, GFP_KERNEL)) {
 		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
 		return -ENOMEM;
 	}
 
 	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
-			priv->irq_info[i].mask);
+			priv->irq_info[vecidx].mask);
 
 	if (IS_ENABLED(CONFIG_SMP) &&
-	    irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+	    irq_set_affinity_hint(irq, priv->irq_info[vecidx].mask))
 		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
 
 	return 0;
@@ -659,11 +660,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 
 static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
 {
+	int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
 	struct mlx5_priv *priv = &mdev->priv;
-	int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+	int irq = pci_irq_vector(mdev->pdev, vecidx);
 
 	irq_set_affinity_hint(irq, NULL);
-	free_cpumask_var(priv->irq_info[i].mask);
+	free_cpumask_var(priv->irq_info[vecidx].mask);
 }
 
 static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index aa5963b5d38e1..7d4ed995b4ce5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1309,7 +1309,7 @@ enum {
 static inline const struct cpumask *
 mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
 {
-	return dev->priv.irq_info[vector].mask;
+	return dev->priv.irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
 }
 
 #endif /* MLX5_DRIVER_H */
-- 
2.39.2
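
P.S. For readers tracing the indexing change, below is a standalone
userspace sketch (not part of the patch) of the vector layout it relies
on. The concrete value of MLX5_EQ_VEC_COMP_BASE, the NUM_COMP_EQS count,
and the simplified struct irq_info are illustrative placeholders, not
the kernel definitions:

/* Standalone userspace sketch, not kernel code. Constants and the
 * simplified struct are illustrative placeholders. */
#include <stdio.h>

#define MLX5_EQ_VEC_COMP_BASE 3	/* vectors 0..2: control EQs (illustrative) */
#define NUM_COMP_EQS 4		/* completion EQs, e.g. one per channel (illustrative) */

struct irq_info {
	const char *mask;	/* stands in for the real cpumask_var_t */
};

/* Indexed by IRQ *vector*; completion EQ i lives at COMP_BASE + i. */
static struct irq_info irq_info[MLX5_EQ_VEC_COMP_BASE + NUM_COMP_EQS];

static int comp_eq_vector(int i)
{
	return MLX5_EQ_VEC_COMP_BASE + i;	/* the patched mapping */
}

int main(void)
{
	/* After the patch: store the hint in the slot that actually
	 * belongs to completion EQ i's vector. Before the patch, the
	 * store and the read both used irq_info[i], which was internally
	 * consistent but overlapped the control EQ slots and disagreed
	 * with the vector passed to pci_irq_vector(). */
	for (int i = 0; i < NUM_COMP_EQS; i++)
		irq_info[comp_eq_vector(i)].mask = "cpumask hint";

	for (int i = 0; i < NUM_COMP_EQS; i++)
		printf("completion EQ %d -> irq vector %d -> mask %s\n",
		       i, comp_eq_vector(i), irq_info[comp_eq_vector(i)].mask);
	return 0;
}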