From f560e4e7c6b955dd30d7104b0a865af405045462 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Thu, 12 Jun 2014 10:20:25 -0700
Subject: [PATCH] 3.4-stable patches

added patches:
	mlx4_en-don-t-use-napi_synchronize-inside-mlx4_en_netpoll.patch
---
 ...i_synchronize-inside-mlx4_en_netpoll.patch | 73 +++++++++++++++++++
 1 file changed, 73 insertions(+)
 create mode 100644 queue-3.4/mlx4_en-don-t-use-napi_synchronize-inside-mlx4_en_netpoll.patch

diff --git a/queue-3.4/mlx4_en-don-t-use-napi_synchronize-inside-mlx4_en_netpoll.patch b/queue-3.4/mlx4_en-don-t-use-napi_synchronize-inside-mlx4_en_netpoll.patch
new file mode 100644
index 00000000000..e469654be2d
--- /dev/null
+++ b/queue-3.4/mlx4_en-don-t-use-napi_synchronize-inside-mlx4_en_netpoll.patch
@@ -0,0 +1,73 @@
+From 0610ae43cefde18c9270e7c791faa629382200f1 Mon Sep 17 00:00:00 2001
+From: Chris Mason
+Date: Tue, 15 Apr 2014 18:09:24 -0400
+Subject: mlx4_en: don't use napi_synchronize inside mlx4_en_netpoll
+
+From: Chris Mason
+
+commit c98235cb8584a72e95786e17d695a8e5fafcd766 upstream.
+
+The mlx4 driver is triggering schedules while atomic inside
+mlx4_en_netpoll:
+
+	spin_lock_irqsave(&cq->lock, flags);
+	napi_synchronize(&cq->napi);
+		^^^^^ msleep here
+	mlx4_en_process_rx_cq(dev, cq, 0);
+	spin_unlock_irqrestore(&cq->lock, flags);
+
+This was part of a patch by Alexander Guller from Mellanox in 2011,
+but it still isn't upstream.
+
+Signed-off-by: Chris Mason
+Acked-By: Amir Vadai
+Signed-off-by: David S. Miller
+Signed-off-by: Jiri Slaby
+Cc: Masoud Sharbiani
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/net/ethernet/mellanox/mlx4/en_cq.c     | 1 -
+ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 6 +-----
+ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h   | 1 -
+ 3 files changed, 1 insertion(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+@@ -55,7 +55,6 @@ int mlx4_en_create_cq(struct mlx4_en_pri
+ 
+ 	cq->ring = ring;
+ 	cq->is_tx = mode;
+-	spin_lock_init(&cq->lock);
+ 
+ 	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
+ 				cq->buf_size, 2 * PAGE_SIZE);
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -370,15 +370,11 @@ static void mlx4_en_netpoll(struct net_d
+ {
+ 	struct mlx4_en_priv *priv = netdev_priv(dev);
+ 	struct mlx4_en_cq *cq;
+-	unsigned long flags;
+ 	int i;
+ 
+ 	for (i = 0; i < priv->rx_ring_num; i++) {
+ 		cq = &priv->rx_cq[i];
+-		spin_lock_irqsave(&cq->lock, flags);
+-		napi_synchronize(&cq->napi);
+-		mlx4_en_process_rx_cq(dev, cq, 0);
+-		spin_unlock_irqrestore(&cq->lock, flags);
++		napi_schedule(&cq->napi);
+ 	}
+ }
+ #endif
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -301,7 +301,6 @@ struct mlx4_en_cq {
+ 	struct mlx4_cq mcq;
+ 	struct mlx4_hwq_resources wqres;
+ 	int ring;
+-	spinlock_t lock;
+ 	struct net_device *dev;
+ 	struct napi_struct napi;
+ 	/* Per-core Tx cq processing support */
-- 
2.47.3