5.15-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 17 Oct 2022 13:52:28 +0000 (15:52 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 17 Oct 2022 13:52:28 +0000 (15:52 +0200)
added patches:
net-wwan-t7xx-use-gfp_atomic-under-spin-lock-in-t7xx_cldma_gpd_set_next_ptr.patch

queue-5.15/net-wwan-t7xx-use-gfp_atomic-under-spin-lock-in-t7xx_cldma_gpd_set_next_ptr.patch [new file with mode: 0644]

diff --git a/queue-5.15/net-wwan-t7xx-use-gfp_atomic-under-spin-lock-in-t7xx_cldma_gpd_set_next_ptr.patch b/queue-5.15/net-wwan-t7xx-use-gfp_atomic-under-spin-lock-in-t7xx_cldma_gpd_set_next_ptr.patch
new file mode 100644 (file)
index 0000000..e7e2517
--- /dev/null
@@ -0,0 +1,65 @@
+From 9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1 Mon Sep 17 00:00:00 2001
+From: Yang Yingliang <yangyingliang@huawei.com>
+Date: Thu, 19 May 2022 11:21:08 +0800
+Subject: net: wwan: t7xx: use GFP_ATOMIC under spin lock in t7xx_cldma_gpd_set_next_ptr()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+commit 9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1 upstream.
+
+Sometimes t7xx_cldma_gpd_set_next_ptr() is called under spin lock,
+so add a 'gfp_mask' parameter in t7xx_cldma_gpd_set_next_ptr() to pass
+the flag.
+
+Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
+Link: https://lore.kernel.org/r/20220519032108.2996400-1-yangyingliang@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wwan/t7xx/t7xx_hif_cldma.c |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+@@ -89,9 +89,9 @@ static void t7xx_cldma_gpd_set_next_ptr(
+ }
+ 
+ static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
+-                                      size_t size)
++                                      size_t size, gfp_t gfp_mask)
+ {
+-      req->skb = __dev_alloc_skb(size, GFP_KERNEL);
++      req->skb = __dev_alloc_skb(size, gfp_mask);
+       if (!req->skb)
+               return -ENOMEM;
+ 
+@@ -173,7 +173,7 @@ static int t7xx_cldma_gpd_rx_from_q(stru
+               spin_unlock_irqrestore(&queue->ring_lock, flags);
+               req = queue->rx_refill;
+-              ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
++              ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
+               if (ret)
+                       return ret;
+ 
+@@ -396,7 +396,7 @@ static struct cldma_request *t7xx_alloc_
+       if (!req->gpd)
+               goto err_free_req;
+-      val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
++      val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
+       if (val)
+               goto err_free_pool;
+ 
+@@ -793,7 +793,7 @@ static int t7xx_cldma_clear_rxq(struct c
+               if (req->skb)
+                       continue;
+-              ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
++              ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
+               if (ret)
+                       break;
+ 
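
For context, here is a minimal sketch of the pattern this patch applies. The struct and function names are hypothetical (they are not taken from the t7xx driver); only __dev_alloc_skb(), the GFP flags and the locking primitives are real kernel APIs. The point is that a GFP_KERNEL allocation may sleep and therefore must not be made while a spin lock is held, so the allocation helper takes a gfp_t argument and each call site passes whatever flag matches its context.

/*
 * Hypothetical example of the "caller supplies the GFP flags" pattern;
 * not part of the t7xx driver.
 */
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct rx_slot {
	spinlock_t lock;
	struct sk_buff *skb;
};

/* Allocation helper: the caller decides which GFP flags are safe. */
static int rx_slot_refill(struct rx_slot *slot, unsigned int size, gfp_t gfp_mask)
{
	struct sk_buff *skb = __dev_alloc_skb(size, gfp_mask);

	if (!skb)
		return -ENOMEM;
	slot->skb = skb;
	return 0;
}

/* Process context, may sleep: GFP_KERNEL is fine. */
static int rx_slot_init(struct rx_slot *slot, unsigned int size)
{
	spin_lock_init(&slot->lock);
	return rx_slot_refill(slot, size, GFP_KERNEL);
}

/* Refill while holding the lock: must not sleep, so pass GFP_ATOMIC. */
static int rx_slot_refill_locked(struct rx_slot *slot, unsigned int size)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&slot->lock, flags);
	ret = rx_slot_refill(slot, size, GFP_ATOMIC);
	spin_unlock_irqrestore(&slot->lock, flags);

	return ret;
}

The diff above follows the same split: only the t7xx_cldma_clear_rxq() call site, which the commit says is reached under a spin lock, switches to GFP_ATOMIC; t7xx_cldma_gpd_rx_from_q() allocates after dropping the ring lock and the request-allocation path appears to run in process context, so both keep GFP_KERNEL.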