From: Greg Kroah-Hartman
Date: Mon, 17 Oct 2022 13:52:28 +0000 (+0200)
Subject: 5.15-stable patches
X-Git-Tag: v5.4.219~4
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=21b4bbbcebbd8daad1eaa2950577da6ea915ab7d;p=thirdparty%2Fkernel%2Fstable-queue.git

5.15-stable patches

added patches:
	net-wwan-t7xx-use-gfp_atomic-under-spin-lock-in-t7xx_cldma_gpd_set_next_ptr.patch
---

diff --git a/queue-5.15/net-wwan-t7xx-use-gfp_atomic-under-spin-lock-in-t7xx_cldma_gpd_set_next_ptr.patch b/queue-5.15/net-wwan-t7xx-use-gfp_atomic-under-spin-lock-in-t7xx_cldma_gpd_set_next_ptr.patch
new file mode 100644
index 00000000000..e7e25172a70
--- /dev/null
+++ b/queue-5.15/net-wwan-t7xx-use-gfp_atomic-under-spin-lock-in-t7xx_cldma_gpd_set_next_ptr.patch
@@ -0,0 +1,65 @@
+From 9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1 Mon Sep 17 00:00:00 2001
+From: Yang Yingliang
+Date: Thu, 19 May 2022 11:21:08 +0800
+Subject: net: wwan: t7xx: use GFP_ATOMIC under spin lock in t7xx_cldma_gpd_set_next_ptr()
+
+From: Yang Yingliang
+
+commit 9ee152ee3ee3568b1a3302f2bb816d5440e6f5f1 upstream.
+
+Sometimes t7xx_cldma_gpd_set_next_ptr() is called under spin lock,
+so add 'gfp_mask' parameter in t7xx_cldma_gpd_set_next_ptr() to pass
+the flag.
+
+Fixes: 39d439047f1d ("net: wwan: t7xx: Add control DMA interface")
+Reported-by: Hulk Robot
+Signed-off-by: Yang Yingliang
+Reviewed-by: Loic Poulain
+Link: https://lore.kernel.org/r/20220519032108.2996400-1-yangyingliang@huawei.com
+Signed-off-by: Jakub Kicinski
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/net/wwan/t7xx/t7xx_hif_cldma.c |   10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
++++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+@@ -89,9 +89,9 @@ static void t7xx_cldma_gpd_set_next_ptr(
+ }
+ 
+ static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
+-					size_t size)
++					size_t size, gfp_t gfp_mask)
+ {
+-	req->skb = __dev_alloc_skb(size, GFP_KERNEL);
++	req->skb = __dev_alloc_skb(size, gfp_mask);
+ 	if (!req->skb)
+ 		return -ENOMEM;
+ 
+@@ -173,7 +173,7 @@ static int t7xx_cldma_gpd_rx_from_q(stru
+ 		spin_unlock_irqrestore(&queue->ring_lock, flags);
+ 		req = queue->rx_refill;
+ 
+-		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size);
++		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
+ 		if (ret)
+ 			return ret;
+ 
+@@ -396,7 +396,7 @@ static struct cldma_request *t7xx_alloc_
+ 	if (!req->gpd)
+ 		goto err_free_req;
+ 
+-	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size);
++	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
+ 	if (val)
+ 		goto err_free_pool;
+ 
+@@ -793,7 +793,7 @@ static int t7xx_cldma_clear_rxq(struct c
+ 		if (req->skb)
+ 			continue;
+ 
+-		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size);
++		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
+ 		if (ret)
+ 			break;
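
For reference, the fix follows the standard kernel rule that GFP_KERNEL allocations
may sleep and therefore must not be made while a spinlock is held; atomic-context
callers have to pass GFP_ATOMIC instead. Below is a minimal sketch of the pattern
of threading a gfp_t through an allocation helper so each caller picks a mask valid
in its own context. The names (example_alloc_skb, refill_in_process_context,
refill_under_lock) are illustrative only, not the driver's.

#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Helper takes a gfp_t so each caller chooses a mask valid in its context. */
static int example_alloc_skb(struct sk_buff **out, unsigned int size, gfp_t gfp_mask)
{
	*out = __dev_alloc_skb(size, gfp_mask);
	if (!*out)
		return -ENOMEM;
	return 0;
}

static int refill_in_process_context(struct sk_buff **out, unsigned int size)
{
	/* No lock held, sleeping is allowed: GFP_KERNEL is fine here. */
	return example_alloc_skb(out, size, GFP_KERNEL);
}

static int refill_under_lock(spinlock_t *lock, struct sk_buff **out, unsigned int size)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(lock, flags);
	/* Sleeping is forbidden with the spinlock held, so use GFP_ATOMIC. */
	ret = example_alloc_skb(out, size, GFP_ATOMIC);
	spin_unlock_irqrestore(lock, flags);
	return ret;
}

The patch above shows the same split: the refill paths that run in process context
keep GFP_KERNEL, while the one call site that runs in atomic context passes
GFP_ATOMIC.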