From: Aditya Garg <gargaditya@linux.microsoft.com>
Date: Tue, 18 Nov 2025 11:11:09 +0000 (-0800)
Subject: net: mana: Drop TX skb on post_work_request failure and unmap resources

net: mana: Drop TX skb on post_work_request failure and unmap resources

Drop TX packets when posting the work request fails and ensure DMA
mappings are always cleaned up.

Previously, a failed post returned NETDEV_TX_BUSY with the skb's DMA
buffers still mapped, leaving the stack to retry the packet. Instead,
unmap the skb with mana_unmap_skb() (now non-static and declared in
mana.h) and drop it through the existing free_sgl_ptr path. The
dev_err() for the flow-control case in mana_gd_post_work_request() is
also removed, since -ENOSPC is an expected condition that the caller
now handles.

Signed-off-by: Aditya Garg <gargaditya@linux.microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Link: https://patch.msgid.link/1763464269-10431-3-git-send-email-gargaditya@linux.microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index effe0a2f207aa..8fd70b34807af 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1300,7 +1300,6 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
 			      struct gdma_posted_wqe_info *wqe_info)
 {
 	u32 client_oob_size = wqe_req->inline_oob_size;
-	struct gdma_context *gc;
 	u32 sgl_data_size;
 	u32 max_wqe_size;
 	u32 wqe_size;
@@ -1330,11 +1329,8 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
 	if (wqe_size > max_wqe_size)
 		return -EINVAL;
 
-	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
-		gc = wq->gdma_dev->gdma_context;
-		dev_err(gc->dev, "unsuccessful flow control!\n");
+	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq))
 		return -ENOSPC;
-	}
 
 	if (wqe_info)
 		wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 7b49ab005e2d2..1ad154f9db1ad 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -492,9 +492,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	if (err) {
 		(void)skb_dequeue_tail(&txq->pending_skbs);
+		mana_unmap_skb(skb, apc);
 		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
-		err = NETDEV_TX_BUSY;
-		goto tx_busy;
+		goto free_sgl_ptr;
 	}
 
 	err = NETDEV_TX_OK;
@@ -514,7 +514,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
 	u64_stats_update_end(&tx_stats->syncp);
 
-tx_busy:
 	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
 		netif_tx_wake_queue(net_txq);
 		apc->eth_stats.wake_queue++;
@@ -1687,7 +1686,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
 	return 0;
 }
 
-static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
 {
 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index fb28b3cac067b..d7e089c6b6946 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -593,6 +593,7 @@ int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
 void mana_query_phy_stats(struct mana_port_context *apc);
 int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
 void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);
 
 extern const struct ethtool_ops mana_ethtool_ops;
 extern struct dentry *mana_debugfs_root;
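
For readers following the patch, here is a condensed sketch of the
resulting TX error path in mana_start_xmit(). The if (err) block matches
the diff above; the post call and the free_sgl_ptr/tx_drop_count/tx_drop
label bodies are paraphrased from the surrounding mana_en.c code and are
assumptions here, not part of this change:

	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
					(struct gdma_posted_wqe_info *)skb->cb);
	if (err) {
		(void)skb_dequeue_tail(&txq->pending_skbs);
		mana_unmap_skb(skb, apc);	/* release the skb's DMA mappings */
		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
		goto free_sgl_ptr;		/* drop instead of NETDEV_TX_BUSY */
	}
	...

free_sgl_ptr:
	if (skb->len > MAX_INLINE_PKT_SIZE)
		kfree(pkg.sgl_ptr);
tx_drop_count:
	ndev->stats.tx_dropped++;
tx_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

With this flow, a post failure consumes the skb (counted in tx_dropped)
rather than asking the stack to retry it with stale DMA state.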