net: mana: Drop TX skb on post_work_request failure and unmap resources
author Aditya Garg <gargaditya@linux.microsoft.com>
Tue, 18 Nov 2025 11:11:09 +0000 (03:11 -0800)
committer Jakub Kicinski <kuba@kernel.org>
Thu, 20 Nov 2025 04:11:57 +0000 (20:11 -0800)
Drop TX packets when posting the work request fails and ensure DMA
mappings are always cleaned up.

Signed-off-by: Aditya Garg <gargaditya@linux.microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Link: https://patch.msgid.link/1763464269-10431-3-git-send-email-gargaditya@linux.microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/microsoft/mana/gdma_main.c
drivers/net/ethernet/microsoft/mana/mana_en.c
include/net/mana/mana.h
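
Before this change, a failed mana_gd_post_work_request() in mana_start_xmit() returned NETDEV_TX_BUSY, asking the stack to requeue the skb while its DMA mappings were still live; on retry the buffers could be mapped again, leaking the earlier mappings. The condensed sketch below shows the corrected pattern. It is an illustration assembled from the hunks that follow, not a verbatim excerpt; names such as pkg, gdma_sq and txq come from the surrounding driver code.

	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req, &pkg.wqe_info);
	if (err) {
		(void)skb_dequeue_tail(&txq->pending_skbs);
		mana_unmap_skb(skb, apc);	/* release DMA mappings taken at map time */
		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
		goto free_sgl_ptr;	/* free scratch, drop the skb, return NETDEV_TX_OK */
	}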

diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index effe0a2f207aa19df6cca3409dd248fc55d93708..8fd70b34807af033e7669a60dafcfd453e66a528 100644
@@ -1300,7 +1300,6 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
                              struct gdma_posted_wqe_info *wqe_info)
 {
        u32 client_oob_size = wqe_req->inline_oob_size;
-       struct gdma_context *gc;
        u32 sgl_data_size;
        u32 max_wqe_size;
        u32 wqe_size;
@@ -1330,11 +1329,8 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
        if (wqe_size > max_wqe_size)
                return -EINVAL;
 
-       if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
-               gc = wq->gdma_dev->gdma_context;
-               dev_err(gc->dev, "unsuccessful flow control!\n");
+       if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq))
                return -ENOSPC;
-       }
 
        if (wqe_info)
                wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
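
Dropping the dev_err() removes unconditional logging from the TX hot path: a full work queue is an expected transient under load, and the caller now handles -ENOSPC by unmapping and dropping the packet. If diagnostics were ever wanted at this spot, the hot-path-safe form would be ratelimited; the following is only a sketch of that alternative, not part of this patch.

	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
		/* not in this patch: ratelimited variant of the removed log */
		dev_err_ratelimited(wq->gdma_dev->gdma_context->dev,
				    "work queue full, WQE of %u bytes dropped\n",
				    wqe_size);
		return -ENOSPC;
	}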
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 7b49ab005e2d258b0d16fab9e1cdadbd219dc2a3..1ad154f9db1adc3e416b7e550546920a980f69ed 100644
@@ -492,9 +492,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        if (err) {
                (void)skb_dequeue_tail(&txq->pending_skbs);
+               mana_unmap_skb(skb, apc);
                netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
-               err = NETDEV_TX_BUSY;
-               goto tx_busy;
+               goto free_sgl_ptr;
        }
 
        err = NETDEV_TX_OK;
@@ -514,7 +514,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
        u64_stats_update_end(&tx_stats->syncp);
 
-tx_busy:
        if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
                netif_tx_wake_queue(net_txq);
                apc->eth_stats.wake_queue++;
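
The retargeted goto relies on mana_start_xmit()'s existing cleanup ladder, which lies outside these hunks. Reconstructed approximately from the driver's structure (not part of this diff), it reads roughly as below: jumping to free_sgl_ptr frees the SGL scratch buffer, counts the drop, frees the skb, and returns NETDEV_TX_OK so the stack does not requeue a packet whose resources are already released.

	/* approximate reconstruction; label bodies are outside the hunk context */
	free_sgl_ptr:
		kfree(pkg.sgl_ptr);
	tx_drop_count:
		ndev->stats.tx_dropped++;
	tx_drop:
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;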
@@ -1687,7 +1686,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
        return 0;
 }
 
-static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
 {
        struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
        struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
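
Only the linkage of mana_unmap_skb() changes here; its body sits below the shown context and walks the DMA handles recorded when the skb was mapped. As a hedged sketch of that pattern (ash and gc as in the context lines above; hsg standing for the number of head SGEs, a detail not visible in this hunk):

	/* approximate: head SGEs were mapped with dma_map_single(),
	 * page fragments with dma_map_page(), so release them in kind
	 */
	for (i = 0; i < hsg; i++)
		dma_unmap_single(gc->dev, ash->dma_handle[i], ash->size[i],
				 DMA_TO_DEVICE);
	for (; i < skb_shinfo(skb)->nr_frags + hsg; i++)
		dma_unmap_page(gc->dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);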
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index fb28b3cac067bc1ffd1e99375cb91bbe7da1009e..d7e089c6b69469696f1099bc073483c32456a078 100644
@@ -593,6 +593,7 @@ int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
 void mana_query_phy_stats(struct mana_port_context *apc);
 int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
 void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);
 
 extern const struct ethtool_ops mana_ethtool_ops;
 extern struct dentry *mana_debugfs_root;