]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
net: ionic: map SKB after pseudo-header checksum prep
author: Mohammad Heib <mheib@redhat.com>
Fri, 31 Oct 2025 15:52:03 +0000 (17:52 +0200)
committer: Jakub Kicinski <kuba@kernel.org>
Tue, 4 Nov 2025 01:19:21 +0000 (17:19 -0800)
The TSO path called ionic_tx_map_skb() before preparing the TCP pseudo
checksum (ionic_tx_tcp_[inner_]pseudo_csum()), which may perform
skb_cow_head() and may modify bytes in the linear header area.

Mapping first and then mutating the header risks:
  - Using a stale DMA address if skb_cow_head() relocates the head, and/or
  - Device reading stale header bytes on weakly-ordered systems
    (CPU writes after mapping are not guaranteed visible without an
    explicit dma_sync_single_for_device()).

Reorder the TX path to perform all header mutations (including
skb_cow_head()) *before* DMA mapping. Mapping is now done only after the
skb layout and header contents are final. This removes the need for any
post-mapping dma_sync and prevents on-wire corruption observed under
VLAN+TSO load after repeated runs.

This change is purely an ordering fix; no functional behavior change
otherwise.

Fixes: 0f3154e6bcb3 ("ionic: Add Tx and Rx handling")
Signed-off-by: Mohammad Heib <mheib@redhat.com>
Reviewed-by: Brett Creeley <brett.creeley@amd.com>
Link: https://patch.msgid.link/20251031155203.203031-2-mheib@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/ethernet/pensando/ionic/ionic_txrx.c

index 2e571d0a0d8a282a51ce7266ec4de63c1f645c01..301ebee2fdc50b046dbea6bcbbbcca31dab4a354 100644 (file)
@@ -1448,19 +1448,6 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
        bool encap;
        int err;
 
-       desc_info = &q->tx_info[q->head_idx];
-
-       if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
-               return -EIO;
-
-       len = skb->len;
-       mss = skb_shinfo(skb)->gso_size;
-       outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
-                                                  SKB_GSO_GRE_CSUM |
-                                                  SKB_GSO_IPXIP4 |
-                                                  SKB_GSO_IPXIP6 |
-                                                  SKB_GSO_UDP_TUNNEL |
-                                                  SKB_GSO_UDP_TUNNEL_CSUM));
        has_vlan = !!skb_vlan_tag_present(skb);
        vlan_tci = skb_vlan_tag_get(skb);
        encap = skb->encapsulation;
@@ -1474,12 +1461,21 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
                err = ionic_tx_tcp_inner_pseudo_csum(skb);
        else
                err = ionic_tx_tcp_pseudo_csum(skb);
-       if (unlikely(err)) {
-               /* clean up mapping from ionic_tx_map_skb */
-               ionic_tx_desc_unmap_bufs(q, desc_info);
+       if (unlikely(err))
                return err;
-       }
 
+       desc_info = &q->tx_info[q->head_idx];
+       if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
+               return -EIO;
+
+       len = skb->len;
+       mss = skb_shinfo(skb)->gso_size;
+       outer_csum = (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+                                                  SKB_GSO_GRE_CSUM |
+                                                  SKB_GSO_IPXIP4 |
+                                                  SKB_GSO_IPXIP6 |
+                                                  SKB_GSO_UDP_TUNNEL |
+                                                  SKB_GSO_UDP_TUNNEL_CSUM));
        if (encap)
                hdrlen = skb_inner_tcp_all_headers(skb);
        else