From: Russell King (Oracle) Date: Fri, 20 Mar 2026 16:47:12 +0000 (+0000) Subject: net: stmmac: move stmmac_xmit() skb head handling X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=fcbf106189aaf69a2a2f6116672d442d473ecbcd;p=thirdparty%2Fkernel%2Fstable.git net: stmmac: move stmmac_xmit() skb head handling The skb head buffer handling is delayed in stmmac_xmit() until after the skb fragments have been populated into the descriptors. The reason is this code used to set the OWN bit on the first descriptor, which then allows the TX DMA to process the first and subsequent descriptors. However, as of commit 579a25a854d4 ("net: stmmac: Initial support for TBS") this is now separated, but the comments weren't updated. Move the code populating the first descriptor alongside the jumbo code which also populates the first descriptor. This gives a consistent location where we populate the descriptor(s) for the SKB head. Signed-off-by: Russell King (Oracle) Link: https://patch.msgid.link/E1w3d0C-0000000DfLj-0BLb@rmk-PC.armlinux.org.uk Tested-by: Maxime Chevallier Signed-off-by: Paolo Abeni --- diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ecb6d9a27567..fb9719820b93 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4763,6 +4763,33 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion); if (unlikely(entry < 0) && (entry != -EINVAL)) goto dma_map_err; + } else { + bool last_segment = (nfrags == 0); + + dma_addr = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, dma_addr)) + goto dma_map_err; + + stmmac_set_tx_skb_dma_entry(tx_q, first_entry, dma_addr, + nopaged_len, false); + + stmmac_set_desc_addr(priv, first_desc, dma_addr); + + if (last_segment) + 
stmmac_set_tx_dma_last_segment(tx_q, first_entry); + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + stmmac_enable_tx_timestamp(priv, first_desc); + } + + /* Prepare the first descriptor without setting the OWN bit */ + stmmac_prepare_tx_desc(priv, first_desc, 1, nopaged_len, + csum_insertion, priv->descriptor_mode, + 0, last_segment, skb->len); } for (i = 0; i < nfrags; i++) { @@ -4854,39 +4881,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (priv->sarc_type) stmmac_set_desc_sarc(priv, first_desc, priv->sarc_type); - /* Ready to fill the first descriptor and set the OWN bit w/o any - * problems because all the descriptors are actually ready to be - * passed to the DMA engine. - */ - if (likely(!is_jumbo)) { - bool last_segment = (nfrags == 0); - - dma_addr = dma_map_single(priv->device, skb->data, - nopaged_len, DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, dma_addr)) - goto dma_map_err; - - stmmac_set_tx_skb_dma_entry(tx_q, first_entry, dma_addr, - nopaged_len, false); - - stmmac_set_desc_addr(priv, first_desc, dma_addr); - - if (last_segment) - stmmac_set_tx_dma_last_segment(tx_q, first_entry); - - if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - priv->hwts_tx_en)) { - /* declare that device is doing timestamping */ - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - stmmac_enable_tx_timestamp(priv, first_desc); - } - - /* Prepare the first descriptor setting the OWN bit too */ - stmmac_prepare_tx_desc(priv, first_desc, 1, nopaged_len, - csum_insertion, priv->descriptor_mode, - 0, last_segment, skb->len); - } - if (tx_q->tbs & STMMAC_TBS_EN) { struct timespec64 ts = ns_to_timespec64(skb->tstamp); @@ -4894,6 +4888,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec); } + /* Set the 
OWN bit on the first descriptor now that all descriptors + * for this skb are populated. + */ stmmac_set_tx_owner(priv, first_desc); netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);