git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
ice: remove legacy Rx and construct SKB
author: Michal Kubiak <michal.kubiak@intel.com>
Thu, 25 Sep 2025 09:22:51 +0000 (11:22 +0200)
committer: Tony Nguyen <anthony.l.nguyen@intel.com>
Wed, 29 Oct 2025 20:52:55 +0000 (13:52 -0700)
The commit 53844673d555 ("iavf: kill 'legacy-rx' for good") removed
the legacy Rx path in the iavf driver. This change applies the same
rationale to the ice driver.

The legacy Rx path relied on manual skb allocation and header copying,
which has become increasingly inefficient and difficult to maintain.
With the stabilization of build_skb() and the growing adoption of
features like XDP, page_pool, and multi-buffer support, the legacy
approach is no longer viable.

Key drawbacks of the legacy path included:
- Higher memory pressure due to direct page allocations and splitting;
- Redundant memcpy() operations for packet headers;
- CPU overhead from eth_get_headlen() and Flow Dissector usage;
- Compatibility issues with XDP, which imposes strict headroom and
  tailroom requirements.

The ice driver, like iavf, does not benefit from the minimal headroom
savings that legacy Rx once offered, as it already splits pages into
fixed halves. Removing this path simplifies the Rx logic, eliminates
unnecessary branches in the hotpath, and prepares the driver for
upcoming enhancements.

In addition to removing the legacy Rx path, this change also eliminates
the custom construct_skb() functions from both the standard and
zero-copy (ZC) Rx paths. These are replaced with the build_skb()
and standardized xdp_build_skb_from_zc() helpers, aligning the driver
with the modern XDP infrastructure and reducing code duplication.

This cleanup also reduces code complexity and improves maintainability
as we move toward a more unified and modern Rx model across drivers.

Co-developed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Tested-by: Alexander Nowlin <alexander.nowlin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_xsk.c

index 9ee596773f34e7afa009827185edf6ece3c95067..28de4273c2e82f3585a5b06bbebd2015711e5495 100644 (file)
@@ -509,7 +509,6 @@ enum ice_pf_flags {
        ICE_FLAG_MOD_POWER_UNSUPPORTED,
        ICE_FLAG_PHY_FW_LOAD_FAILED,
        ICE_FLAG_ETHTOOL_CTXT,          /* set when ethtool holds RTNL lock */
-       ICE_FLAG_LEGACY_RX,
        ICE_FLAG_VF_TRUE_PROMISC_ENA,
        ICE_FLAG_MDD_AUTO_RESET_VF,
        ICE_FLAG_VF_VLAN_PRUNING,
index 2d35a278c555c526939e509c386959da6ce0cba9..b3eb9f5125003207104c55b4e42f024cd3f1a368 100644 (file)
@@ -461,19 +461,6 @@ u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring)
        return tx_ring->count + max_fetch_desc;
 }
 
-/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
-{
-       if (ice_ring_uses_build_skb(rx_ring))
-               return ICE_SKB_PAD;
-       return 0;
-}
-
 /**
  * ice_setup_rx_ctx - Configure a receive ring context
  * @ring: The Rx ring to configure
@@ -586,13 +573,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
        if (vsi->type == ICE_VSI_VF)
                return 0;
 
-       /* configure Rx buffer alignment */
-       if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
-               ice_clear_ring_build_skb_ena(ring);
-       else
-               ice_set_ring_build_skb_ena(ring);
-
-       ring->rx_offset = ice_rx_offset(ring);
+       ring->rx_offset = ICE_SKB_PAD;
 
        /* init queue specific tail register */
        ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
@@ -753,7 +734,7 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
  */
 static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
 {
-       if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+       if (!vsi->netdev) {
                ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
                ring->rx_buf_len = ICE_RXBUF_1664;
 #if (PAGE_SIZE < 8192)
index cb34d4675a78858b54ff9e8cd800cf4437aa88c1..240e3f35c10ad5e7c24845c2f63834b5a9a50cb3 100644 (file)
@@ -340,7 +340,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
                      ICE_FLAG_VF_TRUE_PROMISC_ENA),
        ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
        ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
-       ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
 };
 
 #define ICE_PRIV_FLAG_ARRAY_SIZE       ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -1856,10 +1855,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
                        ice_nway_reset(netdev);
                }
        }
-       if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
-               /* down and up VSI so that changes of Rx cfg are reflected. */
-               ice_down_up(vsi);
-       }
        /* don't allow modification of this flag when a single VF is in
         * promiscuous mode because it's not supported
         */
index 1de3da7b3907db9ce8e4782f2130da132ff58ac7..36e6a078c84c72b69a44651745781eda8f04ecf8 100644 (file)
@@ -2957,10 +2957,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
  */
 static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
 {
-       if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
-               return ICE_RXBUF_1664;
-       else
-               return ICE_RXBUF_3072;
+       return ICE_RXBUF_3072;
 }
 
 /**
@@ -7864,12 +7861,6 @@ int ice_change_mtu(struct net_device *netdev, int new_mtu)
                                   frame_size - ICE_ETH_PKT_HDR_PAD);
                        return -EINVAL;
                }
-       } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
-               if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
-                       netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
-                                  ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
-                       return -EINVAL;
-               }
        }
 
        /* if a reset is in progress, wait for some time for it to complete */
index 73f08d02f9c76f836fe92b4c80e875a3cc63dde2..5d59ee45d3da43b22c15f64558a3342667e25af9 100644 (file)
@@ -1169,87 +1169,6 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
        return skb;
 }
 
-/**
- * ice_construct_skb - Allocate skb and populate it
- * @rx_ring: Rx descriptor ring to transact packets on
- * @xdp: xdp_buff pointing to the data
- *
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
- */
-static struct sk_buff *
-ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
-{
-       unsigned int size = xdp->data_end - xdp->data;
-       struct skb_shared_info *sinfo = NULL;
-       struct ice_rx_buf *rx_buf;
-       unsigned int nr_frags = 0;
-       unsigned int headlen;
-       struct sk_buff *skb;
-
-       /* prefetch first cache line of first page */
-       net_prefetch(xdp->data);
-
-       if (unlikely(xdp_buff_has_frags(xdp))) {
-               sinfo = xdp_get_shared_info_from_buff(xdp);
-               nr_frags = sinfo->nr_frags;
-       }
-
-       /* allocate a skb to store the frags */
-       skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE);
-       if (unlikely(!skb))
-               return NULL;
-
-       rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
-       skb_record_rx_queue(skb, rx_ring->q_index);
-       /* Determine available headroom for copy */
-       headlen = size;
-       if (headlen > ICE_RX_HDR_SIZE)
-               headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
-
-       /* align pull length to size of long to optimize memcpy performance */
-       memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
-                                                        sizeof(long)));
-
-       /* if we exhaust the linear part then add what is left as a frag */
-       size -= headlen;
-       if (size) {
-               /* besides adding here a partial frag, we are going to add
-                * frags from xdp_buff, make sure there is enough space for
-                * them
-                */
-               if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
-                       dev_kfree_skb(skb);
-                       return NULL;
-               }
-               skb_add_rx_frag(skb, 0, rx_buf->page,
-                               rx_buf->page_offset + headlen, size,
-                               xdp->frame_sz);
-       } else {
-               /* buffer is unused, restore biased page count in Rx buffer;
-                * data was copied onto skb's linear part so there's no
-                * need for adjusting page offset and we can reuse this buffer
-                * as-is
-                */
-               rx_buf->pagecnt_bias++;
-       }
-
-       if (unlikely(xdp_buff_has_frags(xdp))) {
-               struct skb_shared_info *skinfo = skb_shinfo(skb);
-
-               memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
-                      sizeof(skb_frag_t) * nr_frags);
-
-               xdp_update_skb_frags_info(skb, skinfo->nr_frags + nr_frags,
-                                         sinfo->xdp_frags_size,
-                                         nr_frags * xdp->frame_sz,
-                                         xdp_buff_get_skb_flags(xdp));
-       }
-
-       return skb;
-}
-
 /**
  * ice_put_rx_buf - Clean up used buffer and either recycle or free
  * @rx_ring: Rx descriptor ring to transact packets on
@@ -1464,10 +1383,7 @@ static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 
                continue;
 construct_skb:
-               if (likely(ice_ring_uses_build_skb(rx_ring)))
-                       skb = ice_build_skb(rx_ring, xdp);
-               else
-                       skb = ice_construct_skb(rx_ring, xdp);
+               skb = ice_build_skb(rx_ring, xdp);
                /* exit if we failed to retrieve a buffer */
                if (!skb) {
                        rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
index 841a07bfba54ffb4907e765881bee993238cf7b8..427f672fe0538e5b867e91842927f69277f0b128 100644 (file)
@@ -373,7 +373,6 @@ struct ice_rx_ring {
        dma_addr_t dma;                 /* physical address of ring */
        u8 dcb_tc;                      /* Traffic class of ring */
        u8 ptp_rx;
-#define ICE_RX_FLAGS_RING_BUILD_SKB    BIT(1)
 #define ICE_RX_FLAGS_CRC_STRIP_DIS     BIT(2)
 #define ICE_RX_FLAGS_MULTIDEV          BIT(3)
 #define ICE_RX_FLAGS_RING_GCS          BIT(4)
@@ -422,21 +421,6 @@ struct ice_tx_ring {
        u16 quanta_prof_id;
 } ____cacheline_internodealigned_in_smp;
 
-static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
-{
-       return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
-}
-
-static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
-{
-       ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
-}
-
-static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
-{
-       ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
-}
-
 static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
 {
        return !!ring->ch;
index 575fd48f485f1569e191dd2ca55cb654640f0147..b25bc5ba40abf4a750d689b84215b9f33c36fb06 100644 (file)
@@ -392,69 +392,6 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
        return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover);
 }
 
-/**
- * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
- * @rx_ring: Rx ring
- * @xdp: Pointer to XDP buffer
- *
- * This function allocates a new skb from a zero-copy Rx buffer.
- *
- * Returns the skb on success, NULL on failure.
- */
-static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
-{
-       unsigned int totalsize = xdp->data_end - xdp->data_meta;
-       unsigned int metasize = xdp->data - xdp->data_meta;
-       struct skb_shared_info *sinfo = NULL;
-       struct sk_buff *skb;
-       u32 nr_frags = 0;
-
-       if (unlikely(xdp_buff_has_frags(xdp))) {
-               sinfo = xdp_get_shared_info_from_buff(xdp);
-               nr_frags = sinfo->nr_frags;
-       }
-       net_prefetch(xdp->data_meta);
-
-       skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
-       if (unlikely(!skb))
-               return NULL;
-
-       memcpy(__skb_put(skb, totalsize), xdp->data_meta,
-              ALIGN(totalsize, sizeof(long)));
-
-       if (metasize) {
-               skb_metadata_set(skb, metasize);
-               __skb_pull(skb, metasize);
-       }
-
-       if (likely(!xdp_buff_has_frags(xdp)))
-               goto out;
-
-       for (int i = 0; i < nr_frags; i++) {
-               struct skb_shared_info *skinfo = skb_shinfo(skb);
-               skb_frag_t *frag = &sinfo->frags[i];
-               struct page *page;
-               void *addr;
-
-               page = dev_alloc_page();
-               if (!page) {
-                       dev_kfree_skb(skb);
-                       return NULL;
-               }
-               addr = page_to_virt(page);
-
-               memcpy(addr, skb_frag_page(frag), skb_frag_size(frag));
-
-               __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
-                                          addr, 0, skb_frag_size(frag));
-       }
-
-out:
-       xsk_buff_free(xdp);
-       return skb;
-}
-
 /**
  * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
  * @xdp_ring: XDP Tx ring
@@ -757,20 +694,15 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
 
 construct_skb:
                /* XDP_PASS path */
-               skb = ice_construct_skb_zc(rx_ring, first);
+               skb = xdp_build_skb_from_zc(first);
                if (!skb) {
+                       xsk_buff_free(first);
                        rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
                        break;
                }
 
                first = NULL;
                rx_ring->first_desc = ntc;
-
-               if (eth_skb_pad(skb)) {
-                       skb = NULL;
-                       continue;
-               }
-
                total_rx_bytes += skb->len;
                total_rx_packets++;