ice: add a separate Rx handler for flow director commands
author    Michal Kubiak <michal.kubiak@intel.com>
          Wed, 14 May 2025 12:37:24 +0000 (14:37 +0200)
committer Tony Nguyen <anthony.l.nguyen@intel.com>
          Mon, 9 Jun 2025 16:56:18 +0000 (09:56 -0700)
The "ice" driver implementation uses the control VSI to handle
the flow director configuration for PFs and VFs.

Unfortunately, although a separate VSI type was created to handle the
flow director queues, the Rx handling code was shared between the flow
director path and the standard NAPI Rx handler.

Such a design is not very flexible. First, it mixes hotpath and
slowpath code, blocking further optimization of both. It also adds
needless overhead to flow director command processing, which is
descriptor-based only, so there is no need to allocate Rx data buffers.

For the above reasons, implement a separate Rx handler for the control
VSI and remove the code dedicated to configuring flow director rules
on VFs from the NAPI handler.
Do not allocate Rx data buffers for the flow director queues because
their processing is descriptor-based only.
Finally, allocate Rx data buffers only for VSIs that have a netdev
assigned to them.

This handler splitting approach is the first step in converting the
driver to use the Page Pool (which can only be used for data queues).
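
After this change, the interrupt path for the control VSI looks roughly
as follows (an illustrative summary of the hunks below, not literal
driver code):

  ice_msix_clean_ctrl_vsi()            MSI-X handler for the control VSI
    -> ice_clean_ctrl_rx_irq()         descriptor-only Rx clean-up
         -> ice_vc_fdir_irq_handler()  complete FDIR rules programmed via a VF
         -> ice_init_ctrl_rx_descs()   re-arm the cleaned descriptors
    -> ice_clean_ctrl_tx_irq()         Tx clean-up, unchanged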

Test hints:
  1. Create a VF for any PF managed by the ice driver.
  2. In a loop, add and delete flow director rules for the VF, e.g.:

       for i in {1..128}; do
           q=$(( i % 16 ))
           ethtool -N ens802f0v0 flow-type tcp4 dst-port "$i" action "$q"
       done

       for i in {0..127}; do
           ethtool -N ens802f0v0 delete "$i"
       done
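
  3. Optionally, between the two loops, confirm that the rules were
     installed by listing the programmed entries (same example interface
     name as above):

       ethtool -n ens802f0v0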

Suggested-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Suggested-by: Michal Swiatkowski <michal.swiatkowski@intel.com>
Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.h

diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 6db4ad8fc70b0658f399e519c28611d1c206f70d..270f936ce8074a531160d7f379d24c927b9f0bcd 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -623,7 +623,10 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                return 0;
        }
 
-       ice_alloc_rx_bufs(ring, num_bufs);
+       if (ring->vsi->type == ICE_VSI_CTRL)
+               ice_init_ctrl_rx_descs(ring, num_bufs);
+       else
+               ice_alloc_rx_bufs(ring, num_bufs);
 
        return 0;
 }
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 2cc050db509f17e4c65dea8a3080119d803fcca9..2f1782e9357f930a91f928b78fdf105f0bf4e6ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -484,8 +484,7 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
        if (!q_vector->tx.tx_ring)
                return IRQ_HANDLED;
 
-#define FDIR_RX_DESC_CLEAN_BUDGET 64
-       ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
+       ice_clean_ctrl_rx_irq(q_vector->rx.rx_ring);
        ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
 
        return IRQ_HANDLED;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 0e5107fe62ad5b7ac594ea71a192a8fbaa932f1e..29e0088ab6b28bf3d8eef27c4ecb8e733a21824a 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -20,7 +20,6 @@
 
 #define ICE_RX_HDR_SIZE                256
 
-#define FDIR_DESC_RXDID 0x40
 #define ICE_FDIR_CLEAN_DELAY 10
 
 /**
@@ -706,6 +705,37 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
        return true;
 }
 
+/**
+ * ice_init_ctrl_rx_descs - Initialize Rx descriptors for control vsi.
+ * @rx_ring: ring to init descriptors on
+ * @count: number of descriptors to initialize
+ */
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count)
+{
+       union ice_32b_rx_flex_desc *rx_desc;
+       u32 ntu = rx_ring->next_to_use;
+
+       if (!count)
+               return;
+
+       rx_desc = ICE_RX_DESC(rx_ring, ntu);
+
+       do {
+               rx_desc++;
+               ntu++;
+               if (unlikely(ntu == rx_ring->count)) {
+                       rx_desc = ICE_RX_DESC(rx_ring, 0);
+                       ntu = 0;
+               }
+
+               rx_desc->wb.status_error0 = 0;
+               count--;
+       } while (count);
+
+       if (rx_ring->next_to_use != ntu)
+               ice_release_rx_desc(rx_ring, ntu);
+}
+
 /**
  * ice_alloc_rx_bufs - Replace used receive buffers
  * @rx_ring: ring to place buffers on
@@ -726,8 +756,7 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
        struct ice_rx_buf *bi;
 
        /* do nothing if no valid netdev defined */
-       if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
-           !cleaned_count)
+       if (!rx_ring->netdev || !cleaned_count)
                return false;
 
        /* get the Rx descriptor and buffer based on next_to_use */
@@ -1183,6 +1212,45 @@ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
        rx_ring->nr_frags = 0;
 }
 
+/**
+ * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
+ * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on
+ *
+ * This function cleans Rx descriptors from the ctrl_vsi Rx ring used
+ * to set flow director rules on VFs.
+ */
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
+{
+       u32 ntc = rx_ring->next_to_clean;
+       unsigned int total_rx_pkts = 0;
+       u32 cnt = rx_ring->count;
+
+       while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) {
+               struct ice_vsi *ctrl_vsi = rx_ring->vsi;
+               union ice_32b_rx_flex_desc *rx_desc;
+               u16 stat_err_bits;
+
+               rx_desc = ICE_RX_DESC(rx_ring, ntc);
+
+               stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+               if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
+                       break;
+
+               dma_rmb();
+
+               if (ctrl_vsi->vf)
+                       ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
+
+               if (++ntc == cnt)
+                       ntc = 0;
+               total_rx_pkts++;
+       }
+
+       rx_ring->first_desc = ntc;
+       rx_ring->next_to_clean = ntc;
+       ice_init_ctrl_rx_descs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
+}
+
 /**
  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
  * @rx_ring: Rx descriptor ring to transact packets on
@@ -1195,7 +1263,7 @@ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
  *
  * Returns amount of work completed
  */
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
        unsigned int offset = rx_ring->rx_offset;
@@ -1242,17 +1310,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
                dma_rmb();
 
                ice_trace(clean_rx_irq, rx_ring, rx_desc);
-               if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
-                       struct ice_vsi *ctrl_vsi = rx_ring->vsi;
-
-                       if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
-                           ctrl_vsi->vf)
-                               ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
-                       if (++ntc == cnt)
-                               ntc = 0;
-                       rx_ring->first_desc = ntc;
-                       continue;
-               }
 
                size = le16_to_cpu(rx_desc->wb.pkt_len) &
                        ICE_RX_FLX_DESC_PKT_LEN_M;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a4b1e95146327df49c45fba642de2b3e554d6b96..fef750c5f288f36af0ba2ad0d36c26a49d83443a 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -491,6 +491,7 @@ static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
 
 union ice_32b_rx_flex_desc;
 
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs);
 bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
 u16
@@ -506,6 +507,6 @@ int ice_napi_poll(struct napi_struct *napi, int budget);
 int
 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
                   u8 *raw_packet);
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
 void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring);
 #endif /* _ICE_TXRX_H_ */