idpf: fix netdev Tx queue stop/wake
author     Michal Kubiak <michal.kubiak@intel.com>
           Wed, 4 Sep 2024 15:47:47 +0000 (17:47 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 4 Oct 2024 14:33:46 +0000 (16:33 +0200)
[ Upstream commit e4b398dd82f5d5867bc5f442c43abc8fba30ed2c ]

netif_txq_maybe_stop() returns -1, 0, or 1, while
idpf_tx_maybe_stop_common() claims to return 0 or -EBUSY. As a result,
there are sometimes Tx queue timeout warnings even though the queue is
empty or there is at least enough space to restart it.

Make idpf_tx_maybe_stop_common() inline and return true or false,
handling the return value of netif_txq_maybe_stop() properly. Use a
correct goto in idpf_tx_maybe_stop_splitq() to avoid stopping the queue
or incrementing the ->q_busy counter twice.

Fixes: 6818c4d5b3c2 ("idpf: add splitq start_xmit")
Fixes: a5ab9ee0df0b ("idpf: add singleq start_xmit and napi poll")
Cc: stable@vger.kernel.org # 6.7+
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
drivers/net/ethernet/intel/idpf/idpf_txrx.c
drivers/net/ethernet/intel/idpf/idpf_txrx.h
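
For context, netif_txq_maybe_stop() (include/net/netdev_queues.h) is tristate:
1 means enough descriptors are free and the queue was left running, 0 means the
queue was stopped, and -1 means the queue was stopped but immediately
re-enabled because space became available again. The old
idpf_tx_maybe_stop_common() returned that value directly while documenting
"0 or -EBUSY", so callers treated -1 ("keep transmitting") the same as a real
stop. A minimal sketch of the intended mapping, using a hypothetical helper
name not found in the driver, could look like this:

/*
 * Sketch only -- not part of the patch. Maps the tristate result of
 * netif_txq_maybe_stop() onto the boolean the idpf callers actually
 * want: "is the queue (still) stopped, so the caller must back off?"
 */
static inline bool example_txq_stopped(int maybe_stop_ret)
{
	/*
	 *  1 -> enough free descriptors, queue left running
	 *  0 -> queue stopped, report NETDEV_TX_BUSY
	 * -1 -> queue stopped but re-enabled right away (raced with a
	 *       completion), so transmission may continue
	 */
	return maybe_stop_ret == 0;
}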

diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 8630db24f63a74e0197cefb0dec76292381c112f..5e5fa2d0aa4d18807edd62b94cd908d7e1ffd146 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -369,6 +369,10 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
                                      IDPF_TX_DESCS_FOR_CTX)) {
                idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
 
+               u64_stats_update_begin(&tx_q->stats_sync);
+               u64_stats_inc(&tx_q->q_stats.q_busy);
+               u64_stats_update_end(&tx_q->stats_sync);
+
                return NETDEV_TX_BUSY;
        }
 
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 7b06ca7b9732ab924084f4259366d207d2cb06c7..9b7e67d0f38be977b5d67d8b6890c8d98a0a211c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2149,29 +2149,6 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
        desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
 }
 
-/**
- * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
- * @tx_q: the queue to be checked
- * @size: number of descriptors we want to assure is available
- *
- * Returns 0 if stop is not needed
- */
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size)
-{
-       struct netdev_queue *nq;
-
-       if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
-               return 0;
-
-       u64_stats_update_begin(&tx_q->stats_sync);
-       u64_stats_inc(&tx_q->q_stats.q_busy);
-       u64_stats_update_end(&tx_q->stats_sync);
-
-       nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
-
-       return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
-}
-
 /**
  * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
  * @tx_q: the queue to be checked
@@ -2183,7 +2160,7 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
                                     unsigned int descs_needed)
 {
        if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
-               goto splitq_stop;
+               goto out;
 
        /* If there are too many outstanding completions expected on the
         * completion queue, stop the TX queue to give the device some time to
@@ -2202,10 +2179,12 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
        return 0;
 
 splitq_stop:
+       netif_stop_subqueue(tx_q->netdev, tx_q->idx);
+
+out:
        u64_stats_update_begin(&tx_q->stats_sync);
        u64_stats_inc(&tx_q->q_stats.q_busy);
        u64_stats_update_end(&tx_q->stats_sync);
-       netif_stop_subqueue(tx_q->netdev, tx_q->idx);
 
        return -EBUSY;
 }
@@ -2228,7 +2207,11 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
        nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
        tx_q->next_to_use = val;
 
-       idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
+       if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) {
+               u64_stats_update_begin(&tx_q->stats_sync);
+               u64_stats_inc(&tx_q->q_stats.q_busy);
+               u64_stats_update_end(&tx_q->stats_sync);
+       }
 
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 5b3f19200255a86f26832b7c69b4c66b93082d3c..214a24e684634a93248c86ee1d1f815e61033a36 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -1148,7 +1148,6 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
                           struct idpf_tx_buf *first, u16 ring_idx);
 unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
                                         struct sk_buff *skb);
-int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
 void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
 netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
                                  struct idpf_tx_queue *tx_q);
@@ -1157,4 +1156,12 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
                                      u16 cleaned_count);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
 
+static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q,
+                                            u32 needed)
+{
+       return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx,
+                                         IDPF_DESC_UNUSED(tx_q),
+                                         needed, needed);
+}
+
 #endif /* !_IDPF_TXRX_H_ */
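
With the helper now returning a plain bool, a caller's hot path only counts a
busy event and reports NETDEV_TX_BUSY when the subqueue really stayed stopped.
The sketch below mirrors the singleq hunk above; the function name is
hypothetical and not part of the patch:

/* Illustrative sketch, not part of the patch. */
static netdev_tx_t example_xmit(struct idpf_tx_queue *tx_q,
				unsigned int descs_needed)
{
	if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) {
		/* Queue stayed stopped: count the busy event and back off. */
		u64_stats_update_begin(&tx_q->stats_sync);
		u64_stats_inc(&tx_q->q_stats.q_busy);
		u64_stats_update_end(&tx_q->stats_sync);

		return NETDEV_TX_BUSY;
	}

	/* ... build descriptors and ring the doorbell ... */
	return NETDEV_TX_OK;
}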