git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
eth: fbnic: split fbnic_flush()
author Jakub Kicinski <kuba@kernel.org>
Mon, 1 Sep 2025 21:12:07 +0000 (14:12 -0700)
committer Paolo Abeni <pabeni@redhat.com>
Thu, 4 Sep 2025 08:19:17 +0000 (10:19 +0200)
Factor out handling a single nv from fbnic_flush() to make
it reusable for queue ops.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Link: https://patch.msgid.link/20250901211214.1027927-8-kuba@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/meta/fbnic/fbnic_txrx.c

index 7d6bf35acfd47e206fd8d9738a54131969c0f2a8..8384e73b44921ef8c202d7f3196af07b986819b7 100644 (file)
@@ -2297,54 +2297,57 @@ int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
        return err;
 }
 
-void fbnic_flush(struct fbnic_net *fbn)
+static void fbnic_nv_flush(struct fbnic_napi_vector *nv)
 {
-       int i;
-
-       for (i = 0; i < fbn->num_napi; i++) {
-               struct fbnic_napi_vector *nv = fbn->napi[i];
-               int j, t;
+       int j, t;
 
-               /* Flush any processed Tx Queue Triads and drop the rest */
-               for (t = 0; t < nv->txt_count; t++) {
-                       struct fbnic_q_triad *qt = &nv->qt[t];
-                       struct netdev_queue *tx_queue;
+       /* Flush any processed Tx Queue Triads and drop the rest */
+       for (t = 0; t < nv->txt_count; t++) {
+               struct fbnic_q_triad *qt = &nv->qt[t];
+               struct netdev_queue *tx_queue;
 
-                       /* Clean the work queues of unprocessed work */
-                       fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
-                       fbnic_clean_twq1(nv, false, &qt->sub1, true,
-                                        qt->sub1.tail);
+               /* Clean the work queues of unprocessed work */
+               fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
+               fbnic_clean_twq1(nv, false, &qt->sub1, true,
+                                qt->sub1.tail);
 
-                       /* Reset completion queue descriptor ring */
-                       memset(qt->cmpl.desc, 0, qt->cmpl.size);
+               /* Reset completion queue descriptor ring */
+               memset(qt->cmpl.desc, 0, qt->cmpl.size);
 
-                       /* Nothing else to do if Tx queue is disabled */
-                       if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
-                               continue;
+               /* Nothing else to do if Tx queue is disabled */
+               if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
+                       continue;
 
-                       /* Reset BQL associated with Tx queue */
-                       tx_queue = netdev_get_tx_queue(nv->napi.dev,
-                                                      qt->sub0.q_idx);
-                       netdev_tx_reset_queue(tx_queue);
-               }
+               /* Reset BQL associated with Tx queue */
+               tx_queue = netdev_get_tx_queue(nv->napi.dev,
+                                              qt->sub0.q_idx);
+               netdev_tx_reset_queue(tx_queue);
+       }
 
-               /* Flush any processed Rx Queue Triads and drop the rest */
-               for (j = 0; j < nv->rxt_count; j++, t++) {
-                       struct fbnic_q_triad *qt = &nv->qt[t];
+       /* Flush any processed Rx Queue Triads and drop the rest */
+       for (j = 0; j < nv->rxt_count; j++, t++) {
+               struct fbnic_q_triad *qt = &nv->qt[t];
 
-                       /* Clean the work queues of unprocessed work */
-                       fbnic_clean_bdq(&qt->sub0, qt->sub0.tail, 0);
-                       fbnic_clean_bdq(&qt->sub1, qt->sub1.tail, 0);
+               /* Clean the work queues of unprocessed work */
+               fbnic_clean_bdq(&qt->sub0, qt->sub0.tail, 0);
+               fbnic_clean_bdq(&qt->sub1, qt->sub1.tail, 0);
 
-                       /* Reset completion queue descriptor ring */
-                       memset(qt->cmpl.desc, 0, qt->cmpl.size);
+               /* Reset completion queue descriptor ring */
+               memset(qt->cmpl.desc, 0, qt->cmpl.size);
 
-                       fbnic_put_pkt_buff(qt, qt->cmpl.pkt, 0);
-                       memset(qt->cmpl.pkt, 0, sizeof(struct fbnic_pkt_buff));
-               }
+               fbnic_put_pkt_buff(qt, qt->cmpl.pkt, 0);
+               memset(qt->cmpl.pkt, 0, sizeof(struct fbnic_pkt_buff));
        }
 }
 
+void fbnic_flush(struct fbnic_net *fbn)
+{
+       int i;
+
+       for (i = 0; i < fbn->num_napi; i++)
+               fbnic_nv_flush(fbn->napi[i]);
+}
+
 void fbnic_fill(struct fbnic_net *fbn)
 {
        int i;