]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 6 Jan 2025 10:48:50 +0000 (11:48 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 6 Jan 2025 10:48:50 +0000 (11:48 +0100)
added patches:
gve-process-xsk-tx-descriptors-as-part-of-rx-napi.patch

queue-6.12/gve-process-xsk-tx-descriptors-as-part-of-rx-napi.patch [new file with mode: 0644]
queue-6.12/series

diff --git a/queue-6.12/gve-process-xsk-tx-descriptors-as-part-of-rx-napi.patch b/queue-6.12/gve-process-xsk-tx-descriptors-as-part-of-rx-napi.patch
new file mode 100644 (file)
index 0000000..4e2ae7d
--- /dev/null
@@ -0,0 +1,119 @@
+From ba0925c34e0fa6fe02d3d642bc02ab099ab312c7 Mon Sep 17 00:00:00 2001
+From: Joshua Washington <joshwash@google.com>
+Date: Wed, 18 Dec 2024 05:34:14 -0800
+Subject: gve: process XSK TX descriptors as part of RX NAPI
+
+From: Joshua Washington <joshwash@google.com>
+
+commit ba0925c34e0fa6fe02d3d642bc02ab099ab312c7 upstream.
+
+When busy polling is enabled, xsk_sendmsg for AF_XDP zero copy marks
+the NAPI ID corresponding to the memory pool allocated for the socket.
+In GVE, this NAPI ID will never correspond to a NAPI ID of one of the
+dedicated XDP TX queues registered with the umem because XDP TX is not
+set up to share a NAPI with a corresponding RX queue.
+
+This patch moves XSK TX descriptor processing from the TX NAPI to the RX
+NAPI, and the gve_xsk_wakeup callback is updated to use the RX NAPI
+instead of the TX NAPI, accordingly. The branch on whether the wakeup is
+for TX is removed, as the NAPI poll should be invoked whether the wakeup
+is for TX or for RX.
+
+Fixes: fd8e40321a12 ("gve: Add AF_XDP zero-copy support for GQI-QPL format")
+Cc: stable@vger.kernel.org
+Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
+Signed-off-by: Joshua Washington <joshwash@google.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/google/gve/gve.h      |    1 
+ drivers/net/ethernet/google/gve/gve_main.c |    8 ++++++
+ drivers/net/ethernet/google/gve/gve_tx.c   |   36 +++++++++++++++++------------
+ 3 files changed, 31 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/ethernet/google/gve/gve.h
++++ b/drivers/net/ethernet/google/gve/gve.h
+@@ -1134,6 +1134,7 @@ int gve_xdp_xmit_one(struct gve_priv *pr
+ void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
+ bool gve_tx_poll(struct gve_notify_block *block, int budget);
+ bool gve_xdp_poll(struct gve_notify_block *block, int budget);
++int gve_xsk_tx_poll(struct gve_notify_block *block, int budget);
+ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
+                          struct gve_tx_alloc_rings_cfg *cfg);
+ void gve_tx_free_rings_gqi(struct gve_priv *priv,
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -333,6 +333,14 @@ int gve_napi_poll(struct napi_struct *na
+       if (block->rx) {
+               work_done = gve_rx_poll(block, budget);
++
++              /* Poll XSK TX as part of RX NAPI. Setup re-poll based on max of
++               * TX and RX work done.
++               */
++              if (priv->xdp_prog)
++                      work_done = max_t(int, work_done,
++                                        gve_xsk_tx_poll(block, budget));
++
+               reschedule |= work_done == budget;
+       }
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -975,33 +975,41 @@ out:
+       return sent;
+ }
++int gve_xsk_tx_poll(struct gve_notify_block *rx_block, int budget)
++{
++      struct gve_rx_ring *rx = rx_block->rx;
++      struct gve_priv *priv = rx->gve;
++      struct gve_tx_ring *tx;
++      int sent = 0;
++
++      tx = &priv->tx[gve_xdp_tx_queue_id(priv, rx->q_num)];
++      if (tx->xsk_pool) {
++              sent = gve_xsk_tx(priv, tx, budget);
++
++              u64_stats_update_begin(&tx->statss);
++              tx->xdp_xsk_sent += sent;
++              u64_stats_update_end(&tx->statss);
++              if (xsk_uses_need_wakeup(tx->xsk_pool))
++                      xsk_set_tx_need_wakeup(tx->xsk_pool);
++      }
++
++      return sent;
++}
++
+ bool gve_xdp_poll(struct gve_notify_block *block, int budget)
+ {
+       struct gve_priv *priv = block->priv;
+       struct gve_tx_ring *tx = block->tx;
+       u32 nic_done;
+-      bool repoll;
+       u32 to_do;
+       /* Find out how much work there is to be done */
+       nic_done = gve_tx_load_event_counter(priv, tx);
+       to_do = min_t(u32, (nic_done - tx->done), budget);
+       gve_clean_xdp_done(priv, tx, to_do);
+-      repoll = nic_done != tx->done;
+-
+-      if (tx->xsk_pool) {
+-              int sent = gve_xsk_tx(priv, tx, budget);
+-
+-              u64_stats_update_begin(&tx->statss);
+-              tx->xdp_xsk_sent += sent;
+-              u64_stats_update_end(&tx->statss);
+-              repoll |= (sent == budget);
+-              if (xsk_uses_need_wakeup(tx->xsk_pool))
+-                      xsk_set_tx_need_wakeup(tx->xsk_pool);
+-      }
+       /* If we still have work we want to repoll */
+-      return repoll;
++      return nic_done != tx->done;
+ }
+ bool gve_tx_poll(struct gve_notify_block *block, int budget)
index 1e60e263353d1373025e964fd16585da01093eb2..dcfe3454cc9c251b0b02640dffecb33b7b9415db 100644 (file)
@@ -141,3 +141,4 @@ mm-damon-core-fix-new-damon_target-objects-leaks-on-damon_commit_targets.patch
 mm-shmem-fix-the-update-of-shmem_falloc-nr_unswapped.patch
 mm-shmem-fix-incorrect-index-alignment-for-within_size-policy.patch
 fs-proc-task_mmu-fix-pagemap-flags-with-pmd-thp-entries-on-32bit.patch
+gve-process-xsk-tx-descriptors-as-part-of-rx-napi.patch