gve: fix incorrect buffer cleanup in gve_tx_clean_pending_packets for QPL
author     Ankit Garg <nktgrg@google.com>
           Fri, 20 Feb 2026 21:53:24 +0000 (13:53 -0800)
committer  Jakub Kicinski <kuba@kernel.org>
           Tue, 24 Feb 2026 01:22:48 +0000 (17:22 -0800)
In DQO-QPL mode, gve_tx_clean_pending_packets() incorrectly uses the RDA
buffer cleanup path. It iterates num_bufs times and attempts to unmap
entries in the dma array.

This leads to two issues:
1. The dma array shares storage with tx_qpl_buf_ids (union).
   Interpreting buffer IDs as DMA addresses results in attempting to
   unmap incorrect memory locations.
2. num_bufs in QPL mode (counting 2K chunks) can significantly exceed
   the size of the dma array, causing out-of-bounds access warnings
   (the trace below is how we noticed this issue).

UBSAN: array-index-out-of-bounds in drivers/net/ethernet/google/gve/gve_tx_dqo.c:178:5
index 18 is out of range for type 'dma_addr_t[18]' (aka 'unsigned long long[18]')
Workqueue: gve gve_service_task [gve]
Call Trace:
<TASK>
dump_stack_lvl+0x33/0xa0
__ubsan_handle_out_of_bounds+0xdc/0x110
gve_tx_stop_ring_dqo+0x182/0x200 [gve]
gve_close+0x1be/0x450 [gve]
gve_reset+0x99/0x120 [gve]
gve_service_task+0x61/0x100 [gve]
process_scheduled_works+0x1e9/0x380
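
For orientation, here is a simplified sketch of the pending-packet
bookkeeping, modeled on struct gve_tx_pending_packet_dqo in the
driver's gve.h (field types and the QPL array bound are illustrative;
the UBSAN report above confirms the real dma[] array holds 18
entries):

  struct gve_tx_pending_packet_dqo {
          struct sk_buff *skb;

          union {
                  /* RDA mode: one DMA mapping per segment of skb. */
                  struct {
                          dma_addr_t dma[18];
                          u32 len[18];
                  };
                  /* QPL mode: IDs of pre-registered 2K buffer chunks.
                   * num_bufs counts these chunks, so e.g. a 44K TSO
                   * payload uses 22 of them, more than the 18 slots
                   * of dma[]. Walking dma[]/len[] with that count
                   * both misreads packed buffer IDs as DMA addresses
                   * and indexes past the end of the arrays.
                   */
                  s16 tx_qpl_buf_ids[32];  /* bound illustrative */
          };

          u16 num_bufs;
          /* completion-tracking fields elided */
  };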

Fix this by properly checking for QPL mode and delegating to
gve_free_tx_qpl_bufs() to reclaim the buffers.
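
Condensed, the per-packet cleanup then becomes a mode check (the full
hunks are below; gve_free_tx_qpl_bufs() is the driver's existing QPL
reclaim helper, which hands the packet's 2K buffer chunks back to the
ring for reuse, and gve_unmap_packet() is the existing RDA unmap
helper, moved earlier in the file and given a num_bufs == 0 early
return so it is safe on pending-packet slots with nothing mapped):

  if (tx->dqo.qpl)
          gve_free_tx_qpl_bufs(tx, cur_state);
  else
          gve_unmap_packet(tx->dev, cur_state);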

Cc: stable@vger.kernel.org
Fixes: a6fb8d5a8b69 ("gve: Tx path for DQO-QPL")
Signed-off-by: Ankit Garg <nktgrg@google.com>
Reviewed-by: Jordan Rhee <jordanrhee@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: Joshua Washington <joshwash@google.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://patch.msgid.link/20260220215324.1631350-1-joshwash@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 28e85730f785e438b2a0460a1df8548f07cbc323..b57e8f13cb51bf04a4f46fbf2afd025a35e2385b 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -167,6 +167,25 @@ gve_free_pending_packet(struct gve_tx_ring *tx,
        }
 }
 
+static void gve_unmap_packet(struct device *dev,
+                            struct gve_tx_pending_packet_dqo *pkt)
+{
+       int i;
+
+       if (!pkt->num_bufs)
+               return;
+
+       /* SKB linear portion is guaranteed to be mapped */
+       dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
+                        dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
+       for (i = 1; i < pkt->num_bufs; i++) {
+               netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
+                                           dma_unmap_len(pkt, len[i]),
+                                           DMA_TO_DEVICE, 0);
+       }
+       pkt->num_bufs = 0;
+}
+
 /* gve_tx_free_desc - Cleans up all pending tx requests and buffers.
  */
 static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
@@ -176,21 +195,12 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
        for (i = 0; i < tx->dqo.num_pending_packets; i++) {
                struct gve_tx_pending_packet_dqo *cur_state =
                        &tx->dqo.pending_packets[i];
-               int j;
-
-               for (j = 0; j < cur_state->num_bufs; j++) {
-                       if (j == 0) {
-                               dma_unmap_single(tx->dev,
-                                       dma_unmap_addr(cur_state, dma[j]),
-                                       dma_unmap_len(cur_state, len[j]),
-                                       DMA_TO_DEVICE);
-                       } else {
-                               dma_unmap_page(tx->dev,
-                                       dma_unmap_addr(cur_state, dma[j]),
-                                       dma_unmap_len(cur_state, len[j]),
-                                       DMA_TO_DEVICE);
-                       }
-               }
+
+               if (tx->dqo.qpl)
+                       gve_free_tx_qpl_bufs(tx, cur_state);
+               else
+                       gve_unmap_packet(tx->dev, cur_state);
+
                if (cur_state->skb) {
                        dev_consume_skb_any(cur_state->skb);
                        cur_state->skb = NULL;
@@ -1157,22 +1167,6 @@ static void remove_from_list(struct gve_tx_ring *tx,
        }
 }
 
-static void gve_unmap_packet(struct device *dev,
-                            struct gve_tx_pending_packet_dqo *pkt)
-{
-       int i;
-
-       /* SKB linear portion is guaranteed to be mapped */
-       dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
-                        dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
-       for (i = 1; i < pkt->num_bufs; i++) {
-               netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
-                                           dma_unmap_len(pkt, len[i]),
-                                           DMA_TO_DEVICE, 0);
-       }
-       pkt->num_bufs = 0;
-}
-
 /* Completion types and expected behavior:
  * No Miss compl + Packet compl = Packet completed normally.
  * Miss compl + Re-inject compl = Packet completed normally.