git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
virtio_net: sync RX buffer before reading the header
author: Michael S. Tsirkin <mst@redhat.com>
Tue, 24 Mar 2026 15:15:54 +0000 (11:15 -0400)
committer: Jakub Kicinski <kuba@kernel.org>
Fri, 27 Mar 2026 03:07:45 +0000 (20:07 -0700)
receive_buf() reads the virtio header through buf before
page_pool_dma_sync_for_cpu() runs in receive_small() or
receive_mergeable(). The header buffer is thus unsynchronized at the
point where flags and, for mergeable buffers, num_buffers are consumed.

Omar Elghoul reported that on s390x Secure Execution this showed up as
greatly reduced virtio-net performance together with "bad gso" and
"bad csum" messages in dmesg. This is because with SE the sync actually
copies data, so a header read before the sync sees uninitialized memory.

Move the sync into receive_buf() so the
header is synchronized before any access through buf.

Tool use: Cursor with GPT-5.4 drafted the initial code move from prompt:
"in drivers/net/virtio_net.c, move page_pool_dma_sync_for_cpu on receive
path to before memory is accessed through buf".
The result and the commit log were reviewed and edited manually.

Fixes: 24fbd3967f3f ("virtio_net: add page_pool support for buffer allocation")
Reported-by: Omar Elghoul <oelghoul@linux.ibm.com>
Tested-by: Srikanth Aithal <sraithal@amd.com>
Tested-by: Omar Elghoul <oelghoul@linux.ibm.com>
Link: https://lore.kernel.org/r/20260323150136.14452-1-oelghoul@linux.ibm.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Tested-by: Vishwanath Seshagiri <vishs@meta.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Link: https://patch.msgid.link/f4caa9be9e5addae7851c012cab0a733be7f0974.1774365273.git.mst@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
drivers/net/virtio_net.c

index bce885dc234b5bda1076a5f47cbc8e7e1dbaeafc..9cf240afe81e7dea254817228e941924ed5f8016 100644 (file)
@@ -1956,13 +1956,6 @@ static struct sk_buff *receive_small(struct net_device *dev,
         */
        buf -= VIRTNET_RX_PAD + xdp_headroom;
 
-       if (rq->use_page_pool_dma) {
-               int offset = buf - page_address(page) +
-                            VIRTNET_RX_PAD + xdp_headroom;
-
-               page_pool_dma_sync_for_cpu(rq->page_pool, page, offset, len);
-       }
-
        len -= vi->hdr_len;
        u64_stats_add(&stats->bytes, len);
 
@@ -2398,9 +2391,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 
        head_skb = NULL;
 
-       if (rq->use_page_pool_dma)
-               page_pool_dma_sync_for_cpu(rq->page_pool, page, offset, len);
-
        u64_stats_add(&stats->bytes, len - vi->hdr_len);
 
        if (check_mergeable_len(dev, ctx, len))
@@ -2563,6 +2553,16 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
                return;
        }
 
+       /* Sync the memory before touching anything through buf,
+        * unless virtio core did it already.
+        */
+       if (rq->use_page_pool_dma) {
+               struct page *page = virt_to_head_page(buf);
+               int offset = buf - page_address(page);
+
+               page_pool_dma_sync_for_cpu(rq->page_pool, page, offset, len);
+       }
+
        /* About the flags below:
         * 1. Save the flags early, as the XDP program might overwrite them.
         * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID