git.ipfire.org Git - thirdparty/linux.git/commitdiff
net: stmmac: Optimize cache prefetch in RX path
author: Furong Xu <0x1207@gmail.com>
Wed, 15 Jan 2025 03:27:04 +0000 (11:27 +0800)
committer: Paolo Abeni <pabeni@redhat.com>
Thu, 16 Jan 2025 11:14:23 +0000 (12:14 +0100)
Current code prefetches cache lines for the received frame first, and
then calls dma_sync_single_for_cpu() against this frame; this is wrong.
Cache prefetch should be triggered after dma_sync_single_for_cpu().

This patch brings a ~2.8% driver performance improvement in a TCP RX
throughput test with the iPerf tool on a single isolated Cortex-A65 CPU
core, increasing throughput from 2.84 Gbits/sec to 2.92 Gbits/sec.

Signed-off-by: Furong Xu <0x1207@gmail.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Reviewed-by: Yanteng Si <si.yanteng@linux.dev>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

index 811e2d372abf041393e608cb518c295ea2c486a6..ad928e8e21a950dd05acb9b7cad2ecf6eac34ce8 100644 (file)
@@ -5508,10 +5508,6 @@ read_again:
 
                /* Buffer is good. Go on. */
 
-               prefetch(page_address(buf->page) + buf->page_offset);
-               if (buf->sec_page)
-                       prefetch(page_address(buf->sec_page));
-
                buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
                len += buf1_len;
                buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
@@ -5533,6 +5529,7 @@ read_again:
 
                        dma_sync_single_for_cpu(priv->device, buf->addr,
                                                buf1_len, dma_dir);
+                       prefetch(page_address(buf->page) + buf->page_offset);
 
                        xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
                        xdp_prepare_buff(&ctx.xdp, page_address(buf->page),