From: Ankit Garg Date: Tue, 3 Mar 2026 19:55:48 +0000 (-0800) Subject: gve: pull network headers into skb linear part X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=0c7025fd24db5b2f8cbd2e1f0050c033b923fd48;p=thirdparty%2Flinux.git gve: pull network headers into skb linear part Currently, in DQO mode with hw-gro enabled, the entire received packet is placed into skb fragments when header-split is disabled. This leaves the skb linear part empty, forcing the networking stack to do multiple small memory copies to access eth, IP and TCP headers. This patch adds a single memcpy to put all headers into the linear portion before the packet reaches the SW GRO stack, thus eliminating multiple smaller memcpy calls. Additionally, the criterion for calling napi_gro_frags() was updated. Since skb->head is now populated, we instead check if the SKB is the cached NAPI scratchpad to ensure we continue using the zero-allocation path. Signed-off-by: Ankit Garg Reviewed-by: Eric Dumazet Reviewed-by: Harshitha Ramamurthy Signed-off-by: Joshua Washington Link: https://patch.msgid.link/20260303195549.2679070-4-joshwash@google.com Signed-off-by: Paolo Abeni --- diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index 27885ccf52267..7924dce719e29 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -943,6 +943,8 @@ static int gve_rx_complete_rsc(struct sk_buff *skb, { struct skb_shared_info *shinfo = skb_shinfo(skb); int rsc_segments, rsc_seg_len, hdr_len; + skb_frag_t *frag; + void *va; /* HW-GRO only coalesces TCP. */ if (ptype.l4_type != GVE_L4_TYPE_TCP) @@ -970,10 +972,20 @@ static int gve_rx_complete_rsc(struct sk_buff *skb, /* HW-GRO packets are guaranteed to have complete TCP/IP * headers in frag[0] when header-split is not enabled. 
*/ - hdr_len = eth_get_headlen(skb->dev, - skb_frag_address(&shinfo->frags[0]), - skb_frag_size(&shinfo->frags[0])); + frag = &skb_shinfo(skb)->frags[0]; + va = skb_frag_address(frag); + hdr_len = + eth_get_headlen(skb->dev, va, skb_frag_size(frag)); rsc_segments = DIV_ROUND_UP(skb->len - hdr_len, rsc_seg_len); + skb_copy_to_linear_data(skb, va, hdr_len); + skb_frag_size_sub(frag, hdr_len); + /* Verify we didn't empty the fragment completely as that could + * otherwise lead to page leaks. + */ + DEBUG_NET_WARN_ON_ONCE(!skb_frag_size(frag)); + skb_frag_off_add(frag, hdr_len); + skb->data_len -= hdr_len; + skb->tail += hdr_len; } shinfo->gso_size = rsc_seg_len; shinfo->gso_segs = rsc_segments; @@ -1010,7 +1022,7 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi, return err; } - if (skb_headlen(rx->ctx.skb_head) == 0) + if (rx->ctx.skb_head == napi->skb) napi_gro_frags(napi); else napi_gro_receive(napi, rx->ctx.skb_head);