/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>
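
/* Segment a UDP tunnel skb: strip the outer headers, segment the inner
 * packet with the supplied callback, then rebuild the outer headers and
 * checksums on every resulting segment.
 */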
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;
	bool need_ipsec;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);
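
	/* partial now holds the outer checksum seed with the old length
	 * removed; each segment below re-adds its own length to rebuild
	 * a correct pseudo-header value.
	 */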

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  !need_ipsec &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));
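
	/* offload_csum ends up true only when the device can checksum this
	 * address family and no IPsec transform will rewrite the packet.
	 */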

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	features &= ~NETIF_F_CSUM_MASK;
	if (!need_csum || offload_csum)
		features |= NETIF_F_HW_CSUM;

	/* segment inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);
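
	/* outer_hlen spans everything from the outer MAC header up to the
	 * inner packet; udp_offset is the outer UDP header's offset from
	 * the MAC header.
	 */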
	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial && skb_is_gso(skb)) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}

		if (!need_csum)
			continue;

		uh->check = ~csum_fold(csum_add(partial,
						(__force __wsum)htonl(len)));

		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));
out:
	return segs;
}

struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features)
{
	struct sock *sk = gso_skb->sk;
	unsigned int sum_truesize = 0;
	struct sk_buff *segs, *seg;
	struct udphdr *uh;
	unsigned int mss;
	bool copy_dtor;
	__sum16 check;
	__be16 newlen;

	mss = skb_shinfo(gso_skb)->gso_size;
	if (gso_skb->len <= sizeof(*uh) + mss)
		return ERR_PTR(-EINVAL);

	skb_pull(gso_skb, sizeof(*uh));

	/* clear destructor to avoid skb_segment assigning it to tail */
	copy_dtor = gso_skb->destructor == sock_wfree;
	if (copy_dtor)
		gso_skb->destructor = NULL;

	segs = skb_segment(gso_skb, features);
	if (unlikely(IS_ERR_OR_NULL(segs))) {
		if (copy_dtor)
			gso_skb->destructor = sock_wfree;
		return segs;
	}

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	seg = segs;
	uh = udp_hdr(seg);

	/* compute checksum adjustment based on old length versus new */
	newlen = htons(sizeof(*uh) + mss);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);
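
	/* Every segment but the last carries exactly mss bytes of payload,
	 * so the same precomputed len/check pair can be stamped on each.
	 */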
	for (;;) {
		if (copy_dtor) {
			seg->destructor = sock_wfree;
			seg->sk = sk;
			sum_truesize += seg->truesize;
		}

		if (!seg->next)
			break;

		uh->len = newlen;
		uh->check = check;

		if (seg->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(seg, ~check);
		else
			uh->check = gso_make_checksum(seg, ~check) ? :
				    CSUM_MANGLED_0;

		seg = seg->next;
		uh = udp_hdr(seg);
	}

	/* last packet can be partial gso_size, account for that in checksum */
	newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
		       seg->data_len);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	uh->len = newlen;
	uh->check = check;

	if (seg->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(seg, ~check);
	else
		uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;

	/* update refcount for the packet */
	if (copy_dtor) {
		int delta = sum_truesize - gso_skb->truesize;

		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test().
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	}
	return segs;
}
EXPORT_SYMBOL_GPL(__udp_gso_segment);
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		return __udp_gso_segment(skb, features);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */
	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* If there is no outer header we can fake a checksum offload
	 * due to the fact that we have already done the checksum in
	 * software prior to segmenting the frame.
	 */
	if (!skb->encap_hdr_csum)
		features |= NETIF_F_HW_CSUM;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment().
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}
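
/* GRO reception for plain UDP sockets that enabled GRO
 * (udp_sk(sk)->gro_enabled): coalesce same-flow datagrams so they can
 * later be resegmented.
 */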
#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
					       struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *pp = NULL;
	struct udphdr *uh2;
	struct sk_buff *p;

	/* requires non zero csum, for symmetry with GSO */
	if (!uh->check) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	/* pull encapsulating udp header */
	skb_gro_pull(skb, sizeof(struct udphdr));
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = udp_hdr(p);

		/* Match ports only, as csum is always non zero */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Terminate the flow on len mismatch or if it grows "too
		 * much": under a small-packet flood the GRO count could
		 * otherwise grow a lot, leading to excessive truesize
		 * values.
		 */
		if (!skb_gro_receive(p, skb) &&
		    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
			pp = p;
		else if (uh->len != uh2->len)
			pp = p;

		return pp;
	}

	/* mismatch, but we never need to flush */
	return NULL;
}

INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
						__be16 sport, __be16 dport));
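
/* INDIRECT_CALL_INET() below resolves the socket-lookup helper to a
 * direct call when possible, avoiding a retpoline-expensive indirect
 * call.
 */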
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, udp_lookup_t lookup)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;
	struct sock *sk;

	rcu_read_lock();
	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
	if (!sk)
		goto out_unlock;

	if (udp_sk(sk)->gro_enabled) {
		pp = call_gro_receive(udp_gro_receive_segment, head, skb);
		rcu_read_unlock();
		return pp;
	}

	if (NAPI_GRO_CB(skb)->encap_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid) ||
	    !udp_sk(sk)->gro_receive)
		goto out_unlock;

	/* mark that this skb passed once through the tunnel gro layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	flush = 0;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports, and require that the checksums are either
		 * both zero or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);

out_unlock:
	rcu_read_unlock();
	skb_gro_flush_final(skb, pp, flush);
	return pp;
}
EXPORT_SYMBOL(udp_gro_receive);

INDIRECT_CALLABLE_SCOPE
struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);

skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}
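
/* Turn a GRO-merged UDP skb back into a valid SKB_GSO_UDP_L4 packet so
 * the stack (or forwarding path) can resegment it later.
 */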
static int udp_gro_complete_segment(struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);

	skb->csum_start = (unsigned char *)uh - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
	return 0;
}

int udp_gro_complete(struct sk_buff *skb, int nhoff,
		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;
	struct sock *sk;

	uh->len = newlen;

	rcu_read_lock();
	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_enabled) {
		err = udp_gro_complete_segment(skb);
	} else if (sk && udp_sk(sk)->gro_complete) {
		skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
					: SKB_GSO_UDP_TUNNEL;

		/* Set encapsulation before calling into inner gro_complete()
		 * functions to make them set up the inner offsets.
		 */
		skb->encapsulation = 1;
		err = udp_sk(sk)->gro_complete(sk, skb,
					       nhoff + sizeof(struct udphdr));
	}
	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	return err;
}
EXPORT_SYMBOL(udp_gro_complete);

INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	if (uh->check)
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);

	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
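
/* Offload callbacks registered with the inet layer for IPPROTO_UDP. */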
static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}