From 203ae2b2db098f24e3e8f82c1bd3657e58b1400b Mon Sep 17 00:00:00 2001
From: Peter Oskolkov <posk@google.com>
Date: Tue, 23 Apr 2019 10:25:32 -0700
Subject: net: IP6 defrag: use rbtrees for IPv6 defrag

[ Upstream commit d4289fcc9b16b89619ee1c54f829e05e56de8b9a ]

Currently, IPv6 defragmentation code drops non-last fragments that
are smaller than 1280 bytes: see
commit 0ed4229b08c1 ("ipv6: defrag: drop non-last frags smaller than min mtu")

This behavior is not specified in IPv6 RFCs and appears to break
compatibility with some IPv6 implementations, as reported here:
https://www.spinics.net/lists/netdev/msg543846.html

This patch re-uses common IP defragmentation queueing and reassembly
code in IPv6, removing the 1280 byte restriction.

v2: change handling of overlaps to match that of upstream.
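
As context for the change, here is a minimal userspace sketch of the
insertion policy the shared helper implements (inet_frag_queue_insert()
and the IPFRAG_DUP case can be seen in the diff below): fragments are
keyed by their offset in a search tree, a fragment whose range is
already fully covered is treated as a duplicate and only that copy is
dropped, while any partial overlap makes the caller discard the whole
queue, per RFC 5722, Section 4. This is an illustration only, under
stated assumptions: a plain unbalanced BST stands in for the kernel's
rbtree, adjacent fragments are not coalesced into runs (cf.
last_run_head in the diff), and struct frag, frag_insert() and the
FRAG_* values are made-up names, not kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the kernel's IPFRAG_OK / IPFRAG_DUP / IPFRAG_OVERLAP. */
enum frag_insert_result { FRAG_OK, FRAG_DUP, FRAG_OVERLAP };

struct frag {
	unsigned int offset;		/* first payload byte covered */
	unsigned int end;		/* one past the last byte covered */
	struct frag *left, *right;	/* toy BST links (kernel: rb_node) */
};

/* Walk down the tree keyed by offset; we branch away only from subtrees
 * that cannot intersect [offset, end), so any overlap is met on the path.
 */
static enum frag_insert_result
frag_insert(struct frag **rootp, unsigned int offset, unsigned int end)
{
	struct frag *node;

	while (*rootp) {
		struct frag *cur = *rootp;

		if (end <= cur->offset)			/* entirely before */
			rootp = &cur->left;
		else if (offset >= cur->end)		/* entirely after */
			rootp = &cur->right;
		else if (offset >= cur->offset && end <= cur->end)
			return FRAG_DUP;	/* already covered: drop copy */
		else
			return FRAG_OVERLAP;	/* caller drops whole queue */
	}

	node = calloc(1, sizeof(*node));
	if (!node) {
		perror("calloc");
		exit(EXIT_FAILURE);
	}
	node->offset = offset;
	node->end = end;
	*rootp = node;
	return FRAG_OK;
}

int main(void)
{
	static const char * const names[] = { "OK", "DUP", "OVERLAP" };
	static const struct { unsigned int offset, end; } in[] = {
		{ 1280, 2560 },	/* arrives first, out of order */
		{    0, 1280 },
		{    0, 1280 },	/* exact duplicate: ignored, queue kept */
		{ 1000, 1400 },	/* partial overlap: queue would be dropped */
	};
	struct frag *root = NULL;
	size_t i;

	for (i = 0; i < sizeof(in) / sizeof(in[0]); i++)
		printf("insert [%u, %u) -> %s\n", in[i].offset, in[i].end,
		       names[frag_insert(&root, in[i].offset, in[i].end)]);
	return 0;
}

With fragments keyed this way, out-of-order arrival costs O(log n) per
fragment instead of a walk over a linear list, which is what the diff
below switches IPv6 reassembly to.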

Signed-off-by: Peter Oskolkov <posk@google.com>
Reported-by: Tom Herbert <tom@herbertland.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/net/ipv6_frag.h |  11 +-
 net/ipv6/reassembly.c   | 240 +++++++++++-----------------------------
 2 files changed, 75 insertions(+), 176 deletions(-)
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index 6ced1e6899b6..28aa9b30aece 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -82,8 +82,15 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
 	/* Don't send error if the first segment did not arrive. */
-	head = fq->q.fragments;
-	if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+	if (!(fq->q.flags & INET_FRAG_FIRST_IN))
+	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
+	 * pull the head out of the tree in order to be able to
+	 * deal with head->dev.
+	 */
+	head = inet_frag_pull_head(&fq->q);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 7c943392c128..095825f964e2 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -69,8 +69,8 @@ static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
 static struct inet_frags ip6_frags;
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
-			  struct net_device *dev);
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			  struct sk_buff *prev_tail, struct net_device *dev);
 static void ip6_frag_expire(struct timer_list *t)
@@ -111,21 +111,26 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 			  struct frag_hdr *fhdr, int nhoff,
-	struct sk_buff *prev, *next;
-	struct net_device *dev;
-	int offset, end, fragsize;
 	struct net *net = dev_net(skb_dst(skb)->dev);
+	int offset, end, fragsize;
+	struct sk_buff *prev_tail;
+	struct net_device *dev;
 	if (fq->q.flags & INET_FRAG_COMPLETE)
 	offset = ntohs(fhdr->frag_off) & ~0x7;
 	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
 			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
 	if ((unsigned int)end > IPV6_MAXPLEN) {
 		*prob_offset = (u8 *)&fhdr->frag_off - skb_network_header(skb);
+		/* note that if prob_offset is set, the skb is freed elsewhere,
+		 * we do not free it here.
+		 */
@@ -145,7 +150,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 		if (end < fq->q.len ||
 		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
 		fq->q.flags |= INET_FRAG_LAST_IN;
@@ -162,70 +167,35 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 		if (end > fq->q.len) {
 			/* Some bits beyond end -> corruption. */
 			if (fq->q.flags & INET_FRAG_LAST_IN)
 	/* Point into the IP datagram 'data' part. */
 	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
-	if (pskb_trim_rcsum(skb, end - offset))
-	/* Find out which fragments are in front and at the back of us
-	 * in the chain of fragments so far. We must know where to put
-	 * this fragment, right?
-	prev = fq->q.fragments_tail;
-	if (!prev || prev->ip_defrag_offset < offset) {
-	for (next = fq->q.fragments; next != NULL; next = next->next) {
-		if (next->ip_defrag_offset >= offset)
-			break;	/* bingo! */
-	/* RFC5722, Section 4, amended by Errata ID : 3089
-	 * When reassembling an IPv6 datagram, if
-	 * one or more its constituent fragments is determined to be an
-	 * overlapping fragment, the entire datagram (and any constituent
-	 * fragments) MUST be silently discarded.
-	/* Check for overlap with preceding fragment. */
-	    (prev->ip_defrag_offset + prev->len) > offset)
-	/* Look for overlap with succeeding segment. */
-	if (next && next->ip_defrag_offset < end)
+	err = pskb_trim_rcsum(skb, end - offset);
-	/* Note : skb->ip_defrag_offset and skb->dev share the same location */
+	/* Note : skb->rbnode and skb->dev share the same location. */
-		fq->iif = dev->ifindex;
 	/* Makes sure compiler wont do silly aliasing games */
-	skb->ip_defrag_offset = offset;
-	/* Insert this fragment in the chain of fragments. */
-		fq->q.fragments_tail = skb;
-		fq->q.fragments = skb;
+	prev_tail = fq->q.fragments_tail;
+	err = inet_frag_queue_insert(&fq->q, skb, offset, end);
+		fq->iif = dev->ifindex;
 	fq->q.stamp = skb->tstamp;
 	fq->q.meat += skb->len;
@@ -246,44 +216,48 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 	    fq->q.meat == fq->q.len) {
 		unsigned long orefdst = skb->_skb_refdst;
 		skb->_skb_refdst = 0UL;
-		res = ip6_frag_reasm(fq, prev, dev);
+		err = ip6_frag_reasm(fq, skb, prev_tail, dev);
 		skb->_skb_refdst = orefdst;
+	return -EINPROGRESS;
+	if (err == IPFRAG_DUP) {
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+			IPSTATS_MIB_REASM_OVERLAPS);
 	inet_frag_kill(&fq->q);
 	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 			IPSTATS_MIB_REASMFAILS);
  * Check if this packet is complete.
- * Returns NULL on failure by any reason, and pointer
- * to current nexthdr field in reassembled frame.
  * It is called with locked fq, and caller must check that
  * queue is eligible for reassembly i.e. it is not COMPLETE,
  * the last and the first frames arrived and all the bits are here.
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
-			  struct net_device *dev)
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
+			  struct sk_buff *prev_tail, struct net_device *dev)
 	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
-	struct sk_buff *fp, *head = fq->q.fragments;
-	int payload_len, delta;
 	inet_frag_kill(&fq->q);
@@ -292,121 +266,40 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	if (unlikely(ecn == 0xff))
-	/* Make the one we just received the head. */
-		fp = skb_clone(head, GFP_ATOMIC);
-		fp->next = head->next;
-			fq->q.fragments_tail = fp;
-		skb_morph(head, fq->q.fragments);
-		head->next = fq->q.fragments->next;
-		consume_skb(fq->q.fragments);
-		fq->q.fragments = head;
-	WARN_ON(head == NULL);
-	WARN_ON(head->ip_defrag_offset != 0);
+	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
-	/* Unfragmented part is taken from the first segment. */
-	payload_len = ((head->data - skb_network_header(head)) -
+	payload_len = ((skb->data - skb_network_header(skb)) -
 		       sizeof(struct ipv6hdr) + fq->q.len -
 		       sizeof(struct frag_hdr));
 	if (payload_len > IPV6_MAXPLEN)
-	delta = - head->truesize;
-	/* Head of list must not be cloned. */
-	if (skb_unclone(head, GFP_ATOMIC))
-	delta += head->truesize;
-		add_frag_mem_limit(fq->q.net, delta);
-	/* If the first fragment is fragmented itself, we split
-	 * it to two chunks: the first with data and paged part
-	 * and the second, holding only fragments. */
-	if (skb_has_frag_list(head)) {
-		struct sk_buff *clone;
-		clone = alloc_skb(0, GFP_ATOMIC);
-		clone->next = head->next;
-		head->next = clone;
-		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-		skb_frag_list_init(head);
-		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
-			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
-		clone->len = clone->data_len = head->data_len - plen;
-		head->data_len -= clone->len;
-		head->len -= clone->len;
-		clone->ip_summed = head->ip_summed;
-		add_frag_mem_limit(fq->q.net, clone->truesize);
 	/* We have to remove fragment header from datagram and to relocate
 	 * header in order to calculate ICV correctly. */
 	nhoff = fq->nhoffset;
-	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
-	memmove(head->head + sizeof(struct frag_hdr), head->head,
-		(head->data - head->head) - sizeof(struct frag_hdr));
-	if (skb_mac_header_was_set(head))
-		head->mac_header += sizeof(struct frag_hdr);
-	head->network_header += sizeof(struct frag_hdr);
-	skb_reset_transport_header(head);
-	skb_push(head, head->data - skb_network_header(head));
-	sum_truesize = head->truesize;
-	for (fp = head->next; fp;) {
-		struct sk_buff *next = fp->next;
-		sum_truesize += fp->truesize;
-		if (head->ip_summed != fp->ip_summed)
-			head->ip_summed = CHECKSUM_NONE;
-		else if (head->ip_summed == CHECKSUM_COMPLETE)
-			head->csum = csum_add(head->csum, fp->csum);
-		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
-			kfree_skb_partial(fp, headstolen);
-			if (!skb_shinfo(head)->frag_list)
-				skb_shinfo(head)->frag_list = fp;
-			head->data_len += fp->len;
-			head->len += fp->len;
-			head->truesize += fp->truesize;
-	sub_frag_mem_limit(fq->q.net, sum_truesize);
+	skb_network_header(skb)[nhoff] = skb_transport_header(skb)[0];
+	memmove(skb->head + sizeof(struct frag_hdr), skb->head,
+		(skb->data - skb->head) - sizeof(struct frag_hdr));
+	if (skb_mac_header_was_set(skb))
+		skb->mac_header += sizeof(struct frag_hdr);
+	skb->network_header += sizeof(struct frag_hdr);
+	skb_reset_transport_header(skb);
+	inet_frag_reasm_finish(&fq->q, skb, reasm_data);
-	head->tstamp = fq->q.stamp;
-	ipv6_hdr(head)->payload_len = htons(payload_len);
-	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
-	IP6CB(head)->nhoff = nhoff;
-	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
-	IP6CB(head)->frag_max_size = fq->q.max_size;
+	ipv6_hdr(skb)->payload_len = htons(payload_len);
+	ipv6_change_dsfield(ipv6_hdr(skb), 0xff, ecn);
+	IP6CB(skb)->nhoff = nhoff;
+	IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
+	IP6CB(skb)->frag_max_size = fq->q.max_size;
 	/* Yes, and fold redundant checksum back. 8) */
-	skb_postpush_rcsum(head, skb_network_header(head),
-			   skb_network_header_len(head));
+	skb_postpush_rcsum(skb, skb_network_header(skb),
+			   skb_network_header_len(skb));
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
@@ -414,6 +307,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	fq->q.fragments = NULL;
 	fq->q.rb_fragments = RB_ROOT;
 	fq->q.fragments_tail = NULL;
+	fq->q.last_run_head = NULL;
@@ -425,6 +319,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+	inet_frag_kill(&fq->q);
@@ -463,10 +358,6 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
-	if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
-	    fhdr->frag_off & htons(IP6_MF))
 	iif = skb->dev ? skb->dev->ifindex : 0;
 	fq = fq_find(net, fhdr->identification, hdr, iif);
@@ -484,6 +375,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 		__IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev),
 				IPSTATS_MIB_INHDRERRORS);
+		/* icmpv6_param_prob() calls kfree_skb(skb) */
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, prob_offset);