From 95847c69d0f559fd6701bcd60e8e8b889fee2499 Mon Sep 17 00:00:00 2001
From: Peter Oskolkov <posk@google.com>
Date: Tue, 23 Apr 2019 10:48:22 -0700
Subject: net: IP defrag: encapsulate rbtree defrag code into callable
 functions

[ Upstream commit c23f35d19db3b36ffb9e04b08f1d91565d15f84f ]

This is a refactoring patch: without changing runtime behavior,
it moves rbtree-related code from IPv4-specific files/functions
into .h/.c defrag files shared with IPv6 defragmentation code.

v2: make handling of overlapping packets match upstream.

Signed-off-by: Peter Oskolkov <posk@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Florian Westphal <fw@strlen.de>
Cc: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/net/inet_frag.h  |  16 ++-
 net/ipv4/inet_fragment.c | 293 +++++++++++++++++++++++++++++++++++++
 net/ipv4/ip_fragment.c   | 302 +++++----------------------------------
 3 files changed, 342 insertions(+), 269 deletions(-)
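
Note: for reference, the shared helpers are meant to be driven roughly as
sketched below (modeled on ip_frag_queue()/ip_frag_reasm() in the diff that
follows; my_defrag_one() and its "complete" flag are illustrative stand-ins
for the protocol-specific bookkeeping, not code from this patch):

	static int my_defrag_one(struct inet_frag_queue *q,
				 struct sk_buff *skb,
				 int offset, int end, bool complete)
	{
		struct sk_buff *prev_tail = q->fragments_tail;
		void *reasm_data;
		int err;

		/* Insert skb into the rbtree of fragment "runs";
		 * duplicates and overlaps are rejected per RFC 5722.
		 */
		err = inet_frag_queue_insert(q, skb, offset, end);
		if (err)
			return err;	/* IPFRAG_DUP or IPFRAG_OVERLAP */

		if (!complete)
			return -EINPROGRESS;

		/* Make the skb just received the head, unclone it and,
		 * if it carries a frag_list, split that off.
		 */
		reasm_data = inet_frag_reasm_prepare(q, skb, prev_tail);
		if (!reasm_data)
			return -ENOMEM;

		/* Walk the runs in order, chaining every fragment into
		 * the head's frag_list and fixing up len/csum/truesize.
		 */
		inet_frag_reasm_finish(q, skb, reasm_data);
		return 0;
	}
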
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 335cf7851f12..008f64823c41 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -77,8 +77,8 @@ struct inet_frag_queue {
 	struct timer_list	timer;
 	spinlock_t		lock;
 	refcount_t		refcnt;
-	struct sk_buff		*fragments;  /* Used in IPv6. */
-	struct rb_root		rb_fragments; /* Used in IPv4. */
+	struct sk_buff		*fragments;  /* used in 6lowpan IPv6. */
+	struct rb_root		rb_fragments; /* Used in IPv4/IPv6. */
 	struct sk_buff		*fragments_tail;
 	struct sk_buff		*last_run_head;
 	ktime_t			stamp;
@@ -153,4 +153,16 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
 
 extern const u8 ip_frag_ecn_table[16];
 
+/* Return values of inet_frag_queue_insert() */
+#define IPFRAG_OK	0
+#define IPFRAG_DUP	1
+#define IPFRAG_OVERLAP	2
+
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+			   int offset, int end);
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+			      struct sk_buff *parent);
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+			    void *reasm_data);
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
+
 #endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 6ffee9d2b0e5..481cded81b2d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
 #include <net/sock.h>
 #include <net/inet_frag.h>
 #include <net/inet_ecn.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
+	union {
+		struct inet_skb_parm	h4;
+		struct inet6_skb_parm	h6;
+	};
+	struct sk_buff		*next_frag;
+	int			frag_run_len;
+};
+
+#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void fragcb_clear(struct sk_buff *skb)
+{
+	RB_CLEAR_NODE(&skb->rbnode);
+	FRAG_CB(skb)->next_frag = NULL;
+	FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void fragrun_append_to_last(struct inet_frag_queue *q,
+				   struct sk_buff *skb)
+{
+	fragcb_clear(skb);
+
+	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+	FRAG_CB(q->fragments_tail)->next_frag = skb;
+	q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+	fragcb_clear(skb);
+
+	if (q->last_run_head)
+		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+			     &q->last_run_head->rbnode.rb_right);
+	else
+		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+	rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+	q->fragments_tail = skb;
+	q->last_run_head = skb;
+}
 
 /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
  * Value : 0xff if frame should be dropped.
@@ -122,6 +178,28 @@ static void inet_frag_destroy_rcu(struct rcu_head *head)
 	kmem_cache_free(f->frags_cachep, q);
 }
 
+unsigned int inet_frag_rbtree_purge(struct rb_root *root)
+{
+	struct rb_node *p = rb_first(root);
+	unsigned int sum = 0;
+
+	while (p) {
+		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+		p = rb_next(p);
+		rb_erase(&skb->rbnode, root);
+		while (skb) {
+			struct sk_buff *next = FRAG_CB(skb)->next_frag;
+
+			sum += skb->truesize;
+			kfree_skb(skb);
+			skb = next;
+		}
+	}
+	return sum;
+}
+EXPORT_SYMBOL(inet_frag_rbtree_purge);
+
 void inet_frag_destroy(struct inet_frag_queue *q)
 {
 	struct sk_buff *fp;
@@ -224,3 +302,218 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
 	return inet_frag_create(nf, key);
 }
 EXPORT_SYMBOL(inet_frag_find);
+
+int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
+			   int offset, int end)
+{
+	struct sk_buff *last = q->fragments_tail;
+
+	/* RFC5722, Section 4, amended by Errata ID : 3089
+	 *                          When reassembling an IPv6 datagram, if
+	 *   one or more its constituent fragments is determined to be an
+	 *   overlapping fragment, the entire datagram (and any constituent
+	 *   fragments) MUST be silently discarded.
+	 *
+	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
+	 * queue/fragments kept for later reassembly).
+	 */
+	if (!last)
+		fragrun_create(q, skb);  /* First fragment. */
+	else if (last->ip_defrag_offset + last->len < end) {
+		/* This is the common case: skb goes to the end. */
+		/* Detect and discard overlaps. */
+		if (offset < last->ip_defrag_offset + last->len)
+			return IPFRAG_OVERLAP;
+		if (offset == last->ip_defrag_offset + last->len)
+			fragrun_append_to_last(q, skb);
+		else
+			fragrun_create(q, skb);
+	} else {
+		/* Binary search. Note that skb can become the first fragment,
+		 * but not the last (covered above).
+		 */
+		struct rb_node **rbn, *parent;
+
+		rbn = &q->rb_fragments.rb_node;
+		do {
+			struct sk_buff *curr;
+			int curr_run_end;
+
+			parent = *rbn;
+			curr = rb_to_skb(parent);
+			curr_run_end = curr->ip_defrag_offset +
+					FRAG_CB(curr)->frag_run_len;
+			if (end <= curr->ip_defrag_offset)
+				rbn = &parent->rb_left;
+			else if (offset >= curr_run_end)
+				rbn = &parent->rb_right;
+			else if (offset >= curr->ip_defrag_offset &&
+				 end <= curr_run_end)
+				return IPFRAG_DUP;
+			else
+				return IPFRAG_OVERLAP;
+		} while (*rbn);
+		/* Here we have parent properly set, and rbn pointing to
+		 * one of its NULL left/right children. Insert skb.
+		 */
+		fragcb_clear(skb);
+		rb_link_node(&skb->rbnode, parent, rbn);
+		rb_insert_color(&skb->rbnode, &q->rb_fragments);
+	}
+
+	skb->ip_defrag_offset = offset;
+
+	return IPFRAG_OK;
+}
+EXPORT_SYMBOL(inet_frag_queue_insert);
+
+void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
+			      struct sk_buff *parent)
+{
+	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
+	struct sk_buff **nextp;
+	int delta;
+
+	if (head != skb) {
+		fp = skb_clone(skb, GFP_ATOMIC);
+		if (!fp)
+			return NULL;
+		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+		if (RB_EMPTY_NODE(&skb->rbnode))
+			FRAG_CB(parent)->next_frag = fp;
+		else
+			rb_replace_node(&skb->rbnode, &fp->rbnode,
+					&q->rb_fragments);
+		if (q->fragments_tail == skb)
+			q->fragments_tail = fp;
+		skb_morph(skb, head);
+		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+		rb_replace_node(&head->rbnode, &skb->rbnode,
+				&q->rb_fragments);
+		consume_skb(head);
+		head = skb;
+	}
+	WARN_ON(head->ip_defrag_offset != 0);
+
+	delta = -head->truesize;
+
+	/* Head of list must not be cloned. */
+	if (skb_unclone(head, GFP_ATOMIC))
+		return NULL;
+
+	delta += head->truesize;
+	if (delta)
+		add_frag_mem_limit(q->net, delta);
+
+	/* If the first fragment is fragmented itself, we split
+	 * it to two chunks: the first with data and paged part
+	 * and the second, holding only fragments.
+	 */
+	if (skb_has_frag_list(head)) {
+		struct sk_buff *clone;
+		int i, plen = 0;
+
+		clone = alloc_skb(0, GFP_ATOMIC);
+		if (!clone)
+			return NULL;
+		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+		skb_frag_list_init(head);
+		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+		clone->data_len = head->data_len - plen;
+		clone->len = clone->data_len;
+		head->truesize += clone->truesize;
+		clone->csum = 0;
+		clone->ip_summed = head->ip_summed;
+		add_frag_mem_limit(q->net, clone->truesize);
+		skb_shinfo(head)->frag_list = clone;
+		nextp = &clone->next;
+	} else {
+		nextp = &skb_shinfo(head)->frag_list;
+	}
+
+	return nextp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_prepare);
+
+void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
+			    void *reasm_data)
+{
+	struct sk_buff **nextp = (struct sk_buff **)reasm_data;
+	struct rb_node *rbn;
+	struct sk_buff *fp;
+
+	skb_push(head, head->data - skb_network_header(head));
+
+	/* Traverse the tree in order, to build frag_list. */
+	fp = FRAG_CB(head)->next_frag;
+	rbn = rb_next(&head->rbnode);
+	rb_erase(&head->rbnode, &q->rb_fragments);
+	while (rbn || fp) {
+		/* fp points to the next sk_buff in the current run;
+		 * rbn points to the next run.
+		 */
+		/* Go through the current run. */
+		while (fp) {
+			*nextp = fp;
+			nextp = &fp->next;
+			fp->prev = NULL;
+			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+			fp->sk = NULL;
+			head->data_len += fp->len;
+			head->len += fp->len;
+			if (head->ip_summed != fp->ip_summed)
+				head->ip_summed = CHECKSUM_NONE;
+			else if (head->ip_summed == CHECKSUM_COMPLETE)
+				head->csum = csum_add(head->csum, fp->csum);
+			head->truesize += fp->truesize;
+			fp = FRAG_CB(fp)->next_frag;
+		}
+		/* Move to the next run. */
+		if (rbn) {
+			struct rb_node *rbnext = rb_next(rbn);
+
+			fp = rb_to_skb(rbn);
+			rb_erase(rbn, &q->rb_fragments);
+			rbn = rbnext;
+		}
+	}
+	sub_frag_mem_limit(q->net, head->truesize);
+
+	*nextp = NULL;
+	head->next = NULL;
+	head->prev = NULL;
+	head->tstamp = q->stamp;
+}
+EXPORT_SYMBOL(inet_frag_reasm_finish);
+
+struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
+{
+	struct sk_buff *head;
+
+	if (q->fragments) {
+		head = q->fragments;
+		q->fragments = head->next;
+	} else {
+		struct sk_buff *skb;
+
+		head = skb_rb_first(&q->rb_fragments);
+		if (!head)
+			return NULL;
+		skb = FRAG_CB(head)->next_frag;
+		if (skb)
+			rb_replace_node(&head->rbnode, &skb->rbnode,
+					&q->rb_fragments);
+		else
+			rb_erase(&head->rbnode, &q->rb_fragments);
+		memset(&head->rbnode, 0, sizeof(head->rbnode));
+		barrier();
+	}
+	if (head == q->fragments_tail)
+		q->fragments_tail = NULL;
+
+	sub_frag_mem_limit(q->net, head->truesize);
+
+	return head;
+}
+EXPORT_SYMBOL(inet_frag_pull_head);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index d95b32af4a0e..5a1d39e32196 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
 static const char ip_frag_cache_name[] = "ip4-frags";
 
-/* Use skb->cb to track consecutive/adjacent fragments coming at
- * the end of the queue. Nodes in the rb-tree queue will
- * contain "runs" of one or more adjacent fragments.
- *
- * Invariants:
- * - next_frag is NULL at the tail of a "run";
- * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
- */
-struct ipfrag_skb_cb {
-	struct inet_skb_parm	h;
-	struct sk_buff		*next_frag;
-	int			frag_run_len;
-};
-
-#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))
-
-static void ip4_frag_init_run(struct sk_buff *skb)
-{
-	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
-
-	FRAG_CB(skb)->next_frag = NULL;
-	FRAG_CB(skb)->frag_run_len = skb->len;
-}
-
-/* Append skb to the last "run". */
-static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
-					struct sk_buff *skb)
-{
-	RB_CLEAR_NODE(&skb->rbnode);
-	FRAG_CB(skb)->next_frag = NULL;
-
-	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
-	FRAG_CB(q->fragments_tail)->next_frag = skb;
-	q->fragments_tail = skb;
-}
-
-/* Create a new "run" with the skb. */
-static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
-{
-	if (q->last_run_head)
-		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
-			     &q->last_run_head->rbnode.rb_right);
-	else
-		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
-	rb_insert_color(&skb->rbnode, &q->rb_fragments);
-
-	ip4_frag_init_run(skb);
-	q->fragments_tail = skb;
-	q->last_run_head = skb;
-}
-
 /* Describe an entry in the "incomplete datagrams" queue. */
 struct ipq {
 	struct inet_frag_queue q;
@@ -212,27 +161,9 @@ static void ip_expire(struct timer_list *t)
 	 * pull the head out of the tree in order to be able to
 	 * deal with head->dev.
 	 */
-	if (qp->q.fragments) {
-		head = qp->q.fragments;
-		qp->q.fragments = head->next;
-	} else {
-		head = skb_rb_first(&qp->q.rb_fragments);
-		if (!head)
-			goto out;
-		if (FRAG_CB(head)->next_frag)
-			rb_replace_node(&head->rbnode,
-					&FRAG_CB(head)->next_frag->rbnode,
-					&qp->q.rb_fragments);
-		else
-			rb_erase(&head->rbnode, &qp->q.rb_fragments);
-		memset(&head->rbnode, 0, sizeof(head->rbnode));
-		barrier();
-	}
-	if (head == qp->q.fragments_tail)
-		qp->q.fragments_tail = NULL;
-
-	sub_frag_mem_limit(qp->q.net, head->truesize);
+	head = inet_frag_pull_head(&qp->q);
+	if (!head)
+		goto out;
 	head->dev = dev_get_by_index_rcu(net, qp->iif);
 	if (!head->dev)
 		goto out;
@@ -345,12 +276,10 @@ static int ip_frag_reinit(struct ipq *qp)
 static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 {
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
-	struct rb_node **rbn, *parent;
-	struct sk_buff *skb1, *prev_tail;
-	int ihl, end, skb1_run_end;
+	int ihl, end, flags, offset;
+	struct sk_buff *prev_tail;
 	struct net_device *dev;
 	unsigned int fragsize;
-	int flags, offset;
 	int err = -ENOENT;
 	u8 ecn;
 
@@ -382,7 +311,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		 */
 		if (end < qp->q.len ||
 		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
-			goto err;
+			goto discard_qp;
 		qp->q.flags |= INET_FRAG_LAST_IN;
 		qp->q.len = end;
 	} else {
@@ -394,82 +323,33 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	if (end > qp->q.len) {
 		/* Some bits beyond end -> corruption. */
 		if (qp->q.flags & INET_FRAG_LAST_IN)
-			goto err;
+			goto discard_qp;
 		qp->q.len = end;
 	}
 	if (end == offset)
-		goto err;
+		goto discard_qp;
 
 	err = -ENOMEM;
 	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
-		goto err;
+		goto discard_qp;
 
 	err = pskb_trim_rcsum(skb, end - offset);
 	if (err)
-		goto err;
+		goto discard_qp;
 
 	/* Note : skb->rbnode and skb->dev share the same location. */
 	dev = skb->dev;
 	/* Makes sure compiler wont do silly aliasing games */
 	barrier();
 
-	/* RFC5722, Section 4, amended by Errata ID : 3089
-	 *                          When reassembling an IPv6 datagram, if
-	 *   one or more its constituent fragments is determined to be an
-	 *   overlapping fragment, the entire datagram (and any constituent
-	 *   fragments) MUST be silently discarded.
-	 *
-	 * We do the same here for IPv4 (and increment an snmp counter) but
-	 * we do not want to drop the whole queue in response to a duplicate
-	 * fragment.
-	 */
-
-	err = -EINVAL;
-	/* Find out where to put this fragment. */
 	prev_tail = qp->q.fragments_tail;
-	if (!prev_tail)
-		ip4_frag_create_run(&qp->q, skb);  /* First fragment. */
-	else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
-		/* This is the common case: skb goes to the end. */
-		/* Detect and discard overlaps. */
-		if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
-			goto discard_qp;
-		if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
-			ip4_frag_append_to_last_run(&qp->q, skb);
-		else
-			ip4_frag_create_run(&qp->q, skb);
-	} else {
-		/* Binary search. Note that skb can become the first fragment,
-		 * but not the last (covered above).
-		 */
-		rbn = &qp->q.rb_fragments.rb_node;
-		do {
-			parent = *rbn;
-			skb1 = rb_to_skb(parent);
-			skb1_run_end = skb1->ip_defrag_offset +
-				       FRAG_CB(skb1)->frag_run_len;
-			if (end <= skb1->ip_defrag_offset)
-				rbn = &parent->rb_left;
-			else if (offset >= skb1_run_end)
-				rbn = &parent->rb_right;
-			else if (offset >= skb1->ip_defrag_offset &&
-				 end <= skb1_run_end)
-				goto err; /* No new data, potential duplicate */
-			else
-				goto discard_qp; /* Found an overlap */
-		} while (*rbn);
-		/* Here we have parent properly set, and rbn pointing to
-		 * one of its NULL left/right children. Insert skb.
-		 */
-		ip4_frag_init_run(skb);
-		rb_link_node(&skb->rbnode, parent, rbn);
-		rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
-	}
+	err = inet_frag_queue_insert(&qp->q, skb, offset, end);
+	if (err)
+		goto insert_error;
 
 	if (dev)
 		qp->iif = dev->ifindex;
-	skb->ip_defrag_offset = offset;
 
 	qp->q.stamp = skb->tstamp;
 	qp->q.meat += skb->len;
@@ -494,15 +374,24 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		skb->_skb_refdst = 0UL;
 		err = ip_frag_reasm(qp, skb, prev_tail, dev);
 		skb->_skb_refdst = orefdst;
+		if (err)
+			inet_frag_kill(&qp->q);
 		return err;
 	}
 
 	skb_dst_drop(skb);
 	return -EINPROGRESS;
 
+insert_error:
+	if (err == IPFRAG_DUP) {
+		kfree_skb(skb);
+		return -EINVAL;
+	}
+	err = -EINVAL;
+	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
 discard_qp:
 	inet_frag_kill(&qp->q);
-	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
+	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 err:
 	kfree_skb(skb);
 	return err;
@@ -514,13 +403,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 {
 	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
 	struct iphdr *iph;
-	struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
-	struct sk_buff **nextp; /* To build frag_list. */
-	struct rb_node *rbn;
-	int len;
-	int ihlen;
-	int delta;
-	int err;
+	void *reasm_data;
+	int len, err;
 	u8 ecn;
 
 	ipq_kill(qp);
@@ -530,117 +414,23 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 		err = -EINVAL;
 		goto out_fail;
 	}
-	/* Make the one we just received the head. */
-	if (head != skb) {
-		fp = skb_clone(skb, GFP_ATOMIC);
-		if (!fp)
-			goto out_nomem;
-		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
-		if (RB_EMPTY_NODE(&skb->rbnode))
-			FRAG_CB(prev_tail)->next_frag = fp;
-		else
-			rb_replace_node(&skb->rbnode, &fp->rbnode,
-					&qp->q.rb_fragments);
-		if (qp->q.fragments_tail == skb)
-			qp->q.fragments_tail = fp;
-		skb_morph(skb, head);
-		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
-		rb_replace_node(&head->rbnode, &skb->rbnode,
-				&qp->q.rb_fragments);
-		consume_skb(head);
-		head = skb;
-	}
-
-	WARN_ON(head->ip_defrag_offset != 0);
-
-	/* Allocate a new buffer for the datagram. */
-	ihlen = ip_hdrlen(head);
-	len = ihlen + qp->q.len;
+	/* Make the one we just received the head. */
+	reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);
+	if (!reasm_data)
+		goto out_nomem;
 
+	len = ip_hdrlen(skb) + qp->q.len;
 	err = -E2BIG;
 	if (len > 65535)
 		goto out_oversize;
 
-	delta = - head->truesize;
-
-	/* Head of list must not be cloned. */
-	if (skb_unclone(head, GFP_ATOMIC))
-		goto out_nomem;
-
-	delta += head->truesize;
-	if (delta)
-		add_frag_mem_limit(qp->q.net, delta);
-
-	/* If the first fragment is fragmented itself, we split
-	 * it to two chunks: the first with data and paged part
-	 * and the second, holding only fragments. */
-	if (skb_has_frag_list(head)) {
-		struct sk_buff *clone;
-		int i, plen = 0;
-
-		clone = alloc_skb(0, GFP_ATOMIC);
-		if (!clone)
-			goto out_nomem;
-		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
-		skb_frag_list_init(head);
-		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
-			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
-		clone->len = clone->data_len = head->data_len - plen;
-		head->truesize += clone->truesize;
-		clone->csum = 0;
-		clone->ip_summed = head->ip_summed;
-		add_frag_mem_limit(qp->q.net, clone->truesize);
-		skb_shinfo(head)->frag_list = clone;
-		nextp = &clone->next;
-	} else {
-		nextp = &skb_shinfo(head)->frag_list;
-	}
+	inet_frag_reasm_finish(&qp->q, skb, reasm_data);
 
-	skb_push(head, head->data - skb_network_header(head));
+	skb->dev = dev;
+	IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
 
-	/* Traverse the tree in order, to build frag_list. */
-	fp = FRAG_CB(head)->next_frag;
-	rbn = rb_next(&head->rbnode);
-	rb_erase(&head->rbnode, &qp->q.rb_fragments);
-	while (rbn || fp) {
-		/* fp points to the next sk_buff in the current run;
-		 * rbn points to the next run.
-		 */
-		/* Go through the current run. */
-		while (fp) {
-			*nextp = fp;
-			nextp = &fp->next;
-			fp->prev = NULL;
-			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
-			fp->sk = NULL;
-			head->data_len += fp->len;
-			head->len += fp->len;
-			if (head->ip_summed != fp->ip_summed)
-				head->ip_summed = CHECKSUM_NONE;
-			else if (head->ip_summed == CHECKSUM_COMPLETE)
-				head->csum = csum_add(head->csum, fp->csum);
-			head->truesize += fp->truesize;
-			fp = FRAG_CB(fp)->next_frag;
-		}
-		/* Move to the next run. */
-		if (rbn) {
-			struct rb_node *rbnext = rb_next(rbn);
-
-			fp = rb_to_skb(rbn);
-			rb_erase(rbn, &qp->q.rb_fragments);
-			rbn = rbnext;
-		}
-	}
-	sub_frag_mem_limit(qp->q.net, head->truesize);
-
-	*nextp = NULL;
-	head->next = NULL;
-	head->prev = NULL;
-	head->dev = dev;
-	head->tstamp = qp->q.stamp;
-	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
 
-	iph = ip_hdr(head);
+	iph = ip_hdr(skb);
 	iph->tot_len = htons(len);
 	iph->tos |= ecn;
 
@@ -653,7 +443,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
 	 * from one very small df-fragment and one large non-df frag.
 	 */
 	if (qp->max_df_size == qp->q.max_size) {
-		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
+		IPCB(skb)->flags |= IPSKB_FRAG_PMTU;
 		iph->frag_off = htons(IP_DF);
 	} else {
 		iph->frag_off = 0;
@@ -751,28 +541,6 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
 }
 EXPORT_SYMBOL(ip_check_defrag);
 
-unsigned int inet_frag_rbtree_purge(struct rb_root *root)
-{
-	struct rb_node *p = rb_first(root);
-	unsigned int sum = 0;
-
-	while (p) {
-		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
-
-		p = rb_next(p);
-		rb_erase(&skb->rbnode, root);
-		while (skb) {
-			struct sk_buff *next = FRAG_CB(skb)->next_frag;
-
-			sum += skb->truesize;
-			kfree_skb(skb);
-			skb = next;
-		}
-	}
-	return sum;
-}
-EXPORT_SYMBOL(inet_frag_rbtree_purge);