/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

static const char ip6_frag_cache_name[] = "ip6-frags";

struct ip6frag_skb_cb {
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb *)((skb)->cb))

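/* ip6_frag_ecn() maps the ECN codepoint of the outer IPv6 header to a
 * single-bit mask.  The masks of all fragments are OR-ed into fq->ecn so
 * that ip6_frag_reasm() can validate the combination against
 * ip_frag_ecn_table when the datagram is rebuilt.
 */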
static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}

static struct inet_frags ip6_frags;

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
				    const struct in6_addr *daddr)
{
	net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
	return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
			    (__force u32)id, ip6_frags.rnd);
}

static unsigned int ip6_hashfn(const struct inet_frag_queue *q)
{
	const struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
}

bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct frag_queue *fq;
	const struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return	fq->id == arg->id &&
		fq->user == arg->user &&
		ipv6_addr_equal(&fq->saddr, arg->src) &&
		ipv6_addr_equal(&fq->daddr, arg->dst) &&
		(arg->iif == fq->iif ||
		 !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
					       IPV6_ADDR_LINKLOCAL)));
}
EXPORT_SYMBOL(ip6_frag_match);

void ip6_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	const struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	fq->saddr = *arg->src;
	fq->daddr = *arg->dst;
	fq->ecn = arg->ecn;
}
EXPORT_SYMBOL(ip6_frag_init);

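/* ip6_expire_frag_queue() runs when the reassembly timer fires.  It marks
 * the queue dead, counts a reassembly failure and, provided the first
 * fragment has arrived and the queue is not merely being evicted, sends an
 * ICMPv6 "fragment reassembly time exceeded" error.  The error is sent on
 * the device the last fragment arrived on (looked up via fq->iif), since a
 * cached device pointer might already be gone.
 */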
void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
			   struct inet_frags *frags)
{
	struct net_device *dev = NULL;

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;

	inet_frag_kill(&fq->q, frags);

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	if (inet_frag_evicting(&fq->q))
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/* But use the device on which the LAST segment arrived as the
	 * source device.  Do not use the fq->dev pointer directly; the
	 * device might already have disappeared.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q, frags);
}
EXPORT_SYMBOL(ip6_expire_frag_queue);

static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
	net = container_of(fq->q.net, struct net, ipv6.frags);

	ip6_expire_frag_queue(net, fq, &ip6_frags);
}

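/* fq_find() builds the lookup key for this datagram (fragment id, source,
 * destination, incoming interface and ECN bits) and asks the shared
 * inet_frag hash for a matching queue, creating one if none exists.
 * NULL is returned when no queue can be found or created.
 */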
static struct frag_queue *
fq_find(struct net *net, __be32 id, const struct in6_addr *src,
	const struct in6_addr *dst, int iif, u8 ecn)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;
	arg.iif = iif;
	arg.ecn = ecn;

	hash = inet6_hash_frag(id, src, dst);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct frag_queue, q);
}

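/* ip6_frag_queue() adds one fragment to its reassembly queue: it computes
 * the fragment's byte range, validates it per RFC 2460 (ICMPv6 parameter
 * problem for oversized or misaligned fragments), inserts the skb into the
 * offset-ordered fragment list, silently drops the whole queue on any
 * overlap (RFC 5722), and calls ip6_frag_reasm() once the first and last
 * fragments have arrived and no bytes are missing.  Returns the result of
 * ip6_frag_reasm() in that case, -1 otherwise.
 */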
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);
	u8 ecn;

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto err;

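	/* The low three bits of frag_off carry the M flag and reserved
	 * bits, so masking with ~0x7 yields the fragment offset in bytes.
	 * "end" is that offset plus the number of payload bytes following
	 * the fragment header, i.e. payload_len minus the extension headers
	 * between the fixed IPv6 header and the end of the fragment header.
	 */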
	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	ecn = ip6_frag_ecn(ipv6_hdr(skb));

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.flags |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.flags & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC5722, Section 4, amended by Errata ID 3089:
	 * When reassembling an IPv6 datagram, if one or more of its
	 * constituent fragments is determined to be an overlapping
	 * fragment, the entire datagram (and any constituent fragments)
	 * MUST be silently discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	fq->ecn |= ecn;
	add_frag_mem_limit(fq->q.net, skb->truesize);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.flags |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len) {
		int res;
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		res = ip6_frag_reasm(fq, prev, dev);
		skb->_skb_refdst = orefdst;
		return res;
	}

	skb_dst_drop(skb);
	return -1;

discard_fq:
	inet_frag_kill(&fq->q, &ip6_frags);
err:
	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *
 *	Returns -1 on failure for any reason, and 1 when the fragments have
 *	been successfully reassembled into a single datagram.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;
	int sum_truesize;
	u8 ecn;

	inet_frag_kill(&fq->q, &ip6_frags);

	ecn = ip_frag_ecn_table[fq->ecn];
	if (unlikely(ecn == 0xff))
		goto out_fail;

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		consume_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with the data and paged part
	 * and the second holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(fq->q.net, clone->truesize);
	}

	/* We have to remove the fragment header from the datagram and
	 * relocate the remaining headers in order to calculate the ICV
	 * correctly: copy the Next Header value of the fragment header back
	 * into the preceding header at nhoff, then move the preceding
	 * headers forward by sizeof(struct frag_hdr) so the fragment header
	 * disappears.
	 */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

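	/* Fold the remaining fragments into the head: merge checksums when
	 * both skbs carry CHECKSUM_COMPLETE, coalesce each fragment's data
	 * into the head where possible and otherwise chain it on the head's
	 * frag_list.  The truesize of the head and all fragments is then
	 * released from the frag memory accounting, since the reassembled
	 * skb no longer belongs to the queue.
	 */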
	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(fq->q.net, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
	IP6CB(head)->nhoff = nhoff;
	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

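/* ipv6_frag_rcv() is the protocol handler for IPPROTO_FRAGMENT.  It rejects
 * packets that carry a jumbo payload or have already been through fragment
 * processing, pulls the fragment header, and handles "atomic" fragments
 * (offset 0 and no M flag, i.e. frag_off & 0xFFF9 == 0) by simply skipping
 * the header.  Real fragments are queued on their reassembly queue via
 * ip6_frag_queue().
 */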
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	const struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
		goto fail_hdr;

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
		return 1;
	}

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
	if (fq) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		inet_frag_put(&fq->q, &ip6_frags);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static const struct inet6_protocol frag_protocol = {
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

#ifdef CONFIG_SYSCTL
static int zero;

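/* Per-netns sysctls: ip6frag_high_thresh and ip6frag_low_thresh bound the
 * memory used for fragment queues (low must stay between zero and high),
 * and ip6frag_time is the reassembly timeout.  The table below is
 * duplicated for every non-init network namespace with its .data pointers
 * redirected to that namespace's counters.
 */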
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ipv6.frags.low_thresh
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ipv6.frags.high_thresh
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int ip6_frags_secret_interval_unused;
static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[0].extra1 = &net->ipv6.frags.low_thresh;
		table[0].extra2 = &init_net.ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[1].extra2 = &net->ipv6.frags.high_thresh;
		table[2].data = &net->ipv6.frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ipv6", table);
	if (!hdr)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
					     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}

#else
static int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static int ip6_frags_sysctl_register(void)
{
	return 0;
}

static void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
	int res;

	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	res = inet_frags_init_net(&net->ipv6.frags);
	if (res)
		return res;
	res = ip6_frags_ns_sysctl_register(net);
	if (res)
		inet_frags_uninit_net(&net->ipv6.frags);
	return res;
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.frags_cache_name = ip6_frag_cache_name;
	ret = inet_frags_init(&ip6_frags);
	if (ret)
		goto err_pernet;
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}