]> git.ipfire.org Git - people/ms/linux.git/blob - net/ipv6/netfilter/ip6_tables.c
Importing "grsecurity-3.1-3.19.2-201503201903.patch"
[people/ms/linux.git] / net / ipv6 / netfilter / ip6_tables.c
1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/capability.h>
14 #include <linux/in.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/poison.h>
21 #include <linux/icmpv6.h>
22 #include <net/ipv6.h>
23 #include <net/compat.h>
24 #include <asm/uaccess.h>
25 #include <linux/mutex.h>
26 #include <linux/proc_fs.h>
27 #include <linux/err.h>
28 #include <linux/cpumask.h>
29
30 #include <linux/netfilter_ipv6/ip6_tables.h>
31 #include <linux/netfilter/x_tables.h>
32 #include <net/netfilter/nf_log.h>
33 #include "../../netfilter/xt_repldata.h"
34
35 MODULE_LICENSE("GPL");
36 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
37 MODULE_DESCRIPTION("IPv6 packet filter");
38
39 /*#define DEBUG_IP_FIREWALL*/
40 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
41 /*#define DEBUG_IP_FIREWALL_USER*/
42
43 #ifdef DEBUG_IP_FIREWALL
44 #define dprintf(format, args...) pr_info(format , ## args)
45 #else
46 #define dprintf(format, args...)
47 #endif
48
49 #ifdef DEBUG_IP_FIREWALL_USER
50 #define duprintf(format, args...) pr_info(format , ## args)
51 #else
52 #define duprintf(format, args...)
53 #endif
54
55 #ifdef CONFIG_NETFILTER_DEBUG
56 #define IP_NF_ASSERT(x) WARN_ON(!(x))
57 #else
58 #define IP_NF_ASSERT(x)
59 #endif
60
61 #if 0
62 /* All the better to debug you with... */
63 #define static
64 #define inline
65 #endif
66
/*
 * Allocate and populate the initial replacement blob for a built-in
 * ip6tables table.  All real work happens in the xt_alloc_initial_table()
 * macro, which expands using the "ip6t"/"IP6T" prefixes to build the
 * per-hook entries plus the trailing ERROR target.
 */
void *ip6t_alloc_initial_table(const struct xt_table *info)
{
	return xt_alloc_initial_table(ip6t, IP6T);
}
EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
72
73 /*
74 We keep a set of rules for each CPU, so we can avoid write-locking
75 them in the softirq when updating the counters and therefore
76 only need to read-lock in the softirq; doing a write_lock_bh() in user
77 context stops packets coming through and allows user context to read
78 the counters or update the rules.
79
80 Hence the start of any table is given by get_table() below. */
81
82 /* Returns whether matches rule or not. */
83 /* Performance critical - called for every packet */
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
/*
 * Match a packet against the IPv6-header portion of a rule
 * (addresses, interfaces, protocol).  On success *protoff is set to the
 * offset of the upper-layer header and *fragoff to the fragment offset.
 * *hotdrop is set when the packet is malformed enough to warrant
 * dropping it outright.
 */
static inline bool
ip6_packet_match(const struct sk_buff *skb,
		 const char *indev,
		 const char *outdev,
		 const struct ip6t_ip6 *ip6info,
		 unsigned int *protoff,
		 int *fragoff, bool *hotdrop)
{
	unsigned long ret;
	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);

/* Evaluates to "bool", XOR-flipped when the rule's corresponding
 * inversion flag is set. */
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))

	/* Masked source/destination address comparison (non-zero cmp
	 * result means mismatch), honouring the INV flags. */
	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
				       &ip6info->src), IP6T_INV_SRCIP) ||
	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
				       &ip6info->dst), IP6T_INV_DSTIP)) {
		dprintf("Source or dest mismatch.\n");
/*
		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
		return false;
	}

	/* Incoming interface: ret == 0 means the (masked) name matched. */
	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
		dprintf("VIA in mismatch (%s vs %s).%s\n",
			indev, ip6info->iniface,
			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
		return false;
	}

	/* Outgoing interface, same convention. */
	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);

	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
		dprintf("VIA out mismatch (%s vs %s).%s\n",
			outdev, ip6info->outiface,
			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
		return false;
	}

	/* ... might want to do something with class and flowlabel here ... */

	/* look for the desired protocol header */
	if((ip6info->flags & IP6T_F_PROTO)) {
		int protohdr;
		unsigned short _frag_off;

		/* Walk the extension-header chain to the transport header. */
		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
		if (protohdr < 0) {
			/* Parse error on a non-fragment: drop the packet. */
			if (_frag_off == 0)
				*hotdrop = true;
			return false;
		}
		*fragoff = _frag_off;

		dprintf("Packet protocol %hi ?= %s%hi.\n",
			protohdr,
			ip6info->invflags & IP6T_INV_PROTO ? "!":"",
			ip6info->proto);

		if (ip6info->proto == protohdr) {
			if(ip6info->invflags & IP6T_INV_PROTO) {
				return false;
			}
			return true;
		}

		/* We need match for the '-p all', too! */
		if ((ip6info->proto != 0) &&
			!(ip6info->invflags & IP6T_INV_PROTO))
			return false;
	}
	return true;
}
164
165 /* should be ip6 safe */
166 static bool
167 ip6_checkentry(const struct ip6t_ip6 *ipv6)
168 {
169 if (ipv6->flags & ~IP6T_F_MASK) {
170 duprintf("Unknown flag bits set: %08X\n",
171 ipv6->flags & ~IP6T_F_MASK);
172 return false;
173 }
174 if (ipv6->invflags & ~IP6T_INV_MASK) {
175 duprintf("Unknown invflag bits set: %08X\n",
176 ipv6->invflags & ~IP6T_INV_MASK);
177 return false;
178 }
179 return true;
180 }
181
/*
 * Target handler for the built-in ERROR target.  Hitting it at runtime
 * means the ruleset is corrupt (ERROR entries should only ever mark
 * chain heads / table ends), so log and drop.
 */
static unsigned int
ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
189
/* Translate a byte offset within the table blob into an entry pointer.
 * Offsets are validated at table-load time, not here. */
static inline struct ip6t_entry *
get_entry(const void *base, unsigned int offset)
{
	return (struct ip6t_entry *)(base + offset);
}
195
196 /* All zeroes == unconditional rule. */
197 /* Mildly perf critical (only if packet tracing is on) */
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
/* A rule header that is entirely zero (no addresses, masks, interfaces,
 * protocol or flags) matches every packet; compare against a static
 * zeroed template rather than testing each field. */
static inline bool unconditional(const struct ip6t_ip6 *ipv6)
{
	static const struct ip6t_ip6 uncond;

	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}
204
/* const-safe wrapper around ip6t_get_target(): locate the target record
 * that follows the entry's matches (at e->target_offset). */
static inline const struct xt_entry_target *
ip6t_get_target_c(const struct ip6t_entry *e)
{
	return ip6t_get_target((struct ip6t_entry *)e);
}
210
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
/* This cries for unification! */
/* Hook number -> built-in chain name, used when emitting TRACE lines. */
static const char *const hooknames[] = {
	[NF_INET_PRE_ROUTING]		= "PREROUTING",
	[NF_INET_LOCAL_IN]		= "INPUT",
	[NF_INET_FORWARD]		= "FORWARD",
	[NF_INET_LOCAL_OUT]		= "OUTPUT",
	[NF_INET_POST_ROUTING]		= "POSTROUTING",
};

/* Which kind of rule the traced packet matched; indexes 'comments'. */
enum nf_ip_trace_comments {
	NF_IP6_TRACE_COMMENT_RULE,
	NF_IP6_TRACE_COMMENT_RETURN,
	NF_IP6_TRACE_COMMENT_POLICY,
};

static const char *const comments[] = {
	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
};

/* Fixed log parameters for TRACE output (syslog level 4 = warning,
 * log everything the logger supports). */
static struct nf_loginfo trace_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 4,
			.logflags = NF_LOG_MASK,
		},
	},
};
242
/* Mildly perf critical (only if packet tracing is on) */
/*
 * Walk helper for trace_packet(): called for each entry 's' from the hook
 * start up to the matched entry 'e'.  Tracks the current chain name and
 * rule number as it goes.  Returns 1 once 's' == 'e' (stop walking),
 * 0 to continue.
 */
static inline int
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		/* An unconditional STANDARD target with a negative verdict
		 * is a chain tail: policy for base chains, implicit return
		 * for user chains. */
		if (s->target_offset == sizeof(struct ip6t_entry) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0 &&
		    unconditional(&s->ipv6)) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP6_TRACE_COMMENT_POLICY]
				: comments[NF_IP6_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
274
/*
 * Emit a "TRACE: table:chain:comment:rulenum" log line for a packet that
 * carries skb->nf_trace and just matched entry 'e'.  Re-walks the chain
 * from this hook's entry point to recover the chain name and rule number.
 */
static void trace_packet(const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ip6t_entry *e)
{
	const void *table_base;
	const struct ip6t_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ip6t_entry *iter;
	unsigned int rulenum = 0;
	/* At least one of in/out is non-NULL in every hook. */
	struct net *net = dev_net(in ? in : out);

	table_base = private->entries[smp_processor_id()];
	root = get_entry(table_base, private->hook_entry[hook]);

	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP6_TRACE_COMMENT_RULE];

	/* Walk from the hook entry until get_chainname_rulenum() reports
	 * that it reached the matched entry. */
	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
		      "TRACE: %s:%s:%s:%u ",
		      tablename, chainname, comment, rulenum);
}
305 #endif
306
/* Advance to the entry immediately following 'entry' in the blob
 * (entries are variable-sized; next_offset is the total entry length). */
static inline __pure struct ip6t_entry *
ip6t_next_entry(const struct ip6t_entry *entry)
{
	return (void *)entry + entry->next_offset;
}
312
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
/*
 * Main rule-traversal engine: run the packet through 'table' starting at
 * the entry point for 'hook'.  Uses the per-CPU copy of the ruleset and a
 * per-CPU jump stack for chain calls; the xt_write_recseq_* section lets
 * counter readers detect concurrent updates.
 */
unsigned int
ip6t_do_table(struct sk_buff *skb,
	      unsigned int hook,
	      const struct net_device *in,
	      const struct net_device *out,
	      struct xt_table *table)
{
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ip6t_entry *e, **jumpstack;
	unsigned int *stackptr, origptr, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	indev = in ? in->name : nulldevname;
	outdev = out ? out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.hotdrop = false;
	acpar.in = in;
	acpar.out = out;
	acpar.family = NFPROTO_IPV6;
	acpar.hooknum = hook;

	IP_NF_ASSERT(table->valid_hooks & (1 << hook));

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = table->private;
	/*
	 * Ensure we load private-> members after we've fetched the base
	 * pointer.
	 */
	smp_read_barrier_depends();
	cpu = smp_processor_id();
	table_base = private->entries[cpu];
	jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
	stackptr = per_cpu_ptr(private->stackptr, cpu);
	origptr = *stackptr;

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;

		IP_NF_ASSERT(e);
		acpar.thoff = 0;
		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
 no_match:
			e = ip6t_next_entry(e);
			continue;
		}

		/* IPv6 header matched: now run each extension match. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		ADD_COUNTER(e->counters, skb->len, 1);

		t = ip6t_get_target_c(e);
		IP_NF_ASSERT(t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(skb, hook, in, out,
				     table->name, private, e);
#endif
		/* Standard target? (NULL target function marks STANDARD) */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict (ACCEPT/DROP/...),
					 * encoded as -verdict - 1. */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				/* RETURN: pop the jump stack, or fall back to
				 * this hook's underflow (policy) entry when
				 * the stack is already empty. */
				if (*stackptr <= origptr)
					e = get_entry(table_base,
					    private->underflow[hook]);
				else
					e = ip6t_next_entry(jumpstack[--*stackptr]);
				continue;
			}
			/* A jump (not a GOTO and not a plain fall-through to
			 * the next rule) pushes the caller for RETURN. */
			if (table_base + v != ip6t_next_entry(e) &&
			    !(e->ipv6.flags & IP6T_F_GOTO)) {
				if (*stackptr >= private->stacksize) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[(*stackptr)++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE)
			e = ip6t_next_entry(e);
		else
			/* Verdict */
			break;
	} while (!acpar.hotdrop);

	*stackptr = origptr;

	xt_write_recseq_end(addend);
	local_bh_enable();

#ifdef DEBUG_ALLOW_ALL
	return NF_ACCEPT;
#else
	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
#endif
}
451
/* Figures out from what hook each rule can be called: returns 0 if
   there are loops.  Puts hook bitmask in comefrom. */
/*
 * Iterative depth-first search over the ruleset, once per hook.  Two
 * tricks avoid recursion: e->counters.pcnt temporarily stores the "back
 * pointer" (position we came from, restored to 0 on the way out), and
 * bit NF_INET_NUMHOOKS of e->comefrom marks "currently on the DFS path"
 * so revisiting it means a loop.
 */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ip6t_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Still on the current DFS path -> loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
				pr_err("iptables: loop hook %u pos %u %08X.\n",
				       hook, pos, e->comefrom);
				return 0;
			}
			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((e->target_offset == sizeof(struct ip6t_entry) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0 &&
			     unconditional(&e->ipv6)) || visited) {
				unsigned int oldpos, size;

				/* Reject verdicts below the legal range. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1) {
					duprintf("mark_source_chains: bad "
						"negative verdict (%i)\n",
								t->verdict);
					return 0;
				}

				/* Return: backtrack through the last
				   big jump. */
				do {
					/* Leaving this entry: clear the
					 * on-path bit. */
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
#ifdef DEBUG_IP_FIREWALL_USER
					if (e->comefrom
					    & (1 << NF_INET_NUMHOOKS)) {
						duprintf("Back unset "
							 "on hook %u "
							 "rule %u\n",
							 hook, pos);
					}
#endif
					oldpos = pos;
					/* pcnt held the saved back pointer;
					 * restore it to zero as promised. */
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = (struct ip6t_entry *)
						(entry0 + pos);
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = (struct ip6t_entry *)
					(entry0 + pos + size);
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* Jump target must lie inside the
					 * blob with room for an entry. */
					if (newpos > newinfo->size -
						sizeof(struct ip6t_entry)) {
						duprintf("mark_source_chains: "
							"bad verdict (%i)\n",
								newpos);
						return 0;
					}
					/* This a jump; chase it. */
					duprintf("Jump rule %u -> %u\n",
						 pos, newpos);
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
				}
				e = (struct ip6t_entry *)
					(entry0 + newpos);
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:
		duprintf("Finished chain %u\n", hook);
	}
	return 1;
}
563
564 static void cleanup_match(struct xt_entry_match *m, struct net *net)
565 {
566 struct xt_mtdtor_param par;
567
568 par.net = net;
569 par.match = m->u.kernel.match;
570 par.matchinfo = m->data;
571 par.family = NFPROTO_IPV6;
572 if (par.match->destroy != NULL)
573 par.match->destroy(&par);
574 module_put(par.match->me);
575 }
576
/*
 * Basic structural validation of one user-supplied entry:
 *  - the IPv6 rule header contains no unknown (inv)flag bits,
 *  - a target header fits between target_offset and next_offset,
 *  - the target's declared size also fits.
 * The order matters: the target record must be proven to fit before
 * ip6t_get_target_c() dereferences it.
 */
static int
check_entry(const struct ip6t_entry *e, const char *name)
{
	const struct xt_entry_target *t;

	if (!ip6_checkentry(&e->ipv6)) {
		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
		return -EINVAL;
	}

	if (e->target_offset + sizeof(struct xt_entry_target) >
	    e->next_offset)
		return -EINVAL;

	t = ip6t_get_target_c(e);
	if (e->target_offset + t->u.target_size > e->next_offset)
		return -EINVAL;

	return 0;
}
597
598 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
599 {
600 const struct ip6t_ip6 *ipv6 = par->entryinfo;
601 int ret;
602
603 par->match = m->u.kernel.match;
604 par->matchinfo = m->data;
605
606 ret = xt_check_match(par, m->u.match_size - sizeof(*m),
607 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
608 if (ret < 0) {
609 duprintf("ip_tables: check failed for `%s'.\n",
610 par.match->name);
611 return ret;
612 }
613 return 0;
614 }
615
616 static int
617 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
618 {
619 struct xt_match *match;
620 int ret;
621
622 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
623 m->u.user.revision);
624 if (IS_ERR(match)) {
625 duprintf("find_check_match: `%s' not found\n", m->u.user.name);
626 return PTR_ERR(match);
627 }
628 m->u.kernel.match = match;
629
630 ret = check_match(m, par);
631 if (ret)
632 goto err;
633
634 return 0;
635 err:
636 module_put(m->u.kernel.match->me);
637 return ret;
638 }
639
640 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
641 {
642 struct xt_entry_target *t = ip6t_get_target(e);
643 struct xt_tgchk_param par = {
644 .net = net,
645 .table = name,
646 .entryinfo = e,
647 .target = t->u.kernel.target,
648 .targinfo = t->data,
649 .hook_mask = e->comefrom,
650 .family = NFPROTO_IPV6,
651 };
652 int ret;
653
654 t = ip6t_get_target(e);
655 ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
656 e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
657 if (ret < 0) {
658 duprintf("ip_tables: check failed for `%s'.\n",
659 t->u.kernel.target->name);
660 return ret;
661 }
662 return 0;
663 }
664
/*
 * Fully validate one entry: structural checks, then resolve and validate
 * every match, then resolve and validate the target.  On any failure the
 * matches validated so far (counted in 'j') are torn down again so no
 * module references leak.
 */
static int
find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
		 unsigned int size)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	ret = check_entry(e, name);
	if (ret)
		return ret;

	/* j counts successfully-validated matches for partial unwind. */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	/* Unwind only the first j matches (those that passed). */
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
717
718 static bool check_underflow(const struct ip6t_entry *e)
719 {
720 const struct xt_entry_target *t;
721 unsigned int verdict;
722
723 if (!unconditional(&e->ipv6))
724 return false;
725 t = ip6t_get_target_c(e);
726 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
727 return false;
728 verdict = ((struct xt_standard_target *)t)->verdict;
729 verdict = -verdict - 1;
730 return verdict == NF_DROP || verdict == NF_ACCEPT;
731 }
732
733 static int
734 check_entry_size_and_hooks(struct ip6t_entry *e,
735 struct xt_table_info *newinfo,
736 const unsigned char *base,
737 const unsigned char *limit,
738 const unsigned int *hook_entries,
739 const unsigned int *underflows,
740 unsigned int valid_hooks)
741 {
742 unsigned int h;
743
744 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
745 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
746 duprintf("Bad offset %p\n", e);
747 return -EINVAL;
748 }
749
750 if (e->next_offset
751 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
752 duprintf("checking: element %p size %u\n",
753 e, e->next_offset);
754 return -EINVAL;
755 }
756
757 /* Check hooks & underflows */
758 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
759 if (!(valid_hooks & (1 << h)))
760 continue;
761 if ((unsigned char *)e - base == hook_entries[h])
762 newinfo->hook_entry[h] = hook_entries[h];
763 if ((unsigned char *)e - base == underflows[h]) {
764 if (!check_underflow(e)) {
765 pr_err("Underflows must be unconditional and "
766 "use the STANDARD target with "
767 "ACCEPT/DROP\n");
768 return -EINVAL;
769 }
770 newinfo->underflow[h] = underflows[h];
771 }
772 }
773
774 /* Clear counters and comefrom */
775 e->counters = ((struct xt_counters) { 0, 0 });
776 e->comefrom = 0;
777 return 0;
778 }
779
780 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
781 {
782 struct xt_tgdtor_param par;
783 struct xt_entry_target *t;
784 struct xt_entry_match *ematch;
785
786 /* Cleanup all matches */
787 xt_ematch_foreach(ematch, e)
788 cleanup_match(ematch, net);
789 t = ip6t_get_target(e);
790
791 par.net = net;
792 par.target = t->u.kernel.target;
793 par.targinfo = t->data;
794 par.family = NFPROTO_IPV6;
795 if (par.target->destroy != NULL)
796 par.target->destroy(&par);
797 module_put(par.target->me);
798 }
799
/* Checks and translates the user-supplied table segment (held in
   newinfo) */
/*
 * Full validation pipeline for a replacement ruleset:
 *  1. size/offset checks and hook/underflow discovery per entry,
 *  2. every valid hook must have both an entry and an underflow,
 *  3. loop detection via mark_source_chains(),
 *  4. per-entry match/target resolution and validation (with unwind of
 *     already-validated entries on failure),
 *  5. replication of the checked blob to every other CPU's copy.
 */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ip6t_replace *repl)
{
	struct ip6t_entry *iter;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_table: size %u\n", newinfo->size);
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			return ret;
		++i;
		/* Each user chain head (ERROR target) needs a jump-stack
		 * slot at runtime. */
		if (strcmp(ip6t_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	if (i != repl->num_entries) {
		duprintf("translate_table: %u not %u entries\n",
			 i, repl->num_entries);
		return -EINVAL;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, repl->hook_entry[i]);
			return -EINVAL;
		}
		if (newinfo->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, repl->underflow[i]);
			return -EINVAL;
		}
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
		return -ELOOP;

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* Unwind only the i entries that validated successfully. */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i) {
		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
			memcpy(newinfo->entries[i], entry0, newinfo->size);
	}

	return ret;
}
888
/*
 * Sum per-rule packet/byte counters across all CPUs into 'counters'
 * (one slot per rule, caller-allocated and zeroed).  Each 64-bit pair is
 * read under the per-CPU xt_recseq sequence counter and retried if a
 * writer (ip6t_do_table) raced with the read, so snapshots are
 * self-consistent without locking out the packet path.
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			u64 bcnt, pcnt;
			unsigned int start;

			/* Retry until the pair is read without a
			 * concurrent writer in the critical section. */
			do {
				start = read_seqcount_begin(s);
				bcnt = iter->counters.bcnt;
				pcnt = iter->counters.pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i;
		}
	}
}
916
917 static struct xt_counters *alloc_counters(const struct xt_table *table)
918 {
919 unsigned int countersize;
920 struct xt_counters *counters;
921 const struct xt_table_info *private = table->private;
922
923 /* We need atomic snapshot of counters: rest doesn't change
924 (other than comefrom, which userspace doesn't care
925 about). */
926 countersize = sizeof(struct xt_counters) * private->number;
927 counters = vzalloc(countersize);
928
929 if (counters == NULL)
930 return ERR_PTR(-ENOMEM);
931
932 get_counters(private, counters);
933
934 return counters;
935 }
936
/*
 * Copy the whole ruleset blob to userspace, then patch it up in place:
 * per-rule counters are replaced with a fresh cross-CPU snapshot, and
 * the kernel-internal match/target pointers are overwritten with the
 * user-visible names so userspace sees names, not pointers.
 * Returns 0 or -EFAULT/-ENOMEM.
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ip6t_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
		ret = -EFAULT;
		goto free_counters;
	}

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = (struct ip6t_entry *)(loc_cpu_entry + off);
		/* Overwrite the copied (stale, per-CPU) counters with the
		 * summed snapshot. */
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* Patch each match's name field in the user copy. */
		for (i = sizeof(struct ip6t_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (copy_to_user(userptr + off + i
					 + offsetof(struct xt_entry_match,
						    u.user.name),
					 m->u.kernel.match->name,
					 strlen(m->u.kernel.match->name)+1)
			    != 0) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		/* And the target's name. */
		t = ip6t_get_target_c(e);
		if (copy_to_user(userptr + off + e->target_offset
				 + offsetof(struct xt_entry_target,
					    u.user.name),
				 t->u.kernel.target->name,
				 strlen(t->u.kernel.target->name)+1) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
1010
1011 #ifdef CONFIG_COMPAT
1012 static void compat_standard_from_user(void *dst, const void *src)
1013 {
1014 int v = *(compat_int_t *)src;
1015
1016 if (v > 0)
1017 v += xt_compat_calc_jump(AF_INET6, v);
1018 memcpy(dst, &v, sizeof(v));
1019 }
1020
1021 static int compat_standard_to_user(void __user *dst, const void *src)
1022 {
1023 compat_int_t cv = *(int *)src;
1024
1025 if (cv > 0)
1026 cv -= xt_compat_calc_jump(AF_INET6, cv);
1027 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1028 }
1029
/*
 * Work out how much smaller one entry becomes in the compat (32-bit)
 * layout: the entry header shrinks plus every match/target may shrink.
 * Records the native->compat offset delta for this entry with
 * xt_compat_add_offset() and shifts any hook entry / underflow offsets
 * that lie beyond this entry.
 */
static int compat_calc_entry(const struct ip6t_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	/* Accumulate the total shrinkage for this entry. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ip6t_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		return ret;

	/* Pull in every hook/underflow offset that points past us. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		if (info->hook_entry[i] &&
		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
1060
/*
 * Build a compat-layout view of a table's metadata (sizes and hook
 * offsets) in 'newinfo' by walking the native ruleset and applying
 * compat_calc_entry() to each rule.  Caller must hold the compat lock.
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ip6t_entry *iter;
	void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries[] */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries[raw_smp_processor_id()];
	xt_compat_init_offsets(AF_INET6, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
1083 #endif
1084
/*
 * IP6T_SO_GET_INFO handler: copy a table's metadata (valid hooks, hook
 * entry/underflow offsets, entry count, total size) to userspace.  When
 * 'compat' is set the sizes/offsets are first translated to the 32-bit
 * layout under the compat lock.  May autoload the "ip6table_<name>"
 * module if the table is not registered yet.
 */
static int get_info(struct net *net, void __user *user,
		    int len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (len != sizeof(struct ip6t_getinfo)) {
		duprintf("length %u != %zu\n", len,
			 sizeof(struct ip6t_getinfo));
		return -EINVAL;
	}

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	/* Userspace may not have terminated the name. */
	name[XT_TABLE_MAXNAMELEN-1] = '\0';
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET6);
#endif
	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (!IS_ERR_OR_NULL(t)) {
		struct ip6t_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			/* Report compat-layout sizes/offsets instead. */
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET6);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET6);
#endif
	return ret;
}
1145
/*
 * IP6T_SO_GET_ENTRIES handler: copy a table's full ruleset to userspace
 * via copy_entries_to_user().  The caller-announced size must match the
 * table's current size exactly; -EAGAIN tells userspace to re-query the
 * size (the table was replaced in between) and retry.
 */
static int
get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ip6t_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get)) {
		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
		return -EINVAL;
	}
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	/* Total length must be header plus the announced blob size. */
	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
		duprintf("get_entries: %u != %zu\n",
			 *len, sizeof(get) + get.size);
		return -EINVAL;
	}

	t = xt_find_table_lock(net, AF_INET6, get.name);
	if (!IS_ERR_OR_NULL(t)) {
		struct xt_table_info *private = t->private;
		duprintf("t->private->number = %u\n", private->number);
		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else {
			duprintf("get_entries: I've got %u not %u!\n",
				 private->size, get.size);
			ret = -EAGAIN;
		}
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = t ? PTR_ERR(t) : -ENOENT;

	return ret;
}
1185
/*
 * Common core of [compat_]do_replace(): atomically swap the named
 * table's ruleset for @newinfo, hand the old counters back to userspace
 * and tear down the previous ruleset.  On success ownership of @newinfo
 * passes to the table; on failure the caller still owns (and frees) it.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	const void *loc_cpu_old_entry;
	struct ip6t_entry *iter;

	ret = 0;
	/* Zeroed snapshot buffer for the outgoing table's counters. */
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
				    "ip6table_%s", name);
	if (IS_ERR_OR_NULL(t)) {
		ret = t ? PTR_ERR(t) : -ENOENT;
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		duprintf("Valid hook crap: %08X vs %08X\n",
			 valid_hooks, t->valid_hooks);
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules */
	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
		 oldinfo->number, oldinfo->initial_entries, newinfo->number);
	/* Net effect of the two conditionals below: hold one extra module
	 * reference exactly while the table carries more than its built-in
	 * (initial) rules; when both conditions hold, two refs are dropped. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* Get the old counters, and synchronize with replace */
	get_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
1260
/*
 * IP6T_SO_SET_REPLACE (native ABI): copy the replacement header and
 * rule blob from userspace, validate it via translate_table(), then
 * swap it in through __do_replace().
 */
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	/* Table name comes from userspace: force NUL termination. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* Validates the blob and takes match/target module references. */
	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	duprintf("ip_tables: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	/* Replace failed: drop the references translate_table() took. */
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1309
1310 static int
1311 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1312 int compat)
1313 {
1314 unsigned int i, curcpu;
1315 struct xt_counters_info tmp;
1316 struct xt_counters *paddc;
1317 unsigned int num_counters;
1318 char *name;
1319 int size;
1320 void *ptmp;
1321 struct xt_table *t;
1322 const struct xt_table_info *private;
1323 int ret = 0;
1324 const void *loc_cpu_entry;
1325 struct ip6t_entry *iter;
1326 unsigned int addend;
1327 #ifdef CONFIG_COMPAT
1328 struct compat_xt_counters_info compat_tmp;
1329
1330 if (compat) {
1331 ptmp = &compat_tmp;
1332 size = sizeof(struct compat_xt_counters_info);
1333 } else
1334 #endif
1335 {
1336 ptmp = &tmp;
1337 size = sizeof(struct xt_counters_info);
1338 }
1339
1340 if (copy_from_user(ptmp, user, size) != 0)
1341 return -EFAULT;
1342
1343 #ifdef CONFIG_COMPAT
1344 if (compat) {
1345 num_counters = compat_tmp.num_counters;
1346 name = compat_tmp.name;
1347 } else
1348 #endif
1349 {
1350 num_counters = tmp.num_counters;
1351 name = tmp.name;
1352 }
1353
1354 if (len != size + num_counters * sizeof(struct xt_counters))
1355 return -EINVAL;
1356
1357 paddc = vmalloc(len - size);
1358 if (!paddc)
1359 return -ENOMEM;
1360
1361 if (copy_from_user(paddc, user + size, len - size) != 0) {
1362 ret = -EFAULT;
1363 goto free;
1364 }
1365
1366 t = xt_find_table_lock(net, AF_INET6, name);
1367 if (IS_ERR_OR_NULL(t)) {
1368 ret = t ? PTR_ERR(t) : -ENOENT;
1369 goto free;
1370 }
1371
1372
1373 local_bh_disable();
1374 private = t->private;
1375 if (private->number != num_counters) {
1376 ret = -EINVAL;
1377 goto unlock_up_free;
1378 }
1379
1380 i = 0;
1381 /* Choose the copy that is on our node */
1382 curcpu = smp_processor_id();
1383 addend = xt_write_recseq_begin();
1384 loc_cpu_entry = private->entries[curcpu];
1385 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1386 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1387 ++i;
1388 }
1389 xt_write_recseq_end(addend);
1390
1391 unlock_up_free:
1392 local_bh_enable();
1393 xt_table_unlock(t);
1394 module_put(t->me);
1395 free:
1396 vfree(paddc);
1397
1398 return ret;
1399 }
1400
#ifdef CONFIG_COMPAT
/*
 * 32-bit userspace image of struct ip6t_replace: same field order, but
 * the counters pointer is a compat_uptr_t and the trailing rules use
 * the packed compat entry layout.
 */
struct compat_ip6t_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ip6t_entry entries[0];
};
1413
/*
 * Shrink one native entry into the compat layout at *dstptr, attaching
 * counters[i].  *size tracks the remaining delta between native and
 * compat representations; target/next offsets are rewritten to account
 * for the bytes saved so far (origsize - *size).
 */
static int
compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ip6t_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = (struct compat_ip6t_entry __user *)*dstptr;
	/* Copy the fixed header, then overwrite the counters slot. */
	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ip6t_entry);
	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	/* Each match/target converter advances *dstptr and shrinks *size. */
	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	target_offset = e->target_offset - (origsize - *size);
	t = ip6t_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	/* Patch the offsets in the already-copied compat header. */
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
1452
1453 static int
1454 compat_find_calc_match(struct xt_entry_match *m,
1455 const char *name,
1456 const struct ip6t_ip6 *ipv6,
1457 unsigned int hookmask,
1458 int *size)
1459 {
1460 struct xt_match *match;
1461
1462 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1463 m->u.user.revision);
1464 if (IS_ERR(match)) {
1465 duprintf("compat_check_calc_match: `%s' not found\n",
1466 m->u.user.name);
1467 return PTR_ERR(match);
1468 }
1469 m->u.kernel.match = match;
1470 *size += xt_compat_match_offset(match);
1471 return 0;
1472 }
1473
/*
 * Drop the module references that check_compat_entry_size_and_hooks()
 * took on a compat entry's matches and target.  No ->destroy calls here
 * (see the cleanup comment in translate_compat_table(): these entries
 * never had ->check run).
 */
static void compat_release_entry(struct compat_ip6t_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ip6t_get_target(e);
	module_put(t->u.kernel.target->me);
}
1485
/*
 * Validate one compat entry's alignment, bounds and sizes, resolve its
 * matches and target (taking module references), record the native/
 * compat size delta with xt_compat_add_offset(), and note any hook
 * entry/underflow this entry starts.  On error all references taken so
 * far on this entry are dropped again.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit,
				  const unsigned int *hook_entries,
				  const unsigned int *underflows,
				  const char *name)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off, h;

	duprintf("check_compat_entry_size_and_hooks %p\n", e);
	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
		duprintf("Bad offset %p, limit = %p\n", e, limit);
		return -EINVAL;
	}

	/* Entry must at least hold its own header plus a target header. */
	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
			     sizeof(struct compat_xt_entry_target)) {
		duprintf("checking: element %p size %u\n",
			 e, e->next_offset);
		return -EINVAL;
	}

	/* For purposes of check_entry casting the compat entry is fine */
	ret = check_entry((struct ip6t_entry *)e, name);
	if (ret)
		return ret;

	/* off accumulates how much bigger the native form will be. */
	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;		/* matches successfully resolved (for unwinding) */
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, name,
					     &e->ipv6, e->comefrom, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ip6t_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
			 t->u.user.name);
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	/* Remember this entry's compat->native delta for later fixups. */
	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
	if (ret)
		goto out;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h])
			newinfo->underflow[h] = underflows[h];
	}

	/* Clear counters and comefrom */
	memset(&e->counters, 0, sizeof(e->counters));
	e->comefrom = 0;
	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
1573
/*
 * Expand one validated compat entry into the native layout at *dstptr.
 * *size grows by the native/compat delta; offsets inside the entry and
 * any hook entry/underflow positions behind this entry are shifted by
 * the bytes gained so far (origsize - *size, which is negative growth).
 */
static int
compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
			    unsigned int *size, const char *name,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ip6t_entry *de;
	unsigned int origsize;
	int ret, h;
	struct xt_entry_match *ematch;

	ret = 0;
	origsize = *size;
	de = (struct ip6t_entry *)*dstptr;
	memcpy(de, e, sizeof(struct ip6t_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ip6t_entry);
	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_from_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ip6t_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);
	/* Shift hook offsets that lie beyond this (now larger) entry. */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
	return ret;
}
1612
/*
 * Run the native ->checkentry hooks on an already-translated entry:
 * each match first, then the target.  On failure, matches whose check
 * succeeded are cleaned up again (cleanup_match calls ->destroy).
 */
static int compat_check_entry(struct ip6t_entry *e, struct net *net,
			      const char *name)
{
	unsigned int j;
	int ret = 0;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	j = 0;		/* matches checked so far (for unwinding) */
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ipv6;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV6;
	xt_ematch_foreach(ematch, e) {
		ret = check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	ret = check_target(e, net, name);
	if (ret)
		goto cleanup_matches;
	return 0;

 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}
	return ret;
}
1647
/*
 * Convert a 32-bit userspace ruleset to the native layout in three
 * phases: (1) size/offset-check every compat entry under the compat
 * lock, (2) allocate a native table and translate each entry into it,
 * (3) run the native ->check hooks and mark source chains.  On success
 * *pinfo/*pentry0 are replaced with the native table and the old info
 * is freed; on failure every module reference taken so far is dropped.
 */
static int
translate_compat_table(struct net *net,
		       const char *name,
		       unsigned int valid_hooks,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       unsigned int total_size,
		       unsigned int number,
		       unsigned int *hook_entries,
		       unsigned int *underflows)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ip6t_entry *iter0;
	struct ip6t_entry *iter1;
	unsigned int size;
	int ret = 0;

	info = *pinfo;
	entry0 = *pentry0;
	size = total_size;
	info->number = number;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		info->hook_entry[i] = 0xFFFFFFFF;
		info->underflow[i] = 0xFFFFFFFF;
	}

	duprintf("translate_compat_table: size %u\n", info->size);
	j = 0;		/* entries whose size/hook check succeeded */
	xt_compat_lock(AF_INET6);
	xt_compat_init_offsets(AF_INET6, number);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + total_size,
							hook_entries,
							underflows,
							name);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != number) {
		duprintf("translate_compat_table: %u not %u entries\n",
			 j, number);
		goto out_unlock;
	}

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(valid_hooks & (1 << i)))
			continue;
		if (info->hook_entry[i] == 0xFFFFFFFF) {
			duprintf("Invalid hook entry %u %u\n",
				 i, hook_entries[i]);
			goto out_unlock;
		}
		if (info->underflow[i] == 0xFFFFFFFF) {
			duprintf("Invalid underflow %u %u\n",
				 i, underflows[i]);
			goto out_unlock;
		}
	}

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = number;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = info->hook_entry[i];
		newinfo->underflow[i] = info->underflow[i];
	}
	/* Translate into this CPU's slot; other CPUs get a copy below. */
	entry1 = newinfo->entries[raw_smp_processor_id()];
	pos = entry1;
	size = total_size;
	xt_entry_foreach(iter0, entry0, total_size) {
		ret = compat_copy_entry_from_user(iter0, &pos, &size,
						  name, newinfo, entry1);
		if (ret != 0)
			break;
	}
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	if (ret)
		goto free_newinfo;

	ret = -ELOOP;
	if (!mark_source_chains(newinfo, valid_hooks, entry1))
		goto free_newinfo;

	i = 0;		/* entries whose native ->check succeeded */
	xt_entry_foreach(iter1, entry1, newinfo->size) {
		ret = compat_check_entry(iter1, net, name);
		if (ret != 0)
			break;
		++i;
		if (strcmp(ip6t_get_target(iter1)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}
	if (ret) {
		/*
		 * The first i matches need cleanup_entry (calls ->destroy)
		 * because they had called ->check already. The other j-i
		 * entries need only release.
		 */
		int skip = i;
		j -= i;
		xt_entry_foreach(iter0, entry0, newinfo->size) {
			if (skip-- > 0)
				continue;
			if (j-- == 0)
				break;
			compat_release_entry(iter0);
		}
		xt_entry_foreach(iter1, entry1, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter1, net);
		}
		xt_free_table_info(newinfo);
		return ret;
	}

	/* And one copy for every other CPU */
	for_each_possible_cpu(i)
		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
			memcpy(newinfo->entries[i], entry1, newinfo->size);

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

 free_newinfo:
	xt_free_table_info(newinfo);
 out:
	/* Release the refs taken for the first j compat entries. */
	xt_entry_foreach(iter0, entry0, total_size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
 out_unlock:
	xt_compat_flush_offsets(AF_INET6);
	xt_compat_unlock(AF_INET6);
	goto out;
}
1805
/*
 * IP6T_SO_SET_REPLACE, 32-bit ABI: same flow as do_replace() but the
 * blob is first converted to the native layout by
 * translate_compat_table().
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ip6t_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ip6t_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.size >= INT_MAX / num_possible_cpus())
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	/* Table name comes from userspace: force NUL termination. */
	tmp.name[sizeof(tmp.name)-1] = 0;

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	/* choose the copy that is on our node/cpu */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* May replace newinfo/loc_cpu_entry with the translated table. */
	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
				     &newinfo, &loc_cpu_entry, tmp.size,
				     tmp.num_entries, tmp.hook_entry,
				     tmp.underflow);
	if (ret != 0)
		goto free_newinfo;

	duprintf("compat_do_replace: Translated table\n");

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
1859
1860 static int
1861 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1862 unsigned int len)
1863 {
1864 int ret;
1865
1866 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1867 return -EPERM;
1868
1869 switch (cmd) {
1870 case IP6T_SO_SET_REPLACE:
1871 ret = compat_do_replace(sock_net(sk), user, len);
1872 break;
1873
1874 case IP6T_SO_SET_ADD_COUNTERS:
1875 ret = do_add_counters(sock_net(sk), user, len, 1);
1876 break;
1877
1878 default:
1879 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
1880 ret = -EINVAL;
1881 }
1882
1883 return ret;
1884 }
1885
/* 32-bit image of struct ip6t_get_entries: header plus compat rules. */
struct compat_ip6t_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ip6t_entry entrytable[0];
};
1891
/*
 * Dump a whole table to a 32-bit caller: allocate a counters snapshot,
 * then convert every entry with compat_copy_entry_to_user().
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	const void *loc_cpu_entry;
	unsigned int i = 0;
	struct ip6t_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	/* choose the copy that is on our node/cpu, ...
	 * This choice is lazy (because current thread is
	 * allowed to migrate to another cpu)
	 */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
1926
1927 static int
1928 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1929 int *len)
1930 {
1931 int ret;
1932 struct compat_ip6t_get_entries get;
1933 struct xt_table *t;
1934
1935 if (*len < sizeof(get)) {
1936 duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1937 return -EINVAL;
1938 }
1939
1940 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1941 return -EFAULT;
1942
1943 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1944 duprintf("compat_get_entries: %u != %zu\n",
1945 *len, sizeof(get) + get.size);
1946 return -EINVAL;
1947 }
1948
1949 xt_compat_lock(AF_INET6);
1950 t = xt_find_table_lock(net, AF_INET6, get.name);
1951 if (!IS_ERR_OR_NULL(t)) {
1952 const struct xt_table_info *private = t->private;
1953 struct xt_table_info info;
1954 duprintf("t->private->number = %u\n", private->number);
1955 ret = compat_table_info(private, &info);
1956 if (!ret && get.size == info.size) {
1957 ret = compat_copy_entries_to_user(private->size,
1958 t, uptr->entrytable);
1959 } else if (!ret) {
1960 duprintf("compat_get_entries: I've got %u not %u!\n",
1961 private->size, get.size);
1962 ret = -EAGAIN;
1963 }
1964 xt_compat_flush_offsets(AF_INET6);
1965 module_put(t->me);
1966 xt_table_unlock(t);
1967 } else
1968 ret = t ? PTR_ERR(t) : -ENOENT;
1969
1970 xt_compat_unlock(AF_INET6);
1971 return ret;
1972 }
1973
/* Forward declaration: the compat get handler falls back to this. */
static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1975
1976 static int
1977 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1978 {
1979 int ret;
1980
1981 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1982 return -EPERM;
1983
1984 switch (cmd) {
1985 case IP6T_SO_GET_INFO:
1986 ret = get_info(sock_net(sk), user, *len, 1);
1987 break;
1988 case IP6T_SO_GET_ENTRIES:
1989 ret = compat_get_entries(sock_net(sk), user, len);
1990 break;
1991 default:
1992 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1993 }
1994 return ret;
1995 }
1996 #endif
1997
1998 static int
1999 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2000 {
2001 int ret;
2002
2003 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2004 return -EPERM;
2005
2006 switch (cmd) {
2007 case IP6T_SO_SET_REPLACE:
2008 ret = do_replace(sock_net(sk), user, len);
2009 break;
2010
2011 case IP6T_SO_SET_ADD_COUNTERS:
2012 ret = do_add_counters(sock_net(sk), user, len, 0);
2013 break;
2014
2015 default:
2016 duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd);
2017 ret = -EINVAL;
2018 }
2019
2020 return ret;
2021 }
2022
/*
 * Native getsockopt() dispatcher: table info, entry dump, and match/
 * target revision queries.  Requires CAP_NET_ADMIN in the socket's
 * network namespace.
 */
static int
do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IP6T_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, *len, 0);
		break;

	case IP6T_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IP6T_SO_GET_REVISION_MATCH:
	case IP6T_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		/* Extension name from userspace: force NUL termination. */
		rev.name[sizeof(rev.name)-1] = 0;

		if (cmd == IP6T_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* Autoload the extension module if the revision is unknown;
		 * ret is filled in by xt_find_revision(). */
		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
							 rev.revision,
							 target, &ret),
					"ip6t_%s", rev.name);
		break;
	}

	default:
		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
2074
/*
 * Register an ip6tables table (called by table providers such as
 * ip6table_filter): allocate a table_info, seed it from the initial
 * ruleset in @repl, validate it via translate_table() and hand it to
 * the x_tables core.  Returns the live table or ERR_PTR().
 */
struct xt_table *ip6t_register_table(struct net *net,
				     const struct xt_table *table,
				     const struct ip6t_replace *repl)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo) {
		ret = -ENOMEM;
		goto out;
	}

	/* choose the copy on our node/cpu, but dont care about preemption */
	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}
	return new_table;

 out_free:
	xt_free_table_info(newinfo);
 out:
	return ERR_PTR(ret);
}
2111
/*
 * Tear down a registered table: unhook it from the x_tables core, run
 * cleanup_entry() on every rule (dropping match/target module refs) and
 * free the table_info.  table->me is cached before the table memory may
 * go away.
 */
void ip6t_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ip6t_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries[raw_smp_processor_id()];
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	/* Drop the extra ref __do_replace() held for a grown table. */
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
2129
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		      u_int8_t type, u_int8_t code,
		      bool invert)
{
	bool in_range = type == test_type &&
			code >= min_code &&
			code <= max_code;

	return invert ? !in_range : in_range;
}
2139
/*
 * icmp6 match: compare the type/code of a non-fragment ICMPv6 packet
 * against the range configured in the rule, honouring the invert flag.
 */
static bool
icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmp6hdr *ic;
	struct icmp6hdr _icmph;
	const struct ip6t_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		duprintf("Dropping evil ICMP tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return icmp6_type_code_match(icmpinfo->type,
				     icmpinfo->code[0],
				     icmpinfo->code[1],
				     ic->icmp6_type, ic->icmp6_code,
				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
}
2167
2168 /* Called when user tries to insert an entry of this type. */
2169 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2170 {
2171 const struct ip6t_icmp *icmpinfo = par->matchinfo;
2172
2173 /* Must specify no unknown invflags */
2174 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2175 }
2176
/* The built-in targets: standard (NULL) and error. */
static struct xt_target ip6t_builtin_tg[] __read_mostly = {
	{
		/* Standard verdicts: payload is a single int (no ->target
		 * function registered here). */
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),
		.family           = NFPROTO_IPV6,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		/* Error target: payload is an error-name string. */
		.name             = XT_ERROR_TARGET,
		.target           = ip6t_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV6,
	},
};
2196
/* Socket-option table wiring the [compat_]do_ip6t_{set,get}_ctl
 * handlers into the PF_INET6 sockopt range. */
static struct nf_sockopt_ops ip6t_sockopts = {
	.pf		= PF_INET6,
	.set_optmin	= IP6T_BASE_CTL,
	.set_optmax	= IP6T_SO_SET_MAX+1,
	.set		= do_ip6t_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ip6t_set_ctl,
#endif
	.get_optmin	= IP6T_BASE_CTL,
	.get_optmax	= IP6T_SO_GET_MAX+1,
	.get		= do_ip6t_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ip6t_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
2213
/* Built-in match: ICMPv6 type/code matching ("-p icmpv6 --icmpv6-type"). */
static struct xt_match ip6t_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp6",
		.match      = icmp6_match,
		.matchsize  = sizeof(struct ip6t_icmp),
		.checkentry = icmp6_checkentry,
		.proto      = IPPROTO_ICMPV6,
		.family     = NFPROTO_IPV6,
	},
};
2224
/* Per-netns init: set up the x_tables state for NFPROTO_IPV6. */
static int __net_init ip6_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV6);
}
2229
/* Per-netns teardown: release the NFPROTO_IPV6 x_tables state. */
static void __net_exit ip6_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV6);
}
2234
/* Hook the per-netns init/exit callbacks into the pernet machinery. */
static struct pernet_operations ip6_tables_net_ops = {
	.init = ip6_tables_net_init,
	.exit = ip6_tables_net_exit,
};
2239
/*
 * Module init: register pernet ops, the built-in targets and matches,
 * then the sockopt interface.  The error labels unwind in reverse
 * registration order (label numbering has historical gaps).
 */
static int __init ip6_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip6_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ip6t_sockopts);
	if (ret < 0)
		goto err5;

	pr_info("(C) 2000-2006 Netfilter Core Team\n");
	return 0;

err5:
	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
err4:
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
err2:
	unregister_pernet_subsys(&ip6_tables_net_ops);
err1:
	return ret;
}
2273
/* Module exit: unregister everything in reverse of ip6_tables_init(). */
static void __exit ip6_tables_fini(void)
{
	nf_unregister_sockopt(&ip6t_sockopts);

	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
	unregister_pernet_subsys(&ip6_tables_net_ops);
}
2282
/* Public API used by the ip6table_* table-provider modules. */
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
EXPORT_SYMBOL(ip6t_do_table);

module_init(ip6_tables_init);
module_exit(ip6_tables_fini);