]>
Commit | Line | Data |
---|---|---|
2e4e6a17 HW |
1 | /* |
2 | * x_tables core - Backend for {ip,ip6,arp}_tables | |
3 | * | |
4 | * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org> | |
f229f6ce | 5 | * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net> |
2e4e6a17 HW |
6 | * |
7 | * Based on existing ip_tables code which is | |
8 | * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling | |
9 | * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> | |
10 | * | |
11 | * This program is free software; you can redistribute it and/or modify | |
12 | * it under the terms of the GNU General Public License version 2 as | |
13 | * published by the Free Software Foundation. | |
14 | * | |
15 | */ | |
be91fd5e | 16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
2e4e6a17 | 17 | #include <linux/kernel.h> |
3a9a231d | 18 | #include <linux/module.h> |
2e4e6a17 HW |
19 | #include <linux/socket.h> |
20 | #include <linux/net.h> | |
21 | #include <linux/proc_fs.h> | |
22 | #include <linux/seq_file.h> | |
23 | #include <linux/string.h> | |
24 | #include <linux/vmalloc.h> | |
9e19bb6d | 25 | #include <linux/mutex.h> |
d7fe0f24 | 26 | #include <linux/mm.h> |
5a0e3ad6 | 27 | #include <linux/slab.h> |
fbabf31e | 28 | #include <linux/audit.h> |
f13f2aee | 29 | #include <linux/user_namespace.h> |
457c4cbc | 30 | #include <net/net_namespace.h> |
2e4e6a17 HW |
31 | |
32 | #include <linux/netfilter/x_tables.h> | |
33 | #include <linux/netfilter_arp.h> | |
e3eaa991 JE |
34 | #include <linux/netfilter_ipv4/ip_tables.h> |
35 | #include <linux/netfilter_ipv6/ip6_tables.h> | |
36 | #include <linux/netfilter_arp/arp_tables.h> | |
9e19bb6d | 37 | |
2e4e6a17 HW |
38 | MODULE_LICENSE("GPL"); |
39 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | |
043ef46c | 40 | MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module"); |
2e4e6a17 | 41 | |
ae0ac0ed | 42 | #define XT_PCPU_BLOCK_SIZE 4096 |
fab0b3ce | 43 | #define XT_MAX_TABLE_SIZE (512 * 1024 * 1024) |
2e4e6a17 | 44 | |
b386d9f5 | 45 | struct compat_delta { |
255d0dc3 ED |
46 | unsigned int offset; /* offset in kernel */ |
47 | int delta; /* delta in 32bit user land */ | |
b386d9f5 PM |
48 | }; |
49 | ||
2e4e6a17 | 50 | struct xt_af { |
9e19bb6d | 51 | struct mutex mutex; |
2e4e6a17 HW |
52 | struct list_head match; |
53 | struct list_head target; | |
b386d9f5 | 54 | #ifdef CONFIG_COMPAT |
2722971c | 55 | struct mutex compat_mutex; |
255d0dc3 ED |
56 | struct compat_delta *compat_tab; |
57 | unsigned int number; /* number of slots in compat_tab[] */ | |
58 | unsigned int cur; /* number of used slots in compat_tab[] */ | |
b386d9f5 | 59 | #endif |
2e4e6a17 HW |
60 | }; |
61 | ||
62 | static struct xt_af *xt; | |
63 | ||
7e9c6eeb JE |
64 | static const char *const xt_prefix[NFPROTO_NUMPROTO] = { |
65 | [NFPROTO_UNSPEC] = "x", | |
66 | [NFPROTO_IPV4] = "ip", | |
67 | [NFPROTO_ARP] = "arp", | |
68 | [NFPROTO_BRIDGE] = "eb", | |
69 | [NFPROTO_IPV6] = "ip6", | |
37f9f733 PM |
70 | }; |
71 | ||
2e4e6a17 | 72 | /* Registration hooks for targets. */ |
7926dbfa | 73 | int xt_register_target(struct xt_target *target) |
2e4e6a17 | 74 | { |
76108cea | 75 | u_int8_t af = target->family; |
2e4e6a17 | 76 | |
7926dbfa | 77 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 78 | list_add(&target->list, &xt[af].target); |
9e19bb6d | 79 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 80 | return 0; |
2e4e6a17 HW |
81 | } |
82 | EXPORT_SYMBOL(xt_register_target); | |
83 | ||
84 | void | |
a45049c5 | 85 | xt_unregister_target(struct xt_target *target) |
2e4e6a17 | 86 | { |
76108cea | 87 | u_int8_t af = target->family; |
a45049c5 | 88 | |
9e19bb6d | 89 | mutex_lock(&xt[af].mutex); |
df0933dc | 90 | list_del(&target->list); |
9e19bb6d | 91 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
92 | } |
93 | EXPORT_SYMBOL(xt_unregister_target); | |
94 | ||
52d9c42e PM |
95 | int |
96 | xt_register_targets(struct xt_target *target, unsigned int n) | |
97 | { | |
98 | unsigned int i; | |
99 | int err = 0; | |
100 | ||
101 | for (i = 0; i < n; i++) { | |
102 | err = xt_register_target(&target[i]); | |
103 | if (err) | |
104 | goto err; | |
105 | } | |
106 | return err; | |
107 | ||
108 | err: | |
109 | if (i > 0) | |
110 | xt_unregister_targets(target, i); | |
111 | return err; | |
112 | } | |
113 | EXPORT_SYMBOL(xt_register_targets); | |
114 | ||
115 | void | |
116 | xt_unregister_targets(struct xt_target *target, unsigned int n) | |
117 | { | |
f68c5301 CG |
118 | while (n-- > 0) |
119 | xt_unregister_target(&target[n]); | |
52d9c42e PM |
120 | } |
121 | EXPORT_SYMBOL(xt_unregister_targets); | |
122 | ||
7926dbfa | 123 | int xt_register_match(struct xt_match *match) |
2e4e6a17 | 124 | { |
76108cea | 125 | u_int8_t af = match->family; |
2e4e6a17 | 126 | |
7926dbfa | 127 | mutex_lock(&xt[af].mutex); |
2e4e6a17 | 128 | list_add(&match->list, &xt[af].match); |
9e19bb6d | 129 | mutex_unlock(&xt[af].mutex); |
7926dbfa | 130 | return 0; |
2e4e6a17 HW |
131 | } |
132 | EXPORT_SYMBOL(xt_register_match); | |
133 | ||
134 | void | |
a45049c5 | 135 | xt_unregister_match(struct xt_match *match) |
2e4e6a17 | 136 | { |
76108cea | 137 | u_int8_t af = match->family; |
a45049c5 | 138 | |
9e19bb6d | 139 | mutex_lock(&xt[af].mutex); |
df0933dc | 140 | list_del(&match->list); |
9e19bb6d | 141 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
142 | } |
143 | EXPORT_SYMBOL(xt_unregister_match); | |
144 | ||
52d9c42e PM |
145 | int |
146 | xt_register_matches(struct xt_match *match, unsigned int n) | |
147 | { | |
148 | unsigned int i; | |
149 | int err = 0; | |
150 | ||
151 | for (i = 0; i < n; i++) { | |
152 | err = xt_register_match(&match[i]); | |
153 | if (err) | |
154 | goto err; | |
155 | } | |
156 | return err; | |
157 | ||
158 | err: | |
159 | if (i > 0) | |
160 | xt_unregister_matches(match, i); | |
161 | return err; | |
162 | } | |
163 | EXPORT_SYMBOL(xt_register_matches); | |
164 | ||
165 | void | |
166 | xt_unregister_matches(struct xt_match *match, unsigned int n) | |
167 | { | |
f68c5301 CG |
168 | while (n-- > 0) |
169 | xt_unregister_match(&match[n]); | |
52d9c42e PM |
170 | } |
171 | EXPORT_SYMBOL(xt_unregister_matches); | |
172 | ||
2e4e6a17 HW |
173 | |
174 | /* | |
175 | * These are weird, but module loading must not be done with mutex | |
176 | * held (since they will register), and we have to have a single | |
adb00ae2 | 177 | * function to use. |
2e4e6a17 HW |
178 | */ |
179 | ||
180 | /* Find match, grabs ref. Returns ERR_PTR() on error. */ | |
76108cea | 181 | struct xt_match *xt_find_match(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
182 | { |
183 | struct xt_match *m; | |
42046e2e | 184 | int err = -ENOENT; |
2e4e6a17 | 185 | |
7926dbfa | 186 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
187 | list_for_each_entry(m, &xt[af].match, list) { |
188 | if (strcmp(m->name, name) == 0) { | |
189 | if (m->revision == revision) { | |
190 | if (try_module_get(m->me)) { | |
9e19bb6d | 191 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
192 | return m; |
193 | } | |
194 | } else | |
195 | err = -EPROTOTYPE; /* Found something. */ | |
196 | } | |
197 | } | |
9e19bb6d | 198 | mutex_unlock(&xt[af].mutex); |
55b69e91 JE |
199 | |
200 | if (af != NFPROTO_UNSPEC) | |
201 | /* Try searching again in the family-independent list */ | |
202 | return xt_find_match(NFPROTO_UNSPEC, name, revision); | |
203 | ||
2e4e6a17 HW |
204 | return ERR_PTR(err); |
205 | } | |
206 | EXPORT_SYMBOL(xt_find_match); | |
207 | ||
fd0ec0e6 JE |
208 | struct xt_match * |
209 | xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision) | |
210 | { | |
211 | struct xt_match *match; | |
212 | ||
f25f048d ED |
213 | if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN) |
214 | return ERR_PTR(-EINVAL); | |
215 | ||
adb00ae2 SH |
216 | match = xt_find_match(nfproto, name, revision); |
217 | if (IS_ERR(match)) { | |
218 | request_module("%st_%s", xt_prefix[nfproto], name); | |
219 | match = xt_find_match(nfproto, name, revision); | |
220 | } | |
221 | ||
222 | return match; | |
fd0ec0e6 JE |
223 | } |
224 | EXPORT_SYMBOL_GPL(xt_request_find_match); | |
225 | ||
2e4e6a17 | 226 | /* Find target, grabs ref. Returns ERR_PTR() on error. */ |
76108cea | 227 | struct xt_target *xt_find_target(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
228 | { |
229 | struct xt_target *t; | |
42046e2e | 230 | int err = -ENOENT; |
2e4e6a17 | 231 | |
7926dbfa | 232 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
233 | list_for_each_entry(t, &xt[af].target, list) { |
234 | if (strcmp(t->name, name) == 0) { | |
235 | if (t->revision == revision) { | |
236 | if (try_module_get(t->me)) { | |
9e19bb6d | 237 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
238 | return t; |
239 | } | |
240 | } else | |
241 | err = -EPROTOTYPE; /* Found something. */ | |
242 | } | |
243 | } | |
9e19bb6d | 244 | mutex_unlock(&xt[af].mutex); |
55b69e91 JE |
245 | |
246 | if (af != NFPROTO_UNSPEC) | |
247 | /* Try searching again in the family-independent list */ | |
248 | return xt_find_target(NFPROTO_UNSPEC, name, revision); | |
249 | ||
2e4e6a17 HW |
250 | return ERR_PTR(err); |
251 | } | |
252 | EXPORT_SYMBOL(xt_find_target); | |
253 | ||
76108cea | 254 | struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision) |
2e4e6a17 HW |
255 | { |
256 | struct xt_target *target; | |
257 | ||
f25f048d ED |
258 | if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN) |
259 | return ERR_PTR(-EINVAL); | |
260 | ||
adb00ae2 SH |
261 | target = xt_find_target(af, name, revision); |
262 | if (IS_ERR(target)) { | |
263 | request_module("%st_%s", xt_prefix[af], name); | |
264 | target = xt_find_target(af, name, revision); | |
265 | } | |
266 | ||
267 | return target; | |
2e4e6a17 HW |
268 | } |
269 | EXPORT_SYMBOL_GPL(xt_request_find_target); | |
270 | ||
f32815d2 WB |
271 | |
272 | static int xt_obj_to_user(u16 __user *psize, u16 size, | |
273 | void __user *pname, const char *name, | |
274 | u8 __user *prev, u8 rev) | |
275 | { | |
276 | if (put_user(size, psize)) | |
277 | return -EFAULT; | |
278 | if (copy_to_user(pname, name, strlen(name) + 1)) | |
279 | return -EFAULT; | |
280 | if (put_user(rev, prev)) | |
281 | return -EFAULT; | |
282 | ||
283 | return 0; | |
284 | } | |
285 | ||
286 | #define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE) \ | |
287 | xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size, \ | |
288 | U->u.user.name, K->u.kernel.TYPE->name, \ | |
289 | &U->u.user.revision, K->u.kernel.TYPE->revision) | |
290 | ||
291 | int xt_data_to_user(void __user *dst, const void *src, | |
324318f0 | 292 | int usersize, int size, int aligned_size) |
f32815d2 WB |
293 | { |
294 | usersize = usersize ? : size; | |
295 | if (copy_to_user(dst, src, usersize)) | |
296 | return -EFAULT; | |
324318f0 WB |
297 | if (usersize != aligned_size && |
298 | clear_user(dst + usersize, aligned_size - usersize)) | |
f32815d2 WB |
299 | return -EFAULT; |
300 | ||
301 | return 0; | |
302 | } | |
303 | EXPORT_SYMBOL_GPL(xt_data_to_user); | |
304 | ||
751a9c76 | 305 | #define XT_DATA_TO_USER(U, K, TYPE) \ |
f32815d2 WB |
306 | xt_data_to_user(U->data, K->data, \ |
307 | K->u.kernel.TYPE->usersize, \ | |
751a9c76 WB |
308 | K->u.kernel.TYPE->TYPE##size, \ |
309 | XT_ALIGN(K->u.kernel.TYPE->TYPE##size)) | |
f32815d2 WB |
310 | |
311 | int xt_match_to_user(const struct xt_entry_match *m, | |
312 | struct xt_entry_match __user *u) | |
313 | { | |
314 | return XT_OBJ_TO_USER(u, m, match, 0) || | |
751a9c76 | 315 | XT_DATA_TO_USER(u, m, match); |
f32815d2 WB |
316 | } |
317 | EXPORT_SYMBOL_GPL(xt_match_to_user); | |
318 | ||
319 | int xt_target_to_user(const struct xt_entry_target *t, | |
320 | struct xt_entry_target __user *u) | |
321 | { | |
322 | return XT_OBJ_TO_USER(u, t, target, 0) || | |
751a9c76 | 323 | XT_DATA_TO_USER(u, t, target); |
f32815d2 WB |
324 | } |
325 | EXPORT_SYMBOL_GPL(xt_target_to_user); | |
326 | ||
76108cea | 327 | static int match_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 328 | { |
5452e425 | 329 | const struct xt_match *m; |
2e4e6a17 HW |
330 | int have_rev = 0; |
331 | ||
332 | list_for_each_entry(m, &xt[af].match, list) { | |
333 | if (strcmp(m->name, name) == 0) { | |
334 | if (m->revision > *bestp) | |
335 | *bestp = m->revision; | |
336 | if (m->revision == revision) | |
337 | have_rev = 1; | |
338 | } | |
339 | } | |
656caff2 PM |
340 | |
341 | if (af != NFPROTO_UNSPEC && !have_rev) | |
342 | return match_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
343 | ||
2e4e6a17 HW |
344 | return have_rev; |
345 | } | |
346 | ||
76108cea | 347 | static int target_revfn(u8 af, const char *name, u8 revision, int *bestp) |
2e4e6a17 | 348 | { |
5452e425 | 349 | const struct xt_target *t; |
2e4e6a17 HW |
350 | int have_rev = 0; |
351 | ||
352 | list_for_each_entry(t, &xt[af].target, list) { | |
353 | if (strcmp(t->name, name) == 0) { | |
354 | if (t->revision > *bestp) | |
355 | *bestp = t->revision; | |
356 | if (t->revision == revision) | |
357 | have_rev = 1; | |
358 | } | |
359 | } | |
656caff2 PM |
360 | |
361 | if (af != NFPROTO_UNSPEC && !have_rev) | |
362 | return target_revfn(NFPROTO_UNSPEC, name, revision, bestp); | |
363 | ||
2e4e6a17 HW |
364 | return have_rev; |
365 | } | |
366 | ||
367 | /* Returns true or false (if no such extension at all) */ | |
76108cea | 368 | int xt_find_revision(u8 af, const char *name, u8 revision, int target, |
2e4e6a17 HW |
369 | int *err) |
370 | { | |
371 | int have_rev, best = -1; | |
372 | ||
7926dbfa | 373 | mutex_lock(&xt[af].mutex); |
2e4e6a17 HW |
374 | if (target == 1) |
375 | have_rev = target_revfn(af, name, revision, &best); | |
376 | else | |
377 | have_rev = match_revfn(af, name, revision, &best); | |
9e19bb6d | 378 | mutex_unlock(&xt[af].mutex); |
2e4e6a17 HW |
379 | |
380 | /* Nothing at all? Return 0 to try loading module. */ | |
381 | if (best == -1) { | |
382 | *err = -ENOENT; | |
383 | return 0; | |
384 | } | |
385 | ||
386 | *err = best; | |
387 | if (!have_rev) | |
388 | *err = -EPROTONOSUPPORT; | |
389 | return 1; | |
390 | } | |
391 | EXPORT_SYMBOL_GPL(xt_find_revision); | |
392 | ||
/*
 * textify_hooks - render a hook bitmask as a "/"-separated name list
 * @buf:     output buffer (always NUL-terminated on return, may truncate)
 * @size:    capacity of @buf
 * @mask:    bitmask of hook numbers to print
 * @nfproto: protocol family; selects the ARP or inet/bridge name table
 *
 * Used only for error reporting in xt_check_match()/xt_check_target().
 * Returns @buf for convenient use inside printk argument lists.
 */
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;	/* need a "/" separator before the next name */
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
	      ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		/* On truncation snprintf returns the would-be length;
		 * only advance by what could actually fit (res > 0 check
		 * guards against encoding errors).
		 */
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}
426 | ||
/**
 * xt_check_proc_name - check that name is suitable for /proc file creation
 *
 * @name: file name candidate
 * @size: length of buffer
 *
 * some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose, it checks that name is NUL terminated and isn't a 'special'
 * name, like "..".
 *
 * returns negative number on error or 0 if name is useable.
 */
int xt_check_proc_name(const char *name, unsigned int size)
{
	if (*name == '\0')
		return -EINVAL;

	/* Must be NUL-terminated within the buffer. */
	if (strnlen(name, size) == size)
		return -ENAMETOOLONG;

	/* '/' would escape the directory; "." and ".." are reserved. */
	if (strchr(name, '/') ||
	    strcmp(name, ".") == 0 ||
	    strcmp(name, "..") == 0)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(xt_check_proc_name);
456 | ||
/**
 * xt_check_match - centralized sanity checks for a match extension
 * @par:       check parameters (family, table name, hook mask, ...)
 * @size:      match data size as supplied by userspace
 * @proto:     protocol the rule applies to
 * @inv_proto: true if the rule's protocol test is inverted
 *
 * Validates size, table, hook and protocol constraints, then invokes the
 * extension's own ->checkentry() hook. Returns 0 on success or a negative
 * errno.
 */
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
511 | ||
/** xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rules target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;	/* total room left for matches */

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		/* each match header must be properly aligned */
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		/* enough room left for at least the fixed header? */
		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		/* declared size must cover at least the header itself */
		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		/* and must not run past the target */
		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}
552 | ||
2722971c | 553 | #ifdef CONFIG_COMPAT |
255d0dc3 | 554 | int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta) |
b386d9f5 | 555 | { |
255d0dc3 | 556 | struct xt_af *xp = &xt[af]; |
b386d9f5 | 557 | |
255d0dc3 ED |
558 | if (!xp->compat_tab) { |
559 | if (!xp->number) | |
560 | return -EINVAL; | |
561 | xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number); | |
562 | if (!xp->compat_tab) | |
563 | return -ENOMEM; | |
564 | xp->cur = 0; | |
565 | } | |
b386d9f5 | 566 | |
255d0dc3 ED |
567 | if (xp->cur >= xp->number) |
568 | return -EINVAL; | |
b386d9f5 | 569 | |
255d0dc3 ED |
570 | if (xp->cur) |
571 | delta += xp->compat_tab[xp->cur - 1].delta; | |
572 | xp->compat_tab[xp->cur].offset = offset; | |
573 | xp->compat_tab[xp->cur].delta = delta; | |
574 | xp->cur++; | |
b386d9f5 PM |
575 | return 0; |
576 | } | |
577 | EXPORT_SYMBOL_GPL(xt_compat_add_offset); | |
578 | ||
76108cea | 579 | void xt_compat_flush_offsets(u_int8_t af) |
b386d9f5 | 580 | { |
255d0dc3 ED |
581 | if (xt[af].compat_tab) { |
582 | vfree(xt[af].compat_tab); | |
583 | xt[af].compat_tab = NULL; | |
584 | xt[af].number = 0; | |
5a6351ee | 585 | xt[af].cur = 0; |
b386d9f5 PM |
586 | } |
587 | } | |
588 | EXPORT_SYMBOL_GPL(xt_compat_flush_offsets); | |
589 | ||
3e5e524f | 590 | int xt_compat_calc_jump(u_int8_t af, unsigned int offset) |
b386d9f5 | 591 | { |
255d0dc3 ED |
592 | struct compat_delta *tmp = xt[af].compat_tab; |
593 | int mid, left = 0, right = xt[af].cur - 1; | |
594 | ||
595 | while (left <= right) { | |
596 | mid = (left + right) >> 1; | |
597 | if (offset > tmp[mid].offset) | |
598 | left = mid + 1; | |
599 | else if (offset < tmp[mid].offset) | |
600 | right = mid - 1; | |
601 | else | |
602 | return mid ? tmp[mid - 1].delta : 0; | |
603 | } | |
5a6351ee | 604 | return left ? tmp[left - 1].delta : 0; |
b386d9f5 PM |
605 | } |
606 | EXPORT_SYMBOL_GPL(xt_compat_calc_jump); | |
607 | ||
/*
 * xt_compat_init_offsets - declare the expected number of delta slots
 * @af:     protocol family being translated
 * @number: rule count of the incoming compat table
 *
 * The delta table itself is allocated lazily by xt_compat_add_offset().
 * Caller must hold xt[af].compat_mutex.
 */
void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);
614 | ||
5452e425 | 615 | int xt_compat_match_offset(const struct xt_match *match) |
2722971c | 616 | { |
9fa492cd PM |
617 | u_int16_t csize = match->compatsize ? : match->matchsize; |
618 | return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize); | |
619 | } | |
620 | EXPORT_SYMBOL_GPL(xt_compat_match_offset); | |
621 | ||
/*
 * xt_compat_match_from_user - translate one compat match into native layout
 * @m:      compat-layout match in the source blob (aliased as *cm below)
 * @dstptr: in/out cursor into the native destination blob
 * @size:   in/out running total, grown by the layout size difference
 *
 * Copies the header and payload (via the extension's own translator when
 * one exists), zeroes alignment padding, fixes up the recorded size and
 * advances the cursors. Drops the module reference taken during lookup.
 */
void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	/* zero the native-layout alignment padding */
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	/* Save the name BEFORE dropping the module reference: once the ref
	 * is gone the match (and its name string) may be freed.
	 */
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
651 | ||
751a9c76 WB |
652 | #define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ |
653 | xt_data_to_user(U->data, K->data, \ | |
654 | K->u.kernel.TYPE->usersize, \ | |
655 | C_SIZE, \ | |
656 | COMPAT_XT_ALIGN(C_SIZE)) | |
657 | ||
/*
 * xt_compat_match_to_user - copy one native match out in compat layout
 * @m:      native-layout match in the kernel blob
 * @dstptr: in/out cursor into the userspace destination
 * @size:   in/out running total, shrunk by the layout size difference
 *
 * Returns 0 on success or -EFAULT on a userspace access fault.
 */
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	/* size as seen by 32-bit userland, i.e. minus the layout delta */
	u_int16_t msize = m->u.user.match_size - off;

	if (XT_OBJ_TO_USER(cm, m, match, msize))
		return -EFAULT;

	if (match->compat_to_user) {
		/* extension provides its own native->compat translator */
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
fc1221b3 | 682 | |
7ed2abdd FW |
683 | /* non-compat version may have padding after verdict */ |
684 | struct compat_xt_standard_target { | |
685 | struct compat_xt_entry_target t; | |
686 | compat_uint_t verdict; | |
687 | }; | |
688 | ||
/*
 * xt_compat_check_entry_offsets - compat twin of xt_check_entry_offsets()
 * @base:  pointer to the compat arp/ip/ip6t_entry
 * @elems: pointer to the first match, i.e. entry->elems
 * @target_offset: the entry's target_offset field
 * @next_offset:   the entry's next_offset field
 *
 * Same offset/size validation as xt_check_entry_offsets(), but using the
 * 32-bit compat structure layouts and alignment. Returns 0 or -EINVAL.
 */
int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	/* target must not start inside the fixed entry header */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	/* standard targets carry exactly one verdict word, nothing more */
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical. In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
9fa492cd | 724 | #endif /* CONFIG_COMPAT */ |
2722971c | 725 | |
7d35812c FW |
726 | /** |
727 | * xt_check_entry_offsets - validate arp/ip/ip6t_entry | |
728 | * | |
729 | * @base: pointer to arp/ip/ip6t_entry | |
ce683e5f | 730 | * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems |
7d35812c FW |
731 | * @target_offset: the arp/ip/ip6_t->target_offset |
732 | * @next_offset: the arp/ip/ip6_t->next_offset | |
733 | * | |
13631bfc FW |
734 | * validates that target_offset and next_offset are sane and that all |
735 | * match sizes (if any) align with the target offset. | |
7d35812c | 736 | * |
ce683e5f | 737 | * This function does not validate the targets or matches themselves, it |
13631bfc FW |
738 | * only tests that all the offsets and sizes are correct, that all |
739 | * match structures are aligned, and that the last structure ends where | |
740 | * the target structure begins. | |
741 | * | |
742 | * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version. | |
ce683e5f | 743 | * |
7d35812c FW |
744 | * The arp/ip/ip6t_entry structure @base must have passed following tests: |
745 | * - it must point to a valid memory location | |
746 | * - base to base + next_offset must be accessible, i.e. not exceed allocated | |
747 | * length. | |
748 | * | |
13631bfc FW |
749 | * A well-formed entry looks like this: |
750 | * | |
751 | * ip(6)t_entry match [mtdata] match [mtdata] target [tgdata] ip(6)t_entry | |
752 | * e->elems[]-----' | | | |
753 | * matchsize | | | |
754 | * matchsize | | | |
755 | * | | | |
756 | * target_offset---------------------------------' | | |
757 | * next_offset---------------------------------------------------' | |
758 | * | |
759 | * elems[]: flexible array member at end of ip(6)/arpt_entry struct. | |
760 | * This is where matches (if any) and the target reside. | |
761 | * target_offset: beginning of target. | |
762 | * next_offset: start of the next rule; also: size of this rule. | |
763 | * Since targets have a minimum size, target_offset + minlen <= next_offset. | |
764 | * | |
765 | * Every match stores its size, sum of sizes must not exceed target_offset. | |
766 | * | |
7d35812c FW |
767 | * Return: 0 on success, negative errno on failure. |
768 | */ | |
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	/* room for at least the fixed target header before the next rule */
	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	/* declared target size must cover at least its own header */
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	/* standard targets carry exactly one verdict word, nothing more */
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
800 | ||
f4dc7771 FW |
801 | /** |
802 | * xt_alloc_entry_offsets - allocate array to store rule head offsets | |
803 | * | |
804 | * @size: number of entries | |
805 | * | |
806 | * Return: NULL or kmalloc'd or vmalloc'd array | |
807 | */ | |
808 | unsigned int *xt_alloc_entry_offsets(unsigned int size) | |
809 | { | |
752ade68 | 810 | return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO); |
f4dc7771 | 811 | |
f4dc7771 FW |
812 | } |
813 | EXPORT_SYMBOL(xt_alloc_entry_offsets); | |
814 | ||
/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offset
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int lo = 0, hi = size;

	/* classic half-open binary search over the sorted offset array */
	while (hi > lo) {
		int mid = (lo + hi) / 2u;

		if (offsets[mid] > target)
			hi = mid;
		else if (offsets[mid] < target)
			lo = mid + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);
841 | ||
/**
 * xt_check_target - centralized sanity checks for a target extension
 * @par:       check parameters (family, table name, hook mask, ...)
 * @size:      target data size as supplied by userspace
 * @proto:     protocol the rule applies to
 * @inv_proto: true if the rule's protocol test is inverted
 *
 * Mirror of xt_check_match(): validates size, table, hook and protocol
 * constraints, then invokes the extension's own ->checkentry() hook.
 * Returns 0 on success or a negative errno.
 */
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);
891 | ||
d7591f0c FW |
/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		/* copy one byte less; termination is forced further down */
		memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		user += sizeof(*info);
	}
	/* userspace need not NUL-terminate the name; enforce it here */
	info->name[sizeof(info->name) - 1] = '\0';

	/* 64-bit multiply so a huge num_counters cannot wrap the size */
	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	/* remaining length must match the claimed counter count exactly */
	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
965 | ||
2722971c | 966 | #ifdef CONFIG_COMPAT |
5452e425 | 967 | int xt_compat_target_offset(const struct xt_target *target) |
2722971c | 968 | { |
9fa492cd PM |
969 | u_int16_t csize = target->compatsize ? : target->targetsize; |
970 | return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize); | |
971 | } | |
972 | EXPORT_SYMBOL_GPL(xt_compat_target_offset); | |
973 | ||
/**
 * xt_compat_target_from_user - convert a compat target entry to native form
 * @t:      source entry (compat layout); also the type used for writing
 * @dstptr: in/out pointer to the destination blob cursor
 * @size:   in/out running total, grown by the native/compat size delta
 *
 * Expands the 32-bit (compat) on-disk layout at @t into the native layout
 * at *@dstptr, using the target's compat_from_user callback when provided,
 * and advances both cursors.
 */
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	/* zero the alignment padding so no kernel memory leaks to userspace
	 * on a later copy-out */
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	/* stash the name before module_put(): the module (and its name
	 * string) may go away once the reference is dropped */
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);
1003 | ||
739674fb JE |
/**
 * xt_compat_target_to_user - copy a native target entry to compat userspace
 * @t:      native-layout source entry
 * @dstptr: in/out userspace destination cursor
 * @size:   in/out running total, shrunk by the native/compat size delta
 *
 * Inverse of xt_compat_target_from_user(): shrinks the native layout back
 * to the 32-bit representation while copying to userspace.
 *
 * Return: 0 on success, -EFAULT if any copy to userspace fails.
 */
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	/* copy header + name, with the compat-adjusted size field */
	if (XT_OBJ_TO_USER(ct, t, target, tsize))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
2722971c DM |
1028 | #endif |
1029 | ||
2e4e6a17 HW |
/**
 * xt_alloc_table_info - allocate a table blob for @size bytes of rules
 * @size: size in bytes of the ruleset that will follow the header
 *
 * Only the xt_table_info header is zeroed; the trailing rule area is
 * left uninitialized for the caller to fill.
 *
 * Return: new table info, or NULL when @size is implausible or memory
 * is exhausted.
 */
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	/* reject overflow of the addition and absurdly large tables */
	if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
		return NULL;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	/* __GFP_NORETRY is not fully supported by kvmalloc but it should
	 * work reasonably well if sz is too large and bail out rather
	 * than shoot all processes down before realizing there is nothing
	 * more to reclaim.
	 */
	info = kvmalloc(sz, GFP_KERNEL | __GFP_NORETRY);
	if (!info)
		return NULL;

	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);
1056 | ||
1057 | void xt_free_table_info(struct xt_table_info *info) | |
1058 | { | |
1059 | int cpu; | |
1060 | ||
f3c5c1bf | 1061 | if (info->jumpstack != NULL) { |
f6b50824 ED |
1062 | for_each_possible_cpu(cpu) |
1063 | kvfree(info->jumpstack[cpu]); | |
1064 | kvfree(info->jumpstack); | |
f3c5c1bf JE |
1065 | } |
1066 | ||
711bdde6 | 1067 | kvfree(info); |
2e4e6a17 HW |
1068 | } |
1069 | EXPORT_SYMBOL(xt_free_table_info); | |
1070 | ||
/* Find table by name, grabs mutex & ref. Returns NULL on error.
 *
 * On success the per-AF mutex is held and a module reference is taken;
 * the caller releases both via xt_table_unlock() / module_put().
 * If the table is absent in @net but present in init_net, its
 * ->table_init() is invoked to instantiate it in @net first.
 */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t, *found = NULL;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;

	if (net == &init_net)
		goto out;

	/* Table doesn't exist in this netns, re-try init */
	list_for_each_entry(t, &init_net.xt.tables[af], list) {
		if (strcmp(t->name, name))
			continue;
		if (!try_module_get(t->me)) {
			/* module is unloading; nothing to instantiate */
			mutex_unlock(&xt[af].mutex);
			return NULL;
		}

		/* drop the mutex: table_init() registers and may sleep */
		mutex_unlock(&xt[af].mutex);
		if (t->table_init(net) != 0) {
			module_put(t->me);
			return NULL;
		}

		found = t;

		mutex_lock(&xt[af].mutex);
		break;
	}

	if (!found)
		goto out;

	/* and once again: */
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0)
			return t;

	/* init succeeded but the table vanished while unlocked */
	module_put(found->me);
 out:
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);
1120 | ||
/* Counterpart to xt_find_table_lock(): drops the per-AF mutex
 * (the module reference is released separately by the caller). */
void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
1126 | ||
#ifdef CONFIG_COMPAT
/* Serialize 32-bit-compat table operations for one address family. */
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif
2e4e6a17 | 1140 | |
7f5c6d4f ED |
/* Per-cpu sequence counter; NOTE(review): presumably bumped around rule
 * traversal via the xt_write_recseq helpers in x_tables.h -- confirm there. */
DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

/* Static key; NOTE(review): looks like it is flipped on while a TEE target
 * is in use, gating the jumpstack reentrancy path -- confirm in callers. */
struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);
1146 | ||
f3c5c1bf JE |
/* Allocate the per-cpu jump stacks used while traversing a ruleset.
 * Returns 0 on success, -ENOMEM on failure (partial allocations are
 * freed later by xt_free_table_info(), see comment below). */
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	/* pointer array indexed by cpu; kvzalloc only when it spans pages */
	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = kvzalloc(size, GFP_KERNEL);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no stack needed */
	if (i->stacksize == 0)
		return 0;

	/* Jumpstack needs to be able to record two full callchains, one
	 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu, on reentry
	 * the upper half of the stack is used.
	 *
	 * see the jumpstack setup in ipt_do_table() for more details.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		i->jumpstack[cpu] = kvmalloc_node(size, GFP_KERNEL,
			cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}
942e4a2b | 1189 | |
82b68ecd FW |
1190 | struct xt_counters *xt_counters_alloc(unsigned int counters) |
1191 | { | |
1192 | struct xt_counters *mem; | |
1193 | ||
1194 | if (counters == 0 || counters > INT_MAX / sizeof(*mem)) | |
1195 | return NULL; | |
1196 | ||
1197 | counters *= sizeof(*mem); | |
1198 | if (counters > XT_MAX_TABLE_SIZE) | |
1199 | return NULL; | |
1200 | ||
1201 | return vzalloc(counters); | |
1202 | } | |
1203 | EXPORT_SYMBOL(xt_counters_alloc); | |
1204 | ||
2e4e6a17 HW |
/**
 * xt_replace_table - atomically swap in a new ruleset blob
 * @table:        table whose private blob is replaced
 * @num_counters: entry count the caller believes the old blob has
 * @newinfo:      new blob to install
 * @error:        out: errno on failure
 *
 * Return: the old blob (caller frees it and reads its counters),
 * or NULL with *@error set.
 */
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		/* note: logs the replaced (old) blob's entry count */
		audit_log(current->audit_context, GFP_KERNEL,
			  AUDIT_NETFILTER_CFG,
			  "table=%s family=%u entries=%u",
			  table->name, table->af, private->number);
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
1261 | ||
35aad0ff JE |
/**
 * xt_register_table - register a new table for one address family
 * @net:         netns the table lives in
 * @input_table: template table (duplicated, never linked directly)
 * @bootstrap:   minimal placeholder blob so xt_replace_table() works
 * @newinfo:     the real initial ruleset blob
 *
 * Return: the registered (duplicated) table, or ERR_PTR on failure.
 */
struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);
1310 | ||
/* Unlink and free a table; returns its private blob, which the caller
 * owns and must release (typically via xt_free_table_info()). */
void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
1324 | ||
1325 | #ifdef CONFIG_PROC_FS | |
715cf35a AD |
/* Per-open state for the /proc/net/<af>_tables_names file. */
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;	/* address family this proc file lists */
};
/* seq_file iterator over one AF's table list; the per-AF mutex is held
 * from ->start to ->stop. */
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	/* skip anonymous (empty-name) tables */
	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		/* AF was stashed as the proc entry's data at create time */
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
1394 | ||
eb132205 JE |
/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

/* Traversal phases: first the NFPROTO_UNSPEC list, then the
 * family-specific list, each under its own per-AF mutex. */
enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

/* Advance the two-phase iterator; @is_target selects the target list
 * over the match list. Returns NULL when both lists are exhausted
 * (xt_mttg_seq_stop() releases whichever mutex is still held). */
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* UNSPEC list done: hand over to the specific AF's list */
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

/* Restart the iterator and fast-forward to position *pos. */
static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

/* Drop whichever per-AF mutex the traversal currently holds. */
static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}
2e4e6a17 | 1477 | |
eb132205 JE |
/* /proc/net/<af>_tables_matches: thin wrappers over the mttg iterator
 * with is_target == false. */
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		/* curr == head is the sentinel, not a real entry */
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	/* AF stashed as proc entry data at creation time */
	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
2e4e6a17 | 1530 | |
025d93d1 AD |
/* /proc/net/<af>_tables_targets: same mttg iterator, is_target == true. */
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		/* curr == head is the sentinel, not a real entry */
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	/* AF stashed as proc entry data at creation time */
	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};
1583 | ||
1584 | #define FORMAT_TABLES "_tables_names" | |
1585 | #define FORMAT_MATCHES "_tables_matches" | |
1586 | #define FORMAT_TARGETS "_tables_targets" | |
1587 | ||
1588 | #endif /* CONFIG_PROC_FS */ | |
1589 | ||
/**
 * xt_hook_ops_alloc - set up hooks for a new table
 * @table: table with metadata needed to set up hooks
 * @fn: Hook function
 *
 * This function will create the nf_hook_ops that the x_table needs
 * to hand to xt_hook_link_net().
 *
 * Return: kcalloc'd ops array (one entry per bit set in valid_hooks),
 * or ERR_PTR(-EINVAL/-ENOMEM). Caller frees with kfree().
 */
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;

	if (!num_hooks)
		return ERR_PTR(-EINVAL);

	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	/* walk the mask bit by bit: hooknum tracks the bit position,
	 * i indexes the next ops slot to fill */
	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
2b95efe7 | 1627 | |
/**
 * xt_proto_init - create the per-AF proc files for one netns
 * @net: netns to create the files in
 * @af:  address family (indexes xt_prefix[])
 *
 * Creates <prefix>_tables_names, _tables_matches and _tables_targets,
 * owned by the netns-local root where the id mappings allow it.
 * On failure already-created entries are rolled back in reverse order.
 *
 * Return: 0 on success, -EINVAL for a bad @af, -1 on proc failure.
 */
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;


#ifdef CONFIG_PROC_FS
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
1690 | ||
76108cea | 1691 | void xt_proto_fini(struct net *net, u_int8_t af) |
2e4e6a17 HW |
1692 | { |
1693 | #ifdef CONFIG_PROC_FS | |
1694 | char buf[XT_FUNCTION_MAXNAMELEN]; | |
1695 | ||
ce18afe5 | 1696 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1697 | strlcat(buf, FORMAT_TABLES, sizeof(buf)); |
ece31ffd | 1698 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 | 1699 | |
ce18afe5 | 1700 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1701 | strlcat(buf, FORMAT_TARGETS, sizeof(buf)); |
ece31ffd | 1702 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 | 1703 | |
ce18afe5 | 1704 | strlcpy(buf, xt_prefix[af], sizeof(buf)); |
2e4e6a17 | 1705 | strlcat(buf, FORMAT_MATCHES, sizeof(buf)); |
ece31ffd | 1706 | remove_proc_entry(buf, net->proc_net); |
2e4e6a17 HW |
1707 | #endif /*CONFIG_PROC_FS*/ |
1708 | } | |
1709 | EXPORT_SYMBOL_GPL(xt_proto_fini); | |
1710 | ||
f28e15ba FW |
1711 | /** |
1712 | * xt_percpu_counter_alloc - allocate x_tables rule counter | |
1713 | * | |
ae0ac0ed | 1714 | * @state: pointer to xt_percpu allocation state |
f28e15ba FW |
1715 | * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct |
1716 | * | |
1717 | * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then | |
1718 | * contain the address of the real (percpu) counter. | |
1719 | * | |
1720 | * Rule evaluation needs to use xt_get_this_cpu_counter() helper | |
1721 | * to fetch the real percpu counter. | |
1722 | * | |
ae0ac0ed FW |
1723 | * To speed up allocation and improve data locality, a 4kb block is |
1724 | * allocated. | |
1725 | * | |
1726 | * xt_percpu_counter_alloc_state contains the base address of the | |
1727 | * allocated page and the current sub-offset. | |
1728 | * | |
f28e15ba FW |
1729 | * returns false on error. |
1730 | */ | |
ae0ac0ed FW |
1731 | bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state, |
1732 | struct xt_counters *counter) | |
f28e15ba | 1733 | { |
ae0ac0ed | 1734 | BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2)); |
f28e15ba FW |
1735 | |
1736 | if (nr_cpu_ids <= 1) | |
1737 | return true; | |
1738 | ||
ae0ac0ed FW |
1739 | if (!state->mem) { |
1740 | state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE, | |
1741 | XT_PCPU_BLOCK_SIZE); | |
1742 | if (!state->mem) | |
1743 | return false; | |
1744 | } | |
1745 | counter->pcnt = (__force unsigned long)(state->mem + state->off); | |
1746 | state->off += sizeof(*counter); | |
1747 | if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) { | |
1748 | state->mem = NULL; | |
1749 | state->off = 0; | |
1750 | } | |
f28e15ba FW |
1751 | return true; |
1752 | } | |
1753 | EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc); | |
1754 | ||
4d31eef5 FW |
1755 | void xt_percpu_counter_free(struct xt_counters *counters) |
1756 | { | |
1757 | unsigned long pcnt = counters->pcnt; | |
1758 | ||
ae0ac0ed | 1759 | if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0) |
4d31eef5 FW |
1760 | free_percpu((void __percpu *)pcnt); |
1761 | } | |
1762 | EXPORT_SYMBOL_GPL(xt_percpu_counter_free); | |
1763 | ||
8d870052 AD |
1764 | static int __net_init xt_net_init(struct net *net) |
1765 | { | |
1766 | int i; | |
1767 | ||
7e9c6eeb | 1768 | for (i = 0; i < NFPROTO_NUMPROTO; i++) |
8d870052 AD |
1769 | INIT_LIST_HEAD(&net->xt.tables[i]); |
1770 | return 0; | |
1771 | } | |
1772 | ||
/* Per-netns hooks: initialize this module's state for each new namespace.
 * No .exit: the table lists hold no memory of their own to release.
 */
static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};
2e4e6a17 HW |
1776 | |
1777 | static int __init xt_init(void) | |
1778 | { | |
942e4a2b SH |
1779 | unsigned int i; |
1780 | int rv; | |
1781 | ||
1782 | for_each_possible_cpu(i) { | |
7f5c6d4f | 1783 | seqcount_init(&per_cpu(xt_recseq, i)); |
942e4a2b | 1784 | } |
2e4e6a17 | 1785 | |
7e9c6eeb | 1786 | xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); |
2e4e6a17 HW |
1787 | if (!xt) |
1788 | return -ENOMEM; | |
1789 | ||
7e9c6eeb | 1790 | for (i = 0; i < NFPROTO_NUMPROTO; i++) { |
9e19bb6d | 1791 | mutex_init(&xt[i].mutex); |
2722971c DM |
1792 | #ifdef CONFIG_COMPAT |
1793 | mutex_init(&xt[i].compat_mutex); | |
255d0dc3 | 1794 | xt[i].compat_tab = NULL; |
2722971c | 1795 | #endif |
2e4e6a17 HW |
1796 | INIT_LIST_HEAD(&xt[i].target); |
1797 | INIT_LIST_HEAD(&xt[i].match); | |
2e4e6a17 | 1798 | } |
8d870052 AD |
1799 | rv = register_pernet_subsys(&xt_net_ops); |
1800 | if (rv < 0) | |
1801 | kfree(xt); | |
1802 | return rv; | |
2e4e6a17 HW |
1803 | } |
1804 | ||
/* Module exit: tear down in reverse order of xt_init() — unregister the
 * per-netns operations first, then free the per-protocol xt[] array.
 */
static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}
1810 | ||
1811 | module_init(xt_init); | |
1812 | module_exit(xt_fini); | |
1813 |