net/netfilter/ipset/ip_set_core.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
3 * Patrick Schaaf <bof@bof.de>
4 * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
5 */
6
7 /* Kernel module for IP set management */
8
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/ip.h>
13 #include <linux/skbuff.h>
14 #include <linux/spinlock.h>
15 #include <linux/rculist.h>
16 #include <net/netlink.h>
17 #include <net/net_namespace.h>
18 #include <net/netns/generic.h>
19
20 #include <linux/netfilter.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter/nfnetlink.h>
23 #include <linux/netfilter/ipset/ip_set.h>
24
25 static LIST_HEAD(ip_set_type_list); /* all registered set types */
26 static DEFINE_MUTEX(ip_set_type_mutex); /* protects ip_set_type_list */
27 static DEFINE_RWLOCK(ip_set_ref_lock); /* protects the set refs */
28
29 struct ip_set_net {
30 struct ip_set * __rcu *ip_set_list; /* all individual sets */
31 ip_set_id_t ip_set_max; /* max number of sets */
32 bool is_deleted; /* deleted by ip_set_net_exit */
33 bool is_destroyed; /* all sets are destroyed */
34 };
35
36 static unsigned int ip_set_net_id __read_mostly;
37
38 static struct ip_set_net *ip_set_pernet(struct net *net)
39 {
40 return net_generic(net, ip_set_net_id);
41 }
42
43 #define IP_SET_INC 64
44 #define STRNCMP(a, b) (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
45
46 static unsigned int max_sets;
47
48 module_param(max_sets, int, 0600);
49 MODULE_PARM_DESC(max_sets, "maximal number of sets");
50 MODULE_LICENSE("GPL");
51 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
52 MODULE_DESCRIPTION("core IP set support");
53 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
54
55 /* When the nfnl mutex or ip_set_ref_lock is held: */
56 #define ip_set_dereference(p) \
57 rcu_dereference_protected(p, \
58 lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
59 lockdep_is_held(&ip_set_ref_lock))
60 #define ip_set(inst, id) \
61 ip_set_dereference((inst)->ip_set_list)[id]
62 #define ip_set_ref_netlink(inst,id) \
63 rcu_dereference_raw((inst)->ip_set_list)[id]
64
65 /* The set types are implemented in modules and registered set types
66 * can be found in ip_set_type_list. Adding/deleting types is
67 * serialized by ip_set_type_mutex.
68 */
69
70 static void
71 ip_set_type_lock(void)
72 {
73 mutex_lock(&ip_set_type_mutex);
74 }
75
76 static void
77 ip_set_type_unlock(void)
78 {
79 mutex_unlock(&ip_set_type_mutex);
80 }
81
82 /* Register and deregister settype */
83
84 static struct ip_set_type *
85 find_set_type(const char *name, u8 family, u8 revision)
86 {
87 struct ip_set_type *type;
88
89 list_for_each_entry_rcu(type, &ip_set_type_list, list)
90 if (STRNCMP(type->name, name) &&
91 (type->family == family ||
92 type->family == NFPROTO_UNSPEC) &&
93 revision >= type->revision_min &&
94 revision <= type->revision_max)
95 return type;
96 return NULL;
97 }
98
99 /* Unlock, try to load a set type module and lock again */
100 static bool
101 load_settype(const char *name)
102 {
103 nfnl_unlock(NFNL_SUBSYS_IPSET);
104 pr_debug("try to load ip_set_%s\n", name);
105 if (request_module("ip_set_%s", name) < 0) {
106 pr_warn("Can't find ip_set type %s\n", name);
107 nfnl_lock(NFNL_SUBSYS_IPSET);
108 return false;
109 }
110 nfnl_lock(NFNL_SUBSYS_IPSET);
111 return true;
112 }
113
114 /* Find a set type and reference it */
115 #define find_set_type_get(name, family, revision, found) \
116 __find_set_type_get(name, family, revision, found, false)
117
118 static int
119 __find_set_type_get(const char *name, u8 family, u8 revision,
120 struct ip_set_type **found, bool retry)
121 {
122 struct ip_set_type *type;
123 int err;
124
125 if (retry && !load_settype(name))
126 return -IPSET_ERR_FIND_TYPE;
127
128 rcu_read_lock();
129 *found = find_set_type(name, family, revision);
130 if (*found) {
131 err = !try_module_get((*found)->me) ? -EFAULT : 0;
132 goto unlock;
133 }
134 /* The type with this name is already loaded,
135 * but the requested revision is not supported
136 */
137 list_for_each_entry_rcu(type, &ip_set_type_list, list)
138 if (STRNCMP(type->name, name)) {
139 err = -IPSET_ERR_FIND_TYPE;
140 goto unlock;
141 }
142 rcu_read_unlock();
143
144 return retry ? -IPSET_ERR_FIND_TYPE :
145 __find_set_type_get(name, family, revision, found, true);
146
147 unlock:
148 rcu_read_unlock();
149 return err;
150 }
151
152 /* Find a given set type by name and family.
153 * If we succeeded, the supported minimum and maximum revisions are
154 * filled out.
155 */
156 #define find_set_type_minmax(name, family, min, max) \
157 __find_set_type_minmax(name, family, min, max, false)
158
159 static int
160 __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
161 bool retry)
162 {
163 struct ip_set_type *type;
164 bool found = false;
165
166 if (retry && !load_settype(name))
167 return -IPSET_ERR_FIND_TYPE;
168
169 *min = 255; *max = 0;
170 rcu_read_lock();
171 list_for_each_entry_rcu(type, &ip_set_type_list, list)
172 if (STRNCMP(type->name, name) &&
173 (type->family == family ||
174 type->family == NFPROTO_UNSPEC)) {
175 found = true;
176 if (type->revision_min < *min)
177 *min = type->revision_min;
178 if (type->revision_max > *max)
179 *max = type->revision_max;
180 }
181 rcu_read_unlock();
182 if (found)
183 return 0;
184
185 return retry ? -IPSET_ERR_FIND_TYPE :
186 __find_set_type_minmax(name, family, min, max, true);
187 }
188
189 #define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \
190 (f) == NFPROTO_IPV6 ? "inet6" : "any")
191
192 /* Register a set type structure. The type is identified by
193 * the unique triple of name, family and revision.
194 */
195 int
196 ip_set_type_register(struct ip_set_type *type)
197 {
198 int ret = 0;
199
200 if (type->protocol != IPSET_PROTOCOL) {
201 pr_warn("ip_set type %s, family %s, revision %u:%u uses wrong protocol version %u (want %u)\n",
202 type->name, family_name(type->family),
203 type->revision_min, type->revision_max,
204 type->protocol, IPSET_PROTOCOL);
205 return -EINVAL;
206 }
207
208 ip_set_type_lock();
209 if (find_set_type(type->name, type->family, type->revision_min)) {
210 /* Duplicate! */
211 pr_warn("ip_set type %s, family %s with revision min %u already registered!\n",
212 type->name, family_name(type->family),
213 type->revision_min);
214 ip_set_type_unlock();
215 return -EINVAL;
216 }
217 list_add_rcu(&type->list, &ip_set_type_list);
218 pr_debug("type %s, family %s, revision %u:%u registered.\n",
219 type->name, family_name(type->family),
220 type->revision_min, type->revision_max);
221 ip_set_type_unlock();
222
223 return ret;
224 }
225 EXPORT_SYMBOL_GPL(ip_set_type_register);
226
227 /* Unregister a set type. There's a small race with ip_set_create */
228 void
229 ip_set_type_unregister(struct ip_set_type *type)
230 {
231 ip_set_type_lock();
232 if (!find_set_type(type->name, type->family, type->revision_min)) {
233 pr_warn("ip_set type %s, family %s with revision min %u not registered\n",
234 type->name, family_name(type->family),
235 type->revision_min);
236 ip_set_type_unlock();
237 return;
238 }
239 list_del_rcu(&type->list);
240 pr_debug("type %s, family %s with revision min %u unregistered.\n",
241 type->name, family_name(type->family), type->revision_min);
242 ip_set_type_unlock();
243
244 synchronize_rcu();
245 }
246 EXPORT_SYMBOL_GPL(ip_set_type_unregister);
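
/* Illustrative sketch (not part of this file): how a set type module
 * typically uses ip_set_type_register()/ip_set_type_unregister(). The
 * type name "hash:example", my_type_create() and my_set_type are
 * hypothetical placeholders; a real type also fills in create_policy,
 * adt_policy and its variant callbacks.
 */
static int my_type_create(struct net *net, struct ip_set *set,
			  struct nlattr *tb[], u32 flags)
{
	/* Allocate the type-private data and set set->variant here. */
	return -EOPNOTSUPP;	/* stub */
}

static struct ip_set_type my_set_type __read_mostly = {
	.name		= "hash:example",
	.protocol	= IPSET_PROTOCOL,
	.features	= IPSET_TYPE_IP,
	.dimension	= IPSET_DIM_ONE,
	.family		= NFPROTO_UNSPEC,
	.revision_min	= 0,
	.revision_max	= 0,
	.create		= my_type_create,
	.me		= THIS_MODULE,
};

static int __init my_set_type_init(void)
{
	return ip_set_type_register(&my_set_type);
}

static void __exit my_set_type_fini(void)
{
	rcu_barrier();	/* wait for outstanding kfree_rcu() work */
	ip_set_type_unregister(&my_set_type);
}

module_init(my_set_type_init);
module_exit(my_set_type_fini);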
247
248 /* Utility functions */
249 void *
250 ip_set_alloc(size_t size)
251 {
252 void *members = NULL;
253
254 if (size < KMALLOC_MAX_SIZE)
255 members = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
256
257 if (members) {
258 pr_debug("%p: allocated with kmalloc\n", members);
259 return members;
260 }
261
262 members = vzalloc(size);
263 if (!members)
264 return NULL;
265 pr_debug("%p: allocated with vmalloc\n", members);
266
267 return members;
268 }
269 EXPORT_SYMBOL_GPL(ip_set_alloc);
270
271 void
272 ip_set_free(void *members)
273 {
274 pr_debug("%p: free with %s\n", members,
275 is_vmalloc_addr(members) ? "vfree" : "kfree");
276 kvfree(members);
277 }
278 EXPORT_SYMBOL_GPL(ip_set_free);
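
/* Illustrative sketch: a type allocating its per-set storage through the
 * kmalloc-with-vmalloc-fallback helper above and releasing it with
 * ip_set_free(). struct my_map and both functions are hypothetical.
 */
struct my_map {
	u32 elements;
	unsigned long members[];
};

static struct my_map *my_map_alloc(u32 elements)
{
	struct my_map *map;

	map = ip_set_alloc(struct_size(map, members, BITS_TO_LONGS(elements)));
	if (!map)
		return NULL;
	map->elements = elements;
	return map;
}

static void my_map_destroy(struct my_map *map)
{
	ip_set_free(map);	/* kvfree() picks kfree() or vfree() */
}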
279
280 static bool
281 flag_nested(const struct nlattr *nla)
282 {
283 return nla->nla_type & NLA_F_NESTED;
284 }
285
286 static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
287 [IPSET_ATTR_IPADDR_IPV4] = { .type = NLA_U32 },
288 [IPSET_ATTR_IPADDR_IPV6] = { .type = NLA_BINARY,
289 .len = sizeof(struct in6_addr) },
290 };
291
292 int
293 ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
294 {
295 struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
296
297 if (unlikely(!flag_nested(nla)))
298 return -IPSET_ERR_PROTOCOL;
299 if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
300 ipaddr_policy, NULL))
301 return -IPSET_ERR_PROTOCOL;
302 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
303 return -IPSET_ERR_PROTOCOL;
304
305 *ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
306 return 0;
307 }
308 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);
309
310 int
311 ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
312 {
313 struct nlattr *tb[IPSET_ATTR_IPADDR_MAX + 1];
314
315 if (unlikely(!flag_nested(nla)))
316 return -IPSET_ERR_PROTOCOL;
317
318 if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
319 ipaddr_policy, NULL))
320 return -IPSET_ERR_PROTOCOL;
321 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
322 return -IPSET_ERR_PROTOCOL;
323
324 memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
325 sizeof(struct in6_addr));
326 return 0;
327 }
328 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
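
/* Illustrative sketch: a set type declares IPSET_ATTR_IP as NLA_NESTED in
 * its adt_policy, so tb[IPSET_ATTR_IP] handed to its uadt callback is the
 * nested attribute expected by the helpers above. my_get_addr() is a
 * hypothetical wrapper.
 */
static int my_get_addr(const struct ip_set *set, struct nlattr *tb[],
		       union nf_inet_addr *addr)
{
	if (set->family == NFPROTO_IPV4)
		return ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &addr->ip);
	return ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], addr);
}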
329
330 static u32
331 ip_set_timeout_get(const unsigned long *timeout)
332 {
333 u32 t;
334
335 if (*timeout == IPSET_ELEM_PERMANENT)
336 return 0;
337
338 t = jiffies_to_msecs(*timeout - jiffies) / MSEC_PER_SEC;
339 /* Zero value in userspace means no timeout */
340 return t == 0 ? 1 : t;
341 }
342
343 static char *
344 ip_set_comment_uget(struct nlattr *tb)
345 {
346 return nla_data(tb);
347 }
348
349 /* Called from uadd only, protected by the set spinlock.
350 * The kadt functions don't use the comment extensions in any way.
351 */
352 void
353 ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
354 const struct ip_set_ext *ext)
355 {
356 struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
357 size_t len = ext->comment ? strlen(ext->comment) : 0;
358
359 if (unlikely(c)) {
360 set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
361 kfree_rcu(c, rcu);
362 rcu_assign_pointer(comment->c, NULL);
363 }
364 if (!len)
365 return;
366 if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
367 len = IPSET_MAX_COMMENT_SIZE;
368 c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
369 if (unlikely(!c))
370 return;
371 strlcpy(c->str, ext->comment, len + 1);
372 set->ext_size += sizeof(*c) + strlen(c->str) + 1;
373 rcu_assign_pointer(comment->c, c);
374 }
375 EXPORT_SYMBOL_GPL(ip_set_init_comment);
376
377 /* Used only when dumping a set, protected by rcu_read_lock() */
378 static int
379 ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
380 {
381 struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
382
383 if (!c)
384 return 0;
385 return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
386 }
387
388 /* Called from uadd/udel, flush or the garbage collectors protected
389 * by the set spinlock.
390 * Called when the set is destroyed and when there can't be any user
391 * of the set data anymore.
392 */
393 static void
394 ip_set_comment_free(struct ip_set *set, void *ptr)
395 {
396 struct ip_set_comment *comment = ptr;
397 struct ip_set_comment_rcu *c;
398
399 c = rcu_dereference_protected(comment->c, 1);
400 if (unlikely(!c))
401 return;
402 set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
403 kfree_rcu(c, rcu);
404 rcu_assign_pointer(comment->c, NULL);
405 }
406
407 typedef void (*destroyer)(struct ip_set *, void *);
408 /* ipset data extension types, in size order */
409
410 const struct ip_set_ext_type ip_set_extensions[] = {
411 [IPSET_EXT_ID_COUNTER] = {
412 .type = IPSET_EXT_COUNTER,
413 .flag = IPSET_FLAG_WITH_COUNTERS,
414 .len = sizeof(struct ip_set_counter),
415 .align = __alignof__(struct ip_set_counter),
416 },
417 [IPSET_EXT_ID_TIMEOUT] = {
418 .type = IPSET_EXT_TIMEOUT,
419 .len = sizeof(unsigned long),
420 .align = __alignof__(unsigned long),
421 },
422 [IPSET_EXT_ID_SKBINFO] = {
423 .type = IPSET_EXT_SKBINFO,
424 .flag = IPSET_FLAG_WITH_SKBINFO,
425 .len = sizeof(struct ip_set_skbinfo),
426 .align = __alignof__(struct ip_set_skbinfo),
427 },
428 [IPSET_EXT_ID_COMMENT] = {
429 .type = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY,
430 .flag = IPSET_FLAG_WITH_COMMENT,
431 .len = sizeof(struct ip_set_comment),
432 .align = __alignof__(struct ip_set_comment),
433 .destroy = ip_set_comment_free,
434 },
435 };
436 EXPORT_SYMBOL_GPL(ip_set_extensions);
437
438 static bool
439 add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
440 {
441 return ip_set_extensions[id].flag ?
442 (flags & ip_set_extensions[id].flag) :
443 !!tb[IPSET_ATTR_TIMEOUT];
444 }
445
446 size_t
447 ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
448 size_t align)
449 {
450 enum ip_set_ext_id id;
451 u32 cadt_flags = 0;
452
453 if (tb[IPSET_ATTR_CADT_FLAGS])
454 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
455 if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
456 set->flags |= IPSET_CREATE_FLAG_FORCEADD;
457 if (!align)
458 align = 1;
459 for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
460 if (!add_extension(id, cadt_flags, tb))
461 continue;
462 len = ALIGN(len, ip_set_extensions[id].align);
463 set->offset[id] = len;
464 set->extensions |= ip_set_extensions[id].type;
465 len += ip_set_extensions[id].len;
466 }
467 return ALIGN(len, align);
468 }
469 EXPORT_SYMBOL_GPL(ip_set_elem_len);
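
/* Illustrative sketch (hypothetical type): the create callback computes the
 * full per-element slot size once the requested extensions are known. The
 * offsets stored in set->offset[] are later used through the ext_*()
 * helpers from ip_set.h, e.g. ext_timeout(e, set) for an element e.
 */
struct my_elem {		/* hypothetical type-specific payload */
	__be32 ip;
};

static int my_create(struct net *net, struct ip_set *set,
		     struct nlattr *tb[], u32 flags)
{
	set->dsize = ip_set_elem_len(set, tb, sizeof(struct my_elem),
				     __alignof__(struct my_elem));
	/* ... allocate element storage in slots of set->dsize bytes ... */
	return 0;
}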
470
471 int
472 ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
473 struct ip_set_ext *ext)
474 {
475 u64 fullmark;
476
477 if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
478 !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
479 !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
480 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
481 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
482 !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
483 return -IPSET_ERR_PROTOCOL;
484
485 if (tb[IPSET_ATTR_TIMEOUT]) {
486 if (!SET_WITH_TIMEOUT(set))
487 return -IPSET_ERR_TIMEOUT;
488 ext->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
489 }
490 if (tb[IPSET_ATTR_BYTES] || tb[IPSET_ATTR_PACKETS]) {
491 if (!SET_WITH_COUNTER(set))
492 return -IPSET_ERR_COUNTER;
493 if (tb[IPSET_ATTR_BYTES])
494 ext->bytes = be64_to_cpu(nla_get_be64(
495 tb[IPSET_ATTR_BYTES]));
496 if (tb[IPSET_ATTR_PACKETS])
497 ext->packets = be64_to_cpu(nla_get_be64(
498 tb[IPSET_ATTR_PACKETS]));
499 }
500 if (tb[IPSET_ATTR_COMMENT]) {
501 if (!SET_WITH_COMMENT(set))
502 return -IPSET_ERR_COMMENT;
503 ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
504 }
505 if (tb[IPSET_ATTR_SKBMARK]) {
506 if (!SET_WITH_SKBINFO(set))
507 return -IPSET_ERR_SKBINFO;
508 fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK]));
509 ext->skbinfo.skbmark = fullmark >> 32;
510 ext->skbinfo.skbmarkmask = fullmark & 0xffffffff;
511 }
512 if (tb[IPSET_ATTR_SKBPRIO]) {
513 if (!SET_WITH_SKBINFO(set))
514 return -IPSET_ERR_SKBINFO;
515 ext->skbinfo.skbprio =
516 be32_to_cpu(nla_get_be32(tb[IPSET_ATTR_SKBPRIO]));
517 }
518 if (tb[IPSET_ATTR_SKBQUEUE]) {
519 if (!SET_WITH_SKBINFO(set))
520 return -IPSET_ERR_SKBINFO;
521 ext->skbinfo.skbqueue =
522 be16_to_cpu(nla_get_be16(tb[IPSET_ATTR_SKBQUEUE]));
523 }
524 return 0;
525 }
526 EXPORT_SYMBOL_GPL(ip_set_get_extensions);
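
/* Illustrative sketch (hypothetical uadt callback): the userspace-supplied
 * extension attributes are parsed into a struct ip_set_ext initialized
 * from the set defaults, as the hash and bitmap types do.
 */
static int my_uadt(struct ip_set *set, struct nlattr *tb[],
		   enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
	int ret;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;
	/* ... parse type-specific attributes, then add/del/test with &ext ... */
	return 0;
}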
527
528 static u64
529 ip_set_get_bytes(const struct ip_set_counter *counter)
530 {
531 return (u64)atomic64_read(&(counter)->bytes);
532 }
533
534 static u64
535 ip_set_get_packets(const struct ip_set_counter *counter)
536 {
537 return (u64)atomic64_read(&(counter)->packets);
538 }
539
540 static bool
541 ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
542 {
543 return nla_put_net64(skb, IPSET_ATTR_BYTES,
544 cpu_to_be64(ip_set_get_bytes(counter)),
545 IPSET_ATTR_PAD) ||
546 nla_put_net64(skb, IPSET_ATTR_PACKETS,
547 cpu_to_be64(ip_set_get_packets(counter)),
548 IPSET_ATTR_PAD);
549 }
550
551 static bool
552 ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
553 {
554 /* Send nonzero parameters only */
555 return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
556 nla_put_net64(skb, IPSET_ATTR_SKBMARK,
557 cpu_to_be64((u64)skbinfo->skbmark << 32 |
558 skbinfo->skbmarkmask),
559 IPSET_ATTR_PAD)) ||
560 (skbinfo->skbprio &&
561 nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
562 cpu_to_be32(skbinfo->skbprio))) ||
563 (skbinfo->skbqueue &&
564 nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
565 cpu_to_be16(skbinfo->skbqueue)));
566 }
567
568 int
569 ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
570 const void *e, bool active)
571 {
572 if (SET_WITH_TIMEOUT(set)) {
573 unsigned long *timeout = ext_timeout(e, set);
574
575 if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
576 htonl(active ? ip_set_timeout_get(timeout)
577 : *timeout)))
578 return -EMSGSIZE;
579 }
580 if (SET_WITH_COUNTER(set) &&
581 ip_set_put_counter(skb, ext_counter(e, set)))
582 return -EMSGSIZE;
583 if (SET_WITH_COMMENT(set) &&
584 ip_set_put_comment(skb, ext_comment(e, set)))
585 return -EMSGSIZE;
586 if (SET_WITH_SKBINFO(set) &&
587 ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
588 return -EMSGSIZE;
589 return 0;
590 }
591 EXPORT_SYMBOL_GPL(ip_set_put_extensions);
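
/* Illustrative sketch (hypothetical fragment of a type's ->list()
 * callback): after the type-specific element attributes, append the
 * enabled extensions; "active" selects whether timeouts are reported as
 * remaining time.
 */
static int my_put_elem(struct sk_buff *skb, const struct ip_set *set,
		       const void *e, __be32 ip)
{
	if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, ip))
		return -EMSGSIZE;
	return ip_set_put_extensions(skb, set, e, true);
}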
592
593 static bool
594 ip_set_match_counter(u64 counter, u64 match, u8 op)
595 {
596 switch (op) {
597 case IPSET_COUNTER_NONE:
598 return true;
599 case IPSET_COUNTER_EQ:
600 return counter == match;
601 case IPSET_COUNTER_NE:
602 return counter != match;
603 case IPSET_COUNTER_LT:
604 return counter < match;
605 case IPSET_COUNTER_GT:
606 return counter > match;
607 }
608 return false;
609 }
610
611 static void
612 ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
613 {
614 atomic64_add((long long)bytes, &(counter)->bytes);
615 }
616
617 static void
618 ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
619 {
620 atomic64_add((long long)packets, &(counter)->packets);
621 }
622
623 static void
624 ip_set_update_counter(struct ip_set_counter *counter,
625 const struct ip_set_ext *ext, u32 flags)
626 {
627 if (ext->packets != ULLONG_MAX &&
628 !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
629 ip_set_add_bytes(ext->bytes, counter);
630 ip_set_add_packets(ext->packets, counter);
631 }
632 }
633
634 static void
635 ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
636 const struct ip_set_ext *ext,
637 struct ip_set_ext *mext, u32 flags)
638 {
639 mext->skbinfo = *skbinfo;
640 }
641
642 bool
643 ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
644 struct ip_set_ext *mext, u32 flags, void *data)
645 {
646 if (SET_WITH_TIMEOUT(set) &&
647 ip_set_timeout_expired(ext_timeout(data, set)))
648 return false;
649 if (SET_WITH_COUNTER(set)) {
650 struct ip_set_counter *counter = ext_counter(data, set);
651
652 if (flags & IPSET_FLAG_MATCH_COUNTERS &&
653 !(ip_set_match_counter(ip_set_get_packets(counter),
654 mext->packets, mext->packets_op) &&
655 ip_set_match_counter(ip_set_get_bytes(counter),
656 mext->bytes, mext->bytes_op)))
657 return false;
658 ip_set_update_counter(counter, ext, flags);
659 }
660 if (SET_WITH_SKBINFO(set))
661 ip_set_get_skbinfo(ext_skbinfo(data, set),
662 ext, mext, flags);
663 return true;
664 }
665 EXPORT_SYMBOL_GPL(ip_set_match_extensions);
666
667 /* Creating/destroying/renaming/swapping affect the existence and
668 * the properties of a set. All of these can be executed from userspace
669 * only and serialized by the nfnl mutex indirectly from nfnetlink.
670 *
671 * Sets are identified by their index in ip_set_list and the index
672 * is used by the external references (set/SET netfilter modules).
673 *
674 * The set behind an index may change by swapping only, from userspace.
675 */
676
677 static void
678 __ip_set_get(struct ip_set *set)
679 {
680 write_lock_bh(&ip_set_ref_lock);
681 set->ref++;
682 write_unlock_bh(&ip_set_ref_lock);
683 }
684
685 static void
686 __ip_set_put(struct ip_set *set)
687 {
688 write_lock_bh(&ip_set_ref_lock);
689 BUG_ON(set->ref == 0);
690 set->ref--;
691 write_unlock_bh(&ip_set_ref_lock);
692 }
693
694 /* set->ref can be swapped out by ip_set_swap, netlink events (like dump) need
695 * a separate reference counter
696 */
697 static void
698 __ip_set_put_netlink(struct ip_set *set)
699 {
700 write_lock_bh(&ip_set_ref_lock);
701 BUG_ON(set->ref_netlink == 0);
702 set->ref_netlink--;
703 write_unlock_bh(&ip_set_ref_lock);
704 }
705
706 /* Add, del and test set entries from kernel.
707 *
708 * The set behind the index must exist and must be referenced
709 * so it can't be destroyed (or changed) under our feet.
710 */
711
712 static struct ip_set *
713 ip_set_rcu_get(struct net *net, ip_set_id_t index)
714 {
715 struct ip_set *set;
716 struct ip_set_net *inst = ip_set_pernet(net);
717
718 rcu_read_lock();
719 /* ip_set_list itself needs to be protected */
720 set = rcu_dereference(inst->ip_set_list)[index];
721 rcu_read_unlock();
722
723 return set;
724 }
725
726 static inline void
727 ip_set_lock(struct ip_set *set)
728 {
729 if (!set->variant->region_lock)
730 spin_lock_bh(&set->lock);
731 }
732
733 static inline void
734 ip_set_unlock(struct ip_set *set)
735 {
736 if (!set->variant->region_lock)
737 spin_unlock_bh(&set->lock);
738 }
739
740 int
741 ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
742 const struct xt_action_param *par, struct ip_set_adt_opt *opt)
743 {
744 struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
745 int ret = 0;
746
747 BUG_ON(!set);
748 pr_debug("set %s, index %u\n", set->name, index);
749
750 if (opt->dim < set->type->dimension ||
751 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
752 return 0;
753
754 rcu_read_lock_bh();
755 ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt);
756 rcu_read_unlock_bh();
757
758 if (ret == -EAGAIN) {
759 /* Type requests element to be completed */
760 pr_debug("element must be completed, ADD is triggered\n");
761 ip_set_lock(set);
762 set->variant->kadt(set, skb, par, IPSET_ADD, opt);
763 ip_set_unlock(set);
764 ret = 1;
765 } else {
766 /* --return-nomatch: invert matched element */
767 if ((opt->cmdflags & IPSET_FLAG_RETURN_NOMATCH) &&
768 (set->type->features & IPSET_TYPE_NOMATCH) &&
769 (ret > 0 || ret == -ENOTEMPTY))
770 ret = -ret;
771 }
772
773 /* Convert error codes to nomatch */
774 return (ret < 0 ? 0 : ret);
775 }
776 EXPORT_SYMBOL_GPL(ip_set_test);
777
778 int
779 ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
780 const struct xt_action_param *par, struct ip_set_adt_opt *opt)
781 {
782 struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
783 int ret;
784
785 BUG_ON(!set);
786 pr_debug("set %s, index %u\n", set->name, index);
787
788 if (opt->dim < set->type->dimension ||
789 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
790 return -IPSET_ERR_TYPE_MISMATCH;
791
792 ip_set_lock(set);
793 ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
794 ip_set_unlock(set);
795
796 return ret;
797 }
798 EXPORT_SYMBOL_GPL(ip_set_add);
799
800 int
801 ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
802 const struct xt_action_param *par, struct ip_set_adt_opt *opt)
803 {
804 struct ip_set *set = ip_set_rcu_get(xt_net(par), index);
805 int ret = 0;
806
807 BUG_ON(!set);
808 pr_debug("set %s, index %u\n", set->name, index);
809
810 if (opt->dim < set->type->dimension ||
811 !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
812 return -IPSET_ERR_TYPE_MISMATCH;
813
814 ip_set_lock(set);
815 ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
816 ip_set_unlock(set);
817
818 return ret;
819 }
820 EXPORT_SYMBOL_GPL(ip_set_del);
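
/* Illustrative sketch, modelled loosely on xt_set: a netfilter match
 * calling ip_set_test(). The index must have been obtained and referenced
 * beforehand (e.g. via ip_set_nfnl_get_byindex() in the checkentry hook),
 * so the set cannot disappear here. struct my_set_match_info and
 * my_set_match() are hypothetical.
 */
struct my_set_match_info {
	ip_set_id_t index;
};

static bool my_set_match(const struct sk_buff *skb,
			 struct xt_action_param *par)
{
	const struct my_set_match_info *info = par->matchinfo;
	struct ip_set_adt_opt opt = {
		.family	= xt_family(par),
		.dim	= IPSET_DIM_ONE,
		.flags	= IPSET_DIM_ONE_SRC,	/* match on source address */
		.ext.timeout = UINT_MAX,
	};

	return ip_set_test(info->index, skb, par, &opt);
}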
821
822 /* Find set by name, reference it once. The reference makes sure the
823 * thing pointed to does not go away under our feet.
824 *
825 */
826 ip_set_id_t
827 ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
828 {
829 ip_set_id_t i, index = IPSET_INVALID_ID;
830 struct ip_set *s;
831 struct ip_set_net *inst = ip_set_pernet(net);
832
833 rcu_read_lock();
834 for (i = 0; i < inst->ip_set_max; i++) {
835 s = rcu_dereference(inst->ip_set_list)[i];
836 if (s && STRNCMP(s->name, name)) {
837 __ip_set_get(s);
838 index = i;
839 *set = s;
840 break;
841 }
842 }
843 rcu_read_unlock();
844
845 return index;
846 }
847 EXPORT_SYMBOL_GPL(ip_set_get_byname);
848
849 /* If the given set pointer points to a valid set, decrement
850 * reference count by 1. The caller shall not assume the index
851 * to be valid after calling this function.
852 *
853 */
854
855 static void
856 __ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index)
857 {
858 struct ip_set *set;
859
860 rcu_read_lock();
861 set = rcu_dereference(inst->ip_set_list)[index];
862 if (set)
863 __ip_set_put(set);
864 rcu_read_unlock();
865 }
866
867 void
868 ip_set_put_byindex(struct net *net, ip_set_id_t index)
869 {
870 struct ip_set_net *inst = ip_set_pernet(net);
871
872 __ip_set_put_byindex(inst, index);
873 }
874 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
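
/* Illustrative sketch: a kernel-side user resolving a set by name once at
 * setup time and dropping the reference at teardown. The set name "myset"
 * and my_resolve_set() are hypothetical.
 */
static ip_set_id_t my_resolve_set(struct net *net)
{
	struct ip_set *set;
	ip_set_id_t index;

	index = ip_set_get_byname(net, "myset", &set);
	if (index == IPSET_INVALID_ID)
		return IPSET_INVALID_ID;
	/* Use the index with ip_set_test()/ip_set_add()/ip_set_del() and
	 * release it later with ip_set_put_byindex(net, index).
	 */
	return index;
}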
875
876 /* Get the name of a set behind a set index.
877 * Set itself is protected by RCU, but its name isn't: to protect against
878 * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
879 * name.
880 */
881 void
882 ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
883 {
884 struct ip_set *set = ip_set_rcu_get(net, index);
885
886 BUG_ON(!set);
887
888 read_lock_bh(&ip_set_ref_lock);
889 strncpy(name, set->name, IPSET_MAXNAMELEN);
890 read_unlock_bh(&ip_set_ref_lock);
891 }
892 EXPORT_SYMBOL_GPL(ip_set_name_byindex);
893
894 /* Routines called by external subsystems, which do not
895 * call nfnl_lock for us.
896 */
897
898 /* Find set by index, reference it once. The reference makes sure the
899 * thing pointed to does not go away under our feet.
900 *
901 * The nfnl mutex is used in the function.
902 */
903 ip_set_id_t
904 ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
905 {
906 struct ip_set *set;
907 struct ip_set_net *inst = ip_set_pernet(net);
908
909 if (index >= inst->ip_set_max)
910 return IPSET_INVALID_ID;
911
912 nfnl_lock(NFNL_SUBSYS_IPSET);
913 set = ip_set(inst, index);
914 if (set)
915 __ip_set_get(set);
916 else
917 index = IPSET_INVALID_ID;
918 nfnl_unlock(NFNL_SUBSYS_IPSET);
919
920 return index;
921 }
922 EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
923
924 /* If the given set pointer points to a valid set, decrement
925 * reference count by 1. The caller shall not assume the index
926 * to be valid after calling this function.
927 *
928 * The nfnl mutex is used in the function.
929 */
930 void
931 ip_set_nfnl_put(struct net *net, ip_set_id_t index)
932 {
933 struct ip_set *set;
934 struct ip_set_net *inst = ip_set_pernet(net);
935
936 nfnl_lock(NFNL_SUBSYS_IPSET);
937 if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */
938 set = ip_set(inst, index);
939 if (set)
940 __ip_set_put(set);
941 }
942 nfnl_unlock(NFNL_SUBSYS_IPSET);
943 }
944 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
945
946 /* Communication protocol with userspace over netlink.
947 *
948 * The commands are serialized by the nfnl mutex.
949 */
950
951 static inline u8 protocol(const struct nlattr * const tb[])
952 {
953 return nla_get_u8(tb[IPSET_ATTR_PROTOCOL]);
954 }
955
956 static inline bool
957 protocol_failed(const struct nlattr * const tb[])
958 {
959 return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) != IPSET_PROTOCOL;
960 }
961
962 static inline bool
963 protocol_min_failed(const struct nlattr * const tb[])
964 {
965 return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) < IPSET_PROTOCOL_MIN;
966 }
967
968 static inline u32
969 flag_exist(const struct nlmsghdr *nlh)
970 {
971 return nlh->nlmsg_flags & NLM_F_EXCL ? 0 : IPSET_FLAG_EXIST;
972 }
973
974 static struct nlmsghdr *
975 start_msg(struct sk_buff *skb, u32 portid, u32 seq, unsigned int flags,
976 enum ipset_cmd cmd)
977 {
978 struct nlmsghdr *nlh;
979 struct nfgenmsg *nfmsg;
980
981 nlh = nlmsg_put(skb, portid, seq, nfnl_msg_type(NFNL_SUBSYS_IPSET, cmd),
982 sizeof(*nfmsg), flags);
983 if (!nlh)
984 return NULL;
985
986 nfmsg = nlmsg_data(nlh);
987 nfmsg->nfgen_family = NFPROTO_IPV4;
988 nfmsg->version = NFNETLINK_V0;
989 nfmsg->res_id = 0;
990
991 return nlh;
992 }
993
994 /* Create a set */
995
996 static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
997 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
998 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
999 .len = IPSET_MAXNAMELEN - 1 },
1000 [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
1001 .len = IPSET_MAXNAMELEN - 1},
1002 [IPSET_ATTR_REVISION] = { .type = NLA_U8 },
1003 [IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
1004 [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
1005 };
1006
1007 static struct ip_set *
1008 find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
1009 {
1010 struct ip_set *set = NULL;
1011 ip_set_id_t i;
1012
1013 *id = IPSET_INVALID_ID;
1014 for (i = 0; i < inst->ip_set_max; i++) {
1015 set = ip_set(inst, i);
1016 if (set && STRNCMP(set->name, name)) {
1017 *id = i;
1018 break;
1019 }
1020 }
1021 return (*id == IPSET_INVALID_ID ? NULL : set);
1022 }
1023
1024 static inline struct ip_set *
1025 find_set(struct ip_set_net *inst, const char *name)
1026 {
1027 ip_set_id_t id;
1028
1029 return find_set_and_id(inst, name, &id);
1030 }
1031
1032 static int
1033 find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
1034 struct ip_set **set)
1035 {
1036 struct ip_set *s;
1037 ip_set_id_t i;
1038
1039 *index = IPSET_INVALID_ID;
1040 for (i = 0; i < inst->ip_set_max; i++) {
1041 s = ip_set(inst, i);
1042 if (!s) {
1043 if (*index == IPSET_INVALID_ID)
1044 *index = i;
1045 } else if (STRNCMP(name, s->name)) {
1046 /* Name clash */
1047 *set = s;
1048 return -EEXIST;
1049 }
1050 }
1051 if (*index == IPSET_INVALID_ID)
1052 /* No free slot remained */
1053 return -IPSET_ERR_MAX_SETS;
1054 return 0;
1055 }
1056
1057 static int ip_set_none(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1058 const struct nlmsghdr *nlh,
1059 const struct nlattr * const attr[],
1060 struct netlink_ext_ack *extack)
1061 {
1062 return -EOPNOTSUPP;
1063 }
1064
1065 static int ip_set_create(struct net *net, struct sock *ctnl,
1066 struct sk_buff *skb, const struct nlmsghdr *nlh,
1067 const struct nlattr * const attr[],
1068 struct netlink_ext_ack *extack)
1069 {
1070 struct ip_set_net *inst = ip_set_pernet(net);
1071 struct ip_set *set, *clash = NULL;
1072 ip_set_id_t index = IPSET_INVALID_ID;
1073 struct nlattr *tb[IPSET_ATTR_CREATE_MAX + 1] = {};
1074 const char *name, *typename;
1075 u8 family, revision;
1076 u32 flags = flag_exist(nlh);
1077 int ret = 0;
1078
1079 if (unlikely(protocol_min_failed(attr) ||
1080 !attr[IPSET_ATTR_SETNAME] ||
1081 !attr[IPSET_ATTR_TYPENAME] ||
1082 !attr[IPSET_ATTR_REVISION] ||
1083 !attr[IPSET_ATTR_FAMILY] ||
1084 (attr[IPSET_ATTR_DATA] &&
1085 !flag_nested(attr[IPSET_ATTR_DATA]))))
1086 return -IPSET_ERR_PROTOCOL;
1087
1088 name = nla_data(attr[IPSET_ATTR_SETNAME]);
1089 typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
1090 family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
1091 revision = nla_get_u8(attr[IPSET_ATTR_REVISION]);
1092 pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
1093 name, typename, family_name(family), revision);
1094
1095 /* First, and without any locks, allocate and initialize
1096 * a normal base set structure.
1097 */
1098 set = kzalloc(sizeof(*set), GFP_KERNEL);
1099 if (!set)
1100 return -ENOMEM;
1101 spin_lock_init(&set->lock);
1102 strlcpy(set->name, name, IPSET_MAXNAMELEN);
1103 set->family = family;
1104 set->revision = revision;
1105
1106 /* Next, check that we know the type, and take
1107 * a reference on the type, to make sure it stays available
1108 * while constructing our new set.
1109 *
1110 * After referencing the type, we try to create the type
1111 * specific part of the set without holding any locks.
1112 */
1113 ret = find_set_type_get(typename, family, revision, &set->type);
1114 if (ret)
1115 goto out;
1116
1117 /* Without holding any locks, create private part. */
1118 if (attr[IPSET_ATTR_DATA] &&
1119 nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
1120 set->type->create_policy, NULL)) {
1121 ret = -IPSET_ERR_PROTOCOL;
1122 goto put_out;
1123 }
1124
1125 ret = set->type->create(net, set, tb, flags);
1126 if (ret != 0)
1127 goto put_out;
1128
1129 /* BTW, ret==0 here. */
1130
1131 /* Here, we have a valid, constructed set and we are protected
1132 * by the nfnl mutex. Find the first free index in ip_set_list
1133 * and check clashing.
1134 */
1135 ret = find_free_id(inst, set->name, &index, &clash);
1136 if (ret == -EEXIST) {
1137 /* If the clashing set is identical and IPSET_FLAG_EXIST was given, ignore the error */
1138 if ((flags & IPSET_FLAG_EXIST) &&
1139 STRNCMP(set->type->name, clash->type->name) &&
1140 set->type->family == clash->type->family &&
1141 set->type->revision_min == clash->type->revision_min &&
1142 set->type->revision_max == clash->type->revision_max &&
1143 set->variant->same_set(set, clash))
1144 ret = 0;
1145 goto cleanup;
1146 } else if (ret == -IPSET_ERR_MAX_SETS) {
1147 struct ip_set **list, **tmp;
1148 ip_set_id_t i = inst->ip_set_max + IP_SET_INC;
1149
1150 if (i < inst->ip_set_max || i == IPSET_INVALID_ID)
1151 /* Wraparound */
1152 goto cleanup;
1153
1154 list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
1155 if (!list)
1156 goto cleanup;
1157 /* nfnl mutex is held, both lists are valid */
1158 tmp = ip_set_dereference(inst->ip_set_list);
1159 memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
1160 rcu_assign_pointer(inst->ip_set_list, list);
1161 /* Make sure all current packets have passed through */
1162 synchronize_net();
1163 /* Use new list */
1164 index = inst->ip_set_max;
1165 inst->ip_set_max = i;
1166 kvfree(tmp);
1167 ret = 0;
1168 } else if (ret) {
1169 goto cleanup;
1170 }
1171
1172 /* Finally! Add our shiny new set to the list, and be done. */
1173 pr_debug("create: '%s' created with index %u!\n", set->name, index);
1174 ip_set(inst, index) = set;
1175
1176 return ret;
1177
1178 cleanup:
1179 set->variant->destroy(set);
1180 put_out:
1181 module_put(set->type->me);
1182 out:
1183 kfree(set);
1184 return ret;
1185 }
1186
1187 /* Destroy sets */
1188
1189 static const struct nla_policy
1190 ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
1191 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1192 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1193 .len = IPSET_MAXNAMELEN - 1 },
1194 };
1195
1196 static void
1197 ip_set_destroy_set(struct ip_set *set)
1198 {
1199 pr_debug("set: %s\n", set->name);
1200
1201 /* Must call it without holding any lock */
1202 set->variant->destroy(set);
1203 module_put(set->type->me);
1204 kfree(set);
1205 }
1206
1207 static int ip_set_destroy(struct net *net, struct sock *ctnl,
1208 struct sk_buff *skb, const struct nlmsghdr *nlh,
1209 const struct nlattr * const attr[],
1210 struct netlink_ext_ack *extack)
1211 {
1212 struct ip_set_net *inst = ip_set_pernet(net);
1213 struct ip_set *s;
1214 ip_set_id_t i;
1215 int ret = 0;
1216
1217 if (unlikely(protocol_min_failed(attr)))
1218 return -IPSET_ERR_PROTOCOL;
1219
1220 /* Must wait for flush to be really finished in list:set */
1221 rcu_barrier();
1222
1223 /* Commands are serialized and references are
1224 * protected by the ip_set_ref_lock.
1225 * External systems (i.e. xt_set) must call
1226 * ip_set_put|get_nfnl_* functions, that way we
1227 * can safely check references here.
1228 *
1229 * list:set timer can only decrement the reference
1230 * counter, so if it's already zero, we can proceed
1231 * without holding the lock.
1232 */
1233 read_lock_bh(&ip_set_ref_lock);
1234 if (!attr[IPSET_ATTR_SETNAME]) {
1235 for (i = 0; i < inst->ip_set_max; i++) {
1236 s = ip_set(inst, i);
1237 if (s && (s->ref || s->ref_netlink)) {
1238 ret = -IPSET_ERR_BUSY;
1239 goto out;
1240 }
1241 }
1242 inst->is_destroyed = true;
1243 read_unlock_bh(&ip_set_ref_lock);
1244 for (i = 0; i < inst->ip_set_max; i++) {
1245 s = ip_set(inst, i);
1246 if (s) {
1247 ip_set(inst, i) = NULL;
1248 ip_set_destroy_set(s);
1249 }
1250 }
1251 /* Modified by ip_set_destroy() only, which is serialized */
1252 inst->is_destroyed = false;
1253 } else {
1254 s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
1255 &i);
1256 if (!s) {
1257 ret = -ENOENT;
1258 goto out;
1259 } else if (s->ref || s->ref_netlink) {
1260 ret = -IPSET_ERR_BUSY;
1261 goto out;
1262 }
1263 ip_set(inst, i) = NULL;
1264 read_unlock_bh(&ip_set_ref_lock);
1265
1266 ip_set_destroy_set(s);
1267 }
1268 return 0;
1269 out:
1270 read_unlock_bh(&ip_set_ref_lock);
1271 return ret;
1272 }
1273
1274 /* Flush sets */
1275
1276 static void
1277 ip_set_flush_set(struct ip_set *set)
1278 {
1279 pr_debug("set: %s\n", set->name);
1280
1281 ip_set_lock(set);
1282 set->variant->flush(set);
1283 ip_set_unlock(set);
1284 }
1285
1286 static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1287 const struct nlmsghdr *nlh,
1288 const struct nlattr * const attr[],
1289 struct netlink_ext_ack *extack)
1290 {
1291 struct ip_set_net *inst = ip_set_pernet(net);
1292 struct ip_set *s;
1293 ip_set_id_t i;
1294
1295 if (unlikely(protocol_min_failed(attr)))
1296 return -IPSET_ERR_PROTOCOL;
1297
1298 if (!attr[IPSET_ATTR_SETNAME]) {
1299 for (i = 0; i < inst->ip_set_max; i++) {
1300 s = ip_set(inst, i);
1301 if (s)
1302 ip_set_flush_set(s);
1303 }
1304 } else {
1305 s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1306 if (!s)
1307 return -ENOENT;
1308
1309 ip_set_flush_set(s);
1310 }
1311
1312 return 0;
1313 }
1314
1315 /* Rename a set */
1316
1317 static const struct nla_policy
1318 ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = {
1319 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1320 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1321 .len = IPSET_MAXNAMELEN - 1 },
1322 [IPSET_ATTR_SETNAME2] = { .type = NLA_NUL_STRING,
1323 .len = IPSET_MAXNAMELEN - 1 },
1324 };
1325
1326 static int ip_set_rename(struct net *net, struct sock *ctnl,
1327 struct sk_buff *skb, const struct nlmsghdr *nlh,
1328 const struct nlattr * const attr[],
1329 struct netlink_ext_ack *extack)
1330 {
1331 struct ip_set_net *inst = ip_set_pernet(net);
1332 struct ip_set *set, *s;
1333 const char *name2;
1334 ip_set_id_t i;
1335 int ret = 0;
1336
1337 if (unlikely(protocol_min_failed(attr) ||
1338 !attr[IPSET_ATTR_SETNAME] ||
1339 !attr[IPSET_ATTR_SETNAME2]))
1340 return -IPSET_ERR_PROTOCOL;
1341
1342 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1343 if (!set)
1344 return -ENOENT;
1345
1346 write_lock_bh(&ip_set_ref_lock);
1347 if (set->ref != 0 || set->ref_netlink != 0) {
1348 ret = -IPSET_ERR_REFERENCED;
1349 goto out;
1350 }
1351
1352 name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
1353 for (i = 0; i < inst->ip_set_max; i++) {
1354 s = ip_set(inst, i);
1355 if (s && STRNCMP(s->name, name2)) {
1356 ret = -IPSET_ERR_EXIST_SETNAME2;
1357 goto out;
1358 }
1359 }
1360 strncpy(set->name, name2, IPSET_MAXNAMELEN);
1361
1362 out:
1363 write_unlock_bh(&ip_set_ref_lock);
1364 return ret;
1365 }
1366
1367 /* Swap two sets so that name/index points to the other.
1368 * References and set names are also swapped.
1369 *
1370 * The commands are serialized by the nfnl mutex and references are
1371 * protected by the ip_set_ref_lock. The kernel interfaces
1372 * do not hold the mutex but the pointer settings are atomic
1373 * so the ip_set_list always contains valid pointers to the sets.
1374 */
1375
1376 static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1377 const struct nlmsghdr *nlh,
1378 const struct nlattr * const attr[],
1379 struct netlink_ext_ack *extack)
1380 {
1381 struct ip_set_net *inst = ip_set_pernet(net);
1382 struct ip_set *from, *to;
1383 ip_set_id_t from_id, to_id;
1384 char from_name[IPSET_MAXNAMELEN];
1385
1386 if (unlikely(protocol_min_failed(attr) ||
1387 !attr[IPSET_ATTR_SETNAME] ||
1388 !attr[IPSET_ATTR_SETNAME2]))
1389 return -IPSET_ERR_PROTOCOL;
1390
1391 from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
1392 &from_id);
1393 if (!from)
1394 return -ENOENT;
1395
1396 to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
1397 &to_id);
1398 if (!to)
1399 return -IPSET_ERR_EXIST_SETNAME2;
1400
1401 /* Features must not change.
1402 * Not an artificial restriction anymore, as we must prevent
1403 * possible loops created by swapping in list:set type of sets.
1404 */
1405 if (!(from->type->features == to->type->features &&
1406 from->family == to->family))
1407 return -IPSET_ERR_TYPE_MISMATCH;
1408
1409 write_lock_bh(&ip_set_ref_lock);
1410
1411 if (from->ref_netlink || to->ref_netlink) {
1412 write_unlock_bh(&ip_set_ref_lock);
1413 return -EBUSY;
1414 }
1415
1416 strncpy(from_name, from->name, IPSET_MAXNAMELEN);
1417 strncpy(from->name, to->name, IPSET_MAXNAMELEN);
1418 strncpy(to->name, from_name, IPSET_MAXNAMELEN);
1419
1420 swap(from->ref, to->ref);
1421 ip_set(inst, from_id) = to;
1422 ip_set(inst, to_id) = from;
1423 write_unlock_bh(&ip_set_ref_lock);
1424
1425 return 0;
1426 }
1427
1428 /* List/save set data */
1429
1430 #define DUMP_INIT 0
1431 #define DUMP_ALL 1
1432 #define DUMP_ONE 2
1433 #define DUMP_LAST 3
1434
1435 #define DUMP_TYPE(arg) (((u32)(arg)) & 0x0000FFFF)
1436 #define DUMP_FLAGS(arg) (((u32)(arg)) >> 16)
1437
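/* Worked example of the packing used by ip_set_dump_start() below, which
 * stores "dump_type | (flags << 16)" in cb->args[IPSET_CB_DUMP]:
 *
 *	arg = DUMP_ONE | (IPSET_FLAG_LIST_SETNAME << 16);
 *	DUMP_TYPE(arg)  == DUMP_ONE
 *	DUMP_FLAGS(arg) == IPSET_FLAG_LIST_SETNAME
 */
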
1438 int
1439 ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
1440 {
1441 u32 cadt_flags = 0;
1442
1443 if (SET_WITH_TIMEOUT(set))
1444 if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
1445 htonl(set->timeout))))
1446 return -EMSGSIZE;
1447 if (SET_WITH_COUNTER(set))
1448 cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
1449 if (SET_WITH_COMMENT(set))
1450 cadt_flags |= IPSET_FLAG_WITH_COMMENT;
1451 if (SET_WITH_SKBINFO(set))
1452 cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
1453 if (SET_WITH_FORCEADD(set))
1454 cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
1455
1456 if (!cadt_flags)
1457 return 0;
1458 return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
1459 }
1460 EXPORT_SYMBOL_GPL(ip_set_put_flags);
1461
1462 static int
1463 ip_set_dump_done(struct netlink_callback *cb)
1464 {
1465 if (cb->args[IPSET_CB_ARG0]) {
1466 struct ip_set_net *inst =
1467 (struct ip_set_net *)cb->args[IPSET_CB_NET];
1468 ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
1469 struct ip_set *set = ip_set_ref_netlink(inst, index);
1470
1471 if (set->variant->uref)
1472 set->variant->uref(set, cb, false);
1473 pr_debug("release set %s\n", set->name);
1474 __ip_set_put_netlink(set);
1475 }
1476 return 0;
1477 }
1478
1479 static inline void
1480 dump_attrs(struct nlmsghdr *nlh)
1481 {
1482 const struct nlattr *attr;
1483 int rem;
1484
1485 pr_debug("dump nlmsg\n");
1486 nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
1487 pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len);
1488 }
1489 }
1490
1491 static const struct nla_policy
1492 ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
1493 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1494 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1495 .len = IPSET_MAXNAMELEN - 1 },
1496 [IPSET_ATTR_FLAGS] = { .type = NLA_U32 },
1497 };
1498
1499 static int
1500 ip_set_dump_start(struct netlink_callback *cb)
1501 {
1502 struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
1503 int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
1504 struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
1505 struct nlattr *attr = (void *)nlh + min_len;
1506 struct sk_buff *skb = cb->skb;
1507 struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
1508 u32 dump_type;
1509 int ret;
1510
1511 ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
1512 nlh->nlmsg_len - min_len,
1513 ip_set_dump_policy, NULL);
1514 if (ret)
1515 goto error;
1516
1517 cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
1518 if (cda[IPSET_ATTR_SETNAME]) {
1519 ip_set_id_t index;
1520 struct ip_set *set;
1521
1522 set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
1523 &index);
1524 if (!set) {
1525 ret = -ENOENT;
1526 goto error;
1527 }
1528 dump_type = DUMP_ONE;
1529 cb->args[IPSET_CB_INDEX] = index;
1530 } else {
1531 dump_type = DUMP_ALL;
1532 }
1533
1534 if (cda[IPSET_ATTR_FLAGS]) {
1535 u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]);
1536
1537 dump_type |= (f << 16);
1538 }
1539 cb->args[IPSET_CB_NET] = (unsigned long)inst;
1540 cb->args[IPSET_CB_DUMP] = dump_type;
1541
1542 return 0;
1543
1544 error:
1545 /* We have to create and send the error message manually :-( */
1546 if (nlh->nlmsg_flags & NLM_F_ACK) {
1547 netlink_ack(cb->skb, nlh, ret, NULL);
1548 }
1549 return ret;
1550 }
1551
1552 static int
1553 ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb)
1554 {
1555 ip_set_id_t index = IPSET_INVALID_ID, max;
1556 struct ip_set *set = NULL;
1557 struct nlmsghdr *nlh = NULL;
1558 unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
1559 struct ip_set_net *inst = ip_set_pernet(sock_net(skb->sk));
1560 u32 dump_type, dump_flags;
1561 bool is_destroyed;
1562 int ret = 0;
1563
1564 if (!cb->args[IPSET_CB_DUMP])
1565 return -EINVAL;
1566
1567 if (cb->args[IPSET_CB_INDEX] >= inst->ip_set_max)
1568 goto out;
1569
1570 dump_type = DUMP_TYPE(cb->args[IPSET_CB_DUMP]);
1571 dump_flags = DUMP_FLAGS(cb->args[IPSET_CB_DUMP]);
1572 max = dump_type == DUMP_ONE ? cb->args[IPSET_CB_INDEX] + 1
1573 : inst->ip_set_max;
1574 dump_last:
1575 pr_debug("dump type, flag: %u %u index: %ld\n",
1576 dump_type, dump_flags, cb->args[IPSET_CB_INDEX]);
1577 for (; cb->args[IPSET_CB_INDEX] < max; cb->args[IPSET_CB_INDEX]++) {
1578 index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
1579 write_lock_bh(&ip_set_ref_lock);
1580 set = ip_set(inst, index);
1581 is_destroyed = inst->is_destroyed;
1582 if (!set || is_destroyed) {
1583 write_unlock_bh(&ip_set_ref_lock);
1584 if (dump_type == DUMP_ONE) {
1585 ret = -ENOENT;
1586 goto out;
1587 }
1588 if (is_destroyed) {
1589 /* All sets are just being destroyed */
1590 ret = 0;
1591 goto out;
1592 }
1593 continue;
1594 }
1595 /* When dumping all sets, we must dump "sorted"
1596 * so that lists (unions of sets) are dumped last.
1597 */
1598 if (dump_type != DUMP_ONE &&
1599 ((dump_type == DUMP_ALL) ==
1600 !!(set->type->features & IPSET_DUMP_LAST))) {
1601 write_unlock_bh(&ip_set_ref_lock);
1602 continue;
1603 }
1604 pr_debug("List set: %s\n", set->name);
1605 if (!cb->args[IPSET_CB_ARG0]) {
1606 /* Start listing: make sure set won't be destroyed */
1607 pr_debug("reference set\n");
1608 set->ref_netlink++;
1609 }
1610 write_unlock_bh(&ip_set_ref_lock);
1611 nlh = start_msg(skb, NETLINK_CB(cb->skb).portid,
1612 cb->nlh->nlmsg_seq, flags,
1613 IPSET_CMD_LIST);
1614 if (!nlh) {
1615 ret = -EMSGSIZE;
1616 goto release_refcount;
1617 }
1618 if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL,
1619 cb->args[IPSET_CB_PROTO]) ||
1620 nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
1621 goto nla_put_failure;
1622 if (dump_flags & IPSET_FLAG_LIST_SETNAME)
1623 goto next_set;
1624 switch (cb->args[IPSET_CB_ARG0]) {
1625 case 0:
1626 /* Core header data */
1627 if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
1628 set->type->name) ||
1629 nla_put_u8(skb, IPSET_ATTR_FAMILY,
1630 set->family) ||
1631 nla_put_u8(skb, IPSET_ATTR_REVISION,
1632 set->revision))
1633 goto nla_put_failure;
1634 if (cb->args[IPSET_CB_PROTO] > IPSET_PROTOCOL_MIN &&
1635 nla_put_net16(skb, IPSET_ATTR_INDEX, htons(index)))
1636 goto nla_put_failure;
1637 ret = set->variant->head(set, skb);
1638 if (ret < 0)
1639 goto release_refcount;
1640 if (dump_flags & IPSET_FLAG_LIST_HEADER)
1641 goto next_set;
1642 if (set->variant->uref)
1643 set->variant->uref(set, cb, true);
1644 /* fall through */
1645 default:
1646 ret = set->variant->list(set, skb, cb);
1647 if (!cb->args[IPSET_CB_ARG0])
1648 /* Set is done, proceed with next one */
1649 goto next_set;
1650 goto release_refcount;
1651 }
1652 }
1653 /* If we dump all sets, continue with dumping last ones */
1654 if (dump_type == DUMP_ALL) {
1655 dump_type = DUMP_LAST;
1656 cb->args[IPSET_CB_DUMP] = dump_type | (dump_flags << 16);
1657 cb->args[IPSET_CB_INDEX] = 0;
1658 if (set && set->variant->uref)
1659 set->variant->uref(set, cb, false);
1660 goto dump_last;
1661 }
1662 goto out;
1663
1664 nla_put_failure:
1665 ret = -EFAULT;
1666 next_set:
1667 if (dump_type == DUMP_ONE)
1668 cb->args[IPSET_CB_INDEX] = IPSET_INVALID_ID;
1669 else
1670 cb->args[IPSET_CB_INDEX]++;
1671 release_refcount:
1672 /* If there was an error or set is done, release set */
1673 if (ret || !cb->args[IPSET_CB_ARG0]) {
1674 set = ip_set_ref_netlink(inst, index);
1675 if (set->variant->uref)
1676 set->variant->uref(set, cb, false);
1677 pr_debug("release set %s\n", set->name);
1678 __ip_set_put_netlink(set);
1679 cb->args[IPSET_CB_ARG0] = 0;
1680 }
1681 out:
1682 if (nlh) {
1683 nlmsg_end(skb, nlh);
1684 pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len);
1685 dump_attrs(nlh);
1686 }
1687
1688 return ret < 0 ? ret : skb->len;
1689 }
1690
1691 static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1692 const struct nlmsghdr *nlh,
1693 const struct nlattr * const attr[],
1694 struct netlink_ext_ack *extack)
1695 {
1696 if (unlikely(protocol_min_failed(attr)))
1697 return -IPSET_ERR_PROTOCOL;
1698
1699 {
1700 struct netlink_dump_control c = {
1701 .start = ip_set_dump_start,
1702 .dump = ip_set_dump_do,
1703 .done = ip_set_dump_done,
1704 };
1705 return netlink_dump_start(ctnl, skb, nlh, &c);
1706 }
1707 }
1708
1709 /* Add, del and test */
1710
1711 static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = {
1712 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1713 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1714 .len = IPSET_MAXNAMELEN - 1 },
1715 [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
1716 [IPSET_ATTR_DATA] = { .type = NLA_NESTED },
1717 [IPSET_ATTR_ADT] = { .type = NLA_NESTED },
1718 };
1719
1720 static int
1721 call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
1722 struct nlattr *tb[], enum ipset_adt adt,
1723 u32 flags, bool use_lineno)
1724 {
1725 int ret;
1726 u32 lineno = 0;
1727 bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
1728
1729 do {
1730 ip_set_lock(set);
1731 ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
1732 ip_set_unlock(set);
1733 retried = true;
1734 } while (ret == -EAGAIN &&
1735 set->variant->resize &&
1736 (ret = set->variant->resize(set, retried)) == 0);
1737
1738 if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
1739 return 0;
1740 if (lineno && use_lineno) {
1741 /* Error in restore/batch mode: send back lineno */
1742 struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
1743 struct sk_buff *skb2;
1744 struct nlmsgerr *errmsg;
1745 size_t payload = min(SIZE_MAX,
1746 sizeof(*errmsg) + nlmsg_len(nlh));
1747 int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
1748 struct nlattr *cda[IPSET_ATTR_CMD_MAX + 1];
1749 struct nlattr *cmdattr;
1750 u32 *errline;
1751
1752 skb2 = nlmsg_new(payload, GFP_KERNEL);
1753 if (!skb2)
1754 return -ENOMEM;
1755 rep = __nlmsg_put(skb2, NETLINK_CB(skb).portid,
1756 nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
1757 errmsg = nlmsg_data(rep);
1758 errmsg->error = ret;
1759 memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
1760 cmdattr = (void *)&errmsg->msg + min_len;
1761
1762 ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr,
1763 nlh->nlmsg_len - min_len, ip_set_adt_policy,
1764 NULL);
1765
1766 if (ret) {
1767 nlmsg_free(skb2);
1768 return ret;
1769 }
1770 errline = nla_data(cda[IPSET_ATTR_LINENO]);
1771
1772 *errline = lineno;
1773
1774 netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid,
1775 MSG_DONTWAIT);
1776 /* Signal netlink not to send its ACK/errmsg. */
1777 return -EINTR;
1778 }
1779
1780 return ret;
1781 }
1782
1783 static int ip_set_ad(struct net *net, struct sock *ctnl,
1784 struct sk_buff *skb,
1785 enum ipset_adt adt,
1786 const struct nlmsghdr *nlh,
1787 const struct nlattr * const attr[],
1788 struct netlink_ext_ack *extack)
1789 {
1790 struct ip_set_net *inst = ip_set_pernet(net);
1791 struct ip_set *set;
1792 struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
1793 const struct nlattr *nla;
1794 u32 flags = flag_exist(nlh);
1795 bool use_lineno;
1796 int ret = 0;
1797
1798 if (unlikely(protocol_min_failed(attr) ||
1799 !attr[IPSET_ATTR_SETNAME] ||
1800 !((attr[IPSET_ATTR_DATA] != NULL) ^
1801 (attr[IPSET_ATTR_ADT] != NULL)) ||
1802 (attr[IPSET_ATTR_DATA] &&
1803 !flag_nested(attr[IPSET_ATTR_DATA])) ||
1804 (attr[IPSET_ATTR_ADT] &&
1805 (!flag_nested(attr[IPSET_ATTR_ADT]) ||
1806 !attr[IPSET_ATTR_LINENO]))))
1807 return -IPSET_ERR_PROTOCOL;
1808
1809 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1810 if (!set)
1811 return -ENOENT;
1812
1813 use_lineno = !!attr[IPSET_ATTR_LINENO];
1814 if (attr[IPSET_ATTR_DATA]) {
1815 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
1816 attr[IPSET_ATTR_DATA],
1817 set->type->adt_policy, NULL))
1818 return -IPSET_ERR_PROTOCOL;
1819 ret = call_ad(ctnl, skb, set, tb, adt, flags,
1820 use_lineno);
1821 } else {
1822 int nla_rem;
1823
1824 nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
1825 if (nla_type(nla) != IPSET_ATTR_DATA ||
1826 !flag_nested(nla) ||
1827 nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
1828 set->type->adt_policy, NULL))
1829 return -IPSET_ERR_PROTOCOL;
1830 ret = call_ad(ctnl, skb, set, tb, adt,
1831 flags, use_lineno);
1832 if (ret < 0)
1833 return ret;
1834 }
1835 }
1836 return ret;
1837 }
1838
1839 static int ip_set_uadd(struct net *net, struct sock *ctnl,
1840 struct sk_buff *skb, const struct nlmsghdr *nlh,
1841 const struct nlattr * const attr[],
1842 struct netlink_ext_ack *extack)
1843 {
1844 return ip_set_ad(net, ctnl, skb,
1845 IPSET_ADD, nlh, attr, extack);
1846 }
1847
1848 static int ip_set_udel(struct net *net, struct sock *ctnl,
1849 struct sk_buff *skb, const struct nlmsghdr *nlh,
1850 const struct nlattr * const attr[],
1851 struct netlink_ext_ack *extack)
1852 {
1853 return ip_set_ad(net, ctnl, skb,
1854 IPSET_DEL, nlh, attr, extack);
1855 }
1856
1857 static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1858 const struct nlmsghdr *nlh,
1859 const struct nlattr * const attr[],
1860 struct netlink_ext_ack *extack)
1861 {
1862 struct ip_set_net *inst = ip_set_pernet(net);
1863 struct ip_set *set;
1864 struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
1865 int ret = 0;
1866 u32 lineno;
1867
1868 if (unlikely(protocol_min_failed(attr) ||
1869 !attr[IPSET_ATTR_SETNAME] ||
1870 !attr[IPSET_ATTR_DATA] ||
1871 !flag_nested(attr[IPSET_ATTR_DATA])))
1872 return -IPSET_ERR_PROTOCOL;
1873
1874 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1875 if (!set)
1876 return -ENOENT;
1877
1878 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
1879 set->type->adt_policy, NULL))
1880 return -IPSET_ERR_PROTOCOL;
1881
1882 rcu_read_lock_bh();
1883 ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0);
1884 rcu_read_unlock_bh();
1885 /* Userspace can't trigger element to be re-added */
1886 if (ret == -EAGAIN)
1887 ret = 1;
1888
1889 return ret > 0 ? 0 : -IPSET_ERR_EXIST;
1890 }
1891
1892 /* Get header data of a set */
1893
1894 static int ip_set_header(struct net *net, struct sock *ctnl,
1895 struct sk_buff *skb, const struct nlmsghdr *nlh,
1896 const struct nlattr * const attr[],
1897 struct netlink_ext_ack *extack)
1898 {
1899 struct ip_set_net *inst = ip_set_pernet(net);
1900 const struct ip_set *set;
1901 struct sk_buff *skb2;
1902 struct nlmsghdr *nlh2;
1903 int ret = 0;
1904
1905 if (unlikely(protocol_min_failed(attr) ||
1906 !attr[IPSET_ATTR_SETNAME]))
1907 return -IPSET_ERR_PROTOCOL;
1908
1909 set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
1910 if (!set)
1911 return -ENOENT;
1912
1913 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1914 if (!skb2)
1915 return -ENOMEM;
1916
1917 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
1918 IPSET_CMD_HEADER);
1919 if (!nlh2)
1920 goto nlmsg_failure;
1921 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
1922 nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) ||
1923 nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) ||
1924 nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
1925 nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision))
1926 goto nla_put_failure;
1927 nlmsg_end(skb2, nlh2);
1928
1929 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1930 if (ret < 0)
1931 return ret;
1932
1933 return 0;
1934
1935 nla_put_failure:
1936 nlmsg_cancel(skb2, nlh2);
1937 nlmsg_failure:
1938 kfree_skb(skb2);
1939 return -EMSGSIZE;
1940 }
1941
1942 /* Get type data */
1943
1944 static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = {
1945 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1946 [IPSET_ATTR_TYPENAME] = { .type = NLA_NUL_STRING,
1947 .len = IPSET_MAXNAMELEN - 1 },
1948 [IPSET_ATTR_FAMILY] = { .type = NLA_U8 },
1949 };
1950
1951 static int ip_set_type(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1952 const struct nlmsghdr *nlh,
1953 const struct nlattr * const attr[],
1954 struct netlink_ext_ack *extack)
1955 {
1956 struct sk_buff *skb2;
1957 struct nlmsghdr *nlh2;
1958 u8 family, min, max;
1959 const char *typename;
1960 int ret = 0;
1961
1962 if (unlikely(protocol_min_failed(attr) ||
1963 !attr[IPSET_ATTR_TYPENAME] ||
1964 !attr[IPSET_ATTR_FAMILY]))
1965 return -IPSET_ERR_PROTOCOL;
1966
1967 family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
1968 typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
1969 ret = find_set_type_minmax(typename, family, &min, &max);
1970 if (ret)
1971 return ret;
1972
1973 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1974 if (!skb2)
1975 return -ENOMEM;
1976
1977 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
1978 IPSET_CMD_TYPE);
1979 if (!nlh2)
1980 goto nlmsg_failure;
1981 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
1982 nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) ||
1983 nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) ||
1984 nla_put_u8(skb2, IPSET_ATTR_REVISION, max) ||
1985 nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min))
1986 goto nla_put_failure;
1987 nlmsg_end(skb2, nlh2);
1988
1989 pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
1990 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
1991 if (ret < 0)
1992 return ret;
1993
1994 return 0;
1995
1996 nla_put_failure:
1997 nlmsg_cancel(skb2, nlh2);
1998 nlmsg_failure:
1999 kfree_skb(skb2);
2000 return -EMSGSIZE;
2001 }
2002
2003 /* Get protocol version */
2004
2005 static const struct nla_policy
2006 ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = {
2007 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
2008 };
2009
2010 static int ip_set_protocol(struct net *net, struct sock *ctnl,
2011 struct sk_buff *skb, const struct nlmsghdr *nlh,
2012 const struct nlattr * const attr[],
2013 struct netlink_ext_ack *extack)
2014 {
2015 struct sk_buff *skb2;
2016 struct nlmsghdr *nlh2;
2017 int ret = 0;
2018
2019 if (unlikely(!attr[IPSET_ATTR_PROTOCOL]))
2020 return -IPSET_ERR_PROTOCOL;
2021
2022 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2023 if (!skb2)
2024 return -ENOMEM;
2025
2026 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
2027 IPSET_CMD_PROTOCOL);
2028 if (!nlh2)
2029 goto nlmsg_failure;
2030 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL))
2031 goto nla_put_failure;
2032 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL_MIN, IPSET_PROTOCOL_MIN))
2033 goto nla_put_failure;
2034 nlmsg_end(skb2, nlh2);
2035
2036 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2037 if (ret < 0)
2038 return ret;
2039
2040 return 0;
2041
2042 nla_put_failure:
2043 nlmsg_cancel(skb2, nlh2);
2044 nlmsg_failure:
2045 kfree_skb(skb2);
2046 return -EMSGSIZE;
2047 }
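/*
 * Editorial note (not part of the original file): a minimal userspace
 * query for the handler above.  IPSET_CMD_PROTOCOL only requires the
 * IPSET_ATTR_PROTOCOL attribute to be present and answers with the
 * supported protocol range.  The libmnl sketch below is an illustration
 * only, not the ipset tool's actual code.
 */
#include <stdio.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/ipset/ip_set.h>

static int example_query_protocol(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfg;
	ssize_t n;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = (NFNL_SUBSYS_IPSET << 8) | IPSET_CMD_PROTOCOL;
	nlh->nlmsg_flags = NLM_F_REQUEST;

	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = AF_INET;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = 0;

	mnl_attr_put_u8(nlh, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);

	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (!nl)
		return -1;
	mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);
	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
	n = mnl_socket_recvfrom(nl, buf, sizeof(buf));
	printf("IPSET_CMD_PROTOCOL reply: %zd bytes\n", n);
	mnl_socket_close(nl);
	return 0;
}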
2048
2049 /* Get set by name or index, from userspace */
2050
2051 static int ip_set_byname(struct net *net, struct sock *ctnl,
2052 struct sk_buff *skb, const struct nlmsghdr *nlh,
2053 const struct nlattr * const attr[],
2054 struct netlink_ext_ack *extack)
2055 {
2056 struct ip_set_net *inst = ip_set_pernet(net);
2057 struct sk_buff *skb2;
2058 struct nlmsghdr *nlh2;
2059 ip_set_id_t id = IPSET_INVALID_ID;
2060 const struct ip_set *set;
2061 int ret = 0;
2062
2063 if (unlikely(protocol_failed(attr) ||
2064 !attr[IPSET_ATTR_SETNAME]))
2065 return -IPSET_ERR_PROTOCOL;
2066
2067 set = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &id);
2068 if (id == IPSET_INVALID_ID)
2069 return -ENOENT;
2070
2071 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2072 if (!skb2)
2073 return -ENOMEM;
2074
2075 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
2076 IPSET_CMD_GET_BYNAME);
2077 if (!nlh2)
2078 goto nlmsg_failure;
2079 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
2080 nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
2081 nla_put_net16(skb2, IPSET_ATTR_INDEX, htons(id)))
2082 goto nla_put_failure;
2083 nlmsg_end(skb2, nlh2);
2084
2085 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2086 if (ret < 0)
2087 return ret;
2088
2089 return 0;
2090
2091 nla_put_failure:
2092 nlmsg_cancel(skb2, nlh2);
2093 nlmsg_failure:
2094 kfree_skb(skb2);
2095 return -EMSGSIZE;
2096 }
2097
2098 static const struct nla_policy ip_set_index_policy[IPSET_ATTR_CMD_MAX + 1] = {
2099 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
2100 [IPSET_ATTR_INDEX] = { .type = NLA_U16 },
2101 };
2102
2103 static int ip_set_byindex(struct net *net, struct sock *ctnl,
2104 struct sk_buff *skb, const struct nlmsghdr *nlh,
2105 const struct nlattr * const attr[],
2106 struct netlink_ext_ack *extack)
2107 {
2108 struct ip_set_net *inst = ip_set_pernet(net);
2109 struct sk_buff *skb2;
2110 struct nlmsghdr *nlh2;
2111 ip_set_id_t id = IPSET_INVALID_ID;
2112 const struct ip_set *set;
2113 int ret = 0;
2114
2115 if (unlikely(protocol_failed(attr) ||
2116 !attr[IPSET_ATTR_INDEX]))
2117 return -IPSET_ERR_PROTOCOL;
2118
2119 id = ip_set_get_h16(attr[IPSET_ATTR_INDEX]);
2120 if (id >= inst->ip_set_max)
2121 return -ENOENT;
2122 set = ip_set(inst, id);
2123 if (set == NULL)
2124 return -ENOENT;
2125
2126 skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2127 if (!skb2)
2128 return -ENOMEM;
2129
2130 nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
2131 IPSET_CMD_GET_BYINDEX);
2132 if (!nlh2)
2133 goto nlmsg_failure;
2134 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
2135 nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name))
2136 goto nla_put_failure;
2137 nlmsg_end(skb2, nlh2);
2138
2139 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
2140 if (ret < 0)
2141 return ret;
2142
2143 return 0;
2144
2145 nla_put_failure:
2146 nlmsg_cancel(skb2, nlh2);
2147 nlmsg_failure:
2148 kfree_skb(skb2);
2149 return -EMSGSIZE;
2150 }
2151
2152 static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
2153 [IPSET_CMD_NONE] = {
2154 .call = ip_set_none,
2155 .attr_count = IPSET_ATTR_CMD_MAX,
2156 },
2157 [IPSET_CMD_CREATE] = {
2158 .call = ip_set_create,
2159 .attr_count = IPSET_ATTR_CMD_MAX,
2160 .policy = ip_set_create_policy,
2161 },
2162 [IPSET_CMD_DESTROY] = {
2163 .call = ip_set_destroy,
2164 .attr_count = IPSET_ATTR_CMD_MAX,
2165 .policy = ip_set_setname_policy,
2166 },
2167 [IPSET_CMD_FLUSH] = {
2168 .call = ip_set_flush,
2169 .attr_count = IPSET_ATTR_CMD_MAX,
2170 .policy = ip_set_setname_policy,
2171 },
2172 [IPSET_CMD_RENAME] = {
2173 .call = ip_set_rename,
2174 .attr_count = IPSET_ATTR_CMD_MAX,
2175 .policy = ip_set_setname2_policy,
2176 },
2177 [IPSET_CMD_SWAP] = {
2178 .call = ip_set_swap,
2179 .attr_count = IPSET_ATTR_CMD_MAX,
2180 .policy = ip_set_setname2_policy,
2181 },
2182 [IPSET_CMD_LIST] = {
2183 .call = ip_set_dump,
2184 .attr_count = IPSET_ATTR_CMD_MAX,
2185 .policy = ip_set_dump_policy,
2186 },
2187 [IPSET_CMD_SAVE] = {
2188 .call = ip_set_dump,
2189 .attr_count = IPSET_ATTR_CMD_MAX,
2190 .policy = ip_set_setname_policy,
2191 },
2192 [IPSET_CMD_ADD] = {
2193 .call = ip_set_uadd,
2194 .attr_count = IPSET_ATTR_CMD_MAX,
2195 .policy = ip_set_adt_policy,
2196 },
2197 [IPSET_CMD_DEL] = {
2198 .call = ip_set_udel,
2199 .attr_count = IPSET_ATTR_CMD_MAX,
2200 .policy = ip_set_adt_policy,
2201 },
2202 [IPSET_CMD_TEST] = {
2203 .call = ip_set_utest,
2204 .attr_count = IPSET_ATTR_CMD_MAX,
2205 .policy = ip_set_adt_policy,
2206 },
2207 [IPSET_CMD_HEADER] = {
2208 .call = ip_set_header,
2209 .attr_count = IPSET_ATTR_CMD_MAX,
2210 .policy = ip_set_setname_policy,
2211 },
2212 [IPSET_CMD_TYPE] = {
2213 .call = ip_set_type,
2214 .attr_count = IPSET_ATTR_CMD_MAX,
2215 .policy = ip_set_type_policy,
2216 },
2217 [IPSET_CMD_PROTOCOL] = {
2218 .call = ip_set_protocol,
2219 .attr_count = IPSET_ATTR_CMD_MAX,
2220 .policy = ip_set_protocol_policy,
2221 },
2222 [IPSET_CMD_GET_BYNAME] = {
2223 .call = ip_set_byname,
2224 .attr_count = IPSET_ATTR_CMD_MAX,
2225 .policy = ip_set_setname_policy,
2226 },
2227 [IPSET_CMD_GET_BYINDEX] = {
2228 .call = ip_set_byindex,
2229 .attr_count = IPSET_ATTR_CMD_MAX,
2230 .policy = ip_set_index_policy,
2231 },
2232 };
2233
2234 static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
2235 .name = "ip_set",
2236 .subsys_id = NFNL_SUBSYS_IPSET,
2237 .cb_count = IPSET_MSG_MAX,
2238 .cb = ip_set_netlink_subsys_cb,
2239 };
2240
2241 /* Interface to iptables/ip6tables */
2242
2243 static int
2244 ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
2245 {
2246 unsigned int *op;
2247 void *data;
2248 int copylen = *len, ret = 0;
2249 struct net *net = sock_net(sk);
2250 struct ip_set_net *inst = ip_set_pernet(net);
2251
2252 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2253 return -EPERM;
2254 if (optval != SO_IP_SET)
2255 return -EBADF;
2256 if (*len < sizeof(unsigned int))
2257 return -EINVAL;
2258
2259 data = vmalloc(*len);
2260 if (!data)
2261 return -ENOMEM;
2262 if (copy_from_user(data, user, *len) != 0) {
2263 ret = -EFAULT;
2264 goto done;
2265 }
2266 op = data;
2267
2268 if (*op < IP_SET_OP_VERSION) {
2269 /* The old-style ops (codes below IP_SET_OP_VERSION) carry a protocol version right after the op code: check it */
2270 struct ip_set_req_version *req_version = data;
2271
2272 if (*len < sizeof(struct ip_set_req_version)) {
2273 ret = -EINVAL;
2274 goto done;
2275 }
2276
2277 if (req_version->version < IPSET_PROTOCOL_MIN) {
2278 ret = -EPROTO;
2279 goto done;
2280 }
2281 }
2282
2283 switch (*op) {
2284 case IP_SET_OP_VERSION: {
2285 struct ip_set_req_version *req_version = data;
2286
2287 if (*len != sizeof(struct ip_set_req_version)) {
2288 ret = -EINVAL;
2289 goto done;
2290 }
2291
2292 req_version->version = IPSET_PROTOCOL;
2293 if (copy_to_user(user, req_version,
2294 sizeof(struct ip_set_req_version)))
2295 ret = -EFAULT;
2296 goto done;
2297 }
2298 case IP_SET_OP_GET_BYNAME: {
2299 struct ip_set_req_get_set *req_get = data;
2300 ip_set_id_t id;
2301
2302 if (*len != sizeof(struct ip_set_req_get_set)) {
2303 ret = -EINVAL;
2304 goto done;
2305 }
2306 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
2307 nfnl_lock(NFNL_SUBSYS_IPSET);
2308 find_set_and_id(inst, req_get->set.name, &id);
2309 req_get->set.index = id;
2310 nfnl_unlock(NFNL_SUBSYS_IPSET);
2311 goto copy;
2312 }
2313 case IP_SET_OP_GET_FNAME: {
2314 struct ip_set_req_get_set_family *req_get = data;
2315 ip_set_id_t id;
2316
2317 if (*len != sizeof(struct ip_set_req_get_set_family)) {
2318 ret = -EINVAL;
2319 goto done;
2320 }
2321 req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
2322 nfnl_lock(NFNL_SUBSYS_IPSET);
2323 find_set_and_id(inst, req_get->set.name, &id);
2324 req_get->set.index = id;
2325 if (id != IPSET_INVALID_ID)
2326 req_get->family = ip_set(inst, id)->family;
2327 nfnl_unlock(NFNL_SUBSYS_IPSET);
2328 goto copy;
2329 }
2330 case IP_SET_OP_GET_BYINDEX: {
2331 struct ip_set_req_get_set *req_get = data;
2332 struct ip_set *set;
2333
2334 if (*len != sizeof(struct ip_set_req_get_set) ||
2335 req_get->set.index >= inst->ip_set_max) {
2336 ret = -EINVAL;
2337 goto done;
2338 }
2339 nfnl_lock(NFNL_SUBSYS_IPSET);
2340 set = ip_set(inst, req_get->set.index);
2341 ret = strscpy(req_get->set.name, set ? set->name : "",
2342 IPSET_MAXNAMELEN);
2343 nfnl_unlock(NFNL_SUBSYS_IPSET);
2344 if (ret < 0)
2345 goto done;
2346 goto copy;
2347 }
2348 default:
2349 ret = -EBADMSG;
2350 goto done;
2351 } /* end of switch(op) */
2352
2353 copy:
2354 if (copy_to_user(user, data, copylen))
2355 ret = -EFAULT;
2356
2357 done:
2358 vfree(data);
2359 if (ret > 0)
2360 ret = 0;
2361 return ret;
2362 }
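/*
 * Editorial note (not part of the original file): ip_set_sockfn_get()
 * above backs the legacy getsockopt() interface that iptables/ip6tables
 * use to translate set names to indices.  The sketch below queries the
 * protocol version over that path; it assumes the uapi layout of
 * struct ip_set_req_version and needs CAP_NET_ADMIN (both for the raw
 * socket and for the handler's ns_capable() check).  Illustration only.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netfilter/ipset/ip_set.h>

static int example_sockopt_version(void)
{
	struct ip_set_req_version req = { .op = IP_SET_OP_VERSION };
	socklen_t size = sizeof(req);
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (fd < 0)
		return -1;
	if (getsockopt(fd, SOL_IP, SO_IP_SET, &req, &size) < 0) {
		close(fd);
		return -1;
	}
	printf("kernel ipset protocol: %u\n", req.version);
	close(fd);
	return 0;
}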
2363
2364 static struct nf_sockopt_ops so_set __read_mostly = {
2365 .pf = PF_INET,
2366 .get_optmin = SO_IP_SET,
2367 .get_optmax = SO_IP_SET + 1,
2368 .get = ip_set_sockfn_get,
2369 .owner = THIS_MODULE,
2370 };
2371
2372 static int __net_init
2373 ip_set_net_init(struct net *net)
2374 {
2375 struct ip_set_net *inst = ip_set_pernet(net);
2376 struct ip_set **list;
2377
2378 inst->ip_set_max = max_sets ? max_sets : CONFIG_IP_SET_MAX;
2379 if (inst->ip_set_max >= IPSET_INVALID_ID)
2380 inst->ip_set_max = IPSET_INVALID_ID - 1;
2381
2382 list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
2383 if (!list)
2384 return -ENOMEM;
2385 inst->is_deleted = false;
2386 inst->is_destroyed = false;
2387 rcu_assign_pointer(inst->ip_set_list, list);
2388 return 0;
2389 }
2390
2391 static void __net_exit
2392 ip_set_net_exit(struct net *net)
2393 {
2394 struct ip_set_net *inst = ip_set_pernet(net);
2395
2396 struct ip_set *set = NULL;
2397 ip_set_id_t i;
2398
2399 inst->is_deleted = true; /* flag for ip_set_nfnl_put */
2400
2401 nfnl_lock(NFNL_SUBSYS_IPSET);
2402 for (i = 0; i < inst->ip_set_max; i++) {
2403 set = ip_set(inst, i);
2404 if (set) {
2405 ip_set(inst, i) = NULL;
2406 ip_set_destroy_set(set);
2407 }
2408 }
2409 nfnl_unlock(NFNL_SUBSYS_IPSET);
2410 kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
2411 }
2412
2413 static struct pernet_operations ip_set_net_ops = {
2414 .init = ip_set_net_init,
2415 .exit = ip_set_net_exit,
2416 .id = &ip_set_net_id,
2417 .size = sizeof(struct ip_set_net),
2418 };
2419
2420 static int __init
2421 ip_set_init(void)
2422 {
2423 int ret = register_pernet_subsys(&ip_set_net_ops);
2424
2425 if (ret) {
2426 pr_err("ip_set: cannot register pernet_subsys.\n");
2427 return ret;
2428 }
2429
2430 ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
2431 if (ret != 0) {
2432 pr_err("ip_set: cannot register with nfnetlink.\n");
2433 unregister_pernet_subsys(&ip_set_net_ops);
2434 return ret;
2435 }
2436
2437 ret = nf_register_sockopt(&so_set);
2438 if (ret != 0) {
2439 pr_err("SO_IP_SET sockopt registration failed: %d\n", ret);
2440 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
2441 unregister_pernet_subsys(&ip_set_net_ops);
2442 return ret;
2443 }
2444
2445 return 0;
2446 }
2447
2448 static void __exit
2449 ip_set_fini(void)
2450 {
2451 nf_unregister_sockopt(&so_set);
2452 nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
2453
2454 unregister_pernet_subsys(&ip_set_net_ops);
2455 pr_debug("these are the famous last words\n");
2456 }
2457
2458 module_init(ip_set_init);
2459 module_exit(ip_set_fini);
2460
2461 MODULE_DESCRIPTION("ip_set: protocol " __stringify(IPSET_PROTOCOL));