/*
 * count the number of connections matching an arbitrary key.
 *
 * (C) 2017 Red Hat GmbH
 * Author: Florian Westphal <fw@strlen.de>
 *
 * split from xt_connlimit.c:
 *   (c) 2000 Gerd Knorr <kraxel@bytesex.org>
 *   Nov 2002: Martin Bene <martin.bene@icomedias.com>:
 *	only ignore TIME_WAIT or gone connections
 *   (C) CC Computer Consultants GmbH, 2007
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_count.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

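/* Data layout: each nf_conncount_data instance hashes the caller's key
 * into one of CONNCOUNT_SLOTS rb-trees.  Every tree node
 * (struct nf_conncount_rb) holds one key and a list of tracked conntrack
 * tuples (struct nf_conncount_tuple); the list length is the connection
 * count reported back to the caller.
 */
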
#define CONNCOUNT_SLOTS		256U

#define CONNCOUNT_GC_MAX_NODES	8
#define MAX_KEYLEN		5

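/* MAX_KEYLEN is in u32 units, so keys may be up to 20 bytes -- large
 * enough for e.g. an IPv6 address (four u32s) plus one extra selector
 * word.  nf_conncount_init() rejects anything longer.
 */
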
/* we will save the tuples of all connections we care about */
struct nf_conncount_tuple {
	struct list_head		node;
	struct nf_conntrack_tuple	tuple;
	struct nf_conntrack_zone	zone;
	int				cpu;
	u32				jiffies32;
};

struct nf_conncount_rb {
	struct rb_node node;
	struct nf_conncount_list list;
	u32 key[MAX_KEYLEN];
	struct rcu_head rcu_head;
};

static spinlock_t nf_conncount_locks[CONNCOUNT_SLOTS] __cacheline_aligned_in_smp;

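/* Per-instance state.  pending_trees marks hash slots that still contain
 * empty rb nodes awaiting removal by the gc worker; gc_tree is the slot
 * the worker will process next.
 */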
struct nf_conncount_data {
	unsigned int keylen;
	struct rb_root root[CONNCOUNT_SLOTS];
	struct net *net;
	struct work_struct gc_work;
	unsigned long pending_trees[BITS_TO_LONGS(CONNCOUNT_SLOTS)];
	unsigned int gc_tree;
};

static u_int32_t conncount_rnd __read_mostly;
static struct kmem_cache *conncount_rb_cachep __read_mostly;
static struct kmem_cache *conncount_conn_cachep __read_mostly;

static inline bool already_closed(const struct nf_conn *conn)
{
	if (nf_ct_protonum(conn) == IPPROTO_TCP)
		return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT ||
		       conn->proto.tcp.state == TCP_CONNTRACK_CLOSE;
	else
		return false;
}

static int key_diff(const u32 *a, const u32 *b, unsigned int klen)
{
	return memcmp(a, b, klen * sizeof(u32));
}

static void conn_free(struct nf_conncount_list *list,
		      struct nf_conncount_tuple *conn)
{
	lockdep_assert_held(&list->list_lock);

	list->count--;
	list_del(&conn->node);

	kmem_cache_free(conncount_conn_cachep, conn);
}

static const struct nf_conntrack_tuple_hash *
find_or_evict(struct net *net, struct nf_conncount_list *list,
	      struct nf_conncount_tuple *conn)
{
	const struct nf_conntrack_tuple_hash *found;
	unsigned long a, b;
	int cpu = raw_smp_processor_id();
	u32 age;

	found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
	if (found)
		return found;
	b = conn->jiffies32;
	a = (u32)jiffies;

	/* conn might have been added just before by another cpu and
	 * might still be unconfirmed.  In this case, nf_conntrack_find_get()
	 * returns no result.  Thus only evict if this cpu added the
	 * stale entry or if the entry is older than two jiffies.
	 */
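	/* Example: CPU 1 adds a tuple at jiffy T and the conntrack entry
	 * is still unconfirmed when CPU 2 scans the list at T or T+1.
	 * CPU 2 must not evict it (wrong cpu and age < 2); CPU 1, or any
	 * CPU from T+2 onwards, may.
	 */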
	age = a - b;
	if (conn->cpu == cpu || age >= 2) {
		conn_free(list, conn);
		return ERR_PTR(-ENOENT);
	}

	return ERR_PTR(-EAGAIN);
}

static int __nf_conncount_add(struct net *net,
			      struct nf_conncount_list *list,
			      const struct nf_conntrack_tuple *tuple,
			      const struct nf_conntrack_zone *zone)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collect = 0;

	/* check the saved connections */
	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		if (collect > CONNCOUNT_GC_MAX_NODES)
			break;

		found = find_or_evict(net, list, conn);
		if (IS_ERR(found)) {
			/* Not found, but might be about to be confirmed */
			if (PTR_ERR(found) == -EAGAIN) {
				if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
				    nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
				    nf_ct_zone_id(zone, zone->dir))
					return 0; /* already exists */
			} else {
				collect++;
			}
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);

		if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
		    nf_ct_zone_equal(found_ct, zone, zone->dir)) {
			/*
			 * We should not see tuples twice unless someone hooks
			 * this into a table without "-p tcp --syn".
			 *
			 * Attempt to avoid a re-add in this case.
			 */
			nf_ct_put(found_ct);
			return 0;
		} else if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collect++;
			continue;
		}

		nf_ct_put(found_ct);
	}

	if (WARN_ON_ONCE(list->count > INT_MAX))
		return -EOVERFLOW;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL)
		return -ENOMEM;

	conn->tuple = *tuple;
	conn->zone = *zone;
	conn->cpu = raw_smp_processor_id();
	conn->jiffies32 = (u32)jiffies;
	list_add_tail(&conn->node, &list->head);
	list->count++;
	return 0;
}

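/* Locked wrapper around __nf_conncount_add(): takes the per-list lock,
 * drops stale entries and appends *tuple unless an equal tuple/zone pair
 * is already present.  Returns 0 on success (or duplicate), -ENOMEM if
 * no entry could be allocated.
 */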
int nf_conncount_add(struct net *net,
		     struct nf_conncount_list *list,
		     const struct nf_conntrack_tuple *tuple,
		     const struct nf_conntrack_zone *zone)
{
	int ret;

	/* check the saved connections */
	spin_lock_bh(&list->list_lock);
	ret = __nf_conncount_add(net, list, tuple, zone);
	spin_unlock_bh(&list->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);

void nf_conncount_list_init(struct nf_conncount_list *list)
{
	spin_lock_init(&list->list_lock);
	INIT_LIST_HEAD(&list->head);
	list->count = 0;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);

/* Return true if the list is empty after collecting garbage; returns false
 * without scanning if another CPU already holds the list lock.  Must be
 * called with BH disabled.
 */
bool nf_conncount_gc_list(struct net *net,
			  struct nf_conncount_list *list)
{
	const struct nf_conntrack_tuple_hash *found;
	struct nf_conncount_tuple *conn, *conn_n;
	struct nf_conn *found_ct;
	unsigned int collected = 0;
	bool ret = false;

	/* don't bother if other cpu is already doing GC */
	if (!spin_trylock(&list->list_lock))
		return false;

	list_for_each_entry_safe(conn, conn_n, &list->head, node) {
		found = find_or_evict(net, list, conn);
		if (IS_ERR(found)) {
			if (PTR_ERR(found) == -ENOENT)
				collected++;
			continue;
		}

		found_ct = nf_ct_tuplehash_to_ctrack(found);
		if (already_closed(found_ct)) {
			/*
			 * we do not care about connections which are
			 * closed already -> ditch it
			 */
			nf_ct_put(found_ct);
			conn_free(list, conn);
			collected++;
			continue;
		}

		nf_ct_put(found_ct);
		if (collected > CONNCOUNT_GC_MAX_NODES)
			break;
	}

	if (!list->count)
		ret = true;
	spin_unlock(&list->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_gc_list);

static void __tree_nodes_free(struct rcu_head *h)
{
	struct nf_conncount_rb *rbconn;

	rbconn = container_of(h, struct nf_conncount_rb, rcu_head);
	kmem_cache_free(conncount_rb_cachep, rbconn);
}

/* caller must hold tree nf_conncount_locks[] lock */
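/* The list lock is retaken per node because nf_conncount_add() may have
 * repopulated a list between the gc pass that queued it here and this
 * point; only nodes that are still empty are unlinked and freed via RCU.
 */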
static void tree_nodes_free(struct rb_root *root,
			    struct nf_conncount_rb *gc_nodes[],
			    unsigned int gc_count)
{
	struct nf_conncount_rb *rbconn;

	while (gc_count) {
		rbconn = gc_nodes[--gc_count];
		spin_lock(&rbconn->list.list_lock);
		if (!rbconn->list.count) {
			rb_erase(&rbconn->node, root);
			call_rcu(&rbconn->rcu_head, __tree_nodes_free);
		}
		spin_unlock(&rbconn->list.list_lock);
	}
}

static void schedule_gc_worker(struct nf_conncount_data *data, int tree)
{
	set_bit(tree, data->pending_trees);
	schedule_work(&data->gc_work);
}

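/* Slow path: count_tree() found no matching node, or found one that was
 * about to be freed.  Re-walk the tree under nf_conncount_locks[hash];
 * on a key match add the tuple to the existing node, otherwise allocate
 * and link a new one.  Empty nodes seen along the way (at most
 * CONNCOUNT_GC_MAX_NODES) are freed and the gc worker is scheduled.
 * Returns the resulting count, or 0 to make the caller drop the packet.
 */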
static unsigned int
insert_tree(struct net *net,
	    struct nf_conncount_data *data,
	    struct rb_root *root,
	    unsigned int hash,
	    const u32 *key,
	    const struct nf_conntrack_tuple *tuple,
	    const struct nf_conntrack_zone *zone)
{
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES];
	struct rb_node **rbnode, *parent;
	struct nf_conncount_rb *rbconn;
	struct nf_conncount_tuple *conn;
	unsigned int count = 0, gc_count = 0;
	u8 keylen = data->keylen;
	bool do_gc = true;

	spin_lock_bh(&nf_conncount_locks[hash]);
restart:
	parent = NULL;
	rbnode = &(root->rb_node);
	while (*rbnode) {
		int diff;
		rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node);

		parent = *rbnode;
		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			rbnode = &((*rbnode)->rb_left);
		} else if (diff > 0) {
			rbnode = &((*rbnode)->rb_right);
		} else {
			int ret;

			ret = nf_conncount_add(net, &rbconn->list, tuple, zone);
			if (ret)
				count = 0; /* hotdrop */
			else
				count = rbconn->list.count;
			tree_nodes_free(root, gc_nodes, gc_count);
			goto out_unlock;
		}

		if (gc_count >= ARRAY_SIZE(gc_nodes))
			continue;

		if (do_gc && nf_conncount_gc_list(net, &rbconn->list))
			gc_nodes[gc_count++] = rbconn;
	}

	if (gc_count) {
		tree_nodes_free(root, gc_nodes, gc_count);
		schedule_gc_worker(data, hash);
		gc_count = 0;
		do_gc = false;
		goto restart;
	}

	/* no match found: allocate and insert a new node */
	rbconn = kmem_cache_alloc(conncount_rb_cachep, GFP_ATOMIC);
	if (rbconn == NULL)
		goto out_unlock;

	conn = kmem_cache_alloc(conncount_conn_cachep, GFP_ATOMIC);
	if (conn == NULL) {
		kmem_cache_free(conncount_rb_cachep, rbconn);
		goto out_unlock;
	}

	conn->tuple = *tuple;
	conn->zone = *zone;
	memcpy(rbconn->key, key, sizeof(u32) * keylen);

	nf_conncount_list_init(&rbconn->list);
	list_add(&conn->node, &rbconn->list.head);
	count = 1;
	rbconn->list.count = count;

	rb_link_node_rcu(&rbconn->node, parent, rbnode);
	rb_insert_color(&rbconn->node, root);
out_unlock:
	spin_unlock_bh(&nf_conncount_locks[hash]);
	return count;
}

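/* Fast path: descend the rb-tree locklessly under RCU.  With a NULL
 * tuple, just collect garbage on a matching node and report its count.
 * Otherwise add the tuple under the per-list lock; if no node matches,
 * or the matching node is empty and about to be freed, fall back to
 * insert_tree(), which retries under the per-slot lock.
 */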
static unsigned int
count_tree(struct net *net,
	   struct nf_conncount_data *data,
	   const u32 *key,
	   const struct nf_conntrack_tuple *tuple,
	   const struct nf_conntrack_zone *zone)
{
	struct rb_root *root;
	struct rb_node *parent;
	struct nf_conncount_rb *rbconn;
	unsigned int hash;
	u8 keylen = data->keylen;

	hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS;
	root = &data->root[hash];

	parent = rcu_dereference_raw(root->rb_node);
	while (parent) {
		int diff;

		rbconn = rb_entry(parent, struct nf_conncount_rb, node);

		diff = key_diff(key, rbconn->key, keylen);
		if (diff < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
		} else if (diff > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
		} else {
			int ret;

			if (!tuple) {
				nf_conncount_gc_list(net, &rbconn->list);
				return rbconn->list.count;
			}

			spin_lock_bh(&rbconn->list.list_lock);
			/* Node might be about to be free'd.
			 * We need to defer to insert_tree() in this case.
			 */
			if (rbconn->list.count == 0) {
				spin_unlock_bh(&rbconn->list.list_lock);
				break;
			}

			/* same source network -> be counted! */
			ret = __nf_conncount_add(net, &rbconn->list, tuple, zone);
			spin_unlock_bh(&rbconn->list.list_lock);
			if (ret)
				return 0; /* hotdrop */
			else
				return rbconn->list.count;
		}
	}

	if (!tuple)
		return 0;

	return insert_tree(net, data, root, hash, key, tuple, zone);
}

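/* Deferred gc: handles one pending tree per invocation.  Phase one walks
 * the tree under RCU and prunes each node's list; phase two, only if a
 * full batch of empty nodes was seen, retakes the slot lock and unlinks
 * nodes whose lists are still empty.  If other trees remain pending, the
 * work is rescheduled for the next one.
 */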
static void tree_gc_worker(struct work_struct *work)
{
	struct nf_conncount_data *data = container_of(work, struct nf_conncount_data, gc_work);
	struct nf_conncount_rb *gc_nodes[CONNCOUNT_GC_MAX_NODES], *rbconn;
	struct rb_root *root;
	struct rb_node *node;
	unsigned int tree, next_tree, gc_count = 0;

	tree = data->gc_tree % CONNCOUNT_SLOTS;
	root = &data->root[tree];

	local_bh_disable();
	rcu_read_lock();
	for (node = rb_first(root); node != NULL; node = rb_next(node)) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		if (nf_conncount_gc_list(data->net, &rbconn->list))
			gc_count++;
	}
	rcu_read_unlock();
	local_bh_enable();

	cond_resched();

	spin_lock_bh(&nf_conncount_locks[tree]);
	if (gc_count < ARRAY_SIZE(gc_nodes))
		goto next; /* do not bother */

	gc_count = 0;
	node = rb_first(root);
	while (node != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);
		node = rb_next(node);

		if (rbconn->list.count > 0)
			continue;

		gc_nodes[gc_count++] = rbconn;
		if (gc_count >= ARRAY_SIZE(gc_nodes)) {
			tree_nodes_free(root, gc_nodes, gc_count);
			gc_count = 0;
		}
	}

	tree_nodes_free(root, gc_nodes, gc_count);
next:
	clear_bit(tree, data->pending_trees);

	next_tree = (tree + 1) % CONNCOUNT_SLOTS;
	next_tree = find_next_bit(data->pending_trees, CONNCOUNT_SLOTS, next_tree);

	if (next_tree < CONNCOUNT_SLOTS) {
		data->gc_tree = next_tree;
		schedule_work(work);
	}

	spin_unlock_bh(&nf_conncount_locks[tree]);
}

/* Count and return number of conntrack entries in 'net' with particular
 * 'key'.  If 'tuple' is not null, insert it into the accounting data
 * structure; in that case a return value of 0 signals an internal error
 * ("hotdrop") rather than an empty count.  Call with RCU read lock.
 */
unsigned int nf_conncount_count(struct net *net,
				struct nf_conncount_data *data,
				const u32 *key,
				const struct nf_conntrack_tuple *tuple,
				const struct nf_conntrack_zone *zone)
{
	return count_tree(net, data, key, tuple, zone);
}
EXPORT_SYMBOL_GPL(nf_conncount_count);
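
/* Minimal usage sketch for the API above (hypothetical caller, loosely
 * modeled on a connlimit-style user; 'limit' and the error handling are
 * illustrative, not taken from this file):
 *
 *	data = nf_conncount_init(net, NFPROTO_IPV4, sizeof(u32));
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	key = tuple->src.u3.ip;		(count connections per source address)
 *	count = nf_conncount_count(net, data, &key, tuple, zone);
 *	if (count == 0 || count > limit)
 *		drop the packet (count == 0 means hotdrop)
 *
 *	nf_conncount_destroy(net, NFPROTO_IPV4, data);
 *
 * keylen passed to nf_conncount_init() is in bytes: it must be a non-zero
 * multiple of sizeof(u32) and at most MAX_KEYLEN * sizeof(u32).
 */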

struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
					    unsigned int keylen)
{
	struct nf_conncount_data *data;
	int ret, i;

	if (keylen % sizeof(u32) ||
	    keylen / sizeof(u32) > MAX_KEYLEN ||
	    keylen == 0)
		return ERR_PTR(-EINVAL);

	net_get_random_once(&conncount_rnd, sizeof(conncount_rnd));

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	ret = nf_ct_netns_get(net, family);
	if (ret < 0) {
		kfree(data);
		return ERR_PTR(ret);
	}

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		data->root[i] = RB_ROOT;

	data->keylen = keylen / sizeof(u32);
	data->net = net;
	INIT_WORK(&data->gc_work, tree_gc_worker);

	return data;
}
EXPORT_SYMBOL_GPL(nf_conncount_init);

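/* Free all entries of a list without taking the list lock.  Only safe on
 * teardown paths where no other user can reach the list anymore, e.g.
 * from destroy_tree() once the gc worker has been cancelled.
 */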
void nf_conncount_cache_free(struct nf_conncount_list *list)
{
	struct nf_conncount_tuple *conn, *conn_n;

	list_for_each_entry_safe(conn, conn_n, &list->head, node)
		kmem_cache_free(conncount_conn_cachep, conn);
}
EXPORT_SYMBOL_GPL(nf_conncount_cache_free);

static void destroy_tree(struct rb_root *r)
{
	struct nf_conncount_rb *rbconn;
	struct rb_node *node;

	while ((node = rb_first(r)) != NULL) {
		rbconn = rb_entry(node, struct nf_conncount_rb, node);

		rb_erase(node, r);

		nf_conncount_cache_free(&rbconn->list);

		kmem_cache_free(conncount_rb_cachep, rbconn);
	}
}

void nf_conncount_destroy(struct net *net, unsigned int family,
			  struct nf_conncount_data *data)
{
	unsigned int i;

	cancel_work_sync(&data->gc_work);
	nf_ct_netns_put(net, family);

	for (i = 0; i < ARRAY_SIZE(data->root); ++i)
		destroy_tree(&data->root[i]);

	kfree(data);
}
EXPORT_SYMBOL_GPL(nf_conncount_destroy);

static int __init nf_conncount_modinit(void)
{
	int i;

	for (i = 0; i < CONNCOUNT_SLOTS; ++i)
		spin_lock_init(&nf_conncount_locks[i]);

	conncount_conn_cachep = kmem_cache_create("nf_conncount_tuple",
					   sizeof(struct nf_conncount_tuple),
					   0, 0, NULL);
	if (!conncount_conn_cachep)
		return -ENOMEM;

	conncount_rb_cachep = kmem_cache_create("nf_conncount_rb",
					   sizeof(struct nf_conncount_rb),
					   0, 0, NULL);
	if (!conncount_rb_cachep) {
		kmem_cache_destroy(conncount_conn_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void __exit nf_conncount_modexit(void)
{
	kmem_cache_destroy(conncount_conn_cachep);
	kmem_cache_destroy(conncount_rb_cachep);
}

module_init(nf_conncount_modinit);
module_exit(nf_conncount_modexit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_DESCRIPTION("netfilter: count number of connections matching a key");
MODULE_LICENSE("GPL");