/*
 * net/netfilter/nft_set_rbtree.c
 * (web-viewer navigation text removed from this scraped copy)
 */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
4 *
5 * Development of this code funded by Astaro AG (http://www.astaro.com/)
6 */
7
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/list.h>
12 #include <linux/rbtree.h>
13 #include <linux/netlink.h>
14 #include <linux/netfilter.h>
15 #include <linux/netfilter/nf_tables.h>
16 #include <net/netfilter/nf_tables_core.h>
17
/* Per-set private data: an rbtree of interval endpoint elements.
 *
 * Writers (insert/remove/gc) take @lock and wrap each tree modification
 * in a @count write section; the datapath lookup runs locklessly under
 * RCU and uses @count to detect a concurrent modification and retry.
 */
struct nft_rbtree {
	struct rb_root root;		/* tree of nft_rbtree_elem, keyed on element key */
	rwlock_t lock;			/* serialises writers; lookup retry path reads it */
	seqcount_t count;		/* bumped around every tree modification */
	struct delayed_work gc_work;	/* periodic expiry of timed-out elements */
};
24
/* One tree node: rb linkage plus the nftables extension area, which
 * carries the key, flags (interval start/end), timeout and so on.
 */
struct nft_rbtree_elem {
	struct rb_node node;
	struct nft_set_ext ext;
};
29
30 static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
31 {
32 return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
33 (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
34 }
35
/* An element opens a range iff it is not flagged as an interval end. */
static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}
40
41 static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
42 const struct nft_rbtree_elem *interval)
43 {
44 return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
45 }
46
/* Lockless descent of the tree looking for @key.
 *
 * Runs under RCU with no locks held; @seq is the seqcount snapshot taken
 * by the caller.  Returns false without touching *ext if a concurrent
 * writer bumped the seqcount mid-walk (the caller then retries under the
 * read lock).  On a match, *ext points at the element's extension and
 * true is returned.
 */
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		/* Bail out if a writer modified the tree under us. */
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			/* Remember the closest candidate with a key below
			 * @key, but never let an end element displace a
			 * remembered start that has the same key.
			 */
			if (interval &&
			    nft_rbtree_equal(set, this, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			interval = rbe;
		} else if (d > 0)
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			/* Skip elements not active in this generation. */
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}
			if (nft_rbtree_interval_end(rbe)) {
				/* Exact hit on an end element: for anonymous
				 * sets this is a definitive miss; otherwise
				 * keep descending for a matching start.
				 */
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			*ext = &rbe->ext;
			return true;
		}
	}

	/* No exact match: for interval sets, an active interval-start
	 * candidate below @key covers it.
	 */
	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}

	return false;
}
104
/* .lookup callback (datapath).  Try a lockless walk first; if a writer
 * raced with us, retry once with the read lock held so the tree is
 * stable for the duration of the walk.
 */
static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
			      const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	/* Lockless walk raced with an update: redo it with writers excluded. */
	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}
123
/* Control-plane element lookup for @key, lockless variant.
 *
 * Unlike __nft_rbtree_lookup() this honours @flags: with
 * NFT_SET_ELEM_INTERVAL_END set, the caller asks for the end element of
 * an interval rather than its start.  Returns false if the walk raced
 * with a writer (seqcount changed) or no suitable element exists;
 * otherwise stores the element in *elem and returns true.
 */
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		/* Abort on concurrent tree modification; caller retries. */
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			/* Track a fallback candidate of the requested kind. */
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			/* Exact key match: accept it if its start/end type
			 * agrees with what the caller asked for.
			 */
			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	/* No exact match: fall back to the remembered candidate, provided
	 * its start/end type matches the request.
	 */
	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}
183
/* .get callback: find the element matching @elem's key (and the
 * start/end preference in @flags) for the control plane.
 *
 * Lockless walk first, retried under the read lock on a race with a
 * writer.  Returns the element, or ERR_PTR(-ENOENT) if none matches.
 */
static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	/* Raced with a writer: redo the walk with writers locked out. */
	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}
207
/* Insert @new, detecting partial overlaps against existing active
 * intervals while descending (see the case analysis below).
 *
 * Return: 0 on success; -EEXIST (with *ext pointing at the clashing
 * element's extension) if an identical active element already exists;
 * -ENOTEMPTY if @new would partially overlap an existing interval.
 *
 * Called with priv->lock write-held inside a seqcount write section
 * (see nft_rbtree_insert()).
 */
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	struct nft_rbtree_elem *rbe;
	struct rb_node *parent, **p;
	bool overlap = false;
	int d;

	/* Detect overlaps as we descend the tree. Set the flag in these cases:
	 *
	 * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
	 * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
	 * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
	 *
	 * and clear it later on, as we eventually reach the points indicated by
	 * '?' above, in the cases described below. We'll always meet these
	 * later, locally, due to tree ordering, and overlaps for the intervals
	 * that are the closest together are always evaluated last.
	 *
	 * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
	 * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
	 * b3. _ _ ___! >|_ _ __|  (insert start after existing end)
	 *
	 * Case a3. resolves to b3.:
	 * - if the inserted start element is the leftmost, because the '0'
	 *   element in the tree serves as end element
	 * - otherwise, if an existing end is found. Note that end elements are
	 *   always inserted after corresponding start elements.
	 *
	 * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
	 * in that order.
	 *
	 * The flag is also cleared in two special cases:
	 *
	 * b4. |__ _ _!|<_ _ _   (insert start right before existing end)
	 * b5. |__ _ >|!__ _ _   (insert end right after existing start)
	 *
	 * which always happen as last step and imply that no further
	 * overlapping is possible.
	 */

	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = memcmp(nft_set_ext_key(&rbe->ext),
			   nft_set_ext_key(&new->ext),
			   set->klen);
		if (d < 0) {
			p = &parent->rb_left;

			if (nft_rbtree_interval_start(new)) {
				/* Passing an active end element clears a
				 * previously flagged overlap candidate.
				 */
				if (nft_rbtree_interval_end(rbe) &&
				    nft_set_elem_active(&rbe->ext, genmask))
					overlap = false;
			} else {
				/* Inserting an end: track whether this
				 * neighbour is an active end element.
				 */
				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask);
			}
		} else if (d > 0) {
			p = &parent->rb_right;

			if (nft_rbtree_interval_end(new)) {
				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask);
			} else if (nft_rbtree_interval_end(rbe) &&
				   nft_set_elem_active(&rbe->ext, genmask)) {
				overlap = true;
			}
		} else {
			/* Same key: start/end siblings are ordered around
			 * each other; a duplicate of an active element of
			 * the same kind is rejected with -EEXIST.
			 */
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(new)) {
				p = &parent->rb_left;

				if (nft_set_elem_active(&rbe->ext, genmask))
					overlap = false;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(new)) {
				p = &parent->rb_right;

				if (nft_set_elem_active(&rbe->ext, genmask))
					overlap = false;
			} else if (nft_set_elem_active(&rbe->ext, genmask)) {
				*ext = &rbe->ext;
				return -EEXIST;
			} else {
				p = &parent->rb_left;
			}
		}
	}

	if (overlap)
		return -ENOTEMPTY;

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}
312
/* .insert callback: serialised wrapper around __nft_rbtree_insert().
 *
 * Takes the write lock and opens a seqcount write section so that
 * lockless readers detect the tree modification and retry.
 */
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}
329
/* .remove callback: unlink @elem from the tree under the write lock and
 * a seqcount write section.  The element's memory is not freed here.
 */
static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}
343
/* .activate callback: toggle the element's generation mask via
 * nft_set_elem_change_active() and clear its transaction busy bit.
 */
static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
	nft_set_elem_clear_busy(&rbe->ext);
}
353
354 static bool nft_rbtree_flush(const struct net *net,
355 const struct nft_set *set, void *priv)
356 {
357 struct nft_rbtree_elem *rbe = priv;
358
359 if (!nft_set_elem_mark_busy(&rbe->ext) ||
360 !nft_is_active(net, &rbe->ext)) {
361 nft_set_elem_change_active(net, set, &rbe->ext);
362 return true;
363 }
364 return false;
365 }
366
/* .deactivate callback: locate the tree element matching @elem in the
 * next generation and flip it inactive via nft_rbtree_flush().
 *
 * Start and end elements share a key, so an exact key hit must still be
 * disambiguated by the interval-end flag.  Returns the element, or NULL
 * if no match exists.
 *
 * NOTE(review): no priv->lock is taken here — presumably callers
 * serialise updates at the transaction level; confirm against the
 * nf_tables API callers.
 */
static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			/* Same key, opposite kind: keep searching for the
			 * element of the requested start/end type.
			 */
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}
405
/* .walk callback: in-order iteration over all elements, invoking
 * iter->fn on each active element once iter->skip entries have been
 * passed over.  Stops early (leaving iter->err set) when the callback
 * returns a negative value.  The read lock is held across the whole
 * walk, including the callback invocations.
 */
static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}
436
/* Garbage-collection worker: scan the tree and batch up expired,
 * non-busy elements (plus the end element remembered for an expired
 * start) for destruction, then re-arm itself.
 *
 * Erasing the current node would invalidate rb_next(), so an expired
 * start element is only queued here (rbe_prev) and erased on the next
 * collection or after the loop.
 */
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
	struct nft_set_gc_batch *gcb = NULL;
	struct nft_rbtree *priv;
	struct rb_node *node;
	struct nft_set *set;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set = nft_set_container_of(priv);

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		/* Remember the most recent end element; it is collected
		 * together with its start if that start turns out expired.
		 */
		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;
		/* Skip elements busy in a transaction. */
		if (nft_set_elem_mark_busy(&rbe->ext))
			continue;

		/* Deferred erase of the previously collected start. */
		if (rbe_prev) {
			rb_erase(&rbe_prev->node, &priv->root);
			rbe_prev = NULL;
		}
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			break;

		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe);
		rbe_prev = rbe;

		if (rbe_end) {
			atomic_dec(&set->nelems);
			nft_set_gc_batch_add(gcb, rbe_end);
			rb_erase(&rbe_end->node, &priv->root);
			rbe_end = NULL;
		}
		/* NOTE(review): this extra advance, combined with the loop
		 * increment, skips the node following a collected element —
		 * presumably its paired end element; confirm against the
		 * tree's interval ordering.
		 */
		node = rb_next(node);
		if (!node)
			break;
	}
	if (rbe_prev)
		rb_erase(&rbe_prev->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	nft_set_gc_batch_complete(gcb);

	/* Re-arm the periodic gc. */
	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}
494
/* .privsize callback: bytes of per-set private storage to allocate —
 * just the nft_rbtree header; elements are allocated individually.
 */
static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}
500
/* .init callback: initialise the per-set private area and, for sets
 * with timeouts, start the periodic garbage collection.  Always
 * returns 0.
 */
static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_init(&priv->count);
	priv->root = RB_ROOT;

	/* Periodic gc only makes sense when elements can expire. */
	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}
518
/* .destroy callback: stop the gc worker, wait out pending RCU callbacks,
 * then free every remaining element.
 *
 * NOTE(review): the tree is walked without priv->lock — presumably no
 * lookups or updates can reference the set at this point; confirm.
 */
static void nft_rbtree_destroy(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nft_set_elem_destroy(set, rbe, true);
	}
}
533
534 static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
535 struct nft_set_estimate *est)
536 {
537 if (desc->field_count > 1)
538 return false;
539
540 if (desc->size)
541 est->size = sizeof(struct nft_rbtree) +
542 desc->size * sizeof(struct nft_rbtree_elem);
543 else
544 est->size = ~0;
545
546 est->lookup = NFT_SET_CLASS_O_LOG_N;
547 est->space = NFT_SET_CLASS_O_N;
548
549 return true;
550 }
551
/* Registration of the rbtree set backend and its operations: interval
 * capable, O(log n) lookup, O(n) space (see nft_rbtree_estimate()).
 */
const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};