// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_t		count;
	struct delayed_work	gc_work;
};

struct nft_rbtree_elem {
	struct rb_node		node;
	struct nft_set_ext	ext;
};

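/*
 * Both the start and the end of an interval are stored as regular tree
 * elements, ordered by key; end elements are distinguished by the
 * NFT_SET_ELEM_INTERVAL_END flag in their extension area. Lookups thus
 * resolve a range match by remembering the closest preceding start element
 * while descending the tree.
 */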
static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}

static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
			     const struct nft_rbtree_elem *interval)
{
	return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
}

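/*
 * Lookups from the packet path run locklessly: the tree is walked under
 * RCU, and priv->count (a seqcount) lets a reader detect a concurrent
 * writer and retry instead of acting on a half-rebalanced tree.
 */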
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    nft_rbtree_equal(set, this, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			interval = rbe;
		} else if (d > 0)
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_rbtree_interval_end(rbe)) {
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			*ext = &rbe->ext;
			return true;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}

	return false;
}

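/*
 * Fast path: try a lockless walk first; if it raced with a writer
 * (detected via the seqcount), fall back to taking priv->lock shared and
 * repeat the walk under the lock.
 */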
static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
			      const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}

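/*
 * Control-plane counterpart of the lookup: returns the element itself
 * rather than its extension area. NFT_SET_ELEM_INTERVAL_END in @flags
 * selects whether the end element or the start element of an interval is
 * requested.
 */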
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}

static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}

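/*
 * Insertions run with priv->lock held for writing and inside a
 * write_seqcount_begin()/end() section (see nft_rbtree_insert() below), so
 * lockless readers either see a consistent tree or notice the seqcount
 * change and retry under the read lock.
 */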
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	struct nft_rbtree_elem *rbe;
	struct rb_node *parent, **p;
	bool overlap = false;
	int d;

	/* Detect overlaps as we descend the tree. Set the flag in these cases:
	 *
	 * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
	 * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
	 * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
	 *
	 * and clear it later on, as we eventually reach the points indicated by
	 * '?' above, in the cases described below. We'll always meet these
	 * later, locally, due to tree ordering, and overlaps for the intervals
	 * that are the closest together are always evaluated last.
	 *
	 * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
	 * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
	 * b3. _ _ ___! >|_ _ __|  (insert start after existing end)
	 *
	 * Case a3. resolves to b3.:
	 * - if the inserted start element is the leftmost, because the '0'
	 *   element in the tree serves as end element
	 * - otherwise, if an existing end is found. Note that end elements are
	 *   always inserted after corresponding start elements.
	 *
	 * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
	 * in that order.
	 *
	 * The flag is also cleared in two special cases:
	 *
	 * b4. |__ _ _!|<_ _ _   (insert start right before existing end)
	 * b5. |__ _ >|!__ _ _   (insert end right after existing start)
	 *
	 * which always happen as last step and imply that no further
	 * overlapping is possible.
	 */

	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = memcmp(nft_set_ext_key(&rbe->ext),
			   nft_set_ext_key(&new->ext),
			   set->klen);
		if (d < 0) {
			p = &parent->rb_left;

			if (nft_rbtree_interval_start(new)) {
				if (nft_rbtree_interval_end(rbe) &&
				    nft_set_elem_active(&rbe->ext, genmask))
					overlap = false;
			} else {
				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask);
			}
		} else if (d > 0) {
			p = &parent->rb_right;

			if (nft_rbtree_interval_end(new)) {
				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask);
			} else if (nft_rbtree_interval_end(rbe) &&
				   nft_set_elem_active(&rbe->ext, genmask)) {
				overlap = true;
			}
		} else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(new)) {
				p = &parent->rb_left;

				if (nft_set_elem_active(&rbe->ext, genmask))
					overlap = false;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(new)) {
				p = &parent->rb_right;

				if (nft_set_elem_active(&rbe->ext, genmask))
					overlap = false;
			} else if (nft_set_elem_active(&rbe->ext, genmask)) {
				*ext = &rbe->ext;
				return -EEXIST;
			} else {
				p = &parent->rb_left;
			}
		}
	}

	if (overlap)
		return -ENOTEMPTY;

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}

static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}

static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}

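/*
 * Activation and deactivation are driven by the transaction machinery:
 * they toggle the element's generation mask via
 * nft_set_elem_change_active(), making it (in)visible in the next
 * generation without moving it in the tree.
 */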
static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
	nft_set_elem_clear_busy(&rbe->ext);
}

static bool nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set, void *priv)
{
	struct nft_rbtree_elem *rbe = priv;

	if (!nft_set_elem_mark_busy(&rbe->ext) ||
	    !nft_is_active(net, &rbe->ext)) {
		nft_set_elem_change_active(net, set, &rbe->ext);
		return true;
	}
	return false;
}

static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}

static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}

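/*
 * Garbage collection: walk the whole tree under the write lock, drop
 * expired start elements together with their corresponding interval-end
 * elements, and queue them in a GC batch that is released only after an
 * RCU grace period has elapsed.
 */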
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
	struct nft_set_gc_batch *gcb = NULL;
	struct nft_rbtree *priv;
	struct rb_node *node;
	struct nft_set *set;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set  = nft_set_container_of(priv);

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;
		if (nft_set_elem_mark_busy(&rbe->ext))
			continue;

		if (rbe_prev) {
			rb_erase(&rbe_prev->node, &priv->root);
			rbe_prev = NULL;
		}
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			break;

		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe);
		rbe_prev = rbe;

		if (rbe_end) {
			atomic_dec(&set->nelems);
			nft_set_gc_batch_add(gcb, rbe_end);
			rb_erase(&rbe_end->node, &priv->root);
			rbe_end = NULL;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
	if (rbe_prev)
		rb_erase(&rbe_prev->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	nft_set_gc_batch_complete(gcb);

	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}

static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_init(&priv->count);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}

static void nft_rbtree_destroy(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nft_set_elem_destroy(set, rbe, true);
	}
}

static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->field_count > 1)
		return false;

	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};