]> git.ipfire.org Git - thirdparty/bird.git/blame_incremental - filter/filter.c
Filter refactoring: The values are now saved on a custom stack.
[thirdparty/bird.git] / filter / filter.c
... / ...
CommitLineData
1/*
2 * Filters: utility functions
3 *
4 * Copyright 1998 Pavel Machek <pavel@ucw.cz>
5 *
6 * Can be freely distributed and used under the terms of the GNU GPL.
7 *
8 */
9
10/**
11 * DOC: Filters
12 *
13 * You can find sources of the filter language in |filter/|
14 * directory. File |filter/config.Y| contains filter grammar and basically translates
15 * the source from user into a tree of &f_inst structures. These trees are
16 * later interpreted using code in |filter/filter.c|.
17 *
18 * A filter is represented by a tree of &f_inst structures, one structure per
19 * "instruction". Each &f_inst contains @code, @aux value which is
20 * usually the data type this instruction operates on and two generic
21 * arguments (@a1, @a2). Some instructions contain pointer(s) to other
22 * instructions in their (@a1, @a2) fields.
23 *
24 * Filters use a &f_val structure for their data. Each &f_val
25 * contains type and value (types are constants prefixed with %T_). Few
26 * of the types are special; %T_RETURN can be or-ed with a type to indicate
27 * that return from a function or from the whole filter should be
28 * forced. Important thing about &f_val's is that they may be copied
29 * with a simple |=|. That's fine for all currently defined types: strings
30 * are read-only (and therefore okay), paths are copied for each
31 * operation (okay too).
32 */
33
34#undef LOCAL_DEBUG
35
36#include "nest/bird.h"
37#include "lib/lists.h"
38#include "lib/resource.h"
39#include "lib/socket.h"
40#include "lib/string.h"
41#include "lib/unaligned.h"
42#include "lib/net.h"
43#include "lib/ip.h"
44#include "nest/route.h"
45#include "nest/protocol.h"
46#include "nest/iface.h"
47#include "nest/attrs.h"
48#include "conf/conf.h"
49#include "filter/filter.h"
50
/* Sentinel returned by comparison/matching helpers (val_compare(),
 * *_match_set(), val_in_range()) when operands cannot be compared. */
#define CMP_ERROR 999

/* Number of value slots on the shared interpreter stack */
#define FILTER_STACK_DEPTH 16384

/* Filter interpreter stack. Make this thread local after going parallel. */
struct filter_stack {
  struct f_val val;	/* One value slot; interpret() addresses slots via stack_ptr offsets */
};

static struct filter_stack filter_stack[FILTER_STACK_DEPTH];
61
/* Internal filter state, to be allocated on stack when executing filters */
struct filter_state {
  struct rte **rte;		/* Route being filtered; NULL when evaluating plain expressions */
  struct rta *old_rta;		/* Original cached rta saved by f_rta_cow(), freed in f_run() */
  struct ea_list **eattrs;	/* Cached pointer to the route's eattrs (set by f_cache_eattrs()) */
  struct linpool *pool;		/* All filter allocations go from this pool */
  struct buffer buf;		/* Scratch buffer for value formatting / logging */
  struct filter_stack *stack;	/* Value stack (currently the global filter_stack) */
  int stack_ptr;		/* Index of the current result slot on the value stack */
  int flags;			/* FF_* flags, e.g. FF_SILENT suppresses runtime error logs */
};

/* NOTE(review): set by the test framework; presumably invoked by the assert
 * instruction implemented in filter/f-inst.c — confirm against that file. */
void (*bt_assert_hook)(int result, struct f_inst *assert);

static struct adata undef_adata;	/* adata of length 0 used for undefined */
77
78/* Special undef value for paths and clists */
79static inline int
80undef_value(struct f_val v)
81{
82 return ((v.type == T_PATH) || (v.type == T_CLIST) ||
83 (v.type == T_ECLIST) || (v.type == T_LCLIST)) &&
84 (v.val.ad == &undef_adata);
85}
86
87static struct adata *
88adata_empty(struct linpool *pool, int l)
89{
90 struct adata *res = lp_alloc(pool, sizeof(struct adata) + l);
91 res->length = l;
92 return res;
93}
94
95static void
96pm_format(struct f_path_mask *p, buffer *buf)
97{
98 buffer_puts(buf, "[= ");
99
100 while (p)
101 {
102 switch(p->kind)
103 {
104 case PM_ASN:
105 buffer_print(buf, "%u ", p->val);
106 break;
107
108 case PM_QUESTION:
109 buffer_puts(buf, "? ");
110 break;
111
112 case PM_ASTERISK:
113 buffer_puts(buf, "* ");
114 break;
115
116 case PM_ASN_RANGE:
117 buffer_print(buf, "%u..%u ", p->val, p->val2);
118 break;
119
120 case PM_ASN_EXPR:
121 ASSERT(0);
122 }
123
124 p = p->next;
125 }
126
127 buffer_puts(buf, "=]");
128}
129
/* True iff @v is an IP address representable as IPv4 (eligible for IP->Quad conversion) */
static inline int val_is_ip4(const struct f_val v)
{ return (v.type == T_IP) && ipa_is_ip4(v.val.ip); }
132
133static inline int
134lcomm_cmp(lcomm v1, lcomm v2)
135{
136 if (v1.asn != v2.asn)
137 return (v1.asn > v2.asn) ? 1 : -1;
138 if (v1.ldp1 != v2.ldp1)
139 return (v1.ldp1 > v2.ldp1) ? 1 : -1;
140 if (v1.ldp2 != v2.ldp2)
141 return (v1.ldp2 > v2.ldp2) ? 1 : -1;
142 return 0;
143}
144
/**
 * val_compare - compare two values
 * @v1: first value
 * @v2: second value
 *
 * Compares two values and returns -1, 0, 1 on <, =, > or CMP_ERROR on
 * error. Tree module relies on this giving consistent results so
 * that it can be used for building balanced trees.
 */
int
val_compare(struct f_val v1, struct f_val v2)
{
  if (v1.type != v2.type) {
    if (v1.type == T_VOID)	/* Hack for else */
      return -1;
    if (v2.type == T_VOID)
      return 1;

    /* IP->Quad implicit conversion */
    if ((v1.type == T_QUAD) && val_is_ip4(v2))
      return uint_cmp(v1.val.i, ipa_to_u32(v2.val.ip));
    if (val_is_ip4(v1) && (v2.type == T_QUAD))
      return uint_cmp(ipa_to_u32(v1.val.ip), v2.val.i);

    debug( "Types do not match in val_compare\n" );
    return CMP_ERROR;
  }

  /* Same types: compare by the type-specific representation */
  switch (v1.type) {
  case T_VOID:
    return 0;
  case T_ENUM:
  case T_INT:
  case T_BOOL:
  case T_PAIR:
  case T_QUAD:
    /* All stored in the u32 val.i field */
    return uint_cmp(v1.val.i, v2.val.i);
  case T_EC:
  case T_RD:
    return u64_cmp(v1.val.ec, v2.val.ec);
  case T_LC:
    return lcomm_cmp(v1.val.lc, v2.val.lc);
  case T_IP:
    return ipa_compare(v1.val.ip, v2.val.ip);
  case T_NET:
    return net_compare(v1.val.net, v2.val.net);
  case T_STRING:
    return strcmp(v1.val.s, v2.val.s);
  default:
    /* Remaining types (paths, lists, sets, ...) have no total order */
    return CMP_ERROR;
  }
}
197
198static int
199pm_same(struct f_path_mask *m1, struct f_path_mask *m2)
200{
201 while (m1 && m2)
202 {
203 if (m1->kind != m2->kind)
204 return 0;
205
206 if (m1->kind == PM_ASN_EXPR)
207 {
208 if (!i_same((struct f_inst *) m1->val, (struct f_inst *) m2->val))
209 return 0;
210 }
211 else
212 {
213 if ((m1->val != m2->val) || (m1->val2 != m2->val2))
214 return 0;
215 }
216
217 m1 = m1->next;
218 m2 = m2->next;
219 }
220
221 return !m1 && !m2;
222}
223
/**
 * val_same - compare two values
 * @v1: first value
 * @v2: second value
 *
 * Compares two values and returns 1 if they are same and 0 if not.
 * Comparison of values of different types is valid and returns 0.
 */
int
val_same(struct f_val v1, struct f_val v2)
{
  int rc;

  /* For totally ordered types, equality follows from val_compare() */
  rc = val_compare(v1, v2);
  if (rc != CMP_ERROR)
    return !rc;

  if (v1.type != v2.type)
    return 0;

  /* Unordered types are compared by type-specific deep equality */
  switch (v1.type) {
  case T_PATH_MASK:
    return pm_same(v1.val.path_mask, v2.val.path_mask);
  case T_PATH:
  case T_CLIST:
  case T_ECLIST:
  case T_LCLIST:
    return adata_same(v1.val.ad, v2.val.ad);
  case T_SET:
    return same_tree(v1.val.t, v2.val.t);
  case T_PREFIX_SET:
    return trie_same(v1.val.ti, v2.val.ti);
  default:
    bug("Invalid type in val_same(): %x", v1.type);
  }
}
260
261static int
262clist_set_type(struct f_tree *set, struct f_val *v)
263{
264 switch (set->from.type)
265 {
266 case T_PAIR:
267 v->type = T_PAIR;
268 return 1;
269
270 case T_QUAD:
271 v->type = T_QUAD;
272 return 1;
273
274 case T_IP:
275 if (val_is_ip4(set->from) && val_is_ip4(set->to))
276 {
277 v->type = T_QUAD;
278 return 1;
279 }
280 /* Fall through */
281 default:
282 v->type = T_VOID;
283 return 0;
284 }
285}
286
/* A set is usable for eclist matching iff its elements are extended communities */
static inline int
eclist_set_type(struct f_tree *set)
{ return set->from.type == T_EC; }

/* A set is usable for lclist matching iff its elements are large communities */
static inline int
lclist_set_type(struct f_tree *set)
{ return set->from.type == T_LC; }
294
295static int
296clist_match_set(struct adata *clist, struct f_tree *set)
297{
298 if (!clist)
299 return 0;
300
301 struct f_val v;
302 if (!clist_set_type(set, &v))
303 return CMP_ERROR;
304
305 u32 *l = (u32 *) clist->data;
306 u32 *end = l + clist->length/4;
307
308 while (l < end) {
309 v.val.i = *l++;
310 if (find_tree(set, v))
311 return 1;
312 }
313 return 0;
314}
315
316static int
317eclist_match_set(struct adata *list, struct f_tree *set)
318{
319 if (!list)
320 return 0;
321
322 if (!eclist_set_type(set))
323 return CMP_ERROR;
324
325 struct f_val v;
326 u32 *l = int_set_get_data(list);
327 int len = int_set_get_size(list);
328 int i;
329
330 v.type = T_EC;
331 for (i = 0; i < len; i += 2) {
332 v.val.ec = ec_get(l, i);
333 if (find_tree(set, v))
334 return 1;
335 }
336
337 return 0;
338}
339
340static int
341lclist_match_set(struct adata *list, struct f_tree *set)
342{
343 if (!list)
344 return 0;
345
346 if (!lclist_set_type(set))
347 return CMP_ERROR;
348
349 struct f_val v;
350 u32 *l = int_set_get_data(list);
351 int len = int_set_get_size(list);
352 int i;
353
354 v.type = T_LC;
355 for (i = 0; i < len; i += 3) {
356 v.val.lc = lc_get(l, i);
357 if (find_tree(set, v))
358 return 1;
359 }
360
361 return 0;
362}
363
364static struct adata *
365clist_filter(struct linpool *pool, struct adata *list, struct f_val set, int pos)
366{
367 if (!list)
368 return NULL;
369
370 int tree = (set.type == T_SET); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
371 struct f_val v;
372 if (tree)
373 clist_set_type(set.val.t, &v);
374 else
375 v.type = T_PAIR;
376
377 int len = int_set_get_size(list);
378 u32 *l = int_set_get_data(list);
379 u32 tmp[len];
380 u32 *k = tmp;
381 u32 *end = l + len;
382
383 while (l < end) {
384 v.val.i = *l++;
385 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
386 if ((tree ? !!find_tree(set.val.t, v) : int_set_contains(set.val.ad, v.val.i)) == pos)
387 *k++ = v.val.i;
388 }
389
390 uint nl = (k - tmp) * sizeof(u32);
391 if (nl == list->length)
392 return list;
393
394 struct adata *res = adata_empty(pool, nl);
395 memcpy(res->data, tmp, nl);
396 return res;
397}
398
399static struct adata *
400eclist_filter(struct linpool *pool, struct adata *list, struct f_val set, int pos)
401{
402 if (!list)
403 return NULL;
404
405 int tree = (set.type == T_SET); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
406 struct f_val v;
407
408 int len = int_set_get_size(list);
409 u32 *l = int_set_get_data(list);
410 u32 tmp[len];
411 u32 *k = tmp;
412 int i;
413
414 v.type = T_EC;
415 for (i = 0; i < len; i += 2) {
416 v.val.ec = ec_get(l, i);
417 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
418 if ((tree ? !!find_tree(set.val.t, v) : ec_set_contains(set.val.ad, v.val.ec)) == pos) {
419 *k++ = l[i];
420 *k++ = l[i+1];
421 }
422 }
423
424 uint nl = (k - tmp) * sizeof(u32);
425 if (nl == list->length)
426 return list;
427
428 struct adata *res = adata_empty(pool, nl);
429 memcpy(res->data, tmp, nl);
430 return res;
431}
432
433static struct adata *
434lclist_filter(struct linpool *pool, struct adata *list, struct f_val set, int pos)
435{
436 if (!list)
437 return NULL;
438
439 int tree = (set.type == T_SET); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
440 struct f_val v;
441
442 int len = int_set_get_size(list);
443 u32 *l = int_set_get_data(list);
444 u32 tmp[len];
445 u32 *k = tmp;
446 int i;
447
448 v.type = T_LC;
449 for (i = 0; i < len; i += 3) {
450 v.val.lc = lc_get(l, i);
451 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
452 if ((tree ? !!find_tree(set.val.t, v) : lc_set_contains(set.val.ad, v.val.lc)) == pos)
453 k = lc_copy(k, l+i);
454 }
455
456 uint nl = (k - tmp) * sizeof(u32);
457 if (nl == list->length)
458 return list;
459
460 struct adata *res = adata_empty(pool, nl);
461 memcpy(res->data, tmp, nl);
462 return res;
463}
464
/**
 * val_in_range - implement |~| operator
 * @v1: element
 * @v2: set
 *
 * Checks if @v1 is element (|~| operator) of @v2.
 * Returns 1/0 for match/no match, CMP_ERROR for an unsupported
 * combination of operand types.
 */
static int
val_in_range(struct f_val v1, struct f_val v2)
{
  /* AS path against an explicit path mask */
  if ((v1.type == T_PATH) && (v2.type == T_PATH_MASK))
    return as_path_match(v1.val.ad, v2.val.path_mask);

  /* ASN contained in an AS path */
  if ((v1.type == T_INT) && (v2.type == T_PATH))
    return as_path_contains(v2.val.ad, v1.val.i, 1);

  /* Community (pair/quad) contained in a community list */
  if (((v1.type == T_PAIR) || (v1.type == T_QUAD)) && (v2.type == T_CLIST))
    return int_set_contains(v2.val.ad, v1.val.i);
  /* IP->Quad implicit conversion */
  if (val_is_ip4(v1) && (v2.type == T_CLIST))
    return int_set_contains(v2.val.ad, ipa_to_u32(v1.val.ip));

  if ((v1.type == T_EC) && (v2.type == T_ECLIST))
    return ec_set_contains(v2.val.ad, v1.val.ec);

  if ((v1.type == T_LC) && (v2.type == T_LCLIST))
    return lc_set_contains(v2.val.ad, v1.val.lc);

  /* String matching via patmatch(); @v2 is the pattern */
  if ((v1.type == T_STRING) && (v2.type == T_STRING))
    return patmatch(v2.val.s, v1.val.s);

  if ((v1.type == T_IP) && (v2.type == T_NET))
    return ipa_in_netX(v1.val.ip, v2.val.net);

  if ((v1.type == T_NET) && (v2.type == T_NET))
    return net_in_netX(v1.val.net, v2.val.net);

  if ((v1.type == T_NET) && (v2.type == T_PREFIX_SET))
    return trie_match_net(v2.val.ti, v1.val.net);

  /* Everything below requires a tree-based set as the right operand */
  if (v2.type != T_SET)
    return CMP_ERROR;

  /* With integrated Quad<->IP implicit conversion */
  if ((v1.type == v2.val.t->from.type) ||
      ((v1.type == T_QUAD) && val_is_ip4(v2.val.t->from) && val_is_ip4(v2.val.t->to)))
    return !!find_tree(v2.val.t, v1);

  /* Lists and paths match a set when any of their elements does */
  if (v1.type == T_CLIST)
    return clist_match_set(v1.val.ad, v2.val.t);

  if (v1.type == T_ECLIST)
    return eclist_match_set(v1.val.ad, v2.val.t);

  if (v1.type == T_LCLIST)
    return lclist_match_set(v1.val.ad, v2.val.t);

  if (v1.type == T_PATH)
    return as_path_match_set(v1.val.ad, v2.val.t);

  return CMP_ERROR;
}
527
/*
 * val_format - format filter value into @buf, human-readable
 */
void
val_format(struct f_val v, buffer *buf)
{
  char buf2[1024];	/* Scratch for formatters that fill a plain char array */
  switch (v.type)
  {
  case T_VOID:	buffer_puts(buf, "(void)"); return;
  case T_BOOL:	buffer_puts(buf, v.val.i ? "TRUE" : "FALSE"); return;
  case T_INT:	buffer_print(buf, "%u", v.val.i); return;
  case T_STRING: buffer_print(buf, "%s", v.val.s); return;
  case T_IP:	buffer_print(buf, "%I", v.val.ip); return;
  case T_NET:   buffer_print(buf, "%N", v.val.net); return;
  case T_PAIR:	buffer_print(buf, "(%u,%u)", v.val.i >> 16, v.val.i & 0xffff); return;
  case T_QUAD:	buffer_print(buf, "%R", v.val.i); return;
  case T_EC:	ec_format(buf2, v.val.ec); buffer_print(buf, "%s", buf2); return;
  case T_LC:	lc_format(buf2, v.val.lc); buffer_print(buf, "%s", buf2); return;
  case T_RD:	rd_format(v.val.ec, buf2, 1024); buffer_print(buf, "%s", buf2); return;
  case T_PREFIX_SET: trie_format(v.val.ti, buf); return;
  case T_SET:	tree_format(v.val.t, buf); return;
  case T_ENUM:	buffer_print(buf, "(enum %x)%u", v.type, v.val.i); return;
  case T_PATH:	as_path_format(v.val.ad, buf2, 1000); buffer_print(buf, "(path %s)", buf2); return;
  case T_CLIST:	int_set_format(v.val.ad, 1, -1, buf2, 1000); buffer_print(buf, "(clist %s)", buf2); return;
  case T_ECLIST: ec_set_format(v.val.ad, -1, buf2, 1000); buffer_print(buf, "(eclist %s)", buf2); return;
  case T_LCLIST: lc_set_format(v.val.ad, -1, buf2, 1000); buffer_print(buf, "(lclist %s)", buf2); return;
  case T_PATH_MASK: pm_format(v.val.path_mask, buf); return;
  default:	buffer_print(buf, "[unknown type %x]", v.type); return;
  }
}
559
560
/* Cache the pointer to the route's extended attribute list in the filter state */
static inline void f_cache_eattrs(struct filter_state *fs)
{
  fs->eattrs = &((*fs->rte)->attrs->eattrs);
}

/* Get a writable route: replace *fs->rte with a copy iff it is copy-on-write */
static inline void f_rte_cow(struct filter_state *fs)
{
  if (!((*fs->rte)->flags & REF_COW))
    return;

  *fs->rte = rte_cow(*fs->rte);
}
573
/*
 * rta_cow - prepare rta for modification by filter.
 * Must be called before a filter instruction modifies route attributes;
 * no-op when the rta is already private (uncached).
 */
static void
f_rta_cow(struct filter_state *fs)
{
  if (!rta_is_cached((*fs->rte)->attrs))
    return;

  /* Prepare to modify rte */
  f_rte_cow(fs);

  /* Store old rta to free it later, it stores reference from rte_cow() */
  fs->old_rta = (*fs->rte)->attrs;

  /*
   * Get shallow copy of rta. Fields eattrs and nexthops of rta are shared
   * with fs->old_rta (they will be copied when the cached rta will be obtained
   * at the end of f_run()), also the lock of hostentry is inherited (we
   * suppose hostentry is not changed by filters).
   */
  (*fs->rte)->attrs = rta_do_cow((*fs->rte)->attrs, fs->pool);

  /* Re-cache the ea_list */
  f_cache_eattrs(fs);
}
600
/* Render a filter value into a string allocated from the filter's linpool */
static char *
val_format_str(struct filter_state *fs, struct f_val v) {
  buffer b;
  LOG_BUFFER_INIT(b);
  val_format(v, &b);
  return lp_strdup(fs->pool, b.start);
}

/* Rate limiter for runtime error messages produced by filters */
static struct tbf rl_runtime_err = TBF_DEFAULT_LOG_LIMITS;
610
/**
 * interpret
 * @fs: filter state
 * @what: filter to interpret
 *
 * Interpret given tree of filter instructions. This is core function
 * of filter system and does all the hard work.
 *
 * Each instruction has 4 fields: code (which is instruction code),
 * aux (which is extension to instruction code, typically type),
 * arg1 and arg2 - arguments. Depending on instruction, arguments
 * are either integers, or pointers to instruction trees. Common
 * instructions like +, that have two expressions as arguments use
 * TWOARGS macro to get both of them evaluated.
 *
 * Values are passed on the custom value stack (fs->stack): this
 * invocation's result lives at fs->stack[fs->stack_ptr]; INTERPRET()
 * shifts stack_ptr by n so that a subexpression's result lands in this
 * invocation's v<n> slot.
 */
static enum filter_return
interpret(struct filter_state *fs, struct f_inst *what)
{
  struct symbol *sym;
  struct f_val *vp;
  unsigned u1, u2;
  enum filter_return fret;
  int i;
  u32 as;

/* Result slot of the current invocation and its three argument slots */
#define res fs->stack[fs->stack_ptr].val
#define v1 fs->stack[fs->stack_ptr + 1].val
#define v2 fs->stack[fs->stack_ptr + 2].val
#define v3 fs->stack[fs->stack_ptr + 3].val

  res = (struct f_val) { .type = T_VOID };

  for ( ; what; what = what->next) {
    res = (struct f_val) { .type = T_VOID };
    switch(what->fi_code) {

/* Report a runtime error (unless FF_SILENT) and abort filter execution */
#define runtime(fmt, ...) do { \
    if (!(fs->flags & FF_SILENT)) \
      log_rl(&rl_runtime_err, L_ERR "filters, line %d: " fmt, what->lineno, ##__VA_ARGS__); \
    return F_ERROR; \
  } while(0)

/* Evaluate argument @n of the current instruction into v<n> */
#define ARG_ANY(n) INTERPRET(what->a##n.p, n)

/* Evaluate argument @n and require it to be of type @t */
#define ARG(n,t) ARG_ANY(n); \
    if (v##n.type != t) \
      runtime("Argument %d of instruction %s must be of type %02x, got %02x", \
	      n, f_instruction_name(what->fi_code), t, v##n.type);

/* Recurse with the stack pointer shifted by @n slots; propagate errors up */
#define INTERPRET(what_, n) do { \
    fs->stack_ptr += n; \
    fret = interpret(fs, what_); \
    fs->stack_ptr -= n; \
    if (fret == F_RETURN) \
      bug("This shall not happen"); \
    if (fret > F_RETURN) \
      return fret; \
  } while (0)

#define ACCESS_RTE \
  do { if (!fs->rte) runtime("No route to access"); } while (0)

#define ACCESS_EATTRS \
  do { if (!fs->eattrs) f_cache_eattrs(fs); } while (0)

#define BITFIELD_MASK(what_) (1u << EA_BIT_GET(what_->a2.i))

/* The per-instruction case bodies are generated from filter/f-inst.c */
#include "filter/f-inst.c"

#undef res
#undef runtime
#undef ARG_ANY
#undef ARG
#undef INTERPRET
#undef ACCESS_RTE
#undef ACCESS_EATTRS
  }}
  return F_NOP;
}
690
691
/* Helpers for i_same(): compare the corresponding instruction-subtree
 * argument(s) of f1 and f2, bailing out on any difference. */
#define ARG(n) \
  if (!i_same(f1->a##n.p, f2->a##n.p)) \
    return 0;

#define ONEARG ARG(1);
#define TWOARGS ONEARG; ARG(2);
#define THREEARGS TWOARGS; ARG(3);

/* Compare the a2 fields as plain integers */
#define A2_SAME if (f1->a2.i != f2->a2.i) return 0;

/*
 * i_same - function that does real comparing of instruction trees, you should call filter_same from outside
 */
int
i_same(struct f_inst *f1, struct f_inst *f2)
{
  if ((!!f1) != (!!f2))
    return 0;
  if (!f1)
    return 1;
  if (f1->aux != f2->aux)
    return 0;
  if (f1->fi_code != f2->fi_code)
    return 0;
  if (f1 == f2)		/* It looks strange, but it is possible with call rewriting trickery */
    return 1;

  switch(f1->fi_code) {
  case FI_ADD: /* fall through */
  case FI_SUBTRACT:
  case FI_MULTIPLY:
  case FI_DIVIDE:
  case FI_OR:
  case FI_AND:
  case FI_PAIR_CONSTRUCT:
  case FI_EC_CONSTRUCT:
  case FI_NEQ:
  case FI_EQ:
  case FI_LT:
  case FI_LTE: TWOARGS; break;

  case FI_PATHMASK_CONSTRUCT: if (!pm_same(f1->a1.p, f2->a1.p)) return 0; break;

  case FI_NOT: ONEARG; break;
  case FI_NOT_MATCH:
  case FI_MATCH: TWOARGS; break;
  case FI_DEFINED: ONEARG; break;
  case FI_TYPE: ONEARG; break;

  case FI_LC_CONSTRUCT:
    THREEARGS;
    break;

  case FI_SET:
    ARG(2);
    {
      /* Variables are the same iff they share name and symbol class */
      struct symbol *s1, *s2;
      s1 = f1->a1.p;
      s2 = f2->a1.p;
      if (strcmp(s1->name, s2->name))
	return 0;
      if (s1->class != s2->class)
	return 0;
    }
    break;

  case FI_CONSTANT:
    switch (f1->aux) {

    case T_PREFIX_SET:
      if (!trie_same(f1->a2.p, f2->a2.p))
	return 0;
      break;

    case T_SET:
      if (!same_tree(f1->a2.p, f2->a2.p))
	return 0;
      break;

    case T_STRING:
      if (strcmp(f1->a2.p, f2->a2.p))
	return 0;
      break;

    default:
      A2_SAME;
    }
    break;

  case FI_CONSTANT_INDIRECT:
    if (!val_same(* (struct f_val *) f1->a1.p, * (struct f_val *) f2->a1.p))
      return 0;
    break;

  case FI_VARIABLE:
    if (strcmp((char *) f1->a2.p, (char *) f2->a2.p))
      return 0;
    break;
  case FI_PRINT: case FI_LENGTH: ONEARG; break;
  case FI_CONDITION: TWOARGS; break;
  case FI_NOP: case FI_EMPTY: break;
  case FI_PRINT_AND_DIE: ONEARG; A2_SAME; break;
  case FI_PREF_GET:
  case FI_RTA_GET: A2_SAME; break;
  case FI_EA_GET: A2_SAME; break;
  case FI_PREF_SET:
  case FI_RTA_SET:
  case FI_EA_SET: ONEARG; A2_SAME; break;

  case FI_RETURN: ONEARG; break;
  case FI_ROA_MAXLEN: ONEARG; break;
  case FI_ROA_ASN: ONEARG; break;
  case FI_SADR_SRC: ONEARG; break;
  case FI_IP: ONEARG; break;
  case FI_IS_V4: ONEARG; break;
  case FI_ROUTE_DISTINGUISHER: ONEARG; break;
  case FI_CALL: /* Call rewriting trickery to avoid exponential behaviour */
    ONEARG;
    if (!i_same(f1->a2.p, f2->a2.p))
      return 0;
    /* Rewrite the callee pointer so later comparisons hit the f1 == f2 shortcut */
    f2->a2.p = f1->a2.p;
    break;
  case FI_CLEAR_LOCAL_VARS: break; /* internal instruction */
  case FI_SWITCH: ONEARG; if (!same_tree(f1->a2.p, f2->a2.p)) return 0; break;
  case FI_IP_MASK: TWOARGS; break;
  case FI_PATH_PREPEND: TWOARGS; break;
  case FI_CLIST_ADD_DEL: TWOARGS; break;
  case FI_AS_PATH_FIRST:
  case FI_AS_PATH_LAST:
  case FI_AS_PATH_LAST_NAG: ONEARG; break;
  case FI_ROA_CHECK:
    TWOARGS;
    /* Does not really make sense - ROA check results may change anyway */
    if (strcmp(((struct f_inst_roa_check *) f1)->rtc->name,
	       ((struct f_inst_roa_check *) f2)->rtc->name))
      return 0;
    break;
  case FI_FORMAT: ONEARG; break;
  case FI_ASSERT: ONEARG; break;
  default:
    bug( "Unknown instruction %d in same (%c)", f1->fi_code, f1->fi_code & 0xff);
  }
  return i_same(f1->next, f2->next);
}
836
837/**
838 * f_run - run a filter for a route
839 * @filter: filter to run
840 * @rte: route being filtered, may be modified
841 * @tmp_pool: all filter allocations go from this pool
842 * @flags: flags
843 *
844 * If filter needs to modify the route, there are several
845 * posibilities. @rte might be read-only (with REF_COW flag), in that
846 * case rw copy is obtained by rte_cow() and @rte is replaced. If
847 * @rte is originally rw, it may be directly modified (and it is never
848 * copied).
849 *
850 * The returned rte may reuse the (possibly cached, cloned) rta, or
851 * (if rta was modificied) contains a modified uncached rta, which
852 * uses parts allocated from @tmp_pool and parts shared from original
853 * rta. There is one exception - if @rte is rw but contains a cached
854 * rta and that is modified, rta in returned rte is also cached.
855 *
856 * Ownership of cached rtas is consistent with rte, i.e.
857 * if a new rte is returned, it has its own clone of cached rta
858 * (and cached rta of read-only source rte is intact), if rte is
859 * modified in place, old cached rta is possibly freed.
860 */
861enum filter_return
862f_run(struct filter *filter, struct rte **rte, struct linpool *tmp_pool, int flags)
863{
864 if (filter == FILTER_ACCEPT)
865 return F_ACCEPT;
866
867 if (filter == FILTER_REJECT)
868 return F_REJECT;
869
870 int rte_cow = ((*rte)->flags & REF_COW);
871 DBG( "Running filter `%s'...", filter->name );
872
873 struct filter_state fs = {
874 .rte = rte,
875 .pool = tmp_pool,
876 .flags = flags,
877 .stack = filter_stack,
878 };
879
880 LOG_BUFFER_INIT(fs.buf);
881
882 enum filter_return fret = interpret(&fs, filter->root);
883
884 if (fs.old_rta) {
885 /*
886 * Cached rta was modified and fs->rte contains now an uncached one,
887 * sharing some part with the cached one. The cached rta should
888 * be freed (if rte was originally COW, fs->old_rta is a clone
889 * obtained during rte_cow()).
890 *
891 * This also implements the exception mentioned in f_run()
892 * description. The reason for this is that rta reuses parts of
893 * fs->old_rta, and these may be freed during rta_free(fs->old_rta).
894 * This is not the problem if rte was COW, because original rte
895 * also holds the same rta.
896 */
897 if (!rte_cow)
898 (*fs.rte)->attrs = rta_lookup((*fs.rte)->attrs);
899
900 rta_free(fs.old_rta);
901 }
902
903
904 if (fret < F_ACCEPT) {
905 if (!(fs.flags & FF_SILENT))
906 log_rl(&rl_runtime_err, L_ERR "Filter %s did not return accept nor reject. Make up your mind", filter->name);
907 return F_ERROR;
908 }
909 DBG( "done (%u)\n", res.val.i );
910 return fret;
911}
912
/* TODO: perhaps we could integrate f_eval(), f_eval_rte() and f_run() */

/* Evaluate an instruction tree with route context (@rte), so instructions
 * accessing route attributes work; result value is discarded. */
enum filter_return
f_eval_rte(struct f_inst *expr, struct rte **rte, struct linpool *tmp_pool)
{

  struct filter_state fs = {
    .rte = rte,
    .pool = tmp_pool,
    .stack = filter_stack,
  };

  LOG_BUFFER_INIT(fs.buf);

  /* Note that in this function we assume that rte->attrs is private / uncached */
  return interpret(&fs, expr);
}
930
931enum filter_return
932f_eval(struct f_inst *expr, struct linpool *tmp_pool, struct f_val *pres)
933{
934 struct filter_state fs = {
935 .pool = tmp_pool,
936 .stack = filter_stack,
937 };
938
939 LOG_BUFFER_INIT(fs.buf);
940
941 enum filter_return fret = interpret(&fs, expr);
942 *pres = filter_stack[0].val;
943 return fret;
944}
945
946uint
947f_eval_int(struct f_inst *expr)
948{
949 /* Called independently in parse-time to eval expressions */
950 struct filter_state fs = {
951 .pool = cfg_mem,
952 .stack = filter_stack,
953 };
954
955 LOG_BUFFER_INIT(fs.buf);
956
957 if (interpret(&fs, expr) > F_RETURN)
958 cf_error("Runtime error while evaluating expression");
959
960 if (filter_stack[0].val.type != T_INT)
961 cf_error("Integer expression expected");
962
963 return filter_stack[0].val.val.i;
964}
965
966/**
967 * filter_same - compare two filters
968 * @new: first filter to be compared
969 * @old: second filter to be compared, notice that this filter is
970 * damaged while comparing.
971 *
972 * Returns 1 in case filters are same, otherwise 0. If there are
973 * underlying bugs, it will rather say 0 on same filters than say
974 * 1 on different.
975 */
976int
977filter_same(struct filter *new, struct filter *old)
978{
979 if (old == new) /* Handle FILTER_ACCEPT and FILTER_REJECT */
980 return 1;
981 if (old == FILTER_ACCEPT || old == FILTER_REJECT ||
982 new == FILTER_ACCEPT || new == FILTER_REJECT)
983 return 0;
984 return i_same(new->root, old->root);
985}