/*
 *	Filters: utility functions
 *
 *	Copyright 1998 Pavel Machek <pavel@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 *
 */

/**
 * DOC: Filters
 *
 * You can find the sources of the filter language in the |filter/|
 * directory. The file |filter/config.Y| contains the filter grammar and basically
 * translates the source from the user into a tree of &f_inst structures. These
 * trees are later interpreted using code in |filter/filter.c|.
 *
 * A filter is represented by a tree of &f_inst structures, one structure per
 * "instruction". Each &f_inst contains @code, an @aux value which is
 * usually the data type this instruction operates on, and two generic
 * arguments (@a1, @a2). Some instructions contain pointer(s) to other
 * instructions in their (@a1, @a2) fields.
 *
 * Filters use a &f_val structure for their data. Each &f_val
 * contains a type and a value (types are constants prefixed with %T_). A few
 * of the types are special; %T_RETURN can be or-ed with a type to indicate
 * that a return from a function or from the whole filter should be
 * forced. An important property of &f_val structures is that they may be
 * copied with a simple |=|. That's fine for all currently defined types:
 * strings are read-only (and therefore okay), and paths are copied for each
 * operation (okay too).
 */
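
/*
 * For orientation, a filter in the configuration language might look like
 * this (an illustrative sketch, not taken from this file):
 *
 *   filter sample_filter
 *   {
 *     if net ~ [ 10.0.0.0/8+ ] then reject;
 *     accept;
 *   }
 *
 * |filter/config.Y| compiles such text into a tree of &f_inst nodes that
 * interpret() below walks once for every route passed to the filter.
 */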

#undef LOCAL_DEBUG

#include "nest/bird.h"
#include "lib/lists.h"
#include "lib/resource.h"
#include "lib/socket.h"
#include "lib/string.h"
#include "lib/unaligned.h"
#include "lib/net.h"
#include "lib/ip.h"
#include "nest/route.h"
#include "nest/protocol.h"
#include "nest/iface.h"
#include "nest/attrs.h"
#include "conf/conf.h"
#include "filter/filter.h"

#define CMP_ERROR 999

/* Internal filter state, to be allocated on the stack when executing filters */
struct filter_state {
  struct rte **rte;
  struct rta *old_rta;
  struct ea_list **eattrs;
  struct linpool *pool;
  struct buffer buf;
  int flags;
};

void (*bt_assert_hook)(int result, struct f_inst *assert);

static struct adata undef_adata;  /* adata of length 0 used for undefined */

/* Special undef value for paths and clists */
static inline int
undef_value(struct f_val v)
{
  return ((v.type == T_PATH) || (v.type == T_CLIST) ||
	  (v.type == T_ECLIST) || (v.type == T_LCLIST)) &&
    (v.val.ad == &undef_adata);
}

static struct adata *
adata_empty(struct linpool *pool, int l)
{
  struct adata *res = lp_alloc(pool, sizeof(struct adata) + l);
  res->length = l;
  return res;
}

static void
pm_format(struct f_path_mask *p, buffer *buf)
{
  buffer_puts(buf, "[= ");

  while (p)
  {
    switch(p->kind)
    {
    case PM_ASN:
      buffer_print(buf, "%u ", p->val);
      break;

    case PM_QUESTION:
      buffer_puts(buf, "? ");
      break;

    case PM_ASTERISK:
      buffer_puts(buf, "* ");
      break;

    case PM_ASN_RANGE:
      buffer_print(buf, "%u..%u ", p->val, p->val2);
      break;

    case PM_ASN_EXPR:
      ASSERT(0);
    }

    p = p->next;
  }

  buffer_puts(buf, "=]");
}
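
/*
 * For example (illustrative values only), a mask consisting of AS 65000,
 * an asterisk wildcard and the range 100..200 formats as
 * "[= 65000 * 100..200 =]".
 */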

static inline int val_is_ip4(const struct f_val v)
{ return (v.type == T_IP) && ipa_is_ip4(v.val.ip); }

static inline int
lcomm_cmp(lcomm v1, lcomm v2)
{
  if (v1.asn != v2.asn)
    return (v1.asn > v2.asn) ? 1 : -1;
  if (v1.ldp1 != v2.ldp1)
    return (v1.ldp1 > v2.ldp1) ? 1 : -1;
  if (v1.ldp2 != v2.ldp2)
    return (v1.ldp2 > v2.ldp2) ? 1 : -1;
  return 0;
}

/**
 * val_compare - compare two values
 * @v1: first value
 * @v2: second value
 *
 * Compares two values and returns -1, 0, 1 on <, =, > or CMP_ERROR on
 * error. The tree module relies on this giving consistent results so
 * that it can be used for building balanced trees.
 */
int
val_compare(struct f_val v1, struct f_val v2)
{
  if (v1.type != v2.type) {
    if (v1.type == T_VOID)	/* Hack for else */
      return -1;
    if (v2.type == T_VOID)
      return 1;

    /* IP->Quad implicit conversion */
    if ((v1.type == T_QUAD) && val_is_ip4(v2))
      return uint_cmp(v1.val.i, ipa_to_u32(v2.val.ip));
    if (val_is_ip4(v1) && (v2.type == T_QUAD))
      return uint_cmp(ipa_to_u32(v1.val.ip), v2.val.i);

    debug("Types do not match in val_compare\n");
    return CMP_ERROR;
  }

  switch (v1.type) {
  case T_VOID:
    return 0;
  case T_ENUM:
  case T_INT:
  case T_BOOL:
  case T_PAIR:
  case T_QUAD:
    return uint_cmp(v1.val.i, v2.val.i);
  case T_EC:
  case T_RD:
    return u64_cmp(v1.val.ec, v2.val.ec);
  case T_LC:
    return lcomm_cmp(v1.val.lc, v2.val.lc);
  case T_IP:
    return ipa_compare(v1.val.ip, v2.val.ip);
  case T_NET:
    return net_compare(v1.val.net, v2.val.net);
  case T_STRING:
    return strcmp(v1.val.s, v2.val.s);
  default:
    return CMP_ERROR;
  }
}
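
/*
 * A minimal usage sketch with hypothetical values: two T_INT values
 * compare by their integer contents, so the call below yields -1.
 *
 *   struct f_val a = { .type = T_INT, .val.i = 1 };
 *   struct f_val b = { .type = T_INT, .val.i = 2 };
 *   int r = val_compare(a, b);	// -1, since 1 < 2
 */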

static int
pm_same(struct f_path_mask *m1, struct f_path_mask *m2)
{
  while (m1 && m2)
  {
    if (m1->kind != m2->kind)
      return 0;

    if (m1->kind == PM_ASN_EXPR)
    {
      if (!i_same((struct f_inst *) m1->val, (struct f_inst *) m2->val))
	return 0;
    }
    else
    {
      if ((m1->val != m2->val) || (m1->val2 != m2->val2))
	return 0;
    }

    m1 = m1->next;
    m2 = m2->next;
  }

  return !m1 && !m2;
}

/**
 * val_same - compare two values
 * @v1: first value
 * @v2: second value
 *
 * Compares two values and returns 1 if they are the same and 0 if not.
 * Comparison of values of different types is valid and returns 0.
 */
int
val_same(struct f_val v1, struct f_val v2)
{
  int rc;

  rc = val_compare(v1, v2);
  if (rc != CMP_ERROR)
    return !rc;

  if (v1.type != v2.type)
    return 0;

  switch (v1.type) {
  case T_PATH_MASK:
    return pm_same(v1.val.path_mask, v2.val.path_mask);
  case T_PATH:
  case T_CLIST:
  case T_ECLIST:
  case T_LCLIST:
    return adata_same(v1.val.ad, v2.val.ad);
  case T_SET:
    return same_tree(v1.val.t, v2.val.t);
  case T_PREFIX_SET:
    return trie_same(v1.val.ti, v2.val.ti);
  default:
    bug("Invalid type in val_same(): %x", v1.type);
  }
}

static int
clist_set_type(struct f_tree *set, struct f_val *v)
{
  switch (set->from.type)
  {
  case T_PAIR:
    v->type = T_PAIR;
    return 1;

  case T_QUAD:
    v->type = T_QUAD;
    return 1;

  case T_IP:
    if (val_is_ip4(set->from) && val_is_ip4(set->to))
    {
      v->type = T_QUAD;
      return 1;
    }
    /* Fall through */
  default:
    v->type = T_VOID;
    return 0;
  }
}

static inline int
eclist_set_type(struct f_tree *set)
{ return set->from.type == T_EC; }

static inline int
lclist_set_type(struct f_tree *set)
{ return set->from.type == T_LC; }

static int
clist_match_set(struct adata *clist, struct f_tree *set)
{
  if (!clist)
    return 0;

  struct f_val v;
  if (!clist_set_type(set, &v))
    return CMP_ERROR;

  u32 *l = (u32 *) clist->data;
  u32 *end = l + clist->length/4;

  while (l < end) {
    v.val.i = *l++;
    if (find_tree(set, v))
      return 1;
  }
  return 0;
}

static int
eclist_match_set(struct adata *list, struct f_tree *set)
{
  if (!list)
    return 0;

  if (!eclist_set_type(set))
    return CMP_ERROR;

  struct f_val v;
  u32 *l = int_set_get_data(list);
  int len = int_set_get_size(list);
  int i;

  v.type = T_EC;
  for (i = 0; i < len; i += 2) {
    v.val.ec = ec_get(l, i);
    if (find_tree(set, v))
      return 1;
  }

  return 0;
}

static int
lclist_match_set(struct adata *list, struct f_tree *set)
{
  if (!list)
    return 0;

  if (!lclist_set_type(set))
    return CMP_ERROR;

  struct f_val v;
  u32 *l = int_set_get_data(list);
  int len = int_set_get_size(list);
  int i;

  v.type = T_LC;
  for (i = 0; i < len; i += 3) {
    v.val.lc = lc_get(l, i);
    if (find_tree(set, v))
      return 1;
  }

  return 0;
}

static struct adata *
clist_filter(struct linpool *pool, struct adata *list, struct f_val set, int pos)
{
  if (!list)
    return NULL;

  int tree = (set.type == T_SET);  /* 1 -> set is T_SET, 0 -> set is T_CLIST */
  struct f_val v;
  if (tree)
    clist_set_type(set.val.t, &v);
  else
    v.type = T_PAIR;

  int len = int_set_get_size(list);
  u32 *l = int_set_get_data(list);
  u32 tmp[len];
  u32 *k = tmp;
  u32 *end = l + len;

  while (l < end) {
    v.val.i = *l++;
    /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
    if ((tree ? !!find_tree(set.val.t, v) : int_set_contains(set.val.ad, v.val.i)) == pos)
      *k++ = v.val.i;
  }

  uint nl = (k - tmp) * sizeof(u32);
  if (nl == list->length)
    return list;

  struct adata *res = adata_empty(pool, nl);
  memcpy(res->data, tmp, nl);
  return res;
}
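
/*
 * The @pos flag selects between keeping and dropping matched members:
 * pos == 1 keeps only the members found in @set, pos == 0 keeps only the
 * members not found in it. In filter-language terms these presumably
 * correspond to the .filter() and .delete() operations on community
 * lists, respectively. The same convention holds for eclist_filter()
 * and lclist_filter() below.
 */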

static struct adata *
eclist_filter(struct linpool *pool, struct adata *list, struct f_val set, int pos)
{
  if (!list)
    return NULL;

  int tree = (set.type == T_SET);  /* 1 -> set is T_SET, 0 -> set is T_CLIST */
  struct f_val v;

  int len = int_set_get_size(list);
  u32 *l = int_set_get_data(list);
  u32 tmp[len];
  u32 *k = tmp;
  int i;

  v.type = T_EC;
  for (i = 0; i < len; i += 2) {
    v.val.ec = ec_get(l, i);
    /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
    if ((tree ? !!find_tree(set.val.t, v) : ec_set_contains(set.val.ad, v.val.ec)) == pos) {
      *k++ = l[i];
      *k++ = l[i+1];
    }
  }

  uint nl = (k - tmp) * sizeof(u32);
  if (nl == list->length)
    return list;

  struct adata *res = adata_empty(pool, nl);
  memcpy(res->data, tmp, nl);
  return res;
}

static struct adata *
lclist_filter(struct linpool *pool, struct adata *list, struct f_val set, int pos)
{
  if (!list)
    return NULL;

  int tree = (set.type == T_SET);  /* 1 -> set is T_SET, 0 -> set is T_CLIST */
  struct f_val v;

  int len = int_set_get_size(list);
  u32 *l = int_set_get_data(list);
  u32 tmp[len];
  u32 *k = tmp;
  int i;

  v.type = T_LC;
  for (i = 0; i < len; i += 3) {
    v.val.lc = lc_get(l, i);
    /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
    if ((tree ? !!find_tree(set.val.t, v) : lc_set_contains(set.val.ad, v.val.lc)) == pos)
      k = lc_copy(k, l+i);
  }

  uint nl = (k - tmp) * sizeof(u32);
  if (nl == list->length)
    return list;

  struct adata *res = adata_empty(pool, nl);
  memcpy(res->data, tmp, nl);
  return res;
}

/**
 * val_in_range - implement |~| operator
 * @v1: element
 * @v2: set
 *
 * Checks whether @v1 is an element (|~| operator) of @v2.
 */
static int
val_in_range(struct f_val v1, struct f_val v2)
{
  if ((v1.type == T_PATH) && (v2.type == T_PATH_MASK))
    return as_path_match(v1.val.ad, v2.val.path_mask);

  if ((v1.type == T_INT) && (v2.type == T_PATH))
    return as_path_contains(v2.val.ad, v1.val.i, 1);

  if (((v1.type == T_PAIR) || (v1.type == T_QUAD)) && (v2.type == T_CLIST))
    return int_set_contains(v2.val.ad, v1.val.i);
  /* IP->Quad implicit conversion */
  if (val_is_ip4(v1) && (v2.type == T_CLIST))
    return int_set_contains(v2.val.ad, ipa_to_u32(v1.val.ip));

  if ((v1.type == T_EC) && (v2.type == T_ECLIST))
    return ec_set_contains(v2.val.ad, v1.val.ec);

  if ((v1.type == T_LC) && (v2.type == T_LCLIST))
    return lc_set_contains(v2.val.ad, v1.val.lc);

  if ((v1.type == T_STRING) && (v2.type == T_STRING))
    return patmatch(v2.val.s, v1.val.s);

  if ((v1.type == T_IP) && (v2.type == T_NET))
    return ipa_in_netX(v1.val.ip, v2.val.net);

  if ((v1.type == T_NET) && (v2.type == T_NET))
    return net_in_netX(v1.val.net, v2.val.net);

  if ((v1.type == T_NET) && (v2.type == T_PREFIX_SET))
    return trie_match_net(v2.val.ti, v1.val.net);

  if (v2.type != T_SET)
    return CMP_ERROR;

  /* With integrated Quad<->IP implicit conversion */
  if ((v1.type == v2.val.t->from.type) ||
      ((v1.type == T_QUAD) && val_is_ip4(v2.val.t->from) && val_is_ip4(v2.val.t->to)))
    return !!find_tree(v2.val.t, v1);

  if (v1.type == T_CLIST)
    return clist_match_set(v1.val.ad, v2.val.t);

  if (v1.type == T_ECLIST)
    return eclist_match_set(v1.val.ad, v2.val.t);

  if (v1.type == T_LCLIST)
    return lclist_match_set(v1.val.ad, v2.val.t);

  if (v1.type == T_PATH)
    return as_path_match_set(v1.val.ad, v2.val.t);

  return CMP_ERROR;
}

/*
 * val_format - format filter value
 */
void
val_format(struct f_val v, buffer *buf)
{
  char buf2[1024];
  switch (v.type)
  {
  case T_VOID:	buffer_puts(buf, "(void)"); return;
  case T_BOOL:	buffer_puts(buf, v.val.i ? "TRUE" : "FALSE"); return;
  case T_INT:	buffer_print(buf, "%u", v.val.i); return;
  case T_STRING: buffer_print(buf, "%s", v.val.s); return;
  case T_IP:	buffer_print(buf, "%I", v.val.ip); return;
  case T_NET:	buffer_print(buf, "%N", v.val.net); return;
  case T_PAIR:	buffer_print(buf, "(%u,%u)", v.val.i >> 16, v.val.i & 0xffff); return;
  case T_QUAD:	buffer_print(buf, "%R", v.val.i); return;
  case T_EC:	ec_format(buf2, v.val.ec); buffer_print(buf, "%s", buf2); return;
  case T_LC:	lc_format(buf2, v.val.lc); buffer_print(buf, "%s", buf2); return;
  case T_RD:	rd_format(v.val.ec, buf2, 1024); buffer_print(buf, "%s", buf2); return;
  case T_PREFIX_SET: trie_format(v.val.ti, buf); return;
  case T_SET:	tree_format(v.val.t, buf); return;
  case T_ENUM:	buffer_print(buf, "(enum %x)%u", v.type, v.val.i); return;
  case T_PATH:	as_path_format(v.val.ad, buf2, 1000); buffer_print(buf, "(path %s)", buf2); return;
  case T_CLIST:	int_set_format(v.val.ad, 1, -1, buf2, 1000); buffer_print(buf, "(clist %s)", buf2); return;
  case T_ECLIST: ec_set_format(v.val.ad, -1, buf2, 1000); buffer_print(buf, "(eclist %s)", buf2); return;
  case T_LCLIST: lc_set_format(v.val.ad, -1, buf2, 1000); buffer_print(buf, "(lclist %s)", buf2); return;
  case T_PATH_MASK: pm_format(v.val.path_mask, buf); return;
  default:	buffer_print(buf, "[unknown type %x]", v.type); return;
  }
}
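
/*
 * For instance (illustrative values), a T_PAIR holding the community
 * (65000,100) prints as "(65000,100)" and a T_INT holding 42 simply as
 * "42"; the filter-language print statement is the typical consumer,
 * via val_format_str() below.
 */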


static inline void f_cache_eattrs(struct filter_state *fs)
{
  fs->eattrs = &((*fs->rte)->attrs->eattrs);
}

static inline void f_rte_cow(struct filter_state *fs)
{
  if (!((*fs->rte)->flags & REF_COW))
    return;

  *fs->rte = rte_cow(*fs->rte);
}

/*
 * rta_cow - prepare rta for modification by filter
 */
static void
f_rta_cow(struct filter_state *fs)
{
  if (!rta_is_cached((*fs->rte)->attrs))
    return;

  /* Prepare to modify rte */
  f_rte_cow(fs);

  /* Store the old rta to free it later; it holds the reference from rte_cow() */
  fs->old_rta = (*fs->rte)->attrs;

  /*
   * Get a shallow copy of rta. The eattrs and nexthops fields of rta are shared
   * with fs->old_rta (they will be copied when the cached rta is obtained
   * at the end of f_run()); the lock of hostentry is inherited as well (we
   * assume hostentry is not changed by filters).
   */
  (*fs->rte)->attrs = rta_do_cow((*fs->rte)->attrs, fs->pool);

  /* Re-cache the ea_list */
  f_cache_eattrs(fs);
}

static char *
val_format_str(struct filter_state *fs, struct f_val v) {
  buffer b;
  LOG_BUFFER_INIT(b);
  val_format(v, &b);
  return lp_strdup(fs->pool, b.start);
}

static struct tbf rl_runtime_err = TBF_DEFAULT_LOG_LIMITS;

/**
 * interpret
 * @fs: filter state
 * @what: filter to interpret
 *
 * Interprets the given tree of filter instructions. This is the core
 * function of the filter system and does all the hard work.
 *
 * Each instruction has 4 fields: code (the instruction code),
 * aux (an extension of the instruction code, typically a type),
 * and the arguments arg1 and arg2. Depending on the instruction, the
 * arguments are either integers or pointers to instruction trees. Common
 * instructions like |+|, which take two expressions as arguments, use the
 * TWOARGS macro to get both of them evaluated.
 *
 * &f_val structures are copied around, so there are no problems with
 * memory management.
 */
static struct f_val
interpret(struct filter_state *fs, struct f_inst *what)
{
  struct symbol *sym;
  struct f_val v1, v2, v3, res = { .type = T_VOID }, *vp;
  unsigned u1, u2;
  int i;
  u32 as;

  for ( ; what; what = what->next) {
    res.type = T_VOID;
    switch(what->fi_code) {
#define runtime(fmt, ...) do { \
    if (!(fs->flags & FF_SILENT)) \
      log_rl(&rl_runtime_err, L_ERR "filters, line %d: " fmt, what->lineno, ##__VA_ARGS__); \
    res.type = T_RETURN; \
    res.val.i = F_ERROR; \
    return res; \
  } while(0)

#define ARG_ANY(n) INTERPRET(v##n, what->a##n.p)

#define ARG(n,t) ARG_ANY(n) \
    if (v##n.type != t) \
      runtime("Argument %d of instruction %s must be of type %02x, got %02x", \
	      n, f_instruction_name(what->fi_code), t, v##n.type);

#define INTERPRET(val, what_) \
    val = interpret(fs, what_); \
    if (val.type & T_RETURN) \
      return val;

#define ACCESS_RTE \
    do { if (!fs->rte) runtime("No route to access"); } while (0)

#define ACCESS_EATTRS \
    do { if (!fs->eattrs) f_cache_eattrs(fs); } while (0)

#define BITFIELD_MASK(what_) (1u << EA_BIT_GET(what_->a2.i))

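/*
 * The instruction cases themselves live in filter/f-inst.c and are
 * expanded here via the macros above. A hypothetical case (for
 * illustration only, not taken from f-inst.c) would look like:
 *
 *   case FI_ADD:
 *     ARG(1, T_INT);
 *     ARG(2, T_INT);
 *     res.type = T_INT;
 *     res.val.i = v1.val.i + v2.val.i;
 *     break;
 */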
#include "filter/f-inst.c"

#undef runtime
#undef ARG_ANY
#undef ARG
#undef INTERPRET
#undef ACCESS_RTE
#undef ACCESS_EATTRS
  }}
  return res;
}


#define ARG(n) \
	if (!i_same(f1->a##n.p, f2->a##n.p)) \
		return 0;

#define ONEARG ARG(1);
#define TWOARGS ONEARG; ARG(2);
#define THREEARGS TWOARGS; ARG(3);

#define A2_SAME if (f1->a2.i != f2->a2.i) return 0;

/*
 * i_same - does the real comparison of instruction trees; callers outside
 * this module should use filter_same() instead
 */
int
i_same(struct f_inst *f1, struct f_inst *f2)
{
  if ((!!f1) != (!!f2))
    return 0;
  if (!f1)
    return 1;
  if (f1->aux != f2->aux)
    return 0;
  if (f1->fi_code != f2->fi_code)
    return 0;
  if (f1 == f2)		/* It looks strange, but it is possible with call rewriting trickery */
    return 1;

  switch(f1->fi_code) {
  case FI_ADD: /* fall through */
  case FI_SUBTRACT:
  case FI_MULTIPLY:
  case FI_DIVIDE:
  case FI_OR:
  case FI_AND:
  case FI_PAIR_CONSTRUCT:
  case FI_EC_CONSTRUCT:
  case FI_NEQ:
  case FI_EQ:
  case FI_LT:
  case FI_LTE: TWOARGS; break;

  case FI_PATHMASK_CONSTRUCT: if (!pm_same(f1->a1.p, f2->a1.p)) return 0; break;

  case FI_NOT: ONEARG; break;
  case FI_NOT_MATCH:
  case FI_MATCH: TWOARGS; break;
  case FI_DEFINED: ONEARG; break;
  case FI_TYPE: ONEARG; break;

  case FI_LC_CONSTRUCT:
    THREEARGS;
    break;

  case FI_SET:
    ARG(2);
    {
      struct symbol *s1, *s2;
      s1 = f1->a1.p;
      s2 = f2->a1.p;
      if (strcmp(s1->name, s2->name))
	return 0;
      if (s1->class != s2->class)
	return 0;
    }
    break;

  case FI_CONSTANT:
    switch (f1->aux) {

    case T_PREFIX_SET:
      if (!trie_same(f1->a2.p, f2->a2.p))
	return 0;
      break;

    case T_SET:
      if (!same_tree(f1->a2.p, f2->a2.p))
	return 0;
      break;

    case T_STRING:
      if (strcmp(f1->a2.p, f2->a2.p))
	return 0;
      break;

    default:
      A2_SAME;
    }
    break;

  case FI_CONSTANT_INDIRECT:
    if (!val_same(* (struct f_val *) f1->a1.p, * (struct f_val *) f2->a1.p))
      return 0;
    break;

  case FI_VARIABLE:
    if (strcmp((char *) f1->a2.p, (char *) f2->a2.p))
      return 0;
    break;
  case FI_PRINT: case FI_LENGTH: ONEARG; break;
  case FI_CONDITION: TWOARGS; break;
  case FI_NOP: case FI_EMPTY: break;
  case FI_PRINT_AND_DIE: ONEARG; A2_SAME; break;
  case FI_PREF_GET:
  case FI_RTA_GET: A2_SAME; break;
  case FI_EA_GET: A2_SAME; break;
  case FI_PREF_SET:
  case FI_RTA_SET:
  case FI_EA_SET: ONEARG; A2_SAME; break;

  case FI_RETURN: ONEARG; break;
  case FI_ROA_MAXLEN: ONEARG; break;
  case FI_ROA_ASN: ONEARG; break;
  case FI_SADR_SRC: ONEARG; break;
  case FI_IP: ONEARG; break;
  case FI_IS_V4: ONEARG; break;
  case FI_ROUTE_DISTINGUISHER: ONEARG; break;
  case FI_CALL: /* Call rewriting trickery to avoid exponential behaviour */
    ONEARG;
    if (!i_same(f1->a2.p, f2->a2.p))
      return 0;
    f2->a2.p = f1->a2.p;
    break;
  case FI_CLEAR_LOCAL_VARS: break; /* internal instruction */
  case FI_SWITCH: ONEARG; if (!same_tree(f1->a2.p, f2->a2.p)) return 0; break;
  case FI_IP_MASK: TWOARGS; break;
  case FI_PATH_PREPEND: TWOARGS; break;
  case FI_CLIST_ADD_DEL: TWOARGS; break;
  case FI_AS_PATH_FIRST:
  case FI_AS_PATH_LAST:
  case FI_AS_PATH_LAST_NAG: ONEARG; break;
  case FI_ROA_CHECK:
    TWOARGS;
    /* Does not really make sense - ROA check results may change anyway */
    if (strcmp(((struct f_inst_roa_check *) f1)->rtc->name,
	       ((struct f_inst_roa_check *) f2)->rtc->name))
      return 0;
    break;
  case FI_FORMAT: ONEARG; break;
  case FI_ASSERT: ONEARG; break;
  default:
    bug("Unknown instruction %d in same (%c)", f1->fi_code, f1->fi_code & 0xff);
  }
  return i_same(f1->next, f2->next);
}

/**
 * f_run - run a filter for a route
 * @filter: filter to run
 * @rte: route being filtered, may be modified
 * @tmp_pool: all filter allocations go from this pool
 * @flags: flags
 *
 * If the filter needs to modify the route, there are several
 * possibilities. @rte might be read-only (with the REF_COW flag); in that
 * case a rw copy is obtained by rte_cow() and @rte is replaced. If
 * @rte is originally rw, it may be directly modified (and it is never
 * copied).
 *
 * The returned rte may reuse the (possibly cached, cloned) rta, or
 * (if the rta was modified) contain a modified uncached rta, which
 * uses parts allocated from @tmp_pool and parts shared from the original
 * rta. There is one exception - if @rte is rw but contains a cached
 * rta and that is modified, the rta in the returned rte is also cached.
 *
 * Ownership of cached rtas is consistent with rte, i.e.
 * if a new rte is returned, it has its own clone of the cached rta
 * (and the cached rta of the read-only source rte is intact); if the rte is
 * modified in place, the old cached rta is possibly freed.
 */
int
f_run(struct filter *filter, struct rte **rte, struct linpool *tmp_pool, int flags)
{
  if (filter == FILTER_ACCEPT)
    return F_ACCEPT;

  if (filter == FILTER_REJECT)
    return F_REJECT;

  int rte_cow = ((*rte)->flags & REF_COW);
  DBG("Running filter `%s'...", filter->name);

  struct filter_state fs = {
    .rte = rte,
    .pool = tmp_pool,
    .flags = flags,
  };

  LOG_BUFFER_INIT(fs.buf);

  struct f_val res = interpret(&fs, filter->root);

  if (fs.old_rta) {
    /*
     * The cached rta was modified and fs->rte now contains an uncached one,
     * sharing some parts with the cached one. The cached rta should
     * be freed (if the rte was originally COW, fs->old_rta is a clone
     * obtained during rte_cow()).
     *
     * This also implements the exception mentioned in the f_run()
     * description. The reason for this is that the rta reuses parts of
     * fs->old_rta, and these may be freed during rta_free(fs->old_rta).
     * This is not a problem if the rte was COW, because the original rte
     * also holds the same rta.
     */
    if (!rte_cow)
      (*fs.rte)->attrs = rta_lookup((*fs.rte)->attrs);

    rta_free(fs.old_rta);
  }


  if (res.type != T_RETURN) {
    if (!(fs.flags & FF_SILENT))
      log_rl(&rl_runtime_err, L_ERR "Filter %s did not return accept or reject. Make up your mind", filter->name);
    return F_ERROR;
  }
  DBG("done (%u)\n", res.val.i);
  return res.val.i;
}
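
/*
 * A minimal caller sketch (hypothetical names): protocols typically run
 * a route through a configured filter and act on the verdict, relying on
 * the ordering of the F_* codes (accepts sort below rejects):
 *
 *   struct rte *e = ...;			// candidate route
 *   if (f_run(p->cf->in_filter, &e, rte_update_pool, 0) > F_ACCEPT)
 *     return;					// F_REJECT or F_ERROR
 */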

/* TODO: perhaps we could integrate f_eval(), f_eval_rte() and f_run() */

struct f_val
f_eval_rte(struct f_inst *expr, struct rte **rte, struct linpool *tmp_pool)
{
  struct filter_state fs = {
    .rte = rte,
    .pool = tmp_pool,
  };

  LOG_BUFFER_INIT(fs.buf);

  /* Note that in this function we assume that rte->attrs is private / uncached */
  struct f_val res = interpret(&fs, expr);

  return res;
}

struct f_val
f_eval(struct f_inst *expr, struct linpool *tmp_pool)
{
  struct filter_state fs = {
    .pool = tmp_pool,
  };

  LOG_BUFFER_INIT(fs.buf);

  return interpret(&fs, expr);
}

uint
f_eval_int(struct f_inst *expr)
{
  /* Called independently at parse time to evaluate expressions */
  struct f_val res = f_eval(expr, cfg_mem);

  if (res.type != T_INT)
    cf_error("Integer expression expected");

  return res.val.i;
}
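
/*
 * This is what lets the config grammar use computed constants: an
 * illustrative example is a parenthesized term like ( 2 + 3 ) written
 * where the configuration expects a number, which is evaluated through
 * f_eval_int() while parsing.
 */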

/**
 * filter_same - compare two filters
 * @new: first filter to be compared
 * @old: second filter to be compared; note that this filter is
 * damaged while comparing.
 *
 * Returns 1 if the filters are the same, otherwise 0. If there are
 * underlying bugs, it will rather say 0 on same filters than
 * 1 on different ones.
 */
int
filter_same(struct filter *new, struct filter *old)
{
  if (old == new)	/* Handle FILTER_ACCEPT and FILTER_REJECT */
    return 1;
  if (old == FILTER_ACCEPT || old == FILTER_REJECT ||
      new == FILTER_ACCEPT || new == FILTER_REJECT)
    return 0;
  return i_same(new->root, old->root);
}