1 /*
2 * Filters: utility functions
3 *
4 * Copyright 1998 Pavel Machek <pavel@ucw.cz>
5 *
6 * Can be freely distributed and used under the terms of the GNU GPL.
7 *
8 */
9
10 /**
11 * DOC: Filters
12 *
13 * You can find the sources of the filter language in the |filter/|
14 * directory. The file |filter/config.Y| contains the filter grammar and basically translates
15 * the source from the user into a tree of &f_inst structures. These trees are
16 * later interpreted using code in |filter/filter.c|.
17 *
18 * A filter is represented by a tree of &f_inst structures, one structure per
19 * "instruction". Each &f_inst contains @code, an @aux value (which is
20 * usually the data type this instruction operates on) and two generic
21 * arguments (@a1, @a2). Some instructions contain pointer(s) to other
22 * instructions in their (@a1, @a2) fields.
23 *
24 * Filters use a &f_val structure for their data. Each &f_val
25 * contains a type and a value (types are constants prefixed with %T_). A few
26 * of the types are special; %T_RETURN can be or-ed with a type to indicate
27 * that a return from a function or from the whole filter should be
28 * forced. An important property of &f_val's is that they may be copied
29 * with a simple |=|. That's fine for all currently defined types: strings
30 * are read-only (and therefore okay) and paths are copied for each
31 * operation (okay too).
32 */
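/*
 * Editorial sketch (not part of the original source; it only uses types and
 * constants used throughout this file): a literal such as |42| becomes one
 * &f_inst with fi_code FI_CONSTANT, @aux T_INT and the number in @a2.i.
 * Evaluating it produces an &f_val, and such values really are copied by
 * plain assignment:
 *
 *	struct f_val forty_two = { .type = T_INT, .val.i = 42 };
 *	struct f_val copy = forty_two;
 *
 * (now copy.type == T_INT and copy.val.i == 42)
 */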
33
34 #undef LOCAL_DEBUG
35
36 #include "nest/bird.h"
37 #include "lib/lists.h"
38 #include "lib/resource.h"
39 #include "lib/socket.h"
40 #include "lib/string.h"
41 #include "lib/unaligned.h"
42 #include "lib/net.h"
43 #include "lib/ip.h"
44 #include "nest/route.h"
45 #include "nest/protocol.h"
46 #include "nest/iface.h"
47 #include "nest/attrs.h"
48 #include "conf/conf.h"
49 #include "filter/filter.h"
50
51 #define CMP_ERROR 999
52
53 void (*bt_assert_hook)(int result, struct f_inst *assert);
54
55 static struct adata undef_adata; /* adata of length 0 used for undefined */
56
57 /* Special undef value for paths and clists */
58 static inline int
59 undef_value(struct f_val v)
60 {
61 return ((v.type == T_PATH) || (v.type == T_CLIST) ||
62 (v.type == T_ECLIST) || (v.type == T_LCLIST)) &&
63 (v.val.ad == &undef_adata);
64 }
65
66 static struct adata *
67 adata_empty(struct linpool *pool, int l)
68 {
69 struct adata *res = lp_alloc(pool, sizeof(struct adata) + l);
70 res->length = l;
71 return res;
72 }
73
74 static void
75 pm_format(struct f_path_mask *p, buffer *buf)
76 {
77 buffer_puts(buf, "[= ");
78
79 while (p)
80 {
81 switch(p->kind)
82 {
83 case PM_ASN:
84 buffer_print(buf, "%u ", p->val);
85 break;
86
87 case PM_QUESTION:
88 buffer_puts(buf, "? ");
89 break;
90
91 case PM_ASTERISK:
92 buffer_puts(buf, "* ");
93 break;
94
95 case PM_ASN_RANGE:
96 buffer_print(buf, "%u..%u ", p->val, p->val2);
97 break;
98
99 case PM_ASN_EXPR:
100 ASSERT(0);
101 }
102
103 p = p->next;
104 }
105
106 buffer_puts(buf, "=]");
107 }
108
109 static inline int val_is_ip4(const struct f_val v)
110 { return (v.type == T_IP) && ipa_is_ip4(v.val.ip); }
111
112 static inline int
113 lcomm_cmp(lcomm v1, lcomm v2)
114 {
115 if (v1.asn != v2.asn)
116 return (v1.asn > v2.asn) ? 1 : -1;
117 if (v1.ldp1 != v2.ldp1)
118 return (v1.ldp1 > v2.ldp1) ? 1 : -1;
119 if (v1.ldp2 != v2.ldp2)
120 return (v1.ldp2 > v2.ldp2) ? 1 : -1;
121 return 0;
122 }
123
124 /**
125 * val_compare - compare two values
126 * @v1: first value
127 * @v2: second value
128 *
129 * Compares two values and returns -1, 0, 1 on <, =, >, or CMP_ERROR on
130 * error. The tree module relies on this giving consistent results so
131 * that it can be used for building balanced trees.
132 */
133 int
134 val_compare(struct f_val v1, struct f_val v2)
135 {
136 if (v1.type != v2.type) {
137 if (v1.type == T_VOID) /* Hack for else */
138 return -1;
139 if (v2.type == T_VOID)
140 return 1;
141
142 /* IP->Quad implicit conversion */
143 if ((v1.type == T_QUAD) && val_is_ip4(v2))
144 return uint_cmp(v1.val.i, ipa_to_u32(v2.val.ip));
145 if (val_is_ip4(v1) && (v2.type == T_QUAD))
146 return uint_cmp(ipa_to_u32(v1.val.ip), v2.val.i);
147
148 debug( "Types do not match in val_compare\n" );
149 return CMP_ERROR;
150 }
151
152 switch (v1.type) {
153 case T_VOID:
154 return 0;
155 case T_ENUM:
156 case T_INT:
157 case T_BOOL:
158 case T_PAIR:
159 case T_QUAD:
160 return uint_cmp(v1.val.i, v2.val.i);
161 case T_EC:
162 case T_RD:
163 return u64_cmp(v1.val.ec, v2.val.ec);
164 case T_LC:
165 return lcomm_cmp(v1.val.lc, v2.val.lc);
166 case T_IP:
167 return ipa_compare(v1.val.ip, v2.val.ip);
168 case T_NET:
169 return net_compare(v1.val.net, v2.val.net);
170 case T_STRING:
171 return strcmp(v1.val.s, v2.val.s);
172 default:
173 return CMP_ERROR;
174 }
175 }
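/*
 * Editorial usage sketch (illustrative only): two integers compare with the
 * usual -1/0/1 ordering, while incompatible types (neither being T_VOID and
 * no IP/Quad conversion applying) yield CMP_ERROR:
 *
 *	struct f_val a = { .type = T_INT, .val.i = 1 };
 *	struct f_val b = { .type = T_INT, .val.i = 2 };
 *	struct f_val s = { .type = T_STRING, .val.s = "foo" };
 *
 *	val_compare(a, b)   ->  -1
 *	val_compare(b, b)   ->   0
 *	val_compare(a, s)   ->  CMP_ERROR   (types do not match)
 */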
176
177 static int
178 pm_same(struct f_path_mask *m1, struct f_path_mask *m2)
179 {
180 while (m1 && m2)
181 {
182 if (m1->kind != m2->kind)
183 return 0;
184
185 if (m1->kind == PM_ASN_EXPR)
186 {
187 if (!i_same((struct f_inst *) m1->val, (struct f_inst *) m2->val))
188 return 0;
189 }
190 else
191 {
192 if ((m1->val != m2->val) || (m1->val2 != m2->val2))
193 return 0;
194 }
195
196 m1 = m1->next;
197 m2 = m2->next;
198 }
199
200 return !m1 && !m2;
201 }
202
203 /**
204 * val_same - compare two values
205 * @v1: first value
206 * @v2: second value
207 *
208 * Compares two values and returns 1 if they are the same and 0 if not.
209 * Comparison of values of different types is valid and returns 0.
210 */
211 int
212 val_same(struct f_val v1, struct f_val v2)
213 {
214 int rc;
215
216 rc = val_compare(v1, v2);
217 if (rc != CMP_ERROR)
218 return !rc;
219
220 if (v1.type != v2.type)
221 return 0;
222
223 switch (v1.type) {
224 case T_PATH_MASK:
225 return pm_same(v1.val.path_mask, v2.val.path_mask);
226 case T_PATH:
227 case T_CLIST:
228 case T_ECLIST:
229 case T_LCLIST:
230 return adata_same(v1.val.ad, v2.val.ad);
231 case T_SET:
232 return same_tree(v1.val.t, v2.val.t);
233 case T_PREFIX_SET:
234 return trie_same(v1.val.ti, v2.val.ti);
235 default:
236 bug("Invalid type in val_same(): %x", v1.type);
237 }
238 }
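/*
 * Editorial usage sketch (illustrative only): unlike val_compare(), asking
 * about two values of different types is valid here and simply answers 0:
 *
 *	struct f_val n = { .type = T_INT, .val.i = 5 };
 *	struct f_val s = { .type = T_STRING, .val.s = "5" };
 *
 *	val_same(n, n)   ->  1
 *	val_same(n, s)   ->  0   (val_compare() gives CMP_ERROR and types differ)
 */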
239
240 static int
241 clist_set_type(struct f_tree *set, struct f_val *v)
242 {
243 switch (set->from.type)
244 {
245 case T_PAIR:
246 v->type = T_PAIR;
247 return 1;
248
249 case T_QUAD:
250 v->type = T_QUAD;
251 return 1;
252
253 case T_IP:
254 if (val_is_ip4(set->from) && val_is_ip4(set->to))
255 {
256 v->type = T_QUAD;
257 return 1;
258 }
259 /* Fall through */
260 default:
261 v->type = T_VOID;
262 return 0;
263 }
264 }
265
266 static inline int
267 eclist_set_type(struct f_tree *set)
268 { return set->from.type == T_EC; }
269
270 static inline int
271 lclist_set_type(struct f_tree *set)
272 { return set->from.type == T_LC; }
273
274 static int
275 clist_match_set(struct adata *clist, struct f_tree *set)
276 {
277 if (!clist)
278 return 0;
279
280 struct f_val v;
281 if (!clist_set_type(set, &v))
282 return CMP_ERROR;
283
284 u32 *l = (u32 *) clist->data;
285 u32 *end = l + clist->length/4;
286
287 while (l < end) {
288 v.val.i = *l++;
289 if (find_tree(set, v))
290 return 1;
291 }
292 return 0;
293 }
294
295 static int
296 eclist_match_set(struct adata *list, struct f_tree *set)
297 {
298 if (!list)
299 return 0;
300
301 if (!eclist_set_type(set))
302 return CMP_ERROR;
303
304 struct f_val v;
305 u32 *l = int_set_get_data(list);
306 int len = int_set_get_size(list);
307 int i;
308
309 v.type = T_EC;
310 for (i = 0; i < len; i += 2) {
311 v.val.ec = ec_get(l, i);
312 if (find_tree(set, v))
313 return 1;
314 }
315
316 return 0;
317 }
318
319 static int
320 lclist_match_set(struct adata *list, struct f_tree *set)
321 {
322 if (!list)
323 return 0;
324
325 if (!lclist_set_type(set))
326 return CMP_ERROR;
327
328 struct f_val v;
329 u32 *l = int_set_get_data(list);
330 int len = int_set_get_size(list);
331 int i;
332
333 v.type = T_LC;
334 for (i = 0; i < len; i += 3) {
335 v.val.lc = lc_get(l, i);
336 if (find_tree(set, v))
337 return 1;
338 }
339
340 return 0;
341 }
342
343 static struct adata *
344 clist_filter(struct linpool *pool, struct adata *list, struct f_val set, int pos)
345 {
346 if (!list)
347 return NULL;
348
349 int tree = (set.type == T_SET); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
350 struct f_val v;
351 if (tree)
352 clist_set_type(set.val.t, &v);
353 else
354 v.type = T_PAIR;
355
356 int len = int_set_get_size(list);
357 u32 *l = int_set_get_data(list);
358 u32 tmp[len];
359 u32 *k = tmp;
360 u32 *end = l + len;
361
362 while (l < end) {
363 v.val.i = *l++;
364 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
365 if ((tree ? !!find_tree(set.val.t, v) : int_set_contains(set.val.ad, v.val.i)) == pos)
366 *k++ = v.val.i;
367 }
368
369 uint nl = (k - tmp) * sizeof(u32);
370 if (nl == list->length)
371 return list;
372
373 struct adata *res = adata_empty(pool, nl);
374 memcpy(res->data, tmp, nl);
375 return res;
376 }
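/*
 * Editorial note on the @pos flag (illustrative example, not from the
 * original source): pos == 1 keeps only members of @set (the 'filter'
 * operation, aux 'f' in FI_CLIST_ADD_DEL below), while pos == 0 keeps only
 * non-members (the 'delete' operation, aux 'd'). For a clist (1,1) (1,2)
 * (2,1) and a set matching all pairs with ASN 1:
 *
 *	clist_filter(pool, list, set, 1)   ->  (1,1) (1,2)
 *	clist_filter(pool, list, set, 0)   ->  (2,1)
 *
 * If nothing is removed, the original adata is returned unchanged.
 */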
377
378 static struct adata *
379 eclist_filter(struct linpool *pool, struct adata *list, struct f_val set, int pos)
380 {
381 if (!list)
382 return NULL;
383
384 int tree = (set.type == T_SET); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
385 struct f_val v;
386
387 int len = int_set_get_size(list);
388 u32 *l = int_set_get_data(list);
389 u32 tmp[len];
390 u32 *k = tmp;
391 int i;
392
393 v.type = T_EC;
394 for (i = 0; i < len; i += 2) {
395 v.val.ec = ec_get(l, i);
396 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
397 if ((tree ? !!find_tree(set.val.t, v) : ec_set_contains(set.val.ad, v.val.ec)) == pos) {
398 *k++ = l[i];
399 *k++ = l[i+1];
400 }
401 }
402
403 uint nl = (k - tmp) * sizeof(u32);
404 if (nl == list->length)
405 return list;
406
407 struct adata *res = adata_empty(pool, nl);
408 memcpy(res->data, tmp, nl);
409 return res;
410 }
411
412 static struct adata *
413 lclist_filter(struct linpool *pool, struct adata *list, struct f_val set, int pos)
414 {
415 if (!list)
416 return NULL;
417
418 int tree = (set.type == T_SET); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
419 struct f_val v;
420
421 int len = int_set_get_size(list);
422 u32 *l = int_set_get_data(list);
423 u32 tmp[len];
424 u32 *k = tmp;
425 int i;
426
427 v.type = T_LC;
428 for (i = 0; i < len; i += 3) {
429 v.val.lc = lc_get(l, i);
430 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
431 if ((tree ? !!find_tree(set.val.t, v) : lc_set_contains(set.val.ad, v.val.lc)) == pos)
432 k = lc_copy(k, l+i);
433 }
434
435 uint nl = (k - tmp) * sizeof(u32);
436 if (nl == list->length)
437 return list;
438
439 struct adata *res = adata_empty(pool, nl);
440 memcpy(res->data, tmp, nl);
441 return res;
442 }
443
444 /**
445 * val_in_range - implement |~| operator
446 * @v1: element
447 * @v2: set
448 *
449 * Checks if @v1 is element (|~| operator) of @v2.
450 */
451 static int
452 val_in_range(struct f_val v1, struct f_val v2)
453 {
454 if ((v1.type == T_PATH) && (v2.type == T_PATH_MASK))
455 return as_path_match(v1.val.ad, v2.val.path_mask);
456
457 if ((v1.type == T_INT) && (v2.type == T_PATH))
458 return as_path_contains(v2.val.ad, v1.val.i, 1);
459
460 if (((v1.type == T_PAIR) || (v1.type == T_QUAD)) && (v2.type == T_CLIST))
461 return int_set_contains(v2.val.ad, v1.val.i);
462 /* IP->Quad implicit conversion */
463 if (val_is_ip4(v1) && (v2.type == T_CLIST))
464 return int_set_contains(v2.val.ad, ipa_to_u32(v1.val.ip));
465
466 if ((v1.type == T_EC) && (v2.type == T_ECLIST))
467 return ec_set_contains(v2.val.ad, v1.val.ec);
468
469 if ((v1.type == T_LC) && (v2.type == T_LCLIST))
470 return lc_set_contains(v2.val.ad, v1.val.lc);
471
472 if ((v1.type == T_STRING) && (v2.type == T_STRING))
473 return patmatch(v2.val.s, v1.val.s);
474
475 if ((v1.type == T_IP) && (v2.type == T_NET))
476 return ipa_in_netX(v1.val.ip, v2.val.net);
477
478 if ((v1.type == T_NET) && (v2.type == T_NET))
479 return net_in_netX(v1.val.net, v2.val.net);
480
481 if ((v1.type == T_NET) && (v2.type == T_PREFIX_SET))
482 return trie_match_net(v2.val.ti, v1.val.net);
483
484 if (v2.type != T_SET)
485 return CMP_ERROR;
486
487 /* With integrated Quad<->IP implicit conversion */
488 if ((v1.type == v2.val.t->from.type) ||
489 ((v1.type == T_QUAD) && val_is_ip4(v2.val.t->from) && val_is_ip4(v2.val.t->to)))
490 return !!find_tree(v2.val.t, v1);
491
492 if (v1.type == T_CLIST)
493 return clist_match_set(v1.val.ad, v2.val.t);
494
495 if (v1.type == T_ECLIST)
496 return eclist_match_set(v1.val.ad, v2.val.t);
497
498 if (v1.type == T_LCLIST)
499 return lclist_match_set(v1.val.ad, v2.val.t);
500
501 if (v1.type == T_PATH)
502 return as_path_match_set(v1.val.ad, v2.val.t);
503
504 return CMP_ERROR;
505 }
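/*
 * Editorial usage sketch (illustrative only; @addr, @prefix, @pair, @clist,
 * @net and @trie stand for already-initialized values of the given types):
 * this is exactly what the |~| operator in the filter language does, e.g.
 *
 *	T_IP   ~ T_NET         ->  ipa_in_netX(addr, prefix)
 *	T_PAIR ~ T_CLIST       ->  int_set_contains(clist, pair)
 *	T_NET  ~ T_PREFIX_SET  ->  trie_match_net(trie, net)
 *
 * Unsupported combinations (say, T_INT ~ T_STRING) return CMP_ERROR, which
 * interpret() reports as a runtime error for FI_MATCH / FI_NOT_MATCH.
 */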
506
507 /*
508 * val_format - format filter value
509 */
510 void
511 val_format(struct f_val v, buffer *buf)
512 {
513 char buf2[1024];
514 switch (v.type)
515 {
516 case T_VOID: buffer_puts(buf, "(void)"); return;
517 case T_BOOL: buffer_puts(buf, v.val.i ? "TRUE" : "FALSE"); return;
518 case T_INT: buffer_print(buf, "%u", v.val.i); return;
519 case T_STRING: buffer_print(buf, "%s", v.val.s); return;
520 case T_IP: buffer_print(buf, "%I", v.val.ip); return;
521 case T_NET: buffer_print(buf, "%N", v.val.net); return;
522 case T_PAIR: buffer_print(buf, "(%u,%u)", v.val.i >> 16, v.val.i & 0xffff); return;
523 case T_QUAD: buffer_print(buf, "%R", v.val.i); return;
524 case T_EC: ec_format(buf2, v.val.ec); buffer_print(buf, "%s", buf2); return;
525 case T_LC: lc_format(buf2, v.val.lc); buffer_print(buf, "%s", buf2); return;
526 case T_RD: rd_format(v.val.ec, buf2, 1024); buffer_print(buf, "%s", buf2); return;
527 case T_PREFIX_SET: trie_format(v.val.ti, buf); return;
528 case T_SET: tree_format(v.val.t, buf); return;
529 case T_ENUM: buffer_print(buf, "(enum %x)%u", v.type, v.val.i); return;
530 case T_PATH: as_path_format(v.val.ad, buf2, 1000); buffer_print(buf, "(path %s)", buf2); return;
531 case T_CLIST: int_set_format(v.val.ad, 1, -1, buf2, 1000); buffer_print(buf, "(clist %s)", buf2); return;
532 case T_ECLIST: ec_set_format(v.val.ad, -1, buf2, 1000); buffer_print(buf, "(eclist %s)", buf2); return;
533 case T_LCLIST: lc_set_format(v.val.ad, -1, buf2, 1000); buffer_print(buf, "(lclist %s)", buf2); return;
534 case T_PATH_MASK: pm_format(v.val.path_mask, buf); return;
535 default: buffer_print(buf, "[unknown type %x]", v.type); return;
536 }
537 }
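/*
 * Editorial usage sketch (illustrative only): the function appends a
 * human-readable form of @v to @buf, e.g. a T_PAIR with val.i == 0x00010002
 * comes out as "(1,2)" and a T_BOOL as "TRUE" or "FALSE". A typical call,
 * mirroring val_format_str() below, is:
 *
 *	buffer b;
 *	LOG_BUFFER_INIT(b);
 *	val_format(v, &b);
 *
 * (b.start then points to the formatted text, as used by val_format_str())
 */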
538
539 static struct rte **f_rte;
540 static struct rta *f_old_rta;
541 static struct ea_list **f_tmp_attrs;
542 static struct linpool *f_pool;
543 static struct buffer f_buf;
544 static int f_flags;
545
546 static inline void f_rte_cow(void)
547 {
548 *f_rte = rte_cow(*f_rte);
549 }
550
551 /*
552 * f_rta_cow - prepare rta for modification by the filter
553 */
554 static void
555 f_rta_cow(void)
556 {
557 if (!rta_is_cached((*f_rte)->attrs))
558 return;
559
560 /* Prepare to modify rte */
561 f_rte_cow();
562
563 /* Store old rta to free it later, it stores reference from rte_cow() */
564 f_old_rta = (*f_rte)->attrs;
565
566 /*
567 * Get a shallow copy of rta. The eattrs and nexthops fields of rta are shared
568 * with f_old_rta (they will be copied when the cached rta is obtained
569 * at the end of f_run()); the lock of hostentry is also inherited (we
570 * assume hostentry is not changed by filters).
571 */
572 (*f_rte)->attrs = rta_do_cow((*f_rte)->attrs, f_pool);
573 }
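/*
 * Editorial note (illustrative sketch, mirroring the FI_RTA_SET handler in
 * interpret() below): the intended calling pattern is to COW first and only
 * then touch the attributes, so a cached (shared) rta is never modified in
 * place:
 *
 *	f_rta_cow();
 *	struct rta *rta = (*f_rte)->attrs;	now private / uncached
 *	rta->scope = v1.val.i;			e.g. the SA_SCOPE case
 */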
574
575 static char *
576 val_format_str(struct f_val v) {
577 buffer b;
578 LOG_BUFFER_INIT(b);
579 val_format(v, &b);
580 return lp_strdup(f_pool, b.start);
581 }
582
583 static struct tbf rl_runtime_err = TBF_DEFAULT_LOG_LIMITS;
584
585 #define runtime(fmt, ...) do { \
586 if (!(f_flags & FF_SILENT)) \
587 log_rl(&rl_runtime_err, L_ERR "filters, line %d: " fmt, what->lineno, ##__VA_ARGS__); \
588 res.type = T_RETURN; \
589 res.val.i = F_ERROR; \
590 return res; \
591 } while(0)
592
593 #define ARG_ANY(n) INTERPRET(v##n, what->a##n.p)
594
595 #define ARG(n,t) ARG_ANY(n) \
596 if (v##n.type != t) \
597 runtime("Argument %d of instruction %s must be of type %02x, got %02x", \
598 n, f_instruction_name(what->fi_code), t, v##n.type);
599
600 #define INTERPRET(val, what_) \
601 val = interpret(what_); \
602 if (val.type & T_RETURN) \
603 return val;
604
605 #define ACCESS_RTE \
606 do { if (!f_rte) runtime("No route to access"); } while (0)
607
608 #define BITFIELD_MASK(what) \
609 (1u << (what->a2.i >> 24))
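/*
 * Editorial expansion sketch (illustrative only): for a binary instruction
 * such as FI_ADD, the pair ARG(1,T_INT); ARG(2,T_INT); used in interpret()
 * below expands roughly to
 *
 *	v1 = interpret(what->a1.p);
 *	if (v1.type & T_RETURN)
 *	  return v1;
 *	if (v1.type != T_INT)
 *	  runtime("Argument %d of instruction %s must be of type %02x, got %02x",
 *		  1, f_instruction_name(what->fi_code), T_INT, v1.type);
 *
 * and the same again for v2 / what->a2.p, so errors and early returns
 * propagate out of nested subexpressions automatically.
 */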
610
611 /**
612 * interpret
613 * @what: filter to interpret
614 *
615 * Interprets the given tree of filter instructions. This is the core function
616 * of the filter system and does all the hard work.
617 *
618 * Each instruction has 4 fields: code (the instruction code),
619 * aux (an extension to the instruction code, typically the type),
620 * and the arguments arg1 and arg2. Depending on the instruction, the arguments
621 * are either integers or pointers to instruction trees. Common
622 * instructions like +, which take two expressions as arguments, use
623 * the ARG() macro to get both of them evaluated.
624 *
625 * &f_val structures are copied around, so there are no problems with
626 * memory management.
627 */
628 static struct f_val
629 interpret(struct f_inst *what)
630 {
631 struct symbol *sym;
632 struct f_val v1, v2, v3, res = { .type = T_VOID }, *vp;
633 unsigned u1, u2;
634 int i;
635 u32 as;
636
637 for ( ; what; what = what->next) {
638 res.type = T_VOID;
639 switch(what->fi_code) {
640 /* Binary operators */
641 case FI_ADD:
642 ARG(1,T_INT);
643 ARG(2,T_INT);
644 res.type = T_INT;
645 res.val.i = v1.val.i + v2.val.i;
646 break;
647 case FI_SUBTRACT:
648 ARG(1,T_INT);
649 ARG(2,T_INT);
650 res.type = T_INT;
651 res.val.i = v1.val.i - v2.val.i;
652 break;
653 case FI_MULTIPLY:
654 ARG(1,T_INT);
655 ARG(2,T_INT);
656 res.type = T_INT;
657 res.val.i = v1.val.i * v2.val.i;
658 break;
659 case FI_DIVIDE:
660 ARG(1,T_INT);
661 ARG(2,T_INT);
662 res.type = T_INT;
663 if (v2.val.i == 0) runtime( "Mother told me not to divide by 0" );
664 res.val.i = v1.val.i / v2.val.i;
665 break;
666 case FI_AND:
667 case FI_OR:
668 ARG(1,T_BOOL);
669 if (v1.val.i == (what->fi_code == FI_OR)) {
670 res.type = T_BOOL;
671 res.val.i = v1.val.i;
672 } else {
673 ARG(2,T_BOOL);
674 res = v2;
675 }
676 break;
677 case FI_PAIR_CONSTRUCT:
678 ARG(1,T_INT);
679 ARG(2,T_INT);
680 u1 = v1.val.i;
681 u2 = v2.val.i;
682 if ((u1 > 0xFFFF) || (u2 > 0xFFFF))
683 runtime( "Can't operate with value out of bounds in pair constructor" );
684 res.val.i = (u1 << 16) | u2;
685 res.type = T_PAIR;
686 break;
687
688 case FI_EC_CONSTRUCT:
689 {
690 ARG_ANY(1);
691 ARG(2, T_INT);
692
693 int check, ipv4_used;
694 u32 key, val;
695
696 if (v1.type == T_INT) {
697 ipv4_used = 0; key = v1.val.i;
698 }
699 else if (v1.type == T_QUAD) {
700 ipv4_used = 1; key = v1.val.i;
701 }
702 /* IP->Quad implicit conversion */
703 else if (val_is_ip4(v1)) {
704 ipv4_used = 1; key = ipa_to_u32(v1.val.ip);
705 }
706 else
707 runtime("Can't operate with key of non-integer/IPv4 type in EC constructor");
708
709 val = v2.val.i;
710
711 /* XXXX */
712 res.type = T_EC;
713
714 if (what->aux == EC_GENERIC) {
715 check = 0; res.val.ec = ec_generic(key, val);
716 }
717 else if (ipv4_used) {
718 check = 1; res.val.ec = ec_ip4(what->aux, key, val);
719 }
720 else if (key < 0x10000) {
721 check = 0; res.val.ec = ec_as2(what->aux, key, val);
722 }
723 else {
724 check = 1; res.val.ec = ec_as4(what->aux, key, val);
725 }
726
727 if (check && (val > 0xFFFF))
728 runtime("Can't operate with value out of bounds in EC constructor");
729
730 break;
731 }
732
733 case FI_LC_CONSTRUCT:
734 {
735 ARG(1, T_INT);
736 ARG(2, T_INT);
737 ARG(3, T_INT);
738
739 res.type = T_LC;
740 res.val.lc = (lcomm) { v1.val.i, v2.val.i, v3.val.i };
741
742 break;
743 }
744
745 case FI_PATHMASK_CONSTRUCT:
746 {
747 struct f_path_mask *tt = what->a1.p, *vbegin, **vv = &vbegin;
748
749 while (tt) {
750 *vv = lp_alloc(f_pool, sizeof(struct f_path_mask));
751 if (tt->kind == PM_ASN_EXPR) {
752 struct f_val res;
753 INTERPRET(res, (struct f_inst *) tt->val);
754 (*vv)->kind = PM_ASN;
755 if (res.type != T_INT) {
756 runtime( "Error resolving path mask template: value not an integer" );
757 return (struct f_val) { .type = T_VOID };
758 }
759
760 (*vv)->val = res.val.i;
761 } else {
762 **vv = *tt;
763 }
764 tt = tt->next;
765 vv = &((*vv)->next);
766 }
767
768 res = (struct f_val) { .type = T_PATH_MASK, .val.path_mask = vbegin };
769 break;
770 }
771
772 /* Relational operators */
773
774 #define COMPARE(x) \
775 ARG_ANY(1); \
776 ARG_ANY(2); \
777 i = val_compare(v1, v2); \
778 if (i==CMP_ERROR) \
779 runtime( "Can't compare values of incompatible types" ); \
780 res.type = T_BOOL; \
781 res.val.i = (x); \
782 break;
783
784 #define SAME(x) \
785 ARG_ANY(1); \
786 ARG_ANY(2); \
787 i = val_same(v1, v2); \
788 res.type = T_BOOL; \
789 res.val.i = (x); \
790 break;
791
792 case FI_NEQ: SAME(!i);
793 case FI_EQ: SAME(i);
794 case FI_LT: COMPARE(i==-1);
795 case FI_LTE: COMPARE(i!=1);
796
797 case FI_NOT:
798 ARG(1,T_BOOL);
799 res = v1;
800 res.val.i = !res.val.i;
801 break;
802
803 case FI_MATCH:
804 ARG_ANY(1);
805 ARG_ANY(2);
806 res.type = T_BOOL;
807 res.val.i = val_in_range(v1, v2);
808 if (res.val.i == CMP_ERROR)
809 runtime( "~ applied on unknown type pair" );
810 res.val.i = !!res.val.i;
811 break;
812
813 case FI_NOT_MATCH:
814 ARG_ANY(1);
815 ARG_ANY(2);
816 res.type = T_BOOL;
817 res.val.i = val_in_range(v1, v2);
818 if (res.val.i == CMP_ERROR)
819 runtime( "!~ applied on unknown type pair" );
820 res.val.i = !res.val.i;
821 break;
822
823 case FI_DEFINED:
824 ARG_ANY(1);
825 res.type = T_BOOL;
826 res.val.i = (v1.type != T_VOID) && !undef_value(v1);
827 break;
828 case FI_TYPE:
829 ARG_ANY(1); /* There may be more types supporting this operation */
830 switch (v1.type)
831 {
832 case T_NET:
833 res.type = T_ENUM_NETTYPE;
834 res.val.i = v1.val.net->type;
835 break;
836 default:
837 runtime( "Can't determine type of this item" );
838 }
839 break;
840 case FI_IS_V4:
841 ARG(1, T_IP);
842 res.type = T_BOOL;
843 res.val.i = ipa_is_ip4(v1.val.ip);
844 break;
845
846 /* Set to indirect value, a1 = variable, a2 = value */
847 case FI_SET:
848 ARG_ANY(2);
849 sym = what->a1.p;
850 vp = sym->def;
851 if ((sym->class != (SYM_VARIABLE | v2.type)) && (v2.type != T_VOID))
852 {
853 /* IP->Quad implicit conversion */
854 if ((sym->class == (SYM_VARIABLE | T_QUAD)) && val_is_ip4(v2))
855 {
856 vp->type = T_QUAD;
857 vp->val.i = ipa_to_u32(v2.val.ip);
858 break;
859 }
860 runtime( "Assigning to variable of incompatible type" );
861 }
862 *vp = v2;
863 break;
864
865 /* some constants have value in a2, some in *a1.p, strange. */
866 case FI_CONSTANT: /* integer (or simple type) constant, string, set, or prefix_set */
867 res.type = what->aux;
868
869 if (res.type == T_PREFIX_SET)
870 res.val.ti = what->a2.p;
871 else if (res.type == T_SET)
872 res.val.t = what->a2.p;
873 else if (res.type == T_STRING)
874 res.val.s = what->a2.p;
875 else
876 res.val.i = what->a2.i;
877 break;
878 case FI_VARIABLE:
879 case FI_CONSTANT_INDIRECT:
880 res = * ((struct f_val *) what->a1.p);
881 break;
882 case FI_PRINT:
883 ARG_ANY(1);
884 val_format(v1, &f_buf);
885 break;
886 case FI_CONDITION: /* ? has really strange error value, so we can implement if ... else nicely :-) */
887 ARG(1, T_BOOL);
888 if (v1.val.i) {
889 ARG_ANY(2);
890 res.val.i = 0;
891 } else
892 res.val.i = 1;
893 res.type = T_BOOL;
894 break;
895 case FI_NOP:
896 debug( "No operation\n" );
897 break;
898 case FI_PRINT_AND_DIE:
899 ARG_ANY(1);
900 if ((what->a2.i == F_NOP || (what->a2.i != F_NONL && what->a1.p)) &&
901 !(f_flags & FF_SILENT))
902 log_commit(*L_INFO, &f_buf);
903
904 switch (what->a2.i) {
905 case F_QUITBIRD:
906 die( "Filter asked me to die" );
907 case F_ACCEPT:
908 /* Should take care about turning ACCEPT into MODIFY */
909 case F_ERROR:
910 case F_REJECT: /* FIXME (noncritical) Should print complete route along with reason to reject route */
911 res.type = T_RETURN;
912 res.val.i = what->a2.i;
913 return res; /* We have to return now, no more processing. */
914 case F_NONL:
915 case F_NOP:
916 break;
917 default:
918 bug( "unknown return type: Can't happen");
919 }
920 break;
921 case FI_RTA_GET: /* rta access */
922 {
923 ACCESS_RTE;
924 struct rta *rta = (*f_rte)->attrs;
925 res.type = what->aux;
926
927 switch (what->a2.i)
928 {
929 case SA_FROM: res.val.ip = rta->from; break;
930 case SA_GW: res.val.ip = rta->nh.gw; break;
931 case SA_NET: res.val.net = (*f_rte)->net->n.addr; break;
932 case SA_PROTO: res.val.s = rta->src->proto->name; break;
933 case SA_SOURCE: res.val.i = rta->source; break;
934 case SA_SCOPE: res.val.i = rta->scope; break;
935 case SA_DEST: res.val.i = rta->dest; break;
936 case SA_IFNAME: res.val.s = rta->nh.iface ? rta->nh.iface->name : ""; break;
937 case SA_IFINDEX: res.val.i = rta->nh.iface ? rta->nh.iface->index : 0; break;
938
939 default:
940 bug("Invalid static attribute access (%x)", res.type);
941 }
942 }
943 break;
944 case FI_RTA_SET:
945 ACCESS_RTE;
946 ARG_ANY(1);
947 if (what->aux != v1.type)
948 runtime( "Attempt to set static attribute to incompatible type" );
949
950 f_rta_cow();
951 {
952 struct rta *rta = (*f_rte)->attrs;
953
954 switch (what->a2.i)
955 {
956 case SA_FROM:
957 rta->from = v1.val.ip;
958 break;
959
960 case SA_GW:
961 {
962 ip_addr ip = v1.val.ip;
963 neighbor *n = neigh_find(rta->src->proto, &ip, 0);
964 if (!n || (n->scope == SCOPE_HOST))
965 runtime( "Invalid gw address" );
966
967 rta->dest = RTD_UNICAST;
968 rta->nh.gw = ip;
969 rta->nh.iface = n->iface;
970 rta->nh.next = NULL;
971 rta->hostentry = NULL;
972 }
973 break;
974
975 case SA_SCOPE:
976 rta->scope = v1.val.i;
977 break;
978
979 case SA_DEST:
980 i = v1.val.i;
981 if ((i != RTD_BLACKHOLE) && (i != RTD_UNREACHABLE) && (i != RTD_PROHIBIT))
982 runtime( "Destination can be changed only to blackhole, unreachable or prohibit" );
983
984 rta->dest = i;
985 rta->nh.gw = IPA_NONE;
986 rta->nh.iface = NULL;
987 rta->nh.next = NULL;
988 rta->hostentry = NULL;
989 break;
990
991 default:
992 bug("Invalid static attribute access (%x)", res.type);
993 }
994 }
995 break;
996 case FI_EA_GET: /* Access to extended attributes */
997 ACCESS_RTE;
998 {
999 eattr *e = NULL;
1000 u16 code = what->a2.i;
1001 int f_type = what->aux >> 8;
1002
1003 if (!(f_flags & FF_FORCE_TMPATTR))
1004 e = ea_find((*f_rte)->attrs->eattrs, code);
1005 if (!e)
1006 e = ea_find((*f_tmp_attrs), code);
1007 if ((!e) && (f_flags & FF_FORCE_TMPATTR))
1008 e = ea_find((*f_rte)->attrs->eattrs, code);
1009
1010 if (!e) {
1011 /* A special case: undefined as_path looks like empty as_path */
1012 if ((what->aux & EAF_TYPE_MASK) == EAF_TYPE_AS_PATH) {
1013 res.type = T_PATH;
1014 res.val.ad = &undef_adata;
1015 break;
1016 }
1017
1018 /* The same special case for int_set */
1019 if ((what->aux & EAF_TYPE_MASK) == EAF_TYPE_INT_SET) {
1020 res.type = T_CLIST;
1021 res.val.ad = &undef_adata;
1022 break;
1023 }
1024
1025 /* The same special case for ec_set */
1026 if ((what->aux & EAF_TYPE_MASK) == EAF_TYPE_EC_SET) {
1027 res.type = T_ECLIST;
1028 res.val.ad = &undef_adata;
1029 break;
1030 }
1031
1032 /* The same special case for lc_set */
1033 if ((what->aux & EAF_TYPE_MASK) == EAF_TYPE_LC_SET) {
1034 res.type = T_LCLIST;
1035 res.val.ad = &undef_adata;
1036 break;
1037 }
1038
1039 /* Undefined value */
1040 res.type = T_VOID;
1041 break;
1042 }
1043
1044 switch (what->aux & EAF_TYPE_MASK) {
1045 case EAF_TYPE_INT:
1046 res.type = f_type;
1047 res.val.i = e->u.data;
1048 break;
1049 case EAF_TYPE_ROUTER_ID:
1050 res.type = T_QUAD;
1051 res.val.i = e->u.data;
1052 break;
1053 case EAF_TYPE_OPAQUE:
1054 res.type = T_ENUM_EMPTY;
1055 res.val.i = 0;
1056 break;
1057 case EAF_TYPE_IP_ADDRESS:
1058 res.type = T_IP;
1059 struct adata * ad = e->u.ptr;
1060 res.val.ip = * (ip_addr *) ad->data;
1061 break;
1062 case EAF_TYPE_AS_PATH:
1063 res.type = T_PATH;
1064 res.val.ad = e->u.ptr;
1065 break;
1066 case EAF_TYPE_BITFIELD:
1067 res.type = T_BOOL;
1068 res.val.i = !!(e->u.data & BITFIELD_MASK(what));
1069 break;
1070 case EAF_TYPE_INT_SET:
1071 res.type = T_CLIST;
1072 res.val.ad = e->u.ptr;
1073 break;
1074 case EAF_TYPE_EC_SET:
1075 res.type = T_ECLIST;
1076 res.val.ad = e->u.ptr;
1077 break;
1078 case EAF_TYPE_LC_SET:
1079 res.type = T_LCLIST;
1080 res.val.ad = e->u.ptr;
1081 break;
1082 case EAF_TYPE_UNDEF:
1083 res.type = T_VOID;
1084 break;
1085 default:
1086 bug("Unknown type in e,a");
1087 }
1088 }
1089 break;
1090 case FI_EA_SET:
1091 ACCESS_RTE;
1092 ARG_ANY(1);
1093 {
1094 struct ea_list *l = lp_alloc(f_pool, sizeof(struct ea_list) + sizeof(eattr));
1095 u16 code = what->a2.i;
1096 int f_type = what->aux >> 8;
1097
1098 l->next = NULL;
1099 l->flags = EALF_SORTED;
1100 l->count = 1;
1101 l->attrs[0].id = code;
1102 l->attrs[0].flags = 0;
1103 l->attrs[0].type = (what->aux & 0xff) | EAF_ORIGINATED | EAF_FRESH;
1104
1105 switch (what->aux & EAF_TYPE_MASK) {
1106 case EAF_TYPE_INT:
1107 if (v1.type != f_type)
1108 runtime( "Setting int attribute to non-int value" );
1109 l->attrs[0].u.data = v1.val.i;
1110 break;
1111
1112 case EAF_TYPE_ROUTER_ID:
1113 /* IP->Quad implicit conversion */
1114 if (val_is_ip4(v1)) {
1115 l->attrs[0].u.data = ipa_to_u32(v1.val.ip);
1116 break;
1117 }
1118 /* T_INT for backward compatibility */
1119 if ((v1.type != T_QUAD) && (v1.type != T_INT))
1120 runtime( "Setting quad attribute to non-quad value" );
1121 l->attrs[0].u.data = v1.val.i;
1122 break;
1123
1124 case EAF_TYPE_OPAQUE:
1125 runtime( "Setting opaque attribute is not allowed" );
1126 break;
1127 case EAF_TYPE_IP_ADDRESS:
1128 if (v1.type != T_IP)
1129 runtime( "Setting ip attribute to non-ip value" );
1130 int len = sizeof(ip_addr);
1131 struct adata *ad = lp_alloc(f_pool, sizeof(struct adata) + len);
1132 ad->length = len;
1133 (* (ip_addr *) ad->data) = v1.val.ip;
1134 l->attrs[0].u.ptr = ad;
1135 break;
1136 case EAF_TYPE_AS_PATH:
1137 if (v1.type != T_PATH)
1138 runtime( "Setting path attribute to non-path value" );
1139 l->attrs[0].u.ptr = v1.val.ad;
1140 break;
1141 case EAF_TYPE_BITFIELD:
1142 if (v1.type != T_BOOL)
1143 runtime( "Setting bit in bitfield attribute to non-bool value" );
1144 {
1145 /* First, we have to find the old value */
1146 eattr *e = NULL;
1147 if (!(f_flags & FF_FORCE_TMPATTR))
1148 e = ea_find((*f_rte)->attrs->eattrs, code);
1149 if (!e)
1150 e = ea_find((*f_tmp_attrs), code);
1151 if ((!e) && (f_flags & FF_FORCE_TMPATTR))
1152 e = ea_find((*f_rte)->attrs->eattrs, code);
1153 u32 data = e ? e->u.data : 0;
1154
1155 if (v1.val.i)
1156 l->attrs[0].u.data = data | BITFIELD_MASK(what);
1157 else
1158 l->attrs[0].u.data = data & ~BITFIELD_MASK(what);
1159 }
1160 break;
1161 case EAF_TYPE_INT_SET:
1162 if (v1.type != T_CLIST)
1163 runtime( "Setting clist attribute to non-clist value" );
1164 l->attrs[0].u.ptr = v1.val.ad;
1165 break;
1166 case EAF_TYPE_EC_SET:
1167 if (v1.type != T_ECLIST)
1168 runtime( "Setting eclist attribute to non-eclist value" );
1169 l->attrs[0].u.ptr = v1.val.ad;
1170 break;
1171 case EAF_TYPE_LC_SET:
1172 if (v1.type != T_LCLIST)
1173 runtime( "Setting lclist attribute to non-lclist value" );
1174 l->attrs[0].u.ptr = v1.val.ad;
1175 break;
1176 case EAF_TYPE_UNDEF:
1177 if (v1.type != T_VOID)
1178 runtime( "Setting void attribute to non-void value" );
1179 l->attrs[0].u.data = 0;
1180 break;
1181 default: bug("Unknown type in e,S");
1182 }
1183
1184 if (!(what->aux & EAF_TEMP) && (!(f_flags & FF_FORCE_TMPATTR))) {
1185 f_rta_cow();
1186 l->next = (*f_rte)->attrs->eattrs;
1187 (*f_rte)->attrs->eattrs = l;
1188 } else {
1189 l->next = (*f_tmp_attrs);
1190 (*f_tmp_attrs) = l;
1191 }
1192 }
1193 break;
1194 case FI_PREF_GET:
1195 ACCESS_RTE;
1196 res.type = T_INT;
1197 res.val.i = (*f_rte)->pref;
1198 break;
1199 case FI_PREF_SET:
1200 ACCESS_RTE;
1201 ARG(1,T_INT);
1202 if (v1.val.i > 0xFFFF)
1203 runtime( "Setting preference value out of bounds" );
1204 f_rte_cow();
1205 (*f_rte)->pref = v1.val.i;
1206 break;
1207 case FI_LENGTH: /* Get length of */
1208 ARG_ANY(1);
1209 res.type = T_INT;
1210 switch(v1.type) {
1211 case T_NET: res.val.i = net_pxlen(v1.val.net); break;
1212 case T_PATH: res.val.i = as_path_getlen(v1.val.ad); break;
1213 case T_CLIST: res.val.i = int_set_get_size(v1.val.ad); break;
1214 case T_ECLIST: res.val.i = ec_set_get_size(v1.val.ad); break;
1215 case T_LCLIST: res.val.i = lc_set_get_size(v1.val.ad); break;
1216 default: runtime( "Prefix, path, clist or eclist expected" );
1217 }
1218 break;
1219 case FI_SADR_SRC: /* Get SADR src prefix */
1220 ARG(1, T_NET);
1221 if (!net_is_sadr(v1.val.net))
1222 runtime( "SADR expected" );
1223
1224 {
1225 net_addr_ip6_sadr *net = (void *) v1.val.net;
1226 net_addr *src = lp_alloc(f_pool, sizeof(net_addr_ip6));
1227 net_fill_ip6(src, net->src_prefix, net->src_pxlen);
1228
1229 res.type = T_NET;
1230 res.val.net = src;
1231 }
1232 break;
1233 case FI_ROA_MAXLEN: /* Get ROA max prefix length */
1234 ARG(1, T_NET);
1235 if (!net_is_roa(v1.val.net))
1236 runtime( "ROA expected" );
1237
1238 res.type = T_INT;
1239 res.val.i = (v1.val.net->type == NET_ROA4) ?
1240 ((net_addr_roa4 *) v1.val.net)->max_pxlen :
1241 ((net_addr_roa6 *) v1.val.net)->max_pxlen;
1242 break;
1243 case FI_ROA_ASN: /* Get ROA ASN */
1244 ARG(1, T_NET);
1245 if (!net_is_roa(v1.val.net))
1246 runtime( "ROA expected" );
1247
1248 res.type = T_INT;
1249 res.val.i = (v1.val.net->type == NET_ROA4) ?
1250 ((net_addr_roa4 *) v1.val.net)->asn :
1251 ((net_addr_roa6 *) v1.val.net)->asn;
1252 break;
1253 case FI_IP: /* Convert prefix to ... */
1254 ARG(1, T_NET);
1255 res.type = T_IP;
1256 res.val.ip = net_prefix(v1.val.net);
1257 break;
1258 case FI_ROUTE_DISTINGUISHER:
1259 ARG(1, T_NET);
1260 res.type = T_IP;
1261 if (!net_is_vpn(v1.val.net))
1262 runtime( "VPN address expected" );
1263 res.type = T_RD;
1264 res.val.ec = net_rd(v1.val.net);
1265 break;
1266 case FI_AS_PATH_FIRST: /* Get first ASN from AS PATH */
1267 ARG(1, T_PATH);
1268
1269 as = 0;
1270 as_path_get_first(v1.val.ad, &as);
1271 res.type = T_INT;
1272 res.val.i = as;
1273 break;
1274 case FI_AS_PATH_LAST: /* Get last ASN from AS PATH */
1275 ARG(1, T_PATH);
1276
1277 as = 0;
1278 as_path_get_last(v1.val.ad, &as);
1279 res.type = T_INT;
1280 res.val.i = as;
1281 break;
1282 case FI_AS_PATH_LAST_NAG: /* Get last ASN from non-aggregated part of AS PATH */
1283 ARG(1, T_PATH);
1284
1285 res.type = T_INT;
1286 res.val.i = as_path_get_last_nonaggregated(v1.val.ad);
1287 break;
1288 case FI_RETURN:
1289 ARG_ANY(1);
1290 res = v1;
1291 res.type |= T_RETURN;
1292 return res;
1293 case FI_CALL: /* CALL: this is special: if T_RETURN and returning some value, mask it out */
1294 ARG_ANY(1);
1295 res = interpret(what->a2.p);
1296 if (res.type == T_RETURN)
1297 return res;
1298 res.type &= ~T_RETURN;
1299 break;
1300 case FI_CLEAR_LOCAL_VARS: /* Clear local variables */
1301 for (sym = what->a1.p; sym != NULL; sym = sym->aux2)
1302 ((struct f_val *) sym->def)->type = T_VOID;
1303 break;
1304 case FI_SWITCH:
1305 ARG_ANY(1);
1306 {
1307 struct f_tree *t = find_tree(what->a2.p, v1);
1308 if (!t) {
1309 v1.type = T_VOID;
1310 t = find_tree(what->a2.p, v1);
1311 if (!t) {
1312 debug( "No else statement?\n");
1313 break;
1314 }
1315 }
1316 /* It is actually possible to have t->data NULL */
1317
1318 INTERPRET(res, t->data);
1319 }
1320 break;
1321 case FI_IP_MASK: /* IP.MASK(val) */
1322 ARG(1, T_IP);
1323 ARG(2, T_INT);
1324
1325 res.type = T_IP;
1326 res.val.ip = ipa_is_ip4(v1.val.ip) ?
1327 ipa_from_ip4(ip4_and(ipa_to_ip4(v1.val.ip), ip4_mkmask(v2.val.i))) :
1328 ipa_from_ip6(ip6_and(ipa_to_ip6(v1.val.ip), ip6_mkmask(v2.val.i)));
1329 break;
1330
1331 case FI_EMPTY: /* Create empty attribute */
1332 res.type = what->aux;
1333 res.val.ad = adata_empty(f_pool, 0);
1334 break;
1335 case FI_PATH_PREPEND: /* Path prepend */
1336 ARG(1, T_PATH);
1337 ARG(2, T_INT);
1338
1339 res.type = T_PATH;
1340 res.val.ad = as_path_prepend(f_pool, v1.val.ad, v2.val.i);
1341 break;
1342
1343 case FI_CLIST_ADD_DEL: /* (Extended) Community list add or delete */
1344 ARG_ANY(1);
1345 ARG_ANY(2);
1346 if (v1.type == T_PATH)
1347 {
1348 struct f_tree *set = NULL;
1349 u32 key = 0;
1350 int pos;
1351
1352 if (v2.type == T_INT)
1353 key = v2.val.i;
1354 else if ((v2.type == T_SET) && (v2.val.t->from.type == T_INT))
1355 set = v2.val.t;
1356 else
1357 runtime("Can't delete non-integer (set)");
1358
1359 switch (what->aux)
1360 {
1361 case 'a': runtime("Can't add to path");
1362 case 'd': pos = 0; break;
1363 case 'f': pos = 1; break;
1364 default: bug("unknown Ca operation");
1365 }
1366
1367 if (pos && !set)
1368 runtime("Can't filter integer");
1369
1370 res.type = T_PATH;
1371 res.val.ad = as_path_filter(f_pool, v1.val.ad, set, key, pos);
1372 }
1373 else if (v1.type == T_CLIST)
1374 {
1375 /* Community (or cluster) list */
1376 struct f_val dummy;
1377 int arg_set = 0;
1378 uint n = 0;
1379
1380 if ((v2.type == T_PAIR) || (v2.type == T_QUAD))
1381 n = v2.val.i;
1382 /* IP->Quad implicit conversion */
1383 else if (val_is_ip4(v2))
1384 n = ipa_to_u32(v2.val.ip);
1385 else if ((v2.type == T_SET) && clist_set_type(v2.val.t, &dummy))
1386 arg_set = 1;
1387 else if (v2.type == T_CLIST)
1388 arg_set = 2;
1389 else
1390 runtime("Can't add/delete non-pair");
1391
1392 res.type = T_CLIST;
1393 switch (what->aux)
1394 {
1395 case 'a':
1396 if (arg_set == 1)
1397 runtime("Can't add set");
1398 else if (!arg_set)
1399 res.val.ad = int_set_add(f_pool, v1.val.ad, n);
1400 else
1401 res.val.ad = int_set_union(f_pool, v1.val.ad, v2.val.ad);
1402 break;
1403
1404 case 'd':
1405 if (!arg_set)
1406 res.val.ad = int_set_del(f_pool, v1.val.ad, n);
1407 else
1408 res.val.ad = clist_filter(f_pool, v1.val.ad, v2, 0);
1409 break;
1410
1411 case 'f':
1412 if (!arg_set)
1413 runtime("Can't filter pair");
1414 res.val.ad = clist_filter(f_pool, v1.val.ad, v2, 1);
1415 break;
1416
1417 default:
1418 bug("unknown Ca operation");
1419 }
1420 }
1421 else if (v1.type == T_ECLIST)
1422 {
1423 /* Extended community list */
1424 int arg_set = 0;
1425
1426 /* v2.val is either EC or EC-set */
1427 if ((v2.type == T_SET) && eclist_set_type(v2.val.t))
1428 arg_set = 1;
1429 else if (v2.type == T_ECLIST)
1430 arg_set = 2;
1431 else if (v2.type != T_EC)
1432 runtime("Can't add/delete non-ec");
1433
1434 res.type = T_ECLIST;
1435 switch (what->aux)
1436 {
1437 case 'a':
1438 if (arg_set == 1)
1439 runtime("Can't add set");
1440 else if (!arg_set)
1441 res.val.ad = ec_set_add(f_pool, v1.val.ad, v2.val.ec);
1442 else
1443 res.val.ad = ec_set_union(f_pool, v1.val.ad, v2.val.ad);
1444 break;
1445
1446 case 'd':
1447 if (!arg_set)
1448 res.val.ad = ec_set_del(f_pool, v1.val.ad, v2.val.ec);
1449 else
1450 res.val.ad = eclist_filter(f_pool, v1.val.ad, v2, 0);
1451 break;
1452
1453 case 'f':
1454 if (!arg_set)
1455 runtime("Can't filter ec");
1456 res.val.ad = eclist_filter(f_pool, v1.val.ad, v2, 1);
1457 break;
1458
1459 default:
1460 bug("unknown Ca operation");
1461 }
1462 }
1463 else if (v1.type == T_LCLIST)
1464 {
1465 /* Large community list */
1466 int arg_set = 0;
1467
1468 /* v2.val is either LC or LC-set */
1469 if ((v2.type == T_SET) && lclist_set_type(v2.val.t))
1470 arg_set = 1;
1471 else if (v2.type == T_LCLIST)
1472 arg_set = 2;
1473 else if (v2.type != T_LC)
1474 runtime("Can't add/delete non-lc");
1475
1476 res.type = T_LCLIST;
1477 switch (what->aux)
1478 {
1479 case 'a':
1480 if (arg_set == 1)
1481 runtime("Can't add set");
1482 else if (!arg_set)
1483 res.val.ad = lc_set_add(f_pool, v1.val.ad, v2.val.lc);
1484 else
1485 res.val.ad = lc_set_union(f_pool, v1.val.ad, v2.val.ad);
1486 break;
1487
1488 case 'd':
1489 if (!arg_set)
1490 res.val.ad = lc_set_del(f_pool, v1.val.ad, v2.val.lc);
1491 else
1492 res.val.ad = lclist_filter(f_pool, v1.val.ad, v2, 0);
1493 break;
1494
1495 case 'f':
1496 if (!arg_set)
1497 runtime("Can't filter lc");
1498 res.val.ad = lclist_filter(f_pool, v1.val.ad, v2, 1);
1499 break;
1500
1501 default:
1502 bug("unknown Ca operation");
1503 }
1504 }
1505 else
1506 runtime("Can't add/delete to non-[e|l]clist");
1507
1508 break;
1509
1510 case FI_ROA_CHECK: /* ROA Check */
1511 if (what->arg1)
1512 {
1513 ARG(1, T_NET);
1514 ARG(2, T_INT);
1515
1516 as = v2.val.i;
1517 }
1518 else
1519 {
1520 ACCESS_RTE;
1521 v1.val.net = (*f_rte)->net->n.addr;
1522
1523 /* We ignore temporary attributes, probably not a problem here */
1524 /* 0x02 is a value of BA_AS_PATH, we don't want to include BGP headers */
1525 eattr *e = ea_find((*f_rte)->attrs->eattrs, EA_CODE(EAP_BGP, 0x02));
1526
1527 if (!e || e->type != EAF_TYPE_AS_PATH)
1528 runtime("Missing AS_PATH attribute");
1529
1530 as_path_get_last(e->u.ptr, &as);
1531 }
1532
1533 struct rtable *table = ((struct f_inst_roa_check *) what)->rtc->table;
1534 if (!table)
1535 runtime("Missing ROA table");
1536
1537 if (table->addr_type != NET_ROA4 && table->addr_type != NET_ROA6)
1538 runtime("Table type must be either ROA4 or ROA6");
1539
1540 res.type = T_ENUM_ROA;
1541
1542 if (table->addr_type != (v1.val.net->type == NET_IP4 ? NET_ROA4 : NET_ROA6))
1543 res.val.i = ROA_UNKNOWN; /* Prefix and table type mismatch */
1544 else
1545 res.val.i = net_roa_check(table, v1.val.net, as);
1546
1547 break;
1548
1549 case FI_FORMAT: /* Format */
1550 ARG_ANY(1);
1551
1552 res.type = T_STRING;
1553 res.val.s = val_format_str(v1);
1554 break;
1555
1556 case FI_ASSERT: /* Birdtest Assert */
1557 ARG(1, T_BOOL);
1558
1559 res.type = v1.type;
1560 res.val = v1.val;
1561
1562 CALL(bt_assert_hook, res.val.i, what);
1563 break;
1564
1565 default:
1566 bug( "Unknown instruction %d (%c)", what->fi_code, what->fi_code & 0xff);
1567 }}
1568 return res;
1569 }
1570
1571 #undef ARG
1572 #undef ARG_ANY
1573
1574 #define ARG(n) \
1575 if (!i_same(f1->a##n.p, f2->a##n.p)) \
1576 return 0;
1577
1578 #define ONEARG ARG(1);
1579 #define TWOARGS ONEARG; ARG(2);
1580 #define THREEARGS TWOARGS; ARG(3);
1581
1582 #define A2_SAME if (f1->a2.i != f2->a2.i) return 0;
1583
1584 /*
1585 * i_same - does the real comparison of instruction trees; call filter_same() from the outside
1586 */
1587 int
1588 i_same(struct f_inst *f1, struct f_inst *f2)
1589 {
1590 if ((!!f1) != (!!f2))
1591 return 0;
1592 if (!f1)
1593 return 1;
1594 if (f1->aux != f2->aux)
1595 return 0;
1596 if (f1->fi_code != f2->fi_code)
1597 return 0;
1598 if (f1 == f2) /* It looks strange, but it is possible with call rewriting trickery */
1599 return 1;
1600
1601 switch(f1->fi_code) {
1602 case FI_ADD: /* fall through */
1603 case FI_SUBTRACT:
1604 case FI_MULTIPLY:
1605 case FI_DIVIDE:
1606 case FI_OR:
1607 case FI_AND:
1608 case FI_PAIR_CONSTRUCT:
1609 case FI_EC_CONSTRUCT:
1610 case FI_NEQ:
1611 case FI_EQ:
1612 case FI_LT:
1613 case FI_LTE: TWOARGS; break;
1614
1615 case FI_PATHMASK_CONSTRUCT: if (!pm_same(f1->a1.p, f2->a1.p)) return 0; break;
1616
1617 case FI_NOT: ONEARG; break;
1618 case FI_NOT_MATCH:
1619 case FI_MATCH: TWOARGS; break;
1620 case FI_DEFINED: ONEARG; break;
1621 case FI_TYPE: ONEARG; break;
1622
1623 case FI_LC_CONSTRUCT:
1624 THREEARGS;
1625 break;
1626
1627 case FI_SET:
1628 ARG(2);
1629 {
1630 struct symbol *s1, *s2;
1631 s1 = f1->a1.p;
1632 s2 = f2->a1.p;
1633 if (strcmp(s1->name, s2->name))
1634 return 0;
1635 if (s1->class != s2->class)
1636 return 0;
1637 }
1638 break;
1639
1640 case FI_CONSTANT:
1641 switch (f1->aux) {
1642
1643 case T_PREFIX_SET:
1644 if (!trie_same(f1->a2.p, f2->a2.p))
1645 return 0;
1646 break;
1647
1648 case T_SET:
1649 if (!same_tree(f1->a2.p, f2->a2.p))
1650 return 0;
1651 break;
1652
1653 case T_STRING:
1654 if (strcmp(f1->a2.p, f2->a2.p))
1655 return 0;
1656 break;
1657
1658 default:
1659 A2_SAME;
1660 }
1661 break;
1662
1663 case FI_CONSTANT_INDIRECT:
1664 if (!val_same(* (struct f_val *) f1->a1.p, * (struct f_val *) f2->a1.p))
1665 return 0;
1666 break;
1667
1668 case FI_VARIABLE:
1669 if (strcmp((char *) f1->a2.p, (char *) f2->a2.p))
1670 return 0;
1671 break;
1672 case FI_PRINT: case FI_LENGTH: ONEARG; break;
1673 case FI_CONDITION: TWOARGS; break;
1674 case FI_NOP: case FI_EMPTY: break;
1675 case FI_PRINT_AND_DIE: ONEARG; A2_SAME; break;
1676 case FI_PREF_GET:
1677 case FI_RTA_GET: A2_SAME; break;
1678 case FI_EA_GET: A2_SAME; break;
1679 case FI_PREF_SET:
1680 case FI_RTA_SET:
1681 case FI_EA_SET: ONEARG; A2_SAME; break;
1682
1683 case FI_RETURN: ONEARG; break;
1684 case FI_ROA_MAXLEN: ONEARG; break;
1685 case FI_ROA_ASN: ONEARG; break;
1686 case FI_SADR_SRC: ONEARG; break;
1687 case FI_IP: ONEARG; break;
1688 case FI_IS_V4: ONEARG; break;
1689 case FI_ROUTE_DISTINGUISHER: ONEARG; break;
1690 case FI_CALL: /* Call rewriting trickery to avoid exponential behaviour */
1691 ONEARG;
1692 if (!i_same(f1->a2.p, f2->a2.p))
1693 return 0;
1694 f2->a2.p = f1->a2.p;
1695 break;
1696 case FI_CLEAR_LOCAL_VARS: break; /* internal instruction */
1697 case FI_SWITCH: ONEARG; if (!same_tree(f1->a2.p, f2->a2.p)) return 0; break;
1698 case FI_IP_MASK: TWOARGS; break;
1699 case FI_PATH_PREPEND: TWOARGS; break;
1700 case FI_CLIST_ADD_DEL: TWOARGS; break;
1701 case FI_AS_PATH_FIRST:
1702 case FI_AS_PATH_LAST:
1703 case FI_AS_PATH_LAST_NAG: ONEARG; break;
1704 case FI_ROA_CHECK:
1705 TWOARGS;
1706 /* Does not really make sense - ROA check results may change anyway */
1707 if (strcmp(((struct f_inst_roa_check *) f1)->rtc->name,
1708 ((struct f_inst_roa_check *) f2)->rtc->name))
1709 return 0;
1710 break;
1711 case FI_FORMAT: ONEARG; break;
1712 case FI_ASSERT: ONEARG; break;
1713 default:
1714 bug( "Unknown instruction %d in same (%c)", f1->fi_code, f1->fi_code & 0xff);
1715 }
1716 return i_same(f1->next, f2->next);
1717 }
1718
1719 /**
1720 * f_run - run a filter for a route
1721 * @filter: filter to run
1722 * @rte: route being filtered, may be modified
1723 * @tmp_attrs: temporary attributes, prepared by caller or generated by f_run()
1724 * @tmp_pool: all filter allocations go from this pool
1725 * @flags: flags
1726 *
1727 * If the filter needs to modify the route, there are several
1728 * possibilities. @rte might be read-only (with the REF_COW flag); in that
1729 * case a rw copy is obtained by rte_cow() and @rte is replaced. If
1730 * @rte is originally rw, it may be directly modified (and it is never
1731 * copied).
1732 *
1733 * The returned rte may reuse the (possibly cached, cloned) rta, or
1734 * (if the rta was modified) contain a modified uncached rta, which
1735 * uses parts allocated from @tmp_pool and parts shared from the original
1736 * rta. There is one exception - if @rte is rw but contains a cached
1737 * rta and that rta is modified, the rta in the returned rte is also cached.
1738 *
1739 * Ownership of cached rtas is consistent with rte, i.e.
1740 * if a new rte is returned, it has its own clone of the cached rta
1741 * (and the cached rta of the read-only source rte is intact); if rte is
1742 * modified in place, the old cached rta is possibly freed.
1743 */
1744 int
1745 f_run(struct filter *filter, struct rte **rte, struct ea_list **tmp_attrs, struct linpool *tmp_pool, int flags)
1746 {
1747 if (filter == FILTER_ACCEPT)
1748 return F_ACCEPT;
1749
1750 if (filter == FILTER_REJECT)
1751 return F_REJECT;
1752
1753 int rte_cow = ((*rte)->flags & REF_COW);
1754 DBG( "Running filter `%s'...", filter->name );
1755
1756 f_rte = rte;
1757 f_old_rta = NULL;
1758 f_tmp_attrs = tmp_attrs;
1759 f_pool = tmp_pool;
1760 f_flags = flags;
1761
1762 LOG_BUFFER_INIT(f_buf);
1763
1764 struct f_val res = interpret(filter->root);
1765
1766 if (f_old_rta) {
1767 /*
1768 * The cached rta was modified and f_rte now contains an uncached one,
1769 * sharing some parts with the cached one. The cached rta should
1770 * be freed (if rte was originally COW, f_old_rta is a clone
1771 * obtained during rte_cow()).
1772 *
1773 * This also implements the exception mentioned in the f_run()
1774 * description. The reason for this is that rta reuses parts of
1775 * f_old_rta, and these may be freed during rta_free(f_old_rta).
1776 * This is not a problem if rte was COW, because the original rte
1777 * also holds the same rta.
1778 */
1779 if (!rte_cow)
1780 (*f_rte)->attrs = rta_lookup((*f_rte)->attrs);
1781
1782 rta_free(f_old_rta);
1783 }
1784
1785
1786 if (res.type != T_RETURN) {
1787 if (!(f_flags & FF_SILENT))
1788 log_rl(&rl_runtime_err, L_ERR "Filter %s did not return accept nor reject. Make up your mind", filter->name);
1789 return F_ERROR;
1790 }
1791 DBG( "done (%u)\n", res.val.i );
1792 return res.val.i;
1793 }
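/*
 * Editorial caller-side sketch (illustrative only; @flt, @rt and @tmp_pool
 * are hypothetical caller-provided variables, and use_route()/drop_route()
 * are placeholders for whatever the caller actually does):
 *
 *	struct ea_list *tmp_attrs = NULL;
 *	int verdict = f_run(flt, &rt, &tmp_attrs, tmp_pool, 0);
 *
 *	if (verdict == F_ACCEPT)
 *	  use_route(rt);	rt may now point to a rte_cow() copy
 *	else
 *	  drop_route(rt);	F_REJECT or F_ERROR
 */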
1794
1795 /* TODO: perhaps we could integrate f_eval(), f_eval_rte() and f_run() */
1796
1797 struct f_val
1798 f_eval_rte(struct f_inst *expr, struct rte **rte, struct linpool *tmp_pool)
1799 {
1800 struct ea_list *tmp_attrs = NULL;
1801
1802 f_rte = rte;
1803 f_old_rta = NULL;
1804 f_tmp_attrs = &tmp_attrs;
1805 f_pool = tmp_pool;
1806 f_flags = 0;
1807
1808 LOG_BUFFER_INIT(f_buf);
1809
1810 /* Note that in this function we assume that rte->attrs is private / uncached */
1811 struct f_val res = interpret(expr);
1812
1813 /* Hack to include EAF_TEMP attributes to the main list */
1814 (*rte)->attrs->eattrs = ea_append(tmp_attrs, (*rte)->attrs->eattrs);
1815
1816 return res;
1817 }
1818
1819 struct f_val
1820 f_eval(struct f_inst *expr, struct linpool *tmp_pool)
1821 {
1822 f_flags = 0;
1823 f_tmp_attrs = NULL;
1824 f_rte = NULL;
1825 f_pool = tmp_pool;
1826
1827 LOG_BUFFER_INIT(f_buf);
1828
1829 return interpret(expr);
1830 }
1831
1832 uint
1833 f_eval_int(struct f_inst *expr)
1834 {
1835 /* Called independently in parse-time to eval expressions */
1836 struct f_val res = f_eval(expr, cfg_mem);
1837
1838 if (res.type != T_INT)
1839 cf_error("Integer expression expected");
1840
1841 return res.val.i;
1842 }
1843
1844 /**
1845 * filter_same - compare two filters
1846 * @new: first filter to be compared
1847 * @old: second filter to be compared; note that this filter is
1848 * damaged during the comparison.
1849 *
1850 * Returns 1 if the filters are the same, otherwise 0. If there are
1851 * underlying bugs, it will rather report 0 for identical filters than
1852 * 1 for different ones.
1853 */
1854 int
1855 filter_same(struct filter *new, struct filter *old)
1856 {
1857 if (old == new) /* Handle FILTER_ACCEPT and FILTER_REJECT */
1858 return 1;
1859 if (old == FILTER_ACCEPT || old == FILTER_REJECT ||
1860 new == FILTER_ACCEPT || new == FILTER_REJECT)
1861 return 0;
1862 return i_same(new->root, old->root);
1863 }
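/*
 * Editorial usage sketch (illustrative only; new_filter and old_filter are
 * hypothetical names): a natural use is when comparing an old and a new
 * configuration, to decide whether existing routes have to be re-filtered.
 * Note that the second argument is the one that may be damaged:
 *
 *	if (filter_same(new_filter, old_filter))
 *	  ;	filters are equivalent, no need to rerun them
 */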