2 * Filters: utility functions
4 * Copyright 1998 Pavel Machek <pavel@ucw.cz>
6 * Can be freely distributed and used under the terms of the GNU GPL.
13 * You can find the sources of the filter language in the |filter/|
14 * directory. The file |filter/config.Y| contains the filter grammar and basically translates
15 * the source from user into a tree of &f_inst structures. These trees are
16 * later interpreted using code in |filter/filter.c|.
18 * A filter is represented by a tree of &f_inst structures, one structure per
19 * "instruction". Each &f_inst contains @code, @aux value which is
20 * usually the data type this instruction operates on and two generic
21 * arguments (@a1, @a2). Some instructions contain pointer(s) to other
22 * instructions in their (@a1, @a2) fields.
24 * Filters use a &f_val structure for their data. Each &f_val
25 * contains type and value (types are constants prefixed with %T_). Few
26 * of the types are special; %T_RETURN can be or-ed with a type to indicate
27 * that return from a function or from the whole filter should be
28 * forced. An important thing about &f_val's is that they may be copied
29 * with a simple |=|. That's fine for all currently defined types: strings
30 * are read-only (and therefore okay), paths are copied for each
31 * operation (okay too).
36 #include "nest/bird.h"
37 #include "lib/lists.h"
38 #include "lib/resource.h"
39 #include "lib/socket.h"
40 #include "lib/string.h"
41 #include "lib/unaligned.h"
44 #include "nest/route.h"
45 #include "nest/protocol.h"
46 #include "nest/iface.h"
47 #include "nest/attrs.h"
48 #include "conf/conf.h"
49 #include "filter/filter.h"
53 #define FILTER_STACK_DEPTH 16384
55 /* Filter interpreter stack. Make this thread local after going parallel. */
60 static struct filter_stack filter_stack
[FILTER_STACK_DEPTH
];
62 /* Internal filter state, to be allocated on stack when executing filters */
66 struct ea_list
**eattrs
;
69 struct filter_stack
*stack
;
74 void (*bt_assert_hook
)(int result
, struct f_inst
*assert);
76 static struct adata undef_adata
; /* adata of length 0 used for undefined */
78 /* Special undef value for paths and clists */
80 undef_value(struct f_val v
)
82 return ((v
.type
== T_PATH
) || (v
.type
== T_CLIST
) ||
83 (v
.type
== T_ECLIST
) || (v
.type
== T_LCLIST
)) &&
84 (v
.val
.ad
== &undef_adata
);
88 adata_empty(struct linpool
*pool
, int l
)
90 struct adata
*res
= lp_alloc(pool
, sizeof(struct adata
) + l
);
96 pm_format(struct f_path_mask
*p
, buffer
*buf
)
98 buffer_puts(buf
, "[= ");
105 buffer_print(buf
, "%u ", p
->val
);
109 buffer_puts(buf
, "? ");
113 buffer_puts(buf
, "* ");
117 buffer_print(buf
, "%u..%u ", p
->val
, p
->val2
);
127 buffer_puts(buf
, "=]");
130 static inline int val_is_ip4(const struct f_val v
)
131 { return (v
.type
== T_IP
) && ipa_is_ip4(v
.val
.ip
); }
134 lcomm_cmp(lcomm v1
, lcomm v2
)
136 if (v1
.asn
!= v2
.asn
)
137 return (v1
.asn
> v2
.asn
) ? 1 : -1;
138 if (v1
.ldp1
!= v2
.ldp1
)
139 return (v1
.ldp1
> v2
.ldp1
) ? 1 : -1;
140 if (v1
.ldp2
!= v2
.ldp2
)
141 return (v1
.ldp2
> v2
.ldp2
) ? 1 : -1;
146 * val_compare - compare two values
150 * Compares two values and returns -1, 0, 1 on <, =, > or CMP_ERROR on
151 * error. Tree module relies on this giving consistent results so
152 * that it can be used for building balanced trees.
155 val_compare(struct f_val v1
, struct f_val v2
)
157 if (v1
.type
!= v2
.type
) {
158 if (v1
.type
== T_VOID
) /* Hack for else */
160 if (v2
.type
== T_VOID
)
163 /* IP->Quad implicit conversion */
164 if ((v1
.type
== T_QUAD
) && val_is_ip4(v2
))
165 return uint_cmp(v1
.val
.i
, ipa_to_u32(v2
.val
.ip
));
166 if (val_is_ip4(v1
) && (v2
.type
== T_QUAD
))
167 return uint_cmp(ipa_to_u32(v1
.val
.ip
), v2
.val
.i
);
169 debug( "Types do not match in val_compare\n" );
181 return uint_cmp(v1
.val
.i
, v2
.val
.i
);
184 return u64_cmp(v1
.val
.ec
, v2
.val
.ec
);
186 return lcomm_cmp(v1
.val
.lc
, v2
.val
.lc
);
188 return ipa_compare(v1
.val
.ip
, v2
.val
.ip
);
190 return net_compare(v1
.val
.net
, v2
.val
.net
);
192 return strcmp(v1
.val
.s
, v2
.val
.s
);
199 pm_same(struct f_path_mask
*m1
, struct f_path_mask
*m2
)
203 if (m1
->kind
!= m2
->kind
)
206 if (m1
->kind
== PM_ASN_EXPR
)
208 if (!i_same((struct f_inst
*) m1
->val
, (struct f_inst
*) m2
->val
))
213 if ((m1
->val
!= m2
->val
) || (m1
->val2
!= m2
->val2
))
225 * val_same - compare two values
229 * Compares two values and returns 1 if they are same and 0 if not.
230 * Comparison of values of different types is valid and returns 0.
233 val_same(struct f_val v1
, struct f_val v2
)
237 rc
= val_compare(v1
, v2
);
241 if (v1
.type
!= v2
.type
)
246 return pm_same(v1
.val
.path_mask
, v2
.val
.path_mask
);
251 return adata_same(v1
.val
.ad
, v2
.val
.ad
);
253 return same_tree(v1
.val
.t
, v2
.val
.t
);
255 return trie_same(v1
.val
.ti
, v2
.val
.ti
);
257 bug("Invalid type in val_same(): %x", v1
.type
);
262 clist_set_type(struct f_tree
*set
, struct f_val
*v
)
264 switch (set
->from
.type
)
275 if (val_is_ip4(set
->from
) && val_is_ip4(set
->to
))
288 eclist_set_type(struct f_tree
*set
)
289 { return set
->from
.type
== T_EC
; }
292 lclist_set_type(struct f_tree
*set
)
293 { return set
->from
.type
== T_LC
; }
296 clist_match_set(struct adata
*clist
, struct f_tree
*set
)
302 if (!clist_set_type(set
, &v
))
305 u32
*l
= (u32
*) clist
->data
;
306 u32
*end
= l
+ clist
->length
/4;
310 if (find_tree(set
, v
))
317 eclist_match_set(struct adata
*list
, struct f_tree
*set
)
322 if (!eclist_set_type(set
))
326 u32
*l
= int_set_get_data(list
);
327 int len
= int_set_get_size(list
);
331 for (i
= 0; i
< len
; i
+= 2) {
332 v
.val
.ec
= ec_get(l
, i
);
333 if (find_tree(set
, v
))
341 lclist_match_set(struct adata
*list
, struct f_tree
*set
)
346 if (!lclist_set_type(set
))
350 u32
*l
= int_set_get_data(list
);
351 int len
= int_set_get_size(list
);
355 for (i
= 0; i
< len
; i
+= 3) {
356 v
.val
.lc
= lc_get(l
, i
);
357 if (find_tree(set
, v
))
364 static struct adata
*
365 clist_filter(struct linpool
*pool
, struct adata
*list
, struct f_val set
, int pos
)
370 int tree
= (set
.type
== T_SET
); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
373 clist_set_type(set
.val
.t
, &v
);
377 int len
= int_set_get_size(list
);
378 u32
*l
= int_set_get_data(list
);
385 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
386 if ((tree
? !!find_tree(set
.val
.t
, v
) : int_set_contains(set
.val
.ad
, v
.val
.i
)) == pos
)
390 uint nl
= (k
- tmp
) * sizeof(u32
);
391 if (nl
== list
->length
)
394 struct adata
*res
= adata_empty(pool
, nl
);
395 memcpy(res
->data
, tmp
, nl
);
399 static struct adata
*
400 eclist_filter(struct linpool
*pool
, struct adata
*list
, struct f_val set
, int pos
)
405 int tree
= (set
.type
== T_SET
); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
408 int len
= int_set_get_size(list
);
409 u32
*l
= int_set_get_data(list
);
415 for (i
= 0; i
< len
; i
+= 2) {
416 v
.val
.ec
= ec_get(l
, i
);
417 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
418 if ((tree
? !!find_tree(set
.val
.t
, v
) : ec_set_contains(set
.val
.ad
, v
.val
.ec
)) == pos
) {
424 uint nl
= (k
- tmp
) * sizeof(u32
);
425 if (nl
== list
->length
)
428 struct adata
*res
= adata_empty(pool
, nl
);
429 memcpy(res
->data
, tmp
, nl
);
433 static struct adata
*
434 lclist_filter(struct linpool
*pool
, struct adata
*list
, struct f_val set
, int pos
)
439 int tree
= (set
.type
== T_SET
); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
442 int len
= int_set_get_size(list
);
443 u32
*l
= int_set_get_data(list
);
449 for (i
= 0; i
< len
; i
+= 3) {
450 v
.val
.lc
= lc_get(l
, i
);
451 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
452 if ((tree
? !!find_tree(set
.val
.t
, v
) : lc_set_contains(set
.val
.ad
, v
.val
.lc
)) == pos
)
456 uint nl
= (k
- tmp
) * sizeof(u32
);
457 if (nl
== list
->length
)
460 struct adata
*res
= adata_empty(pool
, nl
);
461 memcpy(res
->data
, tmp
, nl
);
466 * val_in_range - implement |~| operator
470 * Checks if @v1 is element (|~| operator) of @v2.
473 val_in_range(struct f_val v1
, struct f_val v2
)
475 if ((v1
.type
== T_PATH
) && (v2
.type
== T_PATH_MASK
))
476 return as_path_match(v1
.val
.ad
, v2
.val
.path_mask
);
478 if ((v1
.type
== T_INT
) && (v2
.type
== T_PATH
))
479 return as_path_contains(v2
.val
.ad
, v1
.val
.i
, 1);
481 if (((v1
.type
== T_PAIR
) || (v1
.type
== T_QUAD
)) && (v2
.type
== T_CLIST
))
482 return int_set_contains(v2
.val
.ad
, v1
.val
.i
);
483 /* IP->Quad implicit conversion */
484 if (val_is_ip4(v1
) && (v2
.type
== T_CLIST
))
485 return int_set_contains(v2
.val
.ad
, ipa_to_u32(v1
.val
.ip
));
487 if ((v1
.type
== T_EC
) && (v2
.type
== T_ECLIST
))
488 return ec_set_contains(v2
.val
.ad
, v1
.val
.ec
);
490 if ((v1
.type
== T_LC
) && (v2
.type
== T_LCLIST
))
491 return lc_set_contains(v2
.val
.ad
, v1
.val
.lc
);
493 if ((v1
.type
== T_STRING
) && (v2
.type
== T_STRING
))
494 return patmatch(v2
.val
.s
, v1
.val
.s
);
496 if ((v1
.type
== T_IP
) && (v2
.type
== T_NET
))
497 return ipa_in_netX(v1
.val
.ip
, v2
.val
.net
);
499 if ((v1
.type
== T_NET
) && (v2
.type
== T_NET
))
500 return net_in_netX(v1
.val
.net
, v2
.val
.net
);
502 if ((v1
.type
== T_NET
) && (v2
.type
== T_PREFIX_SET
))
503 return trie_match_net(v2
.val
.ti
, v1
.val
.net
);
505 if (v2
.type
!= T_SET
)
508 /* With integrated Quad<->IP implicit conversion */
509 if ((v1
.type
== v2
.val
.t
->from
.type
) ||
510 ((v1
.type
== T_QUAD
) && val_is_ip4(v2
.val
.t
->from
) && val_is_ip4(v2
.val
.t
->to
)))
511 return !!find_tree(v2
.val
.t
, v1
);
513 if (v1
.type
== T_CLIST
)
514 return clist_match_set(v1
.val
.ad
, v2
.val
.t
);
516 if (v1
.type
== T_ECLIST
)
517 return eclist_match_set(v1
.val
.ad
, v2
.val
.t
);
519 if (v1
.type
== T_LCLIST
)
520 return lclist_match_set(v1
.val
.ad
, v2
.val
.t
);
522 if (v1
.type
== T_PATH
)
523 return as_path_match_set(v1
.val
.ad
, v2
.val
.t
);
529 * val_format - format filter value
532 val_format(struct f_val v
, buffer
*buf
)
537 case T_VOID
: buffer_puts(buf
, "(void)"); return;
538 case T_BOOL
: buffer_puts(buf
, v
.val
.i
? "TRUE" : "FALSE"); return;
539 case T_INT
: buffer_print(buf
, "%u", v
.val
.i
); return;
540 case T_STRING
: buffer_print(buf
, "%s", v
.val
.s
); return;
541 case T_IP
: buffer_print(buf
, "%I", v
.val
.ip
); return;
542 case T_NET
: buffer_print(buf
, "%N", v
.val
.net
); return;
543 case T_PAIR
: buffer_print(buf
, "(%u,%u)", v
.val
.i
>> 16, v
.val
.i
& 0xffff); return;
544 case T_QUAD
: buffer_print(buf
, "%R", v
.val
.i
); return;
545 case T_EC
: ec_format(buf2
, v
.val
.ec
); buffer_print(buf
, "%s", buf2
); return;
546 case T_LC
: lc_format(buf2
, v
.val
.lc
); buffer_print(buf
, "%s", buf2
); return;
547 case T_RD
: rd_format(v
.val
.ec
, buf2
, 1024); buffer_print(buf
, "%s", buf2
); return;
548 case T_PREFIX_SET
: trie_format(v
.val
.ti
, buf
); return;
549 case T_SET
: tree_format(v
.val
.t
, buf
); return;
550 case T_ENUM
: buffer_print(buf
, "(enum %x)%u", v
.type
, v
.val
.i
); return;
551 case T_PATH
: as_path_format(v
.val
.ad
, buf2
, 1000); buffer_print(buf
, "(path %s)", buf2
); return;
552 case T_CLIST
: int_set_format(v
.val
.ad
, 1, -1, buf2
, 1000); buffer_print(buf
, "(clist %s)", buf2
); return;
553 case T_ECLIST
: ec_set_format(v
.val
.ad
, -1, buf2
, 1000); buffer_print(buf
, "(eclist %s)", buf2
); return;
554 case T_LCLIST
: lc_set_format(v
.val
.ad
, -1, buf2
, 1000); buffer_print(buf
, "(lclist %s)", buf2
); return;
555 case T_PATH_MASK
: pm_format(v
.val
.path_mask
, buf
); return;
556 default: buffer_print(buf
, "[unknown type %x]", v
.type
); return;
561 static inline void f_cache_eattrs(struct filter_state
*fs
)
563 fs
->eattrs
= &((*fs
->rte
)->attrs
->eattrs
);
566 static inline void f_rte_cow(struct filter_state
*fs
)
568 if (!((*fs
->rte
)->flags
& REF_COW
))
571 *fs
->rte
= rte_cow(*fs
->rte
);
575 * rta_cow - prepare rta for modification by filter
578 f_rta_cow(struct filter_state
*fs
)
580 if (!rta_is_cached((*fs
->rte
)->attrs
))
583 /* Prepare to modify rte */
586 /* Store old rta to free it later, it stores reference from rte_cow() */
587 fs
->old_rta
= (*fs
->rte
)->attrs
;
590 * Get shallow copy of rta. Fields eattrs and nexthops of rta are shared
591 * with fs->old_rta (they will be copied when the cached rta will be obtained
592 * at the end of f_run()), also the lock of hostentry is inherited (we
593 * suppose hostentry is not changed by filters).
595 (*fs
->rte
)->attrs
= rta_do_cow((*fs
->rte
)->attrs
, fs
->pool
);
597 /* Re-cache the ea_list */
602 val_format_str(struct filter_state
*fs
, struct f_val v
) {
606 return lp_strdup(fs
->pool
, b
.start
);
609 static struct tbf rl_runtime_err
= TBF_DEFAULT_LOG_LIMITS
;
614 * @what: filter to interpret
616 * Interpret given tree of filter instructions. This is core function
617 * of filter system and does all the hard work.
619 * Each instruction has 4 fields: code (which is instruction code),
620 * aux (which is extension to instruction code, typically type),
621 * arg1 and arg2 - arguments. Depending on instruction, arguments
622 * are either integers, or pointers to instruction trees. Common
623 * instructions like +, that have two expressions as arguments use
624 * TWOARGS macro to get both of them evaluated.
626 static enum filter_return
627 interpret(struct filter_state
*fs
, struct f_inst
*what
)
632 enum filter_return fret
;
636 #define res fs->stack[fs->stack_ptr].val
637 #define v1 fs->stack[fs->stack_ptr + 1].val
638 #define v2 fs->stack[fs->stack_ptr + 2].val
639 #define v3 fs->stack[fs->stack_ptr + 3].val
641 res
= (struct f_val
) { .type
= T_VOID
};
643 for ( ; what
; what
= what
->next
) {
644 res
= (struct f_val
) { .type
= T_VOID
};
645 switch(what
->fi_code
) {
647 #define runtime(fmt, ...) do { \
648 if (!(fs->flags & FF_SILENT)) \
649 log_rl(&rl_runtime_err, L_ERR "filters, line %d: " fmt, what->lineno, ##__VA_ARGS__); \
653 #define ARG_ANY(n) INTERPRET(what->a##n.p, n)
655 #define ARG(n,t) ARG_ANY(n); \
656 if (v##n.type != t) \
657 runtime("Argument %d of instruction %s must be of type %02x, got %02x", \
658 n, f_instruction_name(what->fi_code), t, v##n.type);
660 #define INTERPRET(what_, n) do { \
661 fs->stack_ptr += n; \
662 fret = interpret(fs, what_); \
663 fs->stack_ptr -= n; \
664 if (fret == F_RETURN) \
665 bug("This shall not happen"); \
666 if (fret > F_RETURN) \
671 do { if (!fs->rte) runtime("No route to access"); } while (0)
673 #define ACCESS_EATTRS \
674 do { if (!fs->eattrs) f_cache_eattrs(fs); } while (0)
676 #define BITFIELD_MASK(what_) (1u << EA_BIT_GET(what_->a2.i))
678 #include "filter/f-inst.c"
693 if (!i_same(f1->a##n.p, f2->a##n.p)) \
696 #define ONEARG ARG(1);
697 #define TWOARGS ONEARG; ARG(2);
698 #define THREEARGS TWOARGS; ARG(3);
700 #define A2_SAME if (f1->a2.i != f2->a2.i) return 0;
703 * i_same - function that does real comparing of instruction trees, you should call filter_same from outside
706 i_same(struct f_inst
*f1
, struct f_inst
*f2
)
708 if ((!!f1
) != (!!f2
))
712 if (f1
->aux
!= f2
->aux
)
714 if (f1
->fi_code
!= f2
->fi_code
)
716 if (f1
== f2
) /* It looks strange, but it is possible with call rewriting trickery */
719 switch(f1
->fi_code
) {
720 case FI_ADD
: /* fall through */
726 case FI_PAIR_CONSTRUCT
:
727 case FI_EC_CONSTRUCT
:
731 case FI_LTE
: TWOARGS
; break;
733 case FI_PATHMASK_CONSTRUCT
: if (!pm_same(f1
->a1
.p
, f2
->a1
.p
)) return 0; break;
735 case FI_NOT
: ONEARG
; break;
737 case FI_MATCH
: TWOARGS
; break;
738 case FI_DEFINED
: ONEARG
; break;
739 case FI_TYPE
: ONEARG
; break;
741 case FI_LC_CONSTRUCT
:
748 struct symbol
*s1
, *s2
;
751 if (strcmp(s1
->name
, s2
->name
))
753 if (s1
->class != s2
->class)
762 if (!trie_same(f1
->a2
.p
, f2
->a2
.p
))
767 if (!same_tree(f1
->a2
.p
, f2
->a2
.p
))
772 if (strcmp(f1
->a2
.p
, f2
->a2
.p
))
781 case FI_CONSTANT_INDIRECT
:
782 if (!val_same(* (struct f_val
*) f1
->a1
.p
, * (struct f_val
*) f2
->a1
.p
))
787 if (strcmp((char *) f1
->a2
.p
, (char *) f2
->a2
.p
))
790 case FI_PRINT
: case FI_LENGTH
: ONEARG
; break;
791 case FI_CONDITION
: TWOARGS
; break;
792 case FI_NOP
: case FI_EMPTY
: break;
793 case FI_PRINT_AND_DIE
: ONEARG
; A2_SAME
; break;
795 case FI_RTA_GET
: A2_SAME
; break;
796 case FI_EA_GET
: A2_SAME
; break;
799 case FI_EA_SET
: ONEARG
; A2_SAME
; break;
801 case FI_RETURN
: ONEARG
; break;
802 case FI_ROA_MAXLEN
: ONEARG
; break;
803 case FI_ROA_ASN
: ONEARG
; break;
804 case FI_SADR_SRC
: ONEARG
; break;
805 case FI_IP
: ONEARG
; break;
806 case FI_IS_V4
: ONEARG
; break;
807 case FI_ROUTE_DISTINGUISHER
: ONEARG
; break;
808 case FI_CALL
: /* Call rewriting trickery to avoid exponential behaviour */
810 if (!i_same(f1
->a2
.p
, f2
->a2
.p
))
814 case FI_CLEAR_LOCAL_VARS
: break; /* internal instruction */
815 case FI_SWITCH
: ONEARG
; if (!same_tree(f1
->a2
.p
, f2
->a2
.p
)) return 0; break;
816 case FI_IP_MASK
: TWOARGS
; break;
817 case FI_PATH_PREPEND
: TWOARGS
; break;
818 case FI_CLIST_ADD_DEL
: TWOARGS
; break;
819 case FI_AS_PATH_FIRST
:
820 case FI_AS_PATH_LAST
:
821 case FI_AS_PATH_LAST_NAG
: ONEARG
; break;
824 /* Does not really make sense - ROA check results may change anyway */
825 if (strcmp(((struct f_inst_roa_check
*) f1
)->rtc
->name
,
826 ((struct f_inst_roa_check
*) f2
)->rtc
->name
))
829 case FI_FORMAT
: ONEARG
; break;
830 case FI_ASSERT
: ONEARG
; break;
832 bug( "Unknown instruction %d in same (%c)", f1
->fi_code
, f1
->fi_code
& 0xff);
834 return i_same(f1
->next
, f2
->next
);
838 * f_run - run a filter for a route
839 * @filter: filter to run
840 * @rte: route being filtered, may be modified
841 * @tmp_pool: all filter allocations go from this pool
844 * If filter needs to modify the route, there are several
845 * posibilities. @rte might be read-only (with REF_COW flag), in that
846 * case rw copy is obtained by rte_cow() and @rte is replaced. If
847 * @rte is originally rw, it may be directly modified (and it is never
850 * The returned rte may reuse the (possibly cached, cloned) rta, or
851 * (if rta was modificied) contains a modified uncached rta, which
852 * uses parts allocated from @tmp_pool and parts shared from original
853 * rta. There is one exception - if @rte is rw but contains a cached
854 * rta and that is modified, rta in returned rte is also cached.
856 * Ownership of cached rtas is consistent with rte, i.e.
857 * if a new rte is returned, it has its own clone of cached rta
858 * (and cached rta of read-only source rte is intact), if rte is
859 * modified in place, old cached rta is possibly freed.
862 f_run(struct filter
*filter
, struct rte
**rte
, struct linpool
*tmp_pool
, int flags
)
864 if (filter
== FILTER_ACCEPT
)
867 if (filter
== FILTER_REJECT
)
870 int rte_cow
= ((*rte
)->flags
& REF_COW
);
871 DBG( "Running filter `%s'...", filter
->name
);
873 struct filter_state fs
= {
877 .stack
= filter_stack
,
880 LOG_BUFFER_INIT(fs
.buf
);
882 enum filter_return fret
= interpret(&fs
, filter
->root
);
886 * Cached rta was modified and fs->rte contains now an uncached one,
887 * sharing some part with the cached one. The cached rta should
888 * be freed (if rte was originally COW, fs->old_rta is a clone
889 * obtained during rte_cow()).
891 * This also implements the exception mentioned in f_run()
892 * description. The reason for this is that rta reuses parts of
893 * fs->old_rta, and these may be freed during rta_free(fs->old_rta).
894 * This is not the problem if rte was COW, because original rte
895 * also holds the same rta.
898 (*fs
.rte
)->attrs
= rta_lookup((*fs
.rte
)->attrs
);
900 rta_free(fs
.old_rta
);
904 if (fret
< F_ACCEPT
) {
905 if (!(fs
.flags
& FF_SILENT
))
906 log_rl(&rl_runtime_err
, L_ERR
"Filter %s did not return accept nor reject. Make up your mind", filter
->name
);
909 DBG( "done (%u)\n", res
.val
.i
);
913 /* TODO: perhaps we could integrate f_eval(), f_eval_rte() and f_run() */
916 f_eval_rte(struct f_inst
*expr
, struct rte
**rte
, struct linpool
*tmp_pool
)
919 struct filter_state fs
= {
922 .stack
= filter_stack
,
925 LOG_BUFFER_INIT(fs
.buf
);
927 /* Note that in this function we assume that rte->attrs is private / uncached */
928 return interpret(&fs
, expr
);
932 f_eval(struct f_inst
*expr
, struct linpool
*tmp_pool
, struct f_val
*pres
)
934 struct filter_state fs
= {
936 .stack
= filter_stack
,
939 LOG_BUFFER_INIT(fs
.buf
);
941 enum filter_return fret
= interpret(&fs
, expr
);
942 *pres
= filter_stack
[0].val
;
947 f_eval_int(struct f_inst
*expr
)
949 /* Called independently in parse-time to eval expressions */
950 struct filter_state fs
= {
952 .stack
= filter_stack
,
955 LOG_BUFFER_INIT(fs
.buf
);
957 if (interpret(&fs
, expr
) > F_RETURN
)
958 cf_error("Runtime error while evaluating expression");
960 if (filter_stack
[0].val
.type
!= T_INT
)
961 cf_error("Integer expression expected");
963 return filter_stack
[0].val
.val
.i
;
967 * filter_same - compare two filters
968 * @new: first filter to be compared
969 * @old: second filter to be compared, notice that this filter is
970 * damaged while comparing.
972 * Returns 1 in case filters are same, otherwise 0. If there are
973 * underlying bugs, it will rather say 0 on same filters than say
977 filter_same(struct filter
*new, struct filter
*old
)
979 if (old
== new) /* Handle FILTER_ACCEPT and FILTER_REJECT */
981 if (old
== FILTER_ACCEPT
|| old
== FILTER_REJECT
||
982 new == FILTER_ACCEPT
|| new == FILTER_REJECT
)
984 return i_same(new->root
, old
->root
);