/*
 *	Filters: utility functions
 *
 *	Copyright 1998 Pavel Machek <pavel@ucw.cz>
 *
 *	Can be freely distributed and used under the terms of the GNU GPL.
 */

/*
 * You can find sources of the filter language in |filter/|
 * directory. File |filter/config.Y| contains filter grammar and basically
 * translates the source from user into a tree of &f_inst structures. These
 * trees are later interpreted using code in |filter/filter.c|.
 *
 * A filter is represented by a tree of &f_inst structures, one structure per
 * "instruction". Each &f_inst contains @code, @aux value which is
 * usually the data type this instruction operates on and two generic
 * arguments (@a1, @a2). Some instructions contain pointer(s) to other
 * instructions in their (@a1, @a2) fields.
 *
 * Filters use a &f_val structure for their data. Each &f_val
 * contains type and value (types are constants prefixed with %T_). Few
 * of the types are special; %T_RETURN can be or-ed with a type to indicate
 * that return from a function or from the whole filter should be
 * forced. Important thing about &f_val's is that they may be copied
 * with a simple |=|. That's fine for all currently defined types: strings
 * are read-only (and therefore okay), paths are copied for each
 * operation (okay too).
 */
36 #include "nest/bird.h"
37 #include "lib/lists.h"
38 #include "lib/resource.h"
39 #include "lib/socket.h"
40 #include "lib/string.h"
41 #include "lib/unaligned.h"
44 #include "nest/route.h"
45 #include "nest/protocol.h"
46 #include "nest/iface.h"
47 #include "nest/attrs.h"
48 #include "conf/conf.h"
49 #include "filter/filter.h"
53 /* Internal filter state, to be allocated on stack when executing filters */
57 struct ea_list
**eattrs
;
63 void (*bt_assert_hook
)(int result
, struct f_inst
*assert);
65 static struct adata undef_adata
; /* adata of length 0 used for undefined */
67 /* Special undef value for paths and clists */
69 undef_value(struct f_val v
)
71 return ((v
.type
== T_PATH
) || (v
.type
== T_CLIST
) ||
72 (v
.type
== T_ECLIST
) || (v
.type
== T_LCLIST
)) &&
73 (v
.val
.ad
== &undef_adata
);
77 adata_empty(struct linpool
*pool
, int l
)
79 struct adata
*res
= lp_alloc(pool
, sizeof(struct adata
) + l
);
85 pm_format(struct f_path_mask
*p
, buffer
*buf
)
87 buffer_puts(buf
, "[= ");
94 buffer_print(buf
, "%u ", p
->val
);
98 buffer_puts(buf
, "? ");
102 buffer_puts(buf
, "* ");
106 buffer_print(buf
, "%u..%u ", p
->val
, p
->val2
);
116 buffer_puts(buf
, "=]");
119 static inline int val_is_ip4(const struct f_val v
)
120 { return (v
.type
== T_IP
) && ipa_is_ip4(v
.val
.ip
); }
123 lcomm_cmp(lcomm v1
, lcomm v2
)
125 if (v1
.asn
!= v2
.asn
)
126 return (v1
.asn
> v2
.asn
) ? 1 : -1;
127 if (v1
.ldp1
!= v2
.ldp1
)
128 return (v1
.ldp1
> v2
.ldp1
) ? 1 : -1;
129 if (v1
.ldp2
!= v2
.ldp2
)
130 return (v1
.ldp2
> v2
.ldp2
) ? 1 : -1;
/*
 * val_compare - compare two values
 *
 * Compares two values and returns -1, 0, 1 on <, =, > or CMP_ERROR on
 * error. Tree module relies on this giving consistent results so
 * that it can be used for building balanced trees.
 */
144 val_compare(struct f_val v1
, struct f_val v2
)
146 if (v1
.type
!= v2
.type
) {
147 if (v1
.type
== T_VOID
) /* Hack for else */
149 if (v2
.type
== T_VOID
)
152 /* IP->Quad implicit conversion */
153 if ((v1
.type
== T_QUAD
) && val_is_ip4(v2
))
154 return uint_cmp(v1
.val
.i
, ipa_to_u32(v2
.val
.ip
));
155 if (val_is_ip4(v1
) && (v2
.type
== T_QUAD
))
156 return uint_cmp(ipa_to_u32(v1
.val
.ip
), v2
.val
.i
);
158 debug( "Types do not match in val_compare\n" );
170 return uint_cmp(v1
.val
.i
, v2
.val
.i
);
173 return u64_cmp(v1
.val
.ec
, v2
.val
.ec
);
175 return lcomm_cmp(v1
.val
.lc
, v2
.val
.lc
);
177 return ipa_compare(v1
.val
.ip
, v2
.val
.ip
);
179 return net_compare(v1
.val
.net
, v2
.val
.net
);
181 return strcmp(v1
.val
.s
, v2
.val
.s
);
188 pm_same(struct f_path_mask
*m1
, struct f_path_mask
*m2
)
192 if (m1
->kind
!= m2
->kind
)
195 if (m1
->kind
== PM_ASN_EXPR
)
197 if (!i_same((struct f_inst
*) m1
->val
, (struct f_inst
*) m2
->val
))
202 if ((m1
->val
!= m2
->val
) || (m1
->val2
!= m2
->val2
))
/*
 * val_same - compare two values
 *
 * Compares two values and returns 1 if they are same and 0 if not.
 * Comparison of values of different types is valid and returns 0.
 */
222 val_same(struct f_val v1
, struct f_val v2
)
226 rc
= val_compare(v1
, v2
);
230 if (v1
.type
!= v2
.type
)
235 return pm_same(v1
.val
.path_mask
, v2
.val
.path_mask
);
240 return adata_same(v1
.val
.ad
, v2
.val
.ad
);
242 return same_tree(v1
.val
.t
, v2
.val
.t
);
244 return trie_same(v1
.val
.ti
, v2
.val
.ti
);
246 bug("Invalid type in val_same(): %x", v1
.type
);
251 clist_set_type(struct f_tree
*set
, struct f_val
*v
)
253 switch (set
->from
.type
)
264 if (val_is_ip4(set
->from
) && val_is_ip4(set
->to
))
277 eclist_set_type(struct f_tree
*set
)
278 { return set
->from
.type
== T_EC
; }
281 lclist_set_type(struct f_tree
*set
)
282 { return set
->from
.type
== T_LC
; }
285 clist_match_set(struct adata
*clist
, struct f_tree
*set
)
291 if (!clist_set_type(set
, &v
))
294 u32
*l
= (u32
*) clist
->data
;
295 u32
*end
= l
+ clist
->length
/4;
299 if (find_tree(set
, v
))
306 eclist_match_set(struct adata
*list
, struct f_tree
*set
)
311 if (!eclist_set_type(set
))
315 u32
*l
= int_set_get_data(list
);
316 int len
= int_set_get_size(list
);
320 for (i
= 0; i
< len
; i
+= 2) {
321 v
.val
.ec
= ec_get(l
, i
);
322 if (find_tree(set
, v
))
330 lclist_match_set(struct adata
*list
, struct f_tree
*set
)
335 if (!lclist_set_type(set
))
339 u32
*l
= int_set_get_data(list
);
340 int len
= int_set_get_size(list
);
344 for (i
= 0; i
< len
; i
+= 3) {
345 v
.val
.lc
= lc_get(l
, i
);
346 if (find_tree(set
, v
))
353 static struct adata
*
354 clist_filter(struct linpool
*pool
, struct adata
*list
, struct f_val set
, int pos
)
359 int tree
= (set
.type
== T_SET
); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
362 clist_set_type(set
.val
.t
, &v
);
366 int len
= int_set_get_size(list
);
367 u32
*l
= int_set_get_data(list
);
374 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
375 if ((tree
? !!find_tree(set
.val
.t
, v
) : int_set_contains(set
.val
.ad
, v
.val
.i
)) == pos
)
379 uint nl
= (k
- tmp
) * sizeof(u32
);
380 if (nl
== list
->length
)
383 struct adata
*res
= adata_empty(pool
, nl
);
384 memcpy(res
->data
, tmp
, nl
);
388 static struct adata
*
389 eclist_filter(struct linpool
*pool
, struct adata
*list
, struct f_val set
, int pos
)
394 int tree
= (set
.type
== T_SET
); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
397 int len
= int_set_get_size(list
);
398 u32
*l
= int_set_get_data(list
);
404 for (i
= 0; i
< len
; i
+= 2) {
405 v
.val
.ec
= ec_get(l
, i
);
406 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
407 if ((tree
? !!find_tree(set
.val
.t
, v
) : ec_set_contains(set
.val
.ad
, v
.val
.ec
)) == pos
) {
413 uint nl
= (k
- tmp
) * sizeof(u32
);
414 if (nl
== list
->length
)
417 struct adata
*res
= adata_empty(pool
, nl
);
418 memcpy(res
->data
, tmp
, nl
);
422 static struct adata
*
423 lclist_filter(struct linpool
*pool
, struct adata
*list
, struct f_val set
, int pos
)
428 int tree
= (set
.type
== T_SET
); /* 1 -> set is T_SET, 0 -> set is T_CLIST */
431 int len
= int_set_get_size(list
);
432 u32
*l
= int_set_get_data(list
);
438 for (i
= 0; i
< len
; i
+= 3) {
439 v
.val
.lc
= lc_get(l
, i
);
440 /* pos && member(val, set) || !pos && !member(val, set), member() depends on tree */
441 if ((tree
? !!find_tree(set
.val
.t
, v
) : lc_set_contains(set
.val
.ad
, v
.val
.lc
)) == pos
)
445 uint nl
= (k
- tmp
) * sizeof(u32
);
446 if (nl
== list
->length
)
449 struct adata
*res
= adata_empty(pool
, nl
);
450 memcpy(res
->data
, tmp
, nl
);
/*
 * val_in_range - implement |~| operator
 *
 * Checks if @v1 is element (|~| operator) of @v2.
 */
462 val_in_range(struct f_val v1
, struct f_val v2
)
464 if ((v1
.type
== T_PATH
) && (v2
.type
== T_PATH_MASK
))
465 return as_path_match(v1
.val
.ad
, v2
.val
.path_mask
);
467 if ((v1
.type
== T_INT
) && (v2
.type
== T_PATH
))
468 return as_path_contains(v2
.val
.ad
, v1
.val
.i
, 1);
470 if (((v1
.type
== T_PAIR
) || (v1
.type
== T_QUAD
)) && (v2
.type
== T_CLIST
))
471 return int_set_contains(v2
.val
.ad
, v1
.val
.i
);
472 /* IP->Quad implicit conversion */
473 if (val_is_ip4(v1
) && (v2
.type
== T_CLIST
))
474 return int_set_contains(v2
.val
.ad
, ipa_to_u32(v1
.val
.ip
));
476 if ((v1
.type
== T_EC
) && (v2
.type
== T_ECLIST
))
477 return ec_set_contains(v2
.val
.ad
, v1
.val
.ec
);
479 if ((v1
.type
== T_LC
) && (v2
.type
== T_LCLIST
))
480 return lc_set_contains(v2
.val
.ad
, v1
.val
.lc
);
482 if ((v1
.type
== T_STRING
) && (v2
.type
== T_STRING
))
483 return patmatch(v2
.val
.s
, v1
.val
.s
);
485 if ((v1
.type
== T_IP
) && (v2
.type
== T_NET
))
486 return ipa_in_netX(v1
.val
.ip
, v2
.val
.net
);
488 if ((v1
.type
== T_NET
) && (v2
.type
== T_NET
))
489 return net_in_netX(v1
.val
.net
, v2
.val
.net
);
491 if ((v1
.type
== T_NET
) && (v2
.type
== T_PREFIX_SET
))
492 return trie_match_net(v2
.val
.ti
, v1
.val
.net
);
494 if (v2
.type
!= T_SET
)
497 /* With integrated Quad<->IP implicit conversion */
498 if ((v1
.type
== v2
.val
.t
->from
.type
) ||
499 ((v1
.type
== T_QUAD
) && val_is_ip4(v2
.val
.t
->from
) && val_is_ip4(v2
.val
.t
->to
)))
500 return !!find_tree(v2
.val
.t
, v1
);
502 if (v1
.type
== T_CLIST
)
503 return clist_match_set(v1
.val
.ad
, v2
.val
.t
);
505 if (v1
.type
== T_ECLIST
)
506 return eclist_match_set(v1
.val
.ad
, v2
.val
.t
);
508 if (v1
.type
== T_LCLIST
)
509 return lclist_match_set(v1
.val
.ad
, v2
.val
.t
);
511 if (v1
.type
== T_PATH
)
512 return as_path_match_set(v1
.val
.ad
, v2
.val
.t
);
518 * val_format - format filter value
521 val_format(struct f_val v
, buffer
*buf
)
526 case T_VOID
: buffer_puts(buf
, "(void)"); return;
527 case T_BOOL
: buffer_puts(buf
, v
.val
.i
? "TRUE" : "FALSE"); return;
528 case T_INT
: buffer_print(buf
, "%u", v
.val
.i
); return;
529 case T_STRING
: buffer_print(buf
, "%s", v
.val
.s
); return;
530 case T_IP
: buffer_print(buf
, "%I", v
.val
.ip
); return;
531 case T_NET
: buffer_print(buf
, "%N", v
.val
.net
); return;
532 case T_PAIR
: buffer_print(buf
, "(%u,%u)", v
.val
.i
>> 16, v
.val
.i
& 0xffff); return;
533 case T_QUAD
: buffer_print(buf
, "%R", v
.val
.i
); return;
534 case T_EC
: ec_format(buf2
, v
.val
.ec
); buffer_print(buf
, "%s", buf2
); return;
535 case T_LC
: lc_format(buf2
, v
.val
.lc
); buffer_print(buf
, "%s", buf2
); return;
536 case T_RD
: rd_format(v
.val
.ec
, buf2
, 1024); buffer_print(buf
, "%s", buf2
); return;
537 case T_PREFIX_SET
: trie_format(v
.val
.ti
, buf
); return;
538 case T_SET
: tree_format(v
.val
.t
, buf
); return;
539 case T_ENUM
: buffer_print(buf
, "(enum %x)%u", v
.type
, v
.val
.i
); return;
540 case T_PATH
: as_path_format(v
.val
.ad
, buf2
, 1000); buffer_print(buf
, "(path %s)", buf2
); return;
541 case T_CLIST
: int_set_format(v
.val
.ad
, 1, -1, buf2
, 1000); buffer_print(buf
, "(clist %s)", buf2
); return;
542 case T_ECLIST
: ec_set_format(v
.val
.ad
, -1, buf2
, 1000); buffer_print(buf
, "(eclist %s)", buf2
); return;
543 case T_LCLIST
: lc_set_format(v
.val
.ad
, -1, buf2
, 1000); buffer_print(buf
, "(lclist %s)", buf2
); return;
544 case T_PATH_MASK
: pm_format(v
.val
.path_mask
, buf
); return;
545 default: buffer_print(buf
, "[unknown type %x]", v
.type
); return;
550 static inline void f_cache_eattrs(struct filter_state
*fs
)
552 fs
->eattrs
= &((*fs
->rte
)->attrs
->eattrs
);
555 static inline void f_rte_cow(struct filter_state
*fs
)
557 if (!((*fs
->rte
)->flags
& REF_COW
))
560 *fs
->rte
= rte_cow(*fs
->rte
);
564 * rta_cow - prepare rta for modification by filter
567 f_rta_cow(struct filter_state
*fs
)
569 if (!rta_is_cached((*fs
->rte
)->attrs
))
572 /* Prepare to modify rte */
575 /* Store old rta to free it later, it stores reference from rte_cow() */
576 fs
->old_rta
= (*fs
->rte
)->attrs
;
579 * Get shallow copy of rta. Fields eattrs and nexthops of rta are shared
580 * with fs->old_rta (they will be copied when the cached rta will be obtained
581 * at the end of f_run()), also the lock of hostentry is inherited (we
582 * suppose hostentry is not changed by filters).
584 (*fs
->rte
)->attrs
= rta_do_cow((*fs
->rte
)->attrs
, fs
->pool
);
586 /* Re-cache the ea_list */
591 val_format_str(struct filter_state
*fs
, struct f_val v
) {
595 return lp_strdup(fs
->pool
, b
.start
);
598 static struct tbf rl_runtime_err
= TBF_DEFAULT_LOG_LIMITS
;
603 * @what: filter to interpret
605 * Interpret given tree of filter instructions. This is core function
606 * of filter system and does all the hard work.
608 * Each instruction has 4 fields: code (which is instruction code),
609 * aux (which is extension to instruction code, typically type),
610 * arg1 and arg2 - arguments. Depending on instruction, arguments
611 * are either integers, or pointers to instruction trees. Common
612 * instructions like +, that have two expressions as arguments use
613 * TWOARGS macro to get both of them evaluated.
615 * &f_val structures are copied around, so there are no problems with
619 interpret(struct filter_state
*fs
, struct f_inst
*what
)
622 struct f_val v1
, v2
, v3
, res
= { .type
= T_VOID
}, *vp
;
627 for ( ; what
; what
= what
->next
) {
629 switch(what
->fi_code
) {
630 #define runtime(fmt, ...) do { \
631 if (!(fs->flags & FF_SILENT)) \
632 log_rl(&rl_runtime_err, L_ERR "filters, line %d: " fmt, what->lineno, ##__VA_ARGS__); \
633 res.type = T_RETURN; \
634 res.val.i = F_ERROR; \
638 #define ARG_ANY(n) INTERPRET(v##n, what->a##n.p)
640 #define ARG(n,t) ARG_ANY(n) \
641 if (v##n.type != t) \
642 runtime("Argument %d of instruction %s must be of type %02x, got %02x", \
643 n, f_instruction_name(what->fi_code), t, v##n.type);
645 #define INTERPRET(val, what_) \
646 val = interpret(fs, what_); \
647 if (val.type & T_RETURN) \
651 do { if (!fs->rte) runtime("No route to access"); } while (0)
653 #define ACCESS_EATTRS \
654 do { if (!fs->eattrs) f_cache_eattrs(fs); } while (0)
656 #define BITFIELD_MASK(what_) (1u << EA_BIT_GET(what_->a2.i))
658 #include "filter/f-inst.c"
672 if (!i_same(f1->a##n.p, f2->a##n.p)) \
675 #define ONEARG ARG(1);
676 #define TWOARGS ONEARG; ARG(2);
677 #define THREEARGS TWOARGS; ARG(3);
679 #define A2_SAME if (f1->a2.i != f2->a2.i) return 0;
682 * i_same - function that does real comparing of instruction trees, you should call filter_same from outside
685 i_same(struct f_inst
*f1
, struct f_inst
*f2
)
687 if ((!!f1
) != (!!f2
))
691 if (f1
->aux
!= f2
->aux
)
693 if (f1
->fi_code
!= f2
->fi_code
)
695 if (f1
== f2
) /* It looks strange, but it is possible with call rewriting trickery */
698 switch(f1
->fi_code
) {
699 case FI_ADD
: /* fall through */
705 case FI_PAIR_CONSTRUCT
:
706 case FI_EC_CONSTRUCT
:
710 case FI_LTE
: TWOARGS
; break;
712 case FI_PATHMASK_CONSTRUCT
: if (!pm_same(f1
->a1
.p
, f2
->a1
.p
)) return 0; break;
714 case FI_NOT
: ONEARG
; break;
716 case FI_MATCH
: TWOARGS
; break;
717 case FI_DEFINED
: ONEARG
; break;
718 case FI_TYPE
: ONEARG
; break;
720 case FI_LC_CONSTRUCT
:
727 struct symbol
*s1
, *s2
;
730 if (strcmp(s1
->name
, s2
->name
))
732 if (s1
->class != s2
->class)
741 if (!trie_same(f1
->a2
.p
, f2
->a2
.p
))
746 if (!same_tree(f1
->a2
.p
, f2
->a2
.p
))
751 if (strcmp(f1
->a2
.p
, f2
->a2
.p
))
760 case FI_CONSTANT_INDIRECT
:
761 if (!val_same(* (struct f_val
*) f1
->a1
.p
, * (struct f_val
*) f2
->a1
.p
))
766 if (strcmp((char *) f1
->a2
.p
, (char *) f2
->a2
.p
))
769 case FI_PRINT
: case FI_LENGTH
: ONEARG
; break;
770 case FI_CONDITION
: TWOARGS
; break;
771 case FI_NOP
: case FI_EMPTY
: break;
772 case FI_PRINT_AND_DIE
: ONEARG
; A2_SAME
; break;
774 case FI_RTA_GET
: A2_SAME
; break;
775 case FI_EA_GET
: A2_SAME
; break;
778 case FI_EA_SET
: ONEARG
; A2_SAME
; break;
780 case FI_RETURN
: ONEARG
; break;
781 case FI_ROA_MAXLEN
: ONEARG
; break;
782 case FI_ROA_ASN
: ONEARG
; break;
783 case FI_SADR_SRC
: ONEARG
; break;
784 case FI_IP
: ONEARG
; break;
785 case FI_IS_V4
: ONEARG
; break;
786 case FI_ROUTE_DISTINGUISHER
: ONEARG
; break;
787 case FI_CALL
: /* Call rewriting trickery to avoid exponential behaviour */
789 if (!i_same(f1
->a2
.p
, f2
->a2
.p
))
793 case FI_CLEAR_LOCAL_VARS
: break; /* internal instruction */
794 case FI_SWITCH
: ONEARG
; if (!same_tree(f1
->a2
.p
, f2
->a2
.p
)) return 0; break;
795 case FI_IP_MASK
: TWOARGS
; break;
796 case FI_PATH_PREPEND
: TWOARGS
; break;
797 case FI_CLIST_ADD_DEL
: TWOARGS
; break;
798 case FI_AS_PATH_FIRST
:
799 case FI_AS_PATH_LAST
:
800 case FI_AS_PATH_LAST_NAG
: ONEARG
; break;
803 /* Does not really make sense - ROA check results may change anyway */
804 if (strcmp(((struct f_inst_roa_check
*) f1
)->rtc
->name
,
805 ((struct f_inst_roa_check
*) f2
)->rtc
->name
))
808 case FI_FORMAT
: ONEARG
; break;
809 case FI_ASSERT
: ONEARG
; break;
811 bug( "Unknown instruction %d in same (%c)", f1
->fi_code
, f1
->fi_code
& 0xff);
813 return i_same(f1
->next
, f2
->next
);
/**
 * f_run - run a filter for a route
 * @filter: filter to run
 * @rte: route being filtered, may be modified
 * @tmp_pool: all filter allocations go from this pool
 *
 * If filter needs to modify the route, there are several
 * possibilities. @rte might be read-only (with REF_COW flag), in that
 * case rw copy is obtained by rte_cow() and @rte is replaced. If
 * @rte is originally rw, it may be directly modified (and it is never
 * copied).
 *
 * The returned rte may reuse the (possibly cached, cloned) rta, or
 * (if rta was modified) contains a modified uncached rta, which
 * uses parts allocated from @tmp_pool and parts shared from original
 * rta. There is one exception - if @rte is rw but contains a cached
 * rta and that is modified, rta in returned rte is also cached.
 *
 * Ownership of cached rtas is consistent with rte, i.e.
 * if a new rte is returned, it has its own clone of cached rta
 * (and cached rta of read-only source rte is intact), if rte is
 * modified in place, old cached rta is possibly freed.
 */
841 f_run(struct filter
*filter
, struct rte
**rte
, struct linpool
*tmp_pool
, int flags
)
843 if (filter
== FILTER_ACCEPT
)
846 if (filter
== FILTER_REJECT
)
849 int rte_cow
= ((*rte
)->flags
& REF_COW
);
850 DBG( "Running filter `%s'...", filter
->name
);
852 struct filter_state fs
= {
858 LOG_BUFFER_INIT(fs
.buf
);
860 struct f_val res
= interpret(&fs
, filter
->root
);
864 * Cached rta was modified and fs->rte contains now an uncached one,
865 * sharing some part with the cached one. The cached rta should
866 * be freed (if rte was originally COW, fs->old_rta is a clone
867 * obtained during rte_cow()).
869 * This also implements the exception mentioned in f_run()
870 * description. The reason for this is that rta reuses parts of
871 * fs->old_rta, and these may be freed during rta_free(fs->old_rta).
872 * This is not the problem if rte was COW, because original rte
873 * also holds the same rta.
876 (*fs
.rte
)->attrs
= rta_lookup((*fs
.rte
)->attrs
);
878 rta_free(fs
.old_rta
);
882 if (res
.type
!= T_RETURN
) {
883 if (!(fs
.flags
& FF_SILENT
))
884 log_rl(&rl_runtime_err
, L_ERR
"Filter %s did not return accept nor reject. Make up your mind", filter
->name
);
887 DBG( "done (%u)\n", res
.val
.i
);
891 /* TODO: perhaps we could integrate f_eval(), f_eval_rte() and f_run() */
894 f_eval_rte(struct f_inst
*expr
, struct rte
**rte
, struct linpool
*tmp_pool
)
897 struct filter_state fs
= {
902 LOG_BUFFER_INIT(fs
.buf
);
904 /* Note that in this function we assume that rte->attrs is private / uncached */
905 struct f_val res
= interpret(&fs
, expr
);
911 f_eval(struct f_inst
*expr
, struct linpool
*tmp_pool
)
913 struct filter_state fs
= {
917 LOG_BUFFER_INIT(fs
.buf
);
919 return interpret(&fs
, expr
);
923 f_eval_int(struct f_inst
*expr
)
925 /* Called independently in parse-time to eval expressions */
926 struct f_val res
= f_eval(expr
, cfg_mem
);
928 if (res
.type
!= T_INT
)
929 cf_error("Integer expression expected");
/**
 * filter_same - compare two filters
 * @new: first filter to be compared
 * @old: second filter to be compared, notice that this filter is
 * damaged while comparing.
 *
 * Returns 1 in case filters are same, otherwise 0. If there are
 * underlying bugs, it will rather say 0 on same filters than claim
 * different filters are same.
 */
945 filter_same(struct filter
*new, struct filter
*old
)
947 if (old
== new) /* Handle FILTER_ACCEPT and FILTER_REJECT */
949 if (old
== FILTER_ACCEPT
|| old
== FILTER_REJECT
||
950 new == FILTER_ACCEPT
|| new == FILTER_REJECT
)
952 return i_same(new->root
, old
->root
);