int set_to_intervals(const struct set *set, struct expr *init, bool add)
{
- struct expr *i, *n, *prev = NULL, *elem, *newelem = NULL, *root, *expr;
+ struct expr *i, *n, *prev = NULL, *elem, *root, *expr;
LIST_HEAD(intervals);
- uint32_t flags;
- mpz_t p, q;
-
- mpz_init2(p, set->key->len);
- mpz_init2(q, set->key->len);
+ mpz_t p;
list_for_each_entry_safe(i, n, &init->expressions, list) {
- flags = 0;
-
elem = interval_expr_key(i);
if (elem->key->etype == EXPR_SET_ELEM_CATCHALL)
continue;
- if (!prev && segtree_needs_first_segment(set, init, add) &&
+ if (prev)
+ break;
+
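+ /* prepend an end element at key 0 when the set needs a first segment
+ * and the first interval does not start at 0. */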
+ if (segtree_needs_first_segment(set, init, add) &&
mpz_cmp_ui(elem->key->range.low, 0)) {
+ mpz_init2(p, set->key->len);
mpz_set_ui(p, 0);
- expr = constant_expr_alloc(&internal_location,
- set->key->dtype,
- set->key->byteorder,
- set->key->len, NULL);
- mpz_set(expr->value, p);
+ expr = constant_range_expr_alloc(&internal_location,
+ set->key->dtype,
+ set->key->byteorder,
+ set->key->len, p, p);
+ mpz_clear(p);
+
root = set_elem_expr_alloc(&internal_location, expr);
if (i->etype == EXPR_MAPPING) {
root = mapping_expr_alloc(&internal_location,
root,
expr_get(i->right));
}
root->flags |= EXPR_F_INTERVAL_END;
list_add(&root->list, &intervals);
- init->size++;
+ break;
}
+ prev = i;
+ }
- if (newelem) {
- mpz_set(p, interval_expr_key(newelem)->key->value);
- if (set->key->byteorder == BYTEORDER_HOST_ENDIAN)
- mpz_switch_byteorder(p, set->key->len / BITS_PER_BYTE);
+ list_splice_init(&intervals, &init->expressions);
- if (!(set->flags & NFT_SET_ANONYMOUS) ||
- mpz_cmp(p, elem->key->range.low) != 0)
- list_add_tail(&newelem->list, &intervals);
- else
- expr_free(newelem);
- }
- newelem = NULL;
-
- if (mpz_scan0(elem->key->range.high, 0) != set->key->len) {
- mpz_add_ui(p, elem->key->range.high, 1);
- expr = constant_expr_alloc(&elem->key->location, set->key->dtype,
- set->key->byteorder, set->key->len,
- NULL);
- mpz_set(expr->value, p);
- if (set->key->byteorder == BYTEORDER_HOST_ENDIAN)
- mpz_switch_byteorder(expr->value, set->key->len / BITS_PER_BYTE);
-
- newelem = set_elem_expr_alloc(&expr->location, expr);
- if (i->etype == EXPR_MAPPING) {
- newelem = mapping_expr_alloc(&expr->location,
- newelem,
- expr_get(i->right));
- }
- newelem->flags |= EXPR_F_INTERVAL_END;
- } else {
- flags = EXPR_F_INTERVAL_OPEN;
- }
+ return 0;
+}
- expr = constant_expr_alloc(&elem->key->location, set->key->dtype,
- set->key->byteorder, set->key->len, NULL);
+/* Shallow copy; this only works for the stateful statements supported in set elements. */
+static void set_elem_stmt_clone(struct expr *dst, const struct expr *src)
+{
+ struct stmt *stmt, *nstmt;
- mpz_set(expr->value, elem->key->range.low);
- if (set->key->byteorder == BYTEORDER_HOST_ENDIAN)
- mpz_switch_byteorder(expr->value, set->key->len / BITS_PER_BYTE);
+ list_for_each_entry(stmt, &src->stmt_list, list) {
+ nstmt = xzalloc(sizeof(*stmt));
+ *nstmt = *stmt;
+ list_add_tail(&nstmt->list, &dst->stmt_list);
+ }
+}
- expr_free(elem->key);
- elem->key = expr;
- i->flags |= flags;
- init->size++;
- list_move_tail(&i->list, &intervals);
+static void set_elem_expr_copy(struct expr *dst, const struct expr *src)
+{
+ if (src->comment)
+ dst->comment = xstrdup(src->comment);
+ if (src->timeout)
+ dst->timeout = src->timeout;
+ if (src->expiration)
+ dst->expiration = src->expiration;
+
+ set_elem_stmt_clone(dst, src);
+}
- prev = i;
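+/* Return the key of a plain set element, or of the element side of a mapping. */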
+static struct expr *setelem_key(struct expr *expr)
+{
+ struct expr *key;
+
+ switch (expr->etype) {
+ case EXPR_MAPPING:
+ key = expr->left->key;
+ break;
+ case EXPR_SET_ELEM:
+ key = expr->key;
+ break;
+ default:
+ BUG("unhandled expression type %d\n", expr->etype);
+ return NULL;
}
- if (newelem)
- list_add_tail(&newelem->list, &intervals);
+ return key;
+}
- list_splice_init(&intervals, &init->expressions);
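+/* Expand one range element into a start element and, when needed, an end
+ * element flagged EXPR_F_INTERVAL_END whose key is range high + 1. The end
+ * element is omitted when the next interval is adjacent, when the range is
+ * open-ended, or for the all-zero end marker prepended by set_to_intervals().
+ */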
+int setelem_to_interval(const struct set *set, struct expr *elem,
+ struct expr *next_elem, struct list_head *intervals)
+{
+ struct expr *key, *next_key = NULL, *low, *high;
+ bool adjacent = false;
+
+ key = setelem_key(elem);
+ if (key->etype == EXPR_SET_ELEM_CATCHALL)
+ return 0;
+
+ if (next_elem) {
+ next_key = setelem_key(next_elem);
+ if (next_key->etype == EXPR_SET_ELEM_CATCHALL)
+ next_key = NULL;
+ }
+
+ assert(key->etype == EXPR_RANGE_VALUE);
+ assert(!next_key || next_key->etype == EXPR_RANGE_VALUE);
+
+ /* skip the end element for adjacent intervals in anonymous sets. */
+ if (!(elem->flags & EXPR_F_INTERVAL_END) && next_key) {
+ mpz_t p;
+
+ mpz_init2(p, set->key->len);
+ mpz_add_ui(p, key->range.high, 1);
+
+ if (!mpz_cmp(p, next_key->range.low))
+ adjacent = true;
+
+ mpz_clear(p);
+ }
+
+ low = constant_expr_alloc(&key->location, set->key->dtype,
+ set->key->byteorder, set->key->len, NULL);
+
+ mpz_set(low->value, key->range.low);
+ if (set->key->byteorder == BYTEORDER_HOST_ENDIAN)
+ mpz_switch_byteorder(low->value, set->key->len / BITS_PER_BYTE);
+
+ low = set_elem_expr_alloc(&key->location, low);
+ set_elem_expr_copy(low, interval_expr_key(elem));
+
+ if (elem->etype == EXPR_MAPPING)
+ low = mapping_expr_alloc(&elem->location,
+ low, expr_get(elem->right));
+
+ list_add_tail(&low->list, intervals);
+
+ if (adjacent)
+ return 0;
+ else if (!mpz_cmp_ui(key->range.low, 0) && (elem->flags & EXPR_F_INTERVAL_END)) {
+ low->flags |= EXPR_F_INTERVAL_END;
+ return 0;
+ } else if (mpz_scan0(key->range.high, 0) == set->key->len) {
+ low->flags |= EXPR_F_INTERVAL_OPEN;
+ return 0;
+ }
+
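+ /* close the interval with an end element carrying range high + 1. */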
+ high = constant_expr_alloc(&key->location, set->key->dtype,
+ set->key->byteorder, set->key->len,
+ NULL);
+ mpz_set(high->value, key->range.high);
+ mpz_add_ui(high->value, high->value, 1);
+ if (set->key->byteorder == BYTEORDER_HOST_ENDIAN)
+ mpz_switch_byteorder(high->value, set->key->len / BITS_PER_BYTE);
+
+ high = set_elem_expr_alloc(&key->location, high);
- mpz_clear(p);
- mpz_clear(q);
+ high->flags |= EXPR_F_INTERVAL_END;
+ list_add_tail(&high->list, intervals);
return 0;
}
#include <mnl.h>
#include <cmd.h>
+#include <intervals.h>
#include <net/if.h>
#include <sys/socket.h>
#include <arpa/inet.h>
fprintf(fp, "\n");
}
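+/* Convert one range element into up to two nftnl set elements: the start
+ * element is returned, the end element (if any) is passed back via
+ * *nlse_high, which is NULL when a single element is enough.
+ */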
+static struct nftnl_set_elem *
+alloc_nftnl_setelem_interval(const struct set *set, const struct expr *init,
+ struct expr *elem, struct expr *next_elem,
+ struct nftnl_set_elem **nlse_high)
+{
+ struct nftnl_set_elem *nlse[2] = {};
+ LIST_HEAD(interval_list);
+ struct expr *expr, *next;
+ int i = 0;
+
+ if (setelem_to_interval(set, elem, next_elem, &interval_list) < 0)
+ memory_allocation_error();
+
+ if (list_empty(&interval_list)) {
+ *nlse_high = NULL;
+ nlse[i++] = alloc_nftnl_setelem(init, elem);
+ return nlse[0];
+ }
+
+ list_for_each_entry_safe(expr, next, &interval_list, list) {
+ nlse[i++] = alloc_nftnl_setelem(init, expr);
+ list_del(&expr->list);
+ expr_free(expr);
+ }
+ *nlse_high = nlse[1];
+
+ return nlse[0];
+}
+
static int mnl_nft_setelem_batch(const struct nftnl_set *nls, struct cmd *cmd,
struct nftnl_batch *batch,
enum nf_tables_msg_types msg_type,
unsigned int flags, uint32_t *seqnum,
- const struct expr *init,
+ const struct set *set, const struct expr *init,
struct netlink_ctx *ctx)
{
+ struct nftnl_set_elem *nlse, *nlse_high = NULL;
+ struct expr *expr = NULL, *next;
struct nlattr *nest1, *nest2;
- struct nftnl_set_elem *nlse;
struct nlmsghdr *nlh;
- struct expr *expr = NULL;
int i = 0;
if (msg_type == NFT_MSG_NEWSETELEM)
assert(expr);
nest1 = mnl_attr_nest_start(nlh, NFTA_SET_ELEM_LIST_ELEMENTS);
list_for_each_entry_from(expr, &init->expressions, list) {
- nlse = alloc_nftnl_setelem(init, expr);
+
+ if (set_is_non_concat_range(set)) {
+ if (set_is_anonymous(set->flags) &&
+ !list_is_last(&expr->list, &init->expressions))
+ next = list_next_entry(expr, list);
+ else
+ next = NULL;
+
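+ /* the first visit builds both halves and emits the low one;
+ * the second visit emits the pending high element. */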
+ if (!nlse_high) {
+ nlse = alloc_nftnl_setelem_interval(set, init, expr, next, &nlse_high);
+ } else {
+ nlse = nlse_high;
+ nlse_high = NULL;
+ }
+ } else {
+ nlse = alloc_nftnl_setelem(init, expr);
+ }
cmd_add_loc(cmd, nlh, &expr->location);
+
+ /* stay on this element; its range-high half still needs to be added. */
+ if (nlse_high)
+ expr = list_prev_entry(expr, list);
+
nest2 = mnl_attr_nest_start(nlh, ++i);
nftnl_set_elem_nlmsg_build_payload(nlh, nlse);
mnl_attr_nest_end(nlh, nest2);
netlink_dump_setelem(nlse, ctx);
nftnl_set_elem_free(nlse);
if (mnl_nft_attr_nest_overflow(nlh, nest1, nest2)) {
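+ /* do not carry a pending range-high element across the message split. */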
+ if (nlse_high) {
+ nftnl_set_elem_free(nlse_high);
+ nlse_high = NULL;
+ }
mnl_attr_nest_end(nlh, nest1);
mnl_nft_batch_continue(batch);
mnl_seqnum_inc(seqnum);
netlink_dump_set(nls, ctx);
err = mnl_nft_setelem_batch(nls, cmd, ctx->batch, NFT_MSG_NEWSETELEM,
- flags, &ctx->seqnum, expr, ctx);
+ flags, &ctx->seqnum, set, expr, ctx);
nftnl_set_free(nls);
return err;
}
int mnl_nft_setelem_del(struct netlink_ctx *ctx, struct cmd *cmd,
- const struct handle *h, const struct expr *init)
+ const struct handle *h, const struct set *set,
+ const struct expr *init)
{
enum nf_tables_msg_types msg_type = NFT_MSG_DELSETELEM;
struct nftnl_set *nls;
msg_type = NFT_MSG_DESTROYSETELEM;
err = mnl_nft_setelem_batch(nls, cmd, ctx->batch, msg_type, 0,
- &ctx->seqnum, init, ctx);
+ &ctx->seqnum, set, init, ctx);
nftnl_set_free(nls);
return err;