From: Maria Matejka
Date: Thu, 9 Nov 2023 15:34:04 +0000 (+0100)
Subject: Merge branch 'mq-aggregator-for-v3' into thread-next
X-Git-Tag: v3.0.0~337
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=4f2212ccf2f351c388351c8669c85a9f46e33128;p=thirdparty%2Fbird.git

Merge branch 'mq-aggregator-for-v3' into thread-next
---

4f2212ccf2f351c388351c8669c85a9f46e33128
diff --cc nest/mpls.c
index 85fce9961,7c094c8b7..30b101923
--- a/nest/mpls.c
+++ b/nest/mpls.c
@@@ -784,12 -793,21 +792,20 @@@ mpls_get_fec_by_destination(struct mpls
    return fec;
  }
 
+   u32 label = mpls_new_label(m->domain, m->handle, 0);
+ 
+   if (!label)
+   {
-     rta_free(rta);
++    ea_free(rta->l);
+     return NULL;
+   }
+ 
   fec = sl_allocz(mpls_slab(m, 0));
 
   fec->hash = hash;
-  fec->class_id = class_id;
   fec->rta = rta;
-  fec->label = mpls_new_label(m->domain, m->handle, 0);
+  fec->label = label;
   fec->policy = MPLS_POLICY_AGGREGATE;
 
   DBG("New FEC rta %u\n", fec->label);
 
@@@ -989,9 -1016,11 +1010,9 @@@ mpls_apply_fec(rte *r, struct mpls_fec
  }
 
- void
+ int
-mpls_handle_rte(struct mpls_fec_map *m, const net_addr *n, rte *r, linpool *lp, struct mpls_fec **locked_fec)
+mpls_handle_rte(struct mpls_fec_map *m, const net_addr *n, rte *r)
 {
-  ASSERT(!(r->flags & REF_COW));
-
   struct mpls_fec *fec = NULL;
 
   /* Select FEC for route */
@@@ -999,15 -1028,22 +1020,22 @@@
   switch (policy)
   {
   case MPLS_POLICY_NONE:
-    return;
+    return 0;
 
   case MPLS_POLICY_STATIC:;
-    uint label = ea_get_int(r->attrs->eattrs, EA_MPLS_LABEL, 0);
+    uint label = ea_get_int(r->attrs, &ea_gen_mpls_label, 0);
 
     if (label < 16)
-      return;
+      return 0;
 
     fec = mpls_get_fec_by_label(m, label);
+    if (!fec)
+    {
+      log(L_WARN "Static label %u failed for %N from %s",
-          label, n, r->sender->proto->name);
++         label, n, r->sender->req->name);
+      return -1;
+    }
+ 
     mpls_damage_fec(m, fec);
     break;
 
@@@ -1029,7 -1066,15 +1057,15 @@@
   default:
     log(L_WARN "Route %N has invalid MPLS policy %u", n, policy);
-    return;
+    return -1;
+  }
+ 
+  /* Label allocation failure */
+  if (!fec)
+  {
+    log(L_WARN "Label allocation in range %s failed for %N from %s",
-        m->handle->range->name, n, r->sender->proto->name);
++       m->handle->range->name, n, r->sender->req->name);
+    return -1;
   }
 
   /* Temporarily lock FEC */
@@@ -1041,24 -1087,36 +1077,26 @@@
 
   /* Announce MPLS rule for new/updated FEC */
   if (fec->state != MPLS_FEC_CLEAN)
     mpls_announce_fec(m, fec, r->attrs);
+ 
+  return 0;
 }
 
-void
-mpls_handle_rte_cleanup(struct mpls_fec_map *m, struct mpls_fec **locked_fec)
+static inline struct mpls_fec_tmp_lock
+mpls_rte_get_fec_lock(const rte *r)
 {
-  /* Unlock temporarily locked FEC from mpls_handle_rte() */
-  if (*locked_fec)
-  {
-    mpls_unlock_fec(m, *locked_fec);
-    *locked_fec = NULL;
-  }
-}
+  struct mpls_fec_tmp_lock mt = {
+    .m = SKIP_BACK(struct proto, sources, r->src->owner)->mpls_map,
+  };
 
-void
-mpls_rte_insert(net *n UNUSED, rte *r)
-{
-  struct proto *p = r->src->proto;
-  struct mpls_fec_map *m = p->mpls_map;
+  if (!mt.m)
+    return mt;
 
-  uint label = ea_get_int(r->attrs->eattrs, EA_MPLS_LABEL, 0);
+  uint label = ea_get_int(r->attrs, &ea_gen_mpls_label, 0);
 
   if (label < 16)
-    return;
+    return mt;
 
-  struct mpls_fec *fec = mpls_find_fec_by_label(m, label);
-  if (!fec)
-    return;
-
-  mpls_lock_fec(m, fec);
+  mt.fec = mpls_find_fec_by_label(mt.m, label);
+  return mt;
 }
 
 void
diff --cc nest/mpls.h
index 22074126b,1f3d02dc5..7d5bc8d32
--- a/nest/mpls.h
+++ b/nest/mpls.h
@@@ -164,10 -166,12 +164,10 @@@ void mpls_fec_map_free(struct mpls_fec_
 struct mpls_fec *mpls_find_fec_by_label(struct mpls_fec_map *x, u32 label);
 struct mpls_fec *mpls_get_fec_by_label(struct mpls_fec_map *m, u32 label);
 struct mpls_fec *mpls_get_fec_by_net(struct mpls_fec_map *m, const net_addr *net, u32 path_id);
-struct mpls_fec *mpls_get_fec_by_rta(struct mpls_fec_map *m, const rta *src, u32 class_id);
+struct mpls_fec *mpls_get_fec_by_destination(struct mpls_fec_map *m, ea_list *dest);
 void mpls_free_fec(struct mpls_fec_map *x, struct mpls_fec *fec);
- void mpls_handle_rte(struct mpls_fec_map *m, const net_addr *n, rte *r);
-int mpls_handle_rte(struct mpls_fec_map *m, const net_addr *n, rte *r, linpool *lp, struct mpls_fec **locked_fec);
-void mpls_handle_rte_cleanup(struct mpls_fec_map *m, struct mpls_fec **locked_fec);
-void mpls_rte_insert(net *n UNUSED, rte *r);
-void mpls_rte_remove(net *n UNUSED, rte *r);
++int mpls_handle_rte(struct mpls_fec_map *m, const net_addr *n, rte *r);
+void mpls_rte_preimport(rte *new, const rte *old);
 
 struct mpls_show_ranges_cmd {
diff --cc nest/rt-table.c
index e074c0bf9,6916488fe..d24998d4b
--- a/nest/rt-table.c
+++ b/nest/rt-table.c
@@@ -1851,240 -1453,62 +1851,245 @@@ rte_recalculate(struct rtable_private *
   /* The fourth (empty) case - suboptimal route was removed, nothing to do */
 }
 
-  if (new)
+  if (new_stored)
   {
     new->lastmod = current_time();
+    new->id = hmap_first_zero(&table->id_map);
+    hmap_set(&table->id_map, new->id);
+  }
 
-      if (!old)
-      {
-        new->id = hmap_first_zero(&table->id_map);
-        hmap_set(&table->id_map, new->id);
-      }
+  /* Log the route change */
+  if (new_ok)
+    rt_rte_trace_in(D_ROUTES, req, &new_stored->rte, new_stored == net->routes ? "added [best]" : "added");
+  else if (old_ok)
+  {
+    if (old != old_best)
+      rt_rte_trace_in(D_ROUTES, req, old, "removed");
+    else if (net->routes && rte_is_ok(&net->routes->rte))
+      rt_rte_trace_in(D_ROUTES, req, old, "removed [replaced]");
     else
-      new->id = old->id;
+      rt_rte_trace_in(D_ROUTES, req, old, "removed [sole]");
   }
+  else
+    if (req->trace_routes & D_ROUTES)
+      log(L_TRACE "%s > ignored %N %s->%s", req->name, net->n.addr, old ? "filtered" : "none", new ? "filtered" : "none");
 
-  /* Log the route change */
-  if ((c->debug & D_ROUTES) || (p->debug & D_ROUTES))
+  /* Propagate the route change */
+  rte_announce(table, net, new_stored, old_stored,
+               net->routes, old_best_stored);
+
+  return 1;
+}
+
+int
+channel_preimport(struct rt_import_request *req, rte *new, const rte *old)
+{
+  struct channel *c = SKIP_BACK(struct channel, in_req, req);
+
+  if (new && !old)
+    if (CHANNEL_LIMIT_PUSH(c, RX))
+      return 0;
+
+  if (!new && old)
+    CHANNEL_LIMIT_POP(c, RX);
+
+  int new_in = new && !rte_is_filtered(new);
+  int old_in = old && !rte_is_filtered(old);
+
+  int verdict = 1;
+
+  if (new_in && !old_in)
+    if (CHANNEL_LIMIT_PUSH(c, IN))
+      if (c->in_keep & RIK_REJECTED)
+        new->flags |= REF_FILTERED;
+      else
+        verdict = 0;
+
+  if (!new_in && old_in)
+    CHANNEL_LIMIT_POP(c, IN);
+
+  mpls_rte_preimport(new_in ? new : NULL, old_in ? old : NULL);
+
+  return verdict;
+}
+
+void
+rte_update(struct channel *c, const net_addr *n, rte *new, struct rte_src *src)
+{
+  if (!c->in_req.hook)
+  {
+    log(L_WARN "%s.%s: Called rte_update without import hook", c->proto->name, c->name);
+    return;
+  }
+
+  ASSERT(c->channel_state == CS_UP);
+
+  /* The import reloader requires prefilter routes to be the first layer */
+  if (new && (c->in_keep & RIK_PREFILTER))
+    if (ea_is_cached(new->attrs) && !new->attrs->next)
+      new->attrs = ea_clone(new->attrs);
+    else
+      new->attrs = ea_lookup(new->attrs, 0);
+
+  const struct filter *filter = c->in_filter;
+  struct channel_import_stats *stats = &c->import_stats;
+
+  if (new)
   {
-      if (new_ok)
-        rte_trace(c, new, '>', new == net->routes ? "added [best]" : "added");
-      else if (old_ok)
+      new->net = n;
+
+      int fr;
+
+      stats->updates_received++;
+      if ((filter == FILTER_REJECT) ||
+          ((fr = f_run(filter, new, 0)) > F_ACCEPT))
      {
-        if (old != old_best)
-          rte_trace(c, old, '>', "removed");
-        else if (rte_is_ok(net->routes))
-          rte_trace(c, old, '>', "removed [replaced]");
+        stats->updates_filtered++;
+        channel_rte_trace_in(D_FILTERS, c, new, "filtered out");
+
+        if (c->in_keep & RIK_REJECTED)
+          new->flags |= REF_FILTERED;
        else
-          rte_trace(c, old, '>', "removed [sole]");
+          new = NULL;
+      }
+
+      if (new && c->proto->mpls_map)
-        mpls_handle_rte(c->proto->mpls_map, n, new);
++       if (mpls_handle_rte(c->proto->mpls_map, n, new) < 0)
++       {
++         channel_rte_trace_in(D_FILTERS, c, new, "invalid");
++         stats->updates_invalid++;
++         new = NULL;
++       }
+
+      if (new)
+        if (net_is_flow(n))
+          rt_flowspec_resolve_rte(new, c);
+        else
+          rt_next_hop_resolve_rte(new);
+
+      if (new && !rte_validate(c, new))
+      {
+        channel_rte_trace_in(D_FILTERS, c, new, "invalid");
+        stats->updates_invalid++;
+        new = NULL;
      }
   }
+  else
+    stats->withdraws_received++;
 
-  /* Propagate the route change */
-  rte_announce(table, RA_UNDEF, net, new, old, net->routes, old_best);
+  rte_import(&c->in_req, n, new, src);
 
-  if (!net->routes &&
-      (table->gc_counter++ >= table->config->gc_threshold))
-    rt_kick_prune_timer(table);
+  /* Now the route attributes are kept by the in-table cached version
+   * and we may drop the local handle */
+  if (new && (c->in_keep & RIK_PREFILTER))
+  {
+    /* There may be some updates on top of the original attribute block */
+    ea_list *a = new->attrs;
+    while (a->next)
+      a = a->next;
 
-  if (old_ok && p->rte_remove)
-    p->rte_remove(net, old);
-  if (new_ok && p->rte_insert)
-    p->rte_insert(net, new);
+    ea_free(a);
+  }
 
-  if (old)
+}
+
+void
+rte_import(struct rt_import_request *req, const net_addr *n, rte *new, struct rte_src *src)
+{
+  struct rt_import_hook *hook = req->hook;
+  if (!hook)
+  {
+    log(L_WARN "%s: Called rte_import without import hook", req->name);
+    return;
+  }
+
+  RT_LOCKED(hook->table, tab)
+  {
+    net *nn;
+    if (new)
    {
-      if (!new)
-        hmap_clear(&table->id_map, old->id);
+      /* Use the actual struct network, not the dummy one */
+      nn = net_get(tab, n);
+      new->net = nn->n.addr;
+      new->sender = hook;
 
-      rte_free_quick(old);
+      /* Set the stale cycle */
+      new->stale_cycle = hook->stale_set;
+    }
+    else if (!(nn = net_find(tab, n)))
+    {
+      req->hook->stats.withdraws_ignored++;
+      if (req->trace_routes & D_ROUTES)
+        log(L_TRACE "%s > ignored %N withdraw", req->name, n);
+      RT_RETURN(tab);
    }
+
+    /* Recalculate the best route */
+    if (rte_recalculate(tab, hook, nn, new, src))
+      ev_send(req->list, &hook->announce_event);
+  }
 }
 
-static int rte_update_nest_cnt;		/* Nesting counter to allow recursive updates */
+/* Check rtable for best route to given net whether it would be exported to p */
+int
+rt_examine(rtable *tp, net_addr *a, struct channel *c, const struct filter *filter)
+{
+  rte rt = {};
 
-static inline void
-rte_update_lock(void)
+  RT_LOCKED(tp, t)
+  {
+    net *n = net_find(t, a);
+    if (n)
+      rt = RTE_COPY_VALID(n->routes);
+  }
+
+  if (!rt.src)
+    return 0;
+
+  int v = c->proto->preexport ? c->proto->preexport(c, &rt) : 0;
+  if (v == RIC_PROCESS)
+    v = (f_run(filter, &rt, FF_SILENT) <= F_ACCEPT);
+
+  return v > 0;
+}
+
+static void
+rt_table_export_done(void *hh)
+{
+  struct rt_table_export_hook *hook = hh;
+  struct rt_export_request *req = hook->h.req;
+  void (*stopped)(struct rt_export_request *) = hook->h.stopped;
+  rtable *t = SKIP_BACK(rtable, priv.exporter, hook->table);
+
+  RT_LOCKED(t, tab)
+  {
+    DBG("Export hook %p in table %s finished uc=%u\n", hook, tab->name, tab->use_count);
+
+    /* Drop pending exports */
+    rt_export_used(&tab->exporter, hook->h.req->name, "stopped");
+
+    /* Do the common code; this frees the hook */
+    rt_export_stopped(&hook->h);
+  }
+
+  /* Report the channel as stopped. */
+  CALL(stopped, req);
+
+  /* Unlock the table; this may free it */
+  rt_unlock_table(t);
+}
+
+void
+rt_export_stopped(struct rt_export_hook *hook)
 {
-  rte_update_nest_cnt++;
+  /* Unlink from the request */
+  hook->req->hook = NULL;
+
+  /* Unlist */
+  rem_node(&hook->n);
+
+  /* Free the hook itself together with its pool */
+  rp_free(hook->pool);
 }
 
 static inline void