/* TODO: use the information about what roa has changed */
settle_kick(&s->settle, s->c->proto->loop);
- rpe_mark_seen_all(req->hook, first, NULL);
+ rpe_mark_seen_all(req->hook, first, NULL, NULL);
}
static void
rt_show_net_export_bulk(struct rt_export_request *req, const net_addr *n,
- struct rt_pending_export *rpe UNUSED, const rte **feed, uint count)
+ struct rt_pending_export *first UNUSED, struct rt_pending_export *last UNUSED,
+ const rte **feed, uint count)
{
struct rt_show_data *d = SKIP_BACK(struct rt_show_data, req, req);
return rt_show_net(d, n, feed, count);
}
void
-rt_notify_accepted(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *first,
+rt_notify_accepted(struct rt_export_request *req, const net_addr *n,
+ struct rt_pending_export *first, struct rt_pending_export *last,
const rte **feed, uint count)
{
struct channel *c = SKIP_BACK(struct channel, out_req, req);
old_best = &rpe->old->rte;
}
}
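+ /* Stop once the last pending export of this batch has been processed */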
+ if (rpe == last)
+ break;
}
/* Nothing to export */
}
void
-rt_notify_merged(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *first,
+rt_notify_merged(struct rt_export_request *req, const net_addr *n,
+ struct rt_pending_export *first, struct rt_pending_export *last,
const rte **feed, uint count)
{
struct channel *c = SKIP_BACK(struct channel, out_req, req);
old_best = &rpe->old->rte;
}
}
+ if (rpe == last)
+ break;
}
/* Prepare new merged route */
}
void
-rt_feed_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, const rte **feed, uint count)
+rt_feed_any(struct rt_export_request *req, const net_addr *net,
+ struct rt_pending_export *first, struct rt_pending_export *last,
+ const rte **feed, uint count)
{
struct channel *c = SKIP_BACK(struct channel, out_req, req);
rte n0 = *feed[i];
rt_notify_basic(c, net, &n0, NULL);
}
+
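+ /* Mark the pending exports covered by this feed as seen, up to and including last */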
+ RPE_WALK(first, rpe, NULL)
+ {
+ channel_rpe_mark_seen(req, rpe);
+ if (rpe == last)
+ break;
+ }
}
void
{
net *net = SKIP_BACK(struct network, n.addr, (net_addr (*)[0]) n);
RT_LOCK(tab);
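+ /* Remember the current last pending export while the table is still locked */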
+ struct rt_pending_export *last = net->last;
uint count = rte_feed_count(net);
const rte **feed = NULL;
if (count)
rte_feed_obtain(net, feed, count);
}
RT_UNLOCK(tab);
- hook->req->export_bulk(hook->req, n, rpe, feed, count);
+ hook->req->export_bulk(hook->req, n, rpe, last, feed, count);
}
else
bug("Export request must always provide an export method");
|| !trie_match_net(dst->flowspec_trie, net))
{
RT_UNLOCK(dst_pub);
- rpe_mark_seen_all(req->hook, first, NULL);
+ rpe_mark_seen_all(req->hook, first, NULL, NULL);
return;
}
struct rt_pending_export *rpe;
struct {
const rte **feed;
- uint *start;
+ struct rt_feed_block_aux {
+ struct rt_pending_export *first, *last;
+ uint start;
+ } *aux;
};
};
} rt_feed_block;
if (!b->cnt)
{
b->feed = tmp_alloc(sizeof(rte *) * MAX(MAX_FEED_BLOCK, cnt));
- b->start = tmp_alloc(sizeof(uint) * ((cnt >= MAX_FEED_BLOCK) ? 2 : (MAX_FEED_BLOCK + 2 - cnt)));
+
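+ /* One aux entry per net fed in this block plus the trailing sentinel written at flush time */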
+ uint aux_block_size = (cnt >= MAX_FEED_BLOCK) ? 2 : (MAX_FEED_BLOCK + 2 - cnt);
+ b->aux = tmp_alloc(sizeof(struct rt_feed_block_aux) * aux_block_size);
}
rte_feed_obtain(n, &b->feed[b->cnt], cnt);
- b->start[b->pos++] = b->cnt;
+
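+ /* Record where this feed starts in the block and which pending exports it covers */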
+ b->aux[b->pos++] = (struct rt_feed_block_aux) {
+ .start = b->cnt,
+ .first = n->first,
+ .last = n->last,
+ };
+
b->cnt += cnt;
}
else if (b->pos == MAX_FEED_BLOCK)
}
}
- rpe_mark_seen_all(&c->h, n->first, NULL);
return 1;
}
if (c->h.req->export_bulk)
{
- b->start[b->pos] = b->cnt;
+ b->aux[b->pos].start = b->cnt;
for (uint p = 0; p < b->pos; p++)
{
- const rte **feed = &b->feed[b->start[p]];
- c->h.req->export_bulk(c->h.req, feed[0]->net, NULL, feed, b->start[p+1] - b->start[p]);
+ struct rt_feed_block_aux *aux = &b->aux[p];
+ const rte **feed = &b->feed[aux->start];
+
+ c->h.req->export_bulk(c->h.req, feed[0]->net, aux->first, aux->last, feed, (aux+1)->start - aux->start);
}
}
else
* Import table
*/
-void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe UNUSED, const rte **feed, uint count)
+void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net,
+ struct rt_pending_export *first, struct rt_pending_export *last,
+ const rte **feed, uint count)
{
struct channel *c = SKIP_BACK(struct channel, reload_req, req);
/* And reload the route */
rte_update(c, net, &new, new.src);
}
+
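+ /* The feed has been reimported above; mark its pending exports as seen */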
+ rpe_mark_seen_all(req->hook, first, last, NULL);
}
RT_LOCKED((rtable *) hc->update.data, tab)
if (ev_active(&hc->update) || !trie_match_net(hc->trie, net))
{
- rpe_mark_seen_all(req->hook, first, NULL);
+ rpe_mark_seen_all(req->hook, first, NULL, NULL);
interested = 0;
}
* and for RA_ANY, both are set to accommodate feeding all routes but receiving single changes
*/
void (*export_one)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
- void (*export_bulk)(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, const rte **feed, uint count);
+ void (*export_bulk)(struct rt_export_request *req, const net_addr *net,
+ struct rt_pending_export *rpe, struct rt_pending_export *last,
+ const rte **feed, uint count);
void (*dump_req)(struct rt_export_request *req);
void (*log_state_change)(struct rt_export_request *req, u8);
/* Mark the pending export processed */
void rpe_mark_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
-#define rpe_mark_seen_all(hook, first, src) \
- RPE_WALK((first), _rpe, (src)) rpe_mark_seen((hook), _rpe)
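+/* Mark all pending exports from first up to last (or the whole chain when last is NULL) as seen */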
+#define rpe_mark_seen_all(hook, first, last, src) do { \
+ RPE_WALK((first), _rpe, (src)) { \
+ rpe_mark_seen((hook), _rpe); \
+ if (_rpe == last) break; \
+ }} while (0)
/* Get pending export seen status */
int rpe_get_seen(struct rt_export_hook *hook, struct rt_pending_export *rpe);
int channel_preimport(struct rt_import_request *req, rte *new, rte *old);
-void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, const rte **feed, uint count);
+void channel_reload_export_bulk(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
void rt_notify_optimal(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
void rt_notify_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe);
-void rt_feed_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, const rte **feed, uint count);
-void rt_notify_accepted(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, const rte **feed, uint count);
-void rt_notify_merged(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *rpe, const rte **feed, uint count);
+void rt_feed_any(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
+void rt_notify_accepted(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
+void rt_notify_merged(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
if (hook->h.req->export_bulk)
{
const rte *feed = &es.rte;
- hook->h.req->export_bulk(hook->h.req, n->net, &rpe, &feed, 1);
+ hook->h.req->export_bulk(hook->h.req, n->net, &rpe, &rpe, &feed, 1);
}
else if (hook->h.req->export_one)
hook->h.req->export_one(hook->h.req, n->net, &rpe);
}
void
-bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *rpe UNUSED, const rte **feed, uint count)
+bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n,
+ struct rt_pending_export *first, struct rt_pending_export *last,
+ const rte **feed, uint count)
{
struct bgp_channel *c = SKIP_BACK(struct bgp_channel, stale_feed, req);
struct rt_import_hook *irh = c->c.in_req.hook;
/* Restore the memory state */
lp_restore(tmp_linpool, &tmpp);
}
+
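+ /* This batch of stale routes has been processed; mark its pending exports as seen */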
+ rpe_mark_seen_all(req->hook, first, last, NULL);
}
int bgp_rte_better(const rte *, const rte *);
int bgp_rte_mergable(const rte *pri, const rte *sec);
int bgp_rte_recalculate(struct rtable_private *table, net *net, rte *new, rte *old, rte *old_best);
-void bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *rpe UNUSED, const rte **feed, uint count);
+void bgp_rte_modify_stale(struct rt_export_request *req, const net_addr *n, struct rt_pending_export *first, struct rt_pending_export *last, const rte **feed, uint count);
u32 bgp_rte_igp_metric(const rte *);
void bgp_rt_notify(struct proto *P, struct channel *C, const net_addr *n, rte *new, const rte *old);
int bgp_preexport(struct channel *, struct rte *);