netindex_hash *netindex; /* Table for net <-> id conversion */
void (*stopped)(struct rt_exporter *); /* Callback when exporter can stop */
void (*cleanup_done)(struct rt_exporter *, u64 end); /* Callback when cleanup has been done */
- struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, u32, bool (*)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *, const struct rt_export_item *first);
+ struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, u32, struct bmap *, bool (*)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *, const struct rt_export_item *first);
void (*feed_cleanup)(struct rt_exporter *, struct rt_export_feeder *);
};
void rt_feeder_unsubscribe(struct rt_export_feeder *);
void rt_export_refeed_feeder(struct rt_export_feeder *, struct rt_feeding_request *);
-struct rt_export_feed *rt_export_next_feed(struct rt_export_feeder *);
+struct rt_export_feed *rt_export_next_feed(struct rt_export_feeder *, struct bmap *seen);
#define RT_FEED_WALK(_feeder, _f) \
- for (const struct rt_export_feed *_f; _f = rt_export_next_feed(_feeder); ) \
+ for (const struct rt_export_feed *_f; _f = rt_export_next_feed(_feeder, NULL); ) \
static inline bool rt_export_feed_active(struct rt_export_feeder *f)
{ return !!atomic_load_explicit(&f->exporter, memory_order_acquire); }
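/*
 * Usage sketch, illustration only (not part of this patch): a consumer
 * driving the feeder directly with its own seen-map, so that repeated
 * feeds of the same nets skip exports already handled. RT_FEED_WALK keeps
 * passing NULL and therefore stays unfiltered. The bmap_set() call and the
 * count_exports field are assumed from the lib bitmap API and the feed
 * layout used below.
 */
static void
walk_unseen_feeds(struct rt_export_feeder *f, struct bmap *seen)
{
  for (struct rt_export_feed *feed; feed = rt_export_next_feed(f, seen); )
  {
    /* Exports whose seq bit is already set in *seen have been filtered out */
    for (uint i = 0; i < feed->count_exports; i++)
      bmap_set(seen, feed->exports[i]);	/* remember them as processed */
  }
}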
rtex_trace(r, D_ROUTES, "Export drained");
return NULL;
}
- else if (feed = rt_export_next_feed(&r->feeder))
+ else if (feed = rt_export_next_feed(&r->feeder, &r->seq_map))
{
/* Feeding more */
rtex_trace(r, D_ROUTES, "Feeding %N", feed->ni->addr);
if (r->feeder.domain.rtable)
{
LOCK_DOMAIN(rtable, r->feeder.domain);
- feed = e->feed_net(e, NULL, ni->index, NULL, NULL, update);
+ feed = e->feed_net(e, NULL, ni->index, &r->seq_map, NULL, NULL, update);
UNLOCK_DOMAIN(rtable, r->feeder.domain);
}
else
{
RCU_ANCHOR(u);
- feed = e->feed_net(e, u, ni->index, NULL, NULL, update);
+ feed = e->feed_net(e, u, ni->index, &r->seq_map, NULL, NULL, update);
}
ASSERT_DIE(feed && (feed != &rt_feed_index_out_of_range));
}
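/*
 * Intended pairing, sketched under the assumption that r is the
 * struct rt_export_request holding seq_map and that rt_export_processed()
 * (or an equivalent helper) is what sets bits in it: once the requestor has
 * handled an export delivered in this feed, it marks its sequence number,
 * and the next feed_net() call with &r->seq_map drops that entry instead of
 * re-announcing it.
 */
static void
mark_feed_processed(struct rt_export_request *r, const struct rt_export_feed *feed)
{
  for (uint i = 0; i < feed->count_exports; i++)
    rt_export_processed(r, feed->exports[i]);	/* effectively bmap_set(&r->seq_map, seq) */
}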
static struct rt_export_feed *
-rt_export_get_next_feed(struct rt_export_feeder *f, struct rcu_unwinder *u)
+rt_export_get_next_feed(struct rt_export_feeder *f, struct rcu_unwinder *u, struct bmap *seen)
{
for (uint retry = 0; retry < (u ? 1024 : ~0U); retry++)
{
return NULL;
}
- struct rt_export_feed *feed = e->feed_net(e, u, f->feed_index,
+ struct rt_export_feed *feed = e->feed_net(e, u, f->feed_index, seen,
rt_net_is_feeding_feeder, f, NULL);
if (feed == &rt_feed_index_out_of_range)
{
}
struct rt_export_feed *
-rt_export_next_feed(struct rt_export_feeder *f)
+rt_export_next_feed(struct rt_export_feeder *f, struct bmap *seen)
{
ASSERT_DIE(f);
if (f->domain.rtable)
{
LOCK_DOMAIN(rtable, f->domain);
- feed = rt_export_get_next_feed(f, NULL);
+ feed = rt_export_get_next_feed(f, NULL, seen);
UNLOCK_DOMAIN(rtable, f->domain);
}
else
{
RCU_ANCHOR(u);
- feed = rt_export_get_next_feed(f, u);
+ feed = rt_export_get_next_feed(f, u, seen);
}
if (feed)
f->feeding = f->feed_pending;
f->feed_pending = NULL;
- return rt_export_next_feed(f);
+ return rt_export_next_feed(f, seen);
}
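/*
 * Refeed round trip, sketched from the caller's side: a request queued via
 * rt_export_refeed_feeder() presumably lands on f->feed_pending; when the
 * current walk runs dry, rt_export_next_feed() promotes the pending
 * requests to f->feeding and restarts the walk, still passing the caller's
 * seen-map so previously processed exports stay filtered.
 */
static void
request_refeed(struct rt_export_feeder *f, struct rt_feeding_request *rfr)
{
  rt_export_refeed_feeder(f, rfr);
  /* the refeed is served by subsequent rt_export_next_feed(f, seen) calls */
}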
static void
}
static struct rt_export_feed *
-rt_net_feed_index(struct rtable_reading *tr, net *n, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_pending_export *first)
+rt_net_feed_index(struct rtable_reading *tr, net *n, struct bmap *seen, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_pending_export *first)
{
/* Get the feed itself. It may change under our hands, though. */
struct rt_pending_export *first_in_net, *last_in_net;
uint ocnt = 0;
for (const struct rt_pending_export *rpe = first; rpe;
rpe = atomic_load_explicit(&rpe->next, memory_order_acquire))
-    {
-      ecnt++;
-      if (rpe->it.old)
-        ocnt++;
-    }
+    if (!seen || !bmap_test(seen, rpe->it.seq))
+    {
+      ecnt++;
+      if (rpe->it.old)
+        ocnt++;
+    }
if (ecnt) {
const net_addr *a = (first->it.new ?: first->it.old)->net;
rpe = atomic_load_explicit(&rpe->next, memory_order_acquire))
if (e >= ecnt)
RT_READ_RETRY(tr);
- else
+ else if (!seen || !bmap_test(seen, rpe->it.seq))
{
feed->exports[e++] = rpe->it.seq;
}
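/*
 * Note on the two passes above: the counting pass and the filling pass must
 * apply exactly the same seen-filter, otherwise the fill either overruns
 * ecnt and keeps hitting RT_READ_RETRY, or leaves the allocated feed partly
 * unused. A shared predicate (hypothetical, not part of this patch) would
 * make that invariant explicit:
 */
static inline bool
rpe_unseen(const struct rt_pending_export *rpe, struct bmap *seen)
{
  return !seen || !bmap_test(seen, rpe->it.seq);
}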
static struct rt_export_feed *
-rt_net_feed_internal(struct rtable_reading *tr, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_pending_export *first)
+rt_net_feed_internal(struct rtable_reading *tr, u32 index, struct bmap *seen, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_pending_export *first)
{
net *n = rt_net_feed_get_net(tr, index);
if (!n)
return &rt_feed_index_out_of_range;
- return rt_net_feed_index(tr, n, prefilter, f, first);
+ return rt_net_feed_index(tr, n, seen, prefilter, f, first);
}
struct rt_export_feed *
{
RT_READ(t, tr);
const struct netindex *ni = net_find_index(tr->t->netindex, a);
- return ni ? rt_net_feed_internal(tr, ni->index, NULL, NULL, first) : NULL;
+ return ni ? rt_net_feed_internal(tr, ni->index, NULL, NULL, NULL, first) : NULL;
}
static struct rt_export_feed *
-rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
+rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, struct bmap *seen, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
{
RT_READ_ANCHORED(SKIP_BACK(rtable, export_all, e), tr, u);
- return rt_net_feed_internal(tr, index, prefilter, f, SKIP_BACK(const struct rt_pending_export, it, _first));
+ return rt_net_feed_internal(tr, index, seen, prefilter, f, SKIP_BACK(const struct rt_pending_export, it, _first));
}
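/*
 * Assumed wiring (not shown in these hunks): the table's two exporters plug
 * rt_feed_net_all() above and rt_feed_net_best() below into the feed_net
 * hook whose signature was extended at the top, which is how a request's
 * seen-map reaches them. The setup helper here is hypothetical.
 */
static void
rt_setup_export_hooks(rtable *t)
{
  t->export_all.feed_net = rt_feed_net_all;
  t->export_best.feed_net = rt_feed_net_best;
}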
rte
}
static struct rt_export_feed *
-rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
+rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, struct bmap *seen, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
{
SKIP_BACK_DECLARE(rtable, t, export_best, e);
SKIP_BACK_DECLARE(const struct rt_pending_export, first, it, _first);
uint ecnt = 0, ocnt = 0;
for (const struct rt_pending_export *rpe = first; rpe;
rpe = atomic_load_explicit(&rpe->next, memory_order_acquire))
-    {
-      ecnt++;
-      if (rpe->it.old && (!best || (rpe->it.old != &best->rte)))
-        ocnt++;
-    }
+    if (!seen || !bmap_test(seen, rpe->it.seq))
+    {
+      ecnt++;
+      if (rpe->it.old && (!best || (rpe->it.old != &best->rte)))
+        ocnt++;
+    }
if (ecnt) {
const net_addr *a = (first->it.new ?: first->it.old)->net;
rpe = atomic_load_explicit(&rpe->next, memory_order_acquire))
if (e >= ecnt)
RT_READ_RETRY(tr);
- else
+ else if (!seen || !bmap_test(seen, rpe->it.seq))
{
feed->exports[e++] = rpe->it.seq;
if (rpe->it.old && (!best || (rpe->it.old != &best->rte)))