#define TLIST_WANT_ADD_TAIL
/* Feeding itself */
- union {
- u64 feed_index; /* Index of the feed in progress */
- struct rt_feeding_index *feed_index_ptr; /* Use this when u64 is not enough */
- };
+ u32 feed_index; /* Index of the feed in progress (position in the netindex space) */
struct rt_feeding_request {
struct rt_feeding_request *next; /* Next in request chain */
void (*done)(struct rt_feeding_request *); /* Called when this refeed finishes */
TLIST_LIST(rt_export_feeder) feeders; /* List of active feeder structures */
_Bool _Atomic feeders_lock; /* Spinlock for the above list */
u8 trace_routes; /* Debugging flags (D_*) */
+ u8 net_type; /* Network type (NET_*) this exporter provides */
+ u32 _Atomic max_feed_index; /* Stop feeding at this index (exclusive bound) */
const char *name; /* Name for logging */
+ netindex_hash *netindex; /* Table for net <-> id conversion */
void (*stopped)(struct rt_exporter *); /* Callback when exporter can stop */
void (*cleanup_done)(struct rt_exporter *, u64 end); /* Callback when cleanup has been done */
- struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, const net_addr *, const struct rt_export_item *first);
- const net_addr *(*feed_next)(struct rt_exporter *, struct rcu_unwinder *, struct rt_export_feeder *);
+ struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, const struct netindex *, const struct rt_export_item *first);
void (*feed_cleanup)(struct rt_exporter *, struct rt_export_feeder *);
};
/* Is this update allowed by prefilter? */
const net_addr *n = (update->new ?: update->old)->net;
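+ /* The update's net pointer points into its netindex record,
+  * so NET_TO_INDEX is a plain container lookup */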
+ struct netindex *ni = NET_TO_INDEX(n);
+
if (!rt_prefilter_net(&r->feeder.prefilter, n))
{
rtex_trace(r, D_ROUTES, "Not exporting %N due to prefilter", n);
/* But this net shall get a feed first! */
rtex_trace(r, D_ROUTES, "Expediting %N feed due to pending update %lu", n, update->seq);
RCU_ANCHOR(u);
- feed = e->feed_net(e, u, n, update);
+ feed = e->feed_net(e, u, ni, update);
- bmap_set(&r->feed_map, NET_TO_INDEX(n)->index);
+ bmap_set(&r->feed_map, ni->index);
ASSERT_DIE(feed);
EXPORT_FOUND(RT_EXPORT_FEED);
return NULL;
}
- const net_addr *a = e->feed_next(e, u, f);
- if (!a)
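+ /* Scan forward through the netindex space for the next
+  * allocated index below the exporter's current feed bound */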
+ struct netindex *ni = NULL;
+ u32 mfi = atomic_load_explicit(&e->max_feed_index, memory_order_acquire);
+ for (; !ni && f->feed_index < mfi; f->feed_index++)
+ ni = net_resolve_index(e->netindex, e->net_type, f->feed_index);
+
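+ /* No allocated index left: mark this feed finished */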
+ if (!ni)
+ {
+ f->feed_index = ~0;
break;
+ }
- if (!rt_prefilter_net(&f->prefilter, a))
+ if (!rt_prefilter_net(&f->prefilter, ni->addr))
{
- rtex_trace(f, D_ROUTES, "Not feeding %N due to prefilter", a);
+ rtex_trace(f, D_ROUTES, "Not feeding %N due to prefilter", ni->addr);
continue;
}
- if (f->feeding && !rt_net_is_feeding_feeder(f, a))
+ if (f->feeding && !rt_net_is_feeding_feeder(f, ni->addr))
{
- rtex_trace(f, D_ROUTES, "Not feeding %N, not requested", a);
+ rtex_trace(f, D_ROUTES, "Not feeding %N, not requested", ni->addr);
continue;
}
- struct rt_export_feed *feed = e->feed_net(e, u, a, NULL);
+ struct rt_export_feed *feed = e->feed_net(e, u, ni, NULL);
if (feed)
{
- rtex_trace(f, D_ROUTES, "Feeding %u routes for %N", feed->count_routes, a);
+ rtex_trace(f, D_ROUTES, "Feeding %u routes for %N", feed->count_routes, ni->addr);
return feed;
}
}
e->journal.cleanup_done = rt_exporter_cleanup_done;
lfjour_init(&e->journal, scf);
ASSERT_DIE(e->feed_net);
- ASSERT_DIE(e->feed_next);
+ ASSERT_DIE(e->netindex);
}
struct rt_export_item *
}
static void
-rte_announce(struct rtable_private *tab, const struct netindex *i, net *net, const rte *new, const rte *old,
+rte_announce(struct rtable_private *tab, const struct netindex *i UNUSED, net *net, const rte *new, const rte *old,
const rte *new_best, const rte *old_best)
{
/* Update network count */
ASSERT_DIE(atomic_compare_exchange_strong_explicit(
&tab->routes_block_size, &bs, nbs,
memory_order_acq_rel, memory_order_relaxed));
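+ /* Both exporters' feed bounds track routes_block_size in lockstep,
+  * so the same expected value bs holds for all three swaps */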
+ ASSERT_DIE(atomic_compare_exchange_strong_explicit(
+ &tab->export_all.max_feed_index, &bs, nbs,
+ memory_order_acq_rel, memory_order_relaxed));
+ ASSERT_DIE(atomic_compare_exchange_strong_explicit(
+ &tab->export_best.max_feed_index, &bs, nbs,
+ memory_order_acq_rel, memory_order_relaxed));
synchronize_rcu();
mb_free(routes);
* Feeding
*/
-static const net_addr *
-rt_feed_next(struct rtable_reading *tr, struct rt_export_feeder *f)
-{
- u32 rbs = atomic_load_explicit(&tr->t->routes_block_size, memory_order_acquire);
- for (; f->feed_index < rbs; f->feed_index++)
- {
- struct netindex *ni = net_resolve_index(tr->t->netindex, tr->t->addr_type, f->feed_index);
- if (ni)
- {
- f->feed_index++;
- return ni->addr;
- }
- }
-
- f->feed_index = ~0ULL;
- return NULL;
-}
-
-static const net_addr *
-rt_feed_next_best(struct rt_exporter *e, struct rcu_unwinder *u, struct rt_export_feeder *f)
-{
- RT_READ_ANCHORED(SKIP_BACK(rtable, priv.export_best, e), tr, u);
- return rt_feed_next(tr, f);
-}
-
-static const net_addr *
-rt_feed_next_all(struct rt_exporter *e, struct rcu_unwinder *u, struct rt_export_feeder *f)
-{
- RT_READ_ANCHORED(SKIP_BACK(rtable, priv.export_all, e), tr, u);
- return rt_feed_next(tr, f);
-}
-
static struct rt_export_feed *
rt_alloc_feed(uint routes, uint exports)
{
}
static struct rt_export_feed *
-rt_net_feed_internal(struct rtable_reading *tr, const net_addr *a, const struct rt_pending_export *first)
+rt_net_feed_internal(struct rtable_reading *tr, const struct netindex *ni, const struct rt_pending_export *first)
{
- const struct netindex *i = net_find_index(tr->t->netindex, a);
- net *n = rt_net_feed_get_net(tr, i->index);
+ net *n = rt_net_feed_get_net(tr, ni->index);
if (!n)
return NULL;
rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first)
{
RT_READ(t, tr);
- return rt_net_feed_internal(tr, a, first);
+ const struct netindex *ni = net_find_index(tr->t->netindex, a);
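+ /* A net that was never indexed has nothing to feed */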
+ return ni ? rt_net_feed_internal(tr, ni, first) : NULL;
}
static struct rt_export_feed *
-rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, const net_addr *a, const struct rt_export_item *_first)
+rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, const struct netindex *ni, const struct rt_export_item *_first)
{
RT_READ_ANCHORED(SKIP_BACK(rtable, export_all, e), tr, u);
- return rt_net_feed_internal(tr, a, SKIP_BACK(const struct rt_pending_export, it, _first));
+ return rt_net_feed_internal(tr, ni, SKIP_BACK(const struct rt_pending_export, it, _first));
}
rte
}
static struct rt_export_feed *
-rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, const net_addr *a, const struct rt_export_item *_first)
+rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, const struct netindex *ni, const struct rt_export_item *_first)
{
SKIP_BACK_DECLARE(rtable, t, export_best, e);
SKIP_BACK_DECLARE(const struct rt_pending_export, first, it, _first);
- struct netindex *ni = NET_TO_INDEX(a);
-
RT_READ_ANCHORED(t, tr, u);
net *n = rt_net_feed_get_net(tr, ni->index);
.item_done = rt_cleanup_export_best,
},
.name = mb_sprintf(p, "%s.export-best", t->name),
+ .net_type = t->addr_type,
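+ /* The feed bound starts at the initial block size and is raised together with routes_block_size */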
+ .max_feed_index = RT_INITIAL_ROUTES_BLOCK_SIZE,
+ .netindex = t->netindex,
.trace_routes = t->debug,
.cleanup_done = rt_cleanup_done_best,
.feed_net = rt_feed_net_best,
- .feed_next = rt_feed_next_best,
};
rt_exporter_init(&t->export_best, &cf->export_settle);
.item_done = rt_cleanup_export_all,
},
.name = mb_sprintf(p, "%s.export-all", t->name),
+ .net_type = t->addr_type,
+ .max_feed_index = RT_INITIAL_ROUTES_BLOCK_SIZE,
+ .netindex = t->netindex,
.trace_routes = t->debug,
.cleanup_done = rt_cleanup_done_all,
.feed_net = rt_feed_net_all,
- .feed_next = rt_feed_next_all,
};
rt_exporter_init(&t->export_all, &cf->export_settle);