struct rt_export_request *req;
};
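+/*
+ * Feed retry context: an RCU unwinder paired with a pre-allocated buffer
+ * for feed construction. When rt_alloc_feed() finds the buffer too small,
+ * it stores the required size in feed_request and unwinds to the anchor,
+ * which grows the buffer outside the RCU read section and retries
+ * (see RT_EXPORT_RETRY_ANCHOR).
+ */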
+struct rt_feed_retry {
+ struct rcu_unwinder *u; /* RCU anchor to unwind to when retrying */
+ void *feed_block; /* Pre-allocated buffer for feed construction */
+ u32 feed_size; /* Usable size of feed_block */
+ u32 feed_request; /* Size requested by a feed that did not fit */
+};
+
struct rt_exporter {
struct lfjour journal; /* Journal for update keeping */
TLIST_LIST(rt_export_feeder) feeders; /* List of active feeder structures */
netindex_hash *netindex; /* Table for net <-> id conversion */
void (*stopped)(struct rt_exporter *); /* Callback when exporter can stop */
void (*cleanup_done)(struct rt_exporter *, u64 end); /* Callback when cleanup has been done */
- struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rcu_unwinder *, u32, bool (*)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *, const struct rt_export_item *first);
+ struct rt_export_feed *(*feed_net)(struct rt_exporter *, struct rt_feed_retry *, u32, bool (*)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *, const struct rt_export_item *first);
void (*feed_cleanup)(struct rt_exporter *, struct rt_export_feeder *);
};
/* Exporter API */
void rt_exporter_init(struct rt_exporter *, struct settle_config *);
struct rt_export_item *rt_exporter_push(struct rt_exporter *, const struct rt_export_item *);
-struct rt_export_feed *rt_alloc_feed(uint routes, uint exports);
+struct rt_export_feed *rt_alloc_feed(struct rt_feed_retry *ur, uint routes, uint exports);
void rt_exporter_shutdown(struct rt_exporter *, void (*stopped)(struct rt_exporter *));
/* Standalone feeds */
rt_unlock_table(_tp); \
}} while (0) \
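+/*
+ * Set up a feed retry context together with an RCU anchor. When a feed
+ * does not fit into the prepared buffer, rt_alloc_feed() records the
+ * required size and unwinds back here; the buffer is then grown outside
+ * the RCU read section and the feed is retried.
+ */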
+#define RT_EXPORT_RETRY_ANCHOR(ur, u) \
+ struct rt_feed_retry ur = { \
+ .feed_block = tmp_alloc(512), /* initial guess, grown on retry */ \
+ .feed_size = 512, \
+ }; \
+ RCU_ANCHOR(u); \
+ ur.u = u; \
+ /* After an unwind, a feed may have requested a bigger buffer */ \
+ if (ur.feed_request > ur.feed_size) \
+ { \
+ /* Allocation must not happen inside the RCU read section */ \
+ rcu_read_unlock(); \
+ /* allocate a little bit more just for good measure */ \
+ ur.feed_size = (ur.feed_request * 3) / 2; \
+ ur.feed_block = tmp_alloc(ur.feed_size); \
+ rcu_read_lock(); \
+ }
+
static inline int rt_prefilter_net(const struct rt_prefilter *p, const net_addr *n)
{
switch (p->mode)
}
else
{
- RCU_ANCHOR(u);
- feed = e->feed_net(e, u, ni->index, NULL, NULL, update);
+ RT_EXPORT_RETRY_ANCHOR(ur, u);
+ feed = e->feed_net(e, &ur, ni->index, NULL, NULL, update);
}
ASSERT_DIE(feed && (feed != &rt_feed_index_out_of_range));
}
struct rt_export_feed *
-rt_alloc_feed(uint routes, uint exports)
+rt_alloc_feed(struct rt_feed_retry *ur, uint routes, uint exports)
{
struct rt_export_feed *feed;
uint size = sizeof *feed
+ routes * sizeof *feed->block + _Alignof(typeof(*feed->block))
+ exports * sizeof *feed->exports + _Alignof(typeof(*feed->exports));
- feed = tmp_alloc(size);
+ if (ur)
+ {
+ /* Use the buffer prepared by RT_EXPORT_RETRY_ANCHOR. If it is too
+ * small, record the size we need and unwind to the anchor, which
+ * grows the buffer and retries the feed. */
+ if (size > ur->feed_size)
+ {
+ ur->feed_request = size;
+ RCU_RETRY(ur->u);
+ }
+
+ feed = ur->feed_block;
+ }
+ else
+ feed = tmp_alloc(size);
feed->count_routes = routes;
feed->count_exports = exports;
}
static struct rt_export_feed *
-rt_export_get_next_feed(struct rt_export_feeder *f, struct rcu_unwinder *u)
+rt_export_get_next_feed(struct rt_export_feeder *f, struct rt_feed_retry *ur)
{
- for (uint retry = 0; retry < (u ? 1024 : ~0U); retry++)
+ for (uint retry = 0; retry < (ur->u ? 1024 : ~0U); retry++)
{
- ASSERT_DIE(u || DOMAIN_IS_LOCKED(rtable, f->domain));
+ ASSERT_DIE(ur->u || DOMAIN_IS_LOCKED(rtable, f->domain));
struct rt_exporter *e = atomic_load_explicit(&f->exporter, memory_order_acquire);
if (!e)
return NULL;
}
- struct rt_export_feed *feed = e->feed_net(e, u, f->feed_index,
+ struct rt_export_feed *feed = e->feed_net(e, ur, f->feed_index,
rt_net_is_feeding_feeder, f, NULL);
if (feed == &rt_feed_index_out_of_range)
{
return feed;
}
- RCU_RETRY_FAST(u);
+ RCU_RETRY_FAST(ur->u);
}
struct rt_export_feed *
}
else
{
- RCU_ANCHOR(u);
- feed = rt_export_get_next_feed(f, u);
+ RT_EXPORT_RETRY_ANCHOR(ur, u);
+
+ feed = rt_export_get_next_feed(f, &ur);
}
if (feed)
struct rtable_reading {
rtable *t;
struct rcu_unwinder *u;
+ struct rt_export_feeder f;
};
#define RT_READ_ANCHORED(_o, _i, _u) \
if (!ecnt && prefilter && !prefilter(f, NET_READ_BEST_ROUTE(tr, n)->rte.net))
return NULL;
- feed = rt_alloc_feed(rcnt+ocnt, ecnt);
+ feed = rt_alloc_feed(NULL, rcnt+ocnt, ecnt);
if (rcnt)
+ {
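+ /* defer_expect() may need to allocate, so step outside the RCU read
+ * section first; allocating with an RCU reader active is what we avoid. */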
+ rcu_read_unlock();
+ defer_expect(rcnt * sizeof (rte));
+ rcu_read_lock();
+
rte_feed_obtain_copy(tr, n, feed->block, rcnt);
+ }
if (ecnt)
{
}
static struct rt_export_feed *
-rt_feed_net_all(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
+rt_feed_net_all(struct rt_exporter *e, struct rt_feed_retry *ur, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
{
- RT_READ_ANCHORED(SKIP_BACK(rtable, export_all, e), tr, u);
+ RT_READ_ANCHORED(SKIP_BACK(rtable, export_all, e), tr, ur->u);
return rt_net_feed_internal(tr, index, prefilter, f, SKIP_BACK(const struct rt_pending_export, it, _first));
}
}
static struct rt_export_feed *
-rt_feed_net_best(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
+rt_feed_net_best(struct rt_exporter *e, struct rt_feed_retry *ur, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, const struct rt_export_item *_first)
{
SKIP_BACK_DECLARE(rtable, t, export_best, e);
SKIP_BACK_DECLARE(const struct rt_pending_export, first, it, _first);
- RT_READ_ANCHORED(t, tr, u);
+ RT_READ_ANCHORED(t, tr, ur->u);
net *n = rt_net_feed_get_net(tr, index);
if (!n)
if (!ecnt && (!best || prefilter && !prefilter(f, best->rte.net)))
return NULL;
- struct rt_export_feed *feed = rt_alloc_feed(!!best, ecnt);
+ struct rt_export_feed *feed = rt_alloc_feed(ur, !!best, ecnt);
if (best)
{
feed->block[0] = best->rte;
{}
static struct rt_export_feed *
-bgp_out_feed_net(struct rt_exporter *e, struct rcu_unwinder *u, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, UNUSED const struct rt_export_item *_first)
+bgp_out_feed_net(struct rt_exporter *e, struct rt_feed_retry *ur, u32 index, bool (*prefilter)(struct rt_export_feeder *, const net_addr *), struct rt_export_feeder *f, UNUSED const struct rt_export_item *_first)
{
- ASSERT_DIE(u == NULL);
+ ASSERT_DIE(ur->u == NULL);
SKIP_BACK_DECLARE(struct bgp_ptx_private, c, exporter, e);
ASSERT_DIE(DOMAIN_IS_LOCKED(rtable, c->lock));
if (count)
{
- feed = rt_alloc_feed(count, 0);
+ feed = rt_alloc_feed(NULL, count, 0);
feed->ni = ni;
uint pos = 0;
/* We can't lock and we actually shouldn't alloc either when rcu is active
* but that's a quest for another day. */
atomic_fetch_add_explicit(&cold_memory_failed_to_use, 1, memory_order_relaxed);
+ bug("Cold memory allocation attempted with an RCU reader active");
}
else
{