s *_ptr = ((s *)((char *)_orig - OFFSETOF(s, i))); \
SAME_TYPE(&_ptr->i, _orig); \
_ptr; })
+#define SKIP_BACK_DECLARE(s, n, i, p) s *n = SKIP_BACK(s, i, p)
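As a side note for orientation: SKIP_BACK(s, i, p) casts a pointer p to member i back to a pointer to its enclosing struct s (a container_of-style cast, per the definition above), and the new SKIP_BACK_DECLARE(s, n, i, p) additionally declares the result as variable n. The standalone sketch below is illustrative only — the OFFSETOF/SAME_TYPE stand-ins, the toy struct and main() are assumptions, not BIRD code — but it shows the mechanical rewrite applied at every call site in the rest of this patch.

/* sketch.c -- illustrative stand-ins; compile with gcc (uses typeof and statement expressions) */
#include <stdio.h>
#include <stddef.h>

#define OFFSETOF(s, i)  offsetof(s, i)                /* stand-in for BIRD's OFFSETOF */
#define SAME_TYPE(a, b) ((void) sizeof((a) == (b)))   /* simplified stand-in for BIRD's type check */

#define SKIP_BACK(s, i, p) ({ \
  typeof(p) _orig = p; \
  s *_ptr = ((s *)((char *)_orig - OFFSETOF(s, i))); \
  SAME_TYPE(&_ptr->i, _orig); \
  _ptr; })
#define SKIP_BACK_DECLARE(s, n, i, p) s *n = SKIP_BACK(s, i, p)

struct toy { int tag; char data[8]; };                /* hypothetical container */

int main(void)
{
  struct toy t = { .tag = 42 };
  void *m = t.data;                                   /* pointer to the member only */

  /* old spelling: the declaration and the cast written out at the call site */
  struct toy *a = SKIP_BACK(struct toy, data, m);

  /* new spelling: the same declaration, generated by the macro */
  SKIP_BACK_DECLARE(struct toy, b, data, m);

  printf("%d %d\n", a == &t, b == &t);                /* prints: 1 1 */
  return 0;
}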
#define BIRD_ALIGN(s, a) (((s)+a-1)&~(a-1))
#define BIRD_SET_ALIGNED_POINTER(ptr, val) do { \
size_t _alignment = _Alignof(typeof(*ptr)); \
void lfuc_unlock_deferred(struct deferred_call *dc)
{
- struct lfuc_unlock_queue_item *luqi = SKIP_BACK(struct lfuc_unlock_queue_item, dc, dc);
+ SKIP_BACK_DECLARE(struct lfuc_unlock_queue_item, luqi, dc, dc);
lfuc_unlock_immediately(luqi->c, luqi->el, luqi->ev);
}
void *
mb_realloc(void *m, unsigned size)
{
- struct mblock *b = SKIP_BACK(struct mblock, data, m);
+ SKIP_BACK_DECLARE(struct mblock, b, data, m);
struct pool *p = resource_parent(&b->r);
ASSERT_DIE(DG_IS_LOCKED(p->domain));
if (!m)
return;
- struct mblock *b = SKIP_BACK(struct mblock, data, m);
+ SKIP_BACK_DECLARE(struct mblock, b, data, m);
rfree(&b->r);
}
void
sl_free(void *oo)
{
- struct sl_obj *o = SKIP_BACK(struct sl_obj, data, oo);
+ SKIP_BACK_DECLARE(struct sl_obj, o, data, oo);
rem_node(&o->n);
xfree(o);
static inline void
ifa_send_notify(struct iface_subscription *s, unsigned c, struct ifa *a)
{
- struct proto *p = SKIP_BACK(struct proto, iface_sub, s);
+ SKIP_BACK_DECLARE(struct proto, p, iface_sub, s);
if (s->ifa_notify &&
(p->proto_state != PS_DOWN) &&
static inline void
if_send_notify(struct iface_subscription *s, unsigned c, struct iface *i)
{
- struct proto *p = SKIP_BACK(struct proto, iface_sub, s);
+ SKIP_BACK_DECLARE(struct proto, p, iface_sub, s);
if (s->if_notify &&
(p->proto_state != PS_DOWN) &&
{
IFACE_LOCK;
- struct proto *p = SKIP_BACK(struct proto, iface_sub, s);
+ SKIP_BACK_DECLARE(struct proto, p, iface_sub, s);
WALK_TLIST_DELSAFE(proto_neigh, n, &p->neighbors)
neigh_unlink(n);
olock_free(resource *r)
{
/* Called externally from rfree() */
- struct object_lock *l = SKIP_BACK(struct object_lock, r, r);
+ SKIP_BACK_DECLARE(struct object_lock, l, r, r);
node *n;
OBJ_LOCK;
n = HEAD(l->waiters);
if (NODE_VALID(n))
{
- struct object_lock *q = SKIP_BACK(struct object_lock, n, n);
+ SKIP_BACK_DECLARE(struct object_lock, q, n, n);
/* Remove this candidate from waiters list */
rem_node(n);
const struct eattr *heea = ea_find_by_class(src, &ea_gen_hostentry);
if (heea) {
/* The same hostentry, but different dependent table */
- struct hostentry_adata *head = SKIP_BACK(struct hostentry_adata, ad, heea->u.ad);
+ SKIP_BACK_DECLARE(struct hostentry_adata, head, ad, heea->u.ad);
struct hostentry *he = head->he;
ea_set_hostentry(&e.attrs, m->channel->table, he->owner, he->addr, he->link,
HOSTENTRY_LABEL_COUNT(head), head->labels);
int
mpls_handle_rte(struct channel *c, const net_addr *n, rte *r, struct mpls_fec **fecp)
{
- struct mpls_channel *mc = SKIP_BACK(struct mpls_channel, c, c->proto->mpls_channel);
+ SKIP_BACK_DECLARE(struct mpls_channel, mc, c, c->proto->mpls_channel);
struct mpls_fec_map *m = mc->mpls_map;
struct mpls_fec *fec = *fecp = NULL;
void
channel_import_log_state_change(struct rt_import_request *req, u8 state)
{
- struct channel *c = SKIP_BACK(struct channel, in_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, in_req, req);
CD(c, "Channel import state changed to %s", rt_import_state_name(state));
}
void
channel_export_log_state_change(struct rt_export_request *req, u8 state)
{
- struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, out_req, req);
CD(c, "Channel export state changed to %s", rt_export_state_name(state));
switch (state)
void
channel_refeed_log_state_change(struct rt_export_request *req, u8 state)
{
- struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, refeed_req, req);
CD(c, "Channel export state changed to %s", rt_export_state_name(state));
switch (state)
static void
channel_dump_import_req(struct rt_import_request *req)
{
- struct channel *c = SKIP_BACK(struct channel, in_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, in_req, req);
debug(" Channel %s.%s import request %p\n", c->proto->name, c->name, req);
}
static void
channel_dump_export_req(struct rt_export_request *req)
{
- struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, out_req, req);
debug(" Channel %s.%s export request %p\n", c->proto->name, c->name, req);
}
static void
channel_dump_refeed_req(struct rt_export_request *req)
{
- struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, refeed_req, req);
debug(" Channel %s.%s refeed request %p\n", c->proto->name, c->name, req);
}
static void
channel_roa_in_changed(struct settle *se)
{
- struct roa_subscription *s = SKIP_BACK(struct roa_subscription, settle, se);
+ SKIP_BACK_DECLARE(struct roa_subscription, s, settle, se);
struct channel *c = s->c;
CD(c, "Reload triggered by RPKI change");
static void
channel_roa_out_changed(struct settle *se)
{
- struct roa_subscription *s = SKIP_BACK(struct roa_subscription, settle, se);
+ SKIP_BACK_DECLARE(struct roa_subscription, s, settle, se);
struct channel *c = s->c;
CD(c, "Feeding triggered by RPKI change");
static void
channel_export_one_roa(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
- struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
+ SKIP_BACK_DECLARE(struct roa_subscription, s, req, req);
switch (net->type)
{
static void
channel_dump_roa_req(struct rt_export_request *req)
{
- struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
+ SKIP_BACK_DECLARE(struct roa_subscription, s, req, req);
struct channel *c = s->c;
debug(" Channel %s.%s ROA %s change notifier request %p\n",
static void
channel_roa_unsubscribed(struct rt_export_request *req)
{
- struct roa_subscription *s = SKIP_BACK(struct roa_subscription, req, req);
+ SKIP_BACK_DECLARE(struct roa_subscription, s, req, req);
struct channel *c = s->c;
rem_node(&s->roa_node);
void
channel_import_stopped(struct rt_import_request *req)
{
- struct channel *c = SKIP_BACK(struct channel, in_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, in_req, req);
mb_free(c->in_req.name);
c->in_req.name = NULL;
static void
channel_export_stopped(struct rt_export_request *req)
{
- struct channel *c = SKIP_BACK(struct channel, out_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, out_req, req);
/* The hook has already stopped */
req->hook = NULL;
static void
channel_refeed_stopped(struct rt_export_request *req)
{
- struct channel *c = SKIP_BACK(struct channel, refeed_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, refeed_req, req);
req->hook = NULL;
static void
channel_reload_stopped(struct rt_export_request *req)
{
- struct channel *c = SKIP_BACK(struct channel, reload_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, reload_req, req);
req->hook = NULL;
static void
channel_reload_log_state_change(struct rt_export_request *req, u8 state)
{
- struct channel *c = SKIP_BACK(struct channel, reload_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, reload_req, req);
if (state == TES_READY)
{
static void
channel_reload_dump_req(struct rt_export_request *req)
{
- struct channel *c = SKIP_BACK(struct channel, reload_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, reload_req, req);
debug(" Channel %s.%s import reload request %p\n", c->proto->name, c->name, req);
}
static void
channel_reload_out_done(struct channel_feeding_request *cfr)
{
- struct channel_cmd_reload_feeding_request *ccrfr = SKIP_BACK(struct channel_cmd_reload_feeding_request, cfr, cfr);
+ SKIP_BACK_DECLARE(struct channel_cmd_reload_feeding_request, ccrfr, cfr, cfr);
if (atomic_fetch_sub_explicit(&ccrfr->prr->counter, 1, memory_order_acq_rel) == 1)
ev_send_loop(&main_birdloop, &ccrfr->prr->ev);
}
static void
channel_reload_in_done(struct channel_import_request *cir)
{
- struct channel_cmd_reload_import_request *ccrir = SKIP_BACK(struct channel_cmd_reload_import_request, cir, cir);
+ SKIP_BACK_DECLARE(struct channel_cmd_reload_import_request, ccrir, cir, cir);
if (atomic_fetch_sub_explicit(&ccrir->prr->counter, 1, memory_order_acq_rel) == 1)
ev_send_loop(&main_birdloop, &ccrir->prr->ev);
}
static void
ea_class_ref_free(resource *r)
{
- struct ea_class_ref *ref = SKIP_BACK(struct ea_class_ref, r, r);
+ SKIP_BACK_DECLARE(struct ea_class_ref, ref, r, r);
if (!--ref->class->uc)
ea_class_free(ref->class);
}
static void
ea_class_ref_dump(resource *r, unsigned indent UNUSED)
{
- struct ea_class_ref *ref = SKIP_BACK(struct ea_class_ref, r, r);
+ SKIP_BACK_DECLARE(struct ea_class_ref, ref, r, r);
debug("name \"%s\", type=%d\n", ref->class->name, ref->class->type);
}
struct rt_pending_export *first UNUSED, struct rt_pending_export *last UNUSED,
const rte **feed, uint count)
{
- struct rt_show_data *d = SKIP_BACK(struct rt_show_data, req, req);
+ SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);
return rt_show_net(d, n, feed, count);
}
static void
rt_show_export_stopped_cleanup(struct rt_export_request *req)
{
- struct rt_show_data *d = SKIP_BACK(struct rt_show_data, req, req);
+ SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);
/* The hook is now invalid */
req->hook = NULL;
static void
rt_show_export_stopped(struct rt_export_request *req)
{
- struct rt_show_data *d = SKIP_BACK(struct rt_show_data, req, req);
+ SKIP_BACK_DECLARE(struct rt_show_data, d, req, req);
/* The hook is now invalid */
req->hook = NULL;
net_route(struct rtable_reading *tr, const net_addr *n)
{
ASSERT(tr->t->addr_type == n->type);
- net_addr_union *nu = SKIP_BACK(net_addr_union, n, n);
+ SKIP_BACK_DECLARE(net_addr_union, nu, n, n);
const struct f_trie *trie = atomic_load_explicit(&tr->t->trie, memory_order_acquire);
int
net_roa_check(rtable *tp, const net_addr *n, u32 asn)
{
- net_addr_union *nu = SKIP_BACK(net_addr_union, n, n);
+ SKIP_BACK_DECLARE(net_addr_union, nu, n, n);
int anything = 0;
#define TW(ipv) do { \
uint count = 0;
const rte **feed = NULL;
- const struct netindex *i = SKIP_BACK(struct netindex, addr, (net_addr (*)[0]) n);
+ const SKIP_BACK_DECLARE(struct netindex, i, addr, (net_addr (*)[0]) n);
ASSERT_DIE(i->index < atomic_load_explicit(&hook->tab->routes_block_size, memory_order_relaxed));
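A brief note on the const-qualified call above: SKIP_BACK_DECLARE expands to a declaration that starts with the struct type, so type qualifiers can simply be prefixed to the invocation. Illustrative expansion, following the macro definition at the top of this patch:

const SKIP_BACK_DECLARE(struct netindex, i, addr, (net_addr (*)[0]) n);
/* becomes: const struct netindex *i = SKIP_BACK(struct netindex, addr, (net_addr (*)[0]) n); */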
struct rt_pending_export *last;
if (old_best_valid)
old_best->sender->stats.pref--;
- struct rt_pending_export *rpe = SKIP_BACK(struct rt_pending_export, li, lfjour_push_prepare(&tab->journal));
+ SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, lfjour_push_prepare(&tab->journal));
if (!rpe)
{
static void
rt_cleanup_export(struct lfjour *j, struct lfjour_item *i)
{
- struct rtable_private *tab = SKIP_BACK(struct rtable_private, journal, j);
- struct rt_pending_export *rpe = SKIP_BACK(struct rt_pending_export, li, i);
+ SKIP_BACK_DECLARE(struct rtable_private, tab, journal, j);
+ SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, i);
/* Unlink this export from struct network */
ASSERT_DIE(rpe->new || rpe->old);
static void
rt_cleanup_done(struct lfjour *j, u64 begin_seq, u64 end_seq)
{
- struct rtable_private *tab = SKIP_BACK(struct rtable_private, journal, j);
+ SKIP_BACK_DECLARE(struct rtable_private, tab, journal, j);
ASSERT_DIE(DG_IS_LOCKED(tab->lock.rtable));
if (~end_seq)
int
channel_preimport(struct rt_import_request *req, rte *new, const rte *old)
{
- struct channel *c = SKIP_BACK(struct channel, in_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, in_req, req);
if (new && !old)
if (CHANNEL_LIMIT_PUSH(c, RX))
};
lfjour_register(&tab->journal, &hook->recipient);
- struct rt_pending_export *rpe = SKIP_BACK(struct rt_pending_export, li, atomic_load_explicit(&hook->recipient.last, memory_order_relaxed));
+ SKIP_BACK_DECLARE(struct rt_pending_export, rpe, li, atomic_load_explicit(&hook->recipient.last, memory_order_relaxed));
req_trace(req, D_STATES, "Export initialized, last export %p (%lu)", rpe, rpe ? rpe->seq : 0);
bmap_init(&hook->seq_map, hook->pool, 16);
WALK_TLIST(lfjour_recipient, r, &tab->journal.recipients)
{
- struct rt_export_hook *eh = SKIP_BACK(struct rt_export_hook, recipient, r);
+ SKIP_BACK_DECLARE(struct rt_export_hook, eh, recipient, r);
eh->req->dump_req(eh->req);
debug(" Export hook %p requested by %p:"
" refeed_pending=%u last_state_change=%t export_state=%u\n",
static void
rt_flowspec_export_one(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
- struct rt_flowspec_link *ln = SKIP_BACK(struct rt_flowspec_link, req, req);
+ SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
rtable *dst_pub = ln->dst;
ASSUME(rt_is_flow(dst_pub));
static void
rt_flowspec_dump_req(struct rt_export_request *req)
{
- struct rt_flowspec_link *ln = SKIP_BACK(struct rt_flowspec_link, req, req);
+ SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
debug(" Flowspec link for table %s (%p)\n", ln->dst->name, req);
}
static void
rt_flowspec_log_state_change(struct rt_export_request *req, u8 state)
{
- struct rt_flowspec_link *ln = SKIP_BACK(struct rt_flowspec_link, req, req);
+ SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
rt_trace(ln->dst, D_STATES, "Flowspec link from %s export state changed to %s",
ln->src->name, rt_export_state_name(state));
}
static void
rt_flowspec_link_stopped(struct rt_export_request *req)
{
- struct rt_flowspec_link *ln = SKIP_BACK(struct rt_flowspec_link, req, req);
+ SKIP_BACK_DECLARE(struct rt_flowspec_link, ln, req, req);
rtable *dst = ln->dst;
mb_free(ln);
static void
rt_free(resource *_r)
{
- struct rtable_private *r = SKIP_BACK(struct rtable_private, r, _r);
+ SKIP_BACK_DECLARE(struct rtable_private, r, r, _r);
DBG("Deleting routing table %s\n", r->name);
ASSERT_DIE(r->use_count == 0);
static void
rt_res_dump(resource *_r, unsigned indent)
{
- struct rtable_private *r = SKIP_BACK(struct rtable_private, r, _r);
+ SKIP_BACK_DECLARE(struct rtable_private, r, r, _r);
debug("name \"%s\", addr_type=%s, rt_count=%u, use_count=%d\n",
r->name, net_label[r->addr_type], r->rt_count, r->use_count);
if (!bc->base_table)
return 0;
- struct bgp_proto *p = SKIP_BACK(struct bgp_proto, p, bc->c.proto);
+ SKIP_BACK_DECLARE(struct bgp_proto, p, p, bc->c.proto);
enum flowspec_valid old = rt_get_flowspec_valid(r),
valid = rt_flowspec_check(bc->base_table, tab, r->net, r->attrs, p->is_interior);
&& (c->class == &channel_bgp)
&& (bc->base_table))
{
- struct bgp_proto *p = SKIP_BACK(struct bgp_proto, p, bc->c.proto);
+ SKIP_BACK_DECLARE(struct bgp_proto, p, p, bc->c.proto);
RT_LOCKED(c->in_req.hook->table, tab)
valid = rt_flowspec_check(
bc->base_table, tab,
struct rt_pending_export *first, struct rt_pending_export *last,
const rte **feed, uint count)
{
- struct channel *c = SKIP_BACK(struct channel, reload_req, req);
+ SKIP_BACK_DECLARE(struct channel, c, reload_req, req);
for (uint i=0; i<count; i++)
if (feed[i]->sender == c->in_req.hook)
static void
hc_notify_log_state_change(struct rt_export_request *req, u8 state)
{
- struct hostcache *hc = SKIP_BACK(struct hostcache, req, req);
+ SKIP_BACK_DECLARE(struct hostcache, hc, req, req);
rt_trace(hc->tab, D_STATES, "HCU Export state changed to %s", rt_export_state_name(state));
}
static void
hc_notify_export_one(struct rt_export_request *req, const net_addr *net, struct rt_pending_export *first)
{
- struct hostcache *hc = SKIP_BACK(struct hostcache, req, req);
+ SKIP_BACK_DECLARE(struct hostcache, hc, req, req);
RT_LOCKED(hc->tab, tab)
if (ev_active(tab->hcu_event) || !trie_match_net(hc->trie, net))
node *n;
WALK_LIST(n, hc->hostentries)
{
- struct hostentry *he = SKIP_BACK(struct hostentry, ln, n);
+ SKIP_BACK_DECLARE(struct hostentry, he, ln, n);
ea_free(he->src);
if (!lfuc_finished(&he->uc))
static void
aggregator_rt_notify(struct proto *P, struct channel *src_ch, const net_addr *net, rte *new, const rte *old)
{
- struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
+ SKIP_BACK_DECLARE(struct aggregator_proto, p, p, P);
ASSERT_DIE(src_ch == p->src);
struct aggregator_bucket *new_bucket = NULL, *old_bucket = NULL;
struct aggregator_route *old_route = NULL;
static int
aggregator_preexport(struct channel *C, struct rte *new)
{
- struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, C->proto);
+ SKIP_BACK_DECLARE(struct aggregator_proto, p, p, C->proto);
/* Reject our own routes */
if (new->sender == p->dst->in_req.hook)
return -1;
static void
aggregator_postconfig(struct proto_config *CF)
{
- struct aggregator_config *cf = SKIP_BACK(struct aggregator_config, c, CF);
+ SKIP_BACK_DECLARE(struct aggregator_config, cf, c, CF);
if (!cf->dst->table)
cf_error("Source table not specified");
aggregator_init(struct proto_config *CF)
{
struct proto *P = proto_new(CF);
- struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
- struct aggregator_config *cf = SKIP_BACK(struct aggregator_config, c, CF);
+ SKIP_BACK_DECLARE(struct aggregator_proto, p, p, P);
+ SKIP_BACK_DECLARE(struct aggregator_config, cf, c, CF);
proto_configure_channel(P, &p->src, cf->src);
proto_configure_channel(P, &p->dst, cf->dst);
static int
aggregator_start(struct proto *P)
{
- struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
+ SKIP_BACK_DECLARE(struct aggregator_proto, p, p, P);
p->bucket_slab = sl_new(P->pool, sizeof(struct aggregator_bucket) + AGGR_DATA_MEMSIZE);
HASH_INIT(p->buckets, P->pool, AGGR_BUCK_ORDER);
static int
aggregator_shutdown(struct proto *P)
{
- struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
+ SKIP_BACK_DECLARE(struct aggregator_proto, p, p, P);
HASH_WALK_DELSAFE(p->buckets, next_hash, b)
{
static int
aggregator_reconfigure(struct proto *P, struct proto_config *CF)
{
- struct aggregator_proto *p = SKIP_BACK(struct aggregator_proto, p, P);
- struct aggregator_config *cf = SKIP_BACK(struct aggregator_config, c, CF);
+ SKIP_BACK_DECLARE(struct aggregator_proto, p, p, P);
+ SKIP_BACK_DECLARE(struct aggregator_config, cf, c, CF);
TRACE(D_EVENTS, "Reconfiguring");
ASSERT_DIE(birdloop_inside(p->p.loop));
- struct bfd_request *req = SKIP_BACK(struct bfd_request, n, HEAD(s->request_list));
+ SKIP_BACK_DECLARE(struct bfd_request, req, n, HEAD(s->request_list));
s->cf = bfd_merge_options(s->ifa->cf, &req->opts);
u32 tx = (s->loc_state == BFD_STATE_UP) ? s->cf.min_tx_int : s->cf.idle_tx_int;
node *n;
WALK_LIST(n, bfd_global.proto_list)
{
- struct bfd_proto *p = SKIP_BACK(struct bfd_proto, bfd_node, n);
+ SKIP_BACK_DECLARE(struct bfd_proto, p, bfd_node, n);
birdloop_enter(p->p.loop);
BFD_LOCK;
{
WALK_LIST_FIRST(n, s->request_list)
{
- struct bfd_request *req = SKIP_BACK(struct bfd_request, n, n);
+ SKIP_BACK_DECLARE(struct bfd_request, req, n, n);
rem_node(&req->n);
add_tail(&bfd_global.pickup_list, &req->n);
req->session = NULL;
static void
bgp_pending_tx_rfree(resource *r)
{
- struct bgp_pending_tx *ptx = SKIP_BACK(struct bgp_pending_tx, r, r);
+ SKIP_BACK_DECLARE(struct bgp_pending_tx, ptx, r, r);
HASH_WALK(ptx->prefix_hash, next, n)
rt_unlock_source(rt_find_source_global(n->path_id));
bgp_out_table_feed(void *data)
{
struct bgp_out_export_hook *hook = data;
- struct bgp_channel *bc = SKIP_BACK(struct bgp_channel, prefix_exporter, hook->h.table);
+ SKIP_BACK_DECLARE(struct bgp_channel, bc, prefix_exporter, hook->h.table);
struct bgp_pending_tx *c = bc->ptx;
int max = 512;
req->hook = rt_alloc_export(re, req->pool, sizeof(struct bgp_out_export_hook));
req->hook->req = req;
- struct bgp_out_export_hook *hook = SKIP_BACK(struct bgp_out_export_hook, h, req->hook);
+ SKIP_BACK_DECLARE(struct bgp_out_export_hook, hook, h, req->hook);
hook->h.event.hook = bgp_out_table_feed;
rt_init_export(re, req->hook);
struct rt_pending_export *first, struct rt_pending_export *last,
const rte **feed, uint count)
{
- struct bgp_channel *c = SKIP_BACK(struct bgp_channel, stale_feed, req);
+ SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
struct rt_import_hook *irh = c->c.in_req.hook;
/* Find our routes among others */
/* Get the first request to match */
struct bgp_listen_request *req = HEAD(bgp_listen_pending);
- struct bgp_proto *p = SKIP_BACK(struct bgp_proto, listen, req);
+ SKIP_BACK_DECLARE(struct bgp_proto, p, listen, req);
rem_node(&req->n);
/* First try to find existing socket */
static void
bgp_graceful_restart_feed_dump_req(struct rt_export_request *req)
{
- struct bgp_channel *c = SKIP_BACK(struct bgp_channel, stale_feed, req);
+ SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
debug(" BGP-GR %s.%s export request %p\n", c->c.proto->name, c->c.name, req);
}
static void
bgp_graceful_restart_feed_log_state_change(struct rt_export_request *req, u8 state)
{
- struct bgp_channel *c = SKIP_BACK(struct bgp_channel, stale_feed, req);
+ SKIP_BACK_DECLARE(struct bgp_channel, c, stale_feed, req);
struct bgp_proto *p = (void *) c->c.proto;
BGP_TRACE(D_EVENTS, "Long-lived graceful restart export state changed to %s", rt_export_state_name(state));
WALK_LIST(req, bs->requests)
{
- struct bgp_proto *p = SKIP_BACK(struct bgp_proto, listen, req);
+ SKIP_BACK_DECLARE(struct bgp_proto, p, listen, req);
if ((p->p.proto == &proto_bgp) &&
(ipa_equal(p->remote_ip, sk->daddr) || bgp_is_dynamic(p)) &&
(!p->cf->remote_range || ipa_in_netX(sk->daddr, p->cf->remote_range)) &&
if (s->channel->cf->gw_mode == GW_DIRECT)
{
eattr *e = ea_find(*to, &ea_gen_nexthop);
- struct nexthop_adata_mpls *namp = SKIP_BACK(struct nexthop_adata_mpls, nhad.ad, e->u.ptr);
+ SKIP_BACK_DECLARE(struct nexthop_adata_mpls, namp, nhad.ad, e->u.ptr);
namp->nhad.nh.labels = lnum;
memcpy(namp->nhad.nh.label, labels, lnum * sizeof(u32));
void
pipe_import_by_refeed_free(struct channel_feeding_request *cfr)
{
- struct import_to_export_reload *reload = SKIP_BACK(struct import_to_export_reload, cfr, cfr);
+ SKIP_BACK_DECLARE(struct import_to_export_reload, reload, cfr, cfr);
reload->cir->done(reload->cir);
}
node *n;
WALK_LIST(n, loop->sock_list)
{
- sock *s = SKIP_BACK(sock, n, n);
+ SKIP_BACK_DECLARE(sock, s, n, n);
uint w = sk_want_events(s);
if (!w)
static void
bird_thread_show(struct bird_thread_syncer *sync)
{
- struct bird_thread_show_data *tsd = SKIP_BACK(struct bird_thread_show_data, sync, sync);
+ SKIP_BACK_DECLARE(struct bird_thread_show_data, tsd, sync, sync);
if (!tsd->lp)
tsd->lp = lp_new(tsd->sync.pool);
static void
cmd_show_threads_done(struct bird_thread_syncer *sync)
{
- struct bird_thread_show_data *tsd = SKIP_BACK(struct bird_thread_show_data, sync, sync);
+ SKIP_BACK_DECLARE(struct bird_thread_show_data, tsd, sync, sync);
ASSERT_DIE(birdloop_inside(&main_birdloop));
tsd->cli->cont = NULL;
static void
lts_done(struct bird_thread_syncer *sync)
{
- struct log_thread_syncer *lts = SKIP_BACK(struct log_thread_syncer, sync, sync);
+ SKIP_BACK_DECLARE(struct log_thread_syncer, lts, sync, sync);
log_lock();
if (lts->lc_close)