Originally, this mechanism required checking whether there was enough time to do more work, and sending an event to resume later if not. This macro combines all that logic into one place and moves it straight to the _end_ of the export processing loop; a usage sketch follows the macro definition below.
One should note that there were two cases where the export processing loop
was deferred at the _beginning_ instead, which consumed an already-fetched
block of routes without processing it and therefore ignored some routes on
reimport. This wasn't easily noticeable in the tests until the one-task
limit got a ceiling of 300 ms to keep the latency reasonable.
 /* Check that the task has enough time to do a bit more */
 _Bool task_still_in_limit(void);
+#define MAYBE_DEFER_TASK(target, event, fmt, args...) do { \
+  if (!task_still_in_limit()) { \
+    if (config && config->latency_debug) \
+      log(L_TRACE "Deferring " fmt, ##args); \
+    return ev_send(target, event); \
+  } } while (0)
+
 /* Start a new birdloop owned by given pool and domain */
 struct birdloop *birdloop_new(pool *p, uint order, btime max_latency, const char *fmt, ...);
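For illustration, this is roughly how a work-loop event hook ends up using the macro. It's a condensed sketch, not code from the commit: have_work(), do_one_unit() and the whole struct worker are hypothetical placeholders, only event, event_list and ev_send() are real BIRD types and calls.

struct worker {                 /* hypothetical per-task state */
  event event;                  /* our own event, re-sent to defer the rest */
  event_list *target;           /* where the event gets queued */
  const char *name;
};

static void
worker_event_hook(void *data)
{
  struct worker *w = data;

  while (have_work(w))
  {
    /* Do one unit of work first, so an already-fetched item is never dropped */
    do_one_unit(w);

    /* Then check the time budget; if it's exhausted, log the deferral
     * (when latency debugging is enabled) and bail out by re-sending
     * our own event, finishing the rest as a later task */
    MAYBE_DEFER_TASK(w->target, &w->event, "worker %s", w->name);
  }
}

The important property is that the time check runs only after a completed unit of work, so deferring can never skip anything.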
   struct channel *c = _c;
   RT_FEED_WALK(&c->reimporter, f)
-    if (task_still_in_limit())
+  {
+    for (uint i = 0; i < f->count_routes; i++)
     {
-      for (uint i = 0; i < f->count_routes; i++)
-      {
-        rte *r = &f->block[i];
+      rte *r = &f->block[i];
-        if (r->flags & REF_OBSOLETE)
-          break;
+      if (r->flags & REF_OBSOLETE)
+        break;
-        if (r->sender == c->in_req.hook)
-        {
-          /* Strip the table-specific information */
-          rte new = rte_init_from(r);
+      if (r->sender == c->in_req.hook)
+      {
+        /* Strip the table-specific information */
+        rte new = rte_init_from(r);
-          /* Strip the later attribute layers */
-          new.attrs = ea_strip_to(new.attrs, BIT32_ALL(EALS_PREIMPORT));
+        /* Strip the later attribute layers */
+        new.attrs = ea_strip_to(new.attrs, BIT32_ALL(EALS_PREIMPORT));
-          /* And reload the route */
-          rte_update(c, r->net, &new, new.src);
-        }
+        /* And reload the route */
+        rte_update(c, r->net, &new, new.src);
       }
-
-      /* Local data needed no more */
-      tmp_flush();
-    }
-    else
-    {
-      ev_send(proto_work_list(c->proto), &c->reimport_event);
-      return;
     }
+
+    /* Local data needed no more */
+    tmp_flush();
+
+    MAYBE_DEFER_TASK(proto_work_list(c->proto), &c->reimport_event,
+                     "%s.%s reimport", c->proto->name, c->name);
+  }
 }
 /* Called by protocol to activate in_table */
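The very same check-then-send replacement then repeats at the remaining work loops: the secondary, merged and regular export processing, the flowspec control export and the hostcache updater: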
}
}
-  if (!task_still_in_limit())
-    return ev_send(c->out_req.r.target, c->out_req.r.event);
+  MAYBE_DEFER_TASK(c->out_req.r.target, c->out_req.r.event,
+                   "export to %s.%s (secondary)", c->proto->name, c->name);
}
}
}
}
-  if (!task_still_in_limit())
-    return ev_send(c->out_req.r.target, c->out_req.r.event);
+  MAYBE_DEFER_TASK(c->out_req.r.target, c->out_req.r.event,
+                   "export to %s.%s (merged)", c->proto->name, c->name);
}
}
}
}
-  if (!task_still_in_limit())
-    return ev_send(c->out_req.r.target, c->out_req.r.event);
+  MAYBE_DEFER_TASK(c->out_req.r.target, c->out_req.r.event,
+                   "export to %s.%s (regular)", c->proto->name, c->name);
}
}
rt_schedule_nhu(dst);
}
-  if (!task_still_in_limit())
-    return ev_send_loop(dst_pub->loop, &ln->event);
+  MAYBE_DEFER_TASK(birdloop_event_list(dst_pub->loop), &ln->event,
+                   "flowspec ctl export from %s to %s", ln->src->name, dst_pub->name);
}
}
}
}
-  if (!task_still_in_limit())
-    return ev_send(hc->req.r.target, hc->req.r.event);
+  MAYBE_DEFER_TASK(hc->req.r.target, hc->req.r.event,
+                   "hostcache updater in %s", tab->name);
}
}
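And finally, the BGP LLGR stale-route updater gets the same treatment: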
   struct rt_import_hook *irh = c->c.in_req.hook;
   RT_FEED_WALK(&c->stale_feed, f) TMP_SAVED
-    if (task_still_in_limit())
+  {
+    for (uint i = 0; i < f->count_routes; i++)
     {
-      for (uint i = 0; i < f->count_routes; i++)
+      rte *r = &f->block[i];
+      if ((r->sender != irh) ||                /* Not our route */
+          (r->stale_cycle == irh->stale_set))  /* A new route, do not mark as stale */
+        continue;
+
+      /* Don't reinstate obsolete routes */
+      if (r->flags & REF_OBSOLETE)
+        break;
+
+      eattr *ea = ea_find(r->attrs, BGP_EA_ID(BA_COMMUNITY));
+      const struct adata *ad = ea ? ea->u.ptr : NULL;
+      uint flags = ea ? ea->flags : BAF_PARTIAL;
+
+      /* LLGR not allowed, withdraw the route */
+      if (ad && int_set_contains(ad, BGP_COMM_NO_LLGR))
       {
-        rte *r = &f->block[i];
-        if ((r->sender != irh) ||                /* Not our route */
-            (r->stale_cycle == irh->stale_set))  /* A new route, do not mark as stale */
-          continue;
-
-        /* Don't reinstate obsolete routes */
-        if (r->flags & REF_OBSOLETE)
-          break;
-
-        eattr *ea = ea_find(r->attrs, BGP_EA_ID(BA_COMMUNITY));
-        const struct adata *ad = ea ? ea->u.ptr : NULL;
-        uint flags = ea ? ea->flags : BAF_PARTIAL;
-
-        /* LLGR not allowed, withdraw the route */
-        if (ad && int_set_contains(ad, BGP_COMM_NO_LLGR))
-        {
-          rte_import(&c->c.in_req, r->net, NULL, r->src);
-          continue;
-        }
-
-        /* Route already marked as LLGR, do nothing */
-        if (ad && int_set_contains(ad, BGP_COMM_LLGR_STALE))
-          continue;
-
-        /* Mark the route as LLGR */
-        bgp_set_attr_ptr(&r->attrs, BA_COMMUNITY, flags, int_set_add(tmp_linpool, ad, BGP_COMM_LLGR_STALE));
-
-        /* We need to update the route but keep it stale. */
-        ASSERT_DIE(irh->stale_set == irh->stale_valid + 1);
-        irh->stale_set--;
-        rte_import(&c->c.in_req, r->net, r, r->src);
-        irh->stale_set++;
+        rte_import(&c->c.in_req, r->net, NULL, r->src);
+        continue;
       }
+
+      /* Route already marked as LLGR, do nothing */
+      if (ad && int_set_contains(ad, BGP_COMM_LLGR_STALE))
+        continue;
+
+      /* Mark the route as LLGR */
+      bgp_set_attr_ptr(&r->attrs, BA_COMMUNITY, flags, int_set_add(tmp_linpool, ad, BGP_COMM_LLGR_STALE));
+
+      /* We need to update the route but keep it stale. */
+      ASSERT_DIE(irh->stale_set == irh->stale_valid + 1);
+      irh->stale_set--;
+      rte_import(&c->c.in_req, r->net, r, r->src);
+      irh->stale_set++;
     }
-    else
-    {
-      proto_send_event(c->c.proto, &c->stale_event);
-      return;
-    }
+
+    MAYBE_DEFER_TASK(proto_event_list(c->c.proto), &c->stale_event,
+                     "BGP %s.%s LLGR updater", c->c.proto->name, c->c.name);
+  }
   rt_feeder_unsubscribe(&c->stale_feed);
 }