ev_schedule(p->attn);
}
- proto_flush_hooks(p);
+/*
+ * The flushing loop is responsible for flushing routes and protocols
+ * after they go down. It runs in proto_flush_event. At the start of
+ * each round, protocols waiting to be flushed are marked in
+ * proto_schedule_flush_loop(). At the end of the round (when the
+ * routing table flush is complete), the marked protocols are flushed
+ * and the next round may start.
+ */
+
+static int flush_loop_state; /* 1 -> running */
+
+/*
+ * Start a flush round unless one is already in progress: schedule
+ * pruning of the routing tables, mark every protocol currently on
+ * flush_proto_list as flushing, and kick proto_flush_event so that
+ * proto_flush_loop() runs.  Idempotent while a round is active
+ * (guarded by flush_loop_state).
+ */
+static void
+proto_schedule_flush_loop(void)
+{
+ struct proto *p;
+
+ if (flush_loop_state)
+ return;
+ flush_loop_state = 1;
+
+ rt_schedule_prune_all();
+ WALK_LIST(p, flush_proto_list)
+ p->flushing = 1;
+
+ ev_schedule(proto_flush_event);
+}
+
+/*
+ * One step of the flush round, run from proto_flush_event.  First it
+ * waits for table pruning to finish, rescheduling itself while
+ * rt_prune_loop() reports unfinished work; then it flushes every
+ * protocol marked by proto_schedule_flush_loop().  After flushing a
+ * protocol the list walk is restarted (`goto again`) because
+ * proto_relink() can modify the list being walked.  When the round is
+ * done, a new one is started if protocols are still waiting.
+ */
+static void
+proto_flush_loop(void *unused UNUSED)
+{
+ struct proto *p;
+
+ if (! rt_prune_loop())
+ {
+ /* Rtable pruning is not finished */
+ ev_schedule(proto_flush_event);
+ return;
+ }
+
+ again:
+ WALK_LIST(p, flush_proto_list)
+ if (p->flushing)
+ {
+ /* This will flush interfaces in the same manner
+ as rt_prune_all() flushes routes */
+ if (p->proto == &proto_unix_iface)
+ if_flush_ifaces(p);
+
+ DBG("Flushing protocol %s\n", p->name);
+ p->flushing = 0;
+ p->core_state = FS_HUNGRY;
+ proto_relink(p);
+ if (p->proto_state == PS_DOWN)
+ proto_fell_down(p);
+ goto again;
+ }
+
+ /* This round finished, perhaps there will be another one */
+ flush_loop_state = 0;
+ if (!EMPTY_LIST(flush_proto_list))
+ proto_schedule_flush_loop();
+}
+
+/*
+ * Move a protocol into FS_FLUSHING: abort feeding if it is still in
+ * progress, relink the protocol (presumably onto flush_proto_list --
+ * confirm in proto_relink()), detach its announce hooks, and make
+ * sure a flush round is scheduled.
+ */
+static void
+proto_schedule_flush(struct proto *p)
+{
+ /* Need to abort feeding */
+ if (p->core_state == FS_FEEDING)
+ rt_feed_baby_abort(p);
+
+ DBG("%s: Scheduling flush\n", p->name);
+ p->core_state = FS_FLUSHING;
+ proto_relink(p);
-
++ proto_unlink_ahooks(p);
+ proto_schedule_flush_loop();
+}
+
+
/**
* proto_request_feeding - request feeding routes to the protocol
* @p: given protocol
#ifdef DEBUGGING
fib_check(&tab->fib);
#endif
- FIB_ITERATE_INIT(&fit, &tab->fib);
+
+ if (tab->prune_state == 0)
+ return 1;
+
+ if (tab->prune_state == 1)
+ {
+ FIB_ITERATE_INIT(fit, &tab->fib);
+ tab->prune_state = 2;
+ }
+
again:
- FIB_ITERATE_START(&tab->fib, &fit, f)
+ FIB_ITERATE_START(&tab->fib, fit, fn)
{
- net *n = (net *) f;
+ net *n = (net *) fn;
rte *e;
- ncnt++;
+
rescan:
- for (e=n->routes; e; e=e->next, rcnt++)
+ for (e=n->routes; e; e=e->next)
- if (e->sender->core_state != FS_HAPPY &&
- e->sender->core_state != FS_FEEDING)
+ if (e->sender->proto->core_state != FS_HAPPY &&
+ e->sender->proto->core_state != FS_FEEDING)
{
+ if (*max_feed <= 0)
+ {
+ FIB_ITERATE_PUT(fit, fn);
+ return 0;
+ }
+
rte_discard(tab, e);
- rdel++;
+ (*max_feed)--;
+
goto rescan;
}
- if (!n->routes) /* Orphaned FIB entry? */
+ if (!n->routes) /* Orphaned FIB entry */
{
- FIB_ITERATE_PUT(&fit, f);
- fib_delete(&tab->fib, f);
- ndel++;
+ FIB_ITERATE_PUT(fit, fn);
+ fib_delete(&tab->fib, fn);
goto again;
}
}
rte_free(e);
}
- struct filter *filter = p->p.out_filter;
+/*
+ * Decide whether route *new should be exported to the kernel by
+ * running the kernel protocol's export filter on it.  Returns 1 to
+ * export, 0 to drop (NULL route, FILTER_REJECT, or filter rejection).
+ * NOTE(review): f_run() may rewrite *new and *tmpa in place
+ * (FF_FORCE_TMPATTR) -- callers must use the values left behind.
+ */
+static inline int
+krt_export_rte(struct krt_proto *p, rte **new, ea_list **tmpa)
+{
++ struct filter *filter = p->p.main_ahook->out_filter;
+
+ if (! *new)
+ return 0;
+
+ if (filter == FILTER_REJECT)
+ return 0;
+
+ if (filter == FILTER_ACCEPT)
+ return 1;
+
+ /* Build temporary attributes for the source protocol, then filter;
+ any verdict up to F_ACCEPT means the route passes. */
+ struct proto *src = (*new)->attrs->proto;
+ *tmpa = src->make_tmp_attrs ? src->make_tmp_attrs(*new, krt_filter_lp) : NULL;
+ return f_run(filter, new, tmpa, krt_filter_lp, FF_FORCE_TMPATTR) <= F_ACCEPT;
+}
+
static void
krt_prune(struct krt_proto *p)
{