#define DOMAIN(type) struct domain__##type
#define DOMAIN_ORDER(type) OFFSETOF(struct lock_order, type)
-#define DOMAIN_NEW(type) (DOMAIN(type)) { .type = domain_new(DOMAIN_ORDER(type), 1) }
-#define DOMAIN_NEW_RCU_SYNC(type) (DOMAIN(type)) { .type = domain_new(DOMAIN_ORDER(type), 0) }
-struct domain_generic *domain_new(uint order, bool allow_rcu);
+#define DOMAIN_NEW(type) (DOMAIN(type)) { .type = domain_new(DOMAIN_ORDER(type)) }
+struct domain_generic *domain_new(uint order);
#define DOMAIN_FREE(type, d) domain_free((d).type)
void domain_free(struct domain_generic *);
netindex_hash *
netindex_hash_new(pool *sp, event_list *cleanup_target, u8 type)
{
- DOMAIN(attrs) dom = DOMAIN_NEW_RCU_SYNC(attrs);
+ DOMAIN(attrs) dom = DOMAIN_NEW(attrs);
LOCK_DOMAIN(attrs, dom);
pool *p = rp_new(sp, dom.attrs, "Network index");
_Atomic u64 rcu_global_phase = RCU_GP_PHASE;
_Thread_local struct rcu_thread this_rcu_thread;
-_Thread_local uint rcu_blocked;
static struct rcu_thread * _Atomic rcu_thread_list = NULL;
void
synchronize_rcu(void)
{
- if (!rcu_blocked && (last_locked > &locking_stack.meta))
- bug("Forbidden to synchronize RCU unless an appropriate lock is taken");
-
/* Increment phase */
u64 phase = atomic_fetch_add_explicit(&rcu_global_phase, RCU_GP_PHASE, memory_order_acq_rel);
};
extern _Thread_local struct rcu_thread this_rcu_thread;
-extern _Thread_local uint rcu_blocked;
static inline void rcu_read_lock(void)
{
void
rta_init(void)
{
- attrs_domain = DOMAIN_NEW_RCU_SYNC(attrs);
+ attrs_domain = DOMAIN_NEW(attrs);
RTA_LOCK;
rta_pool = rp_new(&root_pool, attrs_domain.attrs, "Attributes");
pool *sp = birdloop_pool(loop);
/* Create the table domain and pool */
- DOMAIN(rtable) dom = DOMAIN_NEW_RCU_SYNC(rtable);
+ DOMAIN(rtable) dom = DOMAIN_NEW(rtable);
LOCK_DOMAIN(rtable, dom);
pool *p = rp_newf(sp, dom.rtable, "Routing table data %s", cf->name);
init_list(&routing_tables);
init_list(&deleted_routing_tables);
ev_init_list(&rt_cork.queue, &main_birdloop, "Route cork release");
- rt_cork.dom = DOMAIN_NEW_RCU_SYNC(resource);
+ rt_cork.dom = DOMAIN_NEW(resource);
idm_init(&rtable_idm, rt_table_pool, 256);
ea_register_init(&ea_roa_aggregated);
ASSERT_DIE(c->c.out_table == NULL);
ASSERT_DIE(c->tx == NULL);
- DOMAIN(rtable) dom = DOMAIN_NEW_RCU_SYNC(rtable);
+ DOMAIN(rtable) dom = DOMAIN_NEW(rtable);
LOCK_DOMAIN(rtable, dom);
pool *p = rp_newf(c->pool, dom.rtable, "%s.%s TX", c->c.proto->name, c->c.name);
/* Reinstate the stack with zero */
PAGE_STACK_PUT(NULL);
+ if (rcu_read_active())
+ {
+   /* While an RCU reader is active we must not take the lock (and ideally
+   * should not allocate at all, but fixing the allocation path is a quest
+   * for another day) -- so skip the cold-page cache and fall through to
+   * allocating fresh pages below. */
+ }
+ else
+ {
+
/* If there is any free page kept cold, we use that. */
LOCK_DOMAIN(resource, empty_pages_domain);
if (empty_pages) {
if (fp)
return fp;
+ }
+
/* And in the worst case, allocate some new pages by mmap() */
void *ptr = alloc_sys_page();
ajlog(ptr, NULL, 0, AJT_ALLOC_MMAP);
struct domain_generic {
pthread_mutex_t mutex;
uint order;
- bool forbidden_when_reading_rcu;
struct domain_generic **prev;
struct lock_order *locked_by;
const char *name;
pool *pool;
};
-#define DOMAIN_INIT(_order, _allow_rcu) { \
+#define DOMAIN_INIT(_order) { \
.mutex = PTHREAD_MUTEX_INITIALIZER, \
.order = _order, \
- .forbidden_when_reading_rcu = !_allow_rcu, \
}
-static struct domain_generic the_bird_domain_gen = DOMAIN_INIT(OFFSETOF(struct lock_order, the_bird), 1);
+static struct domain_generic the_bird_domain_gen = DOMAIN_INIT(OFFSETOF(struct lock_order, the_bird));
DOMAIN(the_bird) the_bird_domain = { .the_bird = &the_bird_domain_gen };
struct domain_generic *
-domain_new(uint order, bool allow_rcu)
+domain_new(uint order)
{
ASSERT_DIE(order < sizeof(struct lock_order));
struct domain_generic *dg = xmalloc(sizeof(struct domain_generic));
- *dg = (struct domain_generic) DOMAIN_INIT(order, allow_rcu);
+ *dg = (struct domain_generic) DOMAIN_INIT(order);
return dg;
}
memcpy(&stack_copy, &locking_stack, sizeof(stack_copy));
struct domain_generic **lll = last_locked;
- if (dg->forbidden_when_reading_rcu)
- if (rcu_read_active())
- bug("Locking of this lock forbidden while RCU reader is active");
- else
- rcu_blocked++;
+ if (rcu_read_active())
+ bug("Locking forbidden while RCU reader is active");
if ((char *) lsp - (char *) &locking_stack != dg->order)
bug("Trying to lock on bad position: order=%u, lsp=%p, base=%p", dg->order, lsp, &locking_stack);
void do_unlock(struct domain_generic *dg, struct domain_generic **lsp)
{
- if (dg->forbidden_when_reading_rcu)
- ASSERT_DIE(rcu_blocked--);
-
if ((char *) lsp - (char *) &locking_stack != dg->order)
bug("Trying to unlock on bad position: order=%u, lsp=%p, base=%p", dg->order, lsp, &locking_stack);
static struct birdloop *
birdloop_vnew_internal(pool *pp, uint order, struct birdloop_pickup_group *group, const char *name, va_list args)
{
- struct domain_generic *dg = domain_new(order, 1);
+ struct domain_generic *dg = domain_new(order);
DG_LOCK(dg);
pool *p = rp_vnewf(pp, dg, name, args);