if (!c)
return;
- ASSERT(!c->obstacle_count);
+ ASSERT(!atomic_load_explicit(&c->obstacle_count, memory_order_relaxed));
rp_free(c->pool);
}
void
config_free_old(void)
{
- if (!old_config || old_config->obstacle_count)
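+ /* Acquire load pairs with the acq_rel decrement in config_del_obstacle() */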
+ if (!old_config || atomic_load_explicit(&old_config->obstacle_count, memory_order_acquire))
return;
tm_stop(config_timer);
void
config_add_obstacle(struct config *c)
{
- DBG("+++ adding obstacle %d\n", c->obstacle_count);
- atomic_fetch_add_explicit(&c->obstacle_count, 1, memory_order_acq_rel);
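+ /* atomic_fetch_add_explicit() returns the value before the increment */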
+ UNUSED int obs = atomic_fetch_add_explicit(&c->obstacle_count, 1, memory_order_acq_rel);
+ DBG("+++ adding obstacle %d\n", obs);
}
void
config_del_obstacle(struct config *c)
{
- DBG("+++ deleting obstacle %d\n", c->obstacle_count);
- if (atomic_fetch_sub_explicit(&c->obstacle_count, 1, memory_order_acq_rel) == 1)
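+ /* fetch_sub returns the pre-decrement value; 1 means we just removed the last obstacle */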
+ int obs = atomic_fetch_sub_explicit(&c->obstacle_count, 1, memory_order_acq_rel);
+ DBG("+++ deleting obstacle %d\n", obs);
+ if (obs == 1)
ev_send_loop(&main_birdloop, &c->done_event);
}
log(L_INFO "Reconfiguring");
if (old_config)
- old_config->obstacle_count++;
+ config_add_obstacle(old_config);
DBG("filter_commit\n");
filter_commit(c, old_config);
rt_commit(c, old_config);
DBG("protos_commit\n");
protos_commit(c, old_config, force_restart, type);
-
- int obs = 0;
- if (old_config)
- obs = --old_config->obstacle_count;
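+ /* atomic_fetch_sub_explicit() returns the pre-decrement value, hence the explicit -1 */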
+ int obs = old_config ?
+ (atomic_fetch_sub_explicit(&old_config->obstacle_count, 1, memory_order_acq_rel) - 1) : 0;
DBG("do_commit finished with %d obstacles remaining\n", obs);
return !obs;
rcu_gp_ongoing(_Atomic uint *ctl)
{
uint val = atomic_load(ctl);
- return (val & RCU_NEST_CNT) && ((val ^ rcu_gp_ctl) & RCU_GP_PHASE);
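+ /* Load the global control word atomically before comparing grace-period phases */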
+ return (val & RCU_NEST_CNT) && ((val ^ atomic_load_explicit(&rcu_gp_ctl, memory_order_acquire)) & RCU_GP_PHASE);
}
static void
/* Need to abort feeding */
c->reload_pending = 0;
- if (c->reload_req.hook && c->reload_req.hook->export_state != TES_STOP)
+ if (c->reload_req.hook && atomic_load_explicit(&c->reload_req.hook->export_state, memory_order_acquire) != TES_STOP)
rt_stop_export(&c->reload_req, channel_reload_stopped);
/* Stop export */
static void
channel_stop_export(struct channel *c)
{
- if (c->refeed_req.hook && (c->refeed_req.hook->export_state != TES_STOP))
+ if (c->refeed_req.hook && (atomic_load_explicit(&c->refeed_req.hook->export_state, memory_order_acquire) != TES_STOP))
rt_stop_export(&c->refeed_req, channel_refeed_stopped);
- if (c->out_req.hook && (c->out_req.hook->export_state != TES_STOP))
+ if (c->out_req.hook && (atomic_load_explicit(&c->out_req.hook->export_state, memory_order_acquire) != TES_STOP))
rt_stop_export(&c->out_req, channel_export_stopped);
}
const char *rt_export_state_name(u8 state);
static inline u8 rt_import_get_state(struct rt_import_hook *ih) { return ih ? ih->import_state : TIS_DOWN; }
-static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? eh->export_state : TES_DOWN; }
+static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? atomic_load_explicit(&eh->export_state, memory_order_acquire) : TES_DOWN; }
u8 rt_set_export_state(struct rt_export_hook *hook, u32 expected_mask, u8 state);
(e->flags & EALF_SORTED) ? 'S' : 's',
(e->flags & EALF_BISECT) ? 'B' : 'b',
(e->flags & EALF_CACHED) ? 'C' : 'c',
- s ? s->uc : 0, s ? s->hash_key : 0);
+ s ? atomic_load_explicit(&s->uc, memory_order_relaxed) : 0,
+ s ? s->hash_key : 0);
for(i=0; i<e->count; i++)
{
eattr *a = &e->attrs[i];
r->l->flags |= EALF_CACHED | huge;
r->hash_key = h;
- r->uc = 1;
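+ /* Release store so the fully initialized entry is visible before its use count is observed */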
+ atomic_store_explicit(&r->uc, 1, memory_order_release);
rta_insert(r);
ASSERT_DIE(c->out_req.hook);
rpe_mark_seen(c->out_req.hook, rpe);
- if (c->refeed_req.hook && (c->refeed_req.hook->export_state == TES_FEEDING))
+ if (c->refeed_req.hook && (atomic_load_explicit(&c->refeed_req.hook->export_state, memory_order_acquire) == TES_FEEDING))
rpe_mark_seen(c->refeed_req.hook, rpe);
if (rpe->old)
struct rt_pending_export *last_seen = last_seen_item ? SKIP_BACK(struct rt_pending_export, li, last_seen_item) : NULL;
while (last_seen && first && (first->seq <= last_seen->seq))
- first = first->next;
+ first = atomic_load_explicit(&first->next, memory_order_relaxed);
b->aux[b->pos++] = (struct rt_feed_block_aux) {
.start = b->cnt,
struct free_page *fp = local_page_stack;
if (fp)
{
- local_page_stack = fp->next;
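+ /* The local page stack is only touched by its owning thread, so a relaxed load of the link suffices */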
+ local_page_stack = atomic_load_explicit(&fp->next, memory_order_relaxed);
atomic_fetch_sub_explicit(&pages_kept_locally, 1, memory_order_relaxed);
pages_kept_here--;
UNPROTECT_PAGE(fp);
if (fp = PAGE_STACK_GET)
{
/* Reinstate the stack with the next page in list */
- PAGE_STACK_PUT(fp->next);
+ PAGE_STACK_PUT(atomic_load_explicit(&fp->next, memory_order_relaxed));
/* Update the counters */
UNUSED uint pk = atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
/* Release the page */
UNPROTECT_PAGE(fp);
- ajlog(fp, fp->next, pk, AJT_ALLOC_GLOBAL_HOT);
+ ajlog(fp, atomic_load_explicit(&fp->next, memory_order_relaxed), pk, AJT_ALLOC_GLOBAL_HOT);
return fp;
}
* Also, we need to know the last page. */
struct free_page *last = local_page_stack, *next;
int check_count = 1;
- while (next = last->next)
+ while (next = atomic_load_explicit(&last->next, memory_order_relaxed))
{
check_count++;
last = next;
/* Block the stack by a cork */
UNPROTECT_PAGE(last);
- last->next = PAGE_STACK_GET;
+ atomic_store_explicit(&last->next, PAGE_STACK_GET, memory_order_relaxed);
PROTECT_PAGE(last);
/* Update the stack */
do {
struct free_page *fp = stack;
- stack = fp->next;
+ stack = atomic_load_explicit(&fp->next, memory_order_relaxed);
LOCK_DOMAIN(resource, empty_pages_domain);
/* Empty pages are stored as pointers. To store them, we need a pointer block. */
while (stack)
{
struct free_page *f = stack;
- stack = f->next;
+ stack = atomic_load_explicit(&f->next, memory_order_acquire);
UNPROTECT_PAGE(f);
free_page(f);
{
case RF_APPEND:
rf_stat(r);
- r->pos = S_ISREG(r->stat.st_mode) ? r->stat.st_size : 0;
+ atomic_store_explicit(&r->pos, S_ISREG(r->stat.st_mode) ? r->stat.st_size : 0, memory_order_relaxed);
break;
case RF_FIXED:
pprev = &ol->next)
{
ol->new_mask = 0;
- if (ol->rf)
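+ /* Take one snapshot of the old channel's rfile for the checks below */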
+ struct rfile *orf = atomic_load_explicit(&ol->rf, memory_order_relaxed);
+ if (orf)
{
WALK_LIST(l, *logs)
- if (l->rf && rf_same(l->rf, ol->rf))
+ if (l->rf && rf_same(l->rf, orf))
{
/* Merge the mask */
ol->new_mask |= l->mask;
/* Find more */
+ struct rfile *crf = atomic_load_explicit(&lc->rf, memory_order_relaxed);
for (struct log_config *ll = NODE_NEXT(l); NODE_VALID(ll); ll = NODE_NEXT(ll))
- if (ll->filename && ll->rf && rf_same(lc->rf, ll->rf))
+ if (ll->filename && ll->rf && rf_same(crf, ll->rf))
{
/* Merged with this channel */
lc->new_mask |= ll->mask;
}
ll->rf = NULL;
}
}
else if (l->udp_port)
{
atomic_store_explicit(&ol->mask, ol->new_mask, memory_order_release);
/* Never close syslog channel or debug */
- if (ol->new_mask || (!ol->rf && !ol->udp_sk) || (ol->rf == dbg_rf))
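+ /* Snapshot rf and udp_sk for the keep-open check below */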
+ struct rfile *orf = atomic_load_explicit(&ol->rf, memory_order_relaxed);
+ sock *ousk = atomic_load_explicit(&ol->udp_sk, memory_order_relaxed);
+ if (ol->new_mask || (!orf && !ousk) || (orf == dbg_rf))
{
pprev = &ol->next;
ol = atomic_load_explicit(pprev, memory_order_acquire);