git.ipfire.org Git - thirdparty/bird.git/commitdiff
Fixed all implicit seq_cst warnings caused by the previous commit
author: Maria Matejka <mq@ucw.cz>
Mon, 1 Apr 2024 14:01:26 +0000 (16:01 +0200)
committer: Maria Matejka <mq@ucw.cz>
Wed, 22 May 2024 09:34:34 +0000 (11:34 +0200)
conf/conf.c
lib/rcu.c
nest/proto.c
nest/route.h
nest/rt-attr.c
nest/rt-table.c
sysdep/unix/alloc.c
sysdep/unix/io.c
sysdep/unix/log.c

index c5ce6f5ac4d353cf7e11e3cc064cc32e35b3544f..8fdf4b37a64b4655c1155440323f8cf99d99a6b1 100644 (file)
@@ -202,7 +202,7 @@ config_free(struct config *c)
   if (!c)
     return;
 
-  ASSERT(!c->obstacle_count);
+  ASSERT(!atomic_load_explicit(&c->obstacle_count, memory_order_relaxed));
 
   rp_free(c->pool);
 }
@@ -218,7 +218,7 @@ config_free(struct config *c)
 void
 config_free_old(void)
 {
-  if (!old_config || old_config->obstacle_count)
+  if (!old_config || atomic_load_explicit(&old_config->obstacle_count, memory_order_acquire))
     return;
 
   tm_stop(config_timer);
@@ -231,15 +231,16 @@ config_free_old(void)
 void
 config_add_obstacle(struct config *c)
 {
-  DBG("+++ adding obstacle %d\n", c->obstacle_count);
-  atomic_fetch_add_explicit(&c->obstacle_count, 1, memory_order_acq_rel);
+  UNUSED int obs = atomic_fetch_add_explicit(&c->obstacle_count, 1, memory_order_acq_rel);
+  DBG("+++ adding obstacle %d\n", obs);
 }
 
 void
 config_del_obstacle(struct config *c)
 {
-  DBG("+++ deleting obstacle %d\n", c->obstacle_count);
-  if (atomic_fetch_sub_explicit(&c->obstacle_count, 1, memory_order_acq_rel) == 1)
+  int obs = atomic_fetch_sub_explicit(&c->obstacle_count, 1, memory_order_acq_rel);
+  DBG("+++ deleting obstacle %d\n", obs);
+  if (obs == 1)
     ev_send_loop(&main_birdloop, &c->done_event);
 }
 
@@ -294,7 +295,7 @@ config_do_commit(struct config *c, int type)
     log(L_INFO "Reconfiguring");
 
   if (old_config)
-    old_config->obstacle_count++;
+    config_add_obstacle(old_config);
 
   DBG("filter_commit\n");
   filter_commit(c, old_config);
@@ -307,10 +308,9 @@ config_do_commit(struct config *c, int type)
   rt_commit(c, old_config);
   DBG("protos_commit\n");
   protos_commit(c, old_config, force_restart, type);
-
-  int obs = 0;
-  if (old_config)
-    obs = --old_config->obstacle_count;
+  int obs = old_config ?
+    atomic_fetch_sub_explicit(&old_config->obstacle_count, 1, memory_order_acq_rel) - 1
+    : 0;
 
   DBG("do_commit finished with %d obstacles remaining\n", obs);
   return !obs;
index c4b4721b1b3cc671dde4816c6412c3ab27910e9d..3491b1eca170c8e9107f96cc24027ea5660048e7 100644 (file)
--- a/lib/rcu.c
+++ b/lib/rcu.c
@@ -29,7 +29,7 @@ static int
 rcu_gp_ongoing(_Atomic uint *ctl)
 {
   uint val = atomic_load(ctl);
-  return (val & RCU_NEST_CNT) && ((val ^ rcu_gp_ctl) & RCU_GP_PHASE);
+  return (val & RCU_NEST_CNT) && ((val ^ atomic_load_explicit(&rcu_gp_ctl, memory_order_acquire)) & RCU_GP_PHASE);
 }
 
 static void
index 789d964bd69c60518b9a42c96139796a2edc0158..5a8240e221d56cba3f447dd4eeabe92252902187 100644 (file)
@@ -1082,7 +1082,7 @@ channel_do_pause(struct channel *c)
   /* Need to abort feeding */
   c->reload_pending = 0;
 
-  if (c->reload_req.hook && c->reload_req.hook->export_state != TES_STOP)
+  if (c->reload_req.hook && atomic_load_explicit(&c->reload_req.hook->export_state, memory_order_acquire) != TES_STOP)
     rt_stop_export(&c->reload_req, channel_reload_stopped);
 
   /* Stop export */
@@ -1236,10 +1236,10 @@ channel_request_feeding_dynamic(struct channel *c, enum channel_feeding_request_
 static void
 channel_stop_export(struct channel *c)
 {
-  if (c->refeed_req.hook && (c->refeed_req.hook->export_state != TES_STOP))
+  if (c->refeed_req.hook && (atomic_load_explicit(&c->refeed_req.hook->export_state, memory_order_acquire) != TES_STOP))
     rt_stop_export(&c->refeed_req, channel_refeed_stopped);
 
-  if (c->out_req.hook && (c->out_req.hook->export_state != TES_STOP))
+  if (c->out_req.hook && (atomic_load_explicit(&c->out_req.hook->export_state, memory_order_acquire) != TES_STOP))
     rt_stop_export(&c->out_req, channel_export_stopped);
 }
 
index d92c4946da6165d62fbe4428b5546a45ca643ff8..351b74f7fd118ca49eccac41ae8137e7103f10ea 100644 (file)
@@ -415,7 +415,7 @@ const char *rt_import_state_name(u8 state);
 const char *rt_export_state_name(u8 state);
 
 static inline u8 rt_import_get_state(struct rt_import_hook *ih) { return ih ? ih->import_state : TIS_DOWN; }
-static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? eh->export_state : TES_DOWN; }
+static inline u8 rt_export_get_state(struct rt_export_hook *eh) { return eh ? atomic_load_explicit(&eh->export_state, memory_order_acquire) : TES_DOWN; }
 
 u8 rt_set_export_state(struct rt_export_hook *hook, u32 expected_mask, u8 state);
 
index fa30010678fabd7a5ee20ec017675d734844c659..b81b12d6d77d742ce6aa83b76cee0fd4afb9b2cd 100644 (file)
@@ -1516,7 +1516,8 @@ ea_dump(ea_list *e)
            (e->flags & EALF_SORTED) ? 'S' : 's',
            (e->flags & EALF_BISECT) ? 'B' : 'b',
            (e->flags & EALF_CACHED) ? 'C' : 'c',
-           s ? s->uc : 0, s ? s->hash_key : 0);
+           s ? atomic_load_explicit(&s->uc, memory_order_relaxed) : 0,
+           s ? s->hash_key : 0);
       for(i=0; i<e->count; i++)
        {
          eattr *a = &e->attrs[i];
@@ -1714,7 +1715,7 @@ ea_lookup(ea_list *o, int overlay)
 
   r->l->flags |= EALF_CACHED | huge;
   r->hash_key = h;
-  r->uc = 1;
+  atomic_store_explicit(&r->uc, 1, memory_order_release);
 
   rta_insert(r);
 
index 68799177e01464f766cfa23f9b509af8bbdf1059..27616c21d61ca57ba6bf35dc0e5e13934da06c12 100644 (file)
@@ -779,7 +779,7 @@ channel_rpe_mark_seen(struct channel *c, struct rt_pending_export *rpe)
   ASSERT_DIE(c->out_req.hook);
   rpe_mark_seen(c->out_req.hook, rpe);
 
-  if (c->refeed_req.hook && (c->refeed_req.hook->export_state == TES_FEEDING))
+  if (c->refeed_req.hook && (atomic_load_explicit(&c->refeed_req.hook->export_state, memory_order_acquire) == TES_FEEDING))
     rpe_mark_seen(c->refeed_req.hook, rpe);
 
   if (rpe->old)
@@ -3916,7 +3916,7 @@ rt_prepare_feed(struct rt_export_hook *c, net *n, rt_feed_block *b)
       struct rt_pending_export *last_seen = last_seen_item ? SKIP_BACK(struct rt_pending_export, li, last_seen_item) : NULL;
 
       while (last_seen && first && (first->seq <= last_seen->seq))
-       first = first->next;
+       first = atomic_load_explicit(&first->next, memory_order_relaxed);
 
       b->aux[b->pos++] = (struct rt_feed_block_aux) {
        .start = b->cnt,
index 6a2799939f1b24631a3f38eb1a5f1c6cb0523410..af7e66a7b4eef789de2cdae639aa5d994cb2fc6a 100644 (file)
@@ -180,7 +180,7 @@ alloc_page(void)
   struct free_page *fp = local_page_stack;
   if (fp)
   {
-    local_page_stack = fp->next;
+    local_page_stack = atomic_load_explicit(&fp->next, memory_order_relaxed);
     atomic_fetch_sub_explicit(&pages_kept_locally, 1, memory_order_relaxed);
     pages_kept_here--;
     UNPROTECT_PAGE(fp);
@@ -194,14 +194,14 @@ alloc_page(void)
   if (fp = PAGE_STACK_GET)
   {
     /* Reinstate the stack with the next page in list */
-    PAGE_STACK_PUT(fp->next);
+    PAGE_STACK_PUT(atomic_load_explicit(&fp->next, memory_order_relaxed));
 
     /* Update the counters */
     UNUSED uint pk = atomic_fetch_sub_explicit(&pages_kept, 1, memory_order_relaxed);
 
     /* Release the page */
     UNPROTECT_PAGE(fp);
-    ajlog(fp, fp->next, pk, AJT_ALLOC_GLOBAL_HOT);
+    ajlog(fp, atomic_load_explicit(&fp->next, memory_order_relaxed), pk, AJT_ALLOC_GLOBAL_HOT);
     return fp;
   }
 
@@ -302,7 +302,7 @@ flush_local_pages(void)
    * Also, we need to know the last page. */
   struct free_page *last = local_page_stack, *next;
   int check_count = 1;
-  while (next = last->next)
+  while (next = atomic_load_explicit(&last->next, memory_order_relaxed))
   {
     check_count++;
     last = next;
@@ -313,7 +313,7 @@ flush_local_pages(void)
 
   /* Block the stack by a cork */
   UNPROTECT_PAGE(last);
-  last->next = PAGE_STACK_GET;
+  atomic_store_explicit(&last->next, PAGE_STACK_GET, memory_order_relaxed);
   PROTECT_PAGE(last);
 
   /* Update the stack */
@@ -355,7 +355,7 @@ page_cleanup(void *_ UNUSED)
 
   do {
     struct free_page *fp = stack;
-    stack = fp->next;
+    stack = atomic_load_explicit(&fp->next, memory_order_relaxed);
 
     LOCK_DOMAIN(resource, empty_pages_domain);
     /* Empty pages are stored as pointers. To store them, we need a pointer block. */
@@ -397,7 +397,7 @@ page_cleanup(void *_ UNUSED)
   while (stack)
   {
     struct free_page *f = stack;
-    stack = f->next;
+    stack = atomic_load_explicit(&f->next, memory_order_acquire);
     UNPROTECT_PAGE(f);
     free_page(f);
 
index caaa994f22e732d32144cb59b8a11fd0f68feb6f..4c326f60f6496bd32652ee261e6152952c01fe6a 100644 (file)
@@ -151,7 +151,7 @@ rf_open(pool *p, const char *name, enum rf_mode mode, off_t limit)
   {
     case RF_APPEND:
       rf_stat(r);
-      r->pos = S_ISREG(r->stat.st_mode) ? r->stat.st_size : 0;
+      atomic_store_explicit(&r->pos, S_ISREG(r->stat.st_mode) ? r->stat.st_size : 0, memory_order_relaxed);
       break;
 
     case RF_FIXED:
index 60ecbf71b4c346fae87f19e41e35ed16f81a171a..f8ffb0de64d6de846a942ab824eb258c47c4701b 100644 (file)
@@ -565,10 +565,11 @@ log_switch(int initial, list *logs, const char *new_syslog_name)
       pprev = &ol->next)
   {
     ol->new_mask = 0;
-    if (ol->rf)
+    struct rfile *orf = atomic_load_explicit(&ol->rf, memory_order_relaxed);
+    if (orf)
     {
       WALK_LIST(l, *logs)
-       if (l->rf && rf_same(l->rf, ol->rf))
+       if (l->rf && rf_same(l->rf, orf))
        {
          /* Merge the mask */
          ol->new_mask |= l->mask;
@@ -657,7 +658,9 @@ log_switch(int initial, list *logs, const char *new_syslog_name)
 
       /* Find more */
       for (struct log_config *ll = NODE_NEXT(l); NODE_VALID(ll); ll = NODE_NEXT(ll))
-       if (ll->filename && ll->rf && rf_same(lc->rf, ll->rf))
+      {
+       struct rfile *crf = atomic_load_explicit(&lc->rf, memory_order_relaxed);
+       if (ll->filename && ll->rf && rf_same(crf, ll->rf))
        {
          /* Merged with this channel */
          lc->new_mask |= ll->mask;
@@ -671,6 +674,7 @@ log_switch(int initial, list *logs, const char *new_syslog_name)
          }
          ll->rf = NULL;
        }
+      }
     }
     else if (l->udp_port)
     {
@@ -765,7 +769,9 @@ resolve_fail:
     atomic_store_explicit(&ol->mask, ol->new_mask, memory_order_release);
 
     /* Never close syslog channel or debug */
-    if (ol->new_mask || (!ol->rf && !ol->udp_sk) || (ol->rf == dbg_rf))
+    struct rfile *orf = atomic_load_explicit(&ol->rf, memory_order_relaxed);
+    sock *ousk = atomic_load_explicit(&ol->udp_sk, memory_order_relaxed);
+    if (ol->new_mask || (!orf && !ousk) || (orf == dbg_rf))
     {
       pprev = &ol->next;
       ol = atomic_load_explicit(pprev, memory_order_acquire);