git.ipfire.org Git - thirdparty/systemd.git/commitdiff
basic: replace __sync intrinsics with __atomic
author: matoro <matoro@users.noreply.github.com>
Wed, 13 Jul 2022 22:12:03 +0000 (18:12 -0400)
committer: matoro <matoro@users.noreply.github.com>
Thu, 14 Jul 2022 21:34:15 +0000 (17:34 -0400)
Added comments explaining why it is safe to replace
__sync_bool_compare_and_swap with __atomic_compare_exchange_n at each
call site, even though the latter has an additional side effect: on a
failed comparison it writes the currently-stored value back into the
"expected" argument, clobbering the local variable passed by address.

__sync_synchronize should be equivalent to __atomic_thread_fence with
__ATOMIC_SEQ_CST memory ordering.

src/basic/sigbus.c

index 95ecb60fe225703139739e3dc53c6d17720f3fc7..d570b1df4783583bfce01464b61b3858c4a9dba7 100644 (file)
@@ -28,24 +28,31 @@ static void sigbus_push(void *addr) {
         assert(addr);
 
         /* Find a free place, increase the number of entries and leave, if we can */
-        for (size_t u = 0; u < SIGBUS_QUEUE_MAX; u++)
-                if (__sync_bool_compare_and_swap(&sigbus_queue[u], NULL, addr)) {
-                        __sync_fetch_and_add(&n_sigbus_queue, 1);
+        for (size_t u = 0; u < SIGBUS_QUEUE_MAX; u++) {
+                /* OK to initialize this here since we haven't started the atomic ops yet */
+                void *tmp = NULL;
+                if (__atomic_compare_exchange_n(&sigbus_queue[u], &tmp, addr, false,
+                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+                        __atomic_fetch_add(&n_sigbus_queue, 1, __ATOMIC_SEQ_CST);
                         return;
                 }
+        }
 
         /* If we can't, make sure the queue size is out of bounds, to
          * mark it as overflow */
         for (;;) {
-                unsigned c;
+                sig_atomic_t c;
 
-                __sync_synchronize();
+                __atomic_thread_fence(__ATOMIC_SEQ_CST);
                 c = n_sigbus_queue;
 
                 if (c > SIGBUS_QUEUE_MAX) /* already overflow */
                         return;
 
-                if (__sync_bool_compare_and_swap(&n_sigbus_queue, c, c + SIGBUS_QUEUE_MAX))
+                /* OK if we clobber c here, since we either immediately return
+                 * or it will be immediately reinitialized on next loop */
+                if (__atomic_compare_exchange_n(&n_sigbus_queue, &c, c + SIGBUS_QUEUE_MAX, false,
+                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                         return;
         }
 }
@@ -56,7 +63,7 @@ int sigbus_pop(void **ret) {
         for (;;) {
                 unsigned u, c;
 
-                __sync_synchronize();
+                __atomic_thread_fence(__ATOMIC_SEQ_CST);
                 c = n_sigbus_queue;
 
                 if (_likely_(c == 0))
@@ -72,8 +79,13 @@ int sigbus_pop(void **ret) {
                         if (!addr)
                                 continue;
 
-                        if (__sync_bool_compare_and_swap(&sigbus_queue[u], addr, NULL)) {
-                                __sync_fetch_and_sub(&n_sigbus_queue, 1);
+                        /* OK if we clobber addr here, since we either immediately return
+                         * or it will be immediately reinitialized on next loop */
+                        if (__atomic_compare_exchange_n(&sigbus_queue[u], &addr, NULL, false,
+                                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+                                __atomic_fetch_sub(&n_sigbus_queue, 1, __ATOMIC_SEQ_CST);
+                                /* If we successfully entered this if condition, addr won't
+                                 * have been modified since its assignment, so safe to use it */
                                 *ret = addr;
                                 return 1;
                         }