From: matoro
Date: Wed, 13 Jul 2022 22:12:03 +0000 (-0400)
Subject: basic: replace __sync intrinsics with __atomic
X-Git-Tag: v252-rc1~611^2~2
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5ba4295bea5731d6085f97d40eb9b89abd1adb93;p=thirdparty%2Fsystemd.git

basic: replace __sync intrinsics with __atomic

Commented reasoning on why it's safe to replace __sync_bool_compare_and_swap
with __atomic_compare_exchange_n in each location, even though the latter has
an additional side effect: on failure it writes the observed value back into
the expected operand. __sync_synchronize should be equivalent to
__atomic_thread_fence with __ATOMIC_SEQ_CST memory ordering.
---

diff --git a/src/basic/sigbus.c b/src/basic/sigbus.c
index 95ecb60fe22..d570b1df478 100644
--- a/src/basic/sigbus.c
+++ b/src/basic/sigbus.c
@@ -28,24 +28,31 @@ static void sigbus_push(void *addr) {
         assert(addr);
 
         /* Find a free place, increase the number of entries and leave, if we can */
-        for (size_t u = 0; u < SIGBUS_QUEUE_MAX; u++)
-                if (__sync_bool_compare_and_swap(&sigbus_queue[u], NULL, addr)) {
-                        __sync_fetch_and_add(&n_sigbus_queue, 1);
+        for (size_t u = 0; u < SIGBUS_QUEUE_MAX; u++) {
+                /* OK to initialize this here since we haven't started the atomic ops yet */
+                void *tmp = NULL;
+                if (__atomic_compare_exchange_n(&sigbus_queue[u], &tmp, addr, false,
+                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+                        __atomic_fetch_add(&n_sigbus_queue, 1, __ATOMIC_SEQ_CST);
                         return;
                 }
+        }
 
         /* If we can't, make sure the queue size is out of bounds, to
          * mark it as overflow */
         for (;;) {
-                unsigned c;
+                sig_atomic_t c;
 
-                __sync_synchronize();
+                __atomic_thread_fence(__ATOMIC_SEQ_CST);
                 c = n_sigbus_queue;
 
                 if (c > SIGBUS_QUEUE_MAX) /* already overflow */
                         return;
 
-                if (__sync_bool_compare_and_swap(&n_sigbus_queue, c, c + SIGBUS_QUEUE_MAX))
+                /* OK if we clobber c here, since we either immediately return
+                 * or it will be immediately reinitialized on next loop */
+                if (__atomic_compare_exchange_n(&n_sigbus_queue, &c, c + SIGBUS_QUEUE_MAX, false,
+                                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                         return;
         }
 }
@@ -56,7 +63,7 @@ int sigbus_pop(void **ret) {
         for (;;) {
                 unsigned u, c;
 
-                __sync_synchronize();
+                __atomic_thread_fence(__ATOMIC_SEQ_CST);
                 c = n_sigbus_queue;
 
                 if (_likely_(c == 0))
@@ -72,8 +79,13 @@ int sigbus_pop(void **ret) {
                         if (!addr)
                                 continue;
 
-                        if (__sync_bool_compare_and_swap(&sigbus_queue[u], addr, NULL)) {
-                                __sync_fetch_and_sub(&n_sigbus_queue, 1);
+                        /* OK if we clobber addr here, since we either immediately return
+                         * or it will be immediately reinitialized on next loop */
+                        if (__atomic_compare_exchange_n(&sigbus_queue[u], &addr, NULL, false,
+                                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+                                __atomic_fetch_sub(&n_sigbus_queue, 1, __ATOMIC_SEQ_CST);
+                                /* If we successfully entered this if condition, addr won't
+                                 * have been modified since its assignment, so safe to use it */
                                 *ret = addr;
                                 return 1;
                         }
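
For reference, a minimal standalone sketch (a hypothetical test program, not
part of the patch) of the side effect the new comments guard against, assuming
the GCC/Clang __atomic builtins: on a failed exchange,
__atomic_compare_exchange_n writes the value it observed back into the
expected operand, which __sync_bool_compare_and_swap never did.

/* Hypothetical demo, not part of sigbus.c: shows the write-back ("clobber")
 * side effect of __atomic_compare_exchange_n on failure. */
#include <stdbool.h>
#include <stdio.h>

int main(void) {
        int value = 7;
        int expected = 0;   /* does not match `value`, so the exchange fails */

        bool swapped = __atomic_compare_exchange_n(&value, &expected, 42, false,
                                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);

        /* On failure the builtin stores the observed value (7) into `expected`.
         * This is why the patch reinitializes the expected value on every loop
         * iteration before retrying the CAS. */
        printf("swapped=%d value=%d expected=%d\n", swapped, value, expected);
        /* prints: swapped=0 value=7 expected=7 */

        return 0;
}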