commit
7fec8a5de6826ef9ae440238d698f0fe5a5fb372
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date: Thu Nov 13 14:26:08 2025 -0300
Revert __HAVE_64B_ATOMICS configure check
The semaphore implementation uses 64-bit atomic operations on sem_t if 64-bit atomics are supported.
But sem_t may be aligned to 32-bit on 32-bit architectures.
1. Add a macro, SEM_T_ALIGN, for sem_t alignment.
2. Add a macro, HAVE_UNALIGNED_64B_ATOMICS. Define it if unaligned 64-bit
atomic operations are supported.
3. Add a macro, USE_64B_ATOMICS_ON_SEM_T. Define to 1 if 64-bit atomic
operations are supported and SEM_T_ALIGN is at least 8-byte aligned or
HAVE_UNALIGNED_64B_ATOMICS is defined.
4. Assert that size and alignment of sem_t are not lower than those of
the internal struct new_sem.
5. Check USE_64B_ATOMICS_ON_SEM_T, instead of USE_64B_ATOMICS, when using
64-bit atomic operations on sem_t.
This fixes BZ #33632.
Reviewed-by: Wilco Dijkstra <Wilco.Dijkstra@arm.com>
/* Define if SFrame v2 is enabled. */
#define ENABLE_SFRAME 0
+/* Define if unaligned 64-bit atomic operations are supported. */
+#undef HAVE_UNALIGNED_64B_ATOMICS
+
/* The default value of x86 CET control. */
#define DEFAULT_DL_X86_CET_CONTROL cet_elf_property
generated += abi-tag.h
# Put it here to generate it earlier.
-gen-as-const-headers += rtld-sizes.sym
+gen-as-const-headers += rtld-sizes.sym sem_t-align.sym
# These are the special initializer/finalizer files. They are always the
# first and last file in the link. crti.o ... crtn.o define the global
--- /dev/null
+/* Generate the SEM_T_ALIGN assembly/configure constant from the C
+   definition of sem_t (consumed via gen-as-const to produce
+   sem_t-align.h).  */
+#include <semaphore.h>
+
+--
+SEM_T_ALIGN __alignof (sem_t)
#endif
#include <tls.h>
+#include <semaphore.h>
+#include <atomic-sem_t.h>
/* Thread state. */
enum pthread_state
See nptl implementation for the details. */
struct new_sem
{
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
/* The data field holds both value (in the least-significant 32 bits) and
nwaiters. */
# if __BYTE_ORDER == __LITTLE_ENDIAN
#endif
};
+_Static_assert (sizeof (sem_t) >= sizeof (struct new_sem),
+ "sizeof (sem_t) >= sizeof (struct new_sem)");
+
+_Static_assert (__alignof (sem_t) >= __alignof (struct new_sem),
+ "__alignof (sem_t) >= __alignof (struct new_sem)");
+
extern int __sem_waitfast (struct new_sem *isem, int definitive_result);
#endif /* pt-internal.h */
necessary, use a stronger MO here and elsewhere (e.g., potentially
release MO in all places where we consume a token). */
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
*sval = atomic_load_relaxed (&isem->data) & SEM_VALUE_MASK;
#else
*sval = atomic_load_relaxed (&isem->value) >> SEM_VALUE_SHIFT;
struct new_sem *isem = (struct new_sem *) sem;
/* Use the values the caller provided. */
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
isem->data = value;
#else
isem->value = value << SEM_VALUE_SHIFT;
struct new_sem *isem = (struct new_sem *) sem;
int private = isem->private;
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
/* Add a token to the semaphore. We use release MO to make sure that a
thread acquiring this token synchronizes with us and other threads that
added tokens before (the release sequence includes atomic RMW operations
requirement because the semaphore must not be destructed while any sem_wait
is still executing. */
-#if !USE_64B_ATOMICS
+#if !USE_64B_ATOMICS_ON_SEM_T
static void
__sem_wait_32_finish (struct new_sem *sem);
#endif
{
struct new_sem *sem = (struct new_sem *) arg;
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
/* Stop being registered as a waiter. See below for MO. */
atomic_fetch_add_relaxed (&sem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
#else
{
int err;
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
err = __futex_abstimed_wait_cancelable64 (
(unsigned int *) &sem->data + SEM_VALUE_OFFSET, 0,
clockid, abstime,
synchronize memory); thus, relaxed MO is sufficient for the initial load
and the failure path of the CAS. If the weak CAS fails and we need a
definitive result, retry. */
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
uint64_t d = atomic_load_relaxed (&sem->data);
do
{
{
int err = 0;
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
/* Add a waiter. Relaxed MO is sufficient because we can rely on the
ordering provided by the RMW operations we use. */
uint64_t d = atomic_fetch_add_relaxed (&sem->data,
/* If there is no token, wait. */
if ((v >> SEM_VALUE_SHIFT) == 0)
{
- /* See USE_64B_ATOMICS variant. */
+ /* See USE_64B_ATOMICS_ON_SEM_T variant. */
err = do_futex_wait (sem, clockid, abstime);
if (err == ETIMEDOUT || err == EINTR)
{
}
/* Stop being a registered waiter (non-64b-atomics code only). */
-#if !USE_64B_ATOMICS
+#if !USE_64B_ATOMICS_ON_SEM_T
static void
__sem_wait_32_finish (struct new_sem *sem)
{
static inline void __new_sem_open_init (struct new_sem *sem, unsigned value)
{
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
sem->data = value;
#else
sem->value = value << SEM_VALUE_SHIFT;
puts ("sem_init failed");
return 1;
}
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
if ((u.ns.data >> SEM_NWAITERS_SHIFT) != 0)
#else
if (u.ns.nwaiters != 0)
goto again;
}
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
if ((u.ns.data >> SEM_NWAITERS_SHIFT) != 0)
#else
if (u.ns.nwaiters != 0)
TEST_VERIFY_EXIT (waitfn (&u.s, &ts) < 0);
TEST_COMPARE (errno, EINVAL);
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
unsigned int nwaiters = (u.ns.data >> SEM_NWAITERS_SHIFT);
#else
unsigned int nwaiters = u.ns.nwaiters;
errno = 0;
TEST_VERIFY_EXIT (waitfn (&u.s, &ts) < 0);
TEST_COMPARE (errno, ETIMEDOUT);
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
nwaiters = (u.ns.data >> SEM_NWAITERS_SHIFT);
#else
nwaiters = u.ns.nwaiters;
--- /dev/null
+/* Determine whether 64-bit atomic operations may be used on sem_t.
+   Copyright (C) 2025 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <https://www.gnu.org/licenses/>.  */
+
+#ifndef _ATOMIC_SEM_T_H
+#define _ATOMIC_SEM_T_H 1
+
+#include <atomic-machine.h>
+#include <sem_t-align.h>
+
+/* USE_64B_ATOMICS_ON_SEM_T is 1 when 64-bit atomic operations are safe
+   to apply to a sem_t: the architecture supports 64-bit atomics AND
+   either sem_t is at least 8-byte aligned or unaligned 64-bit atomics
+   are available (HAVE_UNALIGNED_64B_ATOMICS).  See BZ #33632.  */
+#if USE_64B_ATOMICS && (SEM_T_ALIGN >= 8 \
+			|| defined HAVE_UNALIGNED_64B_ATOMICS)
+# define USE_64B_ATOMICS_ON_SEM_T 1
+#else
+# define USE_64B_ATOMICS_ON_SEM_T 0
+#endif
+
+#endif /* atomic-sem_t.h */
{
struct new_sem *isem = (struct new_sem *) sem;
if (
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
atomic_load_relaxed (&isem->data) >> SEM_NWAITERS_SHIFT
#else
atomic_load_relaxed (&isem->value) & SEM_NWAITERS_MASK
{
struct new_sem *isem = (struct new_sem *) sem;
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
*value = atomic_load_relaxed (&isem->data) & SEM_VALUE_MASK;
#else
*value = atomic_load_relaxed (&isem->value) >> SEM_VALUE_SHIFT;
struct new_sem *isem = (struct new_sem *) sem;
int flags = isem->pshared ? GSYNC_SHARED : 0;
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
uint64_t d = atomic_load_relaxed (&isem->data);
do
#include <pt-internal.h>
#include <shlib-compat.h>
-#if !USE_64B_ATOMICS
+#if !USE_64B_ATOMICS_ON_SEM_T
static void
__sem_wait_32_finish (struct new_sem *isem);
#endif
{
struct new_sem *isem = arg;
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
atomic_fetch_add_relaxed (&isem->data, -((uint64_t) 1 << SEM_NWAITERS_SHIFT));
#else
__sem_wait_32_finish (isem);
int cancel_oldtype = LIBC_CANCEL_ASYNC();
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
uint64_t d = atomic_fetch_add_relaxed (&isem->data,
(uint64_t) 1 << SEM_NWAITERS_SHIFT);
return ret;
}
-#if !USE_64B_ATOMICS
+#if !USE_64B_ATOMICS_ON_SEM_T
/* Stop being a registered waiter (non-64b-atomics code only). */
static void
__sem_wait_32_finish (struct new_sem *isem)
int
__sem_waitfast (struct new_sem *isem, int definitive_result)
{
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
uint64_t d = atomic_load_relaxed (&isem->data);
do
#include <stdint.h>
#include <atomic.h>
#include <endian.h>
+#include <semaphore.h>
+#include <atomic-sem_t.h>
struct pthread_attr
/* Semaphore variable structure. */
struct new_sem
{
-#if USE_64B_ATOMICS
+#if USE_64B_ATOMICS_ON_SEM_T
/* The data field holds both value (in the least-significant 32 bits) and
nwaiters. */
# if __BYTE_ORDER == __LITTLE_ENDIAN
#endif
};
+_Static_assert (sizeof (sem_t) >= sizeof (struct new_sem),
+ "sizeof (sem_t) >= sizeof (struct new_sem)");
+
+_Static_assert (__alignof (sem_t) >= __alignof (struct new_sem),
+ "__alignof (sem_t) >= __alignof (struct new_sem)");
+
struct old_sem
{
unsigned int value;