void
__nptl_free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
-  if (atomic_bit_test_set (&pd->cancelhandling, TERMINATED_BIT) == 0)
+  if ((atomic_fetch_or_relaxed (&pd->cancelhandling, TERMINATED_BITMASK)
+       & TERMINATED_BITMASK) == 0)
    {
      /* Free TPP data.  */
      if (pd->tpp != NULL)
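/* Equivalence note for the change above: atomic_bit_test_set returned
   the previous value of the bit, and atomic_fetch_or likewise returns
   the value the word held before the OR, so masking the return value
   recovers the same "was the bit already set?" answer.  A minimal
   sketch in plain C11 <stdatomic.h>; bit_test_set_via_fetch_or is an
   illustrative name, not a glibc function.  */
#include <stdatomic.h>
#include <stdbool.h>

static bool
bit_test_set_via_fetch_or (atomic_int *word, int mask)
{
  int old = atomic_fetch_or_explicit (word, mask, memory_order_relaxed);
  return (old & mask) != 0;   /* True iff the bit was already set.  */
}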
  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
-  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
+  atomic_fetch_or_relaxed (&pd->cancelhandling, EXITING_BITMASK);

  if (__glibc_unlikely (atomic_decrement_and_test (&__nptl_nthreads)))
    /* This was the last thread.  */
    exit (0);
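/* The tail of the hunk above (from start_thread) shows the
   thread-count accounting: atomic_decrement_and_test is glibc's
   helper that returns true when the decremented value reaches zero.
   A sketch of the same pattern in plain C11; nthreads and
   on_thread_exit are illustrative stand-ins, not glibc names.  */
#include <stdatomic.h>
#include <stdlib.h>

static atomic_uint nthreads = 1;

static void
on_thread_exit (void)
{
  /* fetch_sub returns the old value, so old == 1 means the new
     count is zero and this was the last thread.  */
  if (atomic_fetch_sub_explicit (&nthreads, 1, memory_order_relaxed) == 1)
    exit (0);
}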
#ifndef THREAD_ATOMIC_CMPXCHG_VAL
# define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, new, old) \
  atomic_compare_and_exchange_val_acq (&(descr)->member, new, old)
#endif
-#ifndef THREAD_ATOMIC_BIT_SET
-# define THREAD_ATOMIC_BIT_SET(descr, member, bit) \
-  atomic_bit_set (&(descr)->member, bit)
-#endif
-
-
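/* The hunk above deletes the THREAD_ATOMIC_BIT_SET override point;
   its generic fallback merely forwarded to atomic_bit_set, and with
   the call sites rewritten to atomic_fetch_or_relaxed the patch
   leaves it without users.  The *_BITMASK names used by the new code
   pair each flag bit with a derived mask, along these lines (bit
   numbers omitted here; the real definitions are in nptl/descr.h):

     #define EXITING_BIT        ...
     #define EXITING_BITMASK    (1 << EXITING_BIT)
     #define TERMINATED_BIT     ...
     #define TERMINATED_BITMASK (1 << TERMINATED_BIT)
*/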
static inline short
max_adaptive_count (void)
{
#if HAVE_TUNABLES
  return __mutex_aconf.spin_count;
#else
  return DEFAULT_ADAPTIVE_COUNT;
#endif
}

/* Called when a thread reacts on a cancellation request.  */
static inline void
__attribute ((noreturn, always_inline))
__do_cancel (void)
{
  struct pthread *self = THREAD_SELF;

  /* Make sure we get no more cancellations.  */
-  atomic_bit_set (&self->cancelhandling, EXITING_BIT);
+  atomic_fetch_or_relaxed (&self->cancelhandling, EXITING_BITMASK);

  __pthread_unwind ((__pthread_unwind_buf_t *)
                    THREAD_GETMEM (self, cleanup_jmp_buf));
}
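/* Why __do_cancel sets EXITING before unwinding: the cancellation
   paths test this flag and refuse to act on a thread already past
   the point of no return, so the unwind runs at most once.  A sketch
   under that assumption; cancel_allowed is an illustrative helper,
   not a glibc function.  */
static int
cancel_allowed (struct pthread *pd)
{
  return (atomic_load_relaxed (&pd->cancelhandling)
          & EXITING_BITMASK) == 0;
}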