if (mutex->__data.__owner == id)
{
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
@@ ... @@
}
/* Check whether we already hold the mutex. */
- if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
{
int kind = PTHREAD_MUTEX_TYPE (mutex);
if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
@@ ... @@
NULL);
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
@@ ... @@
oldval = mutex->__data.__lock;
/* Check whether we already hold the mutex. */
- if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
{
if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
{
@@ ... @@
THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
@@ ... @@
oldval = mutex->__data.__lock;
}
- if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
+ if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
{
atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);
@@ ... @@
if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
{
/* Just bump the counter. */
- if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
/* Overflow of the counter. */
return EAGAIN;
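
For reference, __glibc_unlikely and __glibc_likely are thin wrappers around __builtin_expect from glibc's sys/cdefs.h. A rough sketch of the definitions, as assumed here (the exact compiler-version guard may differ between releases):

/* Branch-prediction hints: tell the compiler COND is almost always
   false (unlikely) or true (likely) so it can move the cold branch off
   the hot path.  Sketch of the sys/cdefs.h definition.  */
#if __GNUC__ >= 3
# define __glibc_unlikely(cond)  __builtin_expect ((cond), 0)
# define __glibc_likely(cond)    __builtin_expect ((cond), 1)
#else
# define __glibc_unlikely(cond)  (cond)
# define __glibc_likely(cond)    (cond)
#endif

Spelled this way, the intent reads directly from the condition, and the easy-to-misplace 0/1 second argument of __builtin_expect disappears from the call sites, which is the point of the conversion above.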