int
__lll_lock_elision (int *lock, short *adapt_count, EXTRAARG int pshared)
{
- if (*adapt_count > 0)
+ /* adapt_count is accessed concurrently but is just a hint. Thus,
+ use atomic accesses but relaxed MO is sufficient. */
+ if (atomic_load_relaxed (adapt_count) > 0)
{
goto use_lock;
}
if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
{
if (aconf.skip_lock_internal_abort > 0)
- *adapt_count = aconf.skip_lock_internal_abort;
+ atomic_store_relaxed (adapt_count,
+ aconf.skip_lock_internal_abort);
goto use_lock;
}
}
/* Fall back to locks for a bit if retries have been exhausted.  */
if (aconf.try_tbegin > 0 && aconf.skip_lock_out_of_tbegin_retries > 0)
- *adapt_count = aconf.skip_lock_out_of_tbegin_retries;
+ atomic_store_relaxed (adapt_count,
+ aconf.skip_lock_out_of_tbegin_retries);
use_lock:
return LLL_LOCK ((*lock), pshared);
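The adapt_count hint above is read and written with glibc's atomic_load_relaxed / atomic_store_relaxed. As a rough, self-contained sketch of that pattern (assuming the macros expand to the GCC __atomic builtins with __ATOMIC_RELAXED, which is how glibc defines them on targets with compiler atomics; the function names here are invented for illustration):

/* Sketch only, not part of the patch: the adapt_count "skip elision for a
   few calls" hint, using relaxed-MO compiler builtins.  A stale value only
   costs performance, so no stronger ordering is needed.  */
#include <stdio.h>

static short adapt_count;       /* shared hint, read/written concurrently */

static int
should_try_elision (void)
{
  return __atomic_load_n (&adapt_count, __ATOMIC_RELAXED) == 0;
}

static void
skip_elision_for (short calls)
{
  __atomic_store_n (&adapt_count, calls, __ATOMIC_RELAXED);
}

int
main (void)
{
  skip_elision_for (3);                 /* e.g. after an internal abort */
  for (int i = 0; i < 5; i++)
    {
      if (should_try_elision ())
        puts ("attempt transactional elision");
      else
        {
          puts ("take the normal lock");
          short c = __atomic_load_n (&adapt_count, __ATOMIC_RELAXED);
          if (c > 0)
            __atomic_store_n (&adapt_count, c - 1, __ATOMIC_RELAXED);
        }
    }
  return 0;
}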
__libc_tabort (_ABORT_NESTED_TRYLOCK);
/* Only try a transaction if it's worth it. */
- if (*adapt_count > 0)
+ if (atomic_load_relaxed (adapt_count) > 0)
{
goto use_lock;
}
__libc_tend (0);
if (aconf.skip_lock_busy > 0)
- *adapt_count = aconf.skip_lock_busy;
+ atomic_store_relaxed (adapt_count, aconf.skip_lock_busy);
}
else
{
if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
{
/* A persistent failure indicates that a retry will probably
result in another failure. Use normal locking now and
for the next couple of calls. */
if (aconf.skip_trylock_internal_abort > 0)
- *adapt_count = aconf.skip_trylock_internal_abort;
+ atomic_store_relaxed (adapt_count,
+ aconf.skip_trylock_internal_abort);
}
}
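The persistent-failure branch above implements a back-off: when TEXASR reports a persistent abort, retrying is pointless, so elision is skipped for the next few calls. A minimal sketch of that decision, assuming GCC's PowerPC HTM builtins (__builtin_tbegin, __builtin_tabort, __builtin_get_texasru and the _TEXASRU_FAILURE_PERSISTENT macro from htmintrin.h, compiled with -mhtm); SKIP_CALLS is a made-up stand-in for aconf.skip_trylock_internal_abort:

/* Sketch only, not part of the patch.  Needs a powerpc64 target and -mhtm;
   the abort code 0 is arbitrary here.  */
#include <htmintrin.h>

#define SKIP_CALLS 3            /* hypothetical stand-in for the aconf field */

static short adapt_count;

static int
elide_trylock (int *lock)
{
  if (__atomic_load_n (&adapt_count, __ATOMIC_RELAXED) > 0)
    return 0;                   /* recent failures: take the real lock */

  if (__builtin_tbegin (0))
    {
      if (*lock == 0)
        return 1;               /* transactional; caller must __builtin_tend (0) */
      __builtin_tabort (0);     /* lock already held, abort the transaction */
    }
  else if (_TEXASRU_FAILURE_PERSISTENT (__builtin_get_texasru ()))
    /* A retry would almost certainly abort again, so make the next few
       calls use normal locking.  */
    __atomic_store_n (&adapt_count, SKIP_CALLS, __ATOMIC_RELAXED);

  return 0;                     /* caller falls back to the real lock */
}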
__libc_tend (0);
else
{
- lll_unlock ((*lock), pshared);
- /* Update the adapt count AFTER completing the critical section.
- Doing this here prevents unneeded stalling when entering
- a critical section. Saving about 8% runtime on P8. */
- if (*adapt_count > 0)
- (*adapt_count)--;
+ /* Update adapt_count in the critical section to prevent a
+ write-after-destroy error as mentioned in BZ 20822. The
+ following update of adapt_count has to be contained within
+ the critical region of the fall-back lock in order to not violate
+ the mutex destruction requirements. */
+ short __tmp = atomic_load_relaxed (adapt_count);
+ if (__tmp > 0)
+ atomic_store_relaxed (adapt_count, __tmp - 1);
+ lll_unlock ((*lock), pshared);
}
return 0;
}
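For context on the unlock change: the new comment relies on the POSIX rule that a mutex may be destroyed as soon as it is unlocked and no thread will lock it again (BZ 20822). Below is a minimal sketch of the user-level pattern that made the old code unsafe; the struct and thread names are invented for illustration, and the hazardous write corresponds to the adapt_count update the old code performed after lll_unlock (glibc keeps adapt_count inside the mutex object).

/* Sketch only, not part of the patch: valid POSIX usage under which a
   store into the mutex after releasing it becomes a write-after-destroy.
   Build with -pthread.  */
#include <pthread.h>
#include <stdlib.h>

struct shared
{
  pthread_mutex_t m;
  int payload;
};

static void *
last_user (void *arg)
{
  struct shared *s = arg;
  pthread_mutex_lock (&s->m);   /* waits until main () unlocks */
  s->payload = 1;
  pthread_mutex_unlock (&s->m);
  /* Legal per POSIX: no thread will lock s->m again.  Any late write into
     s->m by main's unlock path (e.g. an adapt_count update done after the
     lock word was released) would now hit freed memory.  */
  pthread_mutex_destroy (&s->m);
  free (s);
  return NULL;
}

int
main (void)
{
  struct shared *s = malloc (sizeof *s);
  pthread_mutex_init (&s->m, NULL);
  s->payload = 0;

  pthread_mutex_lock (&s->m);
  pthread_t t;
  pthread_create (&t, NULL, last_user, s);
  pthread_mutex_unlock (&s->m); /* old code touched adapt_count after this
                                   release; the fixed code does it before. */
  pthread_join (t, NULL);
  return 0;
}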