/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>
#include <futex-internal.h>

#include <stap-probe.h>

#ifndef lll_clocklock_elision
#define lll_clocklock_elision(futex, adapt_count, clockid, abstime, private) \
  __futex_clocklock64 (&(futex), clockid, abstime, private)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
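
/* Shared implementation of the timed-lock entry points defined at the end
   of this file.  It dispatches on the mutex kind and returns 0 on success
   or a positive error code such as ETIMEDOUT, EDEADLK, EAGAIN, EOWNERDEAD,
   ENOTRECOVERABLE or EINVAL, which the callers pass through to the
   application.  */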
static int
__pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
                                  clockid_t clockid,
                                  const struct __timespec64 *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership */
      return lll_clocklock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
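      /* Since the elided path returns here without writing __owner or
         __nusers, a successfully elided acquisition leaves those fields
         untouched; the matching unlock has to take the elision path as
         well.  */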

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = __futex_clocklock64 (&mutex->__data.__lock,
                                                clockid, abstime,
                                                PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);
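
          /* Update the moving average of spins needed: each acquisition
             pulls __spins an eighth of the way toward the count just
             observed, so the spin budget adapts to recent contention.  */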
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
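
      /* Note that this is only a compiler barrier: the kernel walks the
         robust list only after this thread has exited, so no ordering
         against other threads is needed here; what matters is that the
         compiler does not sink the op_pending store past the lock
         operation.  */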

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
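
      /* Each iteration of the loop below distinguishes three cases: the
         lock word is 0 (free, try to grab it), the previous owner died
         (FUTEX_OWNER_DIED is set, try to take the mutex over), or the lock
         is held and we must block on the futex.  */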
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                  | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          /* We are about to block; check whether the timeout is invalid.  */
          if (! valid_nanoseconds (abstime->tv_nsec))
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex.  */
          int err = __futex_abstimed_wait64 (
              (unsigned int *) &mutex->__data.__lock,
              oldval, clockid, abstime,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
          if (err == ETIMEDOUT || err == EOVERFLOW)
            return err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        /* Currently futex FUTEX_LOCK_PI operation only provides support for
           CLOCK_REALTIME and trying to emulate by converting a
           CLOCK_MONOTONIC to CLOCK_REALTIME will take in account possible
           changes to the wall clock.  */
        if (__glibc_unlikely (clockid != CLOCK_REALTIME))
          return EINVAL;

        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }
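
        /* For PI mutexes the kernel arbitrates ownership: the futex word
           holds the owner's TID, and contention is resolved by the
           FUTEX_LOCK_PI operation, which also applies priority inheritance
           while we are blocked.  */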
        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  The timeout value must be a relative value.
               Convert it.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = futex_lock_pi64 (&mutex->__data.__lock, abstime, private);
            if (e == ETIMEDOUT)
              return ETIMEDOUT;
            else if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread until the timeout is reached.  Then return
                   ETIMEDOUT.  */
                do
                  e = __futex_abstimed_wait64 (&(unsigned int){0}, 0, clockid,
                                               abstime, private);
                while (e != ETIMEDOUT);
                return ETIMEDOUT;
              }
            else if (e != 0)
              return e;

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */
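
    /* Priority-protected mutexes (POSIX PTHREAD_PRIO_PROTECT) are handled
       in user space: before each acquisition attempt below the thread
       raises its own priority to the mutex's ceiling via
       __pthread_tpp_change_priority.  */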
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        int oldprio = -1, ceilval;
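        /* In the lock word the upper bits hold the priority ceiling and the
           two low bits the state: ceilval means unlocked, ceilval | 1
           locked without waiters, and ceilval | 2 locked with possible
           waiters.  */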
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (! valid_nanoseconds (abstime->tv_nsec))
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    int e = __futex_abstimed_wait64 (
                      (unsigned int *) &mutex->__data.__lock, ceilval | 2,
                      clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
                    if (e == ETIMEDOUT || e == EOVERFLOW)
                      return e;
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}
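
/* The exported entry points below validate their arguments and forward to
   the common implementation above; pthread_mutex_timedlock is simply
   pthread_mutex_clocklock on CLOCK_REALTIME.  */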

int
__pthread_mutex_clocklock64 (pthread_mutex_t *mutex,
                             clockid_t clockid,
                             const struct __timespec64 *abstime)
{
  if (__glibc_unlikely (!futex_abstimed_supported_clockid (clockid)))
    return EINVAL;

  LIBC_PROBE (mutex_clocklock_entry, 3, mutex, clockid, abstime);
  return __pthread_mutex_clocklock_common (mutex, clockid, abstime);
}

#if __TIMESIZE != 64
libpthread_hidden_def (__pthread_mutex_clocklock64)

int
__pthread_mutex_clocklock (pthread_mutex_t *mutex,
                           clockid_t clockid,
                           const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return __pthread_mutex_clocklock64 (mutex, clockid, &ts64);
}
#endif
weak_alias (__pthread_mutex_clocklock, pthread_mutex_clocklock)

int
__pthread_mutex_timedlock64 (pthread_mutex_t *mutex,
                             const struct __timespec64 *abstime)
{
  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
  return __pthread_mutex_clocklock_common (mutex, CLOCK_REALTIME, abstime);
}

#if __TIMESIZE != 64
libpthread_hidden_def (__pthread_mutex_timedlock64)

int
__pthread_mutex_timedlock (pthread_mutex_t *mutex,
                           const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return __pthread_mutex_timedlock64 (mutex, &ts64);
}
#endif
weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)