/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>
#include <futex-internal.h>

#include <stap-probe.h>

#ifndef lll_clocklock_elision
#define lll_clocklock_elision(futex, adapt_count, clockid, abstime, private) \
  __futex_clocklock64 (&(futex), clockid, abstime, private)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
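
/* The fallback definitions above are used when the architecture does not
   provide lock-elision primitives: the elision variants degrade to the
   plain futex-based clocklock and trylock paths, and FORCE_ELISION becomes
   a no-op.  */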

int
__pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
                                  clockid_t clockid,
                                  const struct __timespec64 *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = __futex_clocklock64 (&mutex->__data.__lock, clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
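    /* The `unused' attribute on the label below keeps the compiler from
       warning when FORCE_ELISION expands to nothing and the label is
       therefore never the target of a goto.  */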
    elision: __attribute__((unused))
      /* Don't record ownership */
      return lll_clocklock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));


    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = __futex_clocklock64 (&mutex->__data.__lock,
                                                clockid, abstime,
                                                PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

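          /* Adapt the spin limit for the next contended acquisition: move
             __spins an eighth of the way toward the number of iterations
             this acquisition needed (an exponentially weighted moving
             average).  */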
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                  | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          /* We are about to block; check whether the timeout is invalid.  */
          if (! valid_nanoseconds (abstime->tv_nsec))
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex.  */
          int err = __futex_abstimed_wait64 (
              (unsigned int *) &mutex->__data.__lock,
              oldval, clockid, abstime,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
          if (err == ETIMEDOUT || err == EOVERFLOW)
            return err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        /* Currently the FUTEX_LOCK_PI futex operation only supports
           CLOCK_REALTIME; emulating other clocks by converting a
           CLOCK_MONOTONIC timeout to CLOCK_REALTIME would be affected by
           changes to the wall clock.  */
        if (__glibc_unlikely (clockid != CLOCK_REALTIME))
          return EINVAL;

        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

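        /* Try the uncontended fast path in user space: CAS the lock word
           from 0 (unlocked) to our TID.  If the mutex is already held, fall
           through to FUTEX_LOCK_PI below and let the kernel queue us and
           apply priority inheritance.  */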
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = futex_lock_pi64 (&mutex->__data.__lock, abstime, private);
            if (e == ETIMEDOUT)
              return ETIMEDOUT;
            else if (e == ESRCH || e == EDEADLK)
              {
                assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (e != ESRCH || !robust);

                /* Delay the thread until the timeout is reached.  Then return
                   ETIMEDOUT.  */
                do
                  e = __futex_abstimed_wait64 (&(unsigned int){0}, 0, clockid,
                                               abstime, private);
                while (e != ETIMEDOUT);
                return ETIMEDOUT;
              }
            else if (e != 0)
              return e;

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

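        /* From here on the lock word encodes the priority ceiling in the
           PTHREAD_MUTEX_PRIO_CEILING_MASK bits and the lock state in the
           low bits: CEILVAL means unlocked, CEILVAL | 1 locked without
           known waiters, and CEILVAL | 2 locked with potential waiters
           that must be woken on unlock.  */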
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (! valid_nanoseconds (abstime->tv_nsec))
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    int e = __futex_abstimed_wait64 (
                      (unsigned int *) &mutex->__data.__lock, ceilval | 2,
                      clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
                    if (e == ETIMEDOUT || e == EOVERFLOW)
                      return e;
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}

int
__pthread_mutex_clocklock64 (pthread_mutex_t *mutex,
                             clockid_t clockid,
                             const struct __timespec64 *abstime)
{
  if (__glibc_unlikely (!futex_abstimed_supported_clockid (clockid)))
    return EINVAL;

  LIBC_PROBE (mutex_clocklock_entry, 3, mutex, clockid, abstime);
  return __pthread_mutex_clocklock_common (mutex, clockid, abstime);
}

#if __TIMESIZE != 64
libpthread_hidden_def (__pthread_mutex_clocklock64)

int
__pthread_mutex_clocklock (pthread_mutex_t *mutex,
                           clockid_t clockid,
                           const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return __pthread_mutex_clocklock64 (mutex, clockid, &ts64);
}
#endif
weak_alias (__pthread_mutex_clocklock, pthread_mutex_clocklock)

int
__pthread_mutex_timedlock64 (pthread_mutex_t *mutex,
                             const struct __timespec64 *abstime)
{
  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
  return __pthread_mutex_clocklock_common (mutex, CLOCK_REALTIME, abstime);
}

#if __TIMESIZE != 64
libpthread_hidden_def (__pthread_mutex_timedlock64)

int
__pthread_mutex_timedlock (pthread_mutex_t *mutex,
                           const struct timespec *abstime)
{
  struct __timespec64 ts64 = valid_timespec_to_timespec64 (*abstime);

  return __pthread_mutex_timedlock64 (mutex, &ts64);
}
#endif
weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)
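
/* A minimal usage sketch of the clocked-lock entry point implemented above,
   assuming a program built against a glibc that exports
   pthread_mutex_clocklock (added in glibc 2.30).  Kept under "#if 0" so it
   is never compiled as part of the library.  */
#if 0
# include <pthread.h>
# include <time.h>
# include <errno.h>
# include <stdio.h>

static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;

static int
lock_with_deadline (void)
{
  /* Measure the deadline against CLOCK_MONOTONIC so changes to the wall
     clock do not affect how long we wait.  */
  struct timespec deadline;
  clock_gettime (CLOCK_MONOTONIC, &deadline);
  deadline.tv_sec += 2;		/* Give up after two seconds.  */

  int err = pthread_mutex_clocklock (&example_lock, CLOCK_MONOTONIC,
                                     &deadline);
  if (err == ETIMEDOUT)
    fprintf (stderr, "lock not acquired within the deadline\n");
  return err;
}
#endif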