/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>

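/* If the architecture does not provide lock elision, these fallbacks
   turn the elision hooks into the plain lock operations, and
   FORCE_ELISION expands to nothing.  */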
#ifndef lll_clocklock_elision
#define lll_clocklock_elision(futex, adapt_count, clockid, abstime, private) \
  lll_clocklock (futex, clockid, abstime, private)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif

int
__pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
                                  clockid_t clockid,
                                  const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_clocklock (mutex->__data.__lock, clockid, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = lll_clocklock (mutex->__data.__lock, clockid, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership.  */
      return lll_clocklock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    clockid, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));


    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_clocklock (mutex->__data.__lock,
                                          clockid, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

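          /* Adapt the spin estimate: move __spins one eighth of the way
             toward the spin count this acquisition needed, so future
             attempts spin about as long as recent contention required.  */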
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

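      /* From here on, if this thread dies, the kernel walks its robust
         list (including list_op_pending) and sets FUTEX_OWNER_DIED in the
         lock word so that another thread can recover the mutex.  */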
      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                  | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          /* We are about to block; check whether the timeout is invalid.  */
          if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex.  */
          int err = lll_futex_clock_wait_bitset (&mutex->__data.__lock,
                                                 oldval, clockid, abstime,
                                                 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
          if (err == -ETIMEDOUT)
            return -err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes an absolute timeout, so
               ABSTIME can be passed to the kernel unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, clockid,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (__nanosleep_nocancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        int oldprio = -1, ceilval;
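        /* The lock word combines the priority ceiling (in the high bits)
           with the lock state (in the low bits): CEILVAL means unlocked,
           CEILVAL | 1 locked with no waiters, and CEILVAL | 2 locked with
           possible waiters, mirroring the 0/1/2 states of a simple futex
           lock.  */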
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}
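
/* The two exported entry points below share the common implementation
   above; __pthread_mutex_clocklock first checks that the requested clock
   is one the futex interface can wait on, while the POSIX
   __pthread_mutex_timedlock is fixed to CLOCK_REALTIME.  */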

int
__pthread_mutex_clocklock (pthread_mutex_t *mutex,
                           clockid_t clockid,
                           const struct timespec *abstime)
{
  if (__glibc_unlikely (!lll_futex_supported_clockid (clockid)))
    return EINVAL;

  LIBC_PROBE (mutex_clocklock_entry, 3, mutex, clockid, abstime);
  return __pthread_mutex_clocklock_common (mutex, clockid, abstime);
}
weak_alias (__pthread_mutex_clocklock, pthread_mutex_clocklock)

int
__pthread_mutex_timedlock (pthread_mutex_t *mutex,
                           const struct timespec *abstime)
{
  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);
  return __pthread_mutex_clocklock_common (mutex, CLOCK_REALTIME, abstime);
}
weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)
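
/* A minimal usage sketch (illustrative only, not compiled as part of
   this file), assuming glibc 2.30 or later where pthread_mutex_clocklock
   is exported; the function name and the two-second deadline are
   arbitrary choices:

     #define _GNU_SOURCE
     #include <errno.h>
     #include <pthread.h>
     #include <time.h>

     static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

     static int
     lock_with_deadline (void)
     {
       struct timespec deadline;
       clock_gettime (CLOCK_MONOTONIC, &deadline);
       deadline.tv_sec += 2;

       int err = pthread_mutex_clocklock (&m, CLOCK_MONOTONIC, &deadline);
       if (err == 0)
         pthread_mutex_unlock (&m);
       return err;
     }

   If the deadline passes before the mutex becomes available, the call
   returns ETIMEDOUT without acquiring the lock.  */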