/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>

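/* Editor's note (assumption based on the macro names): targets with lock
   elision provide their own definitions of these macros; the fallbacks
   below simply discard the elision-specific arguments, so on targets
   without elision an elidable mutex degrades to the plain low-level
   lock operations.  */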
#ifndef lll_timedlock_elision
#define lll_timedlock_elision(a,dummy,b,c) lll_timedlock(a, b, c)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif

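/* A caller-side sketch (not part of this file): the timeout is an
   absolute CLOCK_REALTIME value, so a relative delay must be added to
   the current time first:

     struct timespec abstime;
     clock_gettime (CLOCK_REALTIME, &abstime);
     abstime.tv_sec += 2;
     int err = pthread_mutex_timedlock (&mutex, &abstime);
     if (err == ETIMEDOUT)
       ... gave up after roughly two seconds ...  */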
int
__pthread_mutex_timedlock (pthread_mutex_t *mutex,
                           const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);

  /* We must not check ABSTIME here.  If the thread does not block
     abstime must not be checked for a valid value.  */

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership */
      return lll_timedlock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));


    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

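      /* Spin briefly in user space in the hope that the current owner
         releases the lock soon, and only fall back to the futex-based
         slow path once the adaptive spin budget is exhausted.  */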
      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

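          /* Adapt the spin budget: an exponential moving average with
             weight 1/8, so the budget drifts toward the number of
             iterations recent acquisitions actually needed.  */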
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
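      /* The lock word of a robust mutex encodes the owner's TID in the
         FUTEX_TID_MASK bits plus the FUTEX_WAITERS and FUTEX_OWNER_DIED
         flag bits; zero means unlocked.  The loop below operates on that
         whole word with compare-and-swap.  */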
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                  | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          /* We are about to block; check whether the timeout is invalid.  */
          if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

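          /* FUTEX_WAIT_BITSET with FUTEX_CLOCK_REALTIME takes an absolute
             CLOCK_REALTIME timeout, so ABSTIME can be passed to the kernel
             unconverted; a plain FUTEX_WAIT would expect a relative
             value.  */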
          /* Block using the futex.  */
          int err = lll_futex_timed_wait_bitset (&mutex->__data.__lock,
              oldval, abstime, FUTEX_CLOCK_REALTIME,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
          /* The futex call timed out.  */
          if (err == -ETIMEDOUT)
            return -err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI interprets the timeout as an
               absolute CLOCK_REALTIME value, so ABSTIME is passed
               through unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

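            /* With FUTEX_LOCK_PI the kernel queues this thread on the
               futex, lends it the waiter's priority to the current owner
               if necessary, and writes our TID into the lock word when
               ownership is transferred.  */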
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (__nanosleep_nocancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

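        /* For robust PI mutexes the kernel sets FUTEX_OWNER_DIED in the
           lock word if the previous owner died holding the lock; clear
           it and report EOWNERDEAD so the new owner can make the state
           consistent.  */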
        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

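        /* Lock word protocol for priority-protected mutexes: the upper
           bits hold the priority ceiling, the low bits the lock state
           (ceiling | 0 = free, | 1 = locked, | 2 = locked with possible
           waiters).  The outer loop retries if the ceiling changes
           underneath us.  */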
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

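  /* Common exit: record ownership for every path that fell through to
     here; the EOWNERDEAD paths above return directly so that __nusers
     does not count the dead owner.  */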
  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}
weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)