/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <shlib-compat.h>
#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <pthread-pi-defines.h>
#include <pthread-errnos.h>
#include <stap-probe.h>

#include <kernel-features.h>


        .text


/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
                               const struct timespec *abstime) */
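
/* Summary of the code below: take the condvar's internal lock, release
   MUTEX, account for this waiter in total_seq and cond_nwaiters, then
   block on cond_futex with an absolute timeout (FUTEX_WAIT_BITSET, plus
   FUTEX_CLOCK_REALTIME when the condvar uses the realtime clock; .Lreltmo
   below is the relative-timeout fallback for kernels without that
   support).  After waking, the sequence counters decide whether this was
   a signal, a timeout (ETIMEDOUT) or a spurious wakeup, and MUTEX is
   re-acquired before returning.  */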
        .globl __pthread_cond_timedwait
        .type __pthread_cond_timedwait, @function
        .align 16
__pthread_cond_timedwait:
.LSTARTCODE:
        cfi_startproc
#ifdef SHARED
        cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
                        DW.ref.__gcc_personality_v0)
        cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
#else
        cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
        cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
#endif

        pushq %r12
        cfi_adjust_cfa_offset(8)
        cfi_rel_offset(%r12, 0)
        pushq %r13
        cfi_adjust_cfa_offset(8)
        cfi_rel_offset(%r13, 0)
        pushq %r14
        cfi_adjust_cfa_offset(8)
        cfi_rel_offset(%r14, 0)
        pushq %r15
        cfi_adjust_cfa_offset(8)
        cfi_rel_offset(%r15, 0)
#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
# define FRAME_SIZE (32+8)
#else
# define FRAME_SIZE (48+8)
#endif
        subq $FRAME_SIZE, %rsp
        cfi_adjust_cfa_offset(FRAME_SIZE)
        cfi_remember_state

        LIBC_PROBE (cond_timedwait, 3, %rdi, %rsi, %rdx)

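        /* A timeout whose tv_nsec is outside [0, 1000000000) is invalid per
           POSIX; the unsigned comparison below also catches negative
           tv_nsec values and sends them to the EINVAL return at label 48.  */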
        cmpq $1000000000, 8(%rdx)
        movl $EINVAL, %eax
        jae 48f

        /* Stack frame:

           rsp + 48
                    +--------------------------+
           rsp + 32 | timeout value            |
                    +--------------------------+
           rsp + 24 | old wake_seq value       |
                    +--------------------------+
           rsp + 16 | mutex pointer            |
                    +--------------------------+
           rsp +  8 | condvar pointer          |
                    +--------------------------+
           rsp +  4 | old broadcast_seq value  |
                    +--------------------------+
           rsp +  0 | old cancellation mode    |
                    +--------------------------+
        */

        LP_OP(cmp) $-1, dep_mutex(%rdi)

        /* Prepare structure passed to cancellation handler.  */
        movq %rdi, 8(%rsp)
        movq %rsi, 16(%rsp)
        movq %rdx, %r13

        je 22f
        mov %RSI_LP, dep_mutex(%rdi)

22:
        xorb %r15b, %r15b

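        /* On kernels that lack FUTEX_CLOCK_REALTIME support for absolute
           futex timeouts, fall back to .Lreltmo, which repeatedly converts
           ABSTIME into a relative timeout and uses plain FUTEX_WAIT.  */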
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
# ifdef PIC
        cmpl $0, __have_futex_clock_realtime(%rip)
# else
        cmpl $0, __have_futex_clock_realtime
# endif
        je .Lreltmo
#endif

        /* Get internal lock.  */
        movl $1, %esi
        xorl %eax, %eax
        LOCK
#if cond_lock == 0
        cmpxchgl %esi, (%rdi)
#else
        cmpxchgl %esi, cond_lock(%rdi)
#endif
        jnz 31f

        /* Unlock the mutex.  */
32:     movq 16(%rsp), %rdi
        xorl %esi, %esi
        callq __pthread_mutex_unlock_usercnt

        testl %eax, %eax
        jne 46f

        movq 8(%rsp), %rdi
        incq total_seq(%rdi)
        incl cond_futex(%rdi)
        addl $(1 << nwaiters_shift), cond_nwaiters(%rdi)

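        /* Sequence-counter bookkeeping used below: total_seq counts waiters
           that ever started a wait, wakeup_seq counts wakeups handed out,
           woken_seq counts waiters that actually consumed a wakeup, and
           broadcast_seq counts broadcasts.  cond_nwaiters holds the number
           of current waiters shifted by nwaiters_shift, with the condvar's
           clock selection kept in the low bits.  */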
        /* Get and store current wakeup_seq value.  */
        movq 8(%rsp), %rdi
        movq wakeup_seq(%rdi), %r9
        movl broadcast_seq(%rdi), %edx
        movq %r9, 24(%rsp)
        movl %edx, 4(%rsp)

        cmpq $0, (%r13)
        movq $-ETIMEDOUT, %r14
        js 36f

38:     movl cond_futex(%rdi), %r12d

        /* Unlock.  */
        LOCK
#if cond_lock == 0
        decl (%rdi)
#else
        decl cond_lock(%rdi)
#endif
        jne 33f

.LcleanupSTART1:
34:     callq __pthread_enable_asynccancel
        movl %eax, (%rsp)

        movq %r13, %r10
        movl $FUTEX_WAIT_BITSET, %esi
        LP_OP(cmp) $-1, dep_mutex(%rdi)
        je 60f

        mov dep_mutex(%rdi), %R8_LP
        /* Requeue to a non-robust PI mutex if the PI bit is set and
           the robust bit is not set.  */
        movl MUTEX_KIND(%r8), %eax
        andl $(ROBUST_BIT|PI_BIT), %eax
        cmpl $PI_BIT, %eax
        jne 61f

        movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %esi
        xorl %eax, %eax
        /* The following only works like this because we only support
           two clocks, represented using a single bit.  */
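        /* The clock choice lives in the low nwaiters bit: when it is clear
           the condvar measures ABSTIME against CLOCK_REALTIME, so
           FUTEX_CLOCK_REALTIME is or'ed into the futex op; otherwise the
           kernel's default CLOCK_MONOTONIC interpretation of the absolute
           timeout is used.  */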
        testl $1, cond_nwaiters(%rdi)
        movl $FUTEX_CLOCK_REALTIME, %edx
        cmove %edx, %eax
        orl %eax, %esi
        movq %r12, %rdx
        addq $cond_futex, %rdi
        movl $SYS_futex, %eax
        syscall

        cmpl $0, %eax
        sete %r15b

#ifdef __ASSUME_REQUEUE_PI
        jmp 62f
#else
        je 62f

        /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
           successfully, it has already locked the mutex for us and the
           pi_flag (%r15b) is set to denote that fact.  However, if another
           thread changed the futex value before we entered the wait, the
           syscall may return an EAGAIN and the mutex is not locked.  We go
           ahead with a success anyway since later we look at the pi_flag to
           decide if we got the mutex or not.  The sequence numbers then make
           sure that only one of the threads actually wakes up.  We retry
           using normal FUTEX_WAIT only if the kernel returned ENOSYS, since
           normal and PI futexes don't mix.

           Note that we don't check for EAGAIN specifically; we assume that
           the only other error the futex call could return is EAGAIN
           (barring ETIMEDOUT of course, for the timeout case in futex) since
           anything else would mean an error in our function.  It is too
           expensive to do that check for every call (which is quite common
           in case of a large number of threads), so it has been skipped.  */
        cmpl $-ENOSYS, %eax
        jne 62f

        subq $cond_futex, %rdi
#endif

61:     movl $(FUTEX_WAIT_BITSET|FUTEX_PRIVATE_FLAG), %esi
60:     xorb %r15b, %r15b
        xorl %eax, %eax
        /* The following only works like this because we only support
           two clocks, represented using a single bit.  */
        testl $1, cond_nwaiters(%rdi)
        movl $FUTEX_CLOCK_REALTIME, %edx
        movl $0xffffffff, %r9d
        cmove %edx, %eax
        orl %eax, %esi
        movq %r12, %rdx
        addq $cond_futex, %rdi
        movl $SYS_futex, %eax
        syscall
62:     movq %rax, %r14

        movl (%rsp), %edi
        callq __pthread_disable_asynccancel
.LcleanupEND1:

        /* Lock.  */
        movq 8(%rsp), %rdi
        movl $1, %esi
        xorl %eax, %eax
        LOCK
#if cond_lock == 0
        cmpxchgl %esi, (%rdi)
#else
        cmpxchgl %esi, cond_lock(%rdi)
#endif
        jne 35f

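        /* Decide what the wakeup means.  A changed broadcast_seq means a
           broadcast covered this waiter (53).  Otherwise, if wakeup_seq has
           advanced past the value saved before sleeping and there are
           wakeups not yet consumed (woken_seq < wakeup_seq), take one (39).
           Failing that, a futex timeout becomes ETIMEDOUT (99); anything
           else was spurious and we go back to waiting at 38.  */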
36:     movl broadcast_seq(%rdi), %edx

        movq woken_seq(%rdi), %rax

        movq wakeup_seq(%rdi), %r9

        cmpl 4(%rsp), %edx
        jne 53f

        cmpq 24(%rsp), %r9
        jbe 45f

        cmpq %rax, %r9
        ja 39f

45:     cmpq $-ETIMEDOUT, %r14
        je 99f

        /* We need to go back to futex_wait.  If we're using requeue_pi, then
           release the mutex we had acquired and go back.  */
        test %r15b, %r15b
        jz 38b

        /* Adjust the mutex values first and then unlock it.  The unlock
           should always succeed or else the kernel did not lock the
           mutex correctly.  */
        movq %r8, %rdi
        callq __pthread_mutex_cond_lock_adjust
        xorl %esi, %esi
        callq __pthread_mutex_unlock_usercnt
        /* Reload cond_var.  */
        movq 8(%rsp), %rdi
        jmp 38b

99:     incq wakeup_seq(%rdi)
        incl cond_futex(%rdi)
        movl $ETIMEDOUT, %r14d
        jmp 44f

53:     xorq %r14, %r14
        jmp 54f

39:     xorq %r14, %r14
44:     incq woken_seq(%rdi)

54:     subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)

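        /* Destruction handshake: a total_seq of -1 marks a condvar that is
           being destroyed (pthread_cond_destroy waits on cond_nwaiters for
           the remaining waiters to drain), so wake it if we were the last.  */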
        /* Wake up a thread which wants to destroy the condvar object.  */
        cmpq $0xffffffffffffffff, total_seq(%rdi)
        jne 55f
        movl cond_nwaiters(%rdi), %eax
        andl $~((1 << nwaiters_shift) - 1), %eax
        jne 55f

        addq $cond_nwaiters, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_nwaiters(%rdi)
        movl $1, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
        movl $FUTEX_WAKE, %eax
        movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
        cmove %eax, %esi
#else
        movl $0, %eax
        movl %fs:PRIVATE_FUTEX, %esi
        cmove %eax, %esi
        orl $FUTEX_WAKE, %esi
#endif
        movl $SYS_futex, %eax
        syscall
        subq $cond_nwaiters, %rdi

55:     LOCK
#if cond_lock == 0
        decl (%rdi)
#else
        decl cond_lock(%rdi)
#endif
        jne 40f

        /* If requeue_pi is used the kernel performs the locking of the
           mutex.  */
41:     movq 16(%rsp), %rdi
        testb %r15b, %r15b
        jnz 64f

        callq __pthread_mutex_cond_lock

63:     testq %rax, %rax
        cmoveq %r14, %rax

48:     addq $FRAME_SIZE, %rsp
        cfi_adjust_cfa_offset(-FRAME_SIZE)
        popq %r15
        cfi_adjust_cfa_offset(-8)
        cfi_restore(%r15)
        popq %r14
        cfi_adjust_cfa_offset(-8)
        cfi_restore(%r14)
        popq %r13
        cfi_adjust_cfa_offset(-8)
        cfi_restore(%r13)
        popq %r12
        cfi_adjust_cfa_offset(-8)
        cfi_restore(%r12)

        retq

        cfi_restore_state

64:     callq __pthread_mutex_cond_lock_adjust
        movq %r14, %rax
        jmp 48b

        /* Initial locking failed.  */
31:
#if cond_lock != 0
        addq $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl $LLL_PRIVATE, %eax
        movl $LLL_SHARED, %esi
        cmovne %eax, %esi
        callq __lll_lock_wait
        jmp 32b

        /* Unlock in loop requires wakeup.  */
33:
#if cond_lock != 0
        addq $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl $LLL_PRIVATE, %eax
        movl $LLL_SHARED, %esi
        cmovne %eax, %esi
        callq __lll_unlock_wake
        jmp 34b

        /* Locking in loop failed.  */
35:
#if cond_lock != 0
        addq $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl $LLL_PRIVATE, %eax
        movl $LLL_SHARED, %esi
        cmovne %eax, %esi
        callq __lll_lock_wait
#if cond_lock != 0
        subq $cond_lock, %rdi
#endif
        jmp 36b

        /* Unlock after loop requires wakeup.  */
40:
#if cond_lock != 0
        addq $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl $LLL_PRIVATE, %eax
        movl $LLL_SHARED, %esi
        cmovne %eax, %esi
        callq __lll_unlock_wake
        jmp 41b

        /* The initial unlocking of the mutex failed.  */
46:     movq 8(%rsp), %rdi
        movq %rax, (%rsp)
        LOCK
#if cond_lock == 0
        decl (%rdi)
#else
        decl cond_lock(%rdi)
#endif
        jne 47f

#if cond_lock != 0
        addq $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl $LLL_PRIVATE, %eax
        movl $LLL_SHARED, %esi
        cmovne %eax, %esi
        callq __lll_unlock_wake

47:     movq (%rsp), %rax
        jmp 48b


#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
.Lreltmo:
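        /* Fallback for kernels without FUTEX_CLOCK_REALTIME: convert ABSTIME
           into a relative timeout (re-reading the clock at label 8 after
           every spurious wakeup) and block with plain FUTEX_WAIT.  */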
        /* Get internal lock.  */
        movl $1, %esi
        xorl %eax, %eax
        LOCK
# if cond_lock == 0
        cmpxchgl %esi, (%rdi)
# else
        cmpxchgl %esi, cond_lock(%rdi)
# endif
        jnz 1f

        /* Unlock the mutex.  */
2:      movq 16(%rsp), %rdi
        xorl %esi, %esi
        callq __pthread_mutex_unlock_usercnt

        testl %eax, %eax
        jne 46b

        movq 8(%rsp), %rdi
        incq total_seq(%rdi)
        incl cond_futex(%rdi)
        addl $(1 << nwaiters_shift), cond_nwaiters(%rdi)

        /* Get and store current wakeup_seq value.  */
        movq 8(%rsp), %rdi
        movq wakeup_seq(%rdi), %r9
        movl broadcast_seq(%rdi), %edx
        movq %r9, 24(%rsp)
        movl %edx, 4(%rsp)

        /* Get the current time.  */
8:
# ifdef __NR_clock_gettime
        /* Get the clock number.  Note that the field in the condvar
           structure stores the number minus 1.  */
        movq 8(%rsp), %rdi
        movl cond_nwaiters(%rdi), %edi
        andl $((1 << nwaiters_shift) - 1), %edi
        /* Only clocks 0 and 1 are allowed so far.  Both are handled in the
           kernel.  */
        leaq 32(%rsp), %rsi
#  ifdef SHARED
        mov __vdso_clock_gettime@GOTPCREL(%rip), %RAX_LP
        mov (%rax), %RAX_LP
        PTR_DEMANGLE (%RAX_LP)
        call *%rax
#  else
        movl $__NR_clock_gettime, %eax
        syscall
#  endif

        /* Compute relative timeout.  */
        movq (%r13), %rcx
        movq 8(%r13), %rdx
        subq 32(%rsp), %rcx
        subq 40(%rsp), %rdx
# else
        leaq 24(%rsp), %rdi
        xorl %esi, %esi
        /* This call works because we directly jump to a system call entry
           which preserves all the registers.  */
        call JUMPTARGET(__gettimeofday)

        /* Compute relative timeout.  */
        movq 40(%rsp), %rax
        movl $1000, %edx
        mul %rdx                /* Microseconds to nanoseconds.  */
        movq (%r13), %rcx
        movq 8(%r13), %rdx
        subq 32(%rsp), %rcx
        subq %rax, %rdx
# endif
        jns 12f
        addq $1000000000, %rdx
        decq %rcx
12:     testq %rcx, %rcx
        movq 8(%rsp), %rdi
        movq $-ETIMEDOUT, %r14
        js 6f

        /* Store relative timeout.  */
21:     movq %rcx, 32(%rsp)
        movq %rdx, 40(%rsp)

        movl cond_futex(%rdi), %r12d

        /* Unlock.  */
        LOCK
# if cond_lock == 0
        decl (%rdi)
# else
        decl cond_lock(%rdi)
# endif
        jne 3f

.LcleanupSTART2:
4:      callq __pthread_enable_asynccancel
        movl %eax, (%rsp)

        leaq 32(%rsp), %r10
        LP_OP(cmp) $-1, dep_mutex(%rdi)
        movq %r12, %rdx
# ifdef __ASSUME_PRIVATE_FUTEX
        movl $FUTEX_WAIT, %eax
        movl $(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
        cmove %eax, %esi
# else
        movl $0, %eax
        movl %fs:PRIVATE_FUTEX, %esi
        cmove %eax, %esi
#  if FUTEX_WAIT != 0
        orl $FUTEX_WAIT, %esi
#  endif
# endif
        addq $cond_futex, %rdi
        movl $SYS_futex, %eax
        syscall
        movq %rax, %r14

        movl (%rsp), %edi
        callq __pthread_disable_asynccancel
.LcleanupEND2:

        /* Lock.  */
        movq 8(%rsp), %rdi
        movl $1, %esi
        xorl %eax, %eax
        LOCK
# if cond_lock == 0
        cmpxchgl %esi, (%rdi)
# else
        cmpxchgl %esi, cond_lock(%rdi)
# endif
        jne 5f

6:      movl broadcast_seq(%rdi), %edx

        movq woken_seq(%rdi), %rax

        movq wakeup_seq(%rdi), %r9

        cmpl 4(%rsp), %edx
        jne 53b

        cmpq 24(%rsp), %r9
        jbe 15f

        cmpq %rax, %r9
        ja 39b

15:     cmpq $-ETIMEDOUT, %r14
        jne 8b

        jmp 99b

        /* Initial locking failed.  */
1:
# if cond_lock != 0
        addq $cond_lock, %rdi
# endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl $LLL_PRIVATE, %eax
        movl $LLL_SHARED, %esi
        cmovne %eax, %esi
        callq __lll_lock_wait
        jmp 2b

        /* Unlock in loop requires wakeup.  */
3:
# if cond_lock != 0
        addq $cond_lock, %rdi
# endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl $LLL_PRIVATE, %eax
        movl $LLL_SHARED, %esi
        cmovne %eax, %esi
        callq __lll_unlock_wake
        jmp 4b

        /* Locking in loop failed.  */
5:
# if cond_lock != 0
        addq $cond_lock, %rdi
# endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl $LLL_PRIVATE, %eax
        movl $LLL_SHARED, %esi
        cmovne %eax, %esi
        callq __lll_lock_wait
# if cond_lock != 0
        subq $cond_lock, %rdi
# endif
        jmp 6b
#endif
        .size __pthread_cond_timedwait, .-__pthread_cond_timedwait
versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
                  GLIBC_2_3_2)


        .align 16
        .type __condvar_cleanup2, @function
__condvar_cleanup2:
        /* Stack frame:

           rsp + 72
                    +--------------------------+
           rsp + 64 | %r12                     |
                    +--------------------------+
           rsp + 56 | %r13                     |
                    +--------------------------+
           rsp + 48 | %r14                     |
                    +--------------------------+
           rsp + 24 | unused                   |
                    +--------------------------+
           rsp + 16 | mutex pointer            |
                    +--------------------------+
           rsp +  8 | condvar pointer          |
                    +--------------------------+
           rsp +  4 | old broadcast_seq value  |
                    +--------------------------+
           rsp +  0 | old cancellation mode    |
                    +--------------------------+
        */
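
        /* Cancellation cleanup: this is the landing pad for the
           .LcleanupSTART/.LcleanupEND regions listed in the exception table
           below.  On entry %rax holds the _Unwind_Exception object (saved at
           24(%rsp)).  The handler undoes this waiter's accounting, wakes the
           other waiters so no signal is lost, re-acquires the user mutex as
           pthread_cond_timedwait must, and then resumes unwinding.  */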

        movq %rax, 24(%rsp)

        /* Get internal lock.  */
        movq 8(%rsp), %rdi
        movl $1, %esi
        xorl %eax, %eax
        LOCK
#if cond_lock == 0
        cmpxchgl %esi, (%rdi)
#else
        cmpxchgl %esi, cond_lock(%rdi)
#endif
        jz 1f

#if cond_lock != 0
        addq $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl $LLL_PRIVATE, %eax
        movl $LLL_SHARED, %esi
        cmovne %eax, %esi
        callq __lll_lock_wait
#if cond_lock != 0
        subq $cond_lock, %rdi
#endif

1:      movl broadcast_seq(%rdi), %edx
        cmpl 4(%rsp), %edx
        jne 3f

        /* We increment the wakeup_seq counter only if it is lower than
           total_seq.  If this is not the case the thread was woken and
           then canceled.  In this case we ignore the signal.  */
        movq total_seq(%rdi), %rax
        cmpq wakeup_seq(%rdi), %rax
        jbe 6f
        incq wakeup_seq(%rdi)
        incl cond_futex(%rdi)
6:      incq woken_seq(%rdi)

3:      subl $(1 << nwaiters_shift), cond_nwaiters(%rdi)

        /* Wake up a thread which wants to destroy the condvar object.  */
        xorq %r12, %r12
        cmpq $0xffffffffffffffff, total_seq(%rdi)
        jne 4f
        movl cond_nwaiters(%rdi), %eax
        andl $~((1 << nwaiters_shift) - 1), %eax
        jne 4f

        LP_OP(cmp) $-1, dep_mutex(%rdi)
        leaq cond_nwaiters(%rdi), %rdi
        movl $1, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
        movl $FUTEX_WAKE, %eax
        movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
        cmove %eax, %esi
#else
        movl $0, %eax
        movl %fs:PRIVATE_FUTEX, %esi
        cmove %eax, %esi
        orl $FUTEX_WAKE, %esi
#endif
        movl $SYS_futex, %eax
        syscall
        subq $cond_nwaiters, %rdi
        movl $1, %r12d

4:      LOCK
#if cond_lock == 0
        decl (%rdi)
#else
        decl cond_lock(%rdi)
#endif
        je 2f
#if cond_lock != 0
        addq $cond_lock, %rdi
#endif
        LP_OP(cmp) $-1, dep_mutex-cond_lock(%rdi)
        movl $LLL_PRIVATE, %eax
        movl $LLL_SHARED, %esi
        cmovne %eax, %esi
        callq __lll_unlock_wake

        /* Wake up all waiters to make sure no signal gets lost.  */
2:      testq %r12, %r12
        jnz 5f
        addq $cond_futex, %rdi
        LP_OP(cmp) $-1, dep_mutex-cond_futex(%rdi)
        movl $0x7fffffff, %edx
#ifdef __ASSUME_PRIVATE_FUTEX
        movl $FUTEX_WAKE, %eax
        movl $(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
        cmove %eax, %esi
#else
        movl $0, %eax
        movl %fs:PRIVATE_FUTEX, %esi
        cmove %eax, %esi
        orl $FUTEX_WAKE, %esi
#endif
        movl $SYS_futex, %eax
        syscall

        /* Lock the mutex only if we don't own it already.  This only happens
           in case of PI mutexes, if we got cancelled after a successful
           return of the futex syscall and before disabling async
           cancellation.  */
5:      movq 16(%rsp), %rdi
        movl MUTEX_KIND(%rdi), %eax
        andl $(ROBUST_BIT|PI_BIT), %eax
        cmpl $PI_BIT, %eax
        jne 7f

        movl (%rdi), %eax
        andl $TID_MASK, %eax
        cmpl %eax, %fs:TID
        jne 7f
        /* We managed to get the lock.  Fix it up before returning.  */
        callq __pthread_mutex_cond_lock_adjust
        jmp 8f

7:      callq __pthread_mutex_cond_lock

8:      movq 24(%rsp), %rdi
        movq FRAME_SIZE(%rsp), %r15
        movq FRAME_SIZE+8(%rsp), %r14
        movq FRAME_SIZE+16(%rsp), %r13
        movq FRAME_SIZE+24(%rsp), %r12
.LcallUR:
        call _Unwind_Resume@PLT
        hlt
.LENDCODE:
        cfi_endproc
        .size __condvar_cleanup2, .-__condvar_cleanup2


        .section .gcc_except_table,"a",@progbits
.LexceptSTART:
        .byte DW_EH_PE_omit                     # @LPStart format
        .byte DW_EH_PE_omit                     # @TType format
        .byte DW_EH_PE_uleb128                  # call-site format
        .uleb128 .Lcstend-.Lcstbegin
.Lcstbegin:
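        /* Each call-site record below is four uleb128 fields: region start
           and length (offsets from .LSTARTCODE), the landing pad, and an
           action of 0 (cleanup only).  The cancellable futex-wait regions
           land in __condvar_cleanup2; the _Unwind_Resume call site has no
           landing pad.  */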
        .uleb128 .LcleanupSTART1-.LSTARTCODE
        .uleb128 .LcleanupEND1-.LcleanupSTART1
        .uleb128 __condvar_cleanup2-.LSTARTCODE
        .uleb128 0
#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
        .uleb128 .LcleanupSTART2-.LSTARTCODE
        .uleb128 .LcleanupEND2-.LcleanupSTART2
        .uleb128 __condvar_cleanup2-.LSTARTCODE
        .uleb128 0
#endif
        .uleb128 .LcallUR-.LSTARTCODE
        .uleb128 .LENDCODE-.LcallUR
        .uleb128 0
        .uleb128 0
.Lcstend:


#ifdef SHARED
        .hidden DW.ref.__gcc_personality_v0
        .weak DW.ref.__gcc_personality_v0
        .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
        .align LP_SIZE
        .type DW.ref.__gcc_personality_v0, @object
        .size DW.ref.__gcc_personality_v0, LP_SIZE
DW.ref.__gcc_personality_v0:
        ASM_ADDR __gcc_personality_v0
#endif