/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <shlib-compat.h>
#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <tcb-offsets.h>
#include <pthread-errnos.h>
#include <pthread-pi-defines.h>
#include <kernel-features.h>
#include <stap-probe.h>


        .text

/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)  */
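/* Informal sketch of the control flow below: take the condvar's internal
   lock, remember MUTEX in dep_mutex, release the user mutex, and register
   as a waiter (total_seq, cond_futex, cond_nwaiters).  Then loop: enable
   asynchronous cancellation, sleep on cond_futex with FUTEX_WAIT (or
   FUTEX_WAIT_REQUEUE_PI for PI mutexes), disable cancellation, retake the
   internal lock, and check the sequence counters to see whether a signal
   is available for us.  Once a signal is consumed, unregister as a waiter
   and reacquire the user mutex (unless the kernel already locked it for
   us via requeue-PI).  */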
        .globl  __pthread_cond_wait
        .type   __pthread_cond_wait, @function
        .align  16
__pthread_cond_wait:
.LSTARTCODE:
        cfi_startproc
#ifdef SHARED
        cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
                        DW.ref.__gcc_personality_v0)
        cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
#else
        cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
        cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
#endif

        pushl   %ebp
        cfi_adjust_cfa_offset(4)
        cfi_rel_offset(%ebp, 0)
        pushl   %edi
        cfi_adjust_cfa_offset(4)
        cfi_rel_offset(%edi, 0)
        pushl   %esi
        cfi_adjust_cfa_offset(4)
        cfi_rel_offset(%esi, 0)
        pushl   %ebx
        cfi_adjust_cfa_offset(4)
        cfi_rel_offset(%ebx, 0)

        xorl    %esi, %esi
        movl    20(%esp), %ebx
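        /* Four saved registers plus the return address now sit above
           %esp, so the COND argument is at 20(%esp) and MUTEX at
           24(%esp).  %ebx holds COND for the rest of the function, and
           %esi is zeroed for later use as the NULL futex timeout.  */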

        LIBC_PROBE (cond_wait, 2, 24(%esp), %ebx)

        /* Get internal lock.  */
        movl    $1, %edx
        xorl    %eax, %eax
        LOCK
#if cond_lock == 0
        cmpxchgl %edx, (%ebx)
#else
        cmpxchgl %edx, cond_lock(%ebx)
#endif
        jnz     1f

        /* Store the reference to the mutex.  If there is already a
           different value in there, this is a bad user bug.  */
2:      cmpl    $-1, dep_mutex(%ebx)
        movl    24(%esp), %eax
        je      15f
        movl    %eax, dep_mutex(%ebx)

        /* Unlock the mutex.  */
15:     xorl    %edx, %edx
        call    __pthread_mutex_unlock_usercnt

        testl   %eax, %eax
        jne     12f

        addl    $1, total_seq(%ebx)
        adcl    $0, total_seq+4(%ebx)
        addl    $1, cond_futex(%ebx)
        addl    $(1 << nwaiters_shift), cond_nwaiters(%ebx)
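        /* We are now registered as a waiter: total_seq counts every
           wait call ever made, cond_futex is the futex word signalers
           bump and waiters sleep on, and the upper bits of
           cond_nwaiters count the threads currently waiting.  */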

#define FRAME_SIZE 20
        subl    $FRAME_SIZE, %esp
        cfi_adjust_cfa_offset(FRAME_SIZE)
        cfi_remember_state

        /* Get and store current wakeup_seq value.  */
        movl    wakeup_seq(%ebx), %edi
        movl    wakeup_seq+4(%ebx), %edx
        movl    broadcast_seq(%ebx), %eax
        movl    %edi, 4(%esp)
        movl    %edx, 8(%esp)
        movl    %eax, 12(%esp)
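        /* Frame layout: (%esp) holds the old cancellation type,
           4(%esp)/8(%esp) the wakeup_seq snapshot, 12(%esp) the
           broadcast_seq snapshot, and 16(%esp) the pi-requeued flag.  */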

        /* Reset the pi-requeued flag.  */
8:      movl    $0, 16(%esp)
        movl    cond_futex(%ebx), %ebp

        /* Unlock.  */
        LOCK
#if cond_lock == 0
        subl    $1, (%ebx)
#else
        subl    $1, cond_lock(%ebx)
#endif
        jne     3f

.LcleanupSTART:
4:      call    __pthread_enable_asynccancel
        movl    %eax, (%esp)

        xorl    %ecx, %ecx
        cmpl    $-1, dep_mutex(%ebx)
        sete    %cl
        je      18f

        movl    dep_mutex(%ebx), %edi
        /* Requeue to a non-robust PI mutex if the PI bit is set and
           the robust bit is not set.  */
        movl    MUTEX_KIND(%edi), %eax
        andl    $(ROBUST_BIT|PI_BIT), %eax
        cmpl    $PI_BIT, %eax
        jne     18f

        movl    $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
        movl    %ebp, %edx
        xorl    %esi, %esi
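        /* futex (&cond_futex, FUTEX_WAIT_REQUEUE_PI, <futex val>, NULL,
           &dep_mutex): on i386 the syscall arguments travel in %ebx,
           %ecx, %edx, %esi and %edi, so %edi already carries the mutex
           address loaded above and %esi is the NULL timeout.  The
           private flag can be hard-coded because this path is only
           taken when dep_mutex != -1, i.e. the condvar is
           process-private.  */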
        addl    $cond_futex, %ebx
.Ladd_cond_futex_pi:
        movl    $SYS_futex, %eax
        ENTER_KERNEL
        subl    $cond_futex, %ebx
.Lsub_cond_futex_pi:
        /* Set the pi-requeued flag only if the kernel has returned 0.  The
           kernel does not hold the mutex on error.  */
        cmpl    $0, %eax
        sete    16(%esp)
        je      19f

        /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns
           successfully, it has already locked the mutex for us and the
           pi_flag (16(%esp)) is set to denote that fact.  However, if another
           thread changed the futex value before we entered the wait, the
           syscall may return EAGAIN and the mutex is not locked.  We go
           ahead with a success anyway since later we look at the pi_flag to
           decide if we got the mutex or not.  The sequence numbers then make
           sure that only one of the threads actually wakes up.  We retry
           using normal FUTEX_WAIT only if the kernel returned ENOSYS, since
           normal and PI futexes don't mix.

           Note that we don't check for EAGAIN specifically; we assume that
           the only other error the futex function could return is EAGAIN
           since anything else would mean an error in our function.  It is
           too expensive to do that check for every call (which is quite
           common in case of a large number of threads), so it has been
           skipped.  */
        cmpl    $-ENOSYS, %eax
        jne     19f
        xorl    %ecx, %ecx

18:     subl    $1, %ecx
#ifdef __ASSUME_PRIVATE_FUTEX
        andl    $FUTEX_PRIVATE_FLAG, %ecx
#else
        andl    %gs:PRIVATE_FUTEX, %ecx
#endif
#if FUTEX_WAIT != 0
        addl    $FUTEX_WAIT, %ecx
#endif
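        /* %ecx is now FUTEX_WAIT, with FUTEX_PRIVATE_FLAG or'ed in iff
           the condvar is process-private: %cl was 1 when dep_mutex was
           -1 (process-shared), so the subl/andl pair above yields the
           flag only in the private case.  */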
        movl    %ebp, %edx
        addl    $cond_futex, %ebx
.Ladd_cond_futex:
        movl    $SYS_futex, %eax
        ENTER_KERNEL
        subl    $cond_futex, %ebx
.Lsub_cond_futex:

19:     movl    (%esp), %eax
        call    __pthread_disable_asynccancel
.LcleanupEND:

        /* Lock.  */
        movl    $1, %edx
        xorl    %eax, %eax
        LOCK
#if cond_lock == 0
        cmpxchgl %edx, (%ebx)
#else
        cmpxchgl %edx, cond_lock(%ebx)
#endif
        jnz     5f

6:      movl    broadcast_seq(%ebx), %eax
        cmpl    12(%esp), %eax
        jne     16f

        movl    woken_seq(%ebx), %eax
        movl    woken_seq+4(%ebx), %ecx

        movl    wakeup_seq(%ebx), %edi
        movl    wakeup_seq+4(%ebx), %edx

        cmpl    8(%esp), %edx
        jne     7f
        cmpl    4(%esp), %edi
        je      22f

7:      cmpl    %ecx, %edx
        jne     9f
        cmp     %eax, %edi
        je      22f
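        /* Reaching here means a signal arrived that we may consume:
           wakeup_seq advanced past our snapshot and is still ahead of
           woken_seq.  The two branches to 22f above cover the spurious
           cases (no new signal, or all signals already consumed) and
           send us back to the futex wait.  */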

9:      addl    $1, woken_seq(%ebx)
        adcl    $0, woken_seq+4(%ebx)

        /* Unlock.  */
16:     subl    $(1 << nwaiters_shift), cond_nwaiters(%ebx)

        /* Wake up a thread which wants to destroy the condvar object.
           pthread_cond_destroy marks destruction in progress by setting
           total_seq to -1; if that is the case and no waiters remain,
           wake the destroying thread with a futex call on
           cond_nwaiters.  */
        movl    total_seq(%ebx), %eax
        andl    total_seq+4(%ebx), %eax
        cmpl    $0xffffffff, %eax
        jne     17f
        movl    cond_nwaiters(%ebx), %eax
        andl    $~((1 << nwaiters_shift) - 1), %eax
        jne     17f

        addl    $cond_nwaiters, %ebx
        movl    $SYS_futex, %eax
#if FUTEX_PRIVATE_FLAG > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex-cond_nwaiters(%ebx)
        sete    %cl
        subl    $1, %ecx
#ifdef __ASSUME_PRIVATE_FUTEX
        andl    $FUTEX_PRIVATE_FLAG, %ecx
#else
        andl    %gs:PRIVATE_FUTEX, %ecx
#endif
        addl    $FUTEX_WAKE, %ecx
        movl    $1, %edx
        ENTER_KERNEL
        subl    $cond_nwaiters, %ebx

17:     LOCK
#if cond_lock == 0
        subl    $1, (%ebx)
#else
        subl    $1, cond_lock(%ebx)
#endif
        jne     10f

        /* With requeue_pi, the mutex lock is held in the kernel.  */
11:     movl    24+FRAME_SIZE(%esp), %eax
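        /* The mutex argument is now at 24+FRAME_SIZE(%esp) because of
           the frame allocated above.  Skip the userspace lock call if
           the kernel already handed us the PI mutex (16(%esp) != 0).  */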
        movl    16(%esp), %ecx
        testl   %ecx, %ecx
        jnz     21f

        call    __pthread_mutex_cond_lock
20:     addl    $FRAME_SIZE, %esp
        cfi_adjust_cfa_offset(-FRAME_SIZE);

14:     popl    %ebx
        cfi_adjust_cfa_offset(-4)
        cfi_restore(%ebx)
        popl    %esi
        cfi_adjust_cfa_offset(-4)
        cfi_restore(%esi)
        popl    %edi
        cfi_adjust_cfa_offset(-4)
        cfi_restore(%edi)
        popl    %ebp
        cfi_adjust_cfa_offset(-4)
        cfi_restore(%ebp)

        /* We return the result of the mutex_lock operation.  */
        ret

        cfi_restore_state

21:     call    __pthread_mutex_cond_lock_adjust
        xorl    %eax, %eax
        jmp     20b

        cfi_adjust_cfa_offset(-FRAME_SIZE);

        /* We need to go back to futex_wait.  If we're using requeue_pi, then
           release the mutex we had acquired and go back.  */
22:     movl    16(%esp), %edx
        test    %edx, %edx
        jz      8b

        /* Adjust the mutex values first and then unlock it.  The unlock
           should always succeed or else the kernel did not lock the mutex
           correctly.  */
        movl    dep_mutex(%ebx), %eax
        call    __pthread_mutex_cond_lock_adjust
        xorl    %edx, %edx
        call    __pthread_mutex_unlock_usercnt
        jmp     8b

        /* Initial locking failed.  */
1:
#if cond_lock == 0
        movl    %ebx, %edx
#else
        leal    cond_lock(%ebx), %edx
#endif
#if (LLL_SHARED-LLL_PRIVATE) > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex(%ebx)
        setne   %cl
        subl    $1, %ecx
        andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
#if LLL_PRIVATE != 0
        addl    $LLL_PRIVATE, %ecx
#endif
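        /* %ecx now holds LLL_PRIVATE or LLL_SHARED: dep_mutex == -1
           marks a process-shared condvar.  The same setne/subl/andl
           idiom selects the flag in the wake-up paths below.  */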
        call    __lll_lock_wait
        jmp     2b

        /* The initial unlocking of the mutex failed.  */
12:
        LOCK
#if cond_lock == 0
        subl    $1, (%ebx)
#else
        subl    $1, cond_lock(%ebx)
#endif
        jne     14b

        movl    %eax, %esi
#if cond_lock == 0
        movl    %ebx, %eax
#else
        leal    cond_lock(%ebx), %eax
#endif
#if (LLL_SHARED-LLL_PRIVATE) > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex(%ebx)
        setne   %cl
        subl    $1, %ecx
        andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
#if LLL_PRIVATE != 0
        addl    $LLL_PRIVATE, %ecx
#endif
        call    __lll_unlock_wake

        movl    %esi, %eax
        jmp     14b

        cfi_adjust_cfa_offset(FRAME_SIZE)

        /* Unlock in loop requires wakeup.  */
3:
#if cond_lock == 0
        movl    %ebx, %eax
#else
        leal    cond_lock(%ebx), %eax
#endif
#if (LLL_SHARED-LLL_PRIVATE) > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex(%ebx)
        setne   %cl
        subl    $1, %ecx
        andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
#if LLL_PRIVATE != 0
        addl    $LLL_PRIVATE, %ecx
#endif
        call    __lll_unlock_wake
        jmp     4b

        /* Locking in loop failed.  */
5:
#if cond_lock == 0
        movl    %ebx, %edx
#else
        leal    cond_lock(%ebx), %edx
#endif
#if (LLL_SHARED-LLL_PRIVATE) > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex(%ebx)
        setne   %cl
        subl    $1, %ecx
        andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
#if LLL_PRIVATE != 0
        addl    $LLL_PRIVATE, %ecx
#endif
        call    __lll_lock_wait
        jmp     6b

        /* Unlock after loop requires wakeup.  */
10:
#if cond_lock == 0
        movl    %ebx, %eax
#else
        leal    cond_lock(%ebx), %eax
#endif
#if (LLL_SHARED-LLL_PRIVATE) > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex(%ebx)
        setne   %cl
        subl    $1, %ecx
        andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
#if LLL_PRIVATE != 0
        addl    $LLL_PRIVATE, %ecx
#endif
        call    __lll_unlock_wake
        jmp     11b

        .size   __pthread_cond_wait, .-__pthread_cond_wait
        versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
                          GLIBC_2_3_2)


        .type   __condvar_w_cleanup2, @function
__condvar_w_cleanup2:
        subl    $cond_futex, %ebx
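        /* This landing pad is used when unwinding out of the two
           syscall regions where %ebx had been advanced to &cond_futex;
           undo the offset and fall through into __condvar_w_cleanup.  */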
        .size   __condvar_w_cleanup2, .-__condvar_w_cleanup2
.LSbl4:
        .type   __condvar_w_cleanup, @function
__condvar_w_cleanup:
        movl    %eax, %esi
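        /* %eax carries the _Unwind_Exception object passed to the
           landing pad; save it in %esi for the _Unwind_Resume call at
           the end of the cleanup.  */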

        /* Get internal lock.  */
        movl    $1, %edx
        xorl    %eax, %eax
        LOCK
#if cond_lock == 0
        cmpxchgl %edx, (%ebx)
#else
        cmpxchgl %edx, cond_lock(%ebx)
#endif
        jz      1f

#if cond_lock == 0
        movl    %ebx, %edx
#else
        leal    cond_lock(%ebx), %edx
#endif
#if (LLL_SHARED-LLL_PRIVATE) > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex(%ebx)
        setne   %cl
        subl    $1, %ecx
        andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
#if LLL_PRIVATE != 0
        addl    $LLL_PRIVATE, %ecx
#endif
        call    __lll_lock_wait

1:      movl    broadcast_seq(%ebx), %eax
        cmpl    12(%esp), %eax
        jne     3f

        /* We increment the wakeup_seq counter only if it is lower than
           total_seq.  If this is not the case the thread was woken and
           then canceled.  In this case we ignore the signal.  */
        movl    total_seq(%ebx), %eax
        movl    total_seq+4(%ebx), %edi
        cmpl    wakeup_seq+4(%ebx), %edi
        jb      6f
        ja      7f
        cmpl    wakeup_seq(%ebx), %eax
        jbe     7f

6:      addl    $1, wakeup_seq(%ebx)
        adcl    $0, wakeup_seq+4(%ebx)
        addl    $1, cond_futex(%ebx)

7:      addl    $1, woken_seq(%ebx)
        adcl    $0, woken_seq+4(%ebx)

3:      subl    $(1 << nwaiters_shift), cond_nwaiters(%ebx)

        /* Wake up a thread which wants to destroy the condvar object.  */
        xorl    %edi, %edi
        movl    total_seq(%ebx), %eax
        andl    total_seq+4(%ebx), %eax
        cmpl    $0xffffffff, %eax
        jne     4f
        movl    cond_nwaiters(%ebx), %eax
        andl    $~((1 << nwaiters_shift) - 1), %eax
        jne     4f

        addl    $cond_nwaiters, %ebx
        movl    $SYS_futex, %eax
#if FUTEX_PRIVATE_FLAG > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex-cond_nwaiters(%ebx)
        sete    %cl
        subl    $1, %ecx
#ifdef __ASSUME_PRIVATE_FUTEX
        andl    $FUTEX_PRIVATE_FLAG, %ecx
#else
        andl    %gs:PRIVATE_FUTEX, %ecx
#endif
        addl    $FUTEX_WAKE, %ecx
        movl    $1, %edx
        ENTER_KERNEL
        subl    $cond_nwaiters, %ebx
        movl    $1, %edi

4:      LOCK
#if cond_lock == 0
        subl    $1, (%ebx)
#else
        subl    $1, cond_lock(%ebx)
#endif
        je      2f

#if cond_lock == 0
        movl    %ebx, %eax
#else
        leal    cond_lock(%ebx), %eax
#endif
#if (LLL_SHARED-LLL_PRIVATE) > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex(%ebx)
        setne   %cl
        subl    $1, %ecx
        andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
#if LLL_PRIVATE != 0
        addl    $LLL_PRIVATE, %ecx
#endif
        call    __lll_unlock_wake

        /* Wake up all waiters to make sure no signal gets lost.  */
2:      testl   %edi, %edi
        jnz     5f
        addl    $cond_futex, %ebx
#if FUTEX_PRIVATE_FLAG > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex-cond_futex(%ebx)
        sete    %cl
        subl    $1, %ecx
#ifdef __ASSUME_PRIVATE_FUTEX
        andl    $FUTEX_PRIVATE_FLAG, %ecx
#else
        andl    %gs:PRIVATE_FUTEX, %ecx
#endif
        addl    $FUTEX_WAKE, %ecx
        movl    $SYS_futex, %eax
        movl    $0x7fffffff, %edx
        ENTER_KERNEL

        /* Lock the mutex only if we don't own it already.  This only happens
           in case of PI mutexes, if we got canceled after a successful
           return of the futex syscall and before disabling async
           cancellation.  */
5:      movl    24+FRAME_SIZE(%esp), %eax
        movl    MUTEX_KIND(%eax), %ebx
        andl    $(ROBUST_BIT|PI_BIT), %ebx
        cmpl    $PI_BIT, %ebx
        jne     8f

        movl    (%eax), %ebx
        andl    $TID_MASK, %ebx
        cmpl    %ebx, %gs:TID
        jne     8f
        /* We managed to get the lock.  Fix it up before returning.  */
        call    __pthread_mutex_cond_lock_adjust
        jmp     9f

8:      call    __pthread_mutex_cond_lock

9:      movl    %esi, (%esp)
.LcallUR:
        call    _Unwind_Resume
        hlt
.LENDCODE:
        cfi_endproc
        .size   __condvar_w_cleanup, .-__condvar_w_cleanup


        .section .gcc_except_table,"a",@progbits
.LexceptSTART:
        .byte   DW_EH_PE_omit                   # @LPStart format (omit)
        .byte   DW_EH_PE_omit                   # @TType format (omit)
        .byte   DW_EH_PE_sdata4                 # call-site format
                                                # DW_EH_PE_sdata4
        .uleb128 .Lcstend-.Lcstbegin
.Lcstbegin:
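        # Each call-site record below: region start, region length,
        # landing pad, then a zero uleb128 action (plain cleanup).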
        .long   .LcleanupSTART-.LSTARTCODE
        .long   .Ladd_cond_futex_pi-.LcleanupSTART
        .long   __condvar_w_cleanup-.LSTARTCODE
        .uleb128 0
        .long   .Ladd_cond_futex_pi-.LSTARTCODE
        .long   .Lsub_cond_futex_pi-.Ladd_cond_futex_pi
        .long   __condvar_w_cleanup2-.LSTARTCODE
        .uleb128 0
        .long   .Lsub_cond_futex_pi-.LSTARTCODE
        .long   .Ladd_cond_futex-.Lsub_cond_futex_pi
        .long   __condvar_w_cleanup-.LSTARTCODE
        .uleb128 0
        .long   .Ladd_cond_futex-.LSTARTCODE
        .long   .Lsub_cond_futex-.Ladd_cond_futex
        .long   __condvar_w_cleanup2-.LSTARTCODE
        .uleb128 0
        .long   .Lsub_cond_futex-.LSTARTCODE
        .long   .LcleanupEND-.Lsub_cond_futex
        .long   __condvar_w_cleanup-.LSTARTCODE
        .uleb128 0
        .long   .LcallUR-.LSTARTCODE
        .long   .LENDCODE-.LcallUR
        .long   0
        .uleb128 0
.Lcstend:

#ifdef SHARED
        .hidden DW.ref.__gcc_personality_v0
        .weak   DW.ref.__gcc_personality_v0
        .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
        .align  4
        .type   DW.ref.__gcc_personality_v0, @object
        .size   DW.ref.__gcc_personality_v0, 4
DW.ref.__gcc_personality_v0:
        .long   __gcc_personality_v0
#endif