/* Copyright (C) 2002-2005,2007,2009,2010 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <sysdep.h>
#include <shlib-compat.h>
#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <kernel-features.h>
#include <pthread-pi-defines.h>
#include <pthread-errnos.h>


        .text

        /* int pthread_cond_signal (pthread_cond_t *cond) */
        .globl  __pthread_cond_signal
        .type   __pthread_cond_signal, @function
        .align  16
__pthread_cond_signal:

        cfi_startproc
        pushl   %ebx
        cfi_adjust_cfa_offset(4)
        cfi_rel_offset(%ebx, 0)
        pushl   %edi
        cfi_adjust_cfa_offset(4)
        cfi_rel_offset(%edi, 0)
        cfi_remember_state

        movl    12(%esp), %edi

        /* Get internal lock.  */
        movl    $1, %edx
        xorl    %eax, %eax
        LOCK
#if cond_lock == 0
        cmpxchgl %edx, (%edi)
#else
        cmpxchgl %edx, cond_lock(%edi)
#endif
        jnz     1f

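        /* The internal lock is now held.  total_seq counts all waiters
           that ever blocked on the condvar and wakeup_seq counts the
           wakeups already accounted for; both are 64-bit values compared
           in two 32-bit halves.  If total_seq <= wakeup_seq there is
           nobody left to wake and we only have to drop the lock at 4.  */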
2:      leal    cond_futex(%edi), %ebx
        movl    total_seq+4(%edi), %eax
        movl    total_seq(%edi), %ecx
        cmpl    wakeup_seq+4(%edi), %eax
#if cond_lock != 0
        /* Must use leal to preserve the flags.  */
        leal    cond_lock(%edi), %edi
#endif
        ja      3f
        jb      4f
        cmpl    wakeup_seq-cond_futex(%ebx), %ecx
        jbe     4f

        /* Bump the wakeup number.  */
3:      addl    $1, wakeup_seq-cond_futex(%ebx)
        adcl    $0, wakeup_seq-cond_futex+4(%ebx)
        addl    $1, (%ebx)
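        /* The futex value itself is bumped as well, so that a waiter
           that has not yet entered futex_wait notices the pending
           wakeup instead of sleeping through it.  */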

        /* Wake up one thread.  */
        pushl   %esi
        cfi_adjust_cfa_offset(4)
        cfi_rel_offset(%esi, 0)
        pushl   %ebp
        cfi_adjust_cfa_offset(4)
        cfi_rel_offset(%ebp, 0)

#if FUTEX_PRIVATE_FLAG > 255
        xorl    %ecx, %ecx
#endif
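        /* dep_mutex is -1 for a process-shared condvar; then the private
           futex flag must not be used and the requeue-PI path below is
           not taken.  */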
        cmpl    $-1, dep_mutex-cond_futex(%ebx)
        sete    %cl
        je      8f

        movl    dep_mutex-cond_futex(%ebx), %edx
        /* Requeue to a non-robust PI mutex if the PI bit is set and
           the robust bit is not set.  */
        movl    MUTEX_KIND(%edx), %eax
        andl    $(ROBUST_BIT|PI_BIT), %eax
        cmpl    $PI_BIT, %eax
        je      9f

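        /* Wake one waiter on cond_futex with FUTEX_WAKE_OP.  With the
           int $0x80 convention the arguments are: %ebx = cond_futex,
           %ecx = FUTEX_WAKE_OP (plus the private flag when usable),
           %edx = wake one waiter, %esi = 1, %edi = cond_lock,
           %ebp = FUTEX_OP_CLEAR_WAKE_IF_GT_ONE.  The operation also
           clears cond_lock and wakes one lock waiter if the lock was
           contended, so on success the internal lock is already
           released when we return below.  */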
8:      subl    $1, %ecx
#ifdef __ASSUME_PRIVATE_FUTEX
        andl    $FUTEX_PRIVATE_FLAG, %ecx
#else
        andl    %gs:PRIVATE_FUTEX, %ecx
#endif
        addl    $FUTEX_WAKE_OP, %ecx
        movl    $SYS_futex, %eax
        movl    $1, %edx
        movl    $1, %esi
        movl    $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %ebp
        /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
           sysenter.
        ENTER_KERNEL  */
        int     $0x80
        popl    %ebp
        cfi_adjust_cfa_offset(-4)
        cfi_restore(%ebp)
        popl    %esi
        cfi_adjust_cfa_offset(-4)
        cfi_restore(%esi)

        /* For any kind of error, we try again with WAKE.
           The general test also covers running on old kernels.  */
        cmpl    $-4095, %eax
        jae     7f

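        /* Successful return.  pthread_cond_signal reports no error here;
           the internal lock has already been released, either by the
           FUTEX_WAKE_OP operation above or on the unlock path at 4.  */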
6:      xorl    %eax, %eax
        popl    %edi
        cfi_adjust_cfa_offset(-4)
        cfi_restore(%edi)
        popl    %ebx
        cfi_adjust_cfa_offset(-4)
        cfi_restore(%ebx)
        ret

        cfi_restore_state

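        /* PI case: use FUTEX_CMP_REQUEUE_PI so the woken thread is handed
           over to the PI mutex directly.  Arguments: %ebx = cond_futex,
           %ecx = operation, %edx = wake one thread, %esi = requeue none,
           %edi = dep_mutex, %ebp = expected value of cond_futex.  */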
9:      movl    $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
        movl    $SYS_futex, %eax
        movl    $1, %edx
        xorl    %esi, %esi
        movl    dep_mutex-cond_futex(%ebx), %edi
        movl    (%ebx), %ebp
        /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
           sysenter.
        ENTER_KERNEL  */
        int     $0x80
        popl    %ebp
        popl    %esi

        leal    -cond_futex(%ebx), %edi

        /* For any kind of error, we try again with WAKE.
           The general test also covers running on old kernels.  */
        cmpl    $-4095, %eax
        jb      4f

7:
#ifdef __ASSUME_PRIVATE_FUTEX
        andl    $FUTEX_PRIVATE_FLAG, %ecx
#else
        andl    %gs:PRIVATE_FUTEX, %ecx
#endif
        orl     $FUTEX_WAKE, %ecx

        movl    $SYS_futex, %eax
        /* %edx should be 1 already from $FUTEX_WAKE_OP syscall.
        movl    $1, %edx  */
        ENTER_KERNEL

        /* Unlock.  Note that at this point %edi always points to
           cond_lock.  */
4:      LOCK
        subl    $1, (%edi)
        je      6b

        /* Unlock in loop requires wakeup.  */
5:      movl    %edi, %eax
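        /* Select the private or shared variant for __lll_unlock_wake;
           dep_mutex == -1 marks a process-shared condvar and needs
           LLL_SHARED.  */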
#if (LLL_SHARED-LLL_PRIVATE) > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex-cond_futex(%ebx)
        setne   %cl
        subl    $1, %ecx
        andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
#if LLL_PRIVATE != 0
        addl    $LLL_PRIVATE, %ecx
#endif
        call    __lll_unlock_wake
        jmp     6b

        /* Initial locking failed.  */
1:
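        /* Somebody else holds the internal lock.  __lll_lock_wait gets
           the lock address in %edx and the LLL_PRIVATE/LLL_SHARED flag
           in %ecx; once the lock is acquired we retry from 2.  */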
#if cond_lock == 0
        movl    %edi, %edx
#else
        leal    cond_lock(%edi), %edx
#endif
#if (LLL_SHARED-LLL_PRIVATE) > 255
        xorl    %ecx, %ecx
#endif
        cmpl    $-1, dep_mutex(%edi)
        setne   %cl
        subl    $1, %ecx
        andl    $(LLL_SHARED-LLL_PRIVATE), %ecx
#if LLL_PRIVATE != 0
        addl    $LLL_PRIVATE, %ecx
#endif
        call    __lll_lock_wait
        jmp     2b

        cfi_endproc
        .size   __pthread_cond_signal, .-__pthread_cond_signal
versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
                  GLIBC_2_3_2)