thirdparty/glibc.git: sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <shlib-compat.h>
#include <lowlevellock.h>
#include <lowlevelcond.h>
#include <kernel-features.h>
#include <pthread-pi-defines.h>
#include <pthread-errnos.h>
#include <stap-probe.h>

	.text

	/* int pthread_cond_signal (pthread_cond_t *cond) */
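	/* This is the i486 assembly implementation of pthread_cond_signal.
	   A typical caller holds the associated mutex, updates the shared
	   predicate and then signals, roughly (illustrative C only, not part
	   of this file):

	       pthread_mutex_lock (&mut);
	       ready = 1;
	       pthread_cond_signal (&cond);
	       pthread_mutex_unlock (&mut);

	   The routine itself only needs the condition variable: it wakes at
	   most one thread blocked in pthread_cond_wait or
	   pthread_cond_timedwait and always returns 0.  */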
	.globl	__pthread_cond_signal
	.type	__pthread_cond_signal, @function
	.align	16
__pthread_cond_signal:

	cfi_startproc
	pushl	%ebx
	cfi_adjust_cfa_offset(4)
	cfi_rel_offset(%ebx, 0)
	pushl	%edi
	cfi_adjust_cfa_offset(4)
	cfi_rel_offset(%edi, 0)
	cfi_remember_state

	movl	12(%esp), %edi

	LIBC_PROBE (cond_signal, 1, %edi)

	/* Get internal lock.  */
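	/* cond_lock is a simple futex-based lock.  The cmpxchg below tries
	   to change it atomically from 0 (unlocked) to 1 (locked); if that
	   fails, the slow path at label 1 calls __lll_lock_wait.  */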
	movl	$1, %edx
	xorl	%eax, %eax
	LOCK
#if cond_lock == 0
	cmpxchgl %edx, (%edi)
#else
	cmpxchgl %edx, cond_lock(%edi)
#endif
	jnz	1f

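	/* With the internal lock held, check whether any waiter still needs
	   a signal.  total_seq counts threads that ever started to wait,
	   wakeup_seq counts wakeups already granted; both are 64-bit values
	   compared word by word.  If total_seq <= wakeup_seq there is
	   nothing to do beyond releasing the lock (label 4).  */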
2:	leal	cond_futex(%edi), %ebx
	movl	total_seq+4(%edi), %eax
	movl	total_seq(%edi), %ecx
	cmpl	wakeup_seq+4(%edi), %eax
#if cond_lock != 0
	/* Must use leal to preserve the flags.  */
	leal	cond_lock(%edi), %edi
#endif
	ja	3f
	jb	4f
	cmpl	wakeup_seq-cond_futex(%ebx), %ecx
	jbe	4f

	/* Bump the wakeup number.  */
3:	addl	$1, wakeup_seq-cond_futex(%ebx)
	adcl	$0, wakeup_seq-cond_futex+4(%ebx)
	addl	$1, (%ebx)

	/* Wake up one thread.  */
	pushl	%esi
	cfi_adjust_cfa_offset(4)
	cfi_rel_offset(%esi, 0)
	pushl	%ebp
	cfi_adjust_cfa_offset(4)
	cfi_rel_offset(%ebp, 0)

#if FUTEX_PRIVATE_FLAG > 255
	xorl	%ecx, %ecx
#endif
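	/* A dep_mutex value of (void *) -1 marks a process-shared condvar.
	   %cl becomes 1 in that case and the computation at label 8 then
	   leaves FUTEX_PRIVATE_FLAG cleared; otherwise the flag is set so
	   the kernel can use the cheaper process-private futex path.  */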
	cmpl	$-1, dep_mutex-cond_futex(%ebx)
	sete	%cl
	je	8f

	movl	dep_mutex-cond_futex(%ebx), %edx
	/* Requeue to a non-robust PI mutex if the PI bit is set and
	   the robust bit is not set.  */
	movl	MUTEX_KIND(%edx), %eax
	andl	$(ROBUST_BIT|PI_BIT), %eax
	cmpl	$PI_BIT, %eax
	je	9f

8:	subl	$1, %ecx
#ifdef __ASSUME_PRIVATE_FUTEX
	andl	$FUTEX_PRIVATE_FLAG, %ecx
#else
	andl	%gs:PRIVATE_FUTEX, %ecx
#endif
	addl	$FUTEX_WAKE_OP, %ecx
	movl	$SYS_futex, %eax
	movl	$1, %edx
	movl	$1, %esi
	movl	$FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %ebp
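	/* The registers now hold the six futex syscall arguments:
	   %ebx = &cond_futex, %ecx = FUTEX_WAKE_OP (ORed with the private
	   flag where appropriate), %edx = 1 waiter to wake on cond_futex,
	   %esi = 1 waiter to wake on cond_lock, %edi = &cond_lock and
	   %ebp = FUTEX_OP_CLEAR_WAKE_IF_GT_ONE.  The kernel wakes one
	   waiter on cond_futex and, in the same call, stores 0 to cond_lock
	   and wakes one thread blocked on it if the old lock value was
	   greater than 1, i.e. the lock was contended.  This releases the
	   internal lock without a separate syscall.  */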
	/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
	   sysenter.
	ENTER_KERNEL  */
	int	$0x80
	popl	%ebp
	cfi_adjust_cfa_offset(-4)
	cfi_restore(%ebp)
	popl	%esi
	cfi_adjust_cfa_offset(-4)
	cfi_restore(%esi)

	/* For any kind of error, we try again with WAKE.
	   The general test also covers running on old kernels.  */
	cmpl	$-4095, %eax
	jae	7f

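	/* Common exit: this implementation can only return 0.  */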
6:	xorl	%eax, %eax
	popl	%edi
	cfi_adjust_cfa_offset(-4)
	cfi_restore(%edi)
	popl	%ebx
	cfi_adjust_cfa_offset(-4)
	cfi_restore(%ebx)
	ret

	cfi_restore_state

9:	movl	$(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx
	movl	$SYS_futex, %eax
	movl	$1, %edx
	xorl	%esi, %esi
	movl	dep_mutex-cond_futex(%ebx), %edi
	movl	(%ebx), %ebp
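	/* Arguments for FUTEX_CMP_REQUEUE_PI: %ebx = &cond_futex,
	   %ecx = FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG (this path is only
	   reached for process-private condvars), %edx = 1 (wake at most one
	   waiter), %esi = 0 (requeue no further waiters onto the mutex),
	   %edi = dep_mutex and %ebp = the expected cond_futex value the
	   kernel compares against before acting.  */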
	/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for
	   sysenter.
	ENTER_KERNEL  */
	int	$0x80
	popl	%ebp
	popl	%esi

	leal	-cond_futex(%ebx), %edi

	/* For any kind of error, we try again with WAKE.
	   The general test also covers running on old kernels.  */
	cmpl	$-4095, %eax
	jb	4f

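	/* Fallback for a failed FUTEX_WAKE_OP or FUTEX_CMP_REQUEUE_PI:
	   keep only the private-flag bit of %ecx and retry with a plain
	   FUTEX_WAKE of one waiter (%edx is still 1 from above).  */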
7:
#ifdef __ASSUME_PRIVATE_FUTEX
	andl	$FUTEX_PRIVATE_FLAG, %ecx
#else
	andl	%gs:PRIVATE_FUTEX, %ecx
#endif
	orl	$FUTEX_WAKE, %ecx

	movl	$SYS_futex, %eax
	/* %edx should be 1 already from $FUTEX_WAKE_OP syscall.
	movl	$1, %edx  */
	ENTER_KERNEL

	/* Unlock.  Note that at this point %edi always points to
	   cond_lock.  */
4:	LOCK
	subl	$1, (%edi)
	je	6b

	/* Unlock in loop requires wakeup.  */
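	/* __lll_unlock_wake takes the lock address in %eax and LLL_PRIVATE
	   or LLL_SHARED in %ecx; the setne/subl/andl sequence derives the
	   right flavor from dep_mutex, as above.  */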
5:	movl	%edi, %eax
#if (LLL_SHARED-LLL_PRIVATE) > 255
	xorl	%ecx, %ecx
#endif
	cmpl	$-1, dep_mutex-cond_futex(%ebx)
	setne	%cl
	subl	$1, %ecx
	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
#if LLL_PRIVATE != 0
	addl	$LLL_PRIVATE, %ecx
#endif
	call	__lll_unlock_wake
	jmp	6b

	/* Initial locking failed.  */
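	/* The cmpxchg above lost the race for cond_lock.  __lll_lock_wait
	   expects the lock address in %edx and LLL_PRIVATE or LLL_SHARED in
	   %ecx; once the lock has been acquired, continue at label 2.  */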
1:
#if cond_lock == 0
	movl	%edi, %edx
#else
	leal	cond_lock(%edi), %edx
#endif
#if (LLL_SHARED-LLL_PRIVATE) > 255
	xorl	%ecx, %ecx
#endif
	cmpl	$-1, dep_mutex(%edi)
	setne	%cl
	subl	$1, %ecx
	andl	$(LLL_SHARED-LLL_PRIVATE), %ecx
#if LLL_PRIVATE != 0
	addl	$LLL_PRIVATE, %ecx
#endif
	call	__lll_lock_wait
	jmp	2b

	cfi_endproc
	.size	__pthread_cond_signal, .-__pthread_cond_signal
versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
		  GLIBC_2_3_2)