]> git.ipfire.org Git - thirdparty/glibc.git/blame - nptl/pthread_mutex_unlock.c
nptl: Replace lll_futex_supported_clockid with futex-internal.h
[thirdparty/glibc.git] / nptl / pthread_mutex_unlock.c
CommitLineData
d614a753 1/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
76a50749
UD
2 This file is part of the GNU C Library.
3 Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
59ba27a6 16 License along with the GNU C Library; if not, see
5a82c748 17 <https://www.gnu.org/licenses/>. */
76a50749 18
6de79a49 19#include <assert.h>
76a50749 20#include <errno.h>
1bcfb5a5 21#include <stdlib.h>
76a50749
UD
22#include "pthreadP.h"
23#include <lowlevellock.h>
5acf7263 24#include <stap-probe.h>
6b1472eb 25#include <futex-internal.h>
76a50749 26
/* Fallback for architectures without lock-elision support: perform a plain
   low-level unlock and yield 0, matching the return convention of the real
   lll_unlock_elision (the __elision argument is ignored).  */
#ifndef lll_unlock_elision
#define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
#endif
30
/* Slow path for all mutex types outside the common kind mask (robust,
   priority-inheritance, and priority-protected mutexes).  Kept out of
   line so the fast path in __pthread_mutex_unlock_usercnt stays small.  */
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;
/* Unlock MUTEX.  If DECR is nonzero, also decrement the user count
   (__nusers); pthread_cond_wait passes DECR == 0 because it keeps its
   own reference on the mutex.  Returns 0 on success, EPERM when the
   calling thread does not own an owner-checked mutex.  Dispatches the
   uncommon mutex kinds to __pthread_mutex_unlock_full.  */
int
attribute_hidden
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
  /* Any bit outside the kind/elision masks means a robust, PI, or PP
     mutex: take the out-of-line slow path.  */
  if (__builtin_expect (type
			& ~(PTHREAD_MUTEX_KIND_MASK_NP
			    |PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_unlock_full (mutex, decr);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
      /* Don't reset the owner/users fields for elision: an elided lock
	 was never truly acquired, so they were never set.  */
      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
				 PTHREAD_MUTEX_PSHARED (mutex));
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  Only the owning thread may unlock it.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;
      /* Recursion count dropped to zero: release for real.  */
      goto normal;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
			     == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex: reject unlock by a non-owner or of an
	 unlocked mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;
      goto normal;
    }
}
76a50749 95
683040c3 96
/* Slow-path unlock for robust, priority-inheritance (PI), and
   priority-protected (PP) mutexes.  DECR as in
   __pthread_mutex_unlock_usercnt.  Returns 0, EPERM (caller is not the
   owner), ENOTRECOVERABLE (robust recursive mutex already marked
   inconsistent), EINVAL (corrupt type), or the result of
   __pthread_tpp_change_priority for PP mutexes.

   NOTE(review): the robust paths maintain the kernel-visible robust list
   (robust_head.list_op_pending / DEQUEUE_MUTEX); the explicit
   __asm ("" ::: "memory") compiler barriers order those plain stores
   against the list update and the lock release — statement order here is
   load-bearing.  */
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;
  int private;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  If we hold the lock but the previous owner died
	 and the state was never made consistent, the mutex is unusable.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  == THREAD_GETMEM (THREAD_SELF, tid)
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
	{
	  if (--mutex->__data.__count != 0)
	    /* We still hold the mutex.  */
	    return ENOTRECOVERABLE;

	  goto notrecoverable;
	}

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      /* The futex word holds the owner TID; verify ownership.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;

      /* If the previous owner died and the caller did not succeed in
	 making the state consistent, mark the mutex as unrecoverable
	 and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
			    == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
	newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
		     &mutex->__data.__list.__next);
      /* We must set op_pending before we dequeue the mutex.  Also see
	 comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock by setting the lock to 0 (not acquired); if the lock had
	 FUTEX_WAITERS set previously, then wake any waiters.
	 The unlock operation must be the last access to the mutex to not
	 violate the mutex destruction requirements (see __lll_unlock).  */
      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
      if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
			     & FUTEX_WAITERS) != 0))
	lll_futex_wake (&mutex->__data.__lock, 1, private);

      /* We must clear op_pending after we release the mutex.
	 FIXME However, this violates the mutex destruction requirements
	 because another thread could acquire the mutex, destroy it, and
	 reuse the memory for something else; then, if this thread crashes,
	 and the memory happens to have a value equal to the TID, the kernel
	 will believe it is still related to the mutex (which has been
	 destroyed already) and will modify some other random object.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

      /* The PI support requires the Linux futex system call.  If that's not
	 available, pthread_mutex_init should never have allowed the type to
	 be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive robust PI mutex; same inconsistency check as the
	 plain robust recursive case above.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  == THREAD_GETMEM (THREAD_SELF, tid)
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
	{
	  if (--mutex->__data.__count != 0)
	    /* We still hold the mutex.  */
	    return ENOTRECOVERABLE;

	  goto pi_notrecoverable;
	}

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
	  != THREAD_GETMEM (THREAD_SELF, tid)
	  || ! lll_islocked (mutex->__data.__lock))
	return EPERM;

      /* If the previous owner died and the caller did not succeed in
	 making the state consistent, mark the mutex as unrecoverable
	 and wake all waiters.  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
	 in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
	   & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
	  && __builtin_expect (mutex->__data.__owner
			       == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
	newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
	 in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
	   & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
	{
	continue_pi_robust:
	  /* Remove mutex from the list.
	     Note: robust PI futexes are signaled by setting bit 0.  */
	  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
			 (void *) (((uintptr_t) &mutex->__data.__list.__next)
				   | 1));
	  /* We must set op_pending before we dequeue the mutex.  Also see
	     comments at ENQUEUE_MUTEX.  */
	  __asm ("" ::: "memory");
	  DEQUEUE_MUTEX (mutex);
	}

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock.  Load all necessary mutex data before releasing the mutex
	 to not violate the mutex destruction requirements (see
	 lll_unlock).  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
	 in sysdeps/nptl/bits/thread-shared-types.h.  */
      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
	& PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      private = (robust
		 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
		 : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or our
	 TID is not the value of __lock anymore, in which case we let the
	 kernel take care of the situation.  Use release MO in the CAS to
	 synchronize with acquire MO in lock acquisitions.  */
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
	{
	  if (((l & FUTEX_WAITERS) != 0)
	      || (l != THREAD_GETMEM (THREAD_SELF, tid)))
	    {
	      /* Kernel-assisted release: FUTEX_UNLOCK_PI hands the lock to
		 the highest-priority waiter.  */
	      futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
			       private);
	      break;
	    }
	}
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
						    &l, 0));

      /* This happens after the kernel releases the mutex but violates the
	 mutex destruction requirements; see comments in the code handling
	 PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
#endif	/* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
	return EPERM;

      if (--mutex->__data.__count != 0)
	/* We still hold the mutex.  */
	return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex: the ceiling bits may be set even when
	 unlocked, so mask them out before the locked check.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
	  || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
	return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
	/* One less user.  */
	--mutex->__data.__nusers;

      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
	 lock acquisitions.  The priority-ceiling bits are preserved in the
	 futex word across the release.  */
      int newval;
      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
      do
	{
	  newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
	}
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
						    &oldval, newval));

      /* Old lock value > 1 (ignoring ceiling bits) means there were
	 waiters; wake one.  */
      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
	lll_futex_wake (&mutex->__data.__lock, 1,
			PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      /* Drop this thread's priority back now that the ceiling no longer
	 applies.  */
      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}
61623643
UD
352
353
/* Public pthread_mutex_unlock entry point: a full unlock that also
   drops one user reference (DECR == 1).  */
int
__pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
weak_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
hidden_def (__pthread_mutex_unlock)