/* Copyright (C) 2002-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>
#include <futex-internal.h>
#include <shlib-compat.h>

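/* Note: LIBC_PROBE, from <stap-probe.h>, expands to a SystemTap static
   probe point when glibc is configured with systemtap support and to
   nothing otherwise; the probes below mark each place a mutex is
   released.  */
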
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;

/* lll_unlock with single-thread optimization.  */
static inline void
lll_mutex_unlock_optimized (pthread_mutex_t *mutex)
{
  /* The single-threaded optimization is only valid for private
     mutexes.  For process-shared mutexes, the mutex could be in a
     shared mapping, so synchronization with another process is needed
     even without any threads.  */
  int private = PTHREAD_MUTEX_PSHARED (mutex);
  if (private == LLL_PRIVATE && SINGLE_THREAD_P)
    mutex->__data.__lock = 0;
  else
    lll_unlock (mutex->__data.__lock, private);
}

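/* Illustrative sketch, not part of glibc: with C11 atomics the same idea
   looks roughly like the following (toy_unlock and single_threaded are
   hypothetical names).  A plain store suffices when no other thread or
   process can observe the lock word; otherwise a release store is needed
   so that an acquire operation in a subsequent lock synchronizes with it.

     #include <stdatomic.h>
     #include <stdbool.h>

     static void
     toy_unlock (atomic_int *lock, bool single_threaded)
     {
       if (single_threaded)
         atomic_store_explicit (lock, 0, memory_order_relaxed);
       else
         atomic_store_explicit (lock, 0, memory_order_release);
     }

   glibc's lll_unlock additionally wakes one futex waiter when the lock
   word indicates contention.  */
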
int
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
  if (__builtin_expect (type
                        & ~(PTHREAD_MUTEX_KIND_MASK_NP
                            | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_unlock_full (mutex, decr);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_mutex_unlock_optimized (mutex);

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
      /* Don't reset the owner/users fields for elision.  */
      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
                                 PTHREAD_MUTEX_PSHARED (mutex));
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto normal;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      goto normal;
    }
}
libc_hidden_def (__pthread_mutex_unlock_usercnt)

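/* Note: pthread_mutex_unlock calls this helper with decr == 1.  Internal
   callers that release a mutex on the user's behalf while the user still
   logically holds it -- pthread_cond_wait, for example -- pass decr == 0
   so that __nusers keeps counting the logical user.  */
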
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;
  int private;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We must set op_pending before we dequeue the mutex.  Also see
         comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock by setting the lock to 0 (not acquired); if the lock had
         FUTEX_WAITERS set previously, then wake any waiters.
         The unlock operation must be the last access to the mutex to not
         violate the mutex destruction requirements (see __lll_unlock).  */
      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
      if (__glibc_unlikely ((atomic_exchange_release (&mutex->__data.__lock, 0)
                             & FUTEX_WAITERS) != 0))
        futex_wake ((unsigned int *) &mutex->__data.__lock, 1, private);

      /* We must clear op_pending after we release the mutex.
         FIXME However, this violates the mutex destruction requirements
         because another thread could acquire the mutex, destroy it, and
         reuse the memory for something else; then, if this thread crashes,
         and the memory happens to have a value equal to the TID, the kernel
         will believe it is still related to the mutex (which has been
         destroyed already) and will modify some other random object.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

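      /* Illustrative sketch, not part of glibc: an application recovers
         from an owner dying while holding a robust mutex like this
         (repair_shared_state is a hypothetical application hook):

           int r = pthread_mutex_lock (&m);
           if (r == EOWNERDEAD)
             {
               repair_shared_state ();
               pthread_mutex_consistent (&m);  // clear the inconsistent mark
               r = 0;
             }
           ...
           pthread_mutex_unlock (&m);  // takes the robust path above

         If the new owner unlocks without calling pthread_mutex_consistent,
         the code above publishes PTHREAD_MUTEX_NOTRECOVERABLE, and later
         lock attempts fail with ENOTRECOVERABLE.  */
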
      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
          /* We must set op_pending before we dequeue the mutex.  Also see
             comments at ENQUEUE_MUTEX.  */
          __asm ("" ::: "memory");
          DEQUEUE_MUTEX (mutex);
        }

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Load all necessary mutex data before releasing the mutex
         to not violate the mutex destruction requirements (see
         lll_unlock).  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
                   & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      private = (robust
                 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                 : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or our
         TID is not the value of __lock anymore, in which case we let the
         kernel take care of the situation.  Use release MO in the CAS to
         synchronize with acquire MO in lock acquisitions.  */
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          if (((l & FUTEX_WAITERS) != 0)
              || (l != THREAD_GETMEM (THREAD_SELF, tid)))
            {
              futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                               private);
              break;
            }
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &l, 0));

      /* This happens after the kernel releases the mutex but violates the
         mutex destruction requirements; see comments in the code handling
         PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
#endif  /* __NR_futex.  */

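      /* Illustrative note, not glibc code: the PI fast path above avoids a
         system call only while the lock word is exactly this thread's TID.
         Roughly:

           __lock == TID                 ->  user-space CAS to 0, done
           __lock has FUTEX_WAITERS set  ->  FUTEX_UNLOCK_PI syscall; the
                                             kernel picks the next owner and
                                             undoes the priority boost.  */
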
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || (mutex->__data.__lock & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
         lock acquisitions.  */
      int newval;
      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &oldval, newval));

      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        futex_wake ((unsigned int *) &mutex->__data.__lock, 1,
                    PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      return __pthread_tpp_change_priority (oldprio, -1);

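      /* Illustrative note, not glibc code: for PTHREAD_PRIO_PROTECT the
         lock word packs the priority ceiling and the lock state together,
         roughly

           __lock = (ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT) | state

         so the CAS above clears only the state bits and preserves the
         ceiling, and __pthread_tpp_change_priority (oldprio, -1) then drops
         the priority this thread was boosted to while holding the lock.  */
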
    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}


int
___pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
libc_hidden_ver (___pthread_mutex_unlock, __pthread_mutex_unlock)
#ifndef SHARED
strong_alias (___pthread_mutex_unlock, __pthread_mutex_unlock)
#endif
versioned_symbol (libpthread, ___pthread_mutex_unlock, pthread_mutex_unlock,
                  GLIBC_2_0);

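/* Note: pthread_mutex_unlock has carried the GLIBC_2.0 symbol version
   since the original libpthread; after the libpthread merge into libc in
   glibc 2.34 the implementation lives here, and the compat symbol below
   keeps the old __pthread_mutex_unlock alias working for binaries linked
   against earlier versions.  */
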
#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_unlock, __pthread_mutex_unlock,
               GLIBC_2_0);
#endif
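
/* Illustrative usage, not part of glibc: the EPERM paths in
   __pthread_mutex_unlock_usercnt are observable with an error-checking
   mutex:

     pthread_mutex_t m;
     pthread_mutexattr_t a;
     pthread_mutexattr_init (&a);
     pthread_mutexattr_settype (&a, PTHREAD_MUTEX_ERRORCHECK);
     pthread_mutex_init (&m, &a);

     assert (pthread_mutex_unlock (&m) == EPERM);  // not locked by caller
     pthread_mutex_lock (&m);
     assert (pthread_mutex_unlock (&m) == 0);
*/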