/* nptl/pthread_mutex_unlock.c (glibc).
   NOTE: this file was recovered from a gitweb blame export
   (git.ipfire.org mirror of thirdparty/glibc.git); the blame banner
   has been removed.  */
/* Copyright (C) 2002-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>

#include "pthreadP.h"
#include <lowlevellock.h>
#include <stap-probe.h>
#include <futex-internal.h>

/* Slow path handling the less common mutex kinds (robust,
   priority-inheritance, priority-protection); defined below.  Kept out
   of line so the common-case code stays compact.  */
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
     __attribute_noinline__;


/* Unlock MUTEX; if DECR is nonzero, also decrement the in-use counter
   __nusers.  Returns 0 on success, or EPERM when the calling thread
   does not own an owner-checked (recursive/error-checking) mutex.
   Handles only the plain mutex kinds here; anything with bits outside
   the kind/elision masks is forwarded to __pthread_mutex_unlock_full.  */
int
attribute_hidden
__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr)
{
  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);
  /* Bits beyond the plain kind/elision flags indicate a robust, PI, or
     PP mutex; those take the out-of-line slow path.  */
  if (__builtin_expect (type
                        & ~(PTHREAD_MUTEX_KIND_MASK_NP
                            |PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_unlock_full (mutex, decr);

  if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP)
      == PTHREAD_MUTEX_TIMED_NP)
    {
      /* Always reset the owner field.  */
    normal:
      mutex->__data.__owner = 0;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  */
      lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex));

      LIBC_PROBE (mutex_release, 1, mutex);

      return 0;
    }
  else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP))
    {
      /* Don't reset the owner/users fields for elision.  */
      return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision,
                                 PTHREAD_MUTEX_PSHARED (mutex));
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_RECURSIVE_NP, 1))
    {
      /* Recursive mutex.  Only the owning thread may unlock it.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto normal;
    }
  else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex)
                             == PTHREAD_MUTEX_ADAPTIVE_NP, 1))
    goto normal;
  else
    {
      /* Error checking mutex.  Reject unlocks by a non-owner and
         unlocks of a mutex that is not actually locked.  */
      assert (type == PTHREAD_MUTEX_ERRORCHECK_NP);
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;
      goto normal;
    }
}
76a50749 91
683040c3 92
/* Slow-path unlock for robust, priority-inheritance (PI), and
   priority-protection (PP) mutexes.  DECR as in
   __pthread_mutex_unlock_usercnt.  Returns 0, EPERM (caller is not the
   owner), ENOTRECOVERABLE (recursive robust mutex still held after an
   inconsistent-state unlock), EINVAL (impossible type), or the result
   of __pthread_tpp_change_priority for PP mutexes.  */
static int
__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
{
  int newowner = 0;
  int private;

  switch (PTHREAD_MUTEX_TYPE (mutex))
    {
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto robust;

    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         (waiters will observe the PTHREAD_MUTEX_NOTRECOVERABLE owner).  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_INCONSISTENT, 0))
      notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

    robust:
      /* Remove mutex from the list.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We must set op_pending before we dequeue the mutex.  Also see
         comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      DEQUEUE_MUTEX (mutex);

      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock by setting the lock to 0 (not acquired); if the lock had
         FUTEX_WAITERS set previously, then wake any waiters.
         The unlock operation must be the last access to the mutex to not
         violate the mutex destruction requirements (see __lll_unlock).  */
      private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
      if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0)
                             & FUTEX_WAITERS) != 0))
        futex_wake ((unsigned int *) &mutex->__data.__lock, 1, private);

      /* We must clear op_pending after we release the mutex.
         FIXME However, this violates the mutex destruction requirements
         because another thread could acquire the mutex, destroy it, and
         reuse the memory for something else; then, if this thread crashes,
         and the memory happens to have a value equal to the TID, the kernel
         will believe it is still related to the mutex (which has been
         destroyed already) and will modify some other random object.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto continue_pi_non_robust;

    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
      /* Recursive mutex.  */
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          == THREAD_GETMEM (THREAD_SELF, tid)
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
            return ENOTRECOVERABLE;

          goto pi_notrecoverable;
        }

      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;

      goto continue_pi_robust;

    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      if ((mutex->__data.__lock & FUTEX_TID_MASK)
          != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_islocked (mutex->__data.__lock))
        return EPERM;

      /* If the previous owner died and the caller did not succeed in
         making the state consistent, mark the mutex as unrecoverable
         (waiters will observe the PTHREAD_MUTEX_NOTRECOVERABLE owner).  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0
          && __builtin_expect (mutex->__data.__owner
                               == PTHREAD_MUTEX_INCONSISTENT, 0))
      pi_notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;

      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      if ((atomic_load_relaxed (&(mutex->__data.__kind))
           & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0)
        {
        continue_pi_robust:
          /* Remove mutex from the list.
             Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
          /* We must set op_pending before we dequeue the mutex.  Also see
             comments at ENQUEUE_MUTEX.  */
          __asm ("" ::: "memory");
          DEQUEUE_MUTEX (mutex);
        }

    continue_pi_non_robust:
      mutex->__data.__owner = newowner;
      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Load all necessary mutex data before releasing the mutex
         to not violate the mutex destruction requirements (see
         lll_unlock).  */
      /* See concurrency notes regarding __kind in struct __pthread_mutex_s
         in sysdeps/nptl/bits/thread-shared-types.h.  */
      int robust = atomic_load_relaxed (&(mutex->__data.__kind))
        & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
      private = (robust
                 ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                 : PTHREAD_MUTEX_PSHARED (mutex));
      /* Unlock the mutex using a CAS unless there are futex waiters or our
         TID is not the value of __lock anymore, in which case we let the
         kernel take care of the situation.  Use release MO in the CAS to
         synchronize with acquire MO in lock acquisitions.  */
      int l = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          if (((l & FUTEX_WAITERS) != 0)
              || (l != THREAD_GETMEM (THREAD_SELF, tid)))
            {
              futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                               private);
              break;
            }
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &l, 0));

      /* This happens after the kernel releases the mutex but violates the
         mutex destruction requirements; see comments in the code handling
         PTHREAD_MUTEX_ROBUST_NORMAL_NP.  */
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
      /* Recursive mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid))
        return EPERM;

      if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
      goto pp;

    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
      /* Error checking mutex.  */
      if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0)
        return EPERM;
      /* FALLTHROUGH */

    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      /* Always reset the owner field.  */
    pp:
      mutex->__data.__owner = 0;

      if (decr)
        /* One less user.  */
        --mutex->__data.__nusers;

      /* Unlock.  Use release MO in the CAS to synchronize with acquire MO in
         lock acquisitions.  The new value keeps only the priority-ceiling
         bits of the old lock word.  */
      int newval;
      int oldval = atomic_load_relaxed (&mutex->__data.__lock);
      do
        {
          newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK;
        }
      while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock,
                                                    &oldval, newval));

      /* A lock count above 1 in the non-ceiling bits means there were
         waiters; wake one of them.  */
      if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1)
        futex_wake ((unsigned int *)&mutex->__data.__lock, 1,
                    PTHREAD_MUTEX_PSHARED (mutex));

      int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

      LIBC_PROBE (mutex_release, 1, mutex);

      /* Drop the temporary priority boost associated with this ceiling.  */
      return __pthread_tpp_change_priority (oldprio, -1);

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  LIBC_PROBE (mutex_release, 1, mutex);
  return 0;
}
61623643
UD
348
349
/* Public entry point for pthread_mutex_unlock: unlock MUTEX and drop
   one user reference (DECR == 1).  */
int
__pthread_mutex_unlock (pthread_mutex_t *mutex)
{
  return __pthread_mutex_unlock_usercnt (mutex, 1);
}
weak_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
hidden_def (__pthread_mutex_unlock)