/* Copyright (C) 2002-2024 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <futex-internal.h>

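/* Try to acquire MUTEX without blocking.  Returns 0 on success, EBUSY
   if the mutex is held by another thread, EAGAIN on recursion-count
   overflow, EDEADLK for an error-checking mutex the caller already
   owns, EOWNERDEAD or ENOTRECOVERABLE for robust mutexes whose previous
   owner died, and EINVAL for an invalid type or priority ceiling.  */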
int
___pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

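      /* Elision: attempt to run the critical section as a hardware
         transaction (e.g. Intel TSX on x86) instead of taking the lock;
         lll_trylock_elision falls back to a regular trylock when elision
         is not possible.  On success no ownership is recorded, since no
         thread actually holds the lock.  */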
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership.  */
      return 0;

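      /* For default (timed) mutexes, FORCE_ELISION may divert the
         acquisition to the elision path above when elision is enabled
         for this mutex.  */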
    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /* FALL THROUGH */
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

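      /* Robust mutexes: publish the mutex in robust_head.list_op_pending
         before touching it, so that if this thread dies in the middle of
         the operation the kernel and other threads can still recover the
         lock.  */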
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

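      /* Try to acquire the lock with an acquire CAS; keep looping as
         long as we observe a dead previous owner, either stealing the
         lock (returning EOWNERDEAD) or giving up with EBUSY.  */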
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              /* We haven't acquired the lock as it is already acquired by
                 another owner.  We do not need to ensure ordering wrt another
                 memory access.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              /* FIXME This violates the mutex destruction requirements.  See
                 __pthread_mutex_unlock_full.  */
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

      /* The PI support requires the Linux futex system call.  If that's not
         available, pthread_mutex_init should never have allowed the type to
         be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
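      /* Priority-inheritance mutexes store the owner's TID in the futex
         word; the uncontended path is a user-space CAS, and the kernel
         (FUTEX_TRYLOCK_PI) is only involved to arbitrate when the
         previous owner died.  */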
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                /* We haven't acquired the lock as it is already acquired by
                   another owner.  We do not need to ensure ordering wrt another
                   memory access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            int e = INTERNAL_SYSCALL_CALL (futex, &mutex->__data.__lock,
                                           __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                               private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e)
                && INTERNAL_SYSCALL_ERRNO (e) == EWOULDBLOCK)
              {
                /* The kernel has not yet finished the mutex owner death.
                   We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_fetch_and_acquire (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
                             PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
#endif  /* __NR_futex.  */

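      /* Priority-protected mutexes implement the POSIX priority-ceiling
         protocol in user space: the ceiling is kept in the upper bits of
         __lock, and the thread boosts its own priority with
         __pthread_tpp_change_priority before attempting to take the
         lock.  */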
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

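        /* Raise this thread's priority to the current ceiling, then try
           to grab the lock by CASing in ceilval | 1; restart if the
           ceiling changes underneath us.  */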
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
              >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}
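
/* pthread_mutex_trylock moved from libpthread into libc in glibc 2.34;
   the compat symbols below keep binaries linked against the older
   libpthread working.  */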
versioned_symbol (libc, ___pthread_mutex_trylock,
                  pthread_mutex_trylock, GLIBC_2_34);
libc_hidden_ver (___pthread_mutex_trylock, __pthread_mutex_trylock)
#ifndef SHARED
strong_alias (___pthread_mutex_trylock, __pthread_mutex_trylock)
#endif

#if OTHER_SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_34)
compat_symbol (libpthread, ___pthread_mutex_trylock,
               pthread_mutex_trylock, GLIBC_2_0);
compat_symbol (libpthread, ___pthread_mutex_trylock,
               __pthread_mutex_trylock, GLIBC_2_0);
#endif