/*
 * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* We need to use the OPENSSL_fork_*() deprecated APIs */
#define OPENSSL_SUPPRESS_DEPRECATED

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/cryptlib.h"
#include "internal/rcu.h"
#include "rcu_internal.h"

#if defined(__sun)
# include <atomic.h>
#endif

#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
/*
 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE and
 * __ATOMIC_ACQ_REL but which expects only one parameter for __atomic_is_lock_free(),
 * i.e. its signature is __atomic_is_lock_free(sizeof(_Atomic(T))), rather than two.
 * All of this makes it impossible to use __atomic_is_lock_free here.
 *
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
 */
# define BROKEN_CLANG_ATOMICS
#endif

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)

# if defined(OPENSSL_SYS_UNIX)
#  include <sys/types.h>
#  include <unistd.h>
# endif

# include <assert.h>

# ifdef PTHREAD_RWLOCK_INITIALIZER
#  define USE_RWLOCK
# endif

# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
#  if defined(__APPLE__) && defined(__clang__) && defined(__aarch64__)
/*
 * Apple M1 virtualized cpu seems to have some problem using the ldapr instruction
 * (see https://github.com/openssl/openssl/pull/23974).
 * When using the native apple clang compiler, this instruction is emitted for
 * atomic loads, which is bad. So, if
 * 1) We are building on a target that defines __APPLE__ AND
 * 2) We are building on a target using clang (__clang__) AND
 * 3) We are building for an M1 processor (__aarch64__)
 * then we should not use __atomic_load_n and should instead implement our own
 * function to issue the ldar instruction, which produces the proper
 * sequencing guarantees
 */
static inline void *apple_atomic_load_n(void **p)
{
    void *ret;

    __asm volatile("ldar %0, [%1]" : "=r" (ret): "r" (p):);

    return ret;
}

#   define ATOMIC_LOAD_N(p, o) apple_atomic_load_n((void **)p)
#  else
#   define ATOMIC_LOAD_N(p, o) __atomic_load_n(p, o)
#  endif
#  define ATOMIC_STORE_N(p, v, o) __atomic_store_n(p, v, o)
#  define ATOMIC_STORE(p, v, o) __atomic_store(p, v, o)
#  define ATOMIC_EXCHANGE_N(p, v, o) __atomic_exchange_n(p, v, o)
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
#  define ATOMIC_FETCH_ADD(p, v, o) __atomic_fetch_add(p, v, o)
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
#  define ATOMIC_AND_FETCH(p, m, o) __atomic_and_fetch(p, m, o)
#  define ATOMIC_OR_FETCH(p, m, o) __atomic_or_fetch(p, m, o)
# else
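/*
 * No usable compiler atomics here, so emulate the ATOMIC_* operations below
 * by serializing every access through a single process-wide mutex. This is
 * correct but slower; it is only meant as a portability fallback.
 */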
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;

static inline void *fallback_atomic_load_n(void **p)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *(void **)p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_LOAD_N(p, o) fallback_atomic_load_n((void **)p)

static inline void *fallback_atomic_store_n(void **p, void *v)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p = v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_STORE_N(p, v, o) fallback_atomic_store_n((void **)p, (void *)v)

static inline void fallback_atomic_store(void **p, void **v)
{
    pthread_mutex_lock(&atomic_sim_lock);
    *p = *v;
    pthread_mutex_unlock(&atomic_sim_lock);
}

#  define ATOMIC_STORE(p, v, o) fallback_atomic_store((void **)p, (void **)v)

static inline void *fallback_atomic_exchange_n(void **p, void *v)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p = v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_EXCHANGE_N(p, v, o) fallback_atomic_exchange_n((void **)p, (void *)v)

static inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p += v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)

static inline uint64_t fallback_atomic_fetch_add(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p += v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_FETCH_ADD(p, v, o) fallback_atomic_fetch_add(p, v)

static inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p -= v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)

static inline uint64_t fallback_atomic_and_fetch(uint64_t *p, uint64_t m)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p &= m;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_AND_FETCH(p, v, o) fallback_atomic_and_fetch(p, v)

static inline uint64_t fallback_atomic_or_fetch(uint64_t *p, uint64_t m)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p |= m;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_OR_FETCH(p, v, o) fallback_atomic_or_fetch(p, v)
# endif

static CRYPTO_THREAD_LOCAL rcu_thr_key;

/*
 * users is broken up into 2 parts
 * bits 0-15: current readers
 * bits 32-63: ID
 */
# define READER_SHIFT 0
# define ID_SHIFT 32
# define READER_SIZE 16
# define ID_SIZE 32

# define READER_MASK (((uint64_t)1 << READER_SIZE) - 1)
# define ID_MASK (((uint64_t)1 << ID_SIZE) - 1)
# define READER_COUNT(x) (((uint64_t)(x) >> READER_SHIFT) & READER_MASK)
# define ID_VAL(x) (((uint64_t)(x) >> ID_SHIFT) & ID_MASK)
# define VAL_READER ((uint64_t)1 << READER_SHIFT)
# define VAL_ID(x) ((uint64_t)x << ID_SHIFT)

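/*
 * Worked example of the layout above (a sketch; the value is made up):
 * users == VAL_ID(3) + 2 * VAL_READER encodes generation ID 3 with two
 * active readers, so ID_VAL(users) == 3 and READER_COUNT(users) == 2.
 */
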
/*
 * This is the core of an rcu lock. It tracks the readers and writers for the
 * current quiescence point for a given lock. Users is the 64 bit value that
 * stores the READERS/ID as defined above
 */
struct rcu_qp {
    uint64_t users;
};

struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};

# define MAX_QPS 10
/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

/*
 * This is the internal version of a CRYPTO_RCU_LOCK;
 * it is cast from CRYPTO_RCU_LOCK
 */
struct rcu_lock_st {
    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;

    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* Number of elements in qp_group array */
    size_t group_count;

    /* Index of the current qp in the qp_group array */
    uint64_t reader_idx;

    /* value of the next id_ctr to be retired */
    uint32_t next_to_retire;

    /* index of the next free rcu_qp in the qp_group */
    uint64_t current_alloc_idx;

    /* number of qp's in qp_group array currently being retired */
    uint32_t writers_alloced;

    /* lock protecting write side operations */
    pthread_mutex_t write_lock;

    /* lock protecting updates to writers_alloced/current_alloc_idx */
    pthread_mutex_t alloc_lock;

    /* signal to wake threads waiting on alloc_lock */
    pthread_cond_t alloc_signal;

    /* lock to enforce in-order retirement */
    pthread_mutex_t prior_lock;

    /* signal to wake threads waiting on prior_lock */
    pthread_cond_t prior_signal;
};

/*
 * Called on thread exit to free the data associated with the
 * rcu thread-local key for this thread, if any
 */
static void free_rcu_thr_data(void *ptr)
{
    struct rcu_thr_data *data =
            (struct rcu_thr_data *)CRYPTO_THREAD_get_local(&rcu_thr_key);

    OPENSSL_free(data);
    CRYPTO_THREAD_set_local(&rcu_thr_key, NULL);
}

static void ossl_rcu_init(void)
{
    CRYPTO_THREAD_init_local(&rcu_thr_key, NULL);
}

/* Read side acquisition of the current qp */
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
{
    uint64_t qp_idx;

    /* get the current qp index */
    for (;;) {
        /*
         * Notes on use of __ATOMIC_ACQUIRE
         * We need to ensure the following:
         * 1) That subsequent operations aren't optimized by hoisting them above
         * this operation. Specifically, we don't want the below re-load of
         * qp_idx to get optimized away
         * 2) We want to ensure that any updating of reader_idx on the write side
         * of the lock is flushed from a local cpu cache so that we see any
         * updates prior to the load. This is a non-issue on cache coherent
         * systems like x86, but is relevant on other arches
         * Note: This applies to the reload below as well
         */
        qp_idx = (uint64_t)ATOMIC_LOAD_N(&lock->reader_idx, __ATOMIC_ACQUIRE);

        /*
         * Notes on use of __ATOMIC_RELEASE
         * This counter is only read by the write side of the lock, and so we
         * specify __ATOMIC_RELEASE here to ensure that the write side of the
         * lock sees this during its spin loop read of users, as it waits for
         * the reader count to approach zero
         */
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,
                         __ATOMIC_RELEASE);

        /* if the idx hasn't changed, we're good, else try again */
        if (qp_idx == (uint64_t)ATOMIC_LOAD_N(&lock->reader_idx, __ATOMIC_ACQUIRE))
            break;

        /*
         * Notes on use of __ATOMIC_RELEASE
         * As with the add above, we want to ensure that this decrement is
         * seen by the write side of the lock as soon as it happens to prevent
         * undue spinning waiting for write side completion
         */
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,
                         __ATOMIC_RELEASE);
    }

    return &lock->qp_group[qp_idx];
}

void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i, available_qp = -1;

    /*
     * get the per-thread tracking data for this thread, allocating
     * and registering it on first use
     */
    data = CRYPTO_THREAD_get_local(&rcu_thr_key);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(&rcu_thr_key, data);
        ossl_init_thread_start(NULL, NULL, free_rcu_thr_data);
    }

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth++;
            return;
        }
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}

void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    int i;
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(&rcu_thr_key);
    uint64_t ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            /*
             * As with read side acquisition, we use __ATOMIC_RELEASE here
             * to ensure that the decrement is published immediately
             * to any write side waiters
             */
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users, VAL_READER,
                                       __ATOMIC_RELEASE);
                OPENSSL_assert(ret != UINT64_MAX);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
    /*
     * If we get here, we're trying to unlock a lock that we never acquired -
     * that's fatal.
     */
    assert(0);
}

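/*
 * Typical reader-side usage, as a sketch only (the protected pointer
 * "shared_ptr" and the object name are hypothetical, not part of this file):
 *
 *     ossl_rcu_read_lock(lock);
 *     obj = ossl_rcu_uptr_deref((void **)&shared_ptr);
 *     ... use obj; a writer cannot free it until after the unlock ...
 *     ossl_rcu_read_unlock(lock);
 */
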
/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
{
    uint64_t new_id;
    uint64_t current_idx;

    pthread_mutex_lock(&lock->alloc_lock);

    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    new_id = lock->id_ctr;
    lock->id_ctr++;

    new_id = VAL_ID(new_id);
    /*
     * Even though we are under a write side lock here,
     * we need to use atomic instructions to ensure that the results
     * of this update are published to the read side prior to updating the
     * reader idx below
     */
    ATOMIC_AND_FETCH(&lock->qp_group[current_idx].users, ID_MASK,
                     __ATOMIC_RELEASE);
    ATOMIC_OR_FETCH(&lock->qp_group[current_idx].users, new_id,
                    __ATOMIC_RELEASE);

    /*
     * Update the reader index to be the prior qp.
     * Note the use of __ATOMIC_RELEASE here is based on the corresponding use
     * of __ATOMIC_ACQUIRE in get_hold_current_qp, as we want any publication
     * of this value to be seen on the read side immediately after it happens
     */
    ATOMIC_STORE_N(&lock->reader_idx, lock->current_alloc_idx,
                   __ATOMIC_RELEASE);

    /* wake up any waiters */
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
    return &lock->qp_group[current_idx];
}

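/*
 * Return a qp whose grace period has completed to the group and wake any
 * writer waiting in update_qp() for a free qp
 */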
static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
{
    pthread_mutex_lock(&lock->alloc_lock);
    lock->writers_alloced--;
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
}

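/*
 * Allocate the array of quiescence points for a lock; the caller
 * (ossl_rcu_lock_new) sizes it as one qp per expected writer plus one spare
 */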
static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
                                            int count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}

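/*
 * Writers serialize against one another with write_lock; readers never take
 * it, they only consume reader_idx and the qp user counts
 */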
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_lock(&lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_unlock(&lock->write_lock);
}

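/*
 * Wait for every reader that could still hold a reference obtained before
 * this call to drop it, then run any callbacks queued via ossl_rcu_call
 */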
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    struct rcu_cb_item *cb_items, *tmpcb;

    /*
     * __ATOMIC_ACQ_REL is used here to ensure that we get any prior published
     * writes before we read, and publish our write immediately
     */
    cb_items = ATOMIC_EXCHANGE_N(&lock->cb_items, NULL, __ATOMIC_ACQ_REL);

    qp = update_qp(lock);

    /*
     * wait for the reader count to reach zero
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
     * prior __ATOMIC_RELEASE write operation in get_hold_current_qp
     * is visible prior to our read
     */
    do {
        count = (uint64_t)ATOMIC_LOAD_N(&qp->users, __ATOMIC_ACQUIRE);
    } while (READER_COUNT(count) != 0);

    /* retire in order */
    pthread_mutex_lock(&lock->prior_lock);
    while (lock->next_to_retire != ID_VAL(count))
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
    lock->next_to_retire++;
    pthread_cond_broadcast(&lock->prior_signal);
    pthread_mutex_unlock(&lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }
}

int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new =
        OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return 0;

    new->data = data;
    new->fn = cb;
    /*
     * Use __ATOMIC_ACQ_REL here to indicate that any prior writes to this
     * list are visible to us prior to reading, and publish the new value
     * immediately
     */
    new->next = ATOMIC_EXCHANGE_N(&lock->cb_items, new, __ATOMIC_ACQ_REL);

    return 1;
}

void *ossl_rcu_uptr_deref(void **p)
{
    return (void *)ATOMIC_LOAD_N(p, __ATOMIC_ACQUIRE);
}

void ossl_rcu_assign_uptr(void **p, void **v)
{
    ATOMIC_STORE(p, v, __ATOMIC_RELEASE);
}

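/*
 * Typical writer-side usage, as a sketch only (the shared pointer
 * "shared_ptr" and the object names are hypothetical, not part of this file):
 *
 *     ossl_rcu_write_lock(lock);
 *     old = ossl_rcu_uptr_deref((void **)&shared_ptr);
 *     ossl_rcu_assign_uptr((void **)&shared_ptr, (void **)&new_obj);
 *     ossl_rcu_write_unlock(lock);
 *     ossl_synchronize_rcu(lock);   // or queue the free via ossl_rcu_call
 *     OPENSSL_free(old);
 */
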
static CRYPTO_ONCE rcu_init_once = CRYPTO_ONCE_STATIC_INIT;

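/*
 * Create a new rcu lock; num_writers sizes the qp group so that up to that
 * many ossl_synchronize_rcu calls can be in flight at once (one extra qp is
 * always kept so readers have a qp that is not being waited on)
 */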
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers)
{
    struct rcu_lock_st *new;

    if (!CRYPTO_THREAD_run_once(&rcu_init_once, ossl_rcu_init))
        return NULL;

    if (num_writers < 1)
        num_writers = 1;

    new = OPENSSL_zalloc(sizeof(*new));
    if (new == NULL)
        return NULL;

    pthread_mutex_init(&new->write_lock, NULL);
    pthread_mutex_init(&new->prior_lock, NULL);
    pthread_mutex_init(&new->alloc_lock, NULL);
    pthread_cond_init(&new->prior_signal, NULL);
    pthread_cond_init(&new->alloc_signal, NULL);
    new->qp_group = allocate_new_qp_group(new, num_writers + 1);
    if (new->qp_group == NULL) {
        OPENSSL_free(new);
        new = NULL;
    }
    return new;
}

void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;

    if (lock == NULL)
        return;

    /* make sure we're synchronized */
    ossl_synchronize_rcu(rlock);

    OPENSSL_free(rlock->qp_group);
    /* There should only be a single qp left now */
    OPENSSL_free(rlock);
}

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
# ifdef USE_RWLOCK
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    if (pthread_rwlock_init(lock, NULL) != 0) {
        OPENSSL_free(lock);
        return NULL;
    }
# else
    pthread_mutexattr_t attr;
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    /*
     * We don't use recursive mutexes, but try to catch errors if we do.
     */
    pthread_mutexattr_init(&attr);
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#   endif
#  else
    /* The SPT Thread Library does not define MUTEX attributes. */
#  endif

    if (pthread_mutex_init(lock, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);
        OPENSSL_free(lock);
        return NULL;
    }

    pthread_mutexattr_destroy(&attr);
# endif

    return lock;
}

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_rdlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_wrlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_unlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_unlock(lock) != 0) {
        assert(errno != EPERM);
        return 0;
    }
# endif

    return 1;
}

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifdef USE_RWLOCK
    pthread_rwlock_destroy(lock);
# else
    pthread_mutex_destroy(lock);
# endif
    OPENSSL_free(lock);

    return;
}

int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    if (pthread_once(once, init) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
    if (pthread_key_create(key, cleanup) != 0)
        return 0;

    return 1;
}

void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    return pthread_getspecific(*key);
}

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (pthread_setspecific(*key, val) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (pthread_key_delete(*key) != 0)
        return 0;

    return 1;
}

CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return pthread_self();
}

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return pthread_equal(a, b);
}

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val += amount;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

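/*
 * Example use of the atomic helpers above, as a sketch only (the counter and
 * its fallback lock are hypothetical):
 *
 *     static int counter;
 *     static CRYPTO_RWLOCK *counter_lock;   // used only when no native atomics
 *     int new_val;
 *
 *     if (!CRYPTO_atomic_add(&counter, 1, &new_val, counter_lock))
 *         return 0;
 */
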
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

# ifndef FIPS_MODULE
int openssl_init_fork_handlers(void)
{
    return 1;
}
# endif /* FIPS_MODULE */

int openssl_get_fork_id(void)
{
    return getpid();
}
#endif