/*
 * Copyright 2016-2025 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
/* We need to use the OPENSSL_fork_*() deprecated APIs */
#define OPENSSL_SUPPRESS_DEPRECATED

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include <crypto/sparse_array.h>
#include "internal/cryptlib.h"
#include "internal/threads_common.h"
#include "internal/rcu.h"
#include "rcu_internal.h"
#if defined(__clang__) && defined(__has_feature)
# if __has_feature(thread_sanitizer)
#  define __SANITIZE_THREAD__
# endif
#endif

#if defined(__SANITIZE_THREAD__)
# include <sanitizer/tsan_interface.h>
# define TSAN_FAKE_UNLOCK(x)   __tsan_mutex_pre_unlock((x), 0); \
                               __tsan_mutex_post_unlock((x), 0)

# define TSAN_FAKE_LOCK(x)  __tsan_mutex_pre_lock((x), 0); \
                            __tsan_mutex_post_lock((x), 0, 0)
#else
# define TSAN_FAKE_UNLOCK(x)
# define TSAN_FAKE_LOCK(x)
#endif
#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
/*
 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE
 * and __ATOMIC_ACQ_REL but which expects only one parameter for
 * __atomic_is_lock_free() rather than two; its signature is
 * __atomic_is_lock_free(sizeof(_Atomic(T))).
 * All of this makes it impossible to use __atomic_is_lock_free here.
 *
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
 */
# define BROKEN_CLANG_ATOMICS
#endif
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)

# if defined(OPENSSL_SYS_UNIX)
#  include <sys/types.h>
#  include <unistd.h>
# endif

/*
 * The Non-Stop KLT thread model currently seems broken in its rwlock
 * implementation
 */
# if defined(PTHREAD_RWLOCK_INITIALIZER) && !defined(_KLT_MODEL_)
#  define USE_RWLOCK
# endif
/*
 * For all GNU/clang atomic builtins, we also need fallbacks, to cover all
 * other compilers.
 *
 * Unfortunately, we can't do that with some "generic type", because there's no
 * guarantee that the chosen generic type is large enough to cover all cases.
 * Therefore, we implement fallbacks for each applicable type, with composed
 * names that include the type they handle.
 *
 * (an anecdote: we previously tried to use |void *| as the generic type, with
 * the thought that the pointer itself is the largest type. However, this is
 * not true on 32-bit pointer platforms, as a |uint64_t| is twice as large)
 *
 * All applicable ATOMIC_ macros take the intended type as first parameter, so
 * they can map to the correct fallback function. In the GNU/clang case, that
 * parameter is simply ignored.
 */

/*
 * Internal types used with the ATOMIC_ macros, to make it possible to compose
 * fallback function names.
 */
typedef void *pvoid;
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
     && !defined(USE_ATOMIC_FALLBACKS)
#  define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
#  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
#  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
# else
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
#  define IMPL_fallback_atomic_load_n(t)                        \
    static ossl_inline t fallback_atomic_load_n_##t(t *p)       \
    {                                                           \
        t ret;                                                  \
                                                                \
        pthread_mutex_lock(&atomic_sim_lock);                   \
        ret = *p;                                               \
        pthread_mutex_unlock(&atomic_sim_lock);                 \
        return ret;                                             \
    }
IMPL_fallback_atomic_load_n(uint32_t)
IMPL_fallback_atomic_load_n(uint64_t)
IMPL_fallback_atomic_load_n(pvoid)

#  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
#  define IMPL_fallback_atomic_store_n(t)                       \
    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v) \
    {                                                           \
        t ret;                                                  \
                                                                \
        pthread_mutex_lock(&atomic_sim_lock);                   \
        ret = *p;                                               \
        *p = v;                                                 \
        pthread_mutex_unlock(&atomic_sim_lock);                 \
        return ret;                                             \
    }
IMPL_fallback_atomic_store_n(uint32_t)

#  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
#  define IMPL_fallback_atomic_store(t)                           \
    static ossl_inline void fallback_atomic_store_##t(t *p, t *v) \
    {                                                             \
        pthread_mutex_lock(&atomic_sim_lock);                     \
        *p = *v;                                                  \
        pthread_mutex_unlock(&atomic_sim_lock);                   \
    }
IMPL_fallback_atomic_store(pvoid)

#  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
/*
 * The fallbacks that follow don't need any per type implementation, as
 * they are designed for uint64_t only. If there comes a time when multiple
 * types need to be covered, it's relatively easy to refactor them the same
 * way as the fallbacks above.
 */
static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p += v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p -= v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
# endif
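
/*
 * Illustrative sketch (not part of the original file): the ATOMIC_ macros
 * above are meant to behave identically whether they expand to the
 * GNU/clang builtins or to the mutex-protected fallbacks, e.g.
 *
 *     uint64_t counter = 0;
 *     uint64_t snap;
 *
 *     ATOMIC_ADD_FETCH(&counter, (uint64_t)1, __ATOMIC_RELEASE);
 *     snap = ATOMIC_LOAD_N(uint64_t, &counter, __ATOMIC_ACQUIRE);
 *
 * In the fallback case both operations serialize on atomic_sim_lock, so the
 * memory-order argument is effectively ignored.
 */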
/*
 * This is the core of an rcu lock. It tracks the readers and writers for the
 * current quiescence point for a given lock. Users is the 64 bit value that
 * stores the READERS/ID as defined above.
 */
struct rcu_qp {
    uint64_t users;
};

struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};
/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};
/*
 * This is the internal version of a CRYPTO_RCU_LOCK
 * it is cast from CRYPTO_RCU_LOCK
 */
struct rcu_lock_st {
    /* Callbacks to call for next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* The context we are being created against */
    OSSL_LIB_CTX *ctx;

    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;

    /* Number of elements in qp_group array */
    uint32_t group_count;

    /* Index of the current qp in the qp_group array */
    uint32_t reader_idx;

    /* value of the next id_ctr value to be retired */
    uint32_t next_to_retire;

    /* index of the next free rcu_qp in the qp_group */
    uint32_t current_alloc_idx;

    /* number of qp's in qp_group array currently being retired */
    uint32_t writers_alloced;

    /* lock protecting write side operations */
    pthread_mutex_t write_lock;

    /* lock protecting updates to writers_alloced/current_alloc_idx */
    pthread_mutex_t alloc_lock;

    /* signal to wake threads waiting on alloc_lock */
    pthread_cond_t alloc_signal;

    /* lock to enforce in-order retirement */
    pthread_mutex_t prior_lock;

    /* signal to wake threads waiting on prior_lock */
    pthread_cond_t prior_signal;
};
/* Read side acquisition of the current qp */
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
{
    uint32_t qp_idx;

    /* get the current qp index */
    for (;;) {
        qp_idx = ATOMIC_LOAD_N(uint32_t, &lock->reader_idx, __ATOMIC_RELAXED);

        /*
         * Notes on use of __ATOMIC_ACQUIRE
         * We need to ensure the following:
         * 1) That subsequent operations aren't optimized by hoisting them above
         *    this operation. Specifically, we don't want the below re-load of
         *    qp_idx to get optimized away
         * 2) We want to ensure that any updating of reader_idx on the write side
         *    of the lock is flushed from a local cpu cache so that we see any
         *    updates prior to the load. This is a non-issue on cache coherent
         *    systems like x86, but is relevant on other arches
         */
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
                         __ATOMIC_ACQUIRE);

        /* if the idx hasn't changed, we're good, else try again */
        if (qp_idx == ATOMIC_LOAD_N(uint32_t, &lock->reader_idx,
                                    __ATOMIC_RELAXED))
            break;

        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, (uint64_t)1,
                         __ATOMIC_RELAXED);
    }

    return &lock->qp_group[qp_idx];
}
static void ossl_rcu_free_local_data(void *arg)
{
    OSSL_LIB_CTX *ctx = arg;
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, ctx);

    CRYPTO_THREAD_set_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, ctx, NULL);
    OPENSSL_free(data);
}
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i, available_qp = -1;

    /*
     * we're going to access current_qp here so ask the
     * processor to fetch it
     */
    data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx, data);
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
    }

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth++;
            return;
        }
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    int i;
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local_ex(CRYPTO_THREAD_LOCAL_RCU_KEY, lock->ctx);
    uint64_t ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            /*
             * we have to use __ATOMIC_RELEASE here
             * to ensure that all preceding read instructions complete
             * before the decrement is visible to ossl_synchronize_rcu
             */
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users,
                                       (uint64_t)1, __ATOMIC_RELEASE);
                OPENSSL_assert(ret != UINT64_MAX);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
    /*
     * If we get here, we're trying to unlock a lock that we never acquired -
     * that's fatal.
     */
    assert(0);
}
/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock, uint32_t *curr_id)
{
    uint32_t current_idx;

    pthread_mutex_lock(&lock->alloc_lock);

    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    *curr_id = lock->id_ctr;
    lock->id_ctr++;

    ATOMIC_STORE_N(uint32_t, &lock->reader_idx, lock->current_alloc_idx,
                   __ATOMIC_RELAXED);

    /*
     * this should make sure that the new value of reader_idx is visible in
     * get_hold_current_qp, directly after incrementing the users count
     */
    ATOMIC_ADD_FETCH(&lock->qp_group[current_idx].users, (uint64_t)0,
                     __ATOMIC_RELEASE);

    /* wake up any waiters */
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
    return &lock->qp_group[current_idx];
}
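
/*
 * Worked example (illustrative only): with group_count == 3 and
 * current_alloc_idx == 2, the increment above wraps the allocation index to
 * (2 + 1) % 3 == 0, so writers rotate through qp_group[0], [1], [2] in order
 * while readers are steered to the qp named by the newly stored reader_idx.
 */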
static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
{
    pthread_mutex_lock(&lock->alloc_lock);
    lock->writers_alloced--;
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
}
static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
                                            uint32_t count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_lock(&lock->write_lock);
    TSAN_FAKE_UNLOCK(&lock->write_lock);
}
void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    TSAN_FAKE_LOCK(&lock->write_lock);
    pthread_mutex_unlock(&lock->write_lock);
}
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    uint32_t curr_id;
    struct rcu_cb_item *cb_items, *tmpcb;

    pthread_mutex_lock(&lock->write_lock);
    cb_items = lock->cb_items;
    lock->cb_items = NULL;
    pthread_mutex_unlock(&lock->write_lock);

    qp = update_qp(lock, &curr_id);

    /* retire in order */
    pthread_mutex_lock(&lock->prior_lock);
    while (lock->next_to_retire != curr_id)
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);

    /*
     * wait for the reader count to reach zero
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
     * prior __ATOMIC_RELEASE write operation in ossl_rcu_read_unlock
     * is visible prior to our read
     * however this is likely just necessary to silence a tsan warning
     * because the read side should not do any write operation
     * outside the atomic itself
     */
    do {
        count = ATOMIC_LOAD_N(uint64_t, &qp->users, __ATOMIC_ACQUIRE);
    } while (count != (uint64_t)0);

    lock->next_to_retire++;
    pthread_cond_broadcast(&lock->prior_signal);
    pthread_mutex_unlock(&lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }
}
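
/*
 * Illustrative sketch (not part of the original file): the writer-side
 * pattern this function supports, with |shared_ptr|, |build_new_foo| and
 * |lock| as hypothetical names:
 *
 *     struct foo *new = build_new_foo(), *old;
 *
 *     ossl_rcu_write_lock(lock);
 *     old = shared_ptr;
 *     ossl_rcu_assign_uptr((void **)&shared_ptr, (void **)&new);
 *     ossl_rcu_write_unlock(lock);
 *
 *     ossl_synchronize_rcu(lock);  -- wait for readers of |old| to drain
 *     OPENSSL_free(old);
 */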
/*
 * Note: This call assumes it's made under the protection of
 * ossl_rcu_write_lock
 */
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new =
        OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return 0;

    new->data = data;
    new->fn = cb;

    new->next = lock->cb_items;
    lock->cb_items = new;

    return 1;
}
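
/*
 * Illustrative sketch (not part of the original file): instead of blocking
 * in ossl_synchronize_rcu(), a writer can defer reclamation by registering
 * a callback while still holding the write lock (free_foo_cb is a
 * hypothetical rcu_cb_fn):
 *
 *     ossl_rcu_write_lock(lock);
 *     old = shared_ptr;
 *     ossl_rcu_assign_uptr((void **)&shared_ptr, (void **)&new);
 *     ossl_rcu_call(lock, free_foo_cb, old);
 *     ossl_rcu_write_unlock(lock);
 *
 * free_foo_cb(old) then runs at the end of the next ossl_synchronize_rcu()
 * call on this lock.
 */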
void *ossl_rcu_uptr_deref(void **p)
{
    return ATOMIC_LOAD_N(pvoid, p, __ATOMIC_ACQUIRE);
}
void ossl_rcu_assign_uptr(void **p, void **v)
{
    ATOMIC_STORE(pvoid, p, v, __ATOMIC_RELEASE);
}
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
{
    struct rcu_lock_st *new;

    /*
     * We need a minimum of 2 qp's
     */
    if (num_writers < 2)
        num_writers = 2;

    ctx = ossl_lib_ctx_get_concrete(ctx);
    if (ctx == NULL)
        return NULL;

    new = OPENSSL_zalloc(sizeof(*new));
    if (new == NULL)
        return NULL;

    new->ctx = ctx;
    pthread_mutex_init(&new->write_lock, NULL);
    pthread_mutex_init(&new->prior_lock, NULL);
    pthread_mutex_init(&new->alloc_lock, NULL);
    pthread_cond_init(&new->prior_signal, NULL);
    pthread_cond_init(&new->alloc_signal, NULL);

    new->qp_group = allocate_new_qp_group(new, num_writers);
    if (new->qp_group == NULL) {
        OPENSSL_free(new);
        new = NULL;
    }

    return new;
}
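
/*
 * Illustrative sketch (not part of the original file): typical lock
 * lifecycle against the default library context:
 *
 *     CRYPTO_RCU_LOCK *lock = ossl_rcu_lock_new(1, NULL);
 *
 *     if (lock == NULL)
 *         return 0;
 *     ...use the lock...
 *     ossl_rcu_lock_free(lock);
 *
 * Passing 1 is fine here because num_writers is raised to the minimum of 2
 * qp's above.
 */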
void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;

    if (lock == NULL)
        return;

    /* make sure we're synchronized */
    ossl_synchronize_rcu(rlock);

    OPENSSL_free(rlock->qp_group);
    /* There should only be a single qp left now */
    OPENSSL_free(rlock);
}
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
# ifdef USE_RWLOCK
    pthread_rwlock_t *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    if (pthread_rwlock_init(lock, NULL) != 0) {
        OPENSSL_free(lock);
        return NULL;
    }
# else
    pthread_mutexattr_t attr;
    pthread_mutex_t *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    /*
     * We don't use recursive mutexes, but try to catch errors if we do.
     */
    pthread_mutexattr_init(&attr);
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#   endif
#  else
    /* The SPT Thread Library does not define MUTEX attributes. */
#  endif

    if (pthread_mutex_init(lock, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);
        OPENSSL_free(lock);
        return NULL;
    }

    pthread_mutexattr_destroy(&attr);
# endif

    return lock;
}
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (!ossl_assert(pthread_rwlock_rdlock(lock) == 0))
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (!ossl_assert(pthread_rwlock_wrlock(lock) == 0))
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_unlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_unlock(lock) != 0) {
        assert(errno != EPERM);
        return 0;
    }
# endif

    return 1;
}
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifdef USE_RWLOCK
    pthread_rwlock_destroy(lock);
# else
    pthread_mutex_destroy(lock);
# endif
    OPENSSL_free(lock);

    return;
}
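
/*
 * Illustrative sketch (not part of the original file): typical use of the
 * CRYPTO_THREAD lock API implemented above:
 *
 *     CRYPTO_RWLOCK *lk = CRYPTO_THREAD_lock_new();
 *
 *     if (lk == NULL)
 *         return 0;
 *     if (CRYPTO_THREAD_write_lock(lk)) {
 *         ...update shared state...
 *         CRYPTO_THREAD_unlock(lk);
 *     }
 *     CRYPTO_THREAD_lock_free(lk);
 */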
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    if (pthread_once(once, init) != 0)
        return 0;

    return 1;
}
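
/*
 * Illustrative sketch (not part of the original file): one-time
 * initialization through the wrapper above (do_init is hypothetical):
 *
 *     static CRYPTO_ONCE once = CRYPTO_ONCE_STATIC_INIT;
 *
 *     static void do_init(void)
 *     {
 *         ...
 *     }
 *
 *     if (!CRYPTO_THREAD_run_once(&once, do_init))
 *         return 0;
 */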
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
    if (pthread_key_create(key, cleanup) != 0)
        return 0;

    return 1;
}
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    return pthread_getspecific(*key);
}
int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (pthread_setspecific(*key, val) != 0)
        return 0;

    return 1;
}
int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (pthread_key_delete(*key) != 0)
        return 0;

    return 1;
}
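
/*
 * Illustrative sketch (not part of the original file): the thread-local
 * helpers above wrap pthread_key_create()/pthread_setspecific(); a typical
 * pattern (cleanup_fn and some_ptr are hypothetical):
 *
 *     static CRYPTO_THREAD_LOCAL key;
 *
 *     if (!CRYPTO_THREAD_init_local(&key, cleanup_fn))
 *         return 0;
 *     CRYPTO_THREAD_set_local(&key, some_ptr);
 *     some_ptr = CRYPTO_THREAD_get_local(&key);
 *     ...
 *     CRYPTO_THREAD_cleanup_local(&key);
 */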
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return pthread_self();
}
int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return pthread_equal(a, b);
}
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val += amount;
    *ret  = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
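
/*
 * Illustrative sketch (not part of the original file): callers pass a
 * CRYPTO_RWLOCK that is only taken when no lock-free primitive is
 * available (|refs_lock| is hypothetical):
 *
 *     int refcount = 0, result;
 *
 *     if (!CRYPTO_atomic_add(&refcount, 1, &result, refs_lock))
 *         return 0;
 *     -- result now holds the post-increment value
 */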
int CRYPTO_atomic_add64(uint64_t *val, uint64_t op, uint64_t *ret,
                        CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_add_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val += op;
    *ret  = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
int CRYPTO_atomic_and(uint64_t *val, uint64_t op, uint64_t *ret,
                      CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_and_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_and_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val &= op;
    *ret  = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val |= op;
    *ret  = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*dst), dst)) {
        __atomic_store(dst, &val, __ATOMIC_RELEASE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (dst != NULL) {
        atomic_swap_64(dst, val);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *dst = val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
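
/*
 * Illustrative sketch (not part of the original file): CRYPTO_atomic_load()
 * and CRYPTO_atomic_store() pair an ACQUIRE load with a RELEASE store, so a
 * value published by one thread:
 *
 *     (void)CRYPTO_atomic_store(&shared, 42, lock);
 *
 * is observed, together with everything written before the store, by a
 * thread that later does:
 *
 *     uint64_t v;
 *     (void)CRYPTO_atomic_load(&shared, &v, lock);
 *
 * On the Solaris path the load is emulated with atomic_or_64_nv(val, 0).
 */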
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
# ifndef FIPS_MODULE
int openssl_init_fork_handlers(void)
{
    return 0;
}
# endif /* FIPS_MODULE */

int openssl_get_fork_id(void)
{
    return getpid();
}
#endif