/*
 * Copyright 2016-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* We need to use the OPENSSL_fork_*() deprecated APIs */
#define OPENSSL_SUPPRESS_DEPRECATED

#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/cryptlib.h"
#include "internal/rcu.h"
#include "rcu_internal.h"

#if defined(__sun)
# include <atomic.h>
#endif

#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
/*
 * OS X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE
 * and __ATOMIC_ACQ_REL, but which expects only one parameter for
 * __atomic_is_lock_free() rather than two; its signature is
 * __atomic_is_lock_free(sizeof(_Atomic(T))). All of this makes it impossible
 * to use __atomic_is_lock_free here.
 *
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
 */
# define BROKEN_CLANG_ATOMICS
#endif

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)

# if defined(OPENSSL_SYS_UNIX)
#  include <sys/types.h>
#  include <unistd.h>
# endif

# include <assert.h>

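/*
 * Prefer pthread rwlocks when the platform provides them; otherwise the
 * CRYPTO_THREAD_* lock functions below fall back to plain mutexes.
 */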
# ifdef PTHREAD_RWLOCK_INITIALIZER
#  define USE_RWLOCK
# endif

# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
#  define ATOMIC_LOAD_N(p, o) __atomic_load_n(p, o)
#  define ATOMIC_STORE_N(p, v, o) __atomic_store_n(p, v, o)
#  define ATOMIC_STORE(p, v, o) __atomic_store(p, v, o)
#  define ATOMIC_EXCHANGE_N(p, v, o) __atomic_exchange_n(p, v, o)
#  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
#  define ATOMIC_FETCH_ADD(p, v, o) __atomic_fetch_add(p, v, o)
#  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
#  define ATOMIC_AND_FETCH(p, m, o) __atomic_and_fetch(p, m, o)
#  define ATOMIC_OR_FETCH(p, m, o) __atomic_or_fetch(p, m, o)
# else
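/*
 * No usable compiler atomics are available, so emulate them by serializing
 * every operation through a single global mutex. This is correct but slow,
 * and is intended only as a portability fallback.
 */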
static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;

static inline void *fallback_atomic_load_n(void **p)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_LOAD_N(p, o) fallback_atomic_load_n((void **)p)

static inline void *fallback_atomic_store_n(void **p, void *v)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p = v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_STORE_N(p, v, o) fallback_atomic_store_n((void **)p, (void *)v)

static inline void fallback_atomic_store(void **p, void **v)
{
    pthread_mutex_lock(&atomic_sim_lock);
    *p = *v;
    pthread_mutex_unlock(&atomic_sim_lock);
}

#  define ATOMIC_STORE(p, v, o) fallback_atomic_store((void **)p, (void **)v)

static inline void *fallback_atomic_exchange_n(void **p, void *v)
{
    void *ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p = v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_EXCHANGE_N(p, v, o) fallback_atomic_exchange_n((void **)p, (void *)v)

static inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p += v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)

static inline uint64_t fallback_atomic_fetch_add(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    ret = *p;
    *p += v;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_FETCH_ADD(p, v, o) fallback_atomic_fetch_add(p, v)

static inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p -= v;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)

static inline uint64_t fallback_atomic_and_fetch(uint64_t *p, uint64_t m)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p &= m;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_AND_FETCH(p, m, o) fallback_atomic_and_fetch(p, m)

static inline uint64_t fallback_atomic_or_fetch(uint64_t *p, uint64_t m)
{
    uint64_t ret;

    pthread_mutex_lock(&atomic_sim_lock);
    *p |= m;
    ret = *p;
    pthread_mutex_unlock(&atomic_sim_lock);
    return ret;
}

#  define ATOMIC_OR_FETCH(p, m, o) fallback_atomic_or_fetch(p, m)
# endif

static CRYPTO_THREAD_LOCAL rcu_thr_key;

/*
 * users is broken up into 2 parts:
 * bits 0-15:  current reader count
 * bits 32-63: ID
 */
# define READER_SHIFT 0
# define ID_SHIFT 32
# define READER_SIZE 16
# define ID_SIZE 32

# define READER_MASK     (((uint64_t)1 << READER_SIZE) - 1)
# define ID_MASK         (((uint64_t)1 << ID_SIZE) - 1)
# define READER_COUNT(x) (((uint64_t)(x) >> READER_SHIFT) & READER_MASK)
# define ID_VAL(x)       (((uint64_t)(x) >> ID_SHIFT) & ID_MASK)
# define VAL_READER      ((uint64_t)1 << READER_SHIFT)
# define VAL_ID(x)       ((uint64_t)(x) << ID_SHIFT)
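
/*
 * Worked example (illustrative only): if users == ((uint64_t)5 << ID_SHIFT) | 3,
 * then ID_VAL(users) == 5 and READER_COUNT(users) == 3, i.e. the qp tagged
 * with id 5 currently has 3 active readers.
 */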

/*
 * This is the core of an rcu lock. It tracks the readers and writers for the
 * current quiescence point for a given lock. Users is the 64 bit value that
 * stores the READERS/ID as defined above.
 */
struct rcu_qp {
    uint64_t users;
};

struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};

# define MAX_QPS 10
/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

/*
 * This is the internal version of a CRYPTO_RCU_LOCK;
 * it is cast from CRYPTO_RCU_LOCK.
 */
struct rcu_lock_st {
    /* Callbacks to call for the next ossl_synchronize_rcu */
    struct rcu_cb_item *cb_items;

    /* rcu generation counter for in-order retirement */
    uint32_t id_ctr;

    /* Array of quiescent points for synchronization */
    struct rcu_qp *qp_group;

    /* Number of elements in the qp_group array */
    size_t group_count;

    /* Index of the current qp in the qp_group array */
    uint64_t reader_idx;

    /* Value of the next id_ctr value to be retired */
    uint32_t next_to_retire;

    /* Index of the next free rcu_qp in the qp_group */
    uint64_t current_alloc_idx;

    /* Number of qps in the qp_group array currently being retired */
    uint32_t writers_alloced;

    /* Lock protecting write side operations */
    pthread_mutex_t write_lock;

    /* Lock protecting updates to writers_alloced/current_alloc_idx */
    pthread_mutex_t alloc_lock;

    /* Signal to wake threads waiting on alloc_lock */
    pthread_cond_t alloc_signal;

    /* Lock to enforce in-order retirement */
    pthread_mutex_t prior_lock;

    /* Signal to wake threads waiting on prior_lock */
    pthread_cond_t prior_signal;
};

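/*
 * Typical read side usage, as a minimal sketch (shared_ptr and use() are
 * hypothetical application names, not part of this API):
 *
 *     ossl_rcu_read_lock(lock);
 *     data = ossl_rcu_uptr_deref((void **)&shared_ptr);
 *     use(data);
 *     ossl_rcu_read_unlock(lock);
 *
 * Readers never block; holding the read lock only pins the current qp so
 * that a concurrent ossl_synchronize_rcu() waits for them to finish.
 */
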
/*
 * Called on thread exit to free the per-thread
 * rcu data associated with this thread, if any
 */
static void free_rcu_thr_data(void *ptr)
{
    struct rcu_thr_data *data =
            (struct rcu_thr_data *)CRYPTO_THREAD_get_local(&rcu_thr_key);

    OPENSSL_free(data);
    CRYPTO_THREAD_set_local(&rcu_thr_key, NULL);
}

static void ossl_rcu_init(void)
{
    CRYPTO_THREAD_init_local(&rcu_thr_key, NULL);
}

/* Read side acquisition of the current qp */
static struct rcu_qp *get_hold_current_qp(struct rcu_lock_st *lock)
{
    uint64_t qp_idx;

    /* get the current qp index */
    for (;;) {
        /*
         * Notes on use of __ATOMIC_ACQUIRE
         * We need to ensure the following:
         * 1) That subsequent operations aren't optimized by hoisting them
         *    above this operation. Specifically, we don't want the below
         *    re-load of qp_idx to get optimized away.
         * 2) We want to ensure that any updating of reader_idx on the write
         *    side of the lock is flushed from a local cpu cache so that we
         *    see any updates prior to the load. This is a non-issue on cache
         *    coherent systems like x86, but is relevant on other arches.
         * Note: This applies to the reload below as well.
         */
        qp_idx = (uint64_t)ATOMIC_LOAD_N(&lock->reader_idx, __ATOMIC_ACQUIRE);

        /*
         * Notes on use of __ATOMIC_RELEASE
         * This counter is only read by the write side of the lock, and so we
         * specify __ATOMIC_RELEASE here to ensure that the write side of the
         * lock sees this during its spin loop read of users, as it waits for
         * the reader count to approach zero.
         */
        ATOMIC_ADD_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,
                         __ATOMIC_RELEASE);

        /* if the idx hasn't changed, we're good, else try again */
        if (qp_idx == (uint64_t)ATOMIC_LOAD_N(&lock->reader_idx, __ATOMIC_ACQUIRE))
            break;

        /*
         * Notes on use of __ATOMIC_RELEASE
         * As with the add above, we want to ensure that this decrement is
         * seen by the write side of the lock as soon as it happens, to
         * prevent undue spinning while waiting for write side completion.
         */
        ATOMIC_SUB_FETCH(&lock->qp_group[qp_idx].users, VAL_READER,
                         __ATOMIC_RELEASE);
    }

    return &lock->qp_group[qp_idx];
}

void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i, available_qp = -1;

    /*
     * we're going to access current_qp here so ask the
     * processor to fetch it
     */
    data = CRYPTO_THREAD_get_local(&rcu_thr_key);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(&rcu_thr_key, data);
        ossl_init_thread_start(NULL, NULL, free_rcu_thr_data);
    }

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth++;
            return;
        }
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}

void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    int i;
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(&rcu_thr_key);
    uint64_t ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            /*
             * As with read side acquisition, we use __ATOMIC_RELEASE here
             * to ensure that the decrement is published immediately
             * to any write side waiters
             */
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                ret = ATOMIC_SUB_FETCH(&data->thread_qps[i].qp->users, VAL_READER,
                                       __ATOMIC_RELEASE);
                OPENSSL_assert(ret != UINT64_MAX);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
    /*
     * If we get here, we're trying to unlock a lock that we never acquired;
     * that's fatal.
     */
    assert(0);
}

/*
 * Write side allocation routine to get the current qp
 * and replace it with a new one
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
{
    uint64_t new_id;
    uint64_t current_idx;

    pthread_mutex_lock(&lock->alloc_lock);

    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        /* we have to wait for one to be free */
        pthread_cond_wait(&lock->alloc_signal, &lock->alloc_lock);

    current_idx = lock->current_alloc_idx;

    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    new_id = lock->id_ctr;
    lock->id_ctr++;

    new_id = VAL_ID(new_id);
    /*
     * Even though we are under a write side lock here,
     * we need to use atomic instructions to ensure that the results
     * of this update are published to the read side prior to updating the
     * reader idx below.
     */
    ATOMIC_AND_FETCH(&lock->qp_group[current_idx].users, ID_MASK,
                     __ATOMIC_RELEASE);
    ATOMIC_OR_FETCH(&lock->qp_group[current_idx].users, new_id,
                    __ATOMIC_RELEASE);

    /*
     * Update the reader index to be the prior qp.
     * Note the use of __ATOMIC_RELEASE here is based on the corresponding use
     * of __ATOMIC_ACQUIRE in get_hold_current_qp, as we want any publication
     * of this value to be seen on the read side immediately after it happens.
     */
    ATOMIC_STORE_N(&lock->reader_idx, lock->current_alloc_idx,
                   __ATOMIC_RELEASE);

    /* wake up any waiters */
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
    return &lock->qp_group[current_idx];
}

static void retire_qp(CRYPTO_RCU_LOCK *lock, struct rcu_qp *qp)
{
    pthread_mutex_lock(&lock->alloc_lock);
    lock->writers_alloced--;
    pthread_cond_signal(&lock->alloc_signal);
    pthread_mutex_unlock(&lock->alloc_lock);
}

static struct rcu_qp *allocate_new_qp_group(CRYPTO_RCU_LOCK *lock,
                                            int count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}

void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_lock(&lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    pthread_mutex_unlock(&lock->write_lock);
}

void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    struct rcu_cb_item *cb_items, *tmpcb;

    /*
     * __ATOMIC_ACQ_REL is used here to ensure that we get any prior published
     * writes before we read, and publish our write immediately
     */
    cb_items = ATOMIC_EXCHANGE_N(&lock->cb_items, NULL, __ATOMIC_ACQ_REL);

    qp = update_qp(lock);

    /*
     * wait for the reader count to reach zero
     * Note the use of __ATOMIC_ACQUIRE here to ensure that any
     * prior __ATOMIC_RELEASE write operation in get_hold_current_qp
     * is visible prior to our read
     */
    do {
        count = (uint64_t)ATOMIC_LOAD_N(&qp->users, __ATOMIC_ACQUIRE);
    } while (READER_COUNT(count) != 0);

    /* retire in order */
    pthread_mutex_lock(&lock->prior_lock);
    while (lock->next_to_retire != ID_VAL(count))
        pthread_cond_wait(&lock->prior_signal, &lock->prior_lock);
    lock->next_to_retire++;
    pthread_cond_broadcast(&lock->prior_signal);
    pthread_mutex_unlock(&lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }
}

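/*
 * Typical write side usage, as a minimal sketch (shared_ptr, new_data and
 * old are hypothetical application names, not part of this API):
 *
 *     ossl_rcu_write_lock(lock);
 *     old = shared_ptr;
 *     ossl_rcu_assign_uptr((void **)&shared_ptr, (void **)&new_data);
 *     ossl_rcu_write_unlock(lock);
 *     ossl_synchronize_rcu(lock);  (returns once no reader can still see old)
 *     OPENSSL_free(old);
 */
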
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new =
        OPENSSL_zalloc(sizeof(*new));

    if (new == NULL)
        return 0;

    new->data = data;
    new->fn = cb;
    /*
     * Use __ATOMIC_ACQ_REL here to indicate that any prior writes to this
     * list are visible to us prior to reading, and publish the new value
     * immediately
     */
    new->next = ATOMIC_EXCHANGE_N(&lock->cb_items, new, __ATOMIC_ACQ_REL);

    return 1;
}

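/*
 * A writer that must not block can instead defer the free to the next
 * grace period via ossl_rcu_call (sketch; free_cb and old are illustrative
 * names, not part of this API):
 *
 *     static void free_cb(void *data)
 *     {
 *         OPENSSL_free(data);
 *     }
 *
 *     ossl_rcu_call(lock, free_cb, old);
 *
 * The callback runs from the next ossl_synchronize_rcu() on this lock, after
 * all in-flight readers have drained.
 */
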
void *ossl_rcu_uptr_deref(void **p)
{
    return (void *)ATOMIC_LOAD_N(p, __ATOMIC_ACQUIRE);
}

void ossl_rcu_assign_uptr(void **p, void **v)
{
    ATOMIC_STORE(p, v, __ATOMIC_RELEASE);
}

static CRYPTO_ONCE rcu_init_once = CRYPTO_ONCE_STATIC_INIT;

CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers)
{
    struct rcu_lock_st *new;

    if (!CRYPTO_THREAD_run_once(&rcu_init_once, ossl_rcu_init))
        return NULL;

    if (num_writers < 1)
        num_writers = 1;

    new = OPENSSL_zalloc(sizeof(*new));
    if (new == NULL)
        return NULL;

    pthread_mutex_init(&new->write_lock, NULL);
    pthread_mutex_init(&new->prior_lock, NULL);
    pthread_mutex_init(&new->alloc_lock, NULL);
    pthread_cond_init(&new->prior_signal, NULL);
    pthread_cond_init(&new->alloc_signal, NULL);
    new->qp_group = allocate_new_qp_group(new, num_writers + 1);
    if (new->qp_group == NULL) {
        OPENSSL_free(new);
        new = NULL;
    }
    return new;
}

void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_lock_st *rlock = (struct rcu_lock_st *)lock;

    if (lock == NULL)
        return;

    /* make sure we're synchronized */
    ossl_synchronize_rcu(rlock);

    OPENSSL_free(rlock->qp_group);
    /* There should only be a single qp left now */
    OPENSSL_free(rlock);
}

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
# ifdef USE_RWLOCK
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_rwlock_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    if (pthread_rwlock_init(lock, NULL) != 0) {
        OPENSSL_free(lock);
        return NULL;
    }
# else
    pthread_mutexattr_t attr;
    CRYPTO_RWLOCK *lock;

    if ((lock = OPENSSL_zalloc(sizeof(pthread_mutex_t))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    /*
     * We don't use recursive mutexes, but try to catch errors if we do.
     */
    pthread_mutexattr_init(&attr);
#  if !defined(__TANDEM) && !defined(_SPT_MODEL_)
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#   endif
#  else
    /* The SPT Thread Library does not define MUTEX attributes. */
#  endif

    if (pthread_mutex_init(lock, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);
        OPENSSL_free(lock);
        return NULL;
    }

    pthread_mutexattr_destroy(&attr);
# endif

    return lock;
}

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_rdlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_wrlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_unlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_unlock(lock) != 0) {
        assert(errno != EPERM);
        return 0;
    }
# endif

    return 1;
}

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifdef USE_RWLOCK
    pthread_rwlock_destroy(lock);
# else
    pthread_mutex_destroy(lock);
# endif
    OPENSSL_free(lock);
}

int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    if (pthread_once(once, init) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
    if (pthread_key_create(key, cleanup) != 0)
        return 0;

    return 1;
}

void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    return pthread_getspecific(*key);
}

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (pthread_setspecific(*key, val) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (pthread_key_delete(*key) != 0)
        return 0;

    return 1;
}

CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return pthread_self();
}

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return pthread_equal(a, b);
}

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val += amount;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

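/*
 * Example caller pattern, as a sketch (refcount and opt_lock are
 * hypothetical names): the lock is only used on platforms without usable
 * native atomics, but portable callers should still supply one.
 *
 *     int newval;
 *
 *     if (!CRYPTO_atomic_add(&refcount, 1, &newval, opt_lock))
 *         return 0;
 */
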
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

# ifndef FIPS_MODULE
int openssl_init_fork_handlers(void)
{
    return 1;
}
# endif /* FIPS_MODULE */

int openssl_get_fork_id(void)
{
    return getpid();
}
#endif