/*
 * Copyright 2016-2023 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

/* We need to use the OPENSSL_fork_*() deprecated APIs */
#define OPENSSL_SUPPRESS_DEPRECATED

#include <openssl/crypto.h>
#include "internal/cryptlib.h"

#if defined(__sun)
# include <atomic.h>
#endif

#if defined(__apple_build_version__) && __apple_build_version__ < 6000000
/*
 * OS/X 10.7 and 10.8 had a weird version of clang which has __ATOMIC_ACQUIRE
 * and __ATOMIC_ACQ_REL but which expects only one parameter for
 * __atomic_is_lock_free() rather than two, i.e. its signature is
 * __atomic_is_lock_free(sizeof(_Atomic(T))). All of this makes it impossible
 * to use __atomic_is_lock_free here.
 *
 * See: https://github.com/llvm/llvm-project/commit/a4c2602b714e6c6edb98164550a5ae829b2de760
 */
#define BROKEN_CLANG_ATOMICS
#endif
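
/*-
 * For reference: the GCC-style builtin used later in this file takes a size
 * and a pointer, e.g.
 *
 *     __atomic_is_lock_free(sizeof(*val), val);
 *
 * whereas the affected Apple clang releases accept only the single size
 * argument shown above, so the two-argument calls below would not compile
 * there.
 */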

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && !defined(OPENSSL_SYS_WINDOWS)

# if defined(OPENSSL_SYS_UNIX)
#  include <sys/types.h>
#  include <unistd.h>
# endif

# include <assert.h>

# ifdef PTHREAD_RWLOCK_INITIALIZER
#  define USE_RWLOCK
# endif

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
# ifdef USE_RWLOCK
    CRYPTO_RWLOCK *lock;

    if ((lock = CRYPTO_zalloc(sizeof(pthread_rwlock_t), NULL, 0)) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    if (pthread_rwlock_init(lock, NULL) != 0) {
        OPENSSL_free(lock);
        return NULL;
    }
# else
    pthread_mutexattr_t attr;
    CRYPTO_RWLOCK *lock;

    if ((lock = CRYPTO_zalloc(sizeof(pthread_mutex_t), NULL, 0)) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

    /*
     * We don't use recursive mutexes, but try to catch errors if we do.
     */
    pthread_mutexattr_init(&attr);
#  if !defined (__TANDEM) && !defined (_SPT_MODEL_)
#   if !defined(NDEBUG) && !defined(OPENSSL_NO_MUTEX_ERRORCHECK)
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
#   endif
#  else
    /* The SPT Thread Library does not define MUTEX attributes. */
#  endif

    if (pthread_mutex_init(lock, &attr) != 0) {
        pthread_mutexattr_destroy(&attr);
        OPENSSL_free(lock);
        return NULL;
    }

    pthread_mutexattr_destroy(&attr);
# endif

    return lock;
}

__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_rdlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_wrlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_lock(lock) != 0) {
        assert(errno != EDEADLK && errno != EBUSY);
        return 0;
    }
# endif

    return 1;
}

int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    if (pthread_rwlock_unlock(lock) != 0)
        return 0;
# else
    if (pthread_mutex_unlock(lock) != 0) {
        assert(errno != EPERM);
        return 0;
    }
# endif

    return 1;
}

void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifdef USE_RWLOCK
    pthread_rwlock_destroy(lock);
# else
    pthread_mutex_destroy(lock);
# endif
    OPENSSL_free(lock);

    return;
}
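
/*-
 * An illustrative sketch of typical caller usage of the lock API above; the
 * names below are hypothetical:
 *
 *     static int shared_value;
 *     static CRYPTO_RWLOCK *value_lock;
 *
 *     int value_init(void)
 *     {
 *         value_lock = CRYPTO_THREAD_lock_new();
 *         return value_lock != NULL;
 *     }
 *
 *     int value_bump(void)
 *     {
 *         if (!CRYPTO_THREAD_write_lock(value_lock))
 *             return 0;
 *         shared_value++;
 *         return CRYPTO_THREAD_unlock(value_lock);
 *     }
 *
 *     void value_cleanup(void)
 *     {
 *         CRYPTO_THREAD_lock_free(value_lock);
 *     }
 *
 * CRYPTO_THREAD_lock_free() accepts NULL, so value_cleanup() is safe even if
 * value_init() was never called or failed.
 */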

int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    if (pthread_once(once, init) != 0)
        return 0;

    return 1;
}
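
/*-
 * An illustrative sketch of one-time initialisation with
 * CRYPTO_THREAD_run_once(); the names below are hypothetical:
 *
 *     static CRYPTO_ONCE example_once = CRYPTO_ONCE_STATIC_INIT;
 *     static int example_ready = 0;
 *
 *     static void example_init(void)
 *     {
 *         example_ready = 1;
 *     }
 *
 *     int example_setup(void)
 *     {
 *         if (!CRYPTO_THREAD_run_once(&example_once, example_init))
 *             return 0;
 *         return example_ready;
 *     }
 *
 * Concurrent callers of example_setup() all block until example_init() has
 * run exactly once, which is the pthread_once() guarantee relied on above.
 */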

int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
    if (pthread_key_create(key, cleanup) != 0)
        return 0;

    return 1;
}

void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    return pthread_getspecific(*key);
}

int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (pthread_setspecific(*key, val) != 0)
        return 0;

    return 1;
}

int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (pthread_key_delete(*key) != 0)
        return 0;

    return 1;
}
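
/*-
 * An illustrative sketch of per-thread data built on the thread-local API
 * above; the names below are hypothetical. The cleanup callback passed to
 * CRYPTO_THREAD_init_local() is invoked at thread exit for non-NULL values:
 *
 *     static CRYPTO_THREAD_LOCAL buf_key;
 *
 *     static void buf_cleanup(void *p)
 *     {
 *         OPENSSL_free(p);
 *     }
 *
 *     int buf_key_init(void)
 *     {
 *         return CRYPTO_THREAD_init_local(&buf_key, buf_cleanup);
 *     }
 *
 *     void *buf_get(void)
 *     {
 *         void *p = CRYPTO_THREAD_get_local(&buf_key);
 *
 *         if (p == NULL) {
 *             p = OPENSSL_zalloc(64);
 *             if (p != NULL && !CRYPTO_THREAD_set_local(&buf_key, p)) {
 *                 OPENSSL_free(p);
 *                 p = NULL;
 *             }
 *         }
 *         return p;
 *     }
 */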

CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return pthread_self();
}

int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return pthread_equal(a, b);
}

int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_add_fetch(val, amount, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_add_int_nv((volatile unsigned int *)val, amount);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;

    *val += amount;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
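
/*-
 * An illustrative sketch of reference counting with CRYPTO_atomic_add(); the
 * EXAMPLE_OBJ type and function names are hypothetical. The lock argument is
 * only taken when no lock-free atomic path above applies, but callers still
 * supply one so the fallback works everywhere:
 *
 *     typedef struct {
 *         int refcnt;
 *         CRYPTO_RWLOCK *lock;
 *     } EXAMPLE_OBJ;
 *
 *     int EXAMPLE_OBJ_up_ref(EXAMPLE_OBJ *obj)
 *     {
 *         int count = 0;
 *
 *         return CRYPTO_atomic_add(&obj->refcnt, 1, &count, obj->lock);
 *     }
 *
 *     int EXAMPLE_OBJ_free(EXAMPLE_OBJ *obj)
 *     {
 *         int count = 0;
 *
 *         if (obj == NULL)
 *             return 1;
 *         if (!CRYPTO_atomic_add(&obj->refcnt, -1, &count, obj->lock))
 *             return 0;
 *         if (count <= 0) {
 *             CRYPTO_THREAD_lock_free(obj->lock);
 *             OPENSSL_free(obj);
 *         }
 *         return 1;
 *     }
 */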

int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQ_REL) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        *ret = __atomic_or_fetch(val, op, __ATOMIC_ACQ_REL);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, op);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        *ret = atomic_or_64_nv(val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}
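
/*-
 * An illustrative sketch of 64-bit flag handling with CRYPTO_atomic_or() and
 * CRYPTO_atomic_load(); the names below are hypothetical. As with
 * CRYPTO_atomic_add(), the lock is only the fallback for platforms without
 * usable lock-free atomics:
 *
 *     static uint64_t example_flags;
 *     static CRYPTO_RWLOCK *example_flags_lock;
 *
 *     int example_set_flag(uint64_t flag)
 *     {
 *         uint64_t after = 0;
 *
 *         return CRYPTO_atomic_or(&example_flags, flag, &after,
 *                                 example_flags_lock);
 *     }
 *
 *     int example_flag_is_set(uint64_t flag, int *set)
 *     {
 *         uint64_t now = 0;
 *
 *         if (!CRYPTO_atomic_load(&example_flags, &now, example_flags_lock))
 *             return 0;
 *         *set = (now & flag) != 0;
 *         return 1;
 *     }
 */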

int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS)
    if (__atomic_is_lock_free(sizeof(*val), val)) {
        __atomic_load(val, ret, __ATOMIC_ACQUIRE);
        return 1;
    }
# elif defined(__sun) && (defined(__SunOS_5_10) || defined(__SunOS_5_11))
    /* This will work for all future Solaris versions. */
    if (ret != NULL) {
        /* atomic_or_uint_nv() returns a value, so cast to int, not int * */
        *ret = (int)atomic_or_uint_nv((unsigned int *)val, 0);
        return 1;
    }
# endif
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
}

# ifndef FIPS_MODULE
int openssl_init_fork_handlers(void)
{
    return 1;
}
# endif /* FIPS_MODULE */

int openssl_get_fork_id(void)
{
    return getpid();
}
#endif