]>
Commit | Line | Data |
---|---|---|
aa6bb135 | 1 | /* |
b6461792 | 2 | * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved. |
71a04cfc | 3 | * |
0e9725bc | 4 | * Licensed under the Apache License 2.0 (the "License"). You may not use |
aa6bb135 RS |
5 | * this file except in compliance with the License. You can obtain a copy |
6 | * in the file LICENSE in the source distribution or at | |
7 | * https://www.openssl.org/source/license.html | |
71a04cfc AG |
8 | */ |
9 | ||
f1f5ee17 AP |
10 | #if defined(_WIN32) |
11 | # include <windows.h> | |
f70863d9 | 12 | # if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600 |
f70863d9 VD |
13 | # define USE_RWLOCK |
14 | # endif | |
f1f5ee17 | 15 | #endif |
d0e1a0ae | 16 | #include <assert.h> |
f1f5ee17 | 17 | |
2d46a44f DN |
18 | /* |
19 | * VC++ 2008 or earlier x86 compilers do not have an inline implementation | |
20 | * of InterlockedOr64 for 32bit and will fail to run on Windows XP 32bit. | |
21 | * https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedor-intrinsic-functions#requirements | |
22 | * To work around this problem, we implement a manual locking mechanism for | |
23 | * only VC++ 2008 or earlier x86 compilers. | |
24 | */ | |
25 | ||
8bdc3708 | 26 | #if (defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER <= 1600) |
2d46a44f DN |
27 | # define NO_INTERLOCKEDOR64 |
28 | #endif | |
29 | ||
71a04cfc | 30 | #include <openssl/crypto.h> |
d0e1a0ae NH |
31 | #include <crypto/cryptlib.h> |
32 | #include "internal/common.h" | |
33 | #include "internal/thread_arch.h" | |
34 | #include "internal/rcu.h" | |
35 | #include "rcu_internal.h" | |
71a04cfc AG |
36 | |
37 | #if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && defined(OPENSSL_SYS_WINDOWS) | |
38 | ||
# ifdef USE_RWLOCK
/*
 * Wrapper around the native SRWLOCK.  SRW locks must be released with the
 * variant (shared/exclusive) matching how they were acquired, so record in
 * 'exclusive' whether the current hold is exclusive; CRYPTO_THREAD_unlock()
 * consults it to pick the right Release call.
 */
typedef struct {
    SRWLOCK lock;
    int exclusive;          /* nonzero while held exclusively */
} CRYPTO_win_rwlock;
# endif
45 | ||
# define READER_SHIFT 0
# define ID_SHIFT 32
# define READER_SIZE 32
# define ID_SIZE 32

/*
 * A qp's 64 bit 'users' word packs two fields:
 *   bits [0, 32)  (READER_SHIFT/READER_SIZE) - count of active readers
 *   bits [32, 64) (ID_SHIFT/ID_SIZE)         - the qp's generation id
 */
# define READER_MASK (((LONG64)1 << READER_SIZE)-1)
# define ID_MASK (((LONG64)1 << ID_SIZE)-1)
# define READER_COUNT(x) (((LONG64)(x) >> READER_SHIFT) & READER_MASK)
# define ID_VAL(x) (((LONG64)(x) >> ID_SHIFT) & ID_MASK)
# define VAL_READER ((LONG64)1 << READER_SHIFT)
# define VAL_ID(x) ((LONG64)x << ID_SHIFT)

/*
 * This defines a quiescent point (qp)
 * This is the barrier beyond which a writer
 * must wait before freeing data that was
 * atomically updated
 */
struct rcu_qp {
    /* packed reader count and generation id, see the macros above */
    volatile LONG64 users;
};

/* One entry in a thread's record of the rcu locks it currently holds */
struct thread_qp {
    struct rcu_qp *qp;          /* qp this thread acquired for 'lock' */
    unsigned int depth;         /* read-side nesting depth */
    CRYPTO_RCU_LOCK *lock;      /* lock this entry refers to */
};

/* Maximum number of distinct rcu locks one thread may read-hold at once */
#define MAX_QPS 10
/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 *
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};

/*
 * This is the internal version of a CRYPTO_RCU_LOCK
 * it is cast from CRYPTO_RCU_LOCK
 */
struct rcu_lock_st {
    struct rcu_cb_item *cb_items;   /* callbacks queued by ossl_rcu_call() */
    OSSL_LIB_CTX *ctx;              /* library ctx providing the rcu TLS key */
    uint32_t id_ctr;                /* next qp generation id to hand out */
    struct rcu_qp *qp_group;        /* array of group_count qps */
    size_t group_count;             /* number of qps in qp_group */
    uint32_t next_to_retire;        /* id of the next qp to retire, in order */
    volatile long int reader_idx;   /* index of the qp new readers acquire */
    uint32_t current_alloc_idx;     /* next qp index handed to a writer */
    uint32_t writers_alloced;       /* qps currently held by writers */
    CRYPTO_MUTEX *write_lock;       /* serializes rcu writers */
    CRYPTO_MUTEX *alloc_lock;       /* protects the qp allocation fields */
    CRYPTO_CONDVAR *alloc_signal;   /* signaled when a qp is retired */
    CRYPTO_MUTEX *prior_lock;       /* orders qp retirement by id */
    CRYPTO_CONDVAR *prior_signal;   /* signaled as each id retires */
};
107 | ||
/*
 * Allocate the array of 'count' qps for 'lock' and record the group size.
 * Returns the new array, or NULL on allocation failure; the caller is
 * responsible for cleaning up the enclosing lock on failure.
 */
static struct rcu_qp *allocate_new_qp_group(struct rcu_lock_st *lock,
                                            int count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    /*
     * NOTE(review): group_count is recorded even when the allocation fails;
     * the only caller frees the whole lock in that case, so this is benign.
     */
    lock->group_count = count;
    return new;
}
117 | ||
24d16d3a | 118 | CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx) |
d0e1a0ae NH |
119 | { |
120 | struct rcu_lock_st *new; | |
121 | ||
d0e1a0ae NH |
122 | if (num_writers < 1) |
123 | num_writers = 1; | |
124 | ||
24d16d3a NH |
125 | ctx = ossl_lib_ctx_get_concrete(ctx); |
126 | if (ctx == NULL) | |
127 | return 0; | |
128 | ||
d0e1a0ae NH |
129 | new = OPENSSL_zalloc(sizeof(*new)); |
130 | ||
131 | if (new == NULL) | |
132 | return NULL; | |
133 | ||
24d16d3a | 134 | new->ctx = ctx; |
d0e1a0ae NH |
135 | new->write_lock = ossl_crypto_mutex_new(); |
136 | new->alloc_signal = ossl_crypto_condvar_new(); | |
137 | new->prior_signal = ossl_crypto_condvar_new(); | |
138 | new->alloc_lock = ossl_crypto_mutex_new(); | |
139 | new->prior_lock = ossl_crypto_mutex_new(); | |
d0e1a0ae NH |
140 | new->qp_group = allocate_new_qp_group(new, num_writers + 1); |
141 | if (new->qp_group == NULL | |
142 | || new->alloc_signal == NULL | |
143 | || new->prior_signal == NULL | |
144 | || new->write_lock == NULL | |
145 | || new->alloc_lock == NULL | |
146 | || new->prior_lock == NULL) { | |
147 | OPENSSL_free(new->qp_group); | |
148 | ossl_crypto_condvar_free(&new->alloc_signal); | |
149 | ossl_crypto_condvar_free(&new->prior_signal); | |
150 | ossl_crypto_mutex_free(&new->alloc_lock); | |
151 | ossl_crypto_mutex_free(&new->prior_lock); | |
152 | ossl_crypto_mutex_free(&new->write_lock); | |
153 | OPENSSL_free(new); | |
154 | new = NULL; | |
155 | } | |
156 | return new; | |
157 | ||
158 | } | |
159 | ||
160 | void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock) | |
161 | { | |
162 | OPENSSL_free(lock->qp_group); | |
163 | ossl_crypto_condvar_free(&lock->alloc_signal); | |
164 | ossl_crypto_condvar_free(&lock->prior_signal); | |
165 | ossl_crypto_mutex_free(&lock->alloc_lock); | |
166 | ossl_crypto_mutex_free(&lock->prior_lock); | |
167 | ossl_crypto_mutex_free(&lock->write_lock); | |
168 | OPENSSL_free(lock); | |
169 | } | |
170 | ||
/*
 * Register the calling thread as a reader on the currently-published qp
 * and return it.  Retries if a writer swings reader_idx concurrently.
 */
static inline struct rcu_qp *get_hold_current_qp(CRYPTO_RCU_LOCK *lock)
{
    uint32_t qp_idx;

    /* get the current qp index */
    for (;;) {
        qp_idx = InterlockedOr(&lock->reader_idx, 0);   /* atomic read */
        /* optimistically count ourselves as a reader of that qp */
        InterlockedAdd64(&lock->qp_group[qp_idx].users, VAL_READER);
        /*
         * If a writer moved reader_idx between our read and our increment,
         * we may be counted on a qp that is being retired: undo the count
         * and retry with the new index.
         */
        if (qp_idx == InterlockedOr(&lock->reader_idx, 0))
            break;
        InterlockedAdd64(&lock->qp_group[qp_idx].users, -VAL_READER);
    }

    return &lock->qp_group[qp_idx];
}
186 | ||
/*
 * Thread-exit handler: free this thread's rcu tracking data for the given
 * library context.  Registered via ossl_init_thread_start() the first time
 * ossl_rcu_read_lock() runs on a thread.
 */
static void ossl_rcu_free_local_data(void *arg)
{
    OSSL_LIB_CTX *ctx = arg;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
    OPENSSL_free(data);
}
194 | ||
/*
 * Take (or re-take) a read-side hold on 'lock'.  The first call on a thread
 * lazily allocates its per-thread tracking data and registers a thread-exit
 * cleanup.  If this thread already holds 'lock', return without touching
 * the qp again.
 */
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i;
    int available_qp = -1;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);

    /*
     * we're going to access current_qp here so ask the
     * processor to fetch it
     */
    data = CRYPTO_THREAD_get_local(lkey);

    if (data == NULL) {
        /* first rcu use on this thread: set up its tracking data */
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(lkey, data);
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
    }

    /* scan for a free slot, bailing out early on a lock we already hold */
    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock)
            return;
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}
232 | ||
/* rcu writers are serialized against each other with a plain mutex */
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_lock(lock->write_lock);
}

/* release the writer serialization mutex */
void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_unlock(lock->write_lock);
}
242 | ||
/*
 * Drop one level of this thread's read-side hold on 'lock'.  When the
 * nesting depth reaches zero, the reader count on the associated qp is
 * decremented and the per-thread slot is released.
 */
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
    int i;
    LONG64 ret;

    /* unlock without a prior read_lock on this thread is a caller bug */
    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                ret = InterlockedAdd64(&data->thread_qps[i].qp->users, -VAL_READER);
                /* the packed users word must never go negative */
                OPENSSL_assert(ret >= 0);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
}
265 | ||
/*
 * Hand the calling writer the next qp, stamp it with a fresh generation id,
 * and advance reader_idx so subsequent readers use the following qp.
 * Returns the qp the writer must drain in ossl_synchronize_rcu().
 */
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
{
    uint64_t new_id;
    uint32_t current_idx;
    uint32_t tmp;

    ossl_crypto_mutex_lock(lock->alloc_lock);
    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        ossl_crypto_condvar_wait(lock->alloc_signal, lock->alloc_lock);

    current_idx = lock->current_alloc_idx;
    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    new_id = lock->id_ctr;
    lock->id_ctr++;

    new_id = VAL_ID(new_id);
    /*
     * Clear the old id out of the high half of users (the AND keeps only
     * the low 32 reader-count bits), then add the new shifted-in id.
     */
    InterlockedAnd64(&lock->qp_group[current_idx].users, ID_MASK);
    InterlockedAdd64(&lock->qp_group[current_idx].users, new_id);

    /* update the reader index to be the prior qp */
    tmp = lock->current_alloc_idx;
    InterlockedExchange(&lock->reader_idx, tmp);

    /* wake up any waiters */
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
    return &lock->qp_group[current_idx];
}
306 | ||
/* Return a qp to the pool and wake writers blocked in update_qp() */
static void retire_qp(CRYPTO_RCU_LOCK *lock,
                      struct rcu_qp *qp)
{
    ossl_crypto_mutex_lock(lock->alloc_lock);
    lock->writers_alloced--;
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
}
315 | ||
316 | ||
/*
 * Wait until all readers holding this writer's qp have released it, retire
 * the qp in generation-id order relative to other writers, then run and
 * free any callbacks that were queued via ossl_rcu_call().
 */
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    uint64_t count;
    struct rcu_cb_item *cb_items, *tmpcb;

    /* before we do anything else, lets grab the cb list */
    cb_items = InterlockedExchangePointer((void * volatile *)&lock->cb_items, NULL);

    qp = update_qp(lock);

    /* wait for the reader count to reach zero */
    do {
        count = InterlockedOr64(&qp->users, 0);     /* atomic 64 bit read */
    } while (READER_COUNT(count) != 0);

    /* retire in order */
    ossl_crypto_mutex_lock(lock->prior_lock);
    while (lock->next_to_retire != ID_VAL(count))
        ossl_crypto_condvar_wait(lock->prior_signal, lock->prior_lock);

    lock->next_to_retire++;
    ossl_crypto_condvar_broadcast(lock->prior_signal);
    ossl_crypto_mutex_unlock(lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }

    /* and we're done */
    return;

}
356 | ||
357 | int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data) | |
358 | { | |
359 | struct rcu_cb_item *new; | |
360 | struct rcu_cb_item *prev; | |
361 | ||
362 | new = OPENSSL_zalloc(sizeof(struct rcu_cb_item)); | |
363 | if (new == NULL) | |
364 | return 0; | |
365 | prev = new; | |
366 | new->data = data; | |
367 | new->fn = cb; | |
368 | ||
369 | InterlockedExchangePointer((void * volatile *)&lock->cb_items, prev); | |
370 | new->next = prev; | |
371 | return 1; | |
372 | } | |
373 | ||
/* Read-side accessor for a pointer published with ossl_rcu_assign_uptr() */
void *ossl_rcu_uptr_deref(void **p)
{
    return (void *)*p;
}

/* Atomically publish *v into *p so readers never see a torn pointer */
void ossl_rcu_assign_uptr(void **p, void **v)
{
    InterlockedExchangePointer((void * volatile *)p, (void *)*v);
}
383 | ||
384 | ||
/*
 * Create a new read/write lock: an SRWLOCK wrapper where available,
 * otherwise a CRITICAL_SECTION (which is exclusive-only).
 * Returns NULL on allocation/initialization failure.
 */
CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
    CRYPTO_RWLOCK *lock;
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock;

    if ((lock = OPENSSL_zalloc(sizeof(CRYPTO_win_rwlock))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;
    rwlock = lock;
    InitializeSRWLock(&rwlock->lock);
# else

    if ((lock = OPENSSL_zalloc(sizeof(CRITICAL_SECTION))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

#  if !defined(_WIN32_WCE)
    /* 0x400 is the spin count value suggested in the documentation */
    if (!InitializeCriticalSectionAndSpinCount(lock, 0x400)) {
        OPENSSL_free(lock);
        return NULL;
    }
#  else
    InitializeCriticalSection(lock);
#  endif
# endif

    return lock;
}
415 | ||
/* Take a shared (read) hold on lock; always reports success */
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockShared(&rwlock->lock);
# else
    /* CRITICAL_SECTIONs are exclusive-only, so readers serialize too */
    EnterCriticalSection(lock);
# endif
    return 1;
}
427 | ||
/* Take an exclusive (write) hold on lock; always reports success */
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockExclusive(&rwlock->lock);
    /* remember the mode so CRYPTO_THREAD_unlock picks the right release */
    rwlock->exclusive = 1;
# else
    EnterCriticalSection(lock);
# endif
    return 1;
}
440 | ||
/* Release a hold taken by CRYPTO_THREAD_read_lock/write_lock */
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    /* release with the variant matching how the lock was acquired */
    if (rwlock->exclusive) {
        rwlock->exclusive = 0;
        ReleaseSRWLockExclusive(&rwlock->lock);
    } else {
        ReleaseSRWLockShared(&rwlock->lock);
    }
# else
    LeaveCriticalSection(lock);
# endif
    return 1;
}
457 | ||
/* Free a lock made by CRYPTO_THREAD_lock_new; NULL is a no-op */
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifndef USE_RWLOCK
    /* SRW locks need no teardown; critical sections do */
    DeleteCriticalSection(lock);
# endif
    OPENSSL_free(lock);

    return;
}
470 | ||
/* Once-state values: not started / init in progress / completed */
# define ONCE_UNINITED 0
# define ONCE_ININIT 1
# define ONCE_DONE 2

/*
 * We don't use InitOnceExecuteOnce because that isn't available in WinXP which
 * we still have to support.
 */
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    LONG volatile *lock = (LONG *)once;
    LONG result;

    /*
     * Fast path: already initialized.  NOTE(review): the plain volatile
     * reads/writes of *lock here presumably rely on MSVC's volatile
     * ordering semantics rather than an explicit barrier - confirm before
     * porting to another compiler.
     */
    if (*lock == ONCE_DONE)
        return 1;

    do {
        /* attempt to claim the init; exactly one thread wins the CAS */
        result = InterlockedCompareExchange(lock, ONCE_ININIT, ONCE_UNINITED);
        if (result == ONCE_UNINITED) {
            init();
            *lock = ONCE_DONE;
            return 1;
        }
        /* another thread is initializing: spin until it finishes */
    } while (result == ONCE_ININIT);

    return (*lock == ONCE_DONE);
}
498 | ||
71a04cfc AG |
499 | int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *)) |
500 | { | |
501 | *key = TlsAlloc(); | |
502 | if (*key == TLS_OUT_OF_INDEXES) | |
503 | return 0; | |
504 | ||
505 | return 1; | |
506 | } | |
507 | ||
/* Fetch this thread's value for key, preserving the caller's last error */
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    DWORD last_error;
    void *ret;

    /*
     * TlsGetValue clears the last error even on success, so that callers may
     * distinguish it successfully returning NULL or failing. It is documented
     * to never fail if the argument is a valid index from TlsAlloc, so we do
     * not need to handle this.
     *
     * However, this error-mangling behavior interferes with the caller's use of
     * GetLastError. In particular SSL_get_error queries the error queue to
     * determine whether the caller should look at the OS's errors. To avoid
     * destroying state, save and restore the Windows error.
     *
     * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686812(v=vs.85).aspx
     */
    last_error = GetLastError();
    ret = TlsGetValue(*key);
    SetLastError(last_error);
    return ret;
}
531 | ||
532 | int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val) | |
533 | { | |
534 | if (TlsSetValue(*key, val) == 0) | |
535 | return 0; | |
536 | ||
537 | return 1; | |
538 | } | |
539 | ||
540 | int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key) | |
541 | { | |
542 | if (TlsFree(*key) == 0) | |
543 | return 0; | |
544 | ||
545 | return 1; | |
546 | } | |
547 | ||
/* The thread id is the OS-level Windows thread identifier */
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return GetCurrentThreadId();
}
552 | ||
553 | int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b) | |
554 | { | |
555 | return (a == b); | |
556 | } | |
557 | ||
/* Atomically add amount to *val and return the new value via *ret */
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
    /*
     * InterlockedExchangeAdd returns the value prior to the addition, so
     * add amount once more to report the new value.  The lock argument is
     * unused here.
     */
    *ret = (int)InterlockedExchangeAdd((long volatile *)val, (long)amount) + amount;
    return 1;
}
563 | ||
/* Atomically OR op into *val, returning the resulting value via *ret */
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    /* no 64 bit interlocked ops on this compiler: fall back to the rwlock */
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    /* InterlockedOr64 returns the prior value, so OR in op for the result */
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, (LONG64)op) | op;
    return 1;
#endif
}
582 | ||
/* Atomically read the 64 bit value *val into *ret */
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    /* no 64 bit interlocked ops on this compiler: read under the rwlock */
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    /* OR with 0 is a no-op write that yields an atomic 64 bit read */
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, 0);
    return 1;
#endif
}
598 | ||
/* Atomically read the int value *val into *ret */
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    /* keep the same rwlock fallback as the 64 bit loads on this compiler */
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    /* On Windows, LONG is always the same size as int. */
    *ret = (int)InterlockedOr((LONG volatile *)val, 0);
    return 1;
#endif
}
615 | ||
/*
 * There is no fork() on Windows, so there are no fork handlers to
 * install; report the feature as unavailable.
 */
int openssl_init_fork_handlers(void)
{
    return 0;
}

/* No fork() on Windows: the fork generation id is always 0 */
int openssl_get_fork_id(void)
{
    return 0;
}
71a04cfc | 625 | #endif |