/*
 * Copyright 2016-2024 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
# if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600
#  define USE_RWLOCK
# endif
/*
 * VC++ 2008 or earlier x86 compilers do not have an inline implementation
 * of InterlockedOr64 for 32-bit and will fail to run on Windows XP 32-bit.
 * https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedor-intrinsic-functions#requirements
 * To work around this problem, we implement a manual locking mechanism only
 * for VC++ 2008 or earlier x86 compilers.
 */
#if (defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER <= 1600)
# define NO_INTERLOCKEDOR64
#endif
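
/*
 * Illustrative note (not part of the upstream file): when NO_INTERLOCKEDOR64
 * is defined, the 64-bit CRYPTO_atomic_* helpers at the bottom of this file
 * skip the Interlocked* intrinsics and instead take the caller-supplied
 * CRYPTO_RWLOCK around a plain read-modify-write.
 */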
#include <openssl/crypto.h>
#include <crypto/cryptlib.h>
#include "internal/common.h"
#include "internal/thread_arch.h"
#include "internal/rcu.h"
#include "rcu_internal.h"

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG) && defined(OPENSSL_SYS_WINDOWS)
# define READER_SHIFT 0
# define ID_SHIFT 32
# define READER_SIZE 32
# define ID_SIZE 32
# define READER_MASK (((LONG64)1 << READER_SIZE) - 1)
# define ID_MASK (((LONG64)1 << ID_SIZE) - 1)
# define READER_COUNT(x) (((LONG64)(x) >> READER_SHIFT) & READER_MASK)
# define ID_VAL(x) (((LONG64)(x) >> ID_SHIFT) & ID_MASK)
# define VAL_READER ((LONG64)1 << READER_SHIFT)
# define VAL_ID(x) ((LONG64)x << ID_SHIFT)
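
/*
 * Illustrative note (not part of the upstream file): the 64-bit users word
 * packs two fields, the low READER_SIZE bits counting active readers and the
 * bits from ID_SHIFT upward holding the qp generation id. For example,
 * VAL_ID(5) | (3 * VAL_READER) decodes as READER_COUNT() == 3 and
 * ID_VAL() == 5.
 */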
/*
 * This defines a quiescent point (qp)
 * This is the barrier beyond which a writer
 * must wait before freeing data that was
 * atomically updated
 */
struct rcu_qp {
    volatile LONG64 users;
};
struct thread_qp {
    struct rcu_qp *qp;
    unsigned int depth;
    CRYPTO_RCU_LOCK *lock;
};

/*
 * This is the per thread tracking data
 * that is assigned to each thread participating
 * in an rcu qp
 *
 * qp points to the qp that it last acquired
 */
struct rcu_thr_data {
    struct thread_qp thread_qps[MAX_QPS];
};
/*
 * This is the internal version of a CRYPTO_RCU_LOCK
 * it is cast from CRYPTO_RCU_LOCK
 */
struct rcu_lock_st {
    struct rcu_cb_item *cb_items;
    OSSL_LIB_CTX *ctx;
    uint32_t id_ctr;
    struct rcu_qp *qp_group;
    uint32_t group_count;
    uint32_t next_to_retire;
    volatile long int reader_idx;
    uint32_t current_alloc_idx;
    uint32_t writers_alloced;
    CRYPTO_MUTEX *write_lock;
    CRYPTO_MUTEX *alloc_lock;
    CRYPTO_CONDVAR *alloc_signal;
    CRYPTO_MUTEX *prior_lock;
    CRYPTO_CONDVAR *prior_signal;
};
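
/*
 * Illustrative note (not part of the upstream file): qp_group acts as a ring
 * of quiescent points. current_alloc_idx is where the next writer grabs a qp,
 * reader_idx tells readers which qp to register against, and next_to_retire
 * makes writers in ossl_synchronize_rcu() retire their qps in allocation
 * order; write_lock serializes writers, while alloc_lock/alloc_signal and
 * prior_lock/prior_signal back the two wait loops below.
 */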
static struct rcu_qp *allocate_new_qp_group(struct rcu_lock_st *lock,
                                            int count)
{
    struct rcu_qp *new =
        OPENSSL_zalloc(sizeof(*new) * count);

    lock->group_count = count;
    return new;
}
CRYPTO_RCU_LOCK *ossl_rcu_lock_new(int num_writers, OSSL_LIB_CTX *ctx)
{
    struct rcu_lock_st *new;

    ctx = ossl_lib_ctx_get_concrete(ctx);
    if (ctx == NULL)
        return NULL;

    new = OPENSSL_zalloc(sizeof(*new));
    if (new == NULL)
        return NULL;

    new->ctx = ctx;
    new->write_lock = ossl_crypto_mutex_new();
    new->alloc_signal = ossl_crypto_condvar_new();
    new->prior_signal = ossl_crypto_condvar_new();
    new->alloc_lock = ossl_crypto_mutex_new();
    new->prior_lock = ossl_crypto_mutex_new();
    new->qp_group = allocate_new_qp_group(new, num_writers + 1);
    if (new->qp_group == NULL
        || new->alloc_signal == NULL
        || new->prior_signal == NULL
        || new->write_lock == NULL
        || new->alloc_lock == NULL
        || new->prior_lock == NULL) {
        OPENSSL_free(new->qp_group);
        ossl_crypto_condvar_free(&new->alloc_signal);
        ossl_crypto_condvar_free(&new->prior_signal);
        ossl_crypto_mutex_free(&new->alloc_lock);
        ossl_crypto_mutex_free(&new->prior_lock);
        ossl_crypto_mutex_free(&new->write_lock);
        OPENSSL_free(new);
        new = NULL;
    }
    return new;
}
void ossl_rcu_lock_free(CRYPTO_RCU_LOCK *lock)
{
    OPENSSL_free(lock->qp_group);
    ossl_crypto_condvar_free(&lock->alloc_signal);
    ossl_crypto_condvar_free(&lock->prior_signal);
    ossl_crypto_mutex_free(&lock->alloc_lock);
    ossl_crypto_mutex_free(&lock->prior_lock);
    ossl_crypto_mutex_free(&lock->write_lock);
    OPENSSL_free(lock);
}
static inline struct rcu_qp *get_hold_current_qp(CRYPTO_RCU_LOCK *lock)
{
    uint32_t qp_idx;

    /* get the current qp index */
    for (;;) {
        qp_idx = InterlockedOr(&lock->reader_idx, 0);
        InterlockedAdd64(&lock->qp_group[qp_idx].users, VAL_READER);
        if (qp_idx == InterlockedOr(&lock->reader_idx, 0))
            break;
        InterlockedAdd64(&lock->qp_group[qp_idx].users, -VAL_READER);
    }

    return &lock->qp_group[qp_idx];
}
static void ossl_rcu_free_local_data(void *arg)
{
    OSSL_LIB_CTX *ctx = arg;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);

    OPENSSL_free(data);
}
void ossl_rcu_read_lock(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_thr_data *data;
    int i;
    int available_qp = -1;
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);

    /*
     * we're going to access current_qp here so ask the
     * processor to fetch it
     */
    data = CRYPTO_THREAD_get_local(lkey);

    if (data == NULL) {
        data = OPENSSL_zalloc(sizeof(*data));
        OPENSSL_assert(data != NULL);
        CRYPTO_THREAD_set_local(lkey, data);
        ossl_init_thread_start(NULL, lock->ctx, ossl_rcu_free_local_data);
    }

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].qp == NULL && available_qp == -1)
            available_qp = i;
        /* If we have a hold on this lock already, we're good */
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth++;
            return;
        }
    }

    /*
     * if we get here, then we don't have a hold on this lock yet
     */
    assert(available_qp != -1);

    data->thread_qps[available_qp].qp = get_hold_current_qp(lock);
    data->thread_qps[available_qp].depth = 1;
    data->thread_qps[available_qp].lock = lock;
}
void ossl_rcu_write_lock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_lock(lock->write_lock);
}

void ossl_rcu_write_unlock(CRYPTO_RCU_LOCK *lock)
{
    ossl_crypto_mutex_unlock(lock->write_lock);
}
void ossl_rcu_read_unlock(CRYPTO_RCU_LOCK *lock)
{
    CRYPTO_THREAD_LOCAL *lkey = ossl_lib_ctx_get_rcukey(lock->ctx);
    struct rcu_thr_data *data = CRYPTO_THREAD_get_local(lkey);
    int i;
    LONG64 ret;

    assert(data != NULL);

    for (i = 0; i < MAX_QPS; i++) {
        if (data->thread_qps[i].lock == lock) {
            data->thread_qps[i].depth--;
            if (data->thread_qps[i].depth == 0) {
                ret = InterlockedAdd64(&data->thread_qps[i].qp->users,
                                       -VAL_READER);
                OPENSSL_assert(ret >= 0);
                data->thread_qps[i].qp = NULL;
                data->thread_qps[i].lock = NULL;
            }
            return;
        }
    }
}
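
/*
 * Illustrative reader-side sketch (not part of the upstream file). It assumes
 * a CRYPTO_RCU_LOCK created elsewhere with ossl_rcu_lock_new() guarding a
 * pointer that writers publish with ossl_rcu_assign_uptr(); the struct and
 * global names are hypothetical.
 */
#if 0
struct config {
    int value;
};

static struct config *active_config;  /* published by writers, see below */
static CRYPTO_RCU_LOCK *config_lock;  /* from ossl_rcu_lock_new(1, NULL) */

static int read_config_value(void)
{
    struct config *cfg;
    int v;

    ossl_rcu_read_lock(config_lock);
    /* take a stable snapshot of the pointer while we hold a qp */
    cfg = ossl_rcu_uptr_deref((void **)&active_config);
    v = (cfg != NULL) ? cfg->value : 0;
    ossl_rcu_read_unlock(config_lock);
    return v;
}
#endif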
static struct rcu_qp *update_qp(CRYPTO_RCU_LOCK *lock)
{
    uint64_t new_id;
    uint32_t current_idx;
    uint32_t tmp;

    ossl_crypto_mutex_lock(lock->alloc_lock);
    /*
     * we need at least one qp to be available with one
     * left over, so that readers can start working on
     * one that isn't yet being waited on
     */
    while (lock->group_count - lock->writers_alloced < 2)
        ossl_crypto_condvar_wait(lock->alloc_signal, lock->alloc_lock);

    current_idx = lock->current_alloc_idx;
    /* Allocate the qp */
    lock->writers_alloced++;

    /* increment the allocation index */
    lock->current_alloc_idx =
        (lock->current_alloc_idx + 1) % lock->group_count;

    /* get and insert a new id */
    new_id = lock->id_ctr;
    lock->id_ctr++;

    new_id = VAL_ID(new_id);
    InterlockedAnd64(&lock->qp_group[current_idx].users, ID_MASK);
    InterlockedAdd64(&lock->qp_group[current_idx].users, new_id);

    /* update the reader index to be the prior qp */
    tmp = lock->current_alloc_idx;
    InterlockedExchange(&lock->reader_idx, tmp);

    /* wake up any waiters */
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
    return &lock->qp_group[current_idx];
}
static void retire_qp(CRYPTO_RCU_LOCK *lock,
                      struct rcu_qp *qp)
{
    ossl_crypto_mutex_lock(lock->alloc_lock);
    lock->writers_alloced--;
    ossl_crypto_condvar_broadcast(lock->alloc_signal);
    ossl_crypto_mutex_unlock(lock->alloc_lock);
}
void ossl_synchronize_rcu(CRYPTO_RCU_LOCK *lock)
{
    struct rcu_qp *qp;
    LONG64 count;
    struct rcu_cb_item *cb_items, *tmpcb;

    /* before we do anything else, lets grab the cb list */
    cb_items = InterlockedExchangePointer((void * volatile *)&lock->cb_items,
                                          NULL);

    qp = update_qp(lock);

    /* wait for the reader count to reach zero */
    do {
        count = InterlockedOr64(&qp->users, 0);
    } while (READER_COUNT(count) != 0);

    /* retire in order */
    ossl_crypto_mutex_lock(lock->prior_lock);
    while (lock->next_to_retire != ID_VAL(count))
        ossl_crypto_condvar_wait(lock->prior_signal, lock->prior_lock);

    lock->next_to_retire++;
    ossl_crypto_condvar_broadcast(lock->prior_signal);
    ossl_crypto_mutex_unlock(lock->prior_lock);

    retire_qp(lock, qp);

    /* handle any callbacks that we have */
    while (cb_items != NULL) {
        tmpcb = cb_items;
        cb_items = cb_items->next;
        tmpcb->fn(tmpcb->data);
        OPENSSL_free(tmpcb);
    }
}
int ossl_rcu_call(CRYPTO_RCU_LOCK *lock, rcu_cb_fn cb, void *data)
{
    struct rcu_cb_item *new;

    new = OPENSSL_zalloc(sizeof(struct rcu_cb_item));
    if (new == NULL)
        return 0;

    new->data = data;
    new->fn = cb;

    new->next = InterlockedExchangePointer((void * volatile *)&lock->cb_items,
                                           new);
    return 1;
}
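
/*
 * Illustrative writer-side sketch (not part of the upstream file), continuing
 * the hypothetical active_config example above: writers serialize on the
 * write lock, publish the new pointer with ossl_rcu_assign_uptr(), and use
 * ossl_rcu_call() plus ossl_synchronize_rcu() to free the old value only
 * after all in-flight readers have released their qp.
 */
#if 0
static void free_old_config(void *data)
{
    OPENSSL_free(data);
}

static int set_config_value(int value)
{
    struct config *newcfg, *oldcfg;

    if ((newcfg = OPENSSL_zalloc(sizeof(*newcfg))) == NULL)
        return 0;
    newcfg->value = value;

    ossl_rcu_write_lock(config_lock);
    oldcfg = ossl_rcu_uptr_deref((void **)&active_config);
    ossl_rcu_assign_uptr((void **)&active_config, (void **)&newcfg);
    /* defer freeing the old value until the next ossl_synchronize_rcu() */
    ossl_rcu_call(config_lock, free_old_config, oldcfg);
    ossl_rcu_write_unlock(config_lock);

    ossl_synchronize_rcu(config_lock);
    return 1;
}
#endif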
void *ossl_rcu_uptr_deref(void **p)
{
    return (void *)*p;
}

void ossl_rcu_assign_uptr(void **p, void **v)
{
    InterlockedExchangePointer((void * volatile *)p, (void *)*v);
}
# ifdef USE_RWLOCK
typedef struct {
    SRWLOCK lock;
    int exclusive;
} CRYPTO_win_rwlock;
# endif

CRYPTO_RWLOCK *CRYPTO_THREAD_lock_new(void)
{
    CRYPTO_RWLOCK *lock;
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock;

    if ((lock = OPENSSL_zalloc(sizeof(CRYPTO_win_rwlock))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;
    rwlock = lock;
    InitializeSRWLock(&rwlock->lock);
# else
    if ((lock = OPENSSL_zalloc(sizeof(CRITICAL_SECTION))) == NULL)
        /* Don't set error, to avoid recursion blowup. */
        return NULL;

#  if !defined(_WIN32_WCE)
    /* 0x400 is the spin count value suggested in the documentation */
    if (!InitializeCriticalSectionAndSpinCount(lock, 0x400)) {
        OPENSSL_free(lock);
        return NULL;
    }
#  else
    InitializeCriticalSection(lock);
#  endif
# endif

    return lock;
}
__owur int CRYPTO_THREAD_read_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockShared(&rwlock->lock);
# else
    EnterCriticalSection(lock);
# endif
    return 1;
}
__owur int CRYPTO_THREAD_write_lock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    AcquireSRWLockExclusive(&rwlock->lock);
    rwlock->exclusive = 1;
# else
    EnterCriticalSection(lock);
# endif
    return 1;
}
int CRYPTO_THREAD_unlock(CRYPTO_RWLOCK *lock)
{
# ifdef USE_RWLOCK
    CRYPTO_win_rwlock *rwlock = lock;

    if (rwlock->exclusive) {
        rwlock->exclusive = 0;
        ReleaseSRWLockExclusive(&rwlock->lock);
    } else {
        ReleaseSRWLockShared(&rwlock->lock);
    }
# else
    LeaveCriticalSection(lock);
# endif
    return 1;
}
void CRYPTO_THREAD_lock_free(CRYPTO_RWLOCK *lock)
{
    if (lock == NULL)
        return;

# ifndef USE_RWLOCK
    DeleteCriticalSection(lock);
# endif
    OPENSSL_free(lock);
}
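
/*
 * Illustrative sketch (not part of the upstream file): typical use of the
 * CRYPTO_RWLOCK wrappers above. The lock would be created once with
 * CRYPTO_THREAD_lock_new() and released with CRYPTO_THREAD_lock_free();
 * the counter and function names are hypothetical.
 */
#if 0
static CRYPTO_RWLOCK *counter_lock; /* from CRYPTO_THREAD_lock_new() */
static int counter;

static int bump_counter(void)
{
    if (!CRYPTO_THREAD_write_lock(counter_lock))
        return 0;
    counter++;
    return CRYPTO_THREAD_unlock(counter_lock);
}

static int read_counter(int *out)
{
    if (!CRYPTO_THREAD_read_lock(counter_lock))
        return 0;
    *out = counter;
    return CRYPTO_THREAD_unlock(counter_lock);
}
#endif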
# define ONCE_UNINITED 0
# define ONCE_ININIT 1
# define ONCE_DONE 2

/*
 * We don't use InitOnceExecuteOnce because that isn't available in WinXP which
 * we still have to support.
 */
int CRYPTO_THREAD_run_once(CRYPTO_ONCE *once, void (*init)(void))
{
    LONG volatile *lock = (LONG *)once;
    LONG result;

    if (*lock == ONCE_DONE)
        return 1;

    do {
        result = InterlockedCompareExchange(lock, ONCE_ININIT, ONCE_UNINITED);
        if (result == ONCE_UNINITED) {
            init();
            *lock = ONCE_DONE;
            return 1;
        }
    } while (result == ONCE_ININIT);

    return (*lock == ONCE_DONE);
}
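
/*
 * Illustrative sketch (not part of the upstream file): CRYPTO_THREAD_run_once()
 * is normally driven from a CRYPTO_ONCE initialised with
 * CRYPTO_ONCE_STATIC_INIT (see <openssl/crypto.h>); the init function and
 * flag below are hypothetical.
 */
#if 0
static CRYPTO_ONCE example_once = CRYPTO_ONCE_STATIC_INIT;
static int example_ready;

static void example_init(void)
{
    example_ready = 1;
}

static int example_setup(void)
{
    /* returns 1 only once example_init has run exactly once */
    return CRYPTO_THREAD_run_once(&example_once, example_init);
}
#endif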
int CRYPTO_THREAD_init_local(CRYPTO_THREAD_LOCAL *key, void (*cleanup)(void *))
{
    *key = TlsAlloc();
    if (*key == TLS_OUT_OF_INDEXES)
        return 0;

    return 1;
}
void *CRYPTO_THREAD_get_local(CRYPTO_THREAD_LOCAL *key)
{
    DWORD last_error;
    void *ret;

    /*
     * TlsGetValue clears the last error even on success, so that callers may
     * distinguish it successfully returning NULL or failing. It is documented
     * to never fail if the argument is a valid index from TlsAlloc, so we do
     * not need to handle this.
     *
     * However, this error-mangling behavior interferes with the caller's use of
     * GetLastError. In particular SSL_get_error queries the error queue to
     * determine whether the caller should look at the OS's errors. To avoid
     * destroying state, save and restore the Windows error.
     *
     * https://msdn.microsoft.com/en-us/library/windows/desktop/ms686812(v=vs.85).aspx
     */
    last_error = GetLastError();
    ret = TlsGetValue(*key);
    SetLastError(last_error);
    return ret;
}
int CRYPTO_THREAD_set_local(CRYPTO_THREAD_LOCAL *key, void *val)
{
    if (TlsSetValue(*key, val) == 0)
        return 0;

    return 1;
}
int CRYPTO_THREAD_cleanup_local(CRYPTO_THREAD_LOCAL *key)
{
    if (TlsFree(*key) == 0)
        return 0;

    return 1;
}
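
/*
 * Illustrative sketch (not part of the upstream file): the CRYPTO_THREAD_LOCAL
 * wrappers map onto TlsAlloc()/TlsGetValue()/TlsSetValue()/TlsFree(); note
 * that the cleanup callback passed to CRYPTO_THREAD_init_local() is ignored
 * on Windows, so per-thread data must be freed some other way. Names below
 * are hypothetical.
 */
#if 0
static CRYPTO_THREAD_LOCAL scratch_key;

static int scratch_key_setup(void)
{
    return CRYPTO_THREAD_init_local(&scratch_key, NULL);
}

static void *get_scratch(void)
{
    void *buf = CRYPTO_THREAD_get_local(&scratch_key);

    if (buf == NULL) {
        buf = OPENSSL_zalloc(64);
        if (buf == NULL || !CRYPTO_THREAD_set_local(&scratch_key, buf)) {
            OPENSSL_free(buf);
            return NULL;
        }
    }
    return buf;
}
#endif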
CRYPTO_THREAD_ID CRYPTO_THREAD_get_current_id(void)
{
    return GetCurrentThreadId();
}
int CRYPTO_THREAD_compare_id(CRYPTO_THREAD_ID a, CRYPTO_THREAD_ID b)
{
    return (a == b);
}
int CRYPTO_atomic_add(int *val, int amount, int *ret, CRYPTO_RWLOCK *lock)
{
    *ret = (int)InterlockedExchangeAdd((long volatile *)val, (long)amount)
        + amount;
    return 1;
}
int CRYPTO_atomic_or(uint64_t *val, uint64_t op, uint64_t *ret,
                     CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_write_lock(lock))
        return 0;
    *val |= op;
    *ret = *val;

    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, (LONG64)op) | op;
    return 1;
#endif
}
int CRYPTO_atomic_load(uint64_t *val, uint64_t *ret, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    *ret = (uint64_t)InterlockedOr64((LONG64 volatile *)val, 0);
    return 1;
#endif
}
int CRYPTO_atomic_store(uint64_t *dst, uint64_t val, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *dst = val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    InterlockedExchange64(dst, val);
    return 1;
#endif
}
int CRYPTO_atomic_load_int(int *val, int *ret, CRYPTO_RWLOCK *lock)
{
#if (defined(NO_INTERLOCKEDOR64))
    if (lock == NULL || !CRYPTO_THREAD_read_lock(lock))
        return 0;
    *ret = *val;
    if (!CRYPTO_THREAD_unlock(lock))
        return 0;

    return 1;
#else
    /* On Windows, LONG is always the same size as int. */
    *ret = (int)InterlockedOr((LONG volatile *)val, 0);
    return 1;
#endif
}
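
/*
 * Illustrative sketch (not part of the upstream file): callers pass a
 * CRYPTO_RWLOCK so the same code also works on builds where
 * NO_INTERLOCKEDOR64 makes the 64-bit helpers above fall back to locking;
 * CRYPTO_atomic_add() itself never uses it. Names are hypothetical.
 */
#if 0
static int refcount_up(int *refs, CRYPTO_RWLOCK *lock)
{
    int newval;

    /* newval receives the post-increment value of *refs */
    return CRYPTO_atomic_add(refs, 1, &newval, lock);
}
#endif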
int openssl_init_fork_handlers(void)
{
    return 0;
}

int openssl_get_fork_id(void)
{
    return 0;
}
#endif