/*
 * Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License").  You may not use
 * this file except in compliance with the License.  You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#ifndef OSSL_INTERNAL_REFCOUNT_H
# define OSSL_INTERNAL_REFCOUNT_H

/*
 * Internal reference-counting primitives.
 *
 * Each branch below defines:
 *   - CRYPTO_REF_COUNT: the counter type for that platform;
 *   - CRYPTO_UP_REF(val, ret, lock) / CRYPTO_DOWN_REF(val, ret, lock):
 *     atomically adjust *val by +/-1, store the NEW count in *ret, and
 *     return 1 on success.
 * In the lock-free branches the |lock| parameter is unused; it exists so
 * the signature matches the lock-based fallback at the bottom.
 */

/* Used to check reference counts, mostly while doing perl5 stuff :-) */
# if defined(OPENSSL_NO_STDIO)
#  if defined(REF_PRINT)
#   error "REF_PRINT requires stdio"
#  endif
# endif

# ifndef OPENSSL_DEV_NO_ATOMICS
#  if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \
      && !defined(__STDC_NO_ATOMICS__)
#   include <stdatomic.h>
#   define HAVE_C11_ATOMICS
#  endif

#  if defined(HAVE_C11_ATOMICS) && defined(ATOMIC_INT_LOCK_FREE) \
      && ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

typedef _Atomic int CRYPTO_REF_COUNT;

/* Relaxed ordering suffices for an increment: it cannot release the object. */
static inline int CRYPTO_UP_REF(_Atomic int *val, int *ret, void *lock)
{
    *ret = atomic_fetch_add_explicit(val, 1, memory_order_relaxed) + 1;
    return 1;
}

/*
 * Changes to shared structure other than reference counter have to be
 * serialized. And any kind of serialization implies a release fence. This
 * means that by the time reference counter is decremented all other
 * changes are visible on all processors. Hence decrement itself can be
 * relaxed. In case it hits zero, object will be destructed. Since it's
 * last use of the object, destructor programmer might reason that access
 * to mutable members doesn't have to be serialized anymore, which would
 * otherwise imply an acquire fence. Hence conditional acquire fence...
 */
static inline int CRYPTO_DOWN_REF(_Atomic int *val, int *ret, void *lock)
{
    *ret = atomic_fetch_sub_explicit(val, 1, memory_order_relaxed) - 1;
    if (*ret == 0)
        atomic_thread_fence(memory_order_acquire);
    return 1;
}

#  elif defined(__GNUC__) && defined(__ATOMIC_RELAXED) && __GCC_ATOMIC_INT_LOCK_FREE > 0

#   define HAVE_ATOMICS 1

typedef int CRYPTO_REF_COUNT;

static __inline__ int CRYPTO_UP_REF(int *val, int *ret, void *lock)
{
    *ret = __atomic_fetch_add(val, 1, __ATOMIC_RELAXED) + 1;
    return 1;
}

/* Same reasoning as the C11 version: relaxed decrement, acquire fence at 0. */
static __inline__ int CRYPTO_DOWN_REF(int *val, int *ret, void *lock)
{
    *ret = __atomic_fetch_sub(val, 1, __ATOMIC_RELAXED) - 1;
    if (*ret == 0)
        __atomic_thread_fence(__ATOMIC_ACQUIRE);
    return 1;
}

#  elif defined(__ICL) && defined(_WIN32)
#   define HAVE_ATOMICS 1
typedef volatile int CRYPTO_REF_COUNT;

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd((void *)val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd((void *)val, -1) - 1;
    return 1;
}

#  elif defined(_MSC_VER) && _MSC_VER>=1200

#   define HAVE_ATOMICS 1

typedef volatile int CRYPTO_REF_COUNT;

#   if (defined(_M_ARM) && _M_ARM>=7 && !defined(_WIN32_WCE)) || defined(_M_ARM64)
#    include <intrin.h>
#    if defined(_M_ARM64) && !defined(_ARM_BARRIER_ISH)
#     define _ARM_BARRIER_ISH _ARM64_BARRIER_ISH
#    endif

/* _nf ("no fence") variants: relaxed ordering, matching the branches above. */
static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd_nf(val, -1) - 1;
    if (*ret == 0)
        __dmb(_ARM_BARRIER_ISH);
    return 1;
}
#   else
#    if !defined(_WIN32_WCE)
#     pragma intrinsic(_InterlockedExchangeAdd)
#    else
#     if _WIN32_WCE >= 0x600
       extern long __cdecl _InterlockedExchangeAdd(long volatile*, long);
#     else
       /* under Windows CE we still have old-style Interlocked* functions */
       extern long __cdecl InterlockedExchangeAdd(long volatile*, long);
#      define _InterlockedExchangeAdd InterlockedExchangeAdd
#     endif
#    endif

static __inline int CRYPTO_UP_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd(val, 1) + 1;
    return 1;
}

static __inline int CRYPTO_DOWN_REF(volatile int *val, int *ret, void *lock)
{
    *ret = _InterlockedExchangeAdd(val, -1) - 1;
    return 1;
}
#   endif

#  endif
# endif /* !OPENSSL_DEV_NO_ATOMICS */

/*
 * All the refcounting implementations above define HAVE_ATOMICS, so if it's
 * still undefined here (such as when OPENSSL_DEV_NO_ATOMICS is defined), it
 * means we need to implement a fallback.  This fallback uses locks.
 */
# ifndef HAVE_ATOMICS

typedef int CRYPTO_REF_COUNT;

#  define CRYPTO_UP_REF(val, ret, lock) CRYPTO_atomic_add(val, 1, ret, lock)
#  define CRYPTO_DOWN_REF(val, ret, lock) CRYPTO_atomic_add(val, -1, ret, lock)

# endif

# if !defined(NDEBUG) && !defined(OPENSSL_NO_STDIO)
#  define REF_ASSERT_ISNT(test) \
    (void)((test) ? (OPENSSL_die("refcount error", __FILE__, __LINE__), 1) : 0)
# else
#  define REF_ASSERT_ISNT(i)
# endif

# ifdef REF_PRINT
#  define REF_PRINT_COUNT(a, b) \
    fprintf(stderr, "%p:%4d:%s\n", b, b->references, a)
# else
#  define REF_PRINT_COUNT(a, b)
# endif

#endif