threads_pthread.c: change inline to ossl_inline
author     sanumesh <sanumesh@in.ibm.com>
           Mon, 27 May 2024 10:00:00 +0000 (05:00 -0500)
committer  Tomas Mraz <tomas@openssl.org>
           Tue, 28 May 2024 15:19:49 +0000 (17:19 +0200)
Reviewed-by: Neil Horman <nhorman@openssl.org>
Reviewed-by: Matt Caswell <matt@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/24502)

(cherry picked from commit 36ba419286843bcaeb497b3451540ab7587cf9d2)
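For reference, ossl_inline is OpenSSL's portable spelling of the inline
keyword, defined in include/openssl/e_os2.h. A simplified sketch of that
shim (the real definition carries a few more compiler guards):

    #if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    # define ossl_inline inline      /* C99 and later: use the keyword */
    #elif defined(__GNUC__) && __GNUC__ >= 2
    # define ossl_inline __inline__  /* older GCC spelling */
    #elif defined(_MSC_VER)
    # define ossl_inline __inline    /* MSVC spelling */
    #else
    # define ossl_inline             /* no inline support: expand to nothing */
    #endif

Using ossl_inline instead of the bare keyword keeps the file buildable on
pre-C99 toolchains where inline is not a recognized keyword.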

crypto/threads_pthread.c

index 30540f5e8ad9787c8b7f1392ceef46643d04a336..92346e1689ac3b907528e60933ef37d5fed46c62 100644 (file)
@@ -114,7 +114,7 @@ static inline void *apple_atomic_load_n_pvoid(void **p,
 static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
 
 #  define IMPL_fallback_atomic_load_n(t)                        \
-    static inline t fallback_atomic_load_n_##t(t *p)            \
+    static ossl_inline t fallback_atomic_load_n_##t(t *p)            \
     {                                                           \
         t ret;                                                  \
                                                                 \
@@ -129,7 +129,7 @@ IMPL_fallback_atomic_load_n(pvoid)
 #  define ATOMIC_LOAD_N(t, p, o) fallback_atomic_load_n_##t(p)
 
 #  define IMPL_fallback_atomic_store_n(t)                       \
-    static inline t fallback_atomic_store_n_##t(t *p, t v)      \
+    static ossl_inline t fallback_atomic_store_n_##t(t *p, t v)      \
     {                                                           \
         t ret;                                                  \
                                                                 \
@@ -144,7 +144,7 @@ IMPL_fallback_atomic_store_n(uint64_t)
 #  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
 
 #  define IMPL_fallback_atomic_store(t)                         \
-    static inline void fallback_atomic_store_##t(t *p, t *v)    \
+    static ossl_inline void fallback_atomic_store_##t(t *p, t *v)    \
     {                                                           \
         pthread_mutex_lock(&atomic_sim_lock);                   \
         *p = *v;                                                \
@@ -156,7 +156,7 @@ IMPL_fallback_atomic_store(pvoid)
 #  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
 
 #  define IMPL_fallback_atomic_exchange_n(t)                            \
-    static inline t fallback_atomic_exchange_n_##t(t *p, t v)           \
+    static ossl_inline t fallback_atomic_exchange_n_##t(t *p, t v)           \
     {                                                                   \
         t ret;                                                          \
                                                                         \
@@ -178,7 +178,7 @@ IMPL_fallback_atomic_exchange_n(prcu_cb_item)
  * way as the fallbacks above.
  */
 
-static inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
+static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
 {
     uint64_t ret;
 
@@ -191,7 +191,7 @@ static inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
 
 #  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
 
-static inline uint64_t fallback_atomic_fetch_add(uint64_t *p, uint64_t v)
+static ossl_inline uint64_t fallback_atomic_fetch_add(uint64_t *p, uint64_t v)
 {
     uint64_t ret;
 
@@ -204,7 +204,7 @@ static inline uint64_t fallback_atomic_fetch_add(uint64_t *p, uint64_t v)
 
 #  define ATOMIC_FETCH_ADD(p, v, o) fallback_atomic_fetch_add(p, v)
 
-static inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
+static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
 {
     uint64_t ret;
 
@@ -217,7 +217,7 @@ static inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
 
 #  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
 
-static inline uint64_t fallback_atomic_and_fetch(uint64_t *p, uint64_t m)
+static ossl_inline uint64_t fallback_atomic_and_fetch(uint64_t *p, uint64_t m)
 {
     uint64_t ret;
 
@@ -230,7 +230,7 @@ static inline uint64_t fallback_atomic_and_fetch(uint64_t *p, uint64_t m)
 
 #  define ATOMIC_AND_FETCH(p, v, o) fallback_atomic_and_fetch(p, v)
 
-static inline uint64_t fallback_atomic_or_fetch(uint64_t *p, uint64_t m)
+static ossl_inline uint64_t fallback_atomic_or_fetch(uint64_t *p, uint64_t m)
 {
     uint64_t ret;
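For context, the functions touched above are fallback "atomics" used when
real compiler atomic builtins are unavailable: a single pthread mutex
(atomic_sim_lock) serializes every operation. A minimal, self-contained
sketch of that pattern, with ossl_inline reduced to a hypothetical
stand-in definition for illustration:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for OpenSSL's ossl_inline (see include/openssl/e_os2.h). */
    #define ossl_inline inline

    /* One lock serializes every fallback operation, as in the patched file. */
    static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Same shape as fallback_atomic_add_fetch in the diff: take the lock,
     * do the read-modify-write, and return the updated value. */
    static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
    {
        uint64_t ret;

        pthread_mutex_lock(&atomic_sim_lock);
        *p += v;
        ret = *p;
        pthread_mutex_unlock(&atomic_sim_lock);
        return ret;
    }

    int main(void)
    {
        uint64_t counter = 0;

        /* Prints 5: the post-add value, mirroring __atomic_add_fetch(). */
        printf("%llu\n",
               (unsigned long long)fallback_atomic_add_fetch(&counter, 5));
        return 0;
    }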