git.ipfire.org Git - thirdparty/openssl.git/commitdiff
Cleanup atomic fallbacks in threads_pthread.c
author: Bernd Edlinger <bernd.edlinger@hotmail.de>
Tue, 18 Feb 2025 11:37:42 +0000 (12:37 +0100)
committer: Bernd Edlinger <bernd.edlinger@hotmail.de>
Fri, 21 Feb 2025 13:01:51 +0000 (14:01 +0100)
The unused atomic stub functions make clang issue
unused-function warnings (-Wunused-function).

Reviewed-by: Tomas Mraz <tomas@openssl.org>
Reviewed-by: Neil Horman <nhorman@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/26815)

crypto/threads_pthread.c

index 4d2e1e125e9c21329f5a60a362310e9519e08815..6d87947aeab1222a569a7dbb42d145a187ecefd2 100644 (file)
@@ -127,12 +127,8 @@ static inline void *apple_atomic_load_n_pvoid(void **p,
 #  endif
 #  define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
 #  define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
-#  define ATOMIC_EXCHANGE_N(t, p, v, o) __atomic_exchange_n(p, v, o)
 #  define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
-#  define ATOMIC_FETCH_ADD(p, v, o) __atomic_fetch_add(p, v, o)
 #  define ATOMIC_SUB_FETCH(p, v, o) __atomic_sub_fetch(p, v, o)
-#  define ATOMIC_AND_FETCH(p, m, o) __atomic_and_fetch(p, m, o)
-#  define ATOMIC_OR_FETCH(p, m, o) __atomic_or_fetch(p, m, o)
 # else
 static pthread_mutex_t atomic_sim_lock = PTHREAD_MUTEX_INITIALIZER;
 
@@ -164,7 +160,6 @@ IMPL_fallback_atomic_load_n(pvoid)
         return ret;                                             \
     }
 IMPL_fallback_atomic_store_n(uint32_t)
-IMPL_fallback_atomic_store_n(uint64_t)
 
 #  define ATOMIC_STORE_N(t, p, v, o) fallback_atomic_store_n_##t(p, v)
 
@@ -175,26 +170,10 @@ IMPL_fallback_atomic_store_n(uint64_t)
         *p = *v;                                                \
         pthread_mutex_unlock(&atomic_sim_lock);                 \
     }
-IMPL_fallback_atomic_store(uint64_t)
 IMPL_fallback_atomic_store(pvoid)
 
 #  define ATOMIC_STORE(t, p, v, o) fallback_atomic_store_##t(p, v)
 
-#  define IMPL_fallback_atomic_exchange_n(t)                            \
-    static ossl_inline t fallback_atomic_exchange_n_##t(t *p, t v)           \
-    {                                                                   \
-        t ret;                                                          \
-                                                                        \
-        pthread_mutex_lock(&atomic_sim_lock);                           \
-        ret = *p;                                                       \
-        *p = v;                                                         \
-        pthread_mutex_unlock(&atomic_sim_lock);                         \
-        return ret;                                                     \
-    }
-IMPL_fallback_atomic_exchange_n(uint64_t)
-
-#  define ATOMIC_EXCHANGE_N(t, p, v, o) fallback_atomic_exchange_n_##t(p, v)
-
 /*
  * The fallbacks that follow don't need any per type implementation, as
  * they are designed for uint64_t only.  If there comes a time when multiple
@@ -215,19 +194,6 @@ static ossl_inline uint64_t fallback_atomic_add_fetch(uint64_t *p, uint64_t v)
 
 #  define ATOMIC_ADD_FETCH(p, v, o) fallback_atomic_add_fetch(p, v)
 
-static ossl_inline uint64_t fallback_atomic_fetch_add(uint64_t *p, uint64_t v)
-{
-    uint64_t ret;
-
-    pthread_mutex_lock(&atomic_sim_lock);
-    ret = *p;
-    *p += v;
-    pthread_mutex_unlock(&atomic_sim_lock);
-    return ret;
-}
-
-#  define ATOMIC_FETCH_ADD(p, v, o) fallback_atomic_fetch_add(p, v)
-
 static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
 {
     uint64_t ret;
@@ -240,32 +206,6 @@ static ossl_inline uint64_t fallback_atomic_sub_fetch(uint64_t *p, uint64_t v)
 }
 
 #  define ATOMIC_SUB_FETCH(p, v, o) fallback_atomic_sub_fetch(p, v)
-
-static ossl_inline uint64_t fallback_atomic_and_fetch(uint64_t *p, uint64_t m)
-{
-    uint64_t ret;
-
-    pthread_mutex_lock(&atomic_sim_lock);
-    *p &= m;
-    ret = *p;
-    pthread_mutex_unlock(&atomic_sim_lock);
-    return ret;
-}
-
-#  define ATOMIC_AND_FETCH(p, v, o) fallback_atomic_and_fetch(p, v)
-
-static ossl_inline uint64_t fallback_atomic_or_fetch(uint64_t *p, uint64_t m)
-{
-    uint64_t ret;
-
-    pthread_mutex_lock(&atomic_sim_lock);
-    *p |= m;
-    ret = *p;
-    pthread_mutex_unlock(&atomic_sim_lock);
-    return ret;
-}
-
-#  define ATOMIC_OR_FETCH(p, v, o) fallback_atomic_or_fetch(p, v)
 # endif
 
 /*