# if defined(__GNUC__) && defined(__ATOMIC_ACQUIRE) && !defined(BROKEN_CLANG_ATOMICS) \
    && !defined(USE_ATOMIC_FALLBACKS)
-# if defined(__APPLE__) && defined(__clang__) && defined(__aarch64__) && defined(__LP64__)
-/*
- * For pointers, Apple M1 virtualized cpu seems to have some problem using the
- * ldapr instruction (see https://github.com/openssl/openssl/pull/23974)
- * When using the native apple clang compiler, this instruction is emitted for
- * atomic loads, which is bad. So, if
- * 1) We are building on a target that defines __APPLE__ AND
- * 2) We are building on a target using clang (__clang__) AND
- * 3) We are building for an M1 processor (__aarch64__) AND
- * 4) We are building with 64 bit pointers
- * Then we should not use __atomic_load_n and instead implement our own
- * function to issue the ldar instruction instead, which produces the proper
- * sequencing guarantees
- */
-static inline void *apple_atomic_load_n_pvoid(void **p,
-                                              ossl_unused int memorder)
-{
-    void *ret;
-
-    __asm volatile("ldar %0, [%1]" : "=r" (ret): "r" (p):);
-
-    return ret;
-}
-
-/* For uint64_t, we should be fine, though */
-# define apple_atomic_load_n_uint32_t(p, o) __atomic_load_n(p, o)
-# define apple_atomic_load_n_uint64_t(p, o) __atomic_load_n(p, o)
-
-# define ATOMIC_LOAD_N(t, p, o) apple_atomic_load_n_##t(p, o)
-# else
-# define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
-# endif
+# define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
# define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)
# define ATOMIC_STORE(t, p, v, o) __atomic_store(p, v, o)
# define ATOMIC_ADD_FETCH(p, v, o) __atomic_add_fetch(p, v, o)
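
/*
 * A minimal, hypothetical sketch (not OpenSSL source) of how callers can
 * exercise these macros once ATOMIC_LOAD_N(t, p, o) expands straight to
 * __atomic_load_n.  The type token `t` is ignored by the generic expansion;
 * it remains in the macro signature only so that existing call sites, which
 * passed the operand type (e.g. pvoid, uint64_t) for the removed
 * apple_atomic_load_n_##t dispatch, keep compiling unchanged.  The main()
 * driver below is purely illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define ATOMIC_LOAD_N(t, p, o) __atomic_load_n(p, o)
#define ATOMIC_STORE_N(t, p, v, o) __atomic_store_n(p, v, o)

int main(void)
{
    uint64_t counter = 0;
    void *ptr = &counter;
    uint64_t seen;
    void *seen_ptr;

    /* Release store, paired with the acquire loads below. */
    ATOMIC_STORE_N(uint64_t, &counter, 42, __ATOMIC_RELEASE);

    /* The type token is unused by the generic __atomic_load_n expansion. */
    seen = ATOMIC_LOAD_N(uint64_t, &counter, __ATOMIC_ACQUIRE);
    seen_ptr = ATOMIC_LOAD_N(pvoid, &ptr, __ATOMIC_ACQUIRE);

    printf("counter=%llu ptr=%p\n", (unsigned long long)seen, seen_ptr);
    return 0;
}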