]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
ARC: atomics: Implement arch_atomic64_cmpxchg using _relaxed
authorJason Gunthorpe <jgg@nvidia.com>
Tue, 8 Apr 2025 17:22:56 +0000 (14:22 -0300)
committerVineet Gupta <vgupta@kernel.org>
Mon, 9 Jun 2025 16:18:12 +0000 (09:18 -0700)
The core atomic code has a number of macros where it elaborates
architecture primitives into more functions. ARC uses
arch_atomic64_cmpxchg() as its architecture primitive, which disables a lot
of the additional functions.

Instead provide arch_cmpxchg64_relaxed() as the primitive and rely on the
core macros to create arch_cmpxchg64().

The macros will also provide other functions, for instance,
try_cmpxchg64_release(), giving a more complete implementation.

Suggested-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/Z0747n5bSep4_1VX@J2N7QTR9R3
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
arch/arc/include/asm/atomic64-arcv2.h

index 9b5791b8547133b1d7ec4c53ab4a2f611857d165..73080a664369b447d8e67dc973e1fa9822e5af60 100644 (file)
@@ -137,12 +137,9 @@ ATOMIC64_OPS(xor, xor, xor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-static inline s64
-arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
+static inline u64 __arch_cmpxchg64_relaxed(volatile void *ptr, u64 old, u64 new)
 {
-       s64 prev;
-
-       smp_mb();
+       u64 prev;
 
        __asm__ __volatile__(
        "1:     llockd  %0, [%1]        \n"
@@ -152,14 +149,12 @@ arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
        "       bnz     1b              \n"
        "2:                             \n"
        : "=&r"(prev)
-       : "r"(ptr), "ir"(expected), "r"(new)
-       : "cc");        /* memory clobber comes from smp_mb() */
-
-       smp_mb();
+       : "r"(ptr), "ir"(old), "r"(new)
+       : "memory", "cc");
 
        return prev;
 }
-#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
+#define arch_cmpxchg64_relaxed __arch_cmpxchg64_relaxed
 
 static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
 {