s390/atomic: Consistent layering between atomic.h and atomic_ops.h
Author:     Heiko Carstens <hca@linux.ibm.com>
AuthorDate: Wed, 4 Dec 2024 11:30:59 +0000 (12:30 +0100)
Commit:     Alexander Gordeev <agordeev@linux.ibm.com>
CommitDate: Sun, 15 Dec 2024 14:13:43 +0000 (15:13 +0100)
With commit c8a91c285d8c ("s390/atomic: move remaining inline assemblies to
atomic_ops.h") all remaining atomic inline assemblies have been moved to
atomic_ops.h.

However, the result is inconsistent: the functions in atomic_ops.h are
supposed to be used with integral types like int and long pointers, while
the functions in atomic.h work with atomic types.

This layering was violated by the named commit. Therefore adjust this
now, and also use consistent variable names in atomic_ops.h.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
arch/s390/include/asm/atomic.h
arch/s390/include/asm/atomic_ops.h
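To illustrate the layering this commit restores, here is a minimal
stand-alone sketch (hypothetical names and simplified types, not code from
this commit): the ops layer only ever sees a plain int pointer, while the
wrapper layer unwraps the atomic type before delegating.

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* ops layer: knows nothing about atomic types, takes a plain int pointer */
static inline int my_atomic_read_ops(const int *ptr)
{
	return *(const volatile int *)ptr;
}

/* wrapper layer: unwraps the atomic type and delegates */
static inline int my_arch_atomic_read(const my_atomic_t *v)
{
	return my_atomic_read_ops(&v->counter);
}

int main(void)
{
	my_atomic_t v = { .counter = 42 };

	printf("%d\n", my_arch_atomic_read(&v));
	return 0;
}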

diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c1a4a06e83408947016387c384c755df0f1a70c9..92d6f33b604681c5faf06ef4e525876c62944a90 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
 
 static __always_inline int arch_atomic_read(const atomic_t *v)
 {
-       return __atomic_read(v);
+       return __atomic_read(&v->counter);
 }
 #define arch_atomic_read arch_atomic_read
 
 static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
-       __atomic_set(v, i);
+       __atomic_set(&v->counter, i);
 }
 #define arch_atomic_set arch_atomic_set
 
@@ -106,13 +106,13 @@ static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int n
 
 static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
-       return __atomic64_read(v);
+       return __atomic64_read((long *)&v->counter);
 }
 #define arch_atomic64_read arch_atomic64_read
 
 static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
-       __atomic64_set(v, i);
+       __atomic64_set((long *)&v->counter, i);
 }
 #define arch_atomic64_set arch_atomic64_set
 
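The (long *) casts above rely on s390 being a 64-bit architecture, where
long and s64 have identical size and representation, so passing
&v->counter (an s64 pointer) as a long pointer is safe. A stand-alone
compile-time check along these lines (an illustration, not part of the
commit) documents that assumption:

#include <assert.h>
#include <stdint.h>

/* the (long *)&v->counter casts assume an LP64 target such as s390x */
static_assert(sizeof(long) == sizeof(int64_t),
	      "atomic64 helpers assume 64-bit long");

int main(void)
{
	return 0;
}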
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 1d6b2056fad8543e92b73dd758e06b03cd80d442..90573508d0454e5a9016ce86be42cc464330e69b 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
 #include <linux/limits.h>
 #include <asm/march.h>
 
-static __always_inline int __atomic_read(const atomic_t *v)
+static __always_inline int __atomic_read(const int *ptr)
 {
-       int c;
+       int val;
 
        asm volatile(
-               "       l       %[c],%[counter]\n"
-               : [c] "=d" (c) : [counter] "R" (v->counter));
-       return c;
+               "       l       %[val],%[ptr]\n"
+               : [val] "=d" (val) : [ptr] "R" (*ptr));
+       return val;
 }
 
-static __always_inline void __atomic_set(atomic_t *v, int i)
+static __always_inline void __atomic_set(int *ptr, int val)
 {
-       if (__builtin_constant_p(i) && i >= S16_MIN && i <= S16_MAX) {
+       if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) {
                asm volatile(
-                       "       mvhi    %[counter], %[i]\n"
-                       : [counter] "=Q" (v->counter) : [i] "K" (i));
+                       "       mvhi    %[ptr],%[val]\n"
+                       : [ptr] "=Q" (*ptr) : [val] "K" (val));
        } else {
                asm volatile(
-                       "       st      %[i],%[counter]\n"
-                       : [counter] "=R" (v->counter) : [i] "d" (i));
+                       "       st      %[val],%[ptr]\n"
+                       : [ptr] "=R" (*ptr) : [val] "d" (val));
        }
 }
 
-static __always_inline s64 __atomic64_read(const atomic64_t *v)
+static __always_inline long __atomic64_read(const long *ptr)
 {
-       s64 c;
+       long val;
 
        asm volatile(
-               "       lg      %[c],%[counter]\n"
-               : [c] "=d" (c) : [counter] "RT" (v->counter));
-       return c;
+               "       lg      %[val],%[ptr]\n"
+               : [val] "=d" (val) : [ptr] "RT" (*ptr));
+       return val;
 }
 
-static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void __atomic64_set(long *ptr, long val)
 {
-       if (__builtin_constant_p(i) && i >= S16_MIN && i <= S16_MAX) {
+       if (__builtin_constant_p(val) && val >= S16_MIN && val <= S16_MAX) {
                asm volatile(
-                       "       mvghi   %[counter], %[i]\n"
-                       : [counter] "=Q" (v->counter) : [i] "K" (i));
+                       "       mvghi   %[ptr],%[val]\n"
+                       : [ptr] "=Q" (*ptr) : [val] "K" (val));
        } else {
                asm volatile(
-                       "       stg     %[i],%[counter]\n"
-                       : [counter] "=RT" (v->counter) : [i] "d" (i));
+                       "       stg     %[val],%[ptr]\n"
+                       : [ptr] "=RT" (*ptr) : [val] "d" (val));
        }
 }
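For reference, a hypothetical call site (not from the commit) shows which
store instruction each path of the rewritten setters selects: constants
that fit a signed 16-bit immediate use mvhi/mvghi, everything else goes
through a register store.

static void example(int *x, long *y, long runtime_val)
{
	__atomic_set(x, 1);             /* constant within S16 range: mvhi  */
	__atomic_set(x, 100000);        /* constant above S16_MAX:    st    */
	__atomic64_set(y, -5);          /* constant within S16 range: mvghi */
	__atomic64_set(y, runtime_val); /* not a compile-time constant: stg */
}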