s390/locking: Use arch_try_cmpxchg() instead of __atomic_cmpxchg_bool()
author    Heiko Carstens <hca@linux.ibm.com>
          Wed, 6 Nov 2024 10:03:15 +0000 (11:03 +0100)
committer Heiko Carstens <hca@linux.ibm.com>
          Tue, 12 Nov 2024 13:01:29 +0000 (14:01 +0100)
Use arch_try_cmpxchg() instead of __atomic_cmpxchg_bool() everywhere.
This generates the same code as before, but uses the standard
cmpxchg() implementation instead of a custom __atomic_cmpxchg_bool().
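
For reference, a minimal user-space sketch of the try_cmpxchg() calling
convention the converted call sites rely on. This is illustrative only, not
the kernel implementation; demo_try_cmpxchg() is a hypothetical stand-in
built on the compiler's __atomic builtin. Like __atomic_cmpxchg_bool() it
returns a boolean, but on failure it also stores the value it actually
observed into *old, which is why retry loops (see arch_spin_lock_queued()
below) no longer need to re-read the lock word on every iteration.

    /* Hypothetical stand-in for try_cmpxchg(), for illustration only. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool demo_try_cmpxchg(int *ptr, int *old, int new)
    {
            /* On mismatch, *old is updated to the value found in *ptr. */
            return __atomic_compare_exchange_n(ptr, old, new, false,
                                               __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
            int lock = 0;
            int old = 0;

            if (demo_try_cmpxchg(&lock, &old, 42))
                    printf("acquired: lock=%d\n", lock);      /* lock is now 42 */

            old = 0;                                          /* stale expectation */
            if (!demo_try_cmpxchg(&lock, &old, 7))
                    printf("failed: observed old=%d\n", old); /* old updated to 42 */
            return 0;
    }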

Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
arch/s390/include/asm/spinlock.h
arch/s390/lib/spinlock.c

diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 77d5e804af93e5a2ed2412f20a8f5a6b72ed65e5..ac868a9bb0d180fe5bc469106c043b5714f97083 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -57,8 +57,10 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lp)
 
 static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 {
+       int old = 0;
+
        barrier();
-       return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
+       return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL));
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *lp)
@@ -118,7 +120,9 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-       if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
+       int old = 0;
+
+       if (!arch_try_cmpxchg(&rw->cnts, &old, 0x30000))
                arch_write_lock_wait(rw);
 }
 
@@ -133,8 +137,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
        int old;
 
        old = READ_ONCE(rw->cnts);
-       return (!(old & 0xffff0000) &&
-               __atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
+       return (!(old & 0xffff0000) && arch_try_cmpxchg(&rw->cnts, &old, old + 1));
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -142,7 +145,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
        int old;
 
        old = READ_ONCE(rw->cnts);
-       return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
+       return !old && arch_try_cmpxchg(&rw->cnts, &old, 0x30000);
 }
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 9f86ad8fa8b4dca716de58d4fe0313cdc4818f31..09d735010ee1b8d17884ae9b8af627f3b9f37f55 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -127,8 +127,8 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
        node_id = node->node_id;
 
        /* Enqueue the node for this CPU in the spinlock wait queue */
+       old = READ_ONCE(lp->lock);
        while (1) {
-               old = READ_ONCE(lp->lock);
                if ((old & _Q_LOCK_CPU_MASK) == 0 &&
                    (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) {
                        /*
@@ -139,7 +139,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
                         * waiter will get the lock.
                         */
                        new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval;
-                       if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+                       if (arch_try_cmpxchg(&lp->lock, &old, new))
                                /* Got the lock */
                                goto out;
                        /* lock passing in progress */
@@ -147,7 +147,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
                }
                /* Make the node of this CPU the new tail. */
                new = node_id | (old & _Q_LOCK_MASK);
-               if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+               if (arch_try_cmpxchg(&lp->lock, &old, new))
                        break;
        }
        /* Set the 'next' pointer of the tail node in the queue */
@@ -184,7 +184,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
                if (!owner) {
                        tail_id = old & _Q_TAIL_MASK;
                        new = ((tail_id != node_id) ? tail_id : 0) | lockval;
-                       if (__atomic_cmpxchg_bool(&lp->lock, old, new))
+                       if (arch_try_cmpxchg(&lp->lock, &old, new))
                                /* Got the lock */
                                break;
                        continue;
@@ -258,7 +258,7 @@ int arch_spin_trylock_retry(arch_spinlock_t *lp)
                owner = READ_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
-                       if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
+                       if (arch_try_cmpxchg(&lp->lock, &owner, cpu))
                                return 1;
                }
        }
@@ -300,7 +300,7 @@ void arch_write_lock_wait(arch_rwlock_t *rw)
        while (1) {
                old = READ_ONCE(rw->cnts);
                if ((old & 0x1ffff) == 0 &&
-                   __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000))
+                   arch_try_cmpxchg(&rw->cnts, &old, old | 0x10000))
                        /* Got the lock */
                        break;
                barrier();