s390/spinlock: Implement SPINLOCK_LOCKVAL with inline assembly
author		Heiko Carstens <hca@linux.ibm.com>
		Mon, 10 Mar 2025 09:33:41 +0000 (10:33 +0100)
committer	Vasily Gorbik <gor@linux.ibm.com>
		Tue, 18 Mar 2025 16:13:04 +0000 (17:13 +0100)
Implement SPINLOCK_LOCKVAL with inline assembly that makes use of the
ALTERNATIVE macro to read spinlock_lockval from lowcore. Provide an
alternative instruction with a different offset for the case that lowcore
is relocated.

This replaces sequences of two instructions with one instruction.

Before:
  10602a:       a7 78 00 00             lhi     %r7,0
  10602e:       a5 8e 00 00             llilh   %r8,0
  106032:       58 d0 83 ac             l       %r13,940(%r8)
  106036:       ba 7d b5 80             cs      %r7,%r13,1408(%r11)

After:
  10602a:       a7 88 00 00             lhi     %r8,0
  10602e:       e3 70 03 ac 00 58       ly      %r7,940
  106034:       ba 87 b5 80             cs      %r8,%r7,1408(%r11)
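
As an editorial aid (not part of the patch), here is the new helper from the
spinlock.h hunk below once more, with comments describing how the lookup
works. The code is copied from the patch; the comments are an annotation
based on how the ALTERNATIVE/MFEATURE_LOWCORE machinery is used elsewhere
in arch/s390.

/*
 * Annotated copy of the helper added to arch/s390/include/asm/spinlock.h.
 */
static __always_inline unsigned int spinlock_lockval(void)
{
        unsigned long lc_lockval;
        unsigned int lockval;

        /* The 32-bit ly load below has to match the field size. */
        BUILD_BUG_ON(sizeof_field(struct lowcore, spinlock_lockval) != sizeof(lockval));
        lc_lockval = offsetof(struct lowcore, spinlock_lockval);
        asm_inline(
                /*
                 * Default variant: load from the field's offset within the
                 * lowcore at address zero. If the lowcore has been relocated
                 * (MFEATURE_LOWCORE), the alternatives code patches in the
                 * second variant, which uses the offset into the relocated
                 * lowcore at LOWCORE_ALT_ADDRESS - still one instruction.
                 */
                ALTERNATIVE("   ly      %[lockval],%[offzero](%%r0)\n",
                            "   ly      %[lockval],%[offalt](%%r0)\n",
                            ALT_FEATURE(MFEATURE_LOWCORE))
                : [lockval] "=d" (lockval)
                : [offzero] "i" (lc_lockval),
                  [offalt] "i" (lc_lockval + LOWCORE_ALT_ADDRESS),
                  /* Tell the compiler which memory the asm reads. */
                  "m" (((struct lowcore *)0)->spinlock_lockval));
        return lockval;
}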

Kernel image size change:
add/remove: 756/750 grow/shrink: 646/3435 up/down: 30778/-46326 (-15548)

Acked-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
arch/s390/include/asm/spinlock.h
arch/s390/lib/spinlock.c

diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index f87dd0a84855da86815a4db2d7c6473fcb1947eb..f9935db9fd768c84cf4c6bb618411293a76de4c3 100644
 #include <asm/processor.h>
 #include <asm/alternative.h>
 
-#define SPINLOCK_LOCKVAL (get_lowcore()->spinlock_lockval)
+static __always_inline unsigned int spinlock_lockval(void)
+{
+       unsigned long lc_lockval;
+       unsigned int lockval;
+
+       BUILD_BUG_ON(sizeof_field(struct lowcore, spinlock_lockval) != sizeof(lockval));
+       lc_lockval = offsetof(struct lowcore, spinlock_lockval);
+       asm_inline(
+               ALTERNATIVE("   ly      %[lockval],%[offzero](%%r0)\n",
+                           "   ly      %[lockval],%[offalt](%%r0)\n",
+                           ALT_FEATURE(MFEATURE_LOWCORE))
+               : [lockval] "=d" (lockval)
+               : [offzero] "i" (lc_lockval),
+                 [offalt] "i" (lc_lockval + LOWCORE_ALT_ADDRESS),
+                 "m" (((struct lowcore *)0)->spinlock_lockval));
+       return lockval;
+}
 
 extern int spin_retry;
 
@@ -60,7 +76,7 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
        int old = 0;
 
        barrier();
-       return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL));
+       return likely(arch_try_cmpxchg(&lp->lock, &old, spinlock_lockval()));
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *lp)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 0c895c869ebd188fb3c518f3fe820f58d5cad4a7..ad9da40385119a61adecd49b9a344f4564610c08 100644
@@ -160,7 +160,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 
        ix = get_lowcore()->spinlock_index++;
        barrier();
-       lockval = SPINLOCK_LOCKVAL;     /* cpu + 1 */
+       lockval = spinlock_lockval();   /* cpu + 1 */
        node = this_cpu_ptr(&spin_wait[ix]);
        node->prev = node->next = NULL;
        node_id = node->node_id;
@@ -251,7 +251,7 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
 {
        int lockval, old, new, owner, count;
 
-       lockval = SPINLOCK_LOCKVAL;     /* cpu + 1 */
+       lockval = spinlock_lockval();   /* cpu + 1 */
 
        /* Pass the virtual CPU to the lock holder if it is not running */
        owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(arch_spin_lock_wait);
 
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
-       int cpu = SPINLOCK_LOCKVAL;
+       int cpu = spinlock_lockval();
        int owner, count;
 
        for (count = spin_retry; count > 0; count--) {