From: Heiko Carstens
Date: Mon, 10 Mar 2025 09:33:41 +0000 (+0100)
Subject: s390/spinlock: Implement SPINLOCK_LOCKVAL with inline assembly
X-Git-Tag: v6.15-rc1~113^2~9
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=b46525437e1728596fda558894011ce64e5b0a9f;p=thirdparty%2Fkernel%2Flinux.git

s390/spinlock: Implement SPINLOCK_LOCKVAL with inline assembly

Implement SPINLOCK_LOCKVAL with inline assembly that makes use of the
ALTERNATIVE macro to read spinlock_lockval from lowcore. Provide an
alternative instruction with a different offset in case lowcore is
relocated. This replaces a two-instruction sequence with a single
instruction.

Before:
   10602a:       a7 78 00 00             lhi     %r7,0
   10602e:       a5 8e 00 00             llilh   %r8,0
   106032:       58 d0 83 ac             l       %r13,940(%r8)
   106036:       ba 7d b5 80             cs      %r7,%r13,1408(%r11)

After:
   10602a:       a7 88 00 00             lhi     %r8,0
   10602e:       e3 70 03 ac 00 58       ly      %r7,940
   106034:       ba 87 b5 80             cs      %r8,%r7,1408(%r11)

Kernel image size change:
add/remove: 756/750 grow/shrink: 646/3435 up/down: 30778/-46326 (-15548)

Acked-by: Vasily Gorbik
Signed-off-by: Heiko Carstens
Signed-off-by: Vasily Gorbik
---

diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index f87dd0a84855d..f9935db9fd768 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -16,7 +16,23 @@
 #include
 #include
 
-#define SPINLOCK_LOCKVAL	(get_lowcore()->spinlock_lockval)
+static __always_inline unsigned int spinlock_lockval(void)
+{
+	unsigned long lc_lockval;
+	unsigned int lockval;
+
+	BUILD_BUG_ON(sizeof_field(struct lowcore, spinlock_lockval) != sizeof(lockval));
+	lc_lockval = offsetof(struct lowcore, spinlock_lockval);
+	asm_inline(
+		ALTERNATIVE("	ly	%[lockval],%[offzero](%%r0)\n",
+			    "	ly	%[lockval],%[offalt](%%r0)\n",
+			    ALT_FEATURE(MFEATURE_LOWCORE))
+		: [lockval] "=d" (lockval)
+		: [offzero] "i" (lc_lockval),
+		  [offalt] "i" (lc_lockval + LOWCORE_ALT_ADDRESS),
+		  "m" (((struct lowcore *)0)->spinlock_lockval));
+	return lockval;
+}
 
 extern int spin_retry;
 
@@ -60,7 +76,7 @@ static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
 	int old = 0;
 
 	barrier();
-	return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL));
+	return likely(arch_try_cmpxchg(&lp->lock, &old, spinlock_lockval()));
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *lp)
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 0c895c869ebd1..ad9da40385119 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -160,7 +160,7 @@ static inline void arch_spin_lock_queued(arch_spinlock_t *lp)
 
 	ix = get_lowcore()->spinlock_index++;
 	barrier();
-	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
+	lockval = spinlock_lockval();	/* cpu + 1 */
 	node = this_cpu_ptr(&spin_wait[ix]);
 	node->prev = node->next = NULL;
 	node_id = node->node_id;
@@ -251,7 +251,7 @@ static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
 {
 	int lockval, old, new, owner, count;
 
-	lockval = SPINLOCK_LOCKVAL;	/* cpu + 1 */
+	lockval = spinlock_lockval();	/* cpu + 1 */
 
 	/* Pass the virtual CPU to the lock holder if it is not running */
 	owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(arch_spin_lock_wait);
 
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
-	int cpu = SPINLOCK_LOCKVAL;
+	int cpu = spinlock_lockval();
 	int owner, count;
 
 	for (count = spin_retry; count > 0; count--) {
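
The ALTERNATIVE-based access pattern introduced by this patch generalizes to
other lowcore fields. The sketch below is not part of the patch; it is a
minimal illustration that assumes a hypothetical 32-bit member example_field
in struct lowcore and reuses only the facilities the patch itself relies on
(asm_inline, ALTERNATIVE, ALT_FEATURE(MFEATURE_LOWCORE), LOWCORE_ALT_ADDRESS,
offsetof, sizeof_field, BUILD_BUG_ON). The default instruction reads the
field relative to address zero; the alternative, selected at boot when
lowcore is relocated, encodes the relocated offset directly, so no separate
base-address load is needed.

	static __always_inline unsigned int lowcore_example_field(void)
	{
		unsigned long offset;
		unsigned int val;

		/* example_field is a hypothetical 32-bit member of struct lowcore */
		BUILD_BUG_ON(sizeof_field(struct lowcore, example_field) != sizeof(val));
		offset = offsetof(struct lowcore, example_field);
		asm_inline(
			/* default: lowcore at address 0; alternative: relocated lowcore */
			ALTERNATIVE("	ly	%[val],%[offzero](%%r0)\n",
				    "	ly	%[val],%[offalt](%%r0)\n",
				    ALT_FEATURE(MFEATURE_LOWCORE))
			: [val] "=d" (val)
			: [offzero] "i" (offset),
			  [offalt] "i" (offset + LOWCORE_ALT_ADDRESS),
			  "m" (((struct lowcore *)0)->example_field));
		return val;
	}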