git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
seqlock: Cure some more scoped_seqlock() optimization fails
Author: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 4 Dec 2025 10:43:32 +0000 (11:43 +0100)
Committer: Ingo Molnar <mingo@kernel.org>
Date: Sat, 6 Dec 2025 08:53:05 +0000 (09:53 +0100)
Arnd reported an x86 randconfig using gcc-15 tripped over
__scoped_seqlock_bug(). Turns out GCC chose not to inline the
scoped_seqlock helper functions and as such was not able to optimize
properly.

[ mingo: Clang fails the build too in some circumstances. ]

Reported-by: Arnd Bergmann <arnd@arndb.de>
Tested-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Link: https://patch.msgid.link/20251204104332.GG2528459@noisy.programming.kicks-ass.net
include/linux/seqlock.h

index a8a8661839b637dfc84ebff49adb5ec163013090..221123660e710ee2dff4e97be9d1e605aa0714e0 100644 (file)
@@ -1224,7 +1224,7 @@ struct ss_tmp {
        spinlock_t      *lock_irqsave;
 };
 
-static inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
+static __always_inline void __scoped_seqlock_cleanup(struct ss_tmp *sst)
 {
        if (sst->lock)
                spin_unlock(sst->lock);
@@ -1252,7 +1252,7 @@ static inline void __scoped_seqlock_bug(void) { }
 extern void __scoped_seqlock_bug(void);
 #endif
 
-static inline void
+static __always_inline void
 __scoped_seqlock_next(struct ss_tmp *sst, seqlock_t *lock, enum ss_state target)
 {
        switch (sst->state) {