From 38f1311a2219220a3962fae464ca6300ef60b4c1 Mon Sep 17 00:00:00 2001
From: Marco Elver <elver@google.com>
Date: Fri, 19 Dec 2025 16:39:58 +0100
Subject: [PATCH] compiler-context-analysis: Change __cond_acquires to take
 return value

While Sparse is oblivious to the return value of conditional acquire
functions, Clang's context analysis needs to know the return value
which indicates successful acquisition.

Add the additional argument, and convert existing uses.

Notably, Clang's interpretation of the value merely relates to the use
in a later conditional branch, i.e. 1 ==> context lock acquired in
branch taken if condition non-zero, and 0 ==> context lock acquired in
branch taken if condition is zero. Given the precise value does not
matter, introduce symbolic variants to use instead of either 0 or 1,
which should be more intuitive.

No functional change intended.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://patch.msgid.link/20251219154418.3592607-10-elver@google.com
---
 fs/dlm/lock.c                             |  2 +-
 include/linux/compiler-context-analysis.h | 31 +++++++++++++++++++----
 include/linux/refcount.h                  |  6 ++---
 include/linux/spinlock.h                  |  6 ++---
 include/linux/spinlock_api_smp.h          |  8 +++---
 net/ipv4/tcp_sigpool.c                    |  2 +-
 6 files changed, 38 insertions(+), 17 deletions(-)

diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index be938fdf17d96..0ce04be0d3dee 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -343,7 +343,7 @@ void dlm_hold_rsb(struct dlm_rsb *r)
 /* TODO move this to lib/refcount.c */
 static __must_check bool dlm_refcount_dec_and_write_lock_bh(refcount_t *r,
							     rwlock_t *lock)
-__cond_acquires(lock)
+	__cond_acquires(true, lock)
 {
 	if (refcount_dec_not_one(r))
 		return false;
diff --git a/include/linux/compiler-context-analysis.h b/include/linux/compiler-context-analysis.h
index d0b3cf0ebfe95..a6a34985dbb2a 100644
--- a/include/linux/compiler-context-analysis.h
+++ b/include/linux/compiler-context-analysis.h
@@ -271,7 +271,7 @@ static inline void _context_unsafe_alias(void **p) { }
 # define __must_hold(x)		__attribute__((context(x,1,1)))
 # define __must_not_hold(x)
 # define __acquires(x)		__attribute__((context(x,0,1)))
-# define __cond_acquires(x)	__attribute__((context(x,0,-1)))
+# define __cond_acquires(ret, x)	__attribute__((context(x,0,-1)))
 # define __releases(x)		__attribute__((context(x,1,0)))
 # define __acquire(x)		__context__(x,1)
 # define __release(x)		__context__(x,-1)
@@ -314,15 +314,32 @@ static inline void _context_unsafe_alias(void **p) { }
  */
 # define __acquires(x)		__acquires_ctx_lock(x)
 
+/*
+ * Clang's analysis does not care precisely about the value, only that it is
+ * either zero or non-zero. So the __cond_acquires() interface might be
+ * misleading if we say that @ret is the value returned if acquired. Instead,
+ * provide symbolic variants which we translate.
+ */
+#define __cond_acquires_impl_true(x, ...)	__try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_false(x, ...)	__try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_nonzero(x, ...)	__try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_0(x, ...)		__try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+#define __cond_acquires_impl_nonnull(x, ...)	__try_acquires##__VA_ARGS__##_ctx_lock(1, x)
+#define __cond_acquires_impl_NULL(x, ...)	__try_acquires##__VA_ARGS__##_ctx_lock(0, x)
+
 /**
  * __cond_acquires() - function attribute, function conditionally
  *                     acquires a context lock exclusively
+ * @ret: abstract value returned by function if context lock acquired
  * @x: context lock instance pointer
  *
  * Function attribute declaring that the function conditionally acquires the
- * given context lock instance @x exclusively, but does not release it.
+ * given context lock instance @x exclusively, but does not release it. The
+ * function return value @ret denotes when the context lock is acquired.
+ *
+ * @ret may be one of: true, false, nonzero, 0, nonnull, NULL.
  */
-# define __cond_acquires(x)	__try_acquires_ctx_lock(1, x)
+# define __cond_acquires(ret, x)	__cond_acquires_impl_##ret(x)
 
 /**
  * __releases() - function attribute, function releases a context lock exclusively
@@ -389,12 +406,16 @@ static inline void _context_unsafe_alias(void **p) { }
 /**
  * __cond_acquires_shared() - function attribute, function conditionally
  *                            acquires a context lock shared
+ * @ret: abstract value returned by function if context lock acquired
  * @x: context lock instance pointer
  *
  * Function attribute declaring that the function conditionally acquires the
- * given context lock instance @x with shared access, but does not release it.
+ * given context lock instance @x with shared access, but does not release it. The
+ * function return value @ret denotes when the context lock is acquired.
+ *
+ * @ret may be one of: true, false, nonzero, 0, nonnull, NULL.
  */
-# define __cond_acquires_shared(x)	__try_acquires_shared_ctx_lock(1, x)
+# define __cond_acquires_shared(ret, x)	__cond_acquires_impl_##ret(x, _shared)
 
 /**
  * __releases_shared() - function attribute, function releases a
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 80dc023ac2bf6..3da377ffb0c28 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -478,9 +478,9 @@ static inline void refcount_dec(refcount_t *r)
 
 extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
-extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(true, lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(true, lock);
 extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
-						       unsigned long *flags) __cond_acquires(lock);
+						       unsigned long *flags) __cond_acquires(true, lock);
 
 #endif /* _LINUX_REFCOUNT_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 72aabdd4fa3f5..7e560c7a7b23a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -362,7 +362,7 @@ static __always_inline void spin_lock_bh(spinlock_t *lock)
 }
 
 static __always_inline int spin_trylock(spinlock_t *lock)
-	__cond_acquires(lock) __no_context_analysis
+	__cond_acquires(true, lock) __no_context_analysis
 {
 	return raw_spin_trylock(&lock->rlock);
 }
@@ -422,13 +422,13 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned lo
 }
 
 static __always_inline int spin_trylock_bh(spinlock_t *lock)
-	__cond_acquires(lock) __no_context_analysis
+	__cond_acquires(true, lock) __no_context_analysis
 {
 	return raw_spin_trylock_bh(&lock->rlock);
 }
 
 static __always_inline int spin_trylock_irq(spinlock_t *lock)
-	__cond_acquires(lock) __no_context_analysis
+	__cond_acquires(true, lock) __no_context_analysis
 {
 	return raw_spin_trylock_irq(&lock->rlock);
 }
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index d19327e04df95..7e7d7d3732132 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -34,8 +34,8 @@ unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
 unsigned long __lockfunc
 _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
 								__acquires(lock);
-int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)		__cond_acquires(lock);
-int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)	__cond_acquires(lock);
+int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)		__cond_acquires(true, lock);
+int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)	__cond_acquires(true, lock);
 void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)		__releases(lock);
 void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
 void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
@@ -84,7 +84,7 @@ _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
 #endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
-	__cond_acquires(lock)
+	__cond_acquires(true, lock)
 {
 	preempt_disable();
 	if (do_raw_spin_trylock(lock)) {
@@ -177,7 +177,7 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
 }
 
 static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
-	__cond_acquires(lock)
+	__cond_acquires(true, lock)
 {
 	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
 	if (do_raw_spin_trylock(lock)) {
diff --git a/net/ipv4/tcp_sigpool.c b/net/ipv4/tcp_sigpool.c
index d8a4f192873a2..10b2e5970c402 100644
--- a/net/ipv4/tcp_sigpool.c
+++ b/net/ipv4/tcp_sigpool.c
@@ -257,7 +257,7 @@ void tcp_sigpool_get(unsigned int id)
 }
 EXPORT_SYMBOL_GPL(tcp_sigpool_get);
 
-int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RCU_BH)
+int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(0, RCU_BH)
 {
 	struct crypto_ahash *hash;
 
-- 
2.47.3
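
For illustration only (not part of the patch): with spin_trylock() annotated
__cond_acquires(true, lock), Clang's context analysis treats the lock as held
only on the branch where the caller observed a non-zero return value. A
minimal sketch of a caller; the struct and function below are hypothetical and
exist only for this example:

#include <linux/spinlock.h>

/* Hypothetical data: a counter protected by a spinlock. */
struct counter {
	spinlock_t lock;
	int value;
};

static bool counter_try_inc(struct counter *c)
{
	if (!spin_trylock(&c->lock))	/* zero return: lock not acquired */
		return false;		/* no unlock expected on this path */
	c->value++;			/* lock held in this branch */
	spin_unlock(&c->lock);
	return true;
}

Conversely, tcp_sigpool_start() is annotated __cond_acquires(0, RCU_BH), so
the analysis considers the RCU BH read-side context entered only on the path
where the function returned zero.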