compiler-context-analysis: Remove __cond_lock() function-like helper
author Marco Elver <elver@google.com>
Fri, 19 Dec 2025 15:40:13 +0000 (16:40 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Mon, 5 Jan 2026 15:43:33 +0000 (16:43 +0100)
As discussed in [1], removing __cond_lock() will improve the readability
of trylock code. Now that Sparse context tracking support has been
removed, __cond_lock() itself can go as well.

Change existing APIs to either drop __cond_lock() completely, or make
use of the __cond_acquires() function attribute instead.
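
For example, the typical conversion replaces the Sparse annotation
macro with an attribute on the function declaration itself (a minimal
sketch with a hypothetical "foo" lock API; the real conversions follow
in the diff):

	/* Before: a wrapper macro told Sparse the lock is held on success. */
	int _foo_trylock(spinlock_t *lock);
	#define foo_trylock(lock) __cond_lock(lock, _foo_trylock(lock))

	/* After: the declaration itself states that the lock is acquired
	 * when the function returns true; the wrapper macro disappears. */
	int foo_trylock(spinlock_t *lock) __cond_acquires(true, lock);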

In particular, the spinlock and rwlock implementations had to switch
their trylock_* variants from statement-expression macros to inline
helpers.
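
For instance, a trylock_irqsave-style statement-expression becomes an
annotated inline helper taking &flags, with a thin macro preserving the
old calling convention (hypothetical "foo" names again; see the
spinlock/rwlock hunks below):

	static __always_inline bool _foo_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
		__cond_acquires(true, lock)
	{
		local_irq_save(*flags);		/* disable IRQs before the attempt */
		if (foo_trylock(lock))
			return true;		/* acquired: IRQs stay disabled */
		local_irq_restore(*flags);	/* failed: restore IRQ state */
		return false;
	}
	#define foo_trylock_irqsave(lock, flags) _foo_trylock_irqsave(lock, &(flags))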

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/all/20250207082832.GU7145@noisy.programming.kicks-ass.net/
Link: https://patch.msgid.link/20251219154418.3592607-25-elver@google.com
24 files changed:
Documentation/dev-tools/context-analysis.rst
Documentation/mm/process_addrs.rst
drivers/net/wireless/intel/iwlwifi/iwl-trans.c
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
include/linux/compiler-context-analysis.h
include/linux/lockref.h
include/linux/mm.h
include/linux/rwlock.h
include/linux/rwlock_api_smp.h
include/linux/rwlock_rt.h
include/linux/sched/signal.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h
include/linux/spinlock_api_up.h
include/linux/spinlock_rt.h
kernel/signal.c
kernel/time/posix-timers.c
lib/dec_and_lock.c
lib/lockref.c
mm/memory.c
mm/pgtable-generic.c
tools/include/linux/compiler_types.h

diff --git a/Documentation/dev-tools/context-analysis.rst b/Documentation/dev-tools/context-analysis.rst
index 8dd6c0d695aa224c335fa59ba8091fca9832ce5a..e69896e597b63e44dec61ac2af9d26dba0f76a78 100644
--- a/Documentation/dev-tools/context-analysis.rst
+++ b/Documentation/dev-tools/context-analysis.rst
@@ -112,10 +112,8 @@ Keywords
                  __releases_shared
                  __acquire
                  __release
-                 __cond_lock
                  __acquire_shared
                  __release_shared
-                 __cond_lock_shared
                  __acquire_ret
                  __acquire_shared_ret
                  context_unsafe
diff --git a/Documentation/mm/process_addrs.rst b/Documentation/mm/process_addrs.rst
index 7f2f3e87071df00623582f926b5e0b382a86af79..851680ead45fa16741fa2a4daacddd2d716a6c7b 100644
--- a/Documentation/mm/process_addrs.rst
+++ b/Documentation/mm/process_addrs.rst
@@ -583,7 +583,7 @@ To access PTE-level page tables, a helper like :c:func:`!pte_offset_map_lock` or
 :c:func:`!pte_offset_map` can be used depending on stability requirements.
 These map the page table into kernel memory if required, take the RCU lock, and
 depending on variant, may also look up or acquire the PTE lock.
-See the comment on :c:func:`!__pte_offset_map_lock`.
+See the comment on :c:func:`!pte_offset_map_lock`.
 
 Atomicity
 ^^^^^^^^^
@@ -667,7 +667,7 @@ must be released via :c:func:`!pte_unmap_unlock`.
 .. note:: There are some variants on this, such as
    :c:func:`!pte_offset_map_rw_nolock` when we know we hold the PTE stable but
    for brevity we do not explore this.  See the comment for
-   :c:func:`!__pte_offset_map_lock` for more details.
+   :c:func:`!pte_offset_map_lock` for more details.
 
 When modifying data in ranges we typically only wish to allocate higher page
 tables as necessary, using these locks to avoid races or overwriting anything,
@@ -686,7 +686,7 @@ At the leaf page table, that is the PTE, we can't entirely rely on this pattern
 as we have separate PMD and PTE locks and a THP collapse for instance might have
 eliminated the PMD entry as well as the PTE from under us.
 
-This is why :c:func:`!__pte_offset_map_lock` locklessly retrieves the PMD entry
+This is why :c:func:`!pte_offset_map_lock` locklessly retrieves the PMD entry
 for the PTE, carefully checking it is as expected, before acquiring the
 PTE-specific lock, and then *again* checking that the PMD entry is as expected.
 
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index cc8a84018f70e77966d9e7701586866c3eeb70ca..fa14422466627e14c16cd1b158e6cb818fa5a1f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -548,11 +548,11 @@ int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
        return iwl_trans_pcie_read_config32(trans, ofs, val);
 }
 
-bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
+bool iwl_trans_grab_nic_access(struct iwl_trans *trans)
 {
        return iwl_trans_pcie_grab_nic_access(trans);
 }
-IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access);
+IWL_EXPORT_SYMBOL(iwl_trans_grab_nic_access);
 
 void __releases(nic_access)
 iwl_trans_release_nic_access(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index a552669db6e2a2158dee14510ade1599257b3e67..688f9fee2821043ebf358cbc288969dbfcb4342c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1063,11 +1063,7 @@ int iwl_trans_sw_reset(struct iwl_trans *trans);
 void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
                             u32 mask, u32 value);
 
-bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);
-
-#define iwl_trans_grab_nic_access(trans)               \
-       __cond_lock(nic_access,                         \
-                   likely(_iwl_trans_grab_nic_access(trans)))
+bool iwl_trans_grab_nic_access(struct iwl_trans *trans);
 
 void __releases(nic_access)
 iwl_trans_release_nic_access(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
index 207c56e338dde260734c98977373f691a6c5d486..7b7b35e442f9c205017e98c093ada67609b91550 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h
@@ -553,10 +553,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans);
 void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
                                           struct device *dev);
 
-bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent);
-#define _iwl_trans_pcie_grab_nic_access(trans, silent)         \
-       __cond_lock(nic_access_nobh,                            \
-                   likely(__iwl_trans_pcie_grab_nic_access(trans, silent)))
+bool _iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent);
 
 void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev);
 void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
index 164d060ec617c3e52627b0e4afd1a24db3485684..415a19ea9f060d2e59f7f16b49660ec3a1a2098a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c
@@ -2327,7 +2327,7 @@ EXPORT_SYMBOL(iwl_trans_pcie_reset);
  * This version doesn't disable BHs but rather assumes they're
  * already disabled.
  */
-bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
+bool _iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
 {
        int ret;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2415,7 +2415,7 @@ bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
        bool ret;
 
        local_bh_disable();
-       ret = __iwl_trans_pcie_grab_nic_access(trans, false);
+       ret = _iwl_trans_pcie_grab_nic_access(trans, false);
        if (ret) {
                /* keep BHs disabled until iwl_trans_pcie_release_nic_access */
                return ret;
diff --git a/include/linux/compiler-context-analysis.h b/include/linux/compiler-context-analysis.h
index cb728822343f8bd3b8d7e20744774668ab40497f..4f7559d7ae91bf6f2aa1ff592969e200654b0557 100644
--- a/include/linux/compiler-context-analysis.h
+++ b/include/linux/compiler-context-analysis.h
@@ -341,24 +341,6 @@ static inline void _context_unsafe_alias(void **p) { }
  */
 #define __release(x)           __release_ctx_lock(x)
 
-/**
- * __cond_lock() - function that conditionally acquires a context lock
- *                 exclusively
- * @x: context lock instance pinter
- * @c: boolean expression
- *
- * Return: result of @c
- *
- * No-op function that conditionally acquires context lock instance @x
- * exclusively, if the boolean expression @c is true. The result of @c is the
- * return value; for example:
- *
- * .. code-block:: c
- *
- *     #define spin_trylock(l) __cond_lock(&lock, _spin_trylock(&lock))
- */
-#define __cond_lock(x, c)      __try_acquire_ctx_lock(x, c)
-
 /**
  * __must_hold_shared() - function attribute, caller must hold shared context lock
  *
@@ -417,19 +399,6 @@ static inline void _context_unsafe_alias(void **p) { }
  */
 #define __release_shared(x)    __release_shared_ctx_lock(x)
 
-/**
- * __cond_lock_shared() - function that conditionally acquires a context lock shared
- * @x: context lock instance pinter
- * @c: boolean expression
- *
- * Return: result of @c
- *
- * No-op function that conditionally acquires context lock instance @x with
- * shared access, if the boolean expression @c is true. The result of @c is the
- * return value.
- */
-#define __cond_lock_shared(x, c) __try_acquire_shared_ctx_lock(x, c)
-
 /**
  * __acquire_ret() - helper to acquire context lock of return value
  * @call: call expression
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index 815d871fadfcbb437517e64e8b46c6260f0af547..6ded24cdb4a82c238bd89f946f511e140326ebdf 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -49,9 +49,7 @@ static inline void lockref_init(struct lockref *lockref)
 void lockref_get(struct lockref *lockref);
 int lockref_put_return(struct lockref *lockref);
 bool lockref_get_not_zero(struct lockref *lockref);
-bool lockref_put_or_lock(struct lockref *lockref);
-#define lockref_put_or_lock(_lockref) \
-       (!__cond_lock((_lockref)->lock, !lockref_put_or_lock(_lockref)))
+bool lockref_put_or_lock(struct lockref *lockref) __cond_acquires(false, &lockref->lock);
 
 void lockref_mark_dead(struct lockref *lockref);
 bool lockref_get_not_dead(struct lockref *lockref);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 15076261d0c2eb3ff400d9ea61cb6befccd11eed..f369cb633516b6cb9c62ff4c1ffccbec4e552174 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2975,15 +2975,8 @@ static inline pud_t pud_mkspecial(pud_t pud)
 }
 #endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
 
-extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
-                              spinlock_t **ptl);
-static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
-                                   spinlock_t **ptl)
-{
-       pte_t *ptep;
-       __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
-       return ptep;
-}
+extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+                            spinlock_t **ptl);
 
 #ifdef __PAGETABLE_P4D_FOLDED
 static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
@@ -3337,31 +3330,15 @@ static inline bool pagetable_pte_ctor(struct mm_struct *mm,
        return true;
 }
 
-pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
-static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
-                       pmd_t *pmdvalp)
-{
-       pte_t *pte;
+pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
 
-       __cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp));
-       return pte;
-}
 static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
 {
        return __pte_offset_map(pmd, addr, NULL);
 }
 
-pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
-                       unsigned long addr, spinlock_t **ptlp);
-static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
-                       unsigned long addr, spinlock_t **ptlp)
-{
-       pte_t *pte;
-
-       __cond_lock(RCU, __cond_lock(*ptlp,
-                       pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)));
-       return pte;
-}
+pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+                          unsigned long addr, spinlock_t **ptlp);
 
 pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long addr, spinlock_t **ptlp);
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 151f9d5f32881ae711a78441199d5657e833008b..65a5b55e1bcdc9c8c1510f6f48c4f2dacd3759ba 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -50,8 +50,8 @@ do {                                                          \
  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
  * methods are defined as nops in the case they are not required.
  */
-#define read_trylock(lock)     __cond_lock_shared(lock, _raw_read_trylock(lock))
-#define write_trylock(lock)    __cond_lock(lock, _raw_write_trylock(lock))
+#define read_trylock(lock)     _raw_read_trylock(lock)
+#define write_trylock(lock)    _raw_write_trylock(lock)
 
 #define write_lock(lock)       _raw_write_lock(lock)
 #define read_lock(lock)                _raw_read_lock(lock)
@@ -113,12 +113,7 @@ do {                                                               \
        } while (0)
 #define write_unlock_bh(lock)          _raw_write_unlock_bh(lock)
 
-#define write_trylock_irqsave(lock, flags)             \
-       __cond_lock(lock, ({                            \
-               local_irq_save(flags);                  \
-               _raw_write_trylock(lock) ?              \
-               1 : ({ local_irq_restore(flags); 0; }); \
-       }))
+#define write_trylock_irqsave(lock, flags) _raw_write_trylock_irqsave(lock, &(flags))
 
 #ifdef arch_rwlock_is_contended
 #define rwlock_is_contended(lock) \
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 6d5cc0b7be1f986efd9c997570def0a7eaea437d..d903b17c46ca64f3303db356d762cd608b96d3ad 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -26,8 +26,8 @@ unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
                                                        __acquires(lock);
 unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
                                                        __acquires(lock);
-int __lockfunc _raw_read_trylock(rwlock_t *lock);
-int __lockfunc _raw_write_trylock(rwlock_t *lock);
+int __lockfunc _raw_read_trylock(rwlock_t *lock)       __cond_acquires_shared(true, lock);
+int __lockfunc _raw_write_trylock(rwlock_t *lock)      __cond_acquires(true, lock);
 void __lockfunc _raw_read_unlock(rwlock_t *lock)       __releases_shared(lock);
 void __lockfunc _raw_write_unlock(rwlock_t *lock)      __releases(lock);
 void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)    __releases_shared(lock);
@@ -41,6 +41,16 @@ void __lockfunc
 _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
                                                        __releases(lock);
 
+static inline bool _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+       __cond_acquires(true, lock)
+{
+       local_irq_save(*flags);
+       if (_raw_write_trylock(lock))
+               return true;
+       local_irq_restore(*flags);
+       return false;
+}
+
 #ifdef CONFIG_INLINE_READ_LOCK
 #define _raw_read_lock(lock) __raw_read_lock(lock)
 #endif
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index f64d6d319a479163eb39752771dafb437d9e7094..37b387dcab21bbd02c45f07d11ec6b2ad34c91f4 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -26,11 +26,11 @@ do {                                                        \
 } while (0)
 
 extern void rt_read_lock(rwlock_t *rwlock)     __acquires_shared(rwlock);
-extern int rt_read_trylock(rwlock_t *rwlock);
+extern int rt_read_trylock(rwlock_t *rwlock)   __cond_acquires_shared(true, rwlock);
 extern void rt_read_unlock(rwlock_t *rwlock)   __releases_shared(rwlock);
 extern void rt_write_lock(rwlock_t *rwlock)    __acquires(rwlock);
 extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass)       __acquires(rwlock);
-extern int rt_write_trylock(rwlock_t *rwlock);
+extern int rt_write_trylock(rwlock_t *rwlock)  __cond_acquires(true, rwlock);
 extern void rt_write_unlock(rwlock_t *rwlock)  __releases(rwlock);
 
 static __always_inline void read_lock(rwlock_t *rwlock)
@@ -59,7 +59,7 @@ static __always_inline void read_lock_irq(rwlock_t *rwlock)
                flags = 0;                              \
        } while (0)
 
-#define read_trylock(lock)     __cond_lock_shared(lock, rt_read_trylock(lock))
+#define read_trylock(lock)     rt_read_trylock(lock)
 
 static __always_inline void read_unlock(rwlock_t *rwlock)
        __releases_shared(rwlock)
@@ -123,14 +123,15 @@ static __always_inline void write_lock_irq(rwlock_t *rwlock)
                flags = 0;                              \
        } while (0)
 
-#define write_trylock(lock)    __cond_lock(lock, rt_write_trylock(lock))
+#define write_trylock(lock)    rt_write_trylock(lock)
 
-#define write_trylock_irqsave(lock, flags)             \
-       __cond_lock(lock, ({                            \
-               typecheck(unsigned long, flags);        \
-               flags = 0;                              \
-               rt_write_trylock(lock);                 \
-       }))
+static __always_inline bool _write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+       __cond_acquires(true, rwlock)
+{
+       *flags = 0;
+       return rt_write_trylock(rwlock);
+}
+#define write_trylock_irqsave(lock, flags) _write_trylock_irqsave(lock, &(flags))
 
 static __always_inline void write_unlock(rwlock_t *rwlock)
        __releases(rwlock)
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 7d6449982822e552c315aae521b8d453879cf6e9..a63f65aa5bdde6a241a3a73ce8f7396131894766 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -737,18 +737,8 @@ static inline int thread_group_empty(struct task_struct *p)
 #define delay_group_leader(p) \
                (thread_group_leader(p) && !thread_group_empty(p))
 
-extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
-                                                       unsigned long *flags);
-
-static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
-                                                      unsigned long *flags)
-{
-       struct sighand_struct *ret;
-
-       ret = __lock_task_sighand(task, flags);
-       (void)__cond_lock(&task->sighand->siglock, ret);
-       return ret;
-}
+extern struct sighand_struct *lock_task_sighand(struct task_struct *task,
+                                               unsigned long *flags);
 
 static inline void unlock_task_sighand(struct task_struct *task,
                                                unsigned long *flags)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7e560c7a7b23ab38081bc7f0f362ce6b2c138ec8..396b8c5d6c1b324bf9dc60b588cd87cd351d135f 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -213,7 +213,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  * various methods are defined as nops in the case they are not
  * required.
  */
-#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
+#define raw_spin_trylock(lock) _raw_spin_trylock(lock)
 
 #define raw_spin_lock(lock)    _raw_spin_lock(lock)
 
@@ -284,22 +284,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
        } while (0)
 #define raw_spin_unlock_bh(lock)       _raw_spin_unlock_bh(lock)
 
-#define raw_spin_trylock_bh(lock) \
-       __cond_lock(lock, _raw_spin_trylock_bh(lock))
+#define raw_spin_trylock_bh(lock)      _raw_spin_trylock_bh(lock)
 
-#define raw_spin_trylock_irq(lock)                     \
-       __cond_lock(lock, ({                            \
-               local_irq_disable();                    \
-               _raw_spin_trylock(lock) ?               \
-               1 : ({ local_irq_enable(); 0;  });      \
-       }))
+#define raw_spin_trylock_irq(lock)     _raw_spin_trylock_irq(lock)
 
-#define raw_spin_trylock_irqsave(lock, flags)          \
-       __cond_lock(lock, ({                            \
-               local_irq_save(flags);                  \
-               _raw_spin_trylock(lock) ?               \
-               1 : ({ local_irq_restore(flags); 0; }); \
-       }))
+#define raw_spin_trylock_irqsave(lock, flags) _raw_spin_trylock_irqsave(lock, &(flags))
 
 #ifndef CONFIG_PREEMPT_RT
 /* Include rwlock functions for !RT */
@@ -433,8 +422,12 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
        return raw_spin_trylock_irq(&lock->rlock);
 }
 
-#define spin_trylock_irqsave(lock, flags)                      \
-       __cond_lock(lock, raw_spin_trylock_irqsave(spinlock_check(lock), flags))
+static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+       __cond_acquires(true, lock) __no_context_analysis
+{
+       return raw_spin_trylock_irqsave(spinlock_check(lock), *flags);
+}
+#define spin_trylock_irqsave(lock, flags) _spin_trylock_irqsave(lock, &(flags))
 
 /**
  * spin_is_locked() - Check whether a spinlock is locked.
@@ -512,23 +505,17 @@ static inline int rwlock_needbreak(rwlock_t *lock)
  * Decrements @atomic by 1.  If the result is 0, returns true and locks
  * @lock.  Returns false for all other cases.
  */
-extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
-#define atomic_dec_and_lock(atomic, lock) \
-               __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+extern int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock) __cond_acquires(true, lock);
 
 extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
-                                       unsigned long *flags);
-#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
-               __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+                                       unsigned long *flags) __cond_acquires(true, lock);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) _atomic_dec_and_lock_irqsave(atomic, lock, &(flags))
 
-extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
-#define atomic_dec_and_raw_lock(atomic, lock) \
-               __cond_lock(lock, _atomic_dec_and_raw_lock(atomic, lock))
+extern int atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock) __cond_acquires(true, lock);
 
 extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
-                                       unsigned long *flags);
-#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) \
-               __cond_lock(lock, _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags)))
+                                           unsigned long *flags) __cond_acquires(true, lock);
+#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) _atomic_dec_and_raw_lock_irqsave(atomic, lock, &(flags))
 
 int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                             size_t max_size, unsigned int cpu_mult,
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 7e7d7d37321325fc2d462356bbb253b444880fc9..bda5e7a390cd0b14246bdde4dde01ba3e637662a 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -95,6 +95,26 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
        return 0;
 }
 
+static __always_inline bool _raw_spin_trylock_irq(raw_spinlock_t *lock)
+       __cond_acquires(true, lock)
+{
+       local_irq_disable();
+       if (_raw_spin_trylock(lock))
+               return true;
+       local_irq_enable();
+       return false;
+}
+
+static __always_inline bool _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
+       __cond_acquires(true, lock)
+{
+       local_irq_save(*flags);
+       if (_raw_spin_trylock(lock))
+               return true;
+       local_irq_restore(*flags);
+       return false;
+}
+
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
  * even on CONFIG_PREEMPTION, because lockdep assumes that interrupts are
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 018f5aabc1be127e54d59579746bd1c9ab8808d9..a9d5c7c66e032b82df96922a93d26fcc3a747a17 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
  * flags straight, to suppress compiler warnings of unused lock
  * variables, and to add the proper checker annotations:
  */
-#define ___LOCK_void(lock) \
-  do { (void)(lock); } while (0)
-
 #define ___LOCK_(lock) \
-  do { __acquire(lock); ___LOCK_void(lock); } while (0)
+  do { __acquire(lock); (void)(lock); } while (0)
 
 #define ___LOCK_shared(lock) \
-  do { __acquire_shared(lock); ___LOCK_void(lock); } while (0)
+  do { __acquire_shared(lock); (void)(lock); } while (0)
 
 #define __LOCK(lock, ...) \
   do { preempt_disable(); ___LOCK_##__VA_ARGS__(lock); } while (0)
 #define _raw_spin_lock_irqsave(lock, flags)    __LOCK_IRQSAVE(lock, flags)
 #define _raw_read_lock_irqsave(lock, flags)    __LOCK_IRQSAVE(lock, flags, shared)
 #define _raw_write_lock_irqsave(lock, flags)   __LOCK_IRQSAVE(lock, flags)
-#define _raw_spin_trylock(lock)                        ({ __LOCK(lock, void); 1; })
-#define _raw_read_trylock(lock)                        ({ __LOCK(lock, void); 1; })
-#define _raw_write_trylock(lock)                       ({ __LOCK(lock, void); 1; })
-#define _raw_spin_trylock_bh(lock)             ({ __LOCK_BH(lock, void); 1; })
+
+static __always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
+       __cond_acquires(true, lock)
+{
+       __LOCK(lock);
+       return 1;
+}
+
+static __always_inline int _raw_spin_trylock_bh(raw_spinlock_t *lock)
+       __cond_acquires(true, lock)
+{
+       __LOCK_BH(lock);
+       return 1;
+}
+
+static __always_inline int _raw_spin_trylock_irq(raw_spinlock_t *lock)
+       __cond_acquires(true, lock)
+{
+       __LOCK_IRQ(lock);
+       return 1;
+}
+
+static __always_inline int _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
+       __cond_acquires(true, lock)
+{
+       __LOCK_IRQSAVE(lock, *(flags));
+       return 1;
+}
+
+static __always_inline int _raw_read_trylock(rwlock_t *lock)
+       __cond_acquires_shared(true, lock)
+{
+       __LOCK(lock, shared);
+       return 1;
+}
+
+static __always_inline int _raw_write_trylock(rwlock_t *lock)
+       __cond_acquires(true, lock)
+{
+       __LOCK(lock);
+       return 1;
+}
+
+static __always_inline int _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
+       __cond_acquires(true, lock)
+{
+       __LOCK_IRQSAVE(lock, *(flags));
+       return 1;
+}
+
 #define _raw_spin_unlock(lock)                 __UNLOCK(lock)
 #define _raw_read_unlock(lock)                 __UNLOCK(lock, shared)
 #define _raw_write_unlock(lock)                        __UNLOCK(lock)
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index 6bab73ee1384ab25dd52f4c6c3e3f01c2190f0d5..0a585768358f13f51674fe9dbe14517963eda74b 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -37,8 +37,8 @@ extern void rt_spin_lock_nested(spinlock_t *lock, int subclass)       __acquires(lock)
 extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
 extern void rt_spin_unlock(spinlock_t *lock)   __releases(lock);
 extern void rt_spin_lock_unlock(spinlock_t *lock);
-extern int rt_spin_trylock_bh(spinlock_t *lock);
-extern int rt_spin_trylock(spinlock_t *lock);
+extern int rt_spin_trylock_bh(spinlock_t *lock) __cond_acquires(true, lock);
+extern int rt_spin_trylock(spinlock_t *lock) __cond_acquires(true, lock);
 
 static __always_inline void spin_lock(spinlock_t *lock)
        __acquires(lock)
@@ -130,21 +130,19 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
        rt_spin_unlock(lock);
 }
 
-#define spin_trylock(lock)                             \
-       __cond_lock(lock, rt_spin_trylock(lock))
+#define spin_trylock(lock)     rt_spin_trylock(lock)
 
-#define spin_trylock_bh(lock)                          \
-       __cond_lock(lock, rt_spin_trylock_bh(lock))
+#define spin_trylock_bh(lock)  rt_spin_trylock_bh(lock)
 
-#define spin_trylock_irq(lock)                         \
-       __cond_lock(lock, rt_spin_trylock(lock))
+#define spin_trylock_irq(lock) rt_spin_trylock(lock)
 
-#define spin_trylock_irqsave(lock, flags)              \
-       __cond_lock(lock, ({                            \
-               typecheck(unsigned long, flags);        \
-               flags = 0;                              \
-               rt_spin_trylock(lock);                  \
-       }))
+static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+       __cond_acquires(true, lock)
+{
+       *flags = 0;
+       return rt_spin_trylock(lock);
+}
+#define spin_trylock_irqsave(lock, flags) _spin_trylock_irqsave(lock, &(flags))
 
 #define spin_is_contended(lock)                (((void)(lock), 0))
 
diff --git a/kernel/signal.c b/kernel/signal.c
index e42b8bd6922fcb08a09de7b35e8160a5acf79133..d65d0fe24bfb8c6f859ce47f36ab975308b1b598 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1355,8 +1355,8 @@ int zap_other_threads(struct task_struct *p)
        return count;
 }
 
-struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
-                                          unsigned long *flags)
+struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+                                        unsigned long *flags)
 {
        struct sighand_struct *sighand;
 
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 80a8a09a21a0ae7e2d436e4f01c1ce492434ace5..413e2389f0a53f6ca310d2b9a1d2196c686eec89 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -66,14 +66,7 @@ static const struct k_clock clock_realtime, clock_monotonic;
 #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
 #endif
 
-static struct k_itimer *__lock_timer(timer_t timer_id);
-
-#define lock_timer(tid)                                                        \
-({     struct k_itimer *__timr;                                        \
-       __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid));      \
-       __timr;                                                         \
-})
-
+static struct k_itimer *lock_timer(timer_t timer_id);
 static inline void unlock_timer(struct k_itimer *timr)
 {
        if (likely((timr)))
@@ -85,7 +78,7 @@ static inline void unlock_timer(struct k_itimer *timr)
 
 #define scoped_timer                           (scope)
 
-DEFINE_CLASS(lock_timer, struct k_itimer *, unlock_timer(_T), __lock_timer(id), timer_t id);
+DEFINE_CLASS(lock_timer, struct k_itimer *, unlock_timer(_T), lock_timer(id), timer_t id);
 DEFINE_CLASS_IS_COND_GUARD(lock_timer);
 
 static struct timer_hash_bucket *hash_bucket(struct signal_struct *sig, unsigned int nr)
@@ -600,7 +593,7 @@ COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
 }
 #endif
 
-static struct k_itimer *__lock_timer(timer_t timer_id)
+static struct k_itimer *lock_timer(timer_t timer_id)
 {
        struct k_itimer *timr;
 
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 1dcca8f2e194204e63fb8bf443c71b052dfab7ee..8c7c398fd7701a243c251c5a9d3e7dcaa8beeb37 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -18,7 +18,7 @@
  * because the spin-lock and the decrement must be
  * "atomic".
  */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
@@ -32,7 +32,7 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
        return 0;
 }
 
-EXPORT_SYMBOL(_atomic_dec_and_lock);
+EXPORT_SYMBOL(atomic_dec_and_lock);
 
 int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
                                 unsigned long *flags)
@@ -50,7 +50,7 @@ int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
 }
 EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
 
-int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
+int atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
 {
        /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
        if (atomic_add_unless(atomic, -1, 1))
@@ -63,7 +63,7 @@ int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
        raw_spin_unlock(lock);
        return 0;
 }
-EXPORT_SYMBOL(_atomic_dec_and_raw_lock);
+EXPORT_SYMBOL(atomic_dec_and_raw_lock);
 
 int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
                                     unsigned long *flags)
diff --git a/lib/lockref.c b/lib/lockref.c
index 9210fc6ae71499b2e945f731b30fa44c2ee433c4..5d8e3ef3860ee1290dbde1d01edb6f992b60fac5 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -105,7 +105,6 @@ EXPORT_SYMBOL(lockref_put_return);
  * @lockref: pointer to lockref structure
  * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
  */
-#undef lockref_put_or_lock
 bool lockref_put_or_lock(struct lockref *lockref)
 {
        CMPXCHG_LOOP(
diff --git a/mm/memory.c b/mm/memory.c
index 2a55edc48a6562c7af235121eb6fb9abdd183697..b751e1f85abc4e20f302a99f077916128697af2c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2210,8 +2210,8 @@ static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
        return pmd;
 }
 
-pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
-                       spinlock_t **ptl)
+pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+                     spinlock_t **ptl)
 {
        pmd_t *pmd = walk_to_pmd(mm, addr);
 
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index d3aec7a9926adbb42593a1be61b883ffcf6c7131..af7966169d6955323ecc3a7b2b921f704d7b2b60 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -280,7 +280,7 @@ static unsigned long pmdp_get_lockless_start(void) { return 0; }
 static void pmdp_get_lockless_end(unsigned long irqflags) { }
 #endif
 
-pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
+pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
 {
        unsigned long irqflags;
        pmd_t pmdval;
@@ -332,13 +332,12 @@ pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
 }
 
 /*
- * pte_offset_map_lock(mm, pmd, addr, ptlp), and its internal implementation
- * __pte_offset_map_lock() below, is usually called with the pmd pointer for
- * addr, reached by walking down the mm's pgd, p4d, pud for addr: either while
- * holding mmap_lock or vma lock for read or for write; or in truncate or rmap
- * context, while holding file's i_mmap_lock or anon_vma lock for read (or for
- * write). In a few cases, it may be used with pmd pointing to a pmd_t already
- * copied to or constructed on the stack.
+ * pte_offset_map_lock(mm, pmd, addr, ptlp) is usually called with the pmd
+ * pointer for addr, reached by walking down the mm's pgd, p4d, pud for addr:
+ * either while holding mmap_lock or vma lock for read or for write; or in
+ * truncate or rmap context, while holding file's i_mmap_lock or anon_vma lock
+ * for read (or for write). In a few cases, it may be used with pmd pointing to
+ * a pmd_t already copied to or constructed on the stack.
  *
  * When successful, it returns the pte pointer for addr, with its page table
  * kmapped if necessary (when CONFIG_HIGHPTE), and locked against concurrent
@@ -389,8 +388,8 @@ pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
  * table, and may not use RCU at all: "outsiders" like khugepaged should avoid
  * pte_offset_map() and co once the vma is detached from mm or mm_users is zero.
  */
-pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
-                            unsigned long addr, spinlock_t **ptlp)
+pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
+                          unsigned long addr, spinlock_t **ptlp)
 {
        spinlock_t *ptl;
        pmd_t pmdval;
diff --git a/tools/include/linux/compiler_types.h b/tools/include/linux/compiler_types.h
index d09f9dc172a486875e2e62cf8550a69f24c9beed..067a5b4e0f7b2751fdfff53812afbd1936181668 100644
--- a/tools/include/linux/compiler_types.h
+++ b/tools/include/linux/compiler_types.h
@@ -20,7 +20,6 @@
 # define __releases(x) __attribute__((context(x,1,0)))
 # define __acquire(x)  __context__(x,1)
 # define __release(x)  __context__(x,-1)
-# define __cond_lock(x,c)      ((c) ? ({ __acquire(x); 1; }) : 0)
 #else /* __CHECKER__ */
 /* context/locking */
 # define __must_hold(x)
@@ -28,7 +27,6 @@
 # define __releases(x)
 # define __acquire(x)  (void)0
 # define __release(x)  (void)0
-# define __cond_lock(x,c) (c)
 #endif /* __CHECKER__ */
 
 /* Compiler specific macros. */