_cond_resched(); \
})
-extern int __cond_resched_lock(spinlock_t *lock);
-extern int __cond_resched_rwlock_read(rwlock_t *lock);
-extern int __cond_resched_rwlock_write(rwlock_t *lock);
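+/*
+ * These may drop and re-take the lock internally, but it must be held on
+ * entry and is held again on return; _shared denotes a shared (read) hold.
+ */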
+extern int __cond_resched_lock(spinlock_t *lock) __must_hold(lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock) __must_hold_shared(lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock) __must_hold(lock);
#define MIGHT_RESCHED_RCU_SHIFT 8
#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
(thread_group_leader(p) && !thread_group_empty(p))
extern struct sighand_struct *lock_task_sighand(struct task_struct *task,
- unsigned long *flags);
+ unsigned long *flags)
+ __acquires(&task->sighand->siglock);
static inline void unlock_task_sighand(struct task_struct *task,
unsigned long *flags)
+ __releases(&task->sighand->siglock)
{
spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
* write_lock_irq(&tasklist_lock), neither inside nor outside.
*/
static inline void task_lock(struct task_struct *p)
+ __acquires(&p->alloc_lock)
{
spin_lock(&p->alloc_lock);
}
static inline void task_unlock(struct task_struct *p)
+ __releases(&p->alloc_lock)
{
spin_unlock(&p->alloc_lock);
}
-DEFINE_GUARD(task_lock, struct task_struct *, task_lock(_T), task_unlock(_T))
+DEFINE_LOCK_GUARD_1(task_lock, struct task_struct, task_lock(_T->lock), task_unlock(_T->lock))
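+/*
+ * Teach the analysis that guard(task_lock)(p) holds p->alloc_lock for the
+ * guard's scope.
+ */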
+DECLARE_LOCK_GUARD_1_ATTRS(task_lock, __acquires(&_T->alloc_lock), __releases(&(*(struct task_struct **)_T)->alloc_lock))
+#define class_task_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(task_lock, _T)
#endif /* _LINUX_SCHED_TASK_H */
/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
static inline
void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock(lock);
static inline
void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock_irq(lock);
static inline
void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
struct wake_q_head *wake_q)
+ __releases(lock)
{
guard(preempt)();
raw_spin_unlock_irqrestore(lock, flags);
# SPDX-License-Identifier: GPL-2.0
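+# Enable compiler context analysis for these objects.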
+CONTEXT_ANALYSIS_core.o := y
+CONTEXT_ANALYSIS_fair.o := y
+
# The compilers are complaining about unused variables inside an if(0) scope
# block. This is daft, shut them up.
ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
static struct cpumask sched_core_mask;
static void sched_core_lock(int cpu, unsigned long *flags)
+ __context_unsafe(/* acquires multiple */)
+ __acquires(&runqueues.__lock) /* overapproximation */
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
int t, i = 0;
}
static void sched_core_unlock(int cpu, unsigned long *flags)
+ __context_unsafe(/* releases multiple */)
+ __releases(&runqueues.__lock) /* overapproximation */
{
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
int t;
*/
void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
+	__context_unsafe(/* dynamic lock selection */)
{
raw_spinlock_t *lock;
}
bool raw_spin_rq_trylock(struct rq *rq)
+	__context_unsafe(/* conditional, dynamic lock selection */)
{
raw_spinlock_t *lock;
bool ret;
raw_spin_rq_lock(rq1);
if (__rq_lockp(rq1) != __rq_lockp(rq2))
raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
+ else
+		__acquire(__rq_lockp(rq2)); /* fake acquire */
double_rq_clock_clear_update(rq1, rq2);
}
/*
- * __task_rq_lock - lock the rq @p resides on.
+ * ___task_rq_lock - lock the rq @p resides on.
*/
-struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
- __acquires(rq->lock)
+struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf)
{
struct rq *rq;
/*
* task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
*/
-struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
- __acquires(p->pi_lock)
- __acquires(rq->lock)
+struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
{
struct rq *rq;
*/
static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
struct task_struct *p, int new_cpu)
+ __must_hold(__rq_lockp(rq))
{
lockdep_assert_rq_held(rq);
*/
static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
struct task_struct *p, int dest_cpu)
+ __must_hold(__rq_lockp(rq))
{
/* Affinity changed (again). */
if (!is_cpu_allowed(p, dest_cpu))
*/
flush_smp_call_function_queue();
+ /*
+ * We may change the underlying rq, but the locks held will
+ * appropriately be "transferred" when switching.
+ */
+ context_unsafe_alias(rq);
+
raw_spin_lock(&p->pi_lock);
rq_lock(rq, &rf);
if (!lowest_rq)
goto out_unlock;
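+	/*
+	 * A non-NULL lowest_rq comes back locked; assert it for lockdep and
+	 * the context analysis.
+	 */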
+ lockdep_assert_rq_held(lowest_rq);
+
// XXX validate p is still the highest prio task
if (task_rq(p) == rq) {
move_queued_task_locked(rq, lowest_rq, p);
*/
static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flags *rf,
int dest_cpu, unsigned int flags)
- __releases(rq->lock)
- __releases(p->pi_lock)
+ __releases(__rq_lockp(rq), &p->pi_lock)
{
struct set_affinity_pending my_pending = { }, *pending = NULL;
bool stop_pending, complete = false;
struct affinity_context *ctx,
struct rq *rq,
struct rq_flags *rf)
- __releases(rq->lock)
- __releases(p->pi_lock)
+ __releases(__rq_lockp(rq), &p->pi_lock)
{
const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
const struct cpumask *cpu_valid_mask = cpu_active_mask;
*/
int task_call_func(struct task_struct *p, task_call_f func, void *arg)
{
- struct rq *rq = NULL;
struct rq_flags rf;
int ret;
raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
- if (__task_needs_rq_lock(p))
- rq = __task_rq_lock(p, &rf);
+ if (__task_needs_rq_lock(p)) {
+ struct rq *rq = __task_rq_lock(p, &rf);
- /*
- * At this point the task is pinned; either:
- * - blocked and we're holding off wakeups (pi->lock)
- * - woken, and we're holding off enqueue (rq->lock)
- * - queued, and we're holding off schedule (rq->lock)
- * - running, and we're holding off de-schedule (rq->lock)
- *
- * The called function (@func) can use: task_curr(), p->on_rq and
- * p->__state to differentiate between these states.
- */
- ret = func(p, arg);
+ /*
+ * At this point the task is pinned; either:
+ * - blocked and we're holding off wakeups (pi->lock)
+ * - woken, and we're holding off enqueue (rq->lock)
+ * - queued, and we're holding off schedule (rq->lock)
+ * - running, and we're holding off de-schedule (rq->lock)
+ *
+ * The called function (@func) can use: task_curr(), p->on_rq and
+ * p->__state to differentiate between these states.
+ */
+ ret = func(p, arg);
- if (rq)
__task_rq_unlock(rq, p, &rf);
+ } else {
+ ret = func(p, arg);
+ }
raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
return ret;
static inline void
prepare_lock_switch(struct rq *rq, struct task_struct *next, struct rq_flags *rf)
+ __releases(__rq_lockp(rq))
+ __acquires(__rq_lockp(this_rq()))
{
/*
* Since the runqueue lock will be released by the next
/* this is a valid case when another task releases the spinlock */
rq_lockp(rq)->owner = next;
#endif
+ /*
+ * Model the rq reference switcheroo.
+ */
+ __release(__rq_lockp(rq));
+ __acquire(__rq_lockp(this_rq()));
}
static inline void finish_lock_switch(struct rq *rq)
+ __releases(__rq_lockp(rq))
{
/*
* If we are tracking spinlock dependencies then we have to
static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next)
+ __must_hold(__rq_lockp(rq))
{
kcov_prepare_switch(prev);
sched_info_switch(rq, prev, next);
* because prev may have moved to another CPU.
*/
static struct rq *finish_task_switch(struct task_struct *prev)
- __releases(rq->lock)
+ __releases(__rq_lockp(this_rq()))
{
struct rq *rq = this_rq();
struct mm_struct *mm = rq->prev_mm;
* @prev: the thread we just switched away from.
*/
asmlinkage __visible void schedule_tail(struct task_struct *prev)
- __releases(rq->lock)
+ __releases(__rq_lockp(this_rq()))
{
/*
* New tasks start with FORK_PREEMPT_COUNT, see there and
static __always_inline struct rq *
context_switch(struct rq *rq, struct task_struct *prev,
struct task_struct *next, struct rq_flags *rf)
+ __releases(__rq_lockp(rq))
{
prepare_task_switch(rq, prev, next);
*/
static inline struct task_struct *
__pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ __must_hold(__rq_lockp(rq))
{
const struct sched_class *class;
struct task_struct *p;
static struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ __must_hold(__rq_lockp(rq))
{
struct task_struct *next, *p, *max;
const struct cpumask *smt_mask;
}
static void sched_core_balance(struct rq *rq)
+ __must_hold(__rq_lockp(rq))
{
struct sched_domain *sd;
int cpu = cpu_of(rq);
static struct task_struct *
pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ __must_hold(__rq_lockp(rq))
{
return __pick_next_task(rq, prev, rf);
}
int cpu;
scoped_guard (raw_spinlock_irq, &p->pi_lock) {
+ /*
+ * We may change the underlying rq, but the locks held will
+ * appropriately be "transferred" when switching.
+ */
+ context_unsafe_alias(rq);
+
cpu = select_fallback_rq(rq->cpu, p);
rq_lock(rq, &rf);
* effective when the hotplug motion is down.
*/
static void balance_push(struct rq *rq)
+ __must_hold(__rq_lockp(rq))
{
struct task_struct *push_task = rq->curr;
}
static void task_numa_placement(struct task_struct *p)
+ __context_unsafe(/* conditional locking */)
{
int seq, nid, max_nid = NUMA_NO_NODE;
unsigned long max_faults = 0;
return cfs_rq->avg.load_avg;
}
-static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf);
+static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
+ __must_hold(__rq_lockp(this_rq));
static inline unsigned long task_util(struct task_struct *p)
{
* used to track this state.
*/
static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
+ __must_hold(&cfs_b->lock)
{
int throttled;
struct task_struct *
pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+ __must_hold(__rq_lockp(rq))
{
struct sched_entity *se;
struct task_struct *p;
* > 0 - success, new (fair) tasks present
*/
static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
+ __must_hold(__rq_lockp(this_rq))
{
unsigned long next_balance = jiffies + HZ;
int this_cpu = this_rq->cpu;
return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
}
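+/*
+ * Make this_rq() an inline function; the context-analysis annotations
+ * reference __rq_lockp(this_rq()).
+ */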
+static __always_inline struct rq *__this_rq(void)
+{
+ return this_cpu_ptr(&runqueues);
+}
+
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
-#define this_rq() this_cpu_ptr(&runqueues)
+#define this_rq() __this_rq()
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() raw_cpu_ptr(&runqueues)
}
static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+ __returns_ctx_lock(rq_lockp(rq)) /* alias them */
{
if (rq->core_enabled)
return &rq->core->__lock;
}
static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+ __returns_ctx_lock(rq_lockp(rq)) /* alias them */
{
return &rq->__lock;
}
#endif /* !CONFIG_RT_GROUP_SCHED */
static inline void lockdep_assert_rq_held(struct rq *rq)
+ __assumes_ctx_lock(__rq_lockp(rq))
{
lockdep_assert_held(__rq_lockp(rq));
}
-extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
-extern bool raw_spin_rq_trylock(struct rq *rq);
-extern void raw_spin_rq_unlock(struct rq *rq);
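+/*
+ * Lock contracts for the out-of-line rq lock helpers defined in core.c;
+ * the trylock variant holds the lock only when it returns true.
+ */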
+extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
+ __acquires(__rq_lockp(rq));
+
+extern bool raw_spin_rq_trylock(struct rq *rq)
+ __cond_acquires(true, __rq_lockp(rq));
+
+extern void raw_spin_rq_unlock(struct rq *rq)
+ __releases(__rq_lockp(rq));
static inline void raw_spin_rq_lock(struct rq *rq)
+ __acquires(__rq_lockp(rq))
{
raw_spin_rq_lock_nested(rq, 0);
}
static inline void raw_spin_rq_lock_irq(struct rq *rq)
+ __acquires(__rq_lockp(rq))
{
local_irq_disable();
raw_spin_rq_lock(rq);
}
static inline void raw_spin_rq_unlock_irq(struct rq *rq)
+ __releases(__rq_lockp(rq))
{
raw_spin_rq_unlock(rq);
local_irq_enable();
}
static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
+ __acquires(__rq_lockp(rq))
{
unsigned long flags;
}
static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
+ __releases(__rq_lockp(rq))
{
raw_spin_rq_unlock(rq);
local_irq_restore(flags);
rq->clock_update_flags |= rf->clock_update_flags;
}
-extern
-struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
- __acquires(rq->lock);
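+/*
+ * ___task_rq_lock() returns with the task's rq lock held; the wrapper marks
+ * __rq_lockp() of the returned rq (__ret) as acquired for the analysis.
+ */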
+#define __task_rq_lock(...) __acquire_ret(___task_rq_lock(__VA_ARGS__), __rq_lockp(__ret))
+extern struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf) __acquires_ret;
-extern
-struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
- __acquires(p->pi_lock)
- __acquires(rq->lock);
+#define task_rq_lock(...) __acquire_ret(_task_rq_lock(__VA_ARGS__), __rq_lockp(__ret))
+extern struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+ __acquires(&p->pi_lock) __acquires_ret;
static inline void
__task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
- __releases(rq->lock)
+ __releases(__rq_lockp(rq))
{
rq_unpin_lock(rq, rf);
raw_spin_rq_unlock(rq);
static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
- __releases(rq->lock)
- __releases(p->pi_lock)
+ __releases(__rq_lockp(rq), &p->pi_lock)
{
__task_rq_unlock(rq, p, rf);
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
_T->rq = task_rq_lock(_T->lock, &_T->rf),
task_rq_unlock(_T->rq, _T->lock, &_T->rf),
struct rq *rq; struct rq_flags rf)
+DECLARE_LOCK_GUARD_1_ATTRS(task_rq_lock, __acquires(&_T->pi_lock), __releases(&(*(struct task_struct **)_T)->pi_lock))
+#define class_task_rq_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(task_rq_lock, _T)
DEFINE_LOCK_GUARD_1(__task_rq_lock, struct task_struct,
		    _T->rq = __task_rq_lock(_T->lock, &_T->rf),
		    __task_rq_unlock(_T->rq, _T->lock, &_T->rf),
		    struct rq *rq; struct rq_flags rf)
static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
- __acquires(rq->lock)
+ __acquires(__rq_lockp(rq))
{
raw_spin_rq_lock_irqsave(rq, rf->flags);
rq_pin_lock(rq, rf);
}
static inline void rq_lock_irq(struct rq *rq, struct rq_flags *rf)
- __acquires(rq->lock)
+ __acquires(__rq_lockp(rq))
{
raw_spin_rq_lock_irq(rq);
rq_pin_lock(rq, rf);
}
static inline void rq_lock(struct rq *rq, struct rq_flags *rf)
- __acquires(rq->lock)
+ __acquires(__rq_lockp(rq))
{
raw_spin_rq_lock(rq);
rq_pin_lock(rq, rf);
}
static inline void rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
- __releases(rq->lock)
+ __releases(__rq_lockp(rq))
{
rq_unpin_lock(rq, rf);
raw_spin_rq_unlock_irqrestore(rq, rf->flags);
}
static inline void rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
- __releases(rq->lock)
+ __releases(__rq_lockp(rq))
{
rq_unpin_lock(rq, rf);
raw_spin_rq_unlock_irq(rq);
}
static inline void rq_unlock(struct rq *rq, struct rq_flags *rf)
- __releases(rq->lock)
+ __releases(__rq_lockp(rq))
{
rq_unpin_lock(rq, rf);
raw_spin_rq_unlock(rq);
rq_unlock(_T->lock, &_T->rf),
struct rq_flags rf)
+DECLARE_LOCK_GUARD_1_ATTRS(rq_lock, __acquires(__rq_lockp(_T)), __releases(__rq_lockp(*(struct rq **)_T)));
+#define class_rq_lock_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rq_lock, _T)
+
DEFINE_LOCK_GUARD_1(rq_lock_irq, struct rq,
rq_lock_irq(_T->lock, &_T->rf),
rq_unlock_irq(_T->lock, &_T->rf),
struct rq_flags rf)
+DECLARE_LOCK_GUARD_1_ATTRS(rq_lock_irq, __acquires(__rq_lockp(_T)), __releases(__rq_lockp(*(struct rq **)_T)));
+#define class_rq_lock_irq_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rq_lock_irq, _T)
+
DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
rq_lock_irqsave(_T->lock, &_T->rf),
rq_unlock_irqrestore(_T->lock, &_T->rf),
struct rq_flags rf)
-static inline struct rq *this_rq_lock_irq(struct rq_flags *rf)
- __acquires(rq->lock)
+DECLARE_LOCK_GUARD_1_ATTRS(rq_lock_irqsave, __acquires(__rq_lockp(_T)), __releases(__rq_lockp(*(struct rq **)_T)));
+#define class_rq_lock_irqsave_constructor(_T) WITH_LOCK_GUARD_1_ATTRS(rq_lock_irqsave, _T)
+
+#define this_rq_lock_irq(...) __acquire_ret(_this_rq_lock_irq(__VA_ARGS__), __rq_lockp(__ret))
+static inline struct rq *_this_rq_lock_irq(struct rq_flags *rf) __acquires_ret
{
struct rq *rq;
#define DEFINE_LOCK_GUARD_2(name, type, _lock, _unlock, ...) \
__DEFINE_UNLOCK_GUARD(name, type, _unlock, type *lock2; __VA_ARGS__) \
static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \
+ __no_context_analysis \
{ class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t; \
_lock; return _t; }
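+/*
+ * Two-lock counterpart of DECLARE_LOCK_GUARD_1_ATTRS: the constructor carries
+ * the acquire attribute, and two empty cleanup helpers each carry one release
+ * attribute.
+ */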
+#define DECLARE_LOCK_GUARD_2_ATTRS(_name, _lock, _unlock1, _unlock2) \
+static inline class_##_name##_t class_##_name##_constructor(lock_##_name##_t *_T1, \
+ lock_##_name##_t *_T2) _lock; \
+static __always_inline void __class_##_name##_cleanup_ctx1(class_##_name##_t **_T1) \
+ __no_context_analysis _unlock1 { } \
+static __always_inline void __class_##_name##_cleanup_ctx2(class_##_name##_t **_T2) \
+ __no_context_analysis _unlock2 { }
+#define WITH_LOCK_GUARD_2_ATTRS(_name, _T1, _T2) \
+ class_##_name##_constructor(_T1, _T2), \
+ *__UNIQUE_ID(unlock1) __cleanup(__class_##_name##_cleanup_ctx1) = (void *)(_T1),\
+ *__UNIQUE_ID(unlock2) __cleanup(__class_##_name##_cleanup_ctx2) = (void *)(_T2)
static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
{
return rq1->cpu < rq2->cpu;
}
-extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
+extern void double_rq_lock(struct rq *rq1, struct rq *rq2)
+ __acquires(__rq_lockp(rq1), __rq_lockp(rq2));
#ifdef CONFIG_PREEMPTION
* also adds more overhead and therefore may reduce throughput.
*/
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
- __releases(this_rq->lock)
- __acquires(busiest->lock)
- __acquires(this_rq->lock)
+ __must_hold(__rq_lockp(this_rq))
+ __acquires(__rq_lockp(busiest))
{
raw_spin_rq_unlock(this_rq);
double_rq_lock(this_rq, busiest);
* regardless of entry order into the function.
*/
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
- __releases(this_rq->lock)
- __acquires(busiest->lock)
- __acquires(this_rq->lock)
+ __must_hold(__rq_lockp(this_rq))
+ __acquires(__rq_lockp(busiest))
{
- if (__rq_lockp(this_rq) == __rq_lockp(busiest) ||
- likely(raw_spin_rq_trylock(busiest))) {
+ if (__rq_lockp(this_rq) == __rq_lockp(busiest)) {
+ __acquire(__rq_lockp(busiest)); /* already held */
+ double_rq_clock_clear_update(this_rq, busiest);
+ return 0;
+ }
+
+ if (likely(raw_spin_rq_trylock(busiest))) {
double_rq_clock_clear_update(this_rq, busiest);
return 0;
}
* double_lock_balance - lock the busiest runqueue, this_rq is locked already.
*/
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+ __must_hold(__rq_lockp(this_rq))
+ __acquires(__rq_lockp(busiest))
{
lockdep_assert_irqs_disabled();
}
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
- __releases(busiest->lock)
+ __releases(__rq_lockp(busiest))
{
if (__rq_lockp(this_rq) != __rq_lockp(busiest))
raw_spin_rq_unlock(busiest);
+ else
+ __release(__rq_lockp(busiest)); /* fake release */
lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
}
static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
+ __acquires(l1, l2)
{
if (l1 > l2)
swap(l1, l2);
}
static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
+ __acquires(l1, l2)
{
if (l1 > l2)
swap(l1, l2);
}
static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
+ __acquires(l1, l2)
{
if (l1 > l2)
swap(l1, l2);
}
static inline void double_raw_unlock(raw_spinlock_t *l1, raw_spinlock_t *l2)
+ __releases(l1, l2)
{
raw_spin_unlock(l1);
raw_spin_unlock(l2);
double_raw_lock(_T->lock, _T->lock2),
double_raw_unlock(_T->lock, _T->lock2))
+DECLARE_LOCK_GUARD_2_ATTRS(double_raw_spinlock,
+ __acquires(_T1, _T2),
+ __releases(*(raw_spinlock_t **)_T1),
+ __releases(*(raw_spinlock_t **)_T2));
+#define class_double_raw_spinlock_constructor(_T1, _T2) \
+ WITH_LOCK_GUARD_2_ATTRS(double_raw_spinlock, _T1, _T2)
+
/*
* double_rq_unlock - safely unlock two runqueues
*
* you need to do so manually after calling.
*/
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
- __releases(rq1->lock)
- __releases(rq2->lock)
+ __releases(__rq_lockp(rq1), __rq_lockp(rq2))
{
if (__rq_lockp(rq1) != __rq_lockp(rq2))
raw_spin_rq_unlock(rq2);
else
- __release(rq2->lock);
+ __release(__rq_lockp(rq2)); /* fake release */
raw_spin_rq_unlock(rq1);
}
src:*include/linux/rhashtable.h=emit
src:*include/linux/rwlock*.h=emit
src:*include/linux/rwsem.h=emit
+src:*include/linux/sched*=emit
src:*include/linux/seqlock*.h=emit
src:*include/linux/spinlock*.h=emit
src:*include/linux/srcu*.h=emit