}
pick_again:
+ assert_balance_callbacks_empty(rq);
next = pick_next_task(rq, rq->donor, &rf);
rq->next_class = next->sched_class;
if (sched_proxy_exec()) {
/* !CONFIG_SCHED_CLASS_EXT stub: sched_ext rq clock invalidation is a no-op. */
static inline void scx_rq_clock_invalidate(struct rq *rq) {}
#endif /* !CONFIG_SCHED_CLASS_EXT */
+/*
+ * Assert that no balance callbacks are queued on @rq. The lone tolerated
+ * entry is the balance_push_callback sentinel (installed around CPU
+ * hotplug), which this check deliberately ignores.
+ *
+ * NOTE(review): the warning is gated on IS_ENABLED(CONFIG_PROVE_LOCKING),
+ * so kernels built without lockdep proving will never warn here. The
+ * call site this helper replaces used an unconditional WARN_ON_ONCE() --
+ * confirm that narrowing the check to PROVE_LOCKING builds is intended.
+ */
+static inline void assert_balance_callbacks_empty(struct rq *rq)
+{
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_LOCKING) &&
+ rq->balance_callback &&
+ rq->balance_callback != &balance_push_callback);
+}
+
/*
* Lockdep annotation that avoids accidental unlocks; it's like a
* sticky/continuous lockdep_assert_held().
rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
rf->clock_update_flags = 0;
- WARN_ON_ONCE(rq->balance_callback && rq->balance_callback != &balance_push_callback);
+ assert_balance_callbacks_empty(rq);
}
static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)