.lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
};
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key bh_lock_key;
+struct lockdep_map bh_lock_map = {
+ .name = "local_bh",
+ .key = &bh_lock_key,
+ .wait_type_outer = LD_WAIT_FREE,
+ .wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
+ .lock_type = LD_LOCK_PERCPU,
+};
+EXPORT_SYMBOL_GPL(bh_lock_map);
+#endif
+
/**
* local_bh_blocked() - Check for idle whether BH processing is blocked
*
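The map follows the usual pattern for a lockdep-only "virtual lock": there is no data behind it, the lockdep_map exists solely so that acquire/release annotations feed lockdep's dependency and wait-type checks. A minimal sketch of that pattern, with hypothetical names (demo_map, demo_key, demo_enter and demo_exit are not part of the patch), only meaningful with CONFIG_DEBUG_LOCK_ALLOC enabled:

#include <linux/lockdep.h>

/* Hypothetical example map; mirrors the role of bh_lock_map above. */
static struct lock_class_key demo_key;
static struct lockdep_map demo_map =
        STATIC_LOCKDEP_MAP_INIT("demo_map", &demo_key);

static void demo_enter(void)
{
        /* Recursive read acquire: many tasks may be inside concurrently. */
        lock_map_acquire_read(&demo_map);
}

static void demo_exit(void)
{
        lock_map_release(&demo_map);
}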
WARN_ON_ONCE(in_hardirq());
+ lock_map_acquire_read(&bh_lock_map);
+
/* First entry of a task into a BH disabled section? */
if (!current->softirq_disable_cnt) {
if (preemptible()) {
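The read acquisition at the top of __local_bh_disable_ip() orders every lock taken inside a BH-disabled section against bh_lock_map, and the LD_WAIT_CONFIG inner wait type should let lockdep's wait-type checking flag locks whose wait type is above that, e.g. mutexes. A hedged illustration of the misuse this is meant to catch on a PREEMPT_RT build, where might_sleep() alone would not complain because the section stays preemptible (demo_mutex and demo_bad_nesting are hypothetical):

#include <linux/bottom_half.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);        /* hypothetical */

static void demo_bad_nesting(void)
{
        local_bh_disable();             /* read-acquires bh_lock_map (RT path above) */
        /*
         * A mutex has wait type LD_WAIT_SLEEP, which exceeds the
         * LD_WAIT_CONFIG inner wait type of bh_lock_map, so lockdep
         * should report an invalid wait context here.
         */
        mutex_lock(&demo_mutex);
        mutex_unlock(&demo_mutex);
        local_bh_enable();              /* releases bh_lock_map */
}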
WARN_ON_ONCE(in_hardirq());
lockdep_assert_irqs_enabled();
+ lock_map_release(&bh_lock_map);
+
local_irq_save(flags);
curcnt = __this_cpu_read(softirq_ctrl.cnt);
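Both annotations are unconditional, so nested BH-disabled sections simply stack recursive read acquisitions, which lockdep permits as long as every disable is balanced by an enable. Roughly (a sketch, not part of the patch):

#include <linux/bottom_half.h>

static void demo_nesting(void)
{
        local_bh_disable();     /* lock_map_acquire_read(&bh_lock_map) */
        local_bh_disable();     /* nested: a second recursive read, fine */

        local_bh_enable();      /* lock_map_release(&bh_lock_map) */
        local_bh_enable();      /* map dropped again; fully balanced */
}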
/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
+ /* pairs with the lock_map_acquire_read() in ksoftirqd_run_begin() */
+ lock_map_release(&bh_lock_map);
__local_bh_enable(SOFTIRQ_OFFSET, true);
WARN_ON_ONCE(in_interrupt());
local_irq_enable();
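The open-coded release is needed here because the matching acquire happened in ksoftirqd_run_begin() via __local_bh_disable_ip(), while the exit path uses __local_bh_enable(), which does not go through __local_bh_enable_ip() and therefore never drops the map itself. The balance, sketched as a call graph rather than a literal quote of the kernel code:

/*
 * ksoftirqd_run_begin()
 *   __local_bh_disable_ip()
 *     lock_map_acquire_read(&bh_lock_map);   <- acquired here
 *
 * ksoftirqd_run_end()
 *   lock_map_release(&bh_lock_map);          <- must be dropped by hand
 *   __local_bh_enable()                      (no lockdep release inside)
 *   local_irq_enable();
 */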