bool affinity_broken;
desc = irq_to_desc(irq);
- scoped_guard(raw_spinlock, &desc->lock)
+ scoped_guard(raw_spinlock, &desc->lock) {
affinity_broken = migrate_one_irq(desc);
-
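+		/* If the affinity was broken, let a registered notifier know. */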
+ if (affinity_broken && desc->affinity_notify)
+ irq_affinity_schedule_notify_work(desc);
+ }
if (affinity_broken) {
pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
irq, smp_processor_id());
extern int irq_do_set_affinity(struct irq_data *data,
const struct cpumask *dest, bool force);
+extern void irq_affinity_schedule_notify_work(struct irq_desc *desc);
#ifdef CONFIG_SMP
extern int irq_setup_affinity(struct irq_desc *desc);
#else
static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
#endif
-
#define for_each_action_of_desc(desc, act) \
for (act = desc->action; act; act = act->next)
return true;
}
+/**
+ * irq_affinity_schedule_notify_work - Schedule work to notify about affinity change
+ * @desc: Interrupt descriptor whose affinity changed
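+ *
+ * Must be called with @desc->lock held.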
+ */
+void irq_affinity_schedule_notify_work(struct irq_desc *desc)
+{
+ lockdep_assert_held(&desc->lock);
+
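+	/* The queued notify work drops this reference after it has run. */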
+ kref_get(&desc->affinity_notify->kref);
+ if (!schedule_work(&desc->affinity_notify->work)) {
+ /* Work was already scheduled, drop our extra ref */
+ kref_put(&desc->affinity_notify->kref, desc->affinity_notify->release);
+ }
+}
+
int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
bool force)
{
irq_copy_pending(desc, mask);
}
- if (desc->affinity_notify) {
- kref_get(&desc->affinity_notify->kref);
- if (!schedule_work(&desc->affinity_notify->work)) {
- /* Work was already scheduled, drop our extra ref */
- kref_put(&desc->affinity_notify->kref,
- desc->affinity_notify->release);
- }
- }
+ if (desc->affinity_notify)
+ irq_affinity_schedule_notify_work(desc);
+
irqd_set(data, IRQD_AFFINITY_SET);
return ret;