genirq/cpuhotplug: Notify about affinity changes breaking the affinity mask
author Imran Khan <imran.f.khan@oracle.com>
Tue, 13 Jan 2026 14:37:27 +0000 (22:37 +0800)
committer Thomas Gleixner <tglx@kernel.org>
Tue, 13 Jan 2026 20:18:16 +0000 (21:18 +0100)
During CPU offlining the interrupts affined to that CPU are moved to other
online CPUs, which might break the original affinity mask if the outgoing
CPU was the last online CPU in that mask. This change is not propagated to
irq_desc::affinity_notify, which leaves users of the affinity notifier
mechanism with stale information.

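For context, a consumer of the affinity notifier mechanism registers a
struct irq_affinity_notify via irq_set_affinity_notifier() and is invoked
from the scheduled notification work. A minimal sketch of such a consumer
(the my_* names and the passed-in irq are illustrative; only the struct
and the registration call are the actual kernel API):

  #include <linux/interrupt.h>

  static struct irq_affinity_notify my_notify;

  /* Runs from the scheduled work whenever the affinity changed */
  static void my_affinity_notify(struct irq_affinity_notify *notify,
                                 const cpumask_t *mask)
  {
          pr_info("IRQ %u now affine to %*pbl\n", notify->irq,
                  cpumask_pr_args(mask));
  }

  /* Runs when the last reference to the notifier is dropped */
  static void my_affinity_release(struct kref *ref) { }

  static int my_setup_notifier(unsigned int irq)
  {
          my_notify.notify  = my_affinity_notify;
          my_notify.release = my_affinity_release;
          /* The core initializes the kref and the work item */
          return irq_set_affinity_notifier(irq, &my_notify);
  }
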
Avoid this by scheduling affinity change notification work for interrupts
that were affined to the CPU being offlined, if the new target CPU is not
part of the original affinity mask.

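The scheduled work runs the existing notification handler in
kernel/irq/manage.c, which snapshots the relevant mask under the
descriptor lock and invokes the registered callback. Roughly sketched
(simplified; details differ between kernel versions):

  static void irq_affinity_notify(struct work_struct *work)
  {
          struct irq_affinity_notify *notify =
                  container_of(work, struct irq_affinity_notify, work);
          struct irq_desc *desc = irq_to_desc(notify->irq);
          cpumask_var_t cpumask;
          unsigned long flags;

          if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
                  goto out;

          raw_spin_lock_irqsave(&desc->lock, flags);
          /* Prefer a not yet applied pending mask over the current one */
          if (irq_move_pending(&desc->irq_data))
                  irq_get_pending(cpumask, desc);
          else
                  cpumask_copy(cpumask, desc->irq_common_data.affinity);
          raw_spin_unlock_irqrestore(&desc->lock, flags);

          notify->notify(notify, cpumask);
          free_cpumask_var(cpumask);
  out:
          /* Drop the reference taken when the work was scheduled */
          kref_put(&notify->kref, notify->release);
  }
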
Since irq_set_affinity_locked() uses the same logic to schedule affinity
change notification work, split out this logic into a dedicated function
and use it in both places.

[ tglx: Removed the EXPORT(), removed the !SMP stub, moved the prototype,
   added a lockdep assert instead of a comment, fixed up coding style
   and name space. Polished and clarified the change log ]

Signed-off-by: Imran Khan <imran.f.khan@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@kernel.org>
Link: https://patch.msgid.link/20260113143727.1041265-1-imran.f.khan@oracle.com
kernel/irq/cpuhotplug.c
kernel/irq/internals.h
kernel/irq/manage.c

diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 755346ea98196747f690aba2f1a39af9b0b9a16f..cd5689e383b003270bf68213275f762e4cc165e3 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -177,9 +177,11 @@ void irq_migrate_all_off_this_cpu(void)
                bool affinity_broken;
 
                desc = irq_to_desc(irq);
-               scoped_guard(raw_spinlock, &desc->lock)
+               scoped_guard(raw_spinlock, &desc->lock) {
                        affinity_broken = migrate_one_irq(desc);
-
+                       if (affinity_broken && desc->affinity_notify)
+                               irq_affinity_schedule_notify_work(desc);
+               }
                if (affinity_broken) {
                        pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
                                            irq, smp_processor_id());
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 202c50f0fcb2679dd5cdea3a837098b647bb8861..9412e57056f5cb8312ff54690a738c137dca88ee 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -135,6 +135,7 @@ extern bool irq_can_set_affinity_usr(unsigned int irq);
 
 extern int irq_do_set_affinity(struct irq_data *data,
                               const struct cpumask *dest, bool force);
+extern void irq_affinity_schedule_notify_work(struct irq_desc *desc);
 
 #ifdef CONFIG_SMP
 extern int irq_setup_affinity(struct irq_desc *desc);
@@ -142,7 +143,6 @@ extern int irq_setup_affinity(struct irq_desc *desc);
 static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
 #endif
 
-
 #define for_each_action_of_desc(desc, act)                     \
        for (act = desc->action; act; act = act->next)
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index dde1aa62ffe80d0d66a5b371b78d100f172160f7..9927e0893be6a14d724daeba04aab8c939467b1a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -347,6 +347,21 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
        return true;
 }
 
+/**
+ * irq_affinity_schedule_notify_work - Schedule work to notify about affinity change
+ * @desc:  Interrupt descriptor whose affinity changed
+ */
+void irq_affinity_schedule_notify_work(struct irq_desc *desc)
+{
+       lockdep_assert_held(&desc->lock);
+
+       kref_get(&desc->affinity_notify->kref);
+       if (!schedule_work(&desc->affinity_notify->work)) {
+               /* Work was already scheduled, drop our extra ref */
+               kref_put(&desc->affinity_notify->kref, desc->affinity_notify->release);
+       }
+}
+
 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
                            bool force)
 {
@@ -367,14 +382,9 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
                irq_copy_pending(desc, mask);
        }
 
-       if (desc->affinity_notify) {
-               kref_get(&desc->affinity_notify->kref);
-               if (!schedule_work(&desc->affinity_notify->work)) {
-                       /* Work was already scheduled, drop our extra ref */
-                       kref_put(&desc->affinity_notify->kref,
-                                desc->affinity_notify->release);
-               }
-       }
+       if (desc->affinity_notify)
+               irq_affinity_schedule_notify_work(desc);
+
        irqd_set(data, IRQD_AFFINITY_SET);
 
        return ret;