hpfs-don-t-truncate-the-file-when-delete-fails.patch
x86-mpx-fix-off-by-one-comparison-with-nr_registers.patch
x86-entry-compat-add-missing-clac-to-entry_int80_32.patch
+x86-irq-call-chip-irq_set_affinity-in-proper-context.patch
+x86-irq-fix-a-race-in-x86_vector_free_irqs.patch
+x86-irq-validate-that-irq-descriptor-is-still-active.patch
+x86-irq-do-not-use-apic_chip_data.old_domain-as-temporary-buffer.patch
+x86-irq-reorganize-the-return-path-in-assign_irq_vector.patch
+x86-irq-reorganize-the-search-in-assign_irq_vector.patch
+x86-irq-check-vector-allocation-early.patch
+x86-irq-copy-vectormask-instead-of-an-and-operation.patch
+x86-irq-get-rid-of-code-duplication.patch
+x86-irq-remove-offline-cpus-from-vector-cleanup.patch
+x86-irq-clear-move_in_progress-before-sending-cleanup-ipi.patch
+x86-irq-remove-the-cpumask-allocation-from-send_cleanup_vector.patch
+x86-irq-remove-outgoing-cpu-from-vector-cleanup-mask.patch
+x86-irq-call-irq_force_move_complete-with-irq-descriptor.patch
+x86-irq-plug-vector-cleanup-race.patch
--- /dev/null
+From e23b257c293ce4bcc8cabb2aa3097b6ed8a8261a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Jan 2016 08:43:38 +0100
+Subject: x86/irq: Call chip->irq_set_affinity in proper context
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit e23b257c293ce4bcc8cabb2aa3097b6ed8a8261a upstream.
+
+setup_ioapic_dest() calls irqchip->irq_set_affinity() completely
+unprotected. That's wrong in several aspects:
+
+ - it opens a race window where irq_set_affinity() can be interrupted and the
+   irq chip left in an inconsistent state.
+
+ - it triggers a lockdep splat when we fix the vector race for 4.3+ because
+   the vector lock is taken with interrupts enabled.
+
+The proper calling convention is with the irq descriptor lock held and
+interrupts disabled.
+
+Reported-and-tested-by: Borislav Petkov <bp@alien8.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Cc: Joe Lawrence <joe.lawrence@stratus.com>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.11.1601140919420.3575@nanos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/io_apic.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
+ {
+ int pin, ioapic, irq, irq_entry;
+ const struct cpumask *mask;
++ struct irq_desc *desc;
+ struct irq_data *idata;
+ struct irq_chip *chip;
+
+@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
+ if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
+ continue;
+
+- idata = irq_get_irq_data(irq);
++ desc = irq_to_desc(irq);
++ raw_spin_lock_irq(&desc->lock);
++ idata = irq_desc_get_irq_data(desc);
+
+ /*
+ * Honour affinities which have been set in early boot
+@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
+ /* Might be lapic_chip for irq 0 */
+ if (chip->irq_set_affinity)
+ chip->irq_set_affinity(idata, mask, false);
++ raw_spin_unlock_irq(&desc->lock);
+ }
+ }
+ #endif
--- /dev/null
+From 90a2282e23f0522e4b3f797ad447c5e91bf7fe32 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:53 +0000
+Subject: x86/irq: Call irq_force_move_complete with irq descriptor
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 90a2282e23f0522e4b3f797ad447c5e91bf7fe32 upstream.
+
+First of all there is no point in looking up the irq descriptor again, but we
+also need the descriptor for the final cleanup race fix in the next
+patch. Make that change separately. No functional difference.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160107.125211743@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/irq.h | 5 +++--
+ arch/x86/kernel/apic/vector.c | 11 +++++++----
+ arch/x86/kernel/irq.c | 2 +-
+ 3 files changed, 11 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/include/asm/irq.h
++++ b/arch/x86/include/asm/irq.h
+@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
+
+ #define __ARCH_HAS_DO_SOFTIRQ
+
++struct irq_desc;
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ #include <linux/cpumask.h>
+ extern int check_irq_vectors_for_cpu_disable(void);
+ extern void fixup_irqs(void);
+-extern void irq_force_complete_move(int);
++extern void irq_force_complete_move(struct irq_desc *desc);
+ #endif
+
+ #ifdef CONFIG_HAVE_KVM
+@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_h
+ extern void (*x86_platform_ipi_callback)(void);
+ extern void native_init_IRQ(void);
+
+-struct irq_desc;
+ extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
+
+ extern __visible unsigned int do_IRQ(struct pt_regs *regs);
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -628,10 +628,14 @@ void irq_complete_move(struct irq_cfg *c
+ __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
+ }
+
+-void irq_force_complete_move(int irq)
++/*
++ * Called with @desc->lock held and interrupts disabled.
++ */
++void irq_force_complete_move(struct irq_desc *desc)
+ {
+- struct irq_cfg *cfg = irq_cfg(irq);
+- struct apic_chip_data *data;
++ struct irq_data *irqdata = irq_desc_get_irq_data(desc);
++ struct apic_chip_data *data = apic_chip_data(irqdata);
++ struct irq_cfg *cfg = data ? &data->cfg : NULL;
+
+ if (!cfg)
+ return;
+@@ -645,7 +649,6 @@ void irq_force_complete_move(int irq)
+ * the way out.
+ */
+ raw_spin_lock(&vector_lock);
+- data = container_of(cfg, struct apic_chip_data, cfg);
+ cpumask_clear_cpu(smp_processor_id(), data->old_domain);
+ raw_spin_unlock(&vector_lock);
+ }
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -462,7 +462,7 @@ void fixup_irqs(void)
+ * non intr-remapping case, we can't wait till this interrupt
+ * arrives at this cpu before completing the irq move.
+ */
+- irq_force_complete_move(irq);
++ irq_force_complete_move(desc);
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ break_affinity = 1;
--- /dev/null
+From 3716fd27a604d61a91cda47083504971486b80f1 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:48 +0000
+Subject: x86/irq: Check vector allocation early
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 3716fd27a604d61a91cda47083504971486b80f1 upstream.
+
+__assign_irq_vector() uses the vector_cpumask which is assigned by
+apic->vector_allocation_domain() without doing basic sanity checks. That can
+result in a situation where the final assignment of a newly found vector
+fails in apic->cpu_mask_to_apicid_and(). So we have to do rollbacks for no
+reason.
+
+apic->cpu_mask_to_apicid_and() only fails if
+
+ vector_cpumask & requested_cpumask & cpu_online_mask
+
+is empty.
+
+Check for this condition right away and, if the result is empty, immediately try
+the next possible cpu in the requested mask. So in case of a failure the old
+setting is unchanged and we can remove the rollback code.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160106.561877324@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 38 +++++++++++++++++++++++++-------------
+ 1 file changed, 25 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -30,7 +30,7 @@ struct apic_chip_data {
+
+ struct irq_domain *x86_vector_domain;
+ static DEFINE_RAW_SPINLOCK(vector_lock);
+-static cpumask_var_t vector_cpumask, searched_cpumask;
++static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
+ static struct irq_chip lapic_controller;
+ #ifdef CONFIG_X86_IO_APIC
+ static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
+@@ -128,8 +128,20 @@ static int __assign_irq_vector(int irq,
+ while (cpu < nr_cpu_ids) {
+ int new_cpu, vector, offset;
+
++ /* Get the possible target cpus for @mask/@cpu from the apic */
+ apic->vector_allocation_domain(cpu, vector_cpumask, mask);
+
++ /*
++ * Clear the offline cpus from @vector_cpumask for searching
++ * and verify whether the result overlaps with @mask. If true,
++ * then the call to apic->cpu_mask_to_apicid_and() will
++ * succeed as well. If not, no point in trying to find a
++ * vector in this mask.
++ */
++ cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
++ if (!cpumask_intersects(vector_searchmask, mask))
++ goto next_cpu;
++
+ if (cpumask_subset(vector_cpumask, d->domain)) {
+ if (cpumask_equal(vector_cpumask, d->domain))
+ goto success;
+@@ -162,7 +174,7 @@ next:
+ if (test_bit(vector, used_vectors))
+ goto next;
+
+- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
++ for_each_cpu(new_cpu, vector_searchmask) {
+ if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
+ goto next;
+ }
+@@ -174,7 +186,7 @@ next:
+ d->move_in_progress =
+ cpumask_intersects(d->old_domain, cpu_online_mask);
+ }
+- for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
++ for_each_cpu(new_cpu, vector_searchmask)
+ per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
+ d->cfg.vector = vector;
+ cpumask_copy(d->domain, vector_cpumask);
+@@ -196,8 +208,14 @@ next_cpu:
+ return -ENOSPC;
+
+ success:
+- /* cache destination APIC IDs into cfg->dest_apicid */
+- return apic->cpu_mask_to_apicid_and(mask, d->domain, &d->cfg.dest_apicid);
++ /*
++ * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
++ * as we already established, that mask & d->domain & cpu_online_mask
++ * is not empty.
++ */
++ BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
++ &d->cfg.dest_apicid));
++ return 0;
+ }
+
+ static int assign_irq_vector(int irq, struct apic_chip_data *data,
+@@ -407,6 +425,7 @@ int __init arch_early_irq_init(void)
+ arch_init_htirq_domain(x86_vector_domain);
+
+ BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
++ BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
+ BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
+
+ return arch_early_ioapic_init();
+@@ -496,14 +515,7 @@ static int apic_set_affinity(struct irq_
+ return -EINVAL;
+
+ err = assign_irq_vector(irq, data, dest);
+- if (err) {
+- if (assign_irq_vector(irq, data,
+- irq_data_get_affinity_mask(irq_data)))
+- pr_err("Failed to recover vector for irq %d\n", irq);
+- return err;
+- }
+-
+- return IRQ_SET_MASK_OK;
++ return err ? err : IRQ_SET_MASK_OK;
+ }
+
+ static struct irq_chip lapic_controller = {
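
As a plain user-space illustration of the early check described in the changelog
above (simplified 64-bit masks instead of cpumask_t, so purely a sketch and not
kernel code): before any vector search, test whether the online subset of the
apic-supplied mask overlaps the requested mask at all.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Model cpumasks as 64-bit masks; bit N stands for cpu N. */
    static bool worth_searching(uint64_t vector_cpumask, uint64_t requested,
    			    uint64_t online)
    {
    	/* vector_searchmask = vector_cpumask & cpu_online_mask */
    	uint64_t searchmask = vector_cpumask & online;

    	/* cpumask_intersects(vector_searchmask, mask) */
    	return (searchmask & requested) != 0;
    }

    int main(void)
    {
    	/* cpus 0-3 online, request targets cpu 2, apic offers cpus 0-1 only */
    	printf("%d\n", worth_searching(0x3, 0x4, 0xf));	/* 0 -> next_cpu */
    	/* apic offers cpus 2-3 instead */
    	printf("%d\n", worth_searching(0xc, 0x4, 0xf));	/* 1 -> search here */
    	return 0;
    }
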
--- /dev/null
+From c1684f5035b60e9f98566493e869496fb5de1d89 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:51 +0000
+Subject: x86/irq: Clear move_in_progress before sending cleanup IPI
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit c1684f5035b60e9f98566493e869496fb5de1d89 upstream.
+
+send_cleanup_vector() fiddles with the old_domain mask unprotected because it
+relies on the protection by the move_in_progress flag. But this is fatal, as
+the flag is reset after the IPI has been sent. So a cpu which receives the IPI
+can still see the flag set and therefore ignores the cleanup request. If no
+other cleanup request happens then the vector stays stale on that cpu and in
+case of an irq removal the vector still persists. That can lead to a
+use-after-free when the next cleanup IPI happens.
+
+Protect the code with vector_lock and clear move_in_progress before sending
+the IPI.
+
+This does not plug the race which Joe reported because:
+
+CPU0 CPU1 CPU2
+lock_vector()
+data->move_in_progress=0
+sendIPI()
+unlock_vector()
+ set_affinity()
+ assign_irq_vector()
+ lock_vector() handle_IPI
+ move_in_progress = 1 lock_vector()
+ unlock_vector()
+ move_in_progress == 1
+
+The full fix comes with a later patch.
+
+Reported-and-tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160106.892412198@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -530,6 +530,8 @@ static void __send_cleanup_vector(struct
+ {
+ cpumask_var_t cleanup_mask;
+
++ raw_spin_lock(&vector_lock);
++ data->move_in_progress = 0;
+ if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+ unsigned int i;
+
+@@ -541,7 +543,7 @@ static void __send_cleanup_vector(struct
+ apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+ free_cpumask_var(cleanup_mask);
+ }
+- data->move_in_progress = 0;
++ raw_spin_unlock(&vector_lock);
+ }
+
+ void send_cleanup_vector(struct irq_cfg *cfg)
--- /dev/null
+From 9ac15b7a8af4cf3337a101498c0ed690d23ade75 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:49 +0000
+Subject: x86/irq: Copy vectormask instead of an AND operation
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 9ac15b7a8af4cf3337a101498c0ed690d23ade75 upstream.
+
+In the case that the new vector mask is a subset of the existing mask there is
+no point in doing an AND operation of currentmask & newmask. The result is
+newmask. So we can simply copy the new mask to the current mask and be done
+with it. Preparatory patch for further consolidation.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160106.640253454@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -154,7 +154,7 @@ static int __assign_irq_vector(int irq,
+ vector_cpumask);
+ d->move_in_progress =
+ cpumask_intersects(d->old_domain, cpu_online_mask);
+- cpumask_and(d->domain, d->domain, vector_cpumask);
++ cpumask_copy(d->domain, vector_cpumask);
+ goto success;
+ }
+
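
The identity the changelog relies on can be sanity-checked in a few lines of
ordinary C (again a simplified 64-bit mask model, not kernel code): when the new
mask is a subset of the current domain, ANDing the two just reproduces the new
mask, so a straight copy is equivalent and cheaper.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint64_t domain  = 0x0f;	/* current domain: cpus 0-3 */
    	uint64_t newmask = 0x05;	/* new mask: cpus 0 and 2, a subset */

    	assert((newmask & ~domain) == 0);		/* cpumask_subset() */
    	assert((domain & newmask) == newmask);		/* AND == copy */
    	printf("and=%#llx copy=%#llx\n",
    	       (unsigned long long)(domain & newmask),
    	       (unsigned long long)newmask);
    	return 0;
    }
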
--- /dev/null
+From 8a580f70f6936ec095da217018cdeeb5835c0207 Mon Sep 17 00:00:00 2001
+From: Jiang Liu <jiang.liu@linux.intel.com>
+Date: Thu, 31 Dec 2015 16:30:46 +0000
+Subject: x86/irq: Do not use apic_chip_data.old_domain as temporary buffer
+
+From: Jiang Liu <jiang.liu@linux.intel.com>
+
+commit 8a580f70f6936ec095da217018cdeeb5835c0207 upstream.
+
+Function __assign_irq_vector() makes use of apic_chip_data.old_domain as a
+temporary buffer, which is in the way of using apic_chip_data.old_domain for
+synchronizing the vector cleanup with the vector assignment code.
+
+Use a proper temporary cpumask for this.
+
+[ tglx: Renamed the mask to searched_cpumask for clarity ]
+
+Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/1450880014-11741-1-git-send-email-jiang.liu@linux.intel.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -30,7 +30,7 @@ struct apic_chip_data {
+
+ struct irq_domain *x86_vector_domain;
+ static DEFINE_RAW_SPINLOCK(vector_lock);
+-static cpumask_var_t vector_cpumask;
++static cpumask_var_t vector_cpumask, searched_cpumask;
+ static struct irq_chip lapic_controller;
+ #ifdef CONFIG_X86_IO_APIC
+ static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
+@@ -124,6 +124,7 @@ static int __assign_irq_vector(int irq,
+ /* Only try and allocate irqs on cpus that are present */
+ err = -ENOSPC;
+ cpumask_clear(d->old_domain);
++ cpumask_clear(searched_cpumask);
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ while (cpu < nr_cpu_ids) {
+ int new_cpu, vector, offset;
+@@ -157,9 +158,9 @@ next:
+ }
+
+ if (unlikely(current_vector == vector)) {
+- cpumask_or(d->old_domain, d->old_domain,
++ cpumask_or(searched_cpumask, searched_cpumask,
+ vector_cpumask);
+- cpumask_andnot(vector_cpumask, mask, d->old_domain);
++ cpumask_andnot(vector_cpumask, mask, searched_cpumask);
+ cpu = cpumask_first_and(vector_cpumask,
+ cpu_online_mask);
+ continue;
+@@ -404,6 +405,7 @@ int __init arch_early_irq_init(void)
+ arch_init_htirq_domain(x86_vector_domain);
+
+ BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
++ BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
+
+ return arch_early_ioapic_init();
+ }
--- /dev/null
+From 111abeba67e0dbdc26537429de9155e4f1d807d8 Mon Sep 17 00:00:00 2001
+From: Jiang Liu <jiang.liu@linux.intel.com>
+Date: Thu, 31 Dec 2015 16:30:44 +0000
+Subject: x86/irq: Fix a race in x86_vector_free_irqs()
+
+From: Jiang Liu <jiang.liu@linux.intel.com>
+
+commit 111abeba67e0dbdc26537429de9155e4f1d807d8 upstream.
+
+There's a race condition between
+
+x86_vector_free_irqs()
+{
+ free_apic_chip_data(irq_data->chip_data);
+ xxxxx //irq_data->chip_data has been freed, but the pointer
+ //hasn't been reset yet
+ irq_domain_reset_irq_data(irq_data);
+}
+
+and
+
+smp_irq_move_cleanup_interrupt()
+{
+ raw_spin_lock(&vector_lock);
+ data = apic_chip_data(irq_desc_get_irq_data(desc));
+ access data->xxxx // may access freed memory
+ raw_spin_unlock(&desc->lock);
+}
+
+which may cause smp_irq_move_cleanup_interrupt() to access freed memory.
+
+Fix this by calling irq_domain_reset_irq_data(), which clears the pointer,
+with the vector lock held.
+
+[ tglx: Free memory outside of lock held region. ]
+
+Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/1450880014-11741-3-git-send-email-jiang.liu@linux.intel.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -224,10 +224,8 @@ static int assign_irq_vector_policy(int
+ static void clear_irq_vector(int irq, struct apic_chip_data *data)
+ {
+ struct irq_desc *desc;
+- unsigned long flags;
+ int cpu, vector;
+
+- raw_spin_lock_irqsave(&vector_lock, flags);
+ BUG_ON(!data->cfg.vector);
+
+ vector = data->cfg.vector;
+@@ -237,10 +235,8 @@ static void clear_irq_vector(int irq, st
+ data->cfg.vector = 0;
+ cpumask_clear(data->domain);
+
+- if (likely(!data->move_in_progress)) {
+- raw_spin_unlock_irqrestore(&vector_lock, flags);
++ if (likely(!data->move_in_progress))
+ return;
+- }
+
+ desc = irq_to_desc(irq);
+ for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
+@@ -253,7 +249,6 @@ static void clear_irq_vector(int irq, st
+ }
+ }
+ data->move_in_progress = 0;
+- raw_spin_unlock_irqrestore(&vector_lock, flags);
+ }
+
+ void init_irq_alloc_info(struct irq_alloc_info *info,
+@@ -274,19 +269,24 @@ void copy_irq_alloc_info(struct irq_allo
+ static void x86_vector_free_irqs(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+ {
++ struct apic_chip_data *apic_data;
+ struct irq_data *irq_data;
++ unsigned long flags;
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
+ if (irq_data && irq_data->chip_data) {
++ raw_spin_lock_irqsave(&vector_lock, flags);
+ clear_irq_vector(virq + i, irq_data->chip_data);
+- free_apic_chip_data(irq_data->chip_data);
++ apic_data = irq_data->chip_data;
++ irq_domain_reset_irq_data(irq_data);
++ raw_spin_unlock_irqrestore(&vector_lock, flags);
++ free_apic_chip_data(apic_data);
+ #ifdef CONFIG_X86_IO_APIC
+ if (virq + i < nr_legacy_irqs())
+ legacy_irq_data[virq + i] = NULL;
+ #endif
+- irq_domain_reset_irq_data(irq_data);
+ }
+ }
+ }
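
The ordering the fix establishes is the classic "clear the pointer under the
lock, free the memory outside the locked region" pattern. A minimal user-space
sketch of that pattern (pthread mutex standing in for vector_lock; compile with
-pthread; this is an analogy, not the kernel code):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int *shared_data;

    /* Reader side: like smp_irq_move_cleanup_interrupt(), it only touches
     * the pointer while holding the lock. */
    static void reader(void)
    {
    	pthread_mutex_lock(&lock);
    	if (shared_data)
    		printf("value %d\n", *shared_data);
    	pthread_mutex_unlock(&lock);
    }

    /* Free side: like the fixed x86_vector_free_irqs(), the pointer is
     * cleared under the lock and the memory is freed only afterwards. */
    static void release(void)
    {
    	int *tmp;

    	pthread_mutex_lock(&lock);
    	tmp = shared_data;
    	shared_data = NULL;	/* readers can no longer pick it up */
    	pthread_mutex_unlock(&lock);
    	free(tmp);		/* free outside the lock held region */
    }

    int main(void)
    {
    	shared_data = malloc(sizeof(*shared_data));
    	*shared_data = 42;
    	reader();
    	release();
    	reader();		/* sees NULL, nothing to dereference */
    	return 0;
    }
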
--- /dev/null
+From ab25ac02148b600e645f77cfb8b8ea415ed75bb4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:49 +0000
+Subject: x86/irq: Get rid of code duplication
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit ab25ac02148b600e645f77cfb8b8ea415ed75bb4 upstream.
+
+The code paths for reusing an existing vector and for assigning a new one are
+largely duplicated. Consolidate them.
+
+This is also a preparatory patch for finally plugging the cleanup race.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160106.721599216@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 33 +++++++++++++++------------------
+ 1 file changed, 15 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -116,7 +116,7 @@ static int __assign_irq_vector(int irq,
+ */
+ static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+ static int current_offset = VECTOR_OFFSET_START % 16;
+- int cpu;
++ int cpu, vector;
+
+ if (d->move_in_progress)
+ return -EBUSY;
+@@ -126,7 +126,7 @@ static int __assign_irq_vector(int irq,
+ cpumask_clear(searched_cpumask);
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ while (cpu < nr_cpu_ids) {
+- int new_cpu, vector, offset;
++ int new_cpu, offset;
+
+ /* Get the possible target cpus for @mask/@cpu from the apic */
+ apic->vector_allocation_domain(cpu, vector_cpumask, mask);
+@@ -146,16 +146,12 @@ static int __assign_irq_vector(int irq,
+ if (cpumask_equal(vector_cpumask, d->domain))
+ goto success;
+ /*
+- * New cpumask using the vector is a proper subset of
+- * the current in use mask. So cleanup the vector
+- * allocation for the members that are not used anymore.
++ * Mark the cpus which are not longer in the mask for
++ * cleanup.
+ */
+- cpumask_andnot(d->old_domain, d->domain,
+- vector_cpumask);
+- d->move_in_progress =
+- cpumask_intersects(d->old_domain, cpu_online_mask);
+- cpumask_copy(d->domain, vector_cpumask);
+- goto success;
++ cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
++ vector = d->cfg.vector;
++ goto update;
+ }
+
+ vector = current_vector;
+@@ -181,16 +177,12 @@ next:
+ /* Found one! */
+ current_vector = vector;
+ current_offset = offset;
+- if (d->cfg.vector) {
++ /* Schedule the old vector for cleanup on all cpus */
++ if (d->cfg.vector)
+ cpumask_copy(d->old_domain, d->domain);
+- d->move_in_progress =
+- cpumask_intersects(d->old_domain, cpu_online_mask);
+- }
+ for_each_cpu(new_cpu, vector_searchmask)
+ per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
+- d->cfg.vector = vector;
+- cpumask_copy(d->domain, vector_cpumask);
+- goto success;
++ goto update;
+
+ next_cpu:
+ /*
+@@ -207,6 +199,11 @@ next_cpu:
+ }
+ return -ENOSPC;
+
++update:
++ /* Cleanup required ? */
++ d->move_in_progress = cpumask_intersects(d->old_domain, cpu_online_mask);
++ d->cfg.vector = vector;
++ cpumask_copy(d->domain, vector_cpumask);
+ success:
+ /*
+ * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
--- /dev/null
+From 98229aa36caa9c769b13565523de9b813013c703 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:54 +0000
+Subject: x86/irq: Plug vector cleanup race
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 98229aa36caa9c769b13565523de9b813013c703 upstream.
+
+We still can end up with a stale vector due to the following:
+
+CPU0 CPU1 CPU2
+lock_vector()
+data->move_in_progress=0
+sendIPI()
+unlock_vector()
+ set_affinity()
+ assign_irq_vector()
+ lock_vector() handle_IPI
+ move_in_progress = 1 lock_vector()
+ unlock_vector()
+ move_in_progress == 1
+
+So we need to serialize the vector assignment against a pending cleanup. The
+solution is rather simple now. We not only check for the move_in_progress flag
+in assign_irq_vector(), but also check whether there is still a cleanup pending
+in the old_domain cpumask. If so, we return -EBUSY to the caller and let it
+deal with it. We have to be careful in the cpu unplug case, though. If the
+cleanup has not yet completed then the following setaffinity() call would
+return -EBUSY. Add code which prevents this.
+
+Full context is here: http://lkml.kernel.org/r/5653B688.4050809@stratus.com
+
+Reported-and-tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160107.207265407@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 63 +++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 53 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -118,7 +118,12 @@ static int __assign_irq_vector(int irq,
+ static int current_offset = VECTOR_OFFSET_START % 16;
+ int cpu, vector;
+
+- if (d->move_in_progress)
++ /*
++ * If there is still a move in progress or the previous move has not
++ * been cleaned up completely, tell the caller to come back later.
++ */
++ if (d->move_in_progress ||
++ cpumask_intersects(d->old_domain, cpu_online_mask))
+ return -EBUSY;
+
+ /* Only try and allocate irqs on cpus that are present */
+@@ -257,7 +262,12 @@ static void clear_irq_vector(int irq, st
+ data->cfg.vector = 0;
+ cpumask_clear(data->domain);
+
+- if (likely(!data->move_in_progress))
++ /*
++ * If move is in progress or the old_domain mask is not empty,
++ * i.e. the cleanup IPI has not been processed yet, we need to remove
++ * the old references to desc from all cpus vector tables.
++ */
++ if (!data->move_in_progress && cpumask_empty(data->old_domain))
+ return;
+
+ desc = irq_to_desc(irq);
+@@ -577,12 +587,25 @@ asmlinkage __visible void smp_irq_move_c
+ goto unlock;
+
+ /*
+- * Check if the irq migration is in progress. If so, we
+- * haven't received the cleanup request yet for this irq.
++ * Nothing to cleanup if irq migration is in progress
++ * or this cpu is not set in the cleanup mask.
+ */
+- if (data->move_in_progress)
++ if (data->move_in_progress ||
++ !cpumask_test_cpu(me, data->old_domain))
+ goto unlock;
+
++ /*
++ * We have two cases to handle here:
++ * 1) vector is unchanged but the target mask got reduced
++ * 2) vector and the target mask has changed
++ *
++ * #1 is obvious, but in #2 we have two vectors with the same
++ * irq descriptor: the old and the new vector. So we need to
++ * make sure that we only cleanup the old vector. The new
++ * vector has the current @vector number in the config and
++ * this cpu is part of the target mask. We better leave that
++ * one alone.
++ */
+ if (vector == data->cfg.vector &&
+ cpumask_test_cpu(me, data->domain))
+ goto unlock;
+@@ -600,6 +623,7 @@ asmlinkage __visible void smp_irq_move_c
+ goto unlock;
+ }
+ __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
++ cpumask_clear_cpu(me, data->old_domain);
+ unlock:
+ raw_spin_unlock(&desc->lock);
+ }
+@@ -643,13 +667,32 @@ void irq_force_complete_move(struct irq_
+ __irq_complete_move(cfg, cfg->vector);
+
+ /*
+- * Remove this cpu from the cleanup mask. The IPI might have been sent
+- * just before the cpu was removed from the offline mask, but has not
+- * been processed because the CPU has interrupts disabled and is on
+- * the way out.
++ * This is tricky. If the cleanup of @data->old_domain has not been
++ * done yet, then the following setaffinity call will fail with
++ * -EBUSY. This can leave the interrupt in a stale state.
++ *
++ * The cleanup cannot make progress because we hold @desc->lock. So in
++ * case @data->old_domain is not yet cleaned up, we need to drop the
++ * lock and acquire it again. @desc cannot go away, because the
++ * hotplug code holds the sparse irq lock.
+ */
+ raw_spin_lock(&vector_lock);
+- cpumask_clear_cpu(smp_processor_id(), data->old_domain);
++ /* Clean out all offline cpus (including ourself) first. */
++ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
++ while (!cpumask_empty(data->old_domain)) {
++ raw_spin_unlock(&vector_lock);
++ raw_spin_unlock(&desc->lock);
++ cpu_relax();
++ raw_spin_lock(&desc->lock);
++ /*
++ * Reevaluate apic_chip_data. It might have been cleared after
++ * we dropped @desc->lock.
++ */
++ data = apic_chip_data(irqdata);
++ if (!data)
++ return;
++ raw_spin_lock(&vector_lock);
++ }
+ raw_spin_unlock(&vector_lock);
+ }
+ #endif
--- /dev/null
+From 847667ef10356b824a11c853fc8a8b1b437b6a8d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:50 +0000
+Subject: x86/irq: Remove offline cpus from vector cleanup
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 847667ef10356b824a11c853fc8a8b1b437b6a8d upstream.
+
+No point in keeping offline cpus in the cleanup mask.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160106.808642683@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -200,8 +200,12 @@ next_cpu:
+ return -ENOSPC;
+
+ update:
+- /* Cleanup required ? */
+- d->move_in_progress = cpumask_intersects(d->old_domain, cpu_online_mask);
++ /*
++ * Exclude offline cpus from the cleanup mask and set the
++ * move_in_progress flag when the result is not empty.
++ */
++ cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
++ d->move_in_progress = !cpumask_empty(d->old_domain);
+ d->cfg.vector = vector;
+ cpumask_copy(d->domain, vector_cpumask);
+ success:
--- /dev/null
+From 56d7d2f4bbd00fb198b7907cb3ab657d06115a42 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:52 +0000
+Subject: x86/irq: Remove outgoing CPU from vector cleanup mask
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 56d7d2f4bbd00fb198b7907cb3ab657d06115a42 upstream.
+
+We want to synchronize new vector assignments with a pending cleanup. Remove a
+dying cpu from a pending cleanup mask.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160107.045961667@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -631,9 +631,23 @@ void irq_complete_move(struct irq_cfg *c
+ void irq_force_complete_move(int irq)
+ {
+ struct irq_cfg *cfg = irq_cfg(irq);
++ struct apic_chip_data *data;
+
+- if (cfg)
+- __irq_complete_move(cfg, cfg->vector);
++ if (!cfg)
++ return;
++
++ __irq_complete_move(cfg, cfg->vector);
++
++ /*
++ * Remove this cpu from the cleanup mask. The IPI might have been sent
++ * just before the cpu was removed from the offline mask, but has not
++ * been processed because the CPU has interrupts disabled and is on
++ * the way out.
++ */
++ raw_spin_lock(&vector_lock);
++ data = container_of(cfg, struct apic_chip_data, cfg);
++ cpumask_clear_cpu(smp_processor_id(), data->old_domain);
++ raw_spin_unlock(&vector_lock);
+ }
+ #endif
+
--- /dev/null
+From 5da0c1217f05d2ccc9a8ed6e6e5c23a8a1d24dd6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:52 +0000
+Subject: x86/irq: Remove the cpumask allocation from send_cleanup_vector()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 5da0c1217f05d2ccc9a8ed6e6e5c23a8a1d24dd6 upstream.
+
+There is no need to allocate a new cpumask for sending the cleanup vector. The
+old_domain mask is now protected by the vector_lock, so we can safely remove
+the offline cpus from it and send the IPI with the resulting mask.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160106.967993932@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 16 +++-------------
+ 1 file changed, 3 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -528,21 +528,11 @@ static struct irq_chip lapic_controller
+ #ifdef CONFIG_SMP
+ static void __send_cleanup_vector(struct apic_chip_data *data)
+ {
+- cpumask_var_t cleanup_mask;
+-
+ raw_spin_lock(&vector_lock);
++ cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+ data->move_in_progress = 0;
+- if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+- unsigned int i;
+-
+- for_each_cpu_and(i, data->old_domain, cpu_online_mask)
+- apic->send_IPI_mask(cpumask_of(i),
+- IRQ_MOVE_CLEANUP_VECTOR);
+- } else {
+- cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
+- apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+- free_cpumask_var(cleanup_mask);
+- }
++ if (!cpumask_empty(data->old_domain))
++ apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
+ raw_spin_unlock(&vector_lock);
+ }
+
--- /dev/null
+From 433cbd57d190a1cdd02f243df41c3d7f55ec4b94 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:46 +0000
+Subject: x86/irq: Reorganize the return path in assign_irq_vector
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 433cbd57d190a1cdd02f243df41c3d7f55ec4b94 upstream.
+
+Use an explicit goto for the cases where we have success in the search/update
+and return -ENOSPC if the search loop ends without finding space.
+
+Preparatory patch for fixes. No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160106.403491024@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 22 ++++++++--------------
+ 1 file changed, 8 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -116,13 +116,12 @@ static int __assign_irq_vector(int irq,
+ */
+ static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+ static int current_offset = VECTOR_OFFSET_START % 16;
+- int cpu, err;
++ int cpu;
+
+ if (d->move_in_progress)
+ return -EBUSY;
+
+ /* Only try and allocate irqs on cpus that are present */
+- err = -ENOSPC;
+ cpumask_clear(d->old_domain);
+ cpumask_clear(searched_cpumask);
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+@@ -132,9 +131,8 @@ static int __assign_irq_vector(int irq,
+ apic->vector_allocation_domain(cpu, vector_cpumask, mask);
+
+ if (cpumask_subset(vector_cpumask, d->domain)) {
+- err = 0;
+ if (cpumask_equal(vector_cpumask, d->domain))
+- break;
++ goto success;
+ /*
+ * New cpumask using the vector is a proper subset of
+ * the current in use mask. So cleanup the vector
+@@ -145,7 +143,7 @@ static int __assign_irq_vector(int irq,
+ d->move_in_progress =
+ cpumask_intersects(d->old_domain, cpu_online_mask);
+ cpumask_and(d->domain, d->domain, vector_cpumask);
+- break;
++ goto success;
+ }
+
+ vector = current_vector;
+@@ -185,17 +183,13 @@ next:
+ per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
+ d->cfg.vector = vector;
+ cpumask_copy(d->domain, vector_cpumask);
+- err = 0;
+- break;
++ goto success;
+ }
++ return -ENOSPC;
+
+- if (!err) {
+- /* cache destination APIC IDs into cfg->dest_apicid */
+- err = apic->cpu_mask_to_apicid_and(mask, d->domain,
+- &d->cfg.dest_apicid);
+- }
+-
+- return err;
++success:
++ /* cache destination APIC IDs into cfg->dest_apicid */
++ return apic->cpu_mask_to_apicid_and(mask, d->domain, &d->cfg.dest_apicid);
+ }
+
+ static int assign_irq_vector(int irq, struct apic_chip_data *data,
--- /dev/null
+From 95ffeb4b5baca266e1d0d2bc90f1513e6f419cdd Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:47 +0000
+Subject: x86/irq: Reorganize the search in assign_irq_vector
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 95ffeb4b5baca266e1d0d2bc90f1513e6f419cdd upstream.
+
+Split out the code which advances the target cpu for the search, so it can be
+reused by the next patch, which adds an early validation check for the
+vectormask we get from the apic.
+
+Add comments while at it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@alien8.de>
+Tested-by: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160106.484562040@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/apic/vector.c | 24 ++++++++++++++++--------
+ 1 file changed, 16 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -155,14 +155,9 @@ next:
+ vector = FIRST_EXTERNAL_VECTOR + offset;
+ }
+
+- if (unlikely(current_vector == vector)) {
+- cpumask_or(searched_cpumask, searched_cpumask,
+- vector_cpumask);
+- cpumask_andnot(vector_cpumask, mask, searched_cpumask);
+- cpu = cpumask_first_and(vector_cpumask,
+- cpu_online_mask);
+- continue;
+- }
++ /* If the search wrapped around, try the next cpu */
++ if (unlikely(current_vector == vector))
++ goto next_cpu;
+
+ if (test_bit(vector, used_vectors))
+ goto next;
+@@ -184,6 +179,19 @@ next:
+ d->cfg.vector = vector;
+ cpumask_copy(d->domain, vector_cpumask);
+ goto success;
++
++next_cpu:
++ /*
++ * We exclude the current @vector_cpumask from the requested
++ * @mask and try again with the next online cpu in the
++ * result. We cannot modify @mask, so we use @vector_cpumask
++ * as a temporary buffer here as it will be reassigned when
++ * calling apic->vector_allocation_domain() above.
++ */
++ cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
++ cpumask_andnot(vector_cpumask, mask, searched_cpumask);
++ cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
++ continue;
+ }
+ return -ENOSPC;
+
--- /dev/null
+From 36f34c8c63da3e272fd66f91089228c22d2b6e8b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Dec 2015 16:30:45 +0000
+Subject: x86/irq: Validate that irq descriptor is still active
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 36f34c8c63da3e272fd66f91089228c22d2b6e8b upstream.
+
+In fixup_irqs() we unconditionally dereference the irq chip of an irq
+descriptor. The descriptor might still be valid, but already cleaned up,
+i.e. the chip removed. Add a check for this condition.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Cc: Joe Lawrence <joe.lawrence@stratus.com>
+Cc: Jeremiah Mahler <jmmahler@gmail.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: andy.shevchenko@gmail.com
+Cc: Guenter Roeck <linux@roeck-us.net>
+Link: http://lkml.kernel.org/r/20151231160106.236423282@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index f8062aaf5df9..c0b58dd1ca04 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -470,6 +470,15 @@ void fixup_irqs(void)
+ }
+
+ chip = irq_data_get_irq_chip(data);
++ /*
++ * The interrupt descriptor might have been cleaned up
++ * already, but it is not yet removed from the radix tree
++ */
++ if (!chip) {
++ raw_spin_unlock(&desc->lock);
++ continue;
++ }
++
+ if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
+ chip->irq_mask(data);
+