--- /dev/null
+From f44310b98ddb7f0d06550d73ed67df5865e3eda5 Mon Sep 17 00:00:00 2001
+From: Wang YanQing <udknight@gmail.com>
+Date: Sat, 26 Jan 2013 15:53:57 +0800
+Subject: smp: Fix SMP function call empty cpu mask race
+
+From: Wang YanQing <udknight@gmail.com>
+
+commit f44310b98ddb7f0d06550d73ed67df5865e3eda5 upstream.
+
+I get the following warning once or twice a day with v3.7:
+
+ [ 2235.186027] WARNING: at /mnt/sda7/kernel/linux/arch/x86/kernel/apic/ipi.c:109 default_send_IPI_mask_logical+0x2f/0xb8()
+
+As explained by Linus as well:
+
+ |
+ | Once we've done the "list_add_rcu()" to add it to the
+ | queue, we can have (another) IPI to the target CPU that can
+ | now see it and clear the mask.
+ |
+ | So by the time we get to actually send the IPI, the mask might
+ | have been cleared by another IPI.
+ |
+
+This patch also fixes a system hang problem: if data->cpumask gets
+cleared after we pass this check:
+
+ if (WARN_ONCE(!mask, "empty IPI mask"))
+ return;
+
+then the problem in commit 83d349f35e1a ("x86: don't send an IPI to
+the empty set of CPU's") will happen again.
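+
+The fix is to snapshot data->cpumask into a per-cpu cpumask_ipi
+field that only the sending CPU writes, and to use that snapshot
+when sending the IPI. A simplified sketch of the resulting
+ordering (locking and error handling elided, helpers as in
+kernel/smp.c):
+
+    /* Snapshot the mask *before* the entry becomes visible. */
+    cpumask_copy(data->cpumask_ipi, data->cpumask);
+
+    /*
+     * Publish the entry; from here on remote CPUs may clear bits
+     * in data->cpumask, so it can no longer be trusted.
+     */
+    list_add_rcu(&data->csd.list, &call_function.queue);
+    smp_mb();
+
+    /* Send the IPI using the stable snapshot. */
+    arch_send_call_function_ipi_mask(data->cpumask_ipi);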
+
+Signed-off-by: Wang YanQing <udknight@gmail.com>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Acked-by: Jan Beulich <jbeulich@suse.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: peterz@infradead.org
+Cc: mina86@mina86.org
+Cc: srivatsa.bhat@linux.vnet.ibm.com
+Link: http://lkml.kernel.org/r/20130126075357.GA3205@udknight
+[ Tidied up the changelog and the comment in the code. ]
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/smp.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -31,6 +31,7 @@ struct call_function_data {
+ struct call_single_data csd;
+ atomic_t refs;
+ cpumask_var_t cpumask;
++ cpumask_var_t cpumask_ipi;
+ };
+
+ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
+@@ -54,6 +55,9 @@ hotplug_cfd(struct notifier_block *nfb,
+ if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+ cpu_to_node(cpu)))
+ return notifier_from_errno(-ENOMEM);
++ if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
++ cpu_to_node(cpu)))
++ return notifier_from_errno(-ENOMEM);
+ break;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -63,6 +67,7 @@ hotplug_cfd(struct notifier_block *nfb,
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ free_cpumask_var(cfd->cpumask);
++ free_cpumask_var(cfd->cpumask_ipi);
+ break;
+ #endif
+ };
+@@ -524,6 +529,12 @@ void smp_call_function_many(const struct
+ return;
+ }
+
++ /*
++ * After we put an entry into the list, data->cpumask
++ * may be cleared again when another CPU sends another IPI for
++ * an SMP function call, so data->cpumask will be zero.
++ */
++ cpumask_copy(data->cpumask_ipi, data->cpumask);
+ raw_spin_lock_irqsave(&call_function.lock, flags);
+ /*
+ * Place entry at the _HEAD_ of the list, so that any cpu still
+@@ -547,7 +558,7 @@ void smp_call_function_many(const struct
+ smp_mb();
+
+ /* Send a message to all CPUs in the map */
+- arch_send_call_function_ipi_mask(data->cpumask);
++ arch_send_call_function_ipi_mask(data->cpumask_ipi);
+
+ /* Optionally wait for the CPUs to complete */
+ if (wait)
--- /dev/null
+From c903f0456bc69176912dee6dd25c6a66ee1aed00 Mon Sep 17 00:00:00 2001
+From: Alan Cox <alan@linux.intel.com>
+Date: Thu, 15 Nov 2012 13:06:22 +0000
+Subject: x86/msr: Add capabilities check
+
+From: Alan Cox <alan@linux.intel.com>
+
+commit c903f0456bc69176912dee6dd25c6a66ee1aed00 upstream.
+
+At the moment the MSR driver relies only upon file system
+checks. This means that anything running as root, with any
+capability set, can write to MSRs. Historically that wasn't very
+interesting, but on modern processors the MSRs are such that
+writing to them provides several ways to execute arbitrary code
+in kernel space. Sample code and documentation on doing this is
+circulating, and MSR attacks are already used by 64-bit Windows
+rootkits.
+
+In the Linux case you still need to be able to open the device
+file, so the impact is fairly limited; it mainly reduces the
+security of capability- and security-model-based systems down
+towards that of a generic "root owns the box" setup.
+
+Therefore the driver should require CAP_SYS_RAWIO to prevent an
+elevation of capabilities. The impact of this is fairly minimal
+on most setups, because they don't make heavy use of
+capabilities. Those using SELinux, SMACK or AppArmor rules might
+want to consider whether their rulesets on the MSR driver could
+be tighter.
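+
+For illustration, a minimal userspace probe (hypothetical, not
+part of this patch): with this change the open() below fails with
+EPERM for any process lacking CAP_SYS_RAWIO, even one running as
+root:
+
+    #include <fcntl.h>
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+        uint64_t val;
+        int fd = open("/dev/cpu/0/msr", O_RDONLY);
+
+        if (fd < 0) {
+            /* Now -EPERM without CAP_SYS_RAWIO */
+            perror("open /dev/cpu/0/msr");
+            return 1;
+        }
+
+        /* The file offset selects the MSR; 0x1b is IA32_APICBASE. */
+        if (pread(fd, &val, sizeof(val), 0x1b) == sizeof(val))
+            printf("IA32_APICBASE: %#llx\n", (unsigned long long)val);
+
+        close(fd);
+        return 0;
+    }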
+
+Signed-off-by: Alan Cox <alan@linux.intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/msr.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kernel/msr.c
++++ b/arch/x86/kernel/msr.c
+@@ -175,6 +175,9 @@ static int msr_open(struct inode *inode,
+ unsigned int cpu;
+ struct cpuinfo_x86 *c;
+
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
+ cpu = iminor(file->f_path.dentry->d_inode);
+ if (cpu >= nr_cpu_ids || !cpu_online(cpu))
+ return -ENXIO; /* No such CPU */