--- /dev/null
+From d5421ea43d30701e03cadc56a38854c36a8b4433 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 26 Jan 2018 14:54:32 +0100
+Subject: hrtimer: Reset hrtimer cpu base proper on CPU hotplug
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit d5421ea43d30701e03cadc56a38854c36a8b4433 upstream.
+
+The hrtimer interrupt code contains a hang detection and mitigation
+mechanism, which prevents a long delayed hrtimer interrupt from causing
+continuous retriggering of interrupts that prevents the system from making
+progress. If a hang is detected then the timer hardware is programmed with
+a certain delay into the future and a flag is set in the hrtimer cpu base
+which prevents newly enqueued timers from reprogramming the timer hardware
+prior to the chosen delay. The subsequent hrtimer interrupt after the delay
+clears the flag and resumes normal operation.
+
+If such a hang happens in the last hrtimer interrupt before a CPU is
+unplugged then the hang_detected flag is set and stays that way when the
+CPU is plugged in again. At that point the timer hardware is not armed and
+it cannot be armed because the hang_detected flag is still active, so
+nothing clears that flag. As a consequence, the CPU does not receive
+hrtimer interrupts and no timers expire on that CPU, which results in RCU
+stalls and other malfunctions.
+
+Clear the flag along with some other less critical members of the hrtimer
+cpu base to ensure starting from a clean state when a CPU is plugged in.
+
+Thanks to Paul, Sebastian and Anna-Maria for their help in getting down to
+the root cause of that hard-to-reproduce heisenbug. Once understood, it's
+trivial and certainly justifies a brown paper bag.
+
+Fixes: 41d2e4949377 ("hrtimer: Tune hrtimer_interrupt hang logic")
+Reported-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Sewior <bigeasy@linutronix.de>
+Cc: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Link: https://lkml.kernel.org/r/alpine.DEB.2.20.1801261447590.2067@nanos
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/hrtimer.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -652,7 +652,9 @@ static void hrtimer_reprogram(struct hrt
+ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+ {
+ base->expires_next.tv64 = KTIME_MAX;
++ base->hang_detected = 0;
+ base->hres_active = 0;
++ base->next_timer = NULL;
+ }
+
+ /*
+@@ -1610,6 +1612,7 @@ int hrtimers_prepare_cpu(unsigned int cp
+ timerqueue_init_head(&cpu_base->clock_base[i].active);
+ }
+
++ cpu_base->active_bases = 0;
+ cpu_base->cpu = cpu;
+ hrtimer_init_hres(cpu_base);
+ return 0;
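
To make the changelog above concrete, here is a minimal sketch of the gating
it describes, using simplified stand-in types and demo_* names rather than
the kernel's real hrtimer code: once hang_detected is set, enqueueing a timer
no longer arms the hardware, so only a later hrtimer interrupt can clear the
flag, and that interrupt never comes if the CPU was unplugged with the flag
still set.

/* Illustrative sketch only, not the actual kernel code. */
#include <stddef.h>

struct demo_cpu_base {
        unsigned int hang_detected;     /* set by the hang mitigation */
        unsigned int hres_active;
        void *next_timer;
};

static void demo_reprogram(struct demo_cpu_base *base)
{
        /*
         * While hang mitigation is active, newly enqueued timers must not
         * rearm the clock event hardware; the delayed hrtimer interrupt is
         * expected to clear the flag and reprogram instead.
         */
        if (base->hang_detected)
                return;

        /* ... program the clock event device here ... */
}

static void demo_prepare_cpu(struct demo_cpu_base *base)
{
        /*
         * Reset on CPU prepare, as the fix above does: a flag left over
         * from before the unplug would keep demo_reprogram() a no-op
         * forever.
         */
        base->hang_detected = 0;
        base->hres_active = 0;
        base->next_timer = NULL;
}
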
--- /dev/null
+From 40d4071ce2d20840d224b4a77b5dc6f752c9ab15 Mon Sep 17 00:00:00 2001
+From: Xiao Liang <xiliang@redhat.com>
+Date: Mon, 22 Jan 2018 14:12:52 +0800
+Subject: perf/x86/amd/power: Do not load AMD power module on !AMD platforms
+
+From: Xiao Liang <xiliang@redhat.com>
+
+commit 40d4071ce2d20840d224b4a77b5dc6f752c9ab15 upstream.
+
+The AMD power module can be loaded on non-AMD platforms, but unload fails
+with the following Oops:
+
+ BUG: unable to handle kernel NULL pointer dereference at (null)
+ IP: __list_del_entry_valid+0x29/0x90
+ Call Trace:
+ perf_pmu_unregister+0x25/0xf0
+ amd_power_pmu_exit+0x1c/0xd23 [power]
+ SyS_delete_module+0x1a8/0x2b0
+ ? exit_to_usermode_loop+0x8f/0xb0
+ entry_SYSCALL_64_fastpath+0x20/0x83
+
+Return -ENODEV instead of 0 from the module init function if the CPU does
+not match.
+
+Fixes: c7ab62bfbe0e ("perf/x86/amd/power: Add AMD accumulated power reporting mechanism")
+Signed-off-by: Xiao Liang <xiliang@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lkml.kernel.org/r/20180122061252.6394-1-xiliang@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/events/amd/power.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/events/amd/power.c
++++ b/arch/x86/events/amd/power.c
+@@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(voi
+ int ret;
+
+ if (!x86_match_cpu(cpu_match))
+- return 0;
++ return -ENODEV;
+
+ if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
+ return -ENODEV;
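
The shape of the fix, sketched below as a hypothetical module (the demo_*
names are made up; the match table only mirrors the one in power.c): when
the init function returns -ENODEV on unsupported CPUs, the module is refused
at load time, so its exit handler can never reach perf_pmu_unregister() for
a PMU that was never registered. Returning 0 instead, as the code did
before, lets the module load with nothing registered, and the unload path
then oopses as shown above.

/* Hypothetical demo module, not the AMD power driver itself. */
#include <linux/module.h>
#include <linux/errno.h>
#include <asm/cpu_device_id.h>
#include <asm/processor.h>

static const struct x86_cpu_id demo_cpu_match[] = {
        { .vendor = X86_VENDOR_AMD, .family = 0x15 },
        {},
};

static int __init demo_pmu_init(void)
{
        /*
         * Bail out with -ENODEV on unsupported CPUs so the module does not
         * load at all and the exit path below can never run against a PMU
         * that was never registered.
         */
        if (!x86_match_cpu(demo_cpu_match))
                return -ENODEV;

        /* ... perf_pmu_register() would go here ... */
        return 0;
}

static void __exit demo_pmu_exit(void)
{
        /* Only reached when init succeeded, so unregistering is safe. */
        /* ... perf_pmu_unregister() would go here ... */
}

module_init(demo_pmu_init);
module_exit(demo_pmu_exit);
MODULE_LICENSE("GPL");
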
mlxsw-spectrum_router-don-t-log-an-error-on-missing-neighbor.patch
tun-fix-a-memory-leak-for-tfile-tx_array.patch
flow_dissector-properly-cap-thoff-field.patch
+perf-x86-amd-power-do-not-load-amd-power-module-on-amd-platforms.patch
+x86-microcode-intel-extend-bdw-late-loading-further-with-llc-size-check.patch
+hrtimer-reset-hrtimer-cpu-base-proper-on-cpu-hotplug.patch
--- /dev/null
+From 7e702d17ed138cf4ae7c00e8c00681ed464587c7 Mon Sep 17 00:00:00 2001
+From: Jia Zhang <zhang.jia@linux.alibaba.com>
+Date: Tue, 23 Jan 2018 11:41:32 +0100
+Subject: x86/microcode/intel: Extend BDW late-loading further with LLC size check
+
+From: Jia Zhang <zhang.jia@linux.alibaba.com>
+
+commit 7e702d17ed138cf4ae7c00e8c00681ed464587c7 upstream.
+
+Commit b94b73733171 ("x86/microcode/intel: Extend BDW late-loading with a
+revision check") reduced the impact of erratum BDF90 for Broadwell model
+79.
+
+The impact can be reduced further by checking the size of the last level
+cache portion per core.
+
+Tony: "The erratum says the problem only occurs on the large-cache SKUs.
+So we only need to avoid the update if we are on a big cache SKU that is
+also running old microcode."
+
+For more details, see erratum BDF90 in document #334165 (Intel Xeon
+Processor E7-8800/4800 v4 Product Family Specification Update) from
+September 2017.
+
+Fixes: b94b73733171 ("x86/microcode/intel: Extend BDW late-loading with a revision check")
+Signed-off-by: Jia Zhang <zhang.jia@linux.alibaba.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Tony Luck <tony.luck@intel.com>
+Link: https://lkml.kernel.org/r/1516321542-31161-1-git-send-email-zhang.jia@linux.alibaba.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/microcode/intel.c | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -40,6 +40,9 @@
+ #include <asm/setup.h>
+ #include <asm/msr.h>
+
++/* last level cache size per core */
++static int llc_size_per_core;
++
+ /*
+ * Temporary microcode blobs pointers storage. We note here during early load
+ * the pointers to microcode blobs we've got from whatever storage (detached
+@@ -1053,12 +1056,14 @@ static bool is_blacklisted(unsigned int
+
+ /*
+ * Late loading on model 79 with microcode revision less than 0x0b000021
+- * may result in a system hang. This behavior is documented in item
+- * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
++ * and LLC size per core bigger than 2.5MB may result in a system hang.
++ * This behavior is documented in item BDF90, #334165 (Intel Xeon
++ * Processor E7-8800/4800 v4 Product Family).
+ */
+ if (c->x86 == 6 &&
+ c->x86_model == INTEL_FAM6_BROADWELL_X &&
+ c->x86_mask == 0x01 &&
++ llc_size_per_core > 2621440 &&
+ c->microcode < 0x0b000021) {
+ pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
+ pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
+@@ -1125,6 +1130,15 @@ static struct microcode_ops microcode_in
+ .microcode_fini_cpu = microcode_fini_cpu,
+ };
+
++static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
++{
++ u64 llc_size = c->x86_cache_size * 1024;
++
++ do_div(llc_size, c->x86_max_cores);
++
++ return (int)llc_size;
++}
++
+ struct microcode_ops * __init init_intel_microcode(void)
+ {
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+@@ -1135,6 +1149,8 @@ struct microcode_ops * __init init_intel
+ return NULL;
+ }
+
++ llc_size_per_core = calc_llc_size_per_core(c);
++
+ return &microcode_intel_ops;
+ }
+
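
For reference, x86_cache_size is reported in KB, so the new check compares
bytes per core against 2621440, i.e. 2.5 MB per core. A stand-alone sketch of
the same arithmetic, with a made-up cache size and core count rather than any
particular Broadwell SKU:

#include <stdio.h>

int main(void)
{
        /* hypothetical example values, not a real SKU */
        unsigned long long cache_size_kb = 46080;   /* 45 MB LLC, in KB */
        unsigned int cores = 16;                    /* cores sharing it */
        unsigned long long llc_size_per_core = cache_size_kb * 1024ULL / cores;

        /* 2621440 bytes == 2.5 MB, the threshold used in is_blacklisted() */
        printf("%llu bytes per core: %s 2.5 MB per core\n", llc_size_per_core,
               llc_size_per_core > 2621440ULL ? "above" : "at or below");
        return 0;
}

With those example values, 45 MB shared by 16 cores comes to 2949120 bytes
(about 2.8 MB) per core, which is above the threshold, so the late-loading
block in is_blacklisted() can apply, provided the stepping and microcode
revision checks also match.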