--- /dev/null
+From 9e3f7a29694049edd728e2400ab57ad7553e5aa9 Mon Sep 17 00:00:00 2001
+From: Wei Huang <wei@redhat.com>
+Date: Wed, 16 Nov 2016 09:20:57 +0000
+Subject: arm64: KVM: pmu: Fix AArch32 cycle counter access
+
+From: Wei Huang <wei@redhat.com>
+
+commit 9e3f7a29694049edd728e2400ab57ad7553e5aa9 upstream.
+
+We're missing the handling code for the cycle counter accessed
+from a 32bit guest, leading to unexpected results.
+
+Signed-off-by: Wei Huang <wei@redhat.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kvm/sys_regs.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -602,8 +602,14 @@ static bool access_pmu_evcntr(struct kvm
+
+ idx = ARMV8_PMU_CYCLE_IDX;
+ } else {
+- BUG();
++ return false;
+ }
++ } else if (r->CRn == 0 && r->CRm == 9) {
++ /* PMCCNTR */
++ if (pmu_access_event_counter_el0_disabled(vcpu))
++ return false;
++
++ idx = ARMV8_PMU_CYCLE_IDX;
+ } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
+ /* PMEVCNTRn_EL0 */
+ if (pmu_access_event_counter_el0_disabled(vcpu))
+@@ -611,7 +617,7 @@ static bool access_pmu_evcntr(struct kvm
+
+ idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+ } else {
+- BUG();
++ return false;
+ }
+
+ if (!pmu_counter_idx_valid(vcpu, idx))
--- /dev/null
+From b112c84a6ff035271d41d548c10215f18443d6a6 Mon Sep 17 00:00:00 2001
+From: Wei Huang <wei@redhat.com>
+Date: Wed, 16 Nov 2016 11:09:20 -0600
+Subject: KVM: arm64: Fix the issues when guest PMCCFILTR is configured
+
+From: Wei Huang <wei@redhat.com>
+
+commit b112c84a6ff035271d41d548c10215f18443d6a6 upstream.
+
+KVM calls kvm_pmu_set_counter_event_type() when PMCCFILTR is configured.
+But this function can't deal with PMCCFILTR correctly because the evtCount
+bits of PMCCFILTR, which are reserved as 0, conflict with the SW_INCR event
+type of other PMXEVTYPER<n> registers. To fix it, when eventsel == 0, this
+function shouldn't return immediately; instead it needs to check further
+if select_idx is ARMV8_PMU_CYCLE_IDX.
+
+Another issue is that KVM shouldn't copy the eventsel bits of PMCCFILTR
+blindly to attr.config. Instead it ought to convert the request to the
+"cpu cycle" event type (i.e. 0x11).
+
+To support this patch and to prevent duplicated definitions, a limited
+set of ARMv8 perf event types were relocated from perf_event.c to
+asm/perf_event.h.
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Wei Huang <wei@redhat.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/perf_event.h | 10 +++++++++-
+ arch/arm64/kernel/perf_event.c | 10 +---------
+ virt/kvm/arm/pmu.c | 8 +++++---
+ 3 files changed, 15 insertions(+), 13 deletions(-)
+
+--- a/arch/arm64/include/asm/perf_event.h
++++ b/arch/arm64/include/asm/perf_event.h
+@@ -46,7 +46,15 @@
+ #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
+ #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
+
+-#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */
++/*
++ * PMUv3 event types: required events
++ */
++#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
++#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
++#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
++#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
++#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
++#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
+
+ /*
+ * Event filters for PMUv3
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -30,17 +30,9 @@
+
+ /*
+ * ARMv8 PMUv3 Performance Events handling code.
+- * Common event types.
++ * Common event types (some are defined in asm/perf_event.h).
+ */
+
+-/* Required events. */
+-#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
+-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
+-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
+-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
+-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
+-#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
+-
+ /* At least one of the following is required. */
+ #define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
+ #define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
+--- a/virt/kvm/arm/pmu.c
++++ b/virt/kvm/arm/pmu.c
+@@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct k
+ continue;
+ type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
+ & ARMV8_PMU_EVTYPE_EVENT;
+- if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
++ if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR)
+ && (enable & BIT(i))) {
+ reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+ reg = lower_32_bits(reg);
+@@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(stru
+ eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
+
+ /* Software increment event does't need to be backed by a perf event */
+- if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
++ if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR &&
++ select_idx != ARMV8_PMU_CYCLE_IDX)
+ return;
+
+ memset(&attr, 0, sizeof(struct perf_event_attr));
+@@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(stru
+ attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
+ attr.exclude_hv = 1; /* Don't count EL2 events */
+ attr.exclude_host = 1; /* Don't count host events */
+- attr.config = eventsel;
++ attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ?
++ ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel;
+
+ counter = kvm_pmu_get_counter_value(vcpu, select_idx);
+ /* The initial sample period (overflow count) of an event. */
--- /dev/null
+From 1650b4ebc99da4c137bfbfc531be4a2405f951dd Mon Sep 17 00:00:00 2001
+From: Ignacio Alvarado <ikalvarado@google.com>
+Date: Fri, 4 Nov 2016 12:15:55 -0700
+Subject: KVM: Disable irq while unregistering user notifier
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Ignacio Alvarado <ikalvarado@google.com>
+
+commit 1650b4ebc99da4c137bfbfc531be4a2405f951dd upstream.
+
+Function user_notifier_unregister should be called only once for each
+registered user notifier.
+
+Function kvm_arch_hardware_disable can be executed from an IPI context
+which could cause a race condition with a VCPU returning to user mode
+and attempting to unregister the notifier.
+
+Signed-off-by: Ignacio Alvarado <ikalvarado@google.com>
+Fixes: 18863bdd60f8 ("KVM: x86 shared msr infrastructure")
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct us
+ struct kvm_shared_msrs *locals
+ = container_of(urn, struct kvm_shared_msrs, urn);
+ struct kvm_shared_msr_values *values;
++ unsigned long flags;
+
++ /*
++ * Disabling irqs at this point since the following code could be
++ * interrupted and executed through kvm_arch_hardware_disable()
++ */
++ local_irq_save(flags);
++ if (locals->registered) {
++ locals->registered = false;
++ user_return_notifier_unregister(urn);
++ }
++ local_irq_restore(flags);
+ for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
+ values = &locals->values[slot];
+ if (values->host != values->curr) {
+@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct us
+ values->curr = values->host;
+ }
+ }
+- locals->registered = false;
+- user_return_notifier_unregister(urn);
+ }
+
+ static void shared_msr_update(unsigned slot, u32 msr)
--- /dev/null
+From 7301d6abaea926d685832f7e1f0c37dd206b01f4 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 17 Nov 2016 15:55:46 +0100
+Subject: KVM: x86: fix missed SRCU usage in kvm_lapic_set_vapic_addr
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+commit 7301d6abaea926d685832f7e1f0c37dd206b01f4 upstream.
+
+Reported by syzkaller:
+
+ [ INFO: suspicious RCU usage. ]
+ 4.9.0-rc4+ #47 Not tainted
+ -------------------------------
+ ./include/linux/kvm_host.h:536 suspicious rcu_dereference_check() usage!
+
+ stack backtrace:
+ CPU: 1 PID: 6679 Comm: syz-executor Not tainted 4.9.0-rc4+ #47
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
+ ffff880039e2f6d0 ffffffff81c2e46b ffff88003e3a5b40 0000000000000000
+ 0000000000000001 ffffffff83215600 ffff880039e2f700 ffffffff81334ea9
+ ffffc9000730b000 0000000000000004 ffff88003c4f8420 ffff88003d3f8000
+ Call Trace:
+ [< inline >] __dump_stack lib/dump_stack.c:15
+ [<ffffffff81c2e46b>] dump_stack+0xb3/0x118 lib/dump_stack.c:51
+ [<ffffffff81334ea9>] lockdep_rcu_suspicious+0x139/0x180 kernel/locking/lockdep.c:4445
+ [< inline >] __kvm_memslots include/linux/kvm_host.h:534
+ [< inline >] kvm_memslots include/linux/kvm_host.h:541
+ [<ffffffff8105d6ae>] kvm_gfn_to_hva_cache_init+0xa1e/0xce0 virt/kvm/kvm_main.c:1941
+ [<ffffffff8112685d>] kvm_lapic_set_vapic_addr+0xed/0x140 arch/x86/kvm/lapic.c:2217
+
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Fixes: fda4e2e85589191b123d31cdc21fd33ee70f50fd
+Cc: Andrew Honig <ahonig@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3372,6 +3372,7 @@ long kvm_arch_vcpu_ioctl(struct file *fi
+ };
+ case KVM_SET_VAPIC_ADDR: {
+ struct kvm_vapic_addr va;
++ int idx;
+
+ r = -EINVAL;
+ if (!lapic_in_kernel(vcpu))
+@@ -3379,7 +3380,9 @@ long kvm_arch_vcpu_ioctl(struct file *fi
+ r = -EFAULT;
+ if (copy_from_user(&va, argp, sizeof va))
+ goto out;
++ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
++ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ break;
+ }
+ case KVM_X86_SETUP_MCE: {
--- /dev/null
+From b0b6e86846093c5f8820386bc01515f857dd8faa Mon Sep 17 00:00:00 2001
+From: Yazen Ghannam <Yazen.Ghannam@amd.com>
+Date: Tue, 8 Nov 2016 09:35:06 +0100
+Subject: x86/cpu/AMD: Fix cpu_llc_id for AMD Fam17h systems
+
+From: Yazen Ghannam <Yazen.Ghannam@amd.com>
+
+commit b0b6e86846093c5f8820386bc01515f857dd8faa upstream.
+
+cpu_llc_id (Last Level Cache ID) derivation on AMD Fam17h has an
+underflow bug when extracting the socket_id value. It starts from 0
+so subtracting 1 from it will result in an invalid value. This breaks
+scheduling topology later on since the cpu_llc_id will be incorrect.
+
+For example, the cpu_llc_id of the *other* CPU in the loops in
+set_cpu_sibling_map() underflows and we're generating the funniest
+thread_siblings masks and then when I run 8 threads of nbench, they get
+spread around the LLC domains in a very strange pattern which doesn't
+give you the normal scheduling spread one would expect for performance.
+
+Other things like EDAC use cpu_llc_id so they will be b0rked too.
+
+So, the APIC ID is preset in APICx020 for bits 3 and above: they contain
+the core complex, node and socket IDs.
+
+The LLC is at the core complex level so we can find a unique cpu_llc_id
+by right shifting the APICID by 3 because then the least significant bit
+will be the Core Complex ID.
+
+Tested-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Yazen Ghannam <Yazen.Ghannam@amd.com>
+[ Cleaned up and extended the commit message. ]
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Aravind Gopalakrishnan <aravindksg.lkml@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Fixes: 3849e91f571d ("x86/AMD: Fix last level cache topology for AMD Fam17h systems")
+Link: http://lkml.kernel.org/r/20161108083506.rvqb5h4chrcptj7d@pd.tnic
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/amd.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinf
+ #ifdef CONFIG_SMP
+ unsigned bits;
+ int cpu = smp_processor_id();
+- unsigned int socket_id, core_complex_id;
+
+ bits = c->x86_coreid_bits;
+ /* Low order bits define the core id (index of core in socket) */
+@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinf
+ if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
+ return;
+
+- socket_id = (c->apicid >> bits) - 1;
+- core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
+-
+- per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
++ per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
+ #endif
+ }
+
--- /dev/null
+From d49597fd3bc7d9534de55e9256767f073be1b33a Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 9 Nov 2016 16:35:51 +0100
+Subject: x86/cpu: Deal with broken firmware (VMWare/XEN)
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit d49597fd3bc7d9534de55e9256767f073be1b33a upstream.
+
+Both ACPI and MP specifications require that the APIC id in the respective
+tables must be the same as the APIC id in CPUID.
+
+The kernel retrieves the physical package id from the APIC id during the
+ACPI/MP table scan and builds the physical to logical package map. The
+physical package id which is used after a CPU comes up is retrieved from
+CPUID. So we rely on ACPI/MP tables and CPUID agreeing in that respect.
+
+There exist VMware and XEN implementations which violate the spec. As a
+result the physical to logical package map, which relies on the ACPI/MP
+tables does not work on those systems, because the CPUID initialized
+physical package id does not match the firmware id. This causes system
+crashes and malfunction due to invalid package mappings.
+
+The only way to cure this is to sanitize the physical package id after the
+CPUID enumeration and yell when the APIC ids are different. Fix up the
+initial APIC id, which is fine as it is only used for printout purposes.
+
+If the physical package IDs differ yell and use the package information
+from the ACPI/MP tables so the existing logical package map just works.
+
+Chas provided the resulting dmesg output for his affected 4 virtual
+sockets, 1 core per socket VM:
+
+[Firmware Bug]: CPU1: APIC id mismatch. Firmware: 1 CPUID: 2
+[Firmware Bug]: CPU1: Using firmware package id 1 instead of 2
+....
+
+Reported-and-tested-by: "Charles (Chas) Williams" <ciwillia@brocade.com>,
+Reported-by: M. Vefa Bicakci <m.v.b@runbox.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Alok Kataria <akataria@vmware.com>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1611091613540.3501@nanos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/common.c | 32 ++++++++++++++++++++++++++++++--
+ 1 file changed, 30 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -979,6 +979,35 @@ static void x86_init_cache_qos(struct cp
+ }
+
+ /*
++ * The physical to logical package id mapping is initialized from the
++ * acpi/mptables information. Make sure that CPUID actually agrees with
++ * that.
++ */
++static void sanitize_package_id(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ unsigned int pkg, apicid, cpu = smp_processor_id();
++
++ apicid = apic->cpu_present_to_apicid(cpu);
++ pkg = apicid >> boot_cpu_data.x86_coreid_bits;
++
++ if (apicid != c->initial_apicid) {
++ pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n",
++ cpu, apicid, c->initial_apicid);
++ c->initial_apicid = apicid;
++ }
++ if (pkg != c->phys_proc_id) {
++ pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n",
++ cpu, pkg, c->phys_proc_id);
++ c->phys_proc_id = pkg;
++ }
++ c->logical_proc_id = topology_phys_to_logical_pkg(pkg);
++#else
++ c->logical_proc_id = 0;
++#endif
++}
++
++/*
+ * This does the hard work of actually picking apart the CPU stuff...
+ */
+ static void identify_cpu(struct cpuinfo_x86 *c)
+@@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_
+ #ifdef CONFIG_NUMA
+ numa_add_cpu(smp_processor_id());
+ #endif
+- /* The boot/hotplug time assigment got cleared, restore it */
+- c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
++ sanitize_package_id(c);
+ }
+
+ /*