--- /dev/null
+From 1f7becf1b7e21794fc9d460765fe09679bc9b9e0 Mon Sep 17 00:00:00 2001
+From: Jay Zhou <jianjay.zhou@huawei.com>
+Date: Mon, 18 Jan 2021 16:47:20 +0800
+Subject: KVM: x86: get smi pending status correctly
+
+From: Jay Zhou <jianjay.zhou@huawei.com>
+
+commit 1f7becf1b7e21794fc9d460765fe09679bc9b9e0 upstream.
+
+The injection process of smi has two steps:
+
+ Qemu KVM
+Step1:
+ cpu->interrupt_request &= \
+ ~CPU_INTERRUPT_SMI;
+ kvm_vcpu_ioctl(cpu, KVM_SMI)
+
+ call kvm_vcpu_ioctl_smi() and
+ kvm_make_request(KVM_REQ_SMI, vcpu);
+
+Step2:
+ kvm_vcpu_ioctl(cpu, KVM_RUN, 0)
+
+ call process_smi() if
+ kvm_check_request(KVM_REQ_SMI, vcpu) is
+ true, mark vcpu->arch.smi_pending = true;
+
+The vcpu->arch.smi_pending will be set to true in step2. Unfortunately,
+if the vcpu is paused between step1 and step2, kvm_run->immediate_exit
+will be set and the vcpu has to exit to Qemu immediately during step2,
+before marking vcpu->arch.smi_pending true.
+During VM migration, Qemu will get the smi pending status from KVM using
+the KVM_GET_VCPU_EVENTS ioctl during the downtime, and the smi pending
+status will then be lost.
+
+Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
+Signed-off-by: Shengen Zhuang <zhuangshengen@huawei.com>
+Message-Id: <20210118084720.1585-1-jianjay.zhou@huawei.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -97,6 +97,7 @@ static u64 __read_mostly efer_reserved_b
+
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
+ static void process_nmi(struct kvm_vcpu *vcpu);
++static void process_smi(struct kvm_vcpu *vcpu);
+ static void enter_smm(struct kvm_vcpu *vcpu);
+ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+
+@@ -3199,6 +3200,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_
+ struct kvm_vcpu_events *events)
+ {
+ process_nmi(vcpu);
++
++ if (kvm_check_request(KVM_REQ_SMI, vcpu))
++ process_smi(vcpu);
++
+ events->exception.injected =
+ vcpu->arch.exception.pending &&
+ !kvm_exception_is_soft(vcpu->arch.exception.nr);
--- /dev/null
+From 98dd2f108e448988d91e296173e773b06fb978b8 Mon Sep 17 00:00:00 2001
+From: Like Xu <like.xu@linux.intel.com>
+Date: Wed, 30 Dec 2020 16:19:16 +0800
+Subject: KVM: x86/pmu: Fix HW_REF_CPU_CYCLES event pseudo-encoding in intel_arch_events[]
+
+From: Like Xu <like.xu@linux.intel.com>
+
+commit 98dd2f108e448988d91e296173e773b06fb978b8 upstream.
+
+The HW_REF_CPU_CYCLES event on the fixed counter 2 is pseudo-encoded as
+0x0300 in the intel_perfmon_event_map[]. Correct its usage.
+
+Fixes: 62079d8a4312 ("KVM: PMU: add proper support for fixed counter 2")
+Signed-off-by: Like Xu <like.xu@linux.intel.com>
+Message-Id: <20201230081916.63417-1-like.xu@linux.intel.com>
+Reviewed-by: Sean Christopherson <seanjc@google.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/pmu_intel.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/pmu_intel.c
++++ b/arch/x86/kvm/pmu_intel.c
+@@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping
+ [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
+ [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+ [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+- [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
++ [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
+ };
+
+ /* mapping between fixed pmc index and intel_arch_events array */