git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 12 May 2022 16:24:07 +0000 (18:24 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 12 May 2022 16:24:07 +0000 (18:24 +0200)
added patches:
kvm-x86-svm-account-for-family-17h-event-renumberings-in-amd_pmc_perf_hw_id.patch

queue-5.4/kvm-x86-svm-account-for-family-17h-event-renumberings-in-amd_pmc_perf_hw_id.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/kvm-x86-svm-account-for-family-17h-event-renumberings-in-amd_pmc_perf_hw_id.patch b/queue-5.4/kvm-x86-svm-account-for-family-17h-event-renumberings-in-amd_pmc_perf_hw_id.patch
new file mode 100644 (file)
index 0000000..eaca8c0
--- /dev/null
@@ -0,0 +1,86 @@
+From 5eb849322d7f7ae9d5c587c7bc3b4f7c6872cd2f Mon Sep 17 00:00:00 2001
+From: Kyle Huey <me@kylehuey.com>
+Date: Mon, 2 May 2022 22:01:36 -0700
+Subject: KVM: x86/svm: Account for family 17h event renumberings in amd_pmc_perf_hw_id
+
+From: Kyle Huey <me@kylehuey.com>
+
+commit 5eb849322d7f7ae9d5c587c7bc3b4f7c6872cd2f upstream.
+
+Zen renumbered some of the performance counters that correspond to the
+well known events in perf_hw_id. This code in KVM was never updated for
+that, so guests that attempt to use counters on Zen that correspond to the
+pre-Zen perf_hw_id values will silently receive the wrong values.
+
+This has been observed in the wild with rr[0] when running in Zen 3
+guests. rr uses the retired conditional branch counter 00d1 which is
+incorrectly recognized by KVM as PERF_COUNT_HW_STALLED_CYCLES_BACKEND.
+
+[0] https://rr-project.org/
+
+Signed-off-by: Kyle Huey <me@kylehuey.com>
+Message-Id: <20220503050136.86298-1-khuey@kylehuey.com>
+Cc: stable@vger.kernel.org
+[Check guest family, not host. - Paolo]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+[Backport to 5.15: adjusted context]
+Signed-off-by: Kyle Huey <me@kylehuey.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/pmu_amd.c |   28 +++++++++++++++++++++++++---
+ 1 file changed, 25 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/pmu_amd.c
++++ b/arch/x86/kvm/pmu_amd.c
+@@ -44,6 +44,22 @@ static struct kvm_event_hw_type_mapping
+       [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
+ };
++/* duplicated from amd_f17h_perfmon_event_map. */
++static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
++      [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
++      [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
++      [2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
++      [3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
++      [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
++      [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
++      [6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
++      [7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
++};
++
++/* amd_pmc_perf_hw_id depends on these being the same size */
++static_assert(ARRAY_SIZE(amd_event_mapping) ==
++           ARRAY_SIZE(amd_f17h_event_mapping));
++
+ static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
+ {
+       struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+@@ -128,19 +144,25 @@ static inline struct kvm_pmc *get_gp_pmc
+ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
+ {
++      struct kvm_event_hw_type_mapping *event_mapping;
+       u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+       u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+       int i;
++      if (guest_cpuid_family(pmc->vcpu) >= 0x17)
++              event_mapping = amd_f17h_event_mapping;
++      else
++              event_mapping = amd_event_mapping;
++
+       for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
+-              if (amd_event_mapping[i].eventsel == event_select
+-                  && amd_event_mapping[i].unit_mask == unit_mask)
++              if (event_mapping[i].eventsel == event_select
++                  && event_mapping[i].unit_mask == unit_mask)
+                       break;
+       if (i == ARRAY_SIZE(amd_event_mapping))
+               return PERF_COUNT_HW_MAX;
+-      return amd_event_mapping[i].event_type;
++      return event_mapping[i].event_type;
+ }
+ /* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
index ec47a725357d23eff3ad701cf65c46a77f43982c..762eebd40ef32a38aa632fd45ad3784af9de5587 100644 (file)
@@ -11,3 +11,4 @@ x86-asm-allow-to-pass-macros-to-__asm_form.patch
 x86-xen-kvm-gather-the-definition-of-emulate-prefixes.patch
 x86-xen-insn-decode-xen-and-kvm-emulate-prefix-signature.patch
 x86-kprobes-prohibit-probing-on-instruction-which-has-emulate-prefix.patch
+kvm-x86-svm-account-for-family-17h-event-renumberings-in-amd_pmc_perf_hw_id.patch