git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.19-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 16 Nov 2022 08:54:56 +0000 (09:54 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 16 Nov 2022 08:54:56 +0000 (09:54 +0100)
added patches:
x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch

queue-4.19/series
queue-4.19/x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch [new file with mode: 0644]

diff --git a/queue-4.19/series b/queue-4.19/series
index d437f0a24550aa01530919355ef4cc58dde0a148..ae785d76f4e91c2bfd6d11d9291dac355a6349f2 100644
--- a/queue-4.19/series
+++ b/queue-4.19/series
@@ -39,3 +39,4 @@ dmaengine-at_hdmac-don-t-allow-cpu-to-reorder-channel-enable.patch
 dmaengine-at_hdmac-fix-impossible-condition.patch
 dmaengine-at_hdmac-check-return-code-of-dma_async_device_register.patch
 net-tun-call-napi_schedule_prep-to-ensure-we-own-a-napi.patch
+x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch
diff --git a/queue-4.19/x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch b/queue-4.19/x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch
new file mode 100644
index 0000000..9aabca4
--- /dev/null
+++ b/queue-4.19/x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch
@@ -0,0 +1,164 @@
+From 2632daebafd04746b4b96c2f26a6021bc38f6209 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Mon, 14 Nov 2022 12:44:01 +0100
+Subject: x86/cpu: Restore AMD's DE_CFG MSR after resume
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 2632daebafd04746b4b96c2f26a6021bc38f6209 upstream.
+
+DE_CFG contains the LFENCE serializing bit; restore it on resume too.
+This is relevant to older families due to the way they do S3.
+
+Unify and correct naming while at it.
+
+Fixes: e4d0e84e4907 ("x86/cpu/AMD: Make LFENCE a serializing instruction")
+Reported-by: Andrew Cooper <Andrew.Cooper3@citrix.com>
+Reported-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: <stable@kernel.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/msr-index.h          |    8 +++++---
+ arch/x86/kernel/cpu/amd.c                 |   10 ++++------
+ arch/x86/kvm/svm.c                        |   10 +++++-----
+ arch/x86/kvm/x86.c                        |    2 +-
+ arch/x86/power/cpu.c                      |    1 +
+ tools/testing/selftests/kvm/include/x86.h |    8 +++++---
+ 6 files changed, 21 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -389,6 +389,11 @@
+ #define MSR_AMD64_OSVW_STATUS         0xc0010141
+ #define MSR_AMD64_LS_CFG              0xc0011020
+ #define MSR_AMD64_DC_CFG              0xc0011022
++
++#define MSR_AMD64_DE_CFG              0xc0011029
++#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT  1
++#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE     BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
++
+ #define MSR_AMD64_BU_CFG2             0xc001102a
+ #define MSR_AMD64_IBSFETCHCTL         0xc0011030
+ #define MSR_AMD64_IBSFETCHLINAD               0xc0011031
+@@ -457,9 +462,6 @@
+ #define FAM10H_MMIO_CONF_BASE_MASK    0xfffffffULL
+ #define FAM10H_MMIO_CONF_BASE_SHIFT   20
+ #define MSR_FAM10H_NODE_ID            0xc001100c
+-#define MSR_F10H_DECFG                        0xc0011029
+-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT   1
+-#define MSR_F10H_DECFG_LFENCE_SERIALIZE               BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
+ 
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1                       0xc001001a
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -789,8 +789,6 @@ static void init_amd_gh(struct cpuinfo_x
+               set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
+ }
+ 
+-#define MSR_AMD64_DE_CFG      0xC0011029
+-
+ static void init_amd_ln(struct cpuinfo_x86 *c)
+ {
+       /*
+@@ -951,16 +949,16 @@ static void init_amd(struct cpuinfo_x86
+                * msr_set_bit() uses the safe accessors, too, even if the MSR
+                * is not present.
+                */
+-              msr_set_bit(MSR_F10H_DECFG,
+-                          MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
++              msr_set_bit(MSR_AMD64_DE_CFG,
++                          MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
+ 
+               /*
+                * Verify that the MSR write was successful (could be running
+                * under a hypervisor) and only then assume that LFENCE is
+                * serializing.
+                */
+-              ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+-              if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
++              ret = rdmsrl_safe(MSR_AMD64_DE_CFG, &val);
++              if (!ret && (val & MSR_AMD64_DE_CFG_LFENCE_SERIALIZE)) {
+                       /* A serializing LFENCE stops RDTSC speculation */
+                       set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+               } else {
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4154,9 +4154,9 @@ static int svm_get_msr_feature(struct kv
+       msr->data = 0;
+ 
+       switch (msr->index) {
+-      case MSR_F10H_DECFG:
+-              if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+-                      msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
++      case MSR_AMD64_DE_CFG:
++              if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
++                      msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
+               break;
+       default:
+               return 1;
+@@ -4258,7 +4258,7 @@ static int svm_get_msr(struct kvm_vcpu *
+                       msr_info->data = 0x1E;
+               }
+               break;
+-      case MSR_F10H_DECFG:
++      case MSR_AMD64_DE_CFG:
+               msr_info->data = svm->msr_decfg;
+               break;
+       default:
+@@ -4445,7 +4445,7 @@ static int svm_set_msr(struct kvm_vcpu *
+       case MSR_VM_IGNNE:
+               vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
+               break;
+-      case MSR_F10H_DECFG: {
++      case MSR_AMD64_DE_CFG: {
+               struct kvm_msr_entry msr_entry;
+ 
+               msr_entry.index = msr->index;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1156,7 +1156,7 @@ static u32 msr_based_features[] = {
+       MSR_IA32_VMX_EPT_VPID_CAP,
+       MSR_IA32_VMX_VMFUNC,
+ 
+-      MSR_F10H_DECFG,
++      MSR_AMD64_DE_CFG,
+       MSR_IA32_UCODE_REV,
+       MSR_IA32_ARCH_CAPABILITIES,
+ };
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -533,6 +533,7 @@ static void pm_save_spec_msr(void)
+               MSR_TSX_FORCE_ABORT,
+               MSR_IA32_MCU_OPT_CTRL,
+               MSR_AMD64_LS_CFG,
++              MSR_AMD64_DE_CFG,
+       };
+ 
+       msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+--- a/tools/testing/selftests/kvm/include/x86.h
++++ b/tools/testing/selftests/kvm/include/x86.h
+@@ -614,6 +614,11 @@ void vcpu_load_state(struct kvm_vm *vm,
+ #define MSR_AMD64_OSVW_STATUS         0xc0010141
+ #define MSR_AMD64_LS_CFG              0xc0011020
+ #define MSR_AMD64_DC_CFG              0xc0011022
++
++#define MSR_AMD64_DE_CFG              0xc0011029
++#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
++#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE     BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
++
+ #define MSR_AMD64_BU_CFG2             0xc001102a
+ #define MSR_AMD64_IBSFETCHCTL         0xc0011030
+ #define MSR_AMD64_IBSFETCHLINAD               0xc0011031
+@@ -664,9 +669,6 @@ void vcpu_load_state(struct kvm_vm *vm,
+ #define FAM10H_MMIO_CONF_BASE_MASK    0xfffffffULL
+ #define FAM10H_MMIO_CONF_BASE_SHIFT   20
+ #define MSR_FAM10H_NODE_ID            0xc001100c
+-#define MSR_F10H_DECFG                        0xc0011029
+-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT   1
+-#define MSR_F10H_DECFG_LFENCE_SERIALIZE               BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
+ 
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1                       0xc001001a