From 4f92a377e916ae2c7fd056f1282763973b1b4f66 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Wed, 16 Nov 2022 09:54:39 +0100
Subject: [PATCH] 4.9-stable patches

added patches:
	x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch

---
 queue-4.9/series                              |   1 +
 ...estore-amd-s-de_cfg-msr-after-resume.patch | 139 ++++++++++++++++++
 2 files changed, 140 insertions(+)
 create mode 100644 queue-4.9/x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch

diff --git a/queue-4.9/series b/queue-4.9/series
index fc11344a31d..07351bc56a0 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -26,3 +26,4 @@ dmaengine-at_hdmac-fix-completion-of-unissued-descriptor-in-case-of-errors.patch
 dmaengine-at_hdmac-don-t-allow-cpu-to-reorder-channel-enable.patch
 dmaengine-at_hdmac-fix-impossible-condition.patch
 dmaengine-at_hdmac-check-return-code-of-dma_async_device_register.patch
+x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch
diff --git a/queue-4.9/x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch b/queue-4.9/x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch
new file mode 100644
index 00000000000..ea756cb412c
--- /dev/null
+++ b/queue-4.9/x86-cpu-restore-amd-s-de_cfg-msr-after-resume.patch
@@ -0,0 +1,139 @@
+From 2632daebafd04746b4b96c2f26a6021bc38f6209 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov
+Date: Mon, 14 Nov 2022 12:44:01 +0100
+Subject: x86/cpu: Restore AMD's DE_CFG MSR after resume
+
+From: Borislav Petkov
+
+commit 2632daebafd04746b4b96c2f26a6021bc38f6209 upstream.
+
+DE_CFG contains the LFENCE serializing bit, restore it on resume too.
+This is relevant to older families due to the way how they do S3.
+
+Unify and correct naming while at it.
+
+Fixes: e4d0e84e4907 ("x86/cpu/AMD: Make LFENCE a serializing instruction")
+Reported-by: Andrew Cooper
+Reported-by: Pawan Gupta
+Signed-off-by: Borislav Petkov
+Cc:
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/msr-index.h |    8 +++++---
+ arch/x86/kernel/cpu/amd.c        |   10 ++++------
+ arch/x86/kvm/svm.c               |   10 +++++-----
+ arch/x86/kvm/x86.c               |    2 +-
+ arch/x86/power/cpu.c             |    1 +
+ 5 files changed, 16 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -364,6 +364,11 @@
+ #define MSR_AMD64_OSVW_STATUS		0xc0010141
+ #define MSR_AMD64_LS_CFG		0xc0011020
+ #define MSR_AMD64_DC_CFG		0xc0011022
++
++#define MSR_AMD64_DE_CFG		0xc0011029
++#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT	1
++#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE	BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
++
+ #define MSR_AMD64_BU_CFG2		0xc001102a
+ #define MSR_AMD64_IBSFETCHCTL		0xc0011030
+ #define MSR_AMD64_IBSFETCHLINAD		0xc0011031
+@@ -414,9 +419,6 @@
+ #define FAM10H_MMIO_CONF_BASE_MASK	0xfffffffULL
+ #define FAM10H_MMIO_CONF_BASE_SHIFT	20
+ #define MSR_FAM10H_NODE_ID		0xc001100c
+-#define MSR_F10H_DECFG			0xc0011029
+-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT	1
+-#define MSR_F10H_DECFG_LFENCE_SERIALIZE	BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
+ 
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1			0xc001001a
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -735,8 +735,6 @@ static void init_amd_gh(struct cpuinfo_x
+ 		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
+ }
+ 
+-#define MSR_AMD64_DE_CFG	0xC0011029
+-
+ static void init_amd_ln(struct cpuinfo_x86 *c)
+ {
+ 	/*
+@@ -898,16 +896,16 @@ static void init_amd(struct cpuinfo_x86
+ 	 * msr_set_bit() uses the safe accessors, too, even if the MSR
+ 	 * is not present.
+	 */
+-	msr_set_bit(MSR_F10H_DECFG,
+-		    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
++	msr_set_bit(MSR_AMD64_DE_CFG,
++		    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
+ 
+ 	/*
+ 	 * Verify that the MSR write was successful (could be running
+ 	 * under a hypervisor) and only then assume that LFENCE is
+ 	 * serializing.
+ 	 */
+-	ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
+-	if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
++	ret = rdmsrl_safe(MSR_AMD64_DE_CFG, &val);
++	if (!ret && (val & MSR_AMD64_DE_CFG_LFENCE_SERIALIZE)) {
+ 		/* A serializing LFENCE stops RDTSC speculation */
+ 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+ 	} else {
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3528,9 +3528,9 @@ static int svm_get_msr_feature(struct kv
+ 	msr->data = 0;
+ 
+ 	switch (msr->index) {
+-	case MSR_F10H_DECFG:
+-		if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+-			msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
++	case MSR_AMD64_DE_CFG:
++		if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
++			msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
+ 		break;
+ 	default:
+ 		return 1;
+@@ -3638,7 +3638,7 @@ static int svm_get_msr(struct kvm_vcpu *
+ 			msr_info->data = 0x1E;
+ 		}
+ 		break;
+-	case MSR_F10H_DECFG:
++	case MSR_AMD64_DE_CFG:
+ 		msr_info->data = svm->msr_decfg;
+ 		break;
+ 	default:
+@@ -3829,7 +3829,7 @@ static int svm_set_msr(struct kvm_vcpu *
+ 	case MSR_VM_IGNNE:
+ 		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
+ 		break;
+-	case MSR_F10H_DECFG: {
++	case MSR_AMD64_DE_CFG: {
+ 		struct kvm_msr_entry msr_entry;
+ 
+ 		msr_entry.index = msr->index;
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1026,7 +1026,7 @@ static unsigned num_emulated_msrs;
+  * can be used by a hypervisor to validate requested CPU features.
+  */
+ static u32 msr_based_features[] = {
+-	MSR_F10H_DECFG,
++	MSR_AMD64_DE_CFG,
+ 	MSR_IA32_UCODE_REV,
+ 	MSR_IA32_ARCH_CAPABILITIES,
+ };
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -526,6 +526,7 @@ static void pm_save_spec_msr(void)
+ 		MSR_TSX_FORCE_ABORT,
+ 		MSR_IA32_MCU_OPT_CTRL,
+ 		MSR_AMD64_LS_CFG,
++		MSR_AMD64_DE_CFG,
+ 	};
+ 
+ 	msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
-- 
2.47.3