From: Greg Kroah-Hartman
Date: Mon, 24 Jul 2023 15:24:45 +0000 (+0200)
Subject: 5.4-stable patches
X-Git-Tag: v6.1.41~8
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=6d0a1784f3c2533f11b4cff6e3e3d848a7c900fc;p=thirdparty%2Fkernel%2Fstable-queue.git

5.4-stable patches

added patches:
      x86-cpu-amd-add-a-zenbleed-fix.patch
      x86-cpu-amd-move-the-errata-checking-functionality-up.patch
---

diff --git a/queue-5.4/series b/queue-5.4/series
index fc7f0f8bae8..1e762abf70a 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -312,3 +312,5 @@ tcp-annotate-data-races-around-rskq_defer_accept.patch
 tcp-annotate-data-races-around-tp-notsent_lowat.patch
 tcp-annotate-data-races-around-fastopenq.max_qlen.patch
 tracing-histograms-return-an-error-if-we-fail-to-add-histogram-to-hist_vars-list.patch
+x86-cpu-amd-move-the-errata-checking-functionality-up.patch
+x86-cpu-amd-add-a-zenbleed-fix.patch
diff --git a/queue-5.4/x86-cpu-amd-add-a-zenbleed-fix.patch b/queue-5.4/x86-cpu-amd-add-a-zenbleed-fix.patch
new file mode 100644
index 00000000000..781910d72f9
--- /dev/null
+++ b/queue-5.4/x86-cpu-amd-add-a-zenbleed-fix.patch
@@ -0,0 +1,161 @@
+From b2d362e150f1a48e95b4224e6ad860948f48c158 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)"
+Date: Sat, 15 Jul 2023 13:41:28 +0200
+Subject: x86/cpu/amd: Add a Zenbleed fix
+
+From: "Borislav Petkov (AMD)"
+
+Upstream commit: 522b1d69219d8f083173819fde04f994aa051a98
+
+Add a fix for the Zen2 VZEROUPPER data corruption bug where under
+certain circumstances executing VZEROUPPER can cause register
+corruption or leak data.
+
+The optimal fix is through microcode but in the case the proper
+microcode revision has not been applied, enable a fallback fix using
+a chicken bit.
+
+Signed-off-by: Borislav Petkov (AMD)
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/microcode.h | 1
+ arch/x86/include/asm/microcode_amd.h | 2 +
+ arch/x86/include/asm/msr-index.h | 1
+ arch/x86/kernel/cpu/amd.c | 60 +++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/common.c | 2 +
+ 5 files changed, 66 insertions(+)
+
+--- a/arch/x86/include/asm/microcode.h
++++ b/arch/x86/include/asm/microcode.h
+@@ -5,6 +5,7 @@
+ #include
+ #include
+ #include
++#include
+
+ struct ucode_patch {
+ struct list_head plist;
+--- a/arch/x86/include/asm/microcode_amd.h
++++ b/arch/x86/include/asm/microcode_amd.h
+@@ -48,11 +48,13 @@ extern void __init load_ucode_amd_bsp(un
+ extern void load_ucode_amd_ap(unsigned int family);
+ extern int __init save_microcode_in_initrd_amd(unsigned int family);
+ void reload_ucode_amd(unsigned int cpu);
++extern void amd_check_microcode(void);
+ #else
+ static inline void __init load_ucode_amd_bsp(unsigned int family) {}
+ static inline void load_ucode_amd_ap(unsigned int family) {}
+ static inline int __init
+ save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
+ static inline void reload_ucode_amd(unsigned int cpu) {}
++static inline void amd_check_microcode(void) {}
+ #endif
+ #endif /* _ASM_X86_MICROCODE_AMD_H */
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -462,6 +462,7 @@
+ #define MSR_AMD64_DE_CFG 0xc0011029
+ #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
+ #define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
++#define MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT 9
+
+ #define MSR_AMD64_BU_CFG2 0xc001102a
+ #define MSR_AMD64_IBSFETCHCTL 0xc0011030
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -69,6 +69,11 @@ static const int amd_erratum_383[] =
+ static const int amd_erratum_1054[] =
+ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+
++static const int amd_zenbleed[] =
++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf),
++ AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf),
++ AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf));
++
+ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+ {
+ int osvw_id = *erratum++;
+@@ -980,6 +985,47 @@ static void init_amd_zn(struct cpuinfo_x
+ }
+ }
+
++static bool cpu_has_zenbleed_microcode(void)
++{
++ u32 good_rev = 0;
++
++ switch (boot_cpu_data.x86_model) {
++ case 0x30 ... 0x3f: good_rev = 0x0830107a; break;
++ case 0x60 ... 0x67: good_rev = 0x0860010b; break;
++ case 0x68 ... 0x6f: good_rev = 0x08608105; break;
++ case 0x70 ... 0x7f: good_rev = 0x08701032; break;
++ case 0xa0 ... 0xaf: good_rev = 0x08a00008; break;
++
++ default:
++ return false;
++ break;
++ }
++
++ if (boot_cpu_data.microcode < good_rev)
++ return false;
++
++ return true;
++}
++
++static void zenbleed_check(struct cpuinfo_x86 *c)
++{
++ if (!cpu_has_amd_erratum(c, amd_zenbleed))
++ return;
++
++ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
++ return;
++
++ if (!cpu_has(c, X86_FEATURE_AVX))
++ return;
++
++ if (!cpu_has_zenbleed_microcode()) {
++ pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
++ msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
++ } else {
++ msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
++ }
++}
++
+ static void init_amd(struct cpuinfo_x86 *c)
+ {
+ early_init_amd(c);
+@@ -1067,6 +1113,8 @@ static void init_amd(struct cpuinfo_x86
+ msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
+
+ check_null_seg_clears_base(c);
++
++ zenbleed_check(c);
+ }
+
+ #ifdef CONFIG_X86_32
+@@ -1180,3 +1228,15 @@ void set_dr_addr_mask(unsigned long mask
+ break;
+ }
+ }
++
++static void zenbleed_check_cpu(void *unused)
++{
++ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
++
++ zenbleed_check(c);
++}
++
++void amd_check_microcode(void)
++{
++ on_each_cpu(zenbleed_check_cpu, NULL, 1);
++}
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -2125,6 +2125,8 @@ void microcode_check(void)
+
+ perf_check_microcode();
+
++ amd_check_microcode();
++
+ /* Reload CPUID max function as it might've changed. */
+ info.cpuid_level = cpuid_eax(0);
+
diff --git a/queue-5.4/x86-cpu-amd-move-the-errata-checking-functionality-up.patch b/queue-5.4/x86-cpu-amd-move-the-errata-checking-functionality-up.patch
new file mode 100644
index 00000000000..26d7baf7bdf
--- /dev/null
+++ b/queue-5.4/x86-cpu-amd-move-the-errata-checking-functionality-up.patch
@@ -0,0 +1,181 @@
+From 334baad709246598bfd30587a0e98b0d90f3f596 Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)"
+Date: Sat, 15 Jul 2023 13:31:32 +0200
+Subject: x86/cpu/amd: Move the errata checking functionality up
+
+From: "Borislav Petkov (AMD)"
+
+Upstream commit: 8b6f687743dacce83dbb0c7cfacf88bab00f808a
+
+Avoid new and remove old forward declarations.
+
+No functional changes.
+
+Signed-off-by: Borislav Petkov (AMD)
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/kernel/cpu/amd.c | 139 ++++++++++++++++++++++------------------------
+ 1 file changed, 67 insertions(+), 72 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -26,11 +26,6 @@
+
+ #include "cpu.h"
+
+-static const int amd_erratum_383[];
+-static const int amd_erratum_400[];
+-static const int amd_erratum_1054[];
+-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
+-
+ /*
+ * nodes_per_socket: Stores the number of nodes per socket.
+ * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
+@@ -38,6 +33,73 @@ static bool cpu_has_amd_erratum(struct c
+ */
+ static u32 nodes_per_socket = 1;
+
++/*
++ * AMD errata checking
++ *
++ * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
++ * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
++ * have an OSVW id assigned, which it takes as first argument. Both take a
++ * variable number of family-specific model-stepping ranges created by
++ * AMD_MODEL_RANGE().
++ *
++ * Example:
++ *
++ * const int amd_erratum_319[] =
++ * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
++ * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
++ * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
++ */
++
++#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
++#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
++#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
++ ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
++#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
++#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
++#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
++
++static const int amd_erratum_400[] =
++ AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
++ AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
++
++static const int amd_erratum_383[] =
++ AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
++
++/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
++static const int amd_erratum_1054[] =
++ AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
++
++static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
++{
++ int osvw_id = *erratum++;
++ u32 range;
++ u32 ms;
++
++ if (osvw_id >= 0 && osvw_id < 65536 &&
++ cpu_has(cpu, X86_FEATURE_OSVW)) {
++ u64 osvw_len;
++
++ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
++ if (osvw_id < osvw_len) {
++ u64 osvw_bits;
++
++ rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
++ osvw_bits);
++ return osvw_bits & (1ULL << (osvw_id & 0x3f));
++ }
++ }
++
++ /* OSVW unavailable or ID unknown, match family-model-stepping range */
++ ms = (cpu->x86_model << 4) | cpu->x86_stepping;
++ while ((range = *erratum++))
++ if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
++ (ms >= AMD_MODEL_RANGE_START(range)) &&
++ (ms <= AMD_MODEL_RANGE_END(range)))
++ return true;
++
++ return false;
++}
++
+ static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+ {
+ u32 gprs[8] = { 0 };
+@@ -1100,73 +1162,6 @@ static const struct cpu_dev amd_cpu_dev
+
+ cpu_dev_register(amd_cpu_dev);
+
+-/*
+- * AMD errata checking
+- *
+- * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
+- * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
+- * have an OSVW id assigned, which it takes as first argument. Both take a
+- * variable number of family-specific model-stepping ranges created by
+- * AMD_MODEL_RANGE().
+- *
+- * Example:
+- *
+- * const int amd_erratum_319[] =
+- * AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
+- * AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
+- * AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
+- */
+-
+-#define AMD_LEGACY_ERRATUM(...) { -1, __VA_ARGS__, 0 }
+-#define AMD_OSVW_ERRATUM(osvw_id, ...) { osvw_id, __VA_ARGS__, 0 }
+-#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
+- ((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
+-#define AMD_MODEL_RANGE_FAMILY(range) (((range) >> 24) & 0xff)
+-#define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
+-#define AMD_MODEL_RANGE_END(range) ((range) & 0xfff)
+-
+-static const int amd_erratum_400[] =
+- AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
+- AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
+-
+-static const int amd_erratum_383[] =
+- AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+-
+-/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
+-static const int amd_erratum_1054[] =
+- AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+-
+-static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
+-{
+- int osvw_id = *erratum++;
+- u32 range;
+- u32 ms;
+-
+- if (osvw_id >= 0 && osvw_id < 65536 &&
+- cpu_has(cpu, X86_FEATURE_OSVW)) {
+- u64 osvw_len;
+-
+- rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
+- if (osvw_id < osvw_len) {
+- u64 osvw_bits;
+-
+- rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
+- osvw_bits);
+- return osvw_bits & (1ULL << (osvw_id & 0x3f));
+- }
+- }
+-
+- /* OSVW unavailable or ID unknown, match family-model-stepping range */
+- ms = (cpu->x86_model << 4) | cpu->x86_stepping;
+- while ((range = *erratum++))
+- if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
+- (ms >= AMD_MODEL_RANGE_START(range)) &&
+- (ms <= AMD_MODEL_RANGE_END(range)))
+- return true;
+-
+- return false;
+-}
+-
+ void set_dr_addr_mask(unsigned long mask, int dr)
+ {
+ if (!boot_cpu_has(X86_FEATURE_BPEXT))