Merge branch 'linus' into x86/mm to pick up fixes and to fix conflicts
arch/x86/kernel/cpu/amd.c
index 3b9e220621f83c8a5161e8b57b297233370f72ea..110ca5d2bb872a7f15cffe4349ffab74b03a4c86 100644
@@ -548,8 +548,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 
 static void early_init_amd(struct cpuinfo_x86 *c)
 {
+       u32 dummy;
+
        early_init_amd_mc(c);
 
+       rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
+
        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states
@@ -612,6 +616,27 @@ static void early_init_amd(struct cpuinfo_x86 *c)
         */
        if (cpu_has_amd_erratum(c, amd_erratum_400))
                set_cpu_bug(c, X86_BUG_AMD_E400);
+
+       /*
+        * BIOS support is required for SME. If BIOS has enabled SME then
+        * adjust x86_phys_bits by the SME physical address space reduction
+        * value. If BIOS has not enabled SME then don't advertise the
+        * feature (set in scattered.c). Also, since the SME support requires
+        * long mode, don't advertise the feature under CONFIG_X86_32.
+        */
+       if (cpu_has(c, X86_FEATURE_SME)) {
+               u64 msr;
+
+               /* Check if SME is enabled */
+               rdmsrl(MSR_K8_SYSCFG, msr);
+               if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT) {
+                       c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
+                       if (IS_ENABLED(CONFIG_X86_32))
+                               clear_cpu_cap(c, X86_FEATURE_SME);
+               } else {
+                       clear_cpu_cap(c, X86_FEATURE_SME);
+               }
+       }
 }
 
 static void init_amd_k8(struct cpuinfo_x86 *c)
@@ -730,8 +755,6 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
 
 static void init_amd(struct cpuinfo_x86 *c)
 {
-       u32 dummy;
-
        early_init_amd(c);
 
        /*
@@ -793,8 +816,6 @@ static void init_amd(struct cpuinfo_x86 *c)
        if (c->x86 > 0x11)
                set_cpu_cap(c, X86_FEATURE_ARAT);
 
-       rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
-
        /* 3DNow or LM implies PREFETCHW */
        if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
                if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
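
The SME hunk above reads CPUID Fn8000_001F and subtracts the encryption-related physical address space reduction (EBX bits 11:6) from x86_phys_bits when the BIOS has enabled memory encryption in SYSCFG. As a rough user-space illustration of that CPUID leaf (not part of the patch; the field layout follows AMD's documented Fn8000_001F encoding, and the program itself is a hypothetical sketch using the GCC/Clang <cpuid.h> helper):

/* Sketch: query CPUID Fn8000_001F and report the SME-related fields the
 * kernel uses above. Assumes an x86-64 toolchain providing <cpuid.h>. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the extended leaf is not supported. */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x8000001f not available");
		return 1;
	}

	/* EAX bit 0: SME supported.
	 * EBX bits  5:0: position of the encryption (C) bit in a PTE.
	 * EBX bits 11:6: physical address bits lost when SME is enabled,
	 *                i.e. the value subtracted from x86_phys_bits. */
	printf("SME supported:            %u\n", eax & 1);
	printf("C-bit position:           %u\n", ebx & 0x3f);
	printf("x86_phys_bits reduction:  %u\n", (ebx >> 6) & 0x3f);
	return 0;
}

Note that, as the kernel comment says, the reduction is applied only when MSR_K8_SYSCFG_MEM_ENCRYPT is set; a user-space probe like this only shows what the hardware reports, not whether the BIOS actually enabled SME.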