x86/sev: Evict cache lines during SNP memory validation
author     Tom Lendacky <thomas.lendacky@amd.com>
           Wed, 30 Jul 2025 14:26:00 +0000 (09:26 -0500)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 15 Aug 2025 10:09:08 +0000 (12:09 +0200)
Commit 7b306dfa326f70114312b320d083b21fa9481e1e upstream.

An SNP cache coherency vulnerability requires a cache line eviction
mitigation when validating memory after a page state change to private.
The specific mitigation is to touch the first and last byte of each 4K
page that is being validated. There is no need for the mitigation when
changing a page state to shared and rescinding validation.
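
To illustrate the access pattern, here is a minimal user-space sketch
(not kernel code; it assumes a 4K PAGE_SIZE, and touch_page_extremes is
a hypothetical name). The real in-kernel helper is sev_evict_cache() in
the diff below:

#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096

/*
 * Sketch of the mitigation: read the first and last byte of each 4K
 * page in [buf, buf + npages * PAGE_SIZE). On affected SNP guests,
 * reads with the guest key evict the cache lines of the whole page
 * without the overhead of a full CLFLUSH sequence.
 */
static void touch_page_extremes(const volatile uint8_t *buf, size_t npages)
{
	volatile uint8_t val;
	size_t i;

	for (i = 0; i < npages; i++) {
		val = buf[i * PAGE_SIZE];
		val = buf[i * PAGE_SIZE + PAGE_SIZE - 1];
	}
	(void)val;
}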

CPUID Fn8000001F_EBX[31] defines the COHERENCY_SFW_NO bit that, when set,
indicates that the software mitigation for this vulnerability is not
needed.
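
In user space the same bit can be probed with the compiler's <cpuid.h>
helpers (a sketch for illustration; coherency_sfw_not_needed is a
hypothetical name, and the kernel instead wires the bit up through
cpuflags.c and scattered.c in the diff below):

#include <cpuid.h>
#include <stdbool.h>

/* Return true if CPUID Fn8000001F_EBX[31] (COHERENCY_SFW_NO) is set. */
static bool coherency_sfw_not_needed(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if leaf 0x8000001f is not supported. */
	if (!__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
		return false;

	return ebx & (1u << 31);
}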

Implement the mitigation and invoke it when validating memory (making it
private) and the COHERENCY_SFW_NO bit is not set, indicating the SNP guest
is vulnerable.

Co-developed-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/boot/compressed/sev.c
arch/x86/boot/cpuflags.c
arch/x86/include/asm/cpufeatures.h
arch/x86/kernel/cpu/scattered.c
arch/x86/kernel/sev-shared.c
arch/x86/kernel/sev.c

diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index 5616c3b258060ea57a5bba7a983a12425865aad0..92c9f8b79f0dc46d644074c17c26488a1f520526 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -165,6 +165,13 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
         */
        if (op == SNP_PAGE_STATE_PRIVATE && pvalidate(paddr, RMP_PG_SIZE_4K, 1))
                sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
+
+       /*
+        * If validating memory (making it private) and affected by the
+        * cache-coherency vulnerability, perform the cache eviction mitigation.
+        */
+       if (op == SNP_PAGE_STATE_PRIVATE && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
+               sev_evict_cache((void *)paddr, 1);
 }
 
 void snp_set_page_private(unsigned long paddr)
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c
index d75237ba7ce94415de457a38d41c38df69c0c8a7..5660d3229d29c2de1fd87c1486b9f6de235667bd 100644
--- a/arch/x86/boot/cpuflags.c
+++ b/arch/x86/boot/cpuflags.c
@@ -115,5 +115,18 @@ void get_cpuflags(void)
                        cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
                              &cpu.flags[1]);
                }
+
+               if (max_amd_level >= 0x8000001f) {
+                       u32 ebx;
+
+                       /*
+                        * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
+                        * the virtualization flags entry (word 8) and set by
+                        * scattered.c, so the bit needs to be explicitly set.
+                        */
+                       cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
+                       if (ebx & BIT(31))
+                               set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
+               }
        }
 }
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 311cc58f29581d9dd56cce1b0dc8c05601b5601a..199441d11fbbab520e5a4345564d234e32452c98 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
 #define X86_FEATURE_FLEXPRIORITY       ( 8*32+ 1) /* Intel FlexPriority */
 #define X86_FEATURE_EPT                        ( 8*32+ 2) /* Intel Extended Page Table */
 #define X86_FEATURE_VPID               ( 8*32+ 3) /* Intel Virtual Processor ID */
+#define X86_FEATURE_COHERENCY_SFW_NO   ( 8*32+ 4) /* "" SNP cache coherency software work around not needed */
 
 #define X86_FEATURE_VMMCALL            ( 8*32+15) /* Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV              ( 8*32+16) /* "" Xen paravirtual guest */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 7a42e699f6e39a5b07476492f9e5b7c0ffe9eaa1..8fad19ec855514625a69d3b44feae78bd0694ea4 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -46,6 +46,7 @@ static const struct cpuid_bit cpuid_bits[] = {
        { X86_FEATURE_CPB,              CPUID_EDX,  9, 0x80000007, 0 },
        { X86_FEATURE_PROC_FEEDBACK,    CPUID_EDX, 11, 0x80000007, 0 },
        { X86_FEATURE_MBA,              CPUID_EBX,  6, 0x80000008, 0 },
+       { X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
        { X86_FEATURE_SMBA,             CPUID_EBX,  2, 0x80000020, 0 },
        { X86_FEATURE_BMEC,             CPUID_EBX,  3, 0x80000020, 0 },
        { X86_FEATURE_TSA_SQ_NO,        CPUID_ECX,  1, 0x80000021, 0 },
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index acbec4de3ec31ace1761ecb48138e5056a4e0501..b90dfa46ec5b57634f0fb97c2dcc85c54e20ca19 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -1068,6 +1068,24 @@ static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
        }
 }
 
+static inline void sev_evict_cache(void *va, int npages)
+{
+       volatile u8 val __always_unused;
+       u8 *bytes = va;
+       int page_idx;
+
+       /*
+        * For SEV guests, a read from the first/last cache-lines of a 4K page
+        * using the guest key is sufficient to cause a flush of all cache-lines
+        * associated with that 4K page without incurring all the overhead of a
+        * full CLFLUSH sequence.
+        */
+       for (page_idx = 0; page_idx < npages; page_idx++) {
+               val = bytes[page_idx * PAGE_SIZE];
+               val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
+       }
+}
+
 static void pvalidate_pages(struct snp_psc_desc *desc)
 {
        struct psc_entry *e;
@@ -1100,6 +1118,24 @@ static void pvalidate_pages(struct snp_psc_desc *desc)
                        sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
                }
        }
+
+       /*
+        * If not affected by the cache-coherency vulnerability there is no need
+        * to perform the cache eviction mitigation.
+        */
+       if (cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+               return;
+
+       for (i = 0; i <= desc->hdr.end_entry; i++) {
+               e = &desc->entries[i];
+
+               /*
+                * If validating memory (making it private) perform the cache
+                * eviction mitigation.
+                */
+               if (e->operation == SNP_PAGE_STATE_PRIVATE)
+                       sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
+       }
 }
 
 static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 9905dc0e0b0960e50124a45a959f9ea26356f31a..c6a9a9d3ff2f3729926db24d52c6f21c62f9e99d 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -688,12 +688,14 @@ static void __head
 early_set_pages_state(unsigned long vaddr, unsigned long paddr,
                      unsigned long npages, enum psc_op op)
 {
-       unsigned long paddr_end;
+       unsigned long vaddr_begin, paddr_end;
        u64 val;
        int ret;
 
        vaddr = vaddr & PAGE_MASK;
 
+       vaddr_begin = vaddr;
+
        paddr = paddr & PAGE_MASK;
        paddr_end = paddr + (npages << PAGE_SHIFT);
 
@@ -736,6 +738,13 @@ early_set_pages_state(unsigned long vaddr, unsigned long paddr,
                paddr += PAGE_SIZE;
        }
 
+       /*
+        * If validating memory (making it private) and affected by the
+        * cache-coherency vulnerability, perform the cache eviction mitigation.
+        */
+       if (op == SNP_PAGE_STATE_PRIVATE && !cpu_feature_enabled(X86_FEATURE_COHERENCY_SFW_NO))
+               sev_evict_cache((void *)vaddr_begin, npages);
+
        return;
 
 e_term: