git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: selftests: Verify SEV+ guests can read and write EFER, CR0, CR4, and CR8
author: Sean Christopherson <seanjc@google.com>
Tue, 10 Mar 2026 21:18:41 +0000 (14:18 -0700)
committer: Paolo Bonzini <pbonzini@redhat.com>
Thu, 12 Mar 2026 16:31:53 +0000 (17:31 +0100)
Add "do no harm" testing of EFER, CR0, CR4, and CR8 for SEV+ guests to
verify that the guest can read and write the registers, without hitting
e.g. a #VC on SEV-ES guests due to KVM incorrectly trying to intercept a
register.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-ID: <20260310211841.2552361-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/include/x86/processor.h
tools/testing/selftests/kvm/x86/sev_smoke_test.c

index 4ebae4269e68113a71172e2a0d4ace0fd13f9002..469a221221575475c970c9e23aac42f30b37a3f3 100644 (file)
@@ -557,6 +557,11 @@ static inline uint64_t get_cr0(void)
        return cr0;
 }
 
+/*
+ * Write @val to CR0 with a raw "mov"; no validation of the value.  The
+ * "memory" clobber keeps the compiler from reordering memory accesses
+ * across the control-register write.
+ */
+static inline void set_cr0(uint64_t val)
+{
+       __asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory");
+}
+
 static inline uint64_t get_cr3(void)
 {
        uint64_t cr3;
@@ -566,6 +571,11 @@ static inline uint64_t get_cr3(void)
        return cr3;
 }
 
+/*
+ * Write @val to CR3 with a raw "mov"; no validation of the value.  The
+ * "memory" clobber keeps the compiler from reordering memory accesses
+ * across the control-register write.
+ */
+static inline void set_cr3(uint64_t val)
+{
+       __asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory");
+}
+
 static inline uint64_t get_cr4(void)
 {
        uint64_t cr4;
@@ -580,6 +590,19 @@ static inline void set_cr4(uint64_t val)
        __asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
 }
 
+/* Read and return the current value of CR8. */
+static inline uint64_t get_cr8(void)
+{
+       uint64_t cr8;
+
+       __asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8));
+       return cr8;
+}
+
+/*
+ * Write @val to CR8 with a raw "mov"; no validation of the value.  The
+ * "memory" clobber keeps the compiler from reordering memory accesses
+ * across the control-register write.
+ */
+static inline void set_cr8(uint64_t val)
+{
+       __asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory");
+}
+
 static inline void set_idt(const struct desc_ptr *idt_desc)
 {
        __asm__ __volatile__("lidt %0"::"m"(*idt_desc));
index 86ad1c7d068f29abec7f8c8dd62b12a4f7152222..8bd37a476f15959e6fd9419a63e1dce0e53802da 100644 (file)
 #include "linux/psp-sev.h"
 #include "sev.h"
 
+/*
+ * "Do no harm" check for @msr: read the current value, write it straight
+ * back, and assert a second read still returns the same value.  Any
+ * unexpected intercept/fault (e.g. a #VC in an SEV-ES guest) will abort
+ * the guest before the assert is reached.
+ */
+static void guest_sev_test_msr(uint32_t msr)
+{
+       uint64_t val = rdmsr(msr);
+
+       wrmsr(msr, val);
+       GUEST_ASSERT(val == rdmsr(msr));
+}
+
+/*
+ * "Do no harm" check for a control register: token-paste @reg onto the
+ * get_/set_ accessor names, read the register, write the value back, and
+ * assert a re-read matches.  Wrapped in do/while (0) so the macro behaves
+ * as a single statement at the call site.
+ */
+#define guest_sev_test_reg(reg)                        \
+do {                                           \
+       uint64_t val = get_##reg();             \
+                                               \
+       set_##reg(val);                         \
+       GUEST_ASSERT(val == get_##reg());       \
+} while (0)
+
+/*
+ * Run the read/write-back "do no harm" checks on EFER and CR0/CR3/CR4/CR8
+ * to verify the (SEV+) guest can access them without a spurious intercept.
+ */
+static void guest_sev_test_regs(void)
+{
+       guest_sev_test_msr(MSR_EFER);
+       guest_sev_test_reg(cr0);
+       guest_sev_test_reg(cr3);
+       guest_sev_test_reg(cr4);
+       guest_sev_test_reg(cr8);
+}
 
 #define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)
 
@@ -24,6 +48,8 @@ static void guest_snp_code(void)
        GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED);
        GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_SNP_ENABLED);
 
+       guest_sev_test_regs();
+
        wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
        vmgexit();
 }
@@ -34,6 +60,8 @@ static void guest_sev_es_code(void)
        GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
        GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);
 
+       guest_sev_test_regs();
+
        /*
         * TODO: Add GHCB and ucall support for SEV-ES guests.  For now, simply
         * force "termination" to signal "done" via the GHCB MSR protocol.
@@ -47,6 +75,8 @@ static void guest_sev_code(void)
        GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV));
        GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
 
+       guest_sev_test_regs();
+
        GUEST_DONE();
 }