return cr0;
}
+/* Write @val to CR0; "memory" clobber orders the write vs. memory ops. */
+static inline void set_cr0(uint64_t val)
+{
+	__asm__ __volatile__("mov %[cr0], %%cr0" : : [cr0]"r"(val) : "memory");
+}
+
/*
 * Read CR3 (page-table base + PCID).  The previous version returned the
 * local without ever reading the register — uninitialized use, UB.
 */
static inline uint64_t get_cr3(void)
{
	uint64_t cr3;

	__asm__ __volatile__("mov %%cr3, %[cr3]" : [cr3]"=r"(cr3));
	return cr3;
}
+/* Load @val into CR3 (flushes non-global TLB entries by architecture). */
+static inline void set_cr3(uint64_t val)
+{
+	__asm__ __volatile__("mov %[cr3], %%cr3" : : [cr3]"r"(val) : "memory");
+}
+
/*
 * Read CR4.  The previous version referenced an undeclared 'val', moved
 * *to* CR4 instead of from it, and fell off the end of a non-void
 * function — all three fixed; now mirrors get_cr8() below.
 */
static inline uint64_t get_cr4(void)
{
	uint64_t cr4;

	__asm__ __volatile__("mov %%cr4, %[cr4]" : [cr4]"=r"(cr4));
	return cr4;
}
+/* Read CR8, the 64-bit task-priority register. */
+static inline uint64_t get_cr8(void)
+{
+	uint64_t cr8;
+
+	__asm__ __volatile__("mov %%cr8, %0" : "=r"(cr8));
+	return cr8;
+}
+
+/* Write @val to CR8 (task-priority register). */
+static inline void set_cr8(uint64_t val)
+{
+	__asm__ __volatile__("mov %[cr8], %%cr8" : : [cr8]"r"(val) : "memory");
+}
+
static inline void set_idt(const struct desc_ptr *idt_desc)
{
__asm__ __volatile__("lidt %0"::"m"(*idt_desc));
#include "linux/psp-sev.h"
#include "sev.h"
+/*
+ * Round-trip @msr: write back its current value and assert the readback
+ * matches, i.e. MSR accesses function correctly in the encrypted guest.
+ */
+static void guest_sev_test_msr(uint32_t msr)
+{
+	const uint64_t before = rdmsr(msr);
+
+	wrmsr(msr, before);
+	GUEST_ASSERT(rdmsr(msr) == before);
+}
+
+/*
+ * Round-trip control register @reg via its get_/set_ accessor pair and
+ * assert the value survives, analogous to guest_sev_test_msr() above.
+ */
+#define guest_sev_test_reg(reg) \
+do { \
+	uint64_t val = get_##reg(); \
+	\
+	set_##reg(val); \
+	GUEST_ASSERT(val == get_##reg()); \
+} while (0)
+
+/*
+ * Sanity-check register state accesses from within a SEV guest: EFER via
+ * the MSR path, plus the CR0/CR3/CR4/CR8 control registers.
+ */
+static void guest_sev_test_regs(void)
+{
+	guest_sev_test_msr(MSR_EFER);
+	guest_sev_test_reg(cr0);
+	guest_sev_test_reg(cr3);
+	guest_sev_test_reg(cr4);
+	guest_sev_test_reg(cr8);
+}
#define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)
GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED);
GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_SNP_ENABLED);
+ guest_sev_test_regs();
+
wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
vmgexit();
}
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);
+ guest_sev_test_regs();
+
/*
* TODO: Add GHCB and ucall support for SEV-ES guests. For now, simply
* force "termination" to signal "done" via the GHCB MSR protocol.
GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV));
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
+ guest_sev_test_regs();
+
GUEST_DONE();
}