git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: SVM: Decrypt SEV VMSA in dump_vmcb() if debugging is enabled
authorTom Lendacky <thomas.lendacky@amd.com>
Thu, 20 Mar 2025 13:26:49 +0000 (08:26 -0500)
committerSean Christopherson <seanjc@google.com>
Fri, 25 Apr 2025 23:19:52 +0000 (16:19 -0700)
An SEV-ES/SEV-SNP VM save area (VMSA) can be decrypted if the guest
policy allows debugging. Update the dump_vmcb() routine to output
some of the SEV VMSA contents if possible. This can be useful for
debug purposes.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Acked-by: Borislav Petkov (AMD) <bp@alien8.de>
Tested-by: Kim Phillips <kim.phillips@amd.com>
Link: https://lore.kernel.org/r/ea3b852c295b6f4b200925ed6b6e2c90d9475e71.1742477213.git.thomas.lendacky@amd.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

index 7d11e82e4fa419afef7c0ee56fe308edc3ddd0ab..11695362b6b2a6b4d4fff94b5004fafc4de58014 100644 (file)
@@ -560,6 +560,8 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
                return -EFAULT;
 
+       sev->policy = params.policy;
+
        memset(&start, 0, sizeof(start));
 
        dh_blob = NULL;
@@ -2199,6 +2201,8 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (params.policy & SNP_POLICY_MASK_SINGLE_SOCKET)
                return -EINVAL;
 
+       sev->policy = params.policy;
+
        sev->snp_context = snp_context_create(kvm, argp);
        if (!sev->snp_context)
                return -ENOTTY;
@@ -4922,3 +4926,97 @@ int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
 
        return level;
 }
+
+/*
+ * Obtain a readable copy of an SEV-ES/SEV-SNP guest's VMSA for debug dumps.
+ *
+ * Returns the live (still plaintext) VMSA when guest state is not yet
+ * protected, a freshly allocated decrypted copy when the guest policy
+ * permits debugging, or NULL otherwise (non-SEV-ES guest, debugging
+ * disallowed by policy, allocation failure, or firmware command failure).
+ * The returned pointer must be released with sev_free_decrypted_vmsa().
+ */
+struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+       struct vmcb_save_area *vmsa;
+       struct kvm_sev_info *sev;
+       int error = 0;
+       int ret;
+
+       if (!sev_es_guest(vcpu->kvm))
+               return NULL;
+
+       /*
+        * If the VMSA has not yet been encrypted, return a pointer to the
+        * current un-encrypted VMSA.
+        */
+       if (!vcpu->arch.guest_state_protected)
+               return (struct vmcb_save_area *)svm->sev_es.vmsa;
+
+       sev = to_kvm_sev_info(vcpu->kvm);
+
+       /* Check if the SEV policy allows debugging */
+       if (sev_snp_guest(vcpu->kvm)) {
+               /* SNP policy: debugging is opt-in via the DEBUG bit. */
+               if (!(sev->policy & SNP_POLICY_DEBUG))
+                       return NULL;
+       } else {
+               /* Legacy SEV/SEV-ES policy: debugging is opt-out via NODBG. */
+               if (sev->policy & SEV_POLICY_NODBG)
+                       return NULL;
+       }
+
+       if (sev_snp_guest(vcpu->kvm)) {
+               struct sev_data_snp_dbg dbg = {0};
+
+               /* Decrypt destination comes from the firmware page allocator. */
+               vmsa = snp_alloc_firmware_page(__GFP_ZERO);
+               if (!vmsa)
+                       return NULL;
+
+               dbg.gctx_paddr = __psp_pa(sev->snp_context);
+               dbg.src_addr = svm->vmcb->control.vmsa_pa;
+               dbg.dst_addr = __psp_pa(vmsa);
+
+               ret = sev_do_cmd(SEV_CMD_SNP_DBG_DECRYPT, &dbg, &error);
+
+               /*
+                * Return the target page to a hypervisor page no matter what.
+                * If this fails, the page can't be used, so leak it and don't
+                * try to use it.
+                */
+               if (snp_page_reclaim(vcpu->kvm, PHYS_PFN(__pa(vmsa))))
+                       return NULL;
+
+               if (ret) {
+                       pr_err("SEV: SNP_DBG_DECRYPT failed ret=%d, fw_error=%d (%#x)\n",
+                              ret, error, error);
+                       /* Page was successfully reclaimed above, safe to free. */
+                       free_page((unsigned long)vmsa);
+
+                       return NULL;
+               }
+       } else {
+               struct sev_data_dbg dbg = {0};
+               struct page *vmsa_page;
+
+               /* For non-SNP guests, decrypt into an ordinary kernel page. */
+               vmsa_page = alloc_page(GFP_KERNEL);
+               if (!vmsa_page)
+                       return NULL;
+
+               vmsa = page_address(vmsa_page);
+
+               dbg.handle = sev->handle;
+               dbg.src_addr = svm->vmcb->control.vmsa_pa;
+               dbg.dst_addr = __psp_pa(vmsa);
+               dbg.len = PAGE_SIZE;
+
+               ret = sev_do_cmd(SEV_CMD_DBG_DECRYPT, &dbg, &error);
+               if (ret) {
+                       pr_err("SEV: SEV_CMD_DBG_DECRYPT failed ret=%d, fw_error=%d (0x%x)\n",
+                              ret, error, error);
+                       __free_page(vmsa_page);
+
+                       return NULL;
+               }
+       }
+
+       return vmsa;
+}
+
+/*
+ * Release a VMSA pointer obtained from sev_decrypt_vmsa().  No-op when the
+ * pointer is NULL or refers to the guest's live unprotected VMSA, since in
+ * that case sev_decrypt_vmsa() allocated nothing.
+ */
+void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa)
+{
+       /* If the VMSA has not yet been encrypted, nothing was allocated */
+       if (!vcpu->arch.guest_state_protected || !vmsa)
+               return;
+
+       free_page((unsigned long)vmsa);
+}
index cc1c721ba067da0ace5e4ec969c9d877356a6a44..36e2199fe4838039cf539afd7a13d2edfdcc1aae 100644 (file)
@@ -3442,6 +3442,15 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
        pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
        pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
        pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
+
+       if (sev_es_guest(vcpu->kvm)) {
+               save = sev_decrypt_vmsa(vcpu);
+               if (!save)
+                       goto no_vmsa;
+
+               save01 = save;
+       }
+
        pr_err("VMCB State Save Area:\n");
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "es:",
@@ -3512,6 +3521,10 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
        pr_err("%-15s %016llx %-13s %016llx\n",
               "excp_from:", save->last_excp_from,
               "excp_to:", save->last_excp_to);
+
+no_vmsa:
+       if (sev_es_guest(vcpu->kvm))
+               sev_free_decrypted_vmsa(vcpu, save);
 }
 
 static bool svm_check_exit_valid(u64 exit_code)
index d4490eaed55dd42130552e517d899c5f16d090cb..5ce34a1faf6ac3643c51309e5fcbcdea5d97a1eb 100644 (file)
@@ -98,6 +98,7 @@ struct kvm_sev_info {
        unsigned int asid;      /* ASID used for this guest */
        unsigned int handle;    /* SEV firmware handle */
        int fd;                 /* SEV device fd */
+       unsigned long policy;
        unsigned long pages_locked; /* Number of pages locked */
        struct list_head regions_list;  /* List of registered regions */
        u64 ap_jump_table;      /* SEV-ES AP Jump Table address */
@@ -114,6 +115,9 @@ struct kvm_sev_info {
        struct mutex guest_req_mutex; /* Must acquire before using bounce buffers */
 };
 
+#define SEV_POLICY_NODBG       BIT_ULL(0)
+#define SNP_POLICY_DEBUG       BIT_ULL(19)
+
 struct kvm_svm {
        struct kvm kvm;
 
@@ -783,6 +787,8 @@ void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
 int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
 void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
 int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
+struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu);
+void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa);
 #else
 static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
 {
@@ -814,6 +820,11 @@ static inline int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn)
        return 0;
 }
 
+static inline struct vmcb_save_area *sev_decrypt_vmsa(struct kvm_vcpu *vcpu)
+{
+       return NULL;
+}
+static inline void sev_free_decrypted_vmsa(struct kvm_vcpu *vcpu, struct vmcb_save_area *vmsa) {}
 #endif
 
 /* vmenter.S */