KVM: SVM: Mark VMCB dirty before processing incoming snp_vmsa_gpa
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Thu, 27 Feb 2025 01:25:39 +0000 (17:25 -0800)
Commit:     Sean Christopherson <seanjc@google.com>
CommitDate: Mon, 3 Mar 2025 15:34:54 +0000 (07:34 -0800)
Mark the VMCB dirty, i.e. zero control.clean, prior to handling the new
VMSA.  Nothing in the VALID_PAGE() case touches control.clean, and
isolating the VALID_PAGE() code will allow simplifying the overall logic.

Note, the VMCB probably doesn't need to be marked dirty when the VMSA is
invalid, as KVM will disallow running the vCPU in such a state.  But it
also doesn't hurt anything.
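
For reference, a minimal sketch of the helper used here (assuming the
definition in arch/x86/kvm/svm/svm.h is unchanged by this series): marking
the VMCB fully dirty simply zeroes the clean-bits field, which is what
forces full VMCB state to be reloaded on the next VMRUN.

	/* Sketch of vmcb_mark_all_dirty(); see arch/x86/kvm/svm/svm.h. */
	static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
	{
		/* No clean bits set => full VMCB state is reloaded on VMRUN. */
		vmcb->control.clean = 0;
	}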

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20250227012541.3234589-9-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 2ae476eed47a1b014782120d58123e63d07f6608..aa46e8a4cce8151a349d126fbb130ace32517123 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3855,6 +3855,12 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
        /* Clear use of the VMSA */
        svm->vmcb->control.vmsa_pa = INVALID_PAGE;
 
+       /*
+        * When replacing the VMSA during SEV-SNP AP creation,
+        * mark the VMCB dirty so that full state is always reloaded.
+        */
+       vmcb_mark_all_dirty(svm->vmcb);
+
        if (VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) {
                gfn_t gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
                struct kvm_memory_slot *slot;
@@ -3901,12 +3907,6 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
                kvm_release_page_clean(page);
        }
 
-       /*
-        * When replacing the VMSA during SEV-SNP AP creation,
-        * mark the VMCB dirty so that full state is always reloaded.
-        */
-       vmcb_mark_all_dirty(svm->vmcb);
-
        return 0;
 }