git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: x86: Update retry protection fields when forcing retry on emulation failure
author Sean Christopherson <seanjc@google.com>
Sat, 31 Aug 2024 00:15:33 +0000 (17:15 -0700)
committer Sean Christopherson <seanjc@google.com>
Tue, 10 Sep 2024 03:16:32 +0000 (20:16 -0700)
When retrying the faulting instruction after emulation failure, refresh
the infinite loop protection fields even if no shadow pages were zapped,
i.e. avoid hitting an infinite loop even when retrying the instruction as
a last-ditch effort to avoid terminating the guest.

Link: https://lore.kernel.org/r/20240831001538.336683-19-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/x86.c

index fd115feb49b38bfa52d607b02da7a84b7b3a244d..cdee59f3d15be3f68f0f2d7facdfcf9a22caa716 100644 (file)
@@ -2133,7 +2133,15 @@ int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
 void kvm_update_dr7(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
-bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa);
+bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                                      bool always_retry);
+
+static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
+                                                  gpa_t cr2_or_gpa)
+{
+       return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
+}
+
 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
                        ulong roots_to_free);
 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu);
index 195ba7430720bf3d2795cd0157bfee4cb9d8b253..4b4edaf7dc0681b2ff5bd9b209f64f94258387cc 100644 (file)
@@ -2713,10 +2713,11 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
        return r;
 }
 
-bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa)
+bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                                      bool always_retry)
 {
        gpa_t gpa = cr2_or_gpa;
-       bool r;
+       bool r = false;
 
        /*
         * Bail early if there aren't any write-protected shadow pages to avoid
@@ -2727,16 +2728,17 @@ bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa)
         * skipping the unprotect+retry path, which is also an optimization.
         */
        if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
-               return false;
+               goto out;
 
        if (!vcpu->arch.mmu->root_role.direct) {
                gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
                if (gpa == INVALID_GPA)
-                       return false;
+                       goto out;
        }
 
        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
-       if (r) {
+out:
+       if (r || always_retry) {
                vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);
                vcpu->arch.last_retry_addr = cr2_or_gpa;
        }
index ad942892fa2c5eb3e4597a0fe90c71dc3d808b00..843ddb982b35c90928d4ce0f733e2450ff1847ad 100644 (file)
@@ -8886,7 +8886,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
         * guest to let the CPU re-execute the instruction in the hope that the
         * CPU can cleanly execute the instruction that KVM failed to emulate.
         */
-       kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa);
+       __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, true);
 
        /*
         * Retry even if _this_ vCPU didn't unprotect the gfn, as it's possible