KVM: x86/mmu: Subsume kvm_mmu_unprotect_page() into the and_retry() version
author Sean Christopherson <seanjc@google.com>
Sat, 31 Aug 2024 00:15:35 +0000 (17:15 -0700)
committer Sean Christopherson <seanjc@google.com>
Tue, 10 Sep 2024 03:16:34 +0000 (20:16 -0700)
Fold kvm_mmu_unprotect_page() into kvm_mmu_unprotect_gfn_and_retry() now
that all other direct usage is gone.

No functional change intended.

Link: https://lore.kernel.org/r/20240831001538.336683-21-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
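
The message above describes an API consolidation, not a behavioral change: the zap-under-mmu_lock logic that used to live in kvm_mmu_unprotect_page() is now reachable only through the retry helper. As a minimal sketch for context (not part of this patch), callers are assumed to go through the existing static inline wrapper in arch/x86/include/asm/kvm_host.h, which forwards to the __ variant modified below:

	/*
	 * Sketch of the assumed pre-existing wrapper (not added by this patch):
	 * callers pass always_retry=false and rely on
	 * __kvm_mmu_unprotect_gfn_and_retry() to zap any write-protected shadow
	 * pages for the faulting gfn before retrying the instruction.
	 */
	static inline bool kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu,
							   gpa_t cr2_or_gpa)
	{
		return __kvm_mmu_unprotect_gfn_and_retry(vcpu, cr2_or_gpa, false);
	}
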
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu/mmu.c

arch/x86/include/asm/kvm_host.h
index cdee59f3d15be3f68f0f2d7facdfcf9a22caa716..8f4164f58b6c15a78dc1969923316b18888006d8 100644
@@ -2132,7 +2132,6 @@ int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu);
 
 void kvm_update_dr7(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
 bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                                       bool always_retry);
 
arch/x86/kvm/mmu/mmu.c
index 4b4edaf7dc0681b2ff5bd9b209f64f94258387cc..29305403f95615858dd5ceded2b849d31a9f8fb5 100644
@@ -2695,27 +2695,12 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
        write_unlock(&kvm->mmu_lock);
 }
 
-int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
-{
-       struct kvm_mmu_page *sp;
-       LIST_HEAD(invalid_list);
-       int r;
-
-       r = 0;
-       write_lock(&kvm->mmu_lock);
-       for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
-               r = 1;
-               kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-       }
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
-       write_unlock(&kvm->mmu_lock);
-
-       return r;
-}
-
 bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                                       bool always_retry)
 {
+       struct kvm *kvm = vcpu->kvm;
+       LIST_HEAD(invalid_list);
+       struct kvm_mmu_page *sp;
        gpa_t gpa = cr2_or_gpa;
        bool r = false;
 
@@ -2727,7 +2712,7 @@ bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
         * positive is benign, and a false negative will simply result in KVM
         * skipping the unprotect+retry path, which is also an optimization.
         */
-       if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+       if (!READ_ONCE(kvm->arch.indirect_shadow_pages))
                goto out;
 
        if (!vcpu->arch.mmu->root_role.direct) {
@@ -2736,7 +2721,15 @@ bool __kvm_mmu_unprotect_gfn_and_retry(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                        goto out;
        }
 
-       r = kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
+       r = false;
+       write_lock(&kvm->mmu_lock);
+       for_each_gfn_valid_sp_with_gptes(kvm, sp, gpa_to_gfn(gpa)) {
+               r = true;
+               kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+       }
+       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+       write_unlock(&kvm->mmu_lock);
+
 out:
        if (r || always_retry) {
                vcpu->arch.last_retry_eip = kvm_rip_read(vcpu);