KVM: x86/mmu: Add "mmu" prefix fault-in helpers to free up generic names
author    Sean Christopherson <seanjc@google.com>
          Thu, 10 Oct 2024 18:23:39 +0000 (11:23 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 25 Oct 2024 16:59:08 +0000 (12:59 -0400)
Prefix x86's faultin_pfn helpers with "mmu" so that the mmu-less names can
be used by common KVM for similar APIs.

No functional change intended.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-38-seanjc@google.com>

arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/paging_tmpl.h
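
At a glance, the patch is three pure renames in arch/x86/kvm/mmu/ (call shapes reproduced from the hunks below), plus a matching comment update in mmu_internal.h:

    kvm_faultin_pfn_private(vcpu, fault)    ->  kvm_mmu_faultin_pfn_private(vcpu, fault)
    __kvm_faultin_pfn(vcpu, fault)          ->  __kvm_mmu_faultin_pfn(vcpu, fault)
    kvm_faultin_pfn(vcpu, fault, access)    ->  kvm_mmu_faultin_pfn(vcpu, fault, access)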

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 688202ac50c23c4b4c11192ecb63345237d9731b..7dcd34628f49d92946f130cd2c9519e7523547f2 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4354,8 +4354,8 @@ static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
        return max_level;
 }
 
-static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
-                                  struct kvm_page_fault *fault)
+static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
+                                      struct kvm_page_fault *fault)
 {
        int max_order, r;
 
@@ -4378,10 +4378,11 @@ static int kvm_faultin_pfn_private(struct kvm_vcpu *vcpu,
        return RET_PF_CONTINUE;
 }
 
-static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
+                                struct kvm_page_fault *fault)
 {
        if (fault->is_private)
-               return kvm_faultin_pfn_private(vcpu, fault);
+               return kvm_mmu_faultin_pfn_private(vcpu, fault);
 
        fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, true,
                                          fault->write, &fault->map_writable);
@@ -4416,8 +4417,8 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        return RET_PF_CONTINUE;
 }
 
-static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
-                          unsigned int access)
+static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
+                              struct kvm_page_fault *fault, unsigned int access)
 {
        struct kvm_memory_slot *slot = fault->slot;
        int ret;
@@ -4500,7 +4501,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
        if (mmu_invalidate_retry_gfn_unsafe(vcpu->kvm, fault->mmu_seq, fault->gfn))
                return RET_PF_RETRY;
 
-       ret = __kvm_faultin_pfn(vcpu, fault);
+       ret = __kvm_mmu_faultin_pfn(vcpu, fault);
        if (ret != RET_PF_CONTINUE)
                return ret;
 
@@ -4577,7 +4578,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        if (r)
                return r;
 
-       r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
+       r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
        if (r != RET_PF_CONTINUE)
                return r;
 
@@ -4668,7 +4669,7 @@ static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
        if (r)
                return r;
 
-       r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
+       r = kvm_mmu_faultin_pfn(vcpu, fault, ACC_ALL);
        if (r != RET_PF_CONTINUE)
                return r;
 
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 633aedec3c2e3cad4d4b9a098f90d56466dcf148..59e600f6ff9df9acf75fa952f29a1b0ef9d30bec 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -235,7 +235,7 @@ struct kvm_page_fault {
        /* The memslot containing gfn. May be NULL. */
        struct kvm_memory_slot *slot;
 
-       /* Outputs of kvm_faultin_pfn.  */
+       /* Outputs of kvm_mmu_faultin_pfn().  */
        unsigned long mmu_seq;
        kvm_pfn_t pfn;
        bool map_writable;
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 143b7e9f26dc598feaedb00a4d85757e4ef0fe3e..9bd3d6f5db91330345916667f7a6a611319bc6d9 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -812,7 +812,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        if (r)
                return r;
 
-       r = kvm_faultin_pfn(vcpu, fault, walker.pte_access);
+       r = kvm_mmu_faultin_pfn(vcpu, fault, walker.pte_access);
        if (r != RET_PF_CONTINUE)
                return r;
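
For context on where the freed-up generic names go: later in the same series, common KVM gains a gfn-to-pfn fault-in API under the mmu-less kvm_faultin_pfn() / __kvm_faultin_pfn() names. A minimal sketch of what such a common helper can look like, assuming the shape the broader series settles on (the exact signature, the FOLL_WRITE plumbing, and the refcounted_page out-parameter are assumptions for illustration, not something this commit adds):

    /*
     * Hypothetical common fault-in helper using the generic name freed up
     * by this rename (assumed shape, not added by this patch): resolve the
     * gfn to its memslot and fault in the backing pfn, optionally for write.
     */
    static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
                                            bool write, bool *writable,
                                            struct page **refcounted_page)
    {
            return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn,
                                     write ? FOLL_WRITE : 0, writable,
                                     refcounted_page);
    }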