]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: arm64: Kill fault->ipa
authorMarc Zyngier <maz@kernel.org>
Sat, 7 Mar 2026 11:39:49 +0000 (11:39 +0000)
committerMarc Zyngier <maz@kernel.org>
Sat, 28 Mar 2026 11:29:41 +0000 (11:29 +0000)
fault->ipa, in a nested context, represents the output of the guest's
S2 translation for the fault->fault_ipa input, and is equal to
fault->fault_ipa otherwise.

Given that this is readily available from kvm_s2_trans_output(),
drop fault->ipa and directly compute fault->gfn instead, which
is really what we want.

Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/mmu.c

index 719521904ef501ed46a8864b379f7aebedbdea52..371ee0a836cfa1659ed9bf986a196d5717050e57 100644 (file)
@@ -1643,7 +1643,7 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
                                     unsigned long hva,
                                     struct kvm_memory_slot *memslot,
                                     struct kvm_s2_trans *nested,
-                                    bool *force_pte, phys_addr_t *ipa)
+                                    bool *force_pte)
 {
        short vma_shift;
 
@@ -1681,8 +1681,6 @@ static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
 
                max_map_size = *force_pte ? PAGE_SIZE : PUD_SIZE;
 
-               *ipa = kvm_s2_trans_output(nested);
-
                /*
                 * If we're about to create a shadow stage 2 entry, then we
                 * can only create a block mapping if the guest stage 2 page
@@ -1722,7 +1720,6 @@ struct kvm_s2_fault {
        bool is_vma_cacheable;
        bool s2_force_noncacheable;
        unsigned long mmu_seq;
-       phys_addr_t ipa;
        gfn_t gfn;
        kvm_pfn_t pfn;
        bool logging_active;
@@ -1738,6 +1735,7 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
 {
        struct vm_area_struct *vma;
        struct kvm *kvm = fault->vcpu->kvm;
+       phys_addr_t ipa;
 
        mmap_read_lock(current->mm);
        vma = vma_lookup(current->mm, fault->hva);
@@ -1748,8 +1746,7 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
        }
 
        fault->vma_pagesize = 1UL << kvm_s2_resolve_vma_size(vma, fault->hva, fault->memslot,
-                                                            fault->nested, &fault->force_pte,
-                                                            &fault->ipa);
+                                                            fault->nested, &fault->force_pte);
 
        /*
         * Both the canonical IPA and fault IPA must be aligned to the
@@ -1757,9 +1754,9 @@ static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
         * mapping in the right place.
         */
        fault->fault_ipa = ALIGN_DOWN(fault->fault_ipa, fault->vma_pagesize);
-       fault->ipa = ALIGN_DOWN(fault->ipa, fault->vma_pagesize);
+       ipa = fault->nested ? kvm_s2_trans_output(fault->nested) : fault->fault_ipa;
+       fault->gfn = ALIGN_DOWN(ipa, fault->vma_pagesize) >> PAGE_SHIFT;
 
-       fault->gfn = fault->ipa >> PAGE_SHIFT;
        fault->mte_allowed = kvm_vma_mte_allowed(vma);
 
        fault->vm_flags = vma->vm_flags;
@@ -1970,7 +1967,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                .memslot = memslot,
                .hva = hva,
                .fault_is_perm = fault_is_perm,
-               .ipa = fault_ipa,
                .logging_active = logging_active,
                .force_pte = logging_active,
                .prot = KVM_PGTABLE_PROT_R,