KVM: arm64: Extract VMA size resolution in user_mem_abort()
author    Fuad Tabba <tabba@google.com>
          Fri, 6 Mar 2026 14:02:20 +0000
committer Marc Zyngier <maz@kernel.org>
          Fri, 27 Mar 2026 10:20:34 +0000
As part of an effort to refactor user_mem_abort() into smaller, more
focused helper functions, extract the logic responsible for determining
the VMA shift and page size into a new static helper,
kvm_s2_resolve_vma_size().

Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
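For readers following the diff below, here is a minimal standalone sketch (not part of the commit) of the clamping the new helper applies when a nested (shadow stage-2) translation is involved. The constants assume a 4K-page arm64 host, and clamp_to_host_size() is a hypothetical name used only for this illustration:

/*
 * Standalone illustration of the nested-size clamping done by
 * kvm_s2_resolve_vma_size(). Constants assume a 4K-page host;
 * clamp_to_host_size() is hypothetical, not a kernel function.
 */
#include <stdio.h>

#define PAGE_SIZE (1UL << 12)	/* 4KB */
#define PMD_SIZE  (1UL << 21)	/* 2MB */
#define PUD_SIZE  (1UL << 30)	/* 1GB */

static unsigned long clamp_to_host_size(unsigned long guest_s2_size,
					int force_pte)
{
	unsigned long max_map_size = force_pte ? PAGE_SIZE : PUD_SIZE;

	/* Never map bigger than the guest's own stage-2 mapping. */
	if (guest_s2_size < max_map_size)
		max_map_size = guest_s2_size;

	/* A size between two host sizes rounds down to the smaller one. */
	if (max_map_size >= PMD_SIZE && max_map_size < PUD_SIZE)
		max_map_size = PMD_SIZE;
	else if (max_map_size >= PAGE_SIZE && max_map_size < PMD_SIZE)
		max_map_size = PAGE_SIZE;

	return max_map_size;
}

int main(void)
{
	/* A 16MB guest block falls between PMD_SIZE and PUD_SIZE,
	 * so it rounds down to a 2MB (PMD) host mapping. */
	printf("16MB guest block -> %luKB host mapping\n",
	       clamp_to_host_size(16UL << 20, 0) >> 10);
	/* A 64KB guest block falls between PAGE_SIZE and PMD_SIZE,
	 * so it rounds down to a 4KB (PAGE) host mapping. */
	printf("64KB guest block -> %luKB host mapping\n",
	       clamp_to_host_size(64UL << 10, 0) >> 10);
	return 0;
}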
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 17d64a1e11e5c00906fcc93c3948cbab69009040..f8064b2d32045d012fbc130b6e4fb886b868d6c4 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1639,6 +1639,77 @@ out_unlock:
        return ret != -EAGAIN ? ret : 0;
 }
 
+static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
+                                    unsigned long hva,
+                                    struct kvm_memory_slot *memslot,
+                                    struct kvm_s2_trans *nested,
+                                    bool *force_pte, phys_addr_t *ipa)
+{
+       short vma_shift;
+       long vma_pagesize;
+
+       if (*force_pte)
+               vma_shift = PAGE_SHIFT;
+       else
+               vma_shift = get_vma_page_shift(vma, hva);
+
+       switch (vma_shift) {
+#ifndef __PAGETABLE_PMD_FOLDED
+       case PUD_SHIFT:
+               if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
+                       break;
+               fallthrough;
+#endif
+       case CONT_PMD_SHIFT:
+               vma_shift = PMD_SHIFT;
+               fallthrough;
+       case PMD_SHIFT:
+               if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
+                       break;
+               fallthrough;
+       case CONT_PTE_SHIFT:
+               vma_shift = PAGE_SHIFT;
+               *force_pte = true;
+               fallthrough;
+       case PAGE_SHIFT:
+               break;
+       default:
+               WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
+       }
+
+       vma_pagesize = 1UL << vma_shift;
+
+       if (nested) {
+               unsigned long max_map_size;
+
+               max_map_size = *force_pte ? PAGE_SIZE : PUD_SIZE;
+
+               *ipa = kvm_s2_trans_output(nested);
+
+               /*
+                * If we're about to create a shadow stage 2 entry, then we
+                * can only create a block mapping if the guest stage 2 page
+                * table uses at least as big a mapping.
+                */
+               max_map_size = min(kvm_s2_trans_size(nested), max_map_size);
+
+               /*
+                * Be careful that if the mapping size falls between
+                * two host sizes, take the smallest of the two.
+                */
+               if (max_map_size >= PMD_SIZE && max_map_size < PUD_SIZE)
+                       max_map_size = PMD_SIZE;
+               else if (max_map_size >= PAGE_SIZE && max_map_size < PMD_SIZE)
+                       max_map_size = PAGE_SIZE;
+
+               *force_pte = (max_map_size == PAGE_SIZE);
+               vma_pagesize = min_t(long, vma_pagesize, max_map_size);
+               vma_shift = __ffs(vma_pagesize);
+       }
+
+       return vma_shift;
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_s2_trans *nested,
                          struct kvm_memory_slot *memslot, unsigned long hva,
@@ -1695,65 +1766,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return -EFAULT;
        }
 
-       if (force_pte)
-               vma_shift = PAGE_SHIFT;
-       else
-               vma_shift = get_vma_page_shift(vma, hva);
-
-       switch (vma_shift) {
-#ifndef __PAGETABLE_PMD_FOLDED
-       case PUD_SHIFT:
-               if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
-                       break;
-               fallthrough;
-#endif
-       case CONT_PMD_SHIFT:
-               vma_shift = PMD_SHIFT;
-               fallthrough;
-       case PMD_SHIFT:
-               if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
-                       break;
-               fallthrough;
-       case CONT_PTE_SHIFT:
-               vma_shift = PAGE_SHIFT;
-               force_pte = true;
-               fallthrough;
-       case PAGE_SHIFT:
-               break;
-       default:
-               WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
-       }
-
+       vma_shift = kvm_s2_resolve_vma_size(vma, hva, memslot, nested,
+                                           &force_pte, &ipa);
        vma_pagesize = 1UL << vma_shift;
 
-       if (nested) {
-               unsigned long max_map_size;
-
-               max_map_size = force_pte ? PAGE_SIZE : PUD_SIZE;
-
-               ipa = kvm_s2_trans_output(nested);
-
-               /*
-                * If we're about to create a shadow stage 2 entry, then we
-                * can only create a block mapping if the guest stage 2 page
-                * table uses at least as big a mapping.
-                */
-               max_map_size = min(kvm_s2_trans_size(nested), max_map_size);
-
-               /*
-                * Be careful that if the mapping size falls between
-                * two host sizes, take the smallest of the two.
-                */
-               if (max_map_size >= PMD_SIZE && max_map_size < PUD_SIZE)
-                       max_map_size = PMD_SIZE;
-               else if (max_map_size >= PAGE_SIZE && max_map_size < PMD_SIZE)
-                       max_map_size = PAGE_SIZE;
-
-               force_pte = (max_map_size == PAGE_SIZE);
-               vma_pagesize = min_t(long, vma_pagesize, max_map_size);
-               vma_shift = __ffs(vma_pagesize);
-       }
-
        /*
         * Both the canonical IPA and fault IPA must be aligned to the
         * mapping size to ensure we find the right PFN and lay down the