 	return ret != -EAGAIN ? ret : 0;
 }
 
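+/*
+ * Work out the page shift used to map a stage 2 fault, based on the
+ * backing VMA and, when running a nested guest, on the size of the
+ * guest's own stage 2 mapping. May set *force_pte, and writes the
+ * canonical IPA to *ipa when a shadow stage 2 is in use. Returns the
+ * resolved shift, from which the caller derives the mapping size.
+ */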
+static short kvm_s2_resolve_vma_size(struct vm_area_struct *vma,
+				     unsigned long hva,
+				     struct kvm_memory_slot *memslot,
+				     struct kvm_s2_trans *nested,
+				     bool *force_pte, phys_addr_t *ipa)
+{
+	short vma_shift;
+	long vma_pagesize;
+
+	if (*force_pte)
+		vma_shift = PAGE_SHIFT;
+	else
+		vma_shift = get_vma_page_shift(vma, hva);
+
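+	/*
+	 * Downgrade the contiguous-hint shifts to their base block size,
+	 * and fall back to a smaller size whenever the memslot layout
+	 * cannot support a huge mapping at this address.
+	 */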
+	switch (vma_shift) {
+#ifndef __PAGETABLE_PMD_FOLDED
+	case PUD_SHIFT:
+		if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
+			break;
+		fallthrough;
+#endif
+	case CONT_PMD_SHIFT:
+		vma_shift = PMD_SHIFT;
+		fallthrough;
+	case PMD_SHIFT:
+		if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
+			break;
+		fallthrough;
+	case CONT_PTE_SHIFT:
+		vma_shift = PAGE_SHIFT;
+		*force_pte = true;
+		fallthrough;
+	case PAGE_SHIFT:
+		break;
+	default:
+		WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
+	}
+
+	vma_pagesize = 1UL << vma_shift;
+
+	if (nested) {
+		unsigned long max_map_size;
+
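+		/* Start from the largest block size stage 2 can map */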
+		max_map_size = *force_pte ? PAGE_SIZE : PUD_SIZE;
+
+		*ipa = kvm_s2_trans_output(nested);
+
+		/*
+		 * If we're about to create a shadow stage 2 entry, then we
+		 * can only create a block mapping if the guest stage 2 page
+		 * table uses at least as big a mapping.
+		 */
+		max_map_size = min(kvm_s2_trans_size(nested), max_map_size);
+
+		/*
+		 * Be careful that if the mapping size falls between
+		 * two host sizes, take the smallest of the two.
+		 */
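+		 * For example, a guest stage 2 using 16K pages maps 32M
+		 * blocks, which on a 4K host fall between PMD_SIZE and
+		 * PUD_SIZE and must therefore be shadowed at PMD_SIZE.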
+		 */
+		if (max_map_size >= PMD_SIZE && max_map_size < PUD_SIZE)
+			max_map_size = PMD_SIZE;
+		else if (max_map_size >= PAGE_SIZE && max_map_size < PMD_SIZE)
+			max_map_size = PAGE_SIZE;
+
+		*force_pte = (max_map_size == PAGE_SIZE);
+		vma_pagesize = min_t(long, vma_pagesize, max_map_size);
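+		/* vma_pagesize is a power of two, so __ffs() yields its shift */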
+		vma_shift = __ffs(vma_pagesize);
+	}
+
+	return vma_shift;
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_s2_trans *nested,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 		return -EFAULT;
 	}
-	if (force_pte)
-		vma_shift = PAGE_SHIFT;
-	else
-		vma_shift = get_vma_page_shift(vma, hva);
-
-	switch (vma_shift) {
-#ifndef __PAGETABLE_PMD_FOLDED
-	case PUD_SHIFT:
-		if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
-			break;
-		fallthrough;
-#endif
-	case CONT_PMD_SHIFT:
-		vma_shift = PMD_SHIFT;
-		fallthrough;
-	case PMD_SHIFT:
-		if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
-			break;
-		fallthrough;
-	case CONT_PTE_SHIFT:
-		vma_shift = PAGE_SHIFT;
-		force_pte = true;
-		fallthrough;
-	case PAGE_SHIFT:
-		break;
-	default:
-		WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
-	}
-
+	vma_shift = kvm_s2_resolve_vma_size(vma, hva, memslot, nested,
+					    &force_pte, &ipa);
 	vma_pagesize = 1UL << vma_shift;
-	if (nested) {
-		unsigned long max_map_size;
-
-		max_map_size = force_pte ? PAGE_SIZE : PUD_SIZE;
-
-		ipa = kvm_s2_trans_output(nested);
-
-		/*
-		 * If we're about to create a shadow stage 2 entry, then we
-		 * can only create a block mapping if the guest stage 2 page
-		 * table uses at least as big a mapping.
-		 */
-		max_map_size = min(kvm_s2_trans_size(nested), max_map_size);
-
-		/*
-		 * Be careful that if the mapping size falls between
-		 * two host sizes, take the smallest of the two.
-		 */
-		if (max_map_size >= PMD_SIZE && max_map_size < PUD_SIZE)
-			max_map_size = PMD_SIZE;
-		else if (max_map_size >= PAGE_SIZE && max_map_size < PMD_SIZE)
-			max_map_size = PAGE_SIZE;
-
-		force_pte = (max_map_size == PAGE_SIZE);
-		vma_pagesize = min_t(long, vma_pagesize, max_map_size);
-		vma_shift = __ffs(vma_pagesize);
-	}
-
 	/*
 	 * Both the canonical IPA and fault IPA must be aligned to the
 	 * mapping size to ensure we find the right PFN and lay down the