git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: Constrain fault_granule to kvm_s2_fault_map()
Author: Marc Zyngier <maz@kernel.org>
Mon, 9 Mar 2026 10:57:04 +0000 (10:57 +0000)
Committer: Marc Zyngier <maz@kernel.org>
Sat, 28 Mar 2026 11:29:41 +0000 (11:29 +0000)
The notion of fault_granule is specific to kvm_s2_fault_map(), and
is unused anywhere else.

Move this variable locally, removing it from kvm_s2_fault.

Tested-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/mmu.c

index 2bb4e974886a8a3fac382debe69e2bc9474af10c..981c04a74ab7a60c24bdf28676f40db52ed72174 100644 (file)
@@ -1724,7 +1724,6 @@ struct kvm_s2_fault {
        bool logging_active;
        bool force_pte;
        long vma_pagesize;
-       long fault_granule;
        enum kvm_pgtable_prot prot;
        struct page *page;
        vm_flags_t vm_flags;
@@ -1908,9 +1907,9 @@ static phys_addr_t get_ipa(const struct kvm_s2_fault *fault)
 static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
                            struct kvm_s2_fault *fault, void *memcache)
 {
-       bool fault_is_perm = kvm_s2_fault_is_perm(s2fd);
        struct kvm *kvm = s2fd->vcpu->kvm;
        struct kvm_pgtable *pgt;
+       long perm_fault_granule;
        int ret;
        enum kvm_pgtable_walk_flags flags = KVM_PGTABLE_WALK_SHARED;
 
@@ -1920,14 +1919,17 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
        if (mmu_invalidate_retry(kvm, fault->mmu_seq))
                goto out_unlock;
 
+       perm_fault_granule = (kvm_s2_fault_is_perm(s2fd) ?
+                             kvm_vcpu_trap_get_perm_fault_granule(s2fd->vcpu) : 0);
+
        /*
         * If we are not forced to use page mapping, check if we are
         * backed by a THP and thus use block mapping if possible.
         */
        if (fault->vma_pagesize == PAGE_SIZE &&
            !(fault->force_pte || fault->s2_force_noncacheable)) {
-               if (fault_is_perm && fault->fault_granule > PAGE_SIZE) {
-                       fault->vma_pagesize = fault->fault_granule;
+               if (perm_fault_granule > PAGE_SIZE) {
+                       fault->vma_pagesize = perm_fault_granule;
                } else {
                        fault->vma_pagesize = transparent_hugepage_adjust(kvm, s2fd->memslot,
                                                                          s2fd->hva, &fault->pfn,
@@ -1940,15 +1942,15 @@ static int kvm_s2_fault_map(const struct kvm_s2_fault_desc *s2fd,
                }
        }
 
-       if (!fault_is_perm && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
+       if (!perm_fault_granule && !fault->s2_force_noncacheable && kvm_has_mte(kvm))
                sanitise_mte_tags(kvm, fault->pfn, fault->vma_pagesize);
 
        /*
         * Under the premise of getting a FSC_PERM fault, we just need to relax
-        * permissions only if vma_pagesize equals fault_granule. Otherwise,
+        * permissions only if vma_pagesize equals perm_fault_granule. Otherwise,
         * kvm_pgtable_stage2_map() should be called to change block size.
         */
-       if (fault_is_perm && fault->vma_pagesize == fault->fault_granule) {
+       if (fault->vma_pagesize == perm_fault_granule) {
                /*
                 * Drop the SW bits in favour of those stored in the
                 * PTE, which will be preserved.
@@ -1984,7 +1986,6 @@ static int user_mem_abort(const struct kvm_s2_fault_desc *s2fd)
                .logging_active = logging_active,
                .force_pte = logging_active,
                .prot = KVM_PGTABLE_PROT_R,
-               .fault_granule = perm_fault ? kvm_vcpu_trap_get_perm_fault_granule(s2fd->vcpu) : 0,
                .write_fault = write_fault,
                .exec_fault = kvm_vcpu_trap_is_exec_fault(s2fd->vcpu),
                .topup_memcache = !perm_fault || (logging_active && write_fault),