git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: Clean up control flow in kvm_s2_fault_map()
author: Fuad Tabba <tabba@google.com>
Fri, 6 Mar 2026 14:02:32 +0000 (14:02 +0000)
committer: Marc Zyngier <maz@kernel.org>
Sat, 28 Mar 2026 11:29:41 +0000 (11:29 +0000)
Clean up the KVM MMU lock retry loop by pre-assigning the error code
before the invalidate-retry check. Add braces to the transparent
hugepage (THP) adjustment branches for readability, and move the
error check for the adjusted page size inside the else branch so it
only applies to the result of transparent_hugepage_adjust().

Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/mmu.c

index 871143a2b2f702c5ff2016170674dfc28913f019..719521904ef501ed46a8864b379f7aebedbdea52 100644 (file)
@@ -1897,10 +1897,9 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
 
        kvm_fault_lock(kvm);
        pgt = fault->vcpu->arch.hw_mmu->pgt;
-       if (mmu_invalidate_retry(kvm, fault->mmu_seq)) {
-               ret = -EAGAIN;
+       ret = -EAGAIN;
+       if (mmu_invalidate_retry(kvm, fault->mmu_seq))
                goto out_unlock;
-       }
 
        /*
         * If we are not forced to use page mapping, check if we are
@@ -1908,16 +1907,17 @@ static int kvm_s2_fault_map(struct kvm_s2_fault *fault, void *memcache)
         */
        if (fault->vma_pagesize == PAGE_SIZE &&
            !(fault->force_pte || fault->s2_force_noncacheable)) {
-               if (fault->fault_is_perm && fault->fault_granule > PAGE_SIZE)
+               if (fault->fault_is_perm && fault->fault_granule > PAGE_SIZE) {
                        fault->vma_pagesize = fault->fault_granule;
-               else
+               } else {
                        fault->vma_pagesize = transparent_hugepage_adjust(kvm, fault->memslot,
                                                                          fault->hva, &fault->pfn,
                                                                          &fault->fault_ipa);
 
-               if (fault->vma_pagesize < 0) {
-                       ret = fault->vma_pagesize;
-                       goto out_unlock;
+                       if (fault->vma_pagesize < 0) {
+                               ret = fault->vma_pagesize;
+                               goto out_unlock;
+                       }
                }
        }
 
@@ -1951,7 +1951,9 @@ out_unlock:
        if (fault->writable && !ret)
                mark_page_dirty_in_slot(kvm, fault->memslot, fault->gfn);
 
-       return ret != -EAGAIN ? ret : 0;
+       if (ret != -EAGAIN)
+               return ret;
+       return 0;
 }
 
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,