KVM: arm64: Fix uninitialized memcache pointer in user_mem_abort()
author    Sebastian Ott <sebott@redhat.com>
          Mon, 5 May 2025 17:31:48 +0000 (19:31 +0200)
committer Oliver Upton <oliver.upton@linux.dev>
          Mon, 5 May 2025 19:12:27 +0000 (12:12 -0700)
Commit fce886a60207 ("KVM: arm64: Plumb the pKVM MMU in KVM") made the
initialization of the local memcache variable in user_mem_abort()
conditional, leaving a codepath where it is used uninitialized via
kvm_pgtable_stage2_map().

This can fail on any path that requires a stage-2 allocation
without transitioning via a permission fault or dirty logging.
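
To make the hazard concrete, here is a standalone toy program that mirrors
the shape of the pre-fix flow. This is not kernel code; the cache structure
and all helper names are invented for illustration. The point is that the
pointer is assigned only on the top-up branch, yet the map call consumes it
on every path:

    /*
     * Standalone sketch (hypothetical names, not kernel source) mirroring
     * the pre-fix shape of user_mem_abort().
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct cache { int pages; };

    static struct cache vcpu_cache;

    static int stage2_map(struct cache *mc)
    {
            return mc->pages ? 0 : -1;      /* dereferences mc unconditionally */
    }

    static int handle_fault(bool fault_is_perm, bool logging_active,
                            bool write_fault)
    {
            struct cache *memcache;         /* uninitialized local, as pre-fix */

            if (!fault_is_perm || (logging_active && write_fault)) {
                    memcache = &vcpu_cache; /* the only assignment before the fix */
                    vcpu_cache.pages = 8;   /* stand-in for the top-up */
            }

            /*
             * A permission fault without dirty logging arrives here with
             * memcache never assigned: passing it on is undefined behavior.
             */
            return stage2_map(memcache);
    }

    int main(void)
    {
            printf("translation fault: %d\n", handle_fault(false, false, false));
            printf("permission fault:  %d\n", handle_fault(true, false, false)); /* UB */
            return 0;
    }

GCC's -Wmaybe-uninitialized can flag this pattern when the conditional
assignment and the use are visible together, as they are here.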

Fix this by making sure that memcache is always valid.

Fixes: fce886a60207 ("KVM: arm64: Plumb the pKVM MMU in KVM")
Signed-off-by: Sebastian Ott <sebott@redhat.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/kvmarm/3f5db4c7-ccce-fb95-595c-692fa7aad227@redhat.com/
Link: https://lore.kernel.org/r/20250505173148.33900-1-sebott@redhat.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/mmu.c

index 754f2fe0cc67382338fd05628b14d3d1fd785831..eeda92330ade707f85eadcaa1a1e223330698472 100644
@@ -1501,6 +1501,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return -EFAULT;
        }
 
+       if (!is_protected_kvm_enabled())
+               memcache = &vcpu->arch.mmu_page_cache;
+       else
+               memcache = &vcpu->arch.pkvm_memcache;
+
        /*
         * Permission faults just need to update the existing leaf entry,
         * and so normally don't require allocations from the memcache. The
@@ -1510,13 +1515,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (!fault_is_perm || (logging_active && write_fault)) {
                int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);
 
-               if (!is_protected_kvm_enabled()) {
-                       memcache = &vcpu->arch.mmu_page_cache;
+               if (!is_protected_kvm_enabled())
                        ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
-               } else {
-                       memcache = &vcpu->arch.pkvm_memcache;
+               else
                        ret = topup_hyp_memcache(memcache, min_pages);
-               }
+
                if (ret)
                        return ret;
        }
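
Stitched together from the two hunks, the affected region of user_mem_abort()
reads as follows after the patch (the block comment is truncated with "..."
where the hunk context ends):

            if (!is_protected_kvm_enabled())
                    memcache = &vcpu->arch.mmu_page_cache;
            else
                    memcache = &vcpu->arch.pkvm_memcache;

            /*
             * Permission faults just need to update the existing leaf entry,
             * and so normally don't require allocations from the memcache. The
             * ...
             */
            if (!fault_is_perm || (logging_active && write_fault)) {
                    int min_pages = kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu);

                    if (!is_protected_kvm_enabled())
                            ret = kvm_mmu_topup_memory_cache(memcache, min_pages);
                    else
                            ret = topup_hyp_memcache(memcache, min_pages);

                    if (ret)
                            return ret;
            }

The cache selection is now unconditional, so every later consumer of
memcache (notably kvm_pgtable_stage2_map()) sees a valid pointer; only the
top-up itself remains gated on the paths that may actually need to allocate.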