git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: Isolate mmap_read_lock inside new kvm_s2_fault_get_vma_info() helper
author: Fuad Tabba <tabba@google.com>
Fri, 6 Mar 2026 14:02:23 +0000 (14:02 +0000)
committer: Marc Zyngier <maz@kernel.org>
Sat, 28 Mar 2026 11:29:40 +0000 (11:29 +0000)
Extract the VMA lookup and metadata snapshotting logic from
kvm_s2_fault_pin_pfn() into a tightly-scoped sub-helper.

This refactoring structurally fixes a TOCTOU (Time-Of-Check to
Time-Of-Use) vulnerability and Use-After-Free risk involving the vma
pointer. In the previous layout, the mmap_read_lock is taken, the vma is
looked up, and then the lock is dropped before the function continues to
map the PFN. While an explicit vma = NULL safeguard was present, the vma
variable was still lexically in scope for the remainder of the function.

By isolating the locked region into kvm_s2_fault_get_vma_info(), the vma
pointer becomes a local variable strictly confined to that sub-helper.
Because the pointer's scope literally ends when the sub-helper returns,
it is not possible for the subsequent page fault logic in
kvm_s2_fault_pin_pfn() to accidentally access the vanished VMA,
eliminating this bug class by design.

Signed-off-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/mmu.c

index 7ca704a62d62e6fccf2a1b86448d1ef89b577c2b..9fe2e31a8601b7d1ae85c348f90c2f6769252d87 100644 (file)
@@ -1740,7 +1740,7 @@ struct kvm_s2_fault {
        vm_flags_t vm_flags;
 };
 
-static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
+static int kvm_s2_fault_get_vma_info(struct kvm_s2_fault *fault)
 {
        struct vm_area_struct *vma;
        struct kvm *kvm = fault->vcpu->kvm;
@@ -1774,9 +1774,6 @@ static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
 
        fault->is_vma_cacheable = kvm_vma_is_cacheable(vma);
 
-       /* Don't use the VMA after the unlock -- it may have vanished */
-       vma = NULL;
-
        /*
         * Read mmu_invalidate_seq so that KVM can detect if the results of
         * vma_lookup() or __kvm_faultin_pfn() become stale prior to
@@ -1788,6 +1785,17 @@ static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
        fault->mmu_seq = kvm->mmu_invalidate_seq;
        mmap_read_unlock(current->mm);
 
+       return 0;
+}
+
+static int kvm_s2_fault_pin_pfn(struct kvm_s2_fault *fault)
+{
+       int ret;
+
+       ret = kvm_s2_fault_get_vma_info(fault);
+       if (ret)
+               return ret;
+
        fault->pfn = __kvm_faultin_pfn(fault->memslot, fault->gfn,
                                       fault->write_fault ? FOLL_WRITE : 0,
                                       &fault->writable, &fault->page);