RISC-V: KVM: Remove automatic I/O mapping for VM_PFNMAP
author    Fangyu Yu <fangyu.yu@linux.alibaba.com>
          Tue, 21 Oct 2025 14:21:31 +0000 (22:21 +0800)
committer Anup Patel <anup@brainfault.org>
          Fri, 24 Oct 2025 15:54:36 +0000 (21:24 +0530)
As of commit aac6db75a9fc ("vfio/pci: Use unmap_mapping_range()"),
vm_pgoff may no longer be guaranteed to hold the PFN for VM_PFNMAP
regions. Using vma->vm_pgoff to derive the HPA here may therefore
produce incorrect mappings.

Instead, I/O mappings for such regions can be established on-demand
during g-stage page faults, making the upfront ioremap in this path
unnecessary.
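
For illustration, a simplified sketch of the on-demand path the message
refers to, not the kernel's exact code: gstage_map_pfn() is a
hypothetical stand-in for the arch code that writes the g-stage PTE,
and gfn_to_pfn_prot() is the long-standing generic KVM helper (recent
trees use the kvm_faultin_pfn() family instead, but the idea is the
same).

    /*
     * Sketch: on a g-stage fault, resolve GPA -> gfn -> host PFN
     * through KVM's generic helpers, which follow the host VMA's
     * page tables, so VM_PFNMAP regions resolve correctly even when
     * vm_pgoff does not encode the PFN.
     */
    static int gstage_fault_sketch(struct kvm_vcpu *vcpu, gpa_t gpa,
                                   bool is_write)
    {
            gfn_t gfn = gpa >> PAGE_SHIFT;
            bool writable;
            kvm_pfn_t pfn;

            /*
             * HVA -> PFN via the MM layer; for VM_PFNMAP VMAs the
             * generic code walks the host page tables instead of
             * using get_user_pages().
             */
            pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, is_write, &writable);
            if (is_error_noslot_pfn(pfn))
                    return -EFAULT;

            /* Install gfn -> pfn in the g-stage page table (hypothetical). */
            return gstage_map_pfn(vcpu->kvm, gfn, pfn, is_write && writable);
    }

Because the fault path consults the host page tables rather than
vm_pgoff, it stays correct regardless of how the owning driver set up
the VMA.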

Fixes: aac6db75a9fc ("vfio/pci: Use unmap_mapping_range()")
Signed-off-by: Fangyu Yu <fangyu.yu@linux.alibaba.com>
Tested-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Guo Ren <guoren@kernel.org>
Reviewed-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/r/20251021142131.78796-1-fangyu.yu@linux.alibaba.com
Signed-off-by: Anup Patel <anup@brainfault.org>
arch/riscv/kvm/mmu.c

index 525fb5a330c0d20b71ee8e27092efe05dc4cd88f..58f5f3536ffdffb4be39b5a66b9a0c0a0af6cac3 100644
@@ -171,7 +171,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                enum kvm_mr_change change)
 {
        hva_t hva, reg_end, size;
-       gpa_t base_gpa;
        bool writable;
        int ret = 0;
 
@@ -190,15 +189,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
        hva = new->userspace_addr;
        size = new->npages << PAGE_SHIFT;
        reg_end = hva + size;
-       base_gpa = new->base_gfn << PAGE_SHIFT;
        writable = !(new->flags & KVM_MEM_READONLY);
 
        mmap_read_lock(current->mm);
 
        /*
         * A memory region could potentially cover multiple VMAs, and
-        * any holes between them, so iterate over all of them to find
-        * out if we can map any of them right now.
+        * any holes between them, so iterate over all of them.
         *
         *     +--------------------------------------------+
         * +---------------+----------------+   +----------------+
@@ -209,7 +206,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
         */
        do {
                struct vm_area_struct *vma;
-               hva_t vm_start, vm_end;
+               hva_t vm_end;
 
                vma = find_vma_intersection(current->mm, hva, reg_end);
                if (!vma)
@@ -225,36 +222,18 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
                }
 
                /* Take the intersection of this VMA with the memory region */
-               vm_start = max(hva, vma->vm_start);
                vm_end = min(reg_end, vma->vm_end);
 
                if (vma->vm_flags & VM_PFNMAP) {
-                       gpa_t gpa = base_gpa + (vm_start - hva);
-                       phys_addr_t pa;
-
-                       pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
-                       pa += vm_start - vma->vm_start;
-
                        /* IO region dirty page logging not allowed */
                        if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                                ret = -EINVAL;
                                goto out;
                        }
-
-                       ret = kvm_riscv_mmu_ioremap(kvm, gpa, pa, vm_end - vm_start,
-                                                   writable, false);
-                       if (ret)
-                               break;
                }
                hva = vm_end;
        } while (hva < reg_end);
 
-       if (change == KVM_MR_FLAGS_ONLY)
-               goto out;
-
-       if (ret)
-               kvm_riscv_mmu_iounmap(kvm, base_gpa, size);
-
 out:
        mmap_read_unlock(current->mm);
        return ret;
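
For context, a hedged userspace sketch of how a VM_PFNMAP region
typically reaches this path: a VFIO device BAR is mmap()ed and
registered as a memslot. The fd names, slot number, GPA, and size are
illustrative assumptions, not taken from this commit.

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /*
     * Illustrative only: 'vm_fd' is a KVM VM fd and 'bar' an mmap()
     * of a VFIO PCI BAR (a VM_PFNMAP mapping). After this commit,
     * KVM_SET_USER_MEMORY_REGION no longer ioremaps the region up
     * front on RISC-V; the g-stage fault handler maps it on first
     * guest access.
     */
    static int register_bar_memslot(int vm_fd, void *bar, __u64 bar_size)
    {
            struct kvm_userspace_memory_region region = {
                    .slot            = 1,
                    .flags           = 0, /* dirty logging is rejected for PFNMAP */
                    .guest_phys_addr = 0x100000000ULL, /* example GPA */
                    .memory_size     = bar_size,
                    .userspace_addr  = (__u64)(unsigned long)bar,
            };

            return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
    }

Note that the KVM_MEM_LOG_DIRTY_PAGES check retained in the diff above
still rejects dirty-page logging for such I/O regions at registration
time.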