git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
vfio/type1: move iova increment to unmap_unpin_*() caller
author: Alex Mastro <amastro@fb.com>
Tue, 28 Oct 2025 16:15:01 +0000 (09:15 -0700)
committer: Alex Williamson <alex@shazbot.org>
Tue, 28 Oct 2025 21:54:41 +0000 (15:54 -0600)
Move incrementing iova to the caller of these functions as part of
preparing to handle end of address space map/unmap.

Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Fixes: 73fa0d10d077 ("vfio: Type1 IOMMU implementation")
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Alex Mastro <amastro@fb.com>
Link: https://lore.kernel.org/r/20251028-fix-unmap-v6-2-2542b96bcc8e@fb.com
Signed-off-by: Alex Williamson <alex@shazbot.org>
drivers/vfio/vfio_iommu_type1.c

index 91b1480b7a37656bc3e281f25aa3db9630f039f6..48bcc0633d445f571b243685ec40487942764ff8 100644 (file)
@@ -1083,7 +1083,7 @@ static long vfio_sync_unpin(struct vfio_dma *dma, struct vfio_domain *domain,
 #define VFIO_IOMMU_TLB_SYNC_MAX                512
 
 static size_t unmap_unpin_fast(struct vfio_domain *domain,
-                              struct vfio_dma *dma, dma_addr_t *iova,
+                              struct vfio_dma *dma, dma_addr_t iova,
                               size_t len, phys_addr_t phys, long *unlocked,
                               struct list_head *unmapped_list,
                               int *unmapped_cnt,
@@ -1093,18 +1093,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
        struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 
        if (entry) {
-               unmapped = iommu_unmap_fast(domain->domain, *iova, len,
+               unmapped = iommu_unmap_fast(domain->domain, iova, len,
                                            iotlb_gather);
 
                if (!unmapped) {
                        kfree(entry);
                } else {
-                       entry->iova = *iova;
+                       entry->iova = iova;
                        entry->phys = phys;
                        entry->len  = unmapped;
                        list_add_tail(&entry->list, unmapped_list);
 
-                       *iova += unmapped;
                        (*unmapped_cnt)++;
                }
        }
@@ -1123,18 +1122,17 @@ static size_t unmap_unpin_fast(struct vfio_domain *domain,
 }
 
 static size_t unmap_unpin_slow(struct vfio_domain *domain,
-                              struct vfio_dma *dma, dma_addr_t *iova,
+                              struct vfio_dma *dma, dma_addr_t iova,
                               size_t len, phys_addr_t phys,
                               long *unlocked)
 {
-       size_t unmapped = iommu_unmap(domain->domain, *iova, len);
+       size_t unmapped = iommu_unmap(domain->domain, iova, len);
 
        if (unmapped) {
-               *unlocked += vfio_unpin_pages_remote(dma, *iova,
+               *unlocked += vfio_unpin_pages_remote(dma, iova,
                                                     phys >> PAGE_SHIFT,
                                                     unmapped >> PAGE_SHIFT,
                                                     false);
-               *iova += unmapped;
                cond_resched();
        }
        return unmapped;
@@ -1197,16 +1195,18 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
                 * First, try to use fast unmap/unpin. In case of failure,
                 * switch to slow unmap/unpin path.
                 */
-               unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
+               unmapped = unmap_unpin_fast(domain, dma, iova, len, phys,
                                            &unlocked, &unmapped_region_list,
                                            &unmapped_region_cnt,
                                            &iotlb_gather);
                if (!unmapped) {
-                       unmapped = unmap_unpin_slow(domain, dma, &iova, len,
+                       unmapped = unmap_unpin_slow(domain, dma, iova, len,
                                                    phys, &unlocked);
                        if (WARN_ON(!unmapped))
                                break;
                }
+
+               iova += unmapped;
        }
 
        dma->iommu_mapped = false;