iommufd: Make vfio_compat's unmap succeed if the range is already empty
author Jason Gunthorpe <jgg@nvidia.com>
Tue, 4 Nov 2025 18:11:49 +0000 (14:11 -0400)
committer Jason Gunthorpe <jgg@nvidia.com>
Wed, 5 Nov 2025 19:11:26 +0000 (15:11 -0400)
iommufd returns ENOENT when attempting to unmap a range that is already
empty, while vfio type1 returns success. Fix vfio_compat to match.

Fixes: d624d6652a65 ("iommufd: vfio container FD ioctl compatibility")
Link: https://patch.msgid.link/r/0-v1-76be45eff0be+5d-iommufd_unmap_compat_jgg@nvidia.com
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Alex Mastro <amastro@fb.com>
Reported-by: Alex Mastro <amastro@fb.com>
Closes: https://lore.kernel.org/r/aP0S5ZF9l3sWkJ1G@devgpu012.nha5.facebook.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
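
A minimal userspace sketch (not part of this commit) of the behavior the fix
restores through the vfio compat layer, assuming `container_fd` is an already
set-up VFIO container fd backed by iommufd and the IOVA range below is
hypothetical and currently unmapped:

	#include <linux/vfio.h>
	#include <sys/ioctl.h>

	static int unmap_empty_range(int container_fd)
	{
		struct vfio_iommu_type1_dma_unmap unmap = {
			.argsz = sizeof(unmap),
			.iova = 0x100000,	/* hypothetical, nothing mapped here */
			.size = 0x100000,
		};

		/* Previously failed with ENOENT; with this fix it returns 0,
		 * matching vfio type1.
		 */
		return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);
	}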
drivers/iommu/iommufd/io_pagetable.c
drivers/iommu/iommufd/ioas.c
tools/testing/selftests/iommu/iommufd.c

diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index c0360c450880b8f088dfbbe5e25d6e644d951865..75d60f2ad9008261cf06b3cc3bb3e848da14010f 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -707,7 +707,8 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
        struct iopt_area *area;
        unsigned long unmapped_bytes = 0;
        unsigned int tries = 0;
-       int rc = -ENOENT;
+       /* If there are no mapped entries then success */
+       int rc = 0;
 
        /*
         * The domains_rwsem must be held in read mode any time any area->pages
@@ -777,8 +778,6 @@ again:
 
                down_write(&iopt->iova_rwsem);
        }
-       if (unmapped_bytes)
-               rc = 0;
 
 out_unlock_iova:
        up_write(&iopt->iova_rwsem);
@@ -815,13 +814,8 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
 
 int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped)
 {
-       int rc;
-
-       rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
        /* If the IOVAs are empty then unmap all succeeds */
-       if (rc == -ENOENT)
-               return 0;
-       return rc;
+       return iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
 }
 
 /* The caller must always free all the nodes in the allowed_iova rb_root. */
diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c
index 1542c5fd10a85cac4e20d37cc0f3fa1f904e0dd6..459a7c5169154bbb76b4bb551d01a30fae2fa764 100644
--- a/drivers/iommu/iommufd/ioas.c
+++ b/drivers/iommu/iommufd/ioas.c
@@ -367,6 +367,10 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
                                     &unmapped);
                if (rc)
                        goto out_put;
+               if (!unmapped) {
+                       rc = -ENOENT;
+                       goto out_put;
+               }
        }
 
        cmd->length = unmapped;
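
Note that the native IOMMU_IOAS_UNMAP path above keeps reporting -ENOENT when
a ranged unmap covers no mappings; only the vfio compat path changes. A small
sketch (not part of this commit), assuming `iommufd` is an open /dev/iommu fd
and `ioas_id` names an IOAS with nothing mapped in the hypothetical range:

	#include <errno.h>
	#include <linux/iommufd.h>
	#include <sys/ioctl.h>

	static int check_empty_unmap_fails(int iommufd, __u32 ioas_id)
	{
		struct iommu_ioas_unmap unmap = {
			.size = sizeof(unmap),
			.ioas_id = ioas_id,
			.iova = 0x100000,	/* hypothetical, nothing mapped here */
			.length = 0x100000,
		};

		/* Expected to fail with ENOENT when nothing was unmapped */
		if (!ioctl(iommufd, IOMMU_IOAS_UNMAP, &unmap))
			return -1;
		return errno == ENOENT ? 0 : -1;
	}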
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index 3eebf5e3b974f469d1c5a96ac56ba86e9264c373..bb4d33dde3c89969e6c3493b130ecf573dd2fbc1 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -2638,6 +2638,8 @@ TEST_F(vfio_compat_mock_domain, map)
        ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
        ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
        ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
+       /* Unmap of empty is success */
+       ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
 
        /* UNMAP_FLAG_ALL requires 0 iova/size */
        ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));