iommufd: Fix refcounting race during mmap
Author:     Jason Gunthorpe <jgg@nvidia.com>
AuthorDate: Tue, 16 Sep 2025 15:42:56 +0000 (12:42 -0300)
Commit:     Jason Gunthorpe <jgg@nvidia.com>
CommitDate: Fri, 19 Sep 2025 13:34:49 +0000 (10:34 -0300)
The owner object of the immap can be destroyed while the immap remains in
the mtree, so accessing the immap pointer without holding locks is racy
with destruction.

The immap is safe to access outside the lock once a users refcount is
obtained; the owner object cannot start destruction until users reaches 0.

Thus the users refcount should be obtained not at the end of
iommufd_fops_mmap() but inside the mtree lock held around the
mtree_load(). Move the refcount acquisition there and use
refcount_inc_not_zero(), since the refcount can already be 0 inside the
mtree during a destruction race.

Link: https://patch.msgid.link/r/0-v1-e6faace50971+3cc-iommufd_mmap_fix_jgg@nvidia.com
Cc: stable@vger.kernel.org
Fixes: 56e9a0d8e53f ("iommufd: Add mmap interface")
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/iommu/iommufd/main.c

index 15af7ced0501d6f85a2daa7f963ced2b783e198b..a9d4decc8ba1650e19a61438c5d1b171b589deca 100644
@@ -550,16 +550,23 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
        if (vma->vm_flags & VM_EXEC)
                return -EPERM;
 
+       mtree_lock(&ictx->mt_mmap);
        /* vma->vm_pgoff carries a page-shifted start position to an immap */
        immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
-       if (!immap)
+       if (!immap || !refcount_inc_not_zero(&immap->owner->users)) {
+               mtree_unlock(&ictx->mt_mmap);
                return -ENXIO;
+       }
+       mtree_unlock(&ictx->mt_mmap);
+
        /*
         * mtree_load() returns the immap for any contained mmio_addr, so only
         * allow the exact immap thing to be mapped
         */
-       if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
-               return -ENXIO;
+       if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length) {
+               rc = -ENXIO;
+               goto err_refcount;
+       }
 
        vma->vm_pgoff = 0;
        vma->vm_private_data = immap;
@@ -570,10 +577,11 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
                                immap->mmio_addr >> PAGE_SHIFT, length,
                                vma->vm_page_prot);
        if (rc)
-               return rc;
+               goto err_refcount;
+       return 0;
 
-       /* vm_ops.open won't be called for mmap itself. */
-       refcount_inc(&immap->owner->users);
+err_refcount:
+       refcount_dec(&immap->owner->users);
        return rc;
 }