iommufd/access: Bypass access->ops->unmap for internal use
author     Nicolin Chen <nicolinc@nvidia.com>
           Thu, 10 Jul 2025 05:59:03 +0000 (22:59 -0700)
committer  Jason Gunthorpe <jgg@nvidia.com>
           Fri, 11 Jul 2025 14:09:22 +0000 (11:09 -0300)
The access object has been used externally by VFIO mdev devices,
allowing them to pin/unpin physical pages (via needs_pin_pages).
Meanwhile, a racy unmap can occur in this case, so these devices usually
implement an unmap handler, which iommufd_access_notify_unmap() invokes.
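
As a rough illustration (not part of this patch), an mdev-style external
user wires this up through the iommufd_access_ops callbacks from
include/linux/iommufd.h; my_mdev_unmap is a hypothetical handler name:

        /* Hypothetical mdev-side wiring: needs_pin_pages requests the
         * pin/unpin path, and .unmap lets iommufd_access_notify_unmap()
         * call back so the driver can drop its pins on a racy unmap.
         */
        static void my_mdev_unmap(void *data, unsigned long iova,
                                  unsigned long length)
        {
                /* drop any pins covering [iova, iova + length - 1] */
        }

        static const struct iommufd_access_ops my_mdev_access_ops = {
                .needs_pin_pages = 1,
                .unmap = my_mdev_unmap,
        };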

The new HW queue object will need the same pin/unpin feature, although
(unlike the mdev case) it wants to reject any unmap attempt during its
life cycle. Accordingly, it does not implement an unmap handler. Thus,
bypass the access->ops->unmap call when the access is marked as
internal.
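
For reference, the internal-access test is expected to look roughly like
the helper below; the !access->ictx marker is an assumption, based on
internal accesses being created without a userspace context elsewhere in
this series:

        static inline bool
        iommufd_access_is_internal(struct iommufd_access *access)
        {
                /* Assumption: internal accesses hold no userspace ictx */
                return !access->ictx;
        }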

Also, an area being pinned by an internal access should reject any unmap
request. This cannot be done inside iommufd_access_notify_unmap(), as
that is a per-iopt action. Instead, add a "num_locks" counter to struct
iopt_area and raise it in iopt_area_add_access() when the caller is an
internal access.
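
The intended end-to-end behavior, sketched from the caller's point of
view (the flow below is an assumption about how the future HW queue code
will use these APIs, not code from this patch):

        /* Internal access pins a range: each covered area takes num_locks++ */
        rc = iommufd_access_pin_pages(access, iova, length, pages, 0);

        /* While the lock is held, unmapping those areas is refused */
        rc = iopt_unmap_iova(iopt, iova, length, NULL);  /* -EBUSY */

        /* Unpinning drops num_locks, after which unmap succeeds again */
        iommufd_access_unpin_pages(access, iova, length);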

Link: https://patch.msgid.link/r/6df9a43febf79c0379091ec59747276ce9d2493b.1752126748.git.nicolinc@nvidia.com
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Pranjal Shrivastava <praan@google.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/iommu/iommufd/device.c
drivers/iommu/iommufd/io_pagetable.c
drivers/iommu/iommufd/io_pagetable.h
drivers/iommu/iommufd/pages.c

diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 07a4ff753c1207a1af0be798631bd8a32f3d5b95..0567faff5680f5866955c321442548b31bd68bce 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -1048,7 +1048,7 @@ static int iommufd_access_change_ioas(struct iommufd_access *access,
        }
 
        if (cur_ioas) {
-               if (access->ops->unmap) {
+               if (!iommufd_access_is_internal(access) && access->ops->unmap) {
                        mutex_unlock(&access->ioas_lock);
                        access->ops->unmap(access->data, 0, ULONG_MAX);
                        mutex_lock(&access->ioas_lock);
@@ -1255,7 +1255,8 @@ void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
 
        xa_lock(&ioas->iopt.access_list);
        xa_for_each(&ioas->iopt.access_list, index, access) {
-               if (!iommufd_lock_obj(&access->obj))
+               if (!iommufd_lock_obj(&access->obj) ||
+                   iommufd_access_is_internal(access))
                        continue;
                xa_unlock(&ioas->iopt.access_list);
 
@@ -1279,6 +1280,7 @@ void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
 void iommufd_access_unpin_pages(struct iommufd_access *access,
                                unsigned long iova, unsigned long length)
 {
+       bool internal = iommufd_access_is_internal(access);
        struct iopt_area_contig_iter iter;
        struct io_pagetable *iopt;
        unsigned long last_iova;
@@ -1305,7 +1307,8 @@ void iommufd_access_unpin_pages(struct iommufd_access *access,
                        area, iopt_area_iova_to_index(area, iter.cur_iova),
                        iopt_area_iova_to_index(
                                area,
-                               min(last_iova, iopt_area_last_iova(area))));
+                               min(last_iova, iopt_area_last_iova(area))),
+                       internal);
        WARN_ON(!iopt_area_contig_done(&iter));
        up_read(&iopt->iova_rwsem);
        mutex_unlock(&access->ioas_lock);
@@ -1354,6 +1357,7 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
                             unsigned long length, struct page **out_pages,
                             unsigned int flags)
 {
+       bool internal = iommufd_access_is_internal(access);
        struct iopt_area_contig_iter iter;
        struct io_pagetable *iopt;
        unsigned long last_iova;
@@ -1362,7 +1366,8 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
 
        /* Driver's ops don't support pin_pages */
        if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
-           WARN_ON(access->iova_alignment != PAGE_SIZE || !access->ops->unmap))
+           WARN_ON(access->iova_alignment != PAGE_SIZE ||
+                   (!internal && !access->ops->unmap)))
                return -EINVAL;
 
        if (!length)
@@ -1396,7 +1401,7 @@ int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
                }
 
                rc = iopt_area_add_access(area, index, last_index, out_pages,
-                                         flags);
+                                         flags, internal);
                if (rc)
                        goto err_remove;
                out_pages += last_index - index + 1;
@@ -1419,7 +1424,8 @@ err_remove:
                                iopt_area_iova_to_index(area, iter.cur_iova),
                                iopt_area_iova_to_index(
                                        area, min(last_iova,
-                                                 iopt_area_last_iova(area))));
+                                                 iopt_area_last_iova(area))),
+                               internal);
        }
        up_read(&iopt->iova_rwsem);
        mutex_unlock(&access->ioas_lock);
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index 22fc3a12109f0bb6d8de7f06421dbccd2b1d8b88..abf4aadca96c0bf69800cdab9e652778f4cecc88 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -719,6 +719,12 @@ again:
                        goto out_unlock_iova;
                }
 
+               /* The area is locked by an object that has not been destroyed */
+               if (area->num_locks) {
+                       rc = -EBUSY;
+                       goto out_unlock_iova;
+               }
+
                if (area_first < start || area_last > last) {
                        rc = -ENOENT;
                        goto out_unlock_iova;
diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h
index c115a51d93846a8a2007c1a3d2011c1585684117..b6064f4ce4af91f100bcd12b7e9587fae21125a0 100644
--- a/drivers/iommu/iommufd/io_pagetable.h
+++ b/drivers/iommu/iommufd/io_pagetable.h
@@ -48,6 +48,7 @@ struct iopt_area {
        int iommu_prot;
        bool prevent_access : 1;
        unsigned int num_accesses;
+       unsigned int num_locks;
 };
 
 struct iopt_allowed {
@@ -238,9 +239,9 @@ void iopt_pages_unfill_xarray(struct iopt_pages *pages, unsigned long start,
 
 int iopt_area_add_access(struct iopt_area *area, unsigned long start,
                         unsigned long last, struct page **out_pages,
-                        unsigned int flags);
+                        unsigned int flags, bool lock_area);
 void iopt_area_remove_access(struct iopt_area *area, unsigned long start,
-                            unsigned long last);
+                            unsigned long last, bool unlock_area);
 int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte,
                         void *data, unsigned long length, unsigned int flags);
 
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index cbdde642d2af8d5c792f2a562f51287f1fb4d3d4..c3433b84556172d89edb9af1423557d331c29bdb 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -2103,6 +2103,7 @@ iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index,
  * @last_index: Inclusive last page index
  * @out_pages: Output list of struct page's representing the PFNs
  * @flags: IOMMUFD_ACCESS_RW_* flags
+ * @lock_area: Fail userspace munmap on this area
  *
  * Record that an in-kernel access will be accessing the pages, ensure they are
  * pinned, and return the PFNs as a simple list of 'struct page *'.
@@ -2111,7 +2112,7 @@ iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index,
  */
 int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
                         unsigned long last_index, struct page **out_pages,
-                        unsigned int flags)
+                        unsigned int flags, bool lock_area)
 {
        struct iopt_pages *pages = area->pages;
        struct iopt_pages_access *access;
@@ -2124,6 +2125,8 @@ int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
        access = iopt_pages_get_exact_access(pages, start_index, last_index);
        if (access) {
                area->num_accesses++;
+               if (lock_area)
+                       area->num_locks++;
                access->users++;
                iopt_pages_fill_from_xarray(pages, start_index, last_index,
                                            out_pages);
@@ -2145,6 +2148,8 @@ int iopt_area_add_access(struct iopt_area *area, unsigned long start_index,
        access->node.last = last_index;
        access->users = 1;
        area->num_accesses++;
+       if (lock_area)
+               area->num_locks++;
        interval_tree_insert(&access->node, &pages->access_itree);
        mutex_unlock(&pages->mutex);
        return 0;
@@ -2161,12 +2166,13 @@ err_unlock:
  * @area: The source of PFNs
  * @start_index: First page index
  * @last_index: Inclusive last page index
+ * @unlock_area: Must match the matching iopt_area_add_access()'s lock_area
  *
  * Undo iopt_area_add_access() and unpin the pages if necessary. The caller
  * must stop using the PFNs before calling this.
  */
 void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index,
-                            unsigned long last_index)
+                            unsigned long last_index, bool unlock_area)
 {
        struct iopt_pages *pages = area->pages;
        struct iopt_pages_access *access;
@@ -2177,6 +2183,10 @@ void iopt_area_remove_access(struct iopt_area *area, unsigned long start_index,
                goto out_unlock;
 
        WARN_ON(area->num_accesses == 0 || access->users == 0);
+       if (unlock_area) {
+               WARN_ON(area->num_locks == 0);
+               area->num_locks--;
+       }
        area->num_accesses--;
        access->users--;
        if (access->users)
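
As a closing note on the contract added here: lock_area and unlock_area
must be passed symmetrically so that area->num_locks stays balanced,
which the WARN_ON above enforces. A minimal caller-side sketch:

        rc = iopt_area_add_access(area, start_index, last_index, out_pages,
                                  flags, true /* lock_area */);
        if (rc)
                return rc;
        /* ... use the pinned pages ... */
        iopt_area_remove_access(area, start_index, last_index,
                                true /* unlock_area */);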