iommu: Swap the order of setting group->pasid_array and calling attach op of iommu...
author    Yi Liu <yi.l.liu@intel.com>
          Wed, 26 Feb 2025 01:18:49 +0000 (17:18 -0800)
committer Jason Gunthorpe <jgg@nvidia.com>
          Fri, 28 Feb 2025 14:07:14 +0000 (10:04 -0400)
The current implementation stores the entry in the group->pasid_array before
the underlying iommu driver has successfully set the new domain. This can
lead to issues where PRIs are received on the new domain before the attach
operation completes.

This patch swaps the order of operations to ensure that the domain is set
in the underlying iommu driver before updating the group->pasid_array, as
sketched below.

Link: https://patch.msgid.link/r/20250226011849.5102-5-yi.l.liu@intel.com
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
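
The resulting pattern generalizes beyond this file: reserve the XArray slot
first (so allocation failure and an already-present entry are caught early),
perform the hardware attach, and only then publish the pointer. Below is a
minimal sketch of that reserve-then-publish pattern; the names publish_obj,
my_xa, my_lock and hw_attach are illustrative assumptions, not identifiers
from iommu.c.

#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(my_xa);
static DEFINE_MUTEX(my_lock);

/* Attach the hardware first, then make @obj visible at @index. */
static int publish_obj(unsigned long index, void *obj,
                       int (*hw_attach)(void *obj))
{
        int ret;

        mutex_lock(&my_lock);

        /*
         * Reserve the slot: the allocation happens now and an
         * already-present entry fails with -EBUSY, but readers doing
         * xa_load() still see NULL.
         */
        ret = xa_insert(&my_xa, index, XA_ZERO_ENTRY, GFP_KERNEL);
        if (ret)
                goto out_unlock;

        /* Configure the hardware before the object becomes visible. */
        ret = hw_attach(obj);
        if (ret) {
                xa_release(&my_xa, index);      /* drop the reservation */
                goto out_unlock;
        }

        /*
         * Memory was reserved above and my_lock serializes writers, so
         * this store cannot fail; only now do readers observe @obj.
         */
        WARN_ON(xa_is_err(xa_store(&my_xa, index, obj, GFP_KERNEL)));

out_unlock:
        mutex_unlock(&my_lock);
        return ret;
}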
drivers/iommu/iommu.c

index d3652518c7347b29797534670b9c154c10b487a6..0ee17893810fdd447fe44a2af83b6efb45aa362f 100644 (file)
@@ -3388,13 +3388,29 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
 
        entry = iommu_make_pasid_array_entry(domain, handle);
 
-       ret = xa_insert(&group->pasid_array, pasid, entry, GFP_KERNEL);
+       /*
+        * Entry present is a failure case. Use xa_insert() instead of
+        * xa_reserve().
+        */
+       ret = xa_insert(&group->pasid_array, pasid, XA_ZERO_ENTRY, GFP_KERNEL);
        if (ret)
                goto out_unlock;
 
        ret = __iommu_set_group_pasid(domain, group, pasid);
-       if (ret)
-               xa_erase(&group->pasid_array, pasid);
+       if (ret) {
+               xa_release(&group->pasid_array, pasid);
+               goto out_unlock;
+       }
+
+       /*
+        * The xa_insert() above reserved the memory, and the group->mutex is
+        * held, this cannot fail. The new domain cannot be visible until the
+        * operation succeeds as we cannot tolerate PRIs becoming concurrently
+        * queued and then failing attach.
+        */
+       WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+                                  pasid, entry, GFP_KERNEL)));
+
 out_unlock:
        mutex_unlock(&group->mutex);
        return ret;
@@ -3509,19 +3525,27 @@ int iommu_attach_group_handle(struct iommu_domain *domain,
 
        mutex_lock(&group->mutex);
        entry = iommu_make_pasid_array_entry(domain, handle);
-       ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
+       ret = xa_insert(&group->pasid_array,
+                       IOMMU_NO_PASID, XA_ZERO_ENTRY, GFP_KERNEL);
        if (ret)
-               goto err_unlock;
+               goto out_unlock;
 
        ret = __iommu_attach_group(domain, group);
-       if (ret)
-               goto err_erase;
-       mutex_unlock(&group->mutex);
+       if (ret) {
+               xa_release(&group->pasid_array, IOMMU_NO_PASID);
+               goto out_unlock;
+       }
 
-       return 0;
-err_erase:
-       xa_erase(&group->pasid_array, IOMMU_NO_PASID);
-err_unlock:
+       /*
+        * The xa_insert() above reserved the memory, and the group->mutex is
+        * held, this cannot fail. The new domain cannot be visible until the
+        * operation succeeds as we cannot tolerate PRIs becoming concurrently
+        * queued and then failing attach.
+        */
+       WARN_ON(xa_is_err(xa_store(&group->pasid_array,
+                                  IOMMU_NO_PASID, entry, GFP_KERNEL)));
+
+out_unlock:
        mutex_unlock(&group->mutex);
        return ret;
 }
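
Both hunks rely on the same XArray property on the reader side: a reserved
(zero) entry reads back as NULL, so until the final xa_store() publishes the
real pointer, a lookup on group->pasid_array behaves as if nothing were
attached and the page-request path cannot observe a half-attached domain. A
tiny illustrative helper (is_published is an assumed name, not from iommu.c):

#include <linux/xarray.h>

/*
 * xa_load() returns NULL both for an empty slot and for one that is
 * merely reserved with XA_ZERO_ENTRY, so readers never see an object
 * before the writer's final xa_store().
 */
static bool is_published(struct xarray *xa, unsigned long index)
{
        return xa_load(xa, index) != NULL;
}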