iommu: Store either domain or handle in group->pasid_array
author     Yi Liu <yi.l.liu@intel.com>
Wed, 26 Feb 2025 01:18:48 +0000 (17:18 -0800)
committer  Jason Gunthorpe <jgg@nvidia.com>
Fri, 28 Feb 2025 14:07:14 +0000 (10:07 -0400)
iommu_attach_device_pasid() only stores the handle in group->pasid_array
when the caller passes a valid handle. However, this leaves
iommu_attach_device_pasid() unable to detect whether the PASID has
already been attached.

To be complete, let iommu_attach_device_pasid() store the domain in
group->pasid_array when no valid handle is given, and update the other
users of group->pasid_array to be consistent, e.g.
iommu_attach_group_handle() and iommu_replace_group_handle(). (A
standalone sketch of the tagged-pointer scheme follows the sign-off tags
below.)

Link: https://patch.msgid.link/r/20250226011849.5102-4-yi.l.liu@intel.com
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
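
For context, the change relies on XArray pointer tags: xa_tag_pointer() folds a
small integer (0-3) into the low bits of a sufficiently aligned pointer, and
xa_pointer_tag()/xa_untag_pointer() recover the tag and the original pointer on
load. Below is a minimal, self-contained sketch of that round trip; it is not
part of the patch, and the xarray, tag, and function names are invented for
illustration only.

#include <linux/xarray.h>

/* Illustrative tag values; an XArray pointer tag must fit in two bits. */
enum { EX_TAG_DOMAIN = 0, EX_TAG_HANDLE = 1 };

static DEFINE_XARRAY(ex_array);

static int ex_store_tagged(void *object)
{
	/* The tag rides in the low bits of the (aligned) pointer. */
	return xa_insert(&ex_array, 0, xa_tag_pointer(object, EX_TAG_HANDLE),
			 GFP_KERNEL);
}

static void *ex_load_tagged(void)
{
	void *entry = xa_load(&ex_array, 0);

	/* Both the tag and the original pointer survive the round trip. */
	if (entry && xa_pointer_tag(entry) == EX_TAG_HANDLE)
		return xa_untag_pointer(entry);
	return NULL;
}
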
drivers/iommu/iommu.c

index 37c8cc6a9f79a25ecdf45bbf0265a2b7d35193d1..d3652518c7347b29797534670b9c154c10b487a6 100644
@@ -45,6 +45,9 @@ static unsigned int iommu_def_domain_type __read_mostly;
 static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
 static u32 iommu_cmd_line __read_mostly;
 
+/* Tags used with xa_tag_pointer() in group->pasid_array */
+enum { IOMMU_PASID_ARRAY_DOMAIN = 0, IOMMU_PASID_ARRAY_HANDLE = 1 };
+
 struct iommu_group {
        struct kobject kobj;
        struct kobject *devices_kobj;
@@ -2147,6 +2150,17 @@ struct iommu_domain *iommu_get_dma_domain(struct device *dev)
        return dev->iommu_group->default_domain;
 }
 
+static void *iommu_make_pasid_array_entry(struct iommu_domain *domain,
+                                         struct iommu_attach_handle *handle)
+{
+       if (handle) {
+               handle->domain = domain;
+               return xa_tag_pointer(handle, IOMMU_PASID_ARRAY_HANDLE);
+       }
+
+       return xa_tag_pointer(domain, IOMMU_PASID_ARRAY_DOMAIN);
+}
+
 static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group)
 {
@@ -3348,6 +3362,7 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
        struct iommu_group *group = dev->iommu_group;
        struct group_device *device;
        const struct iommu_ops *ops;
+       void *entry;
        int ret;
 
        if (!group)
@@ -3371,10 +3386,9 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
                }
        }
 
-       if (handle)
-               handle->domain = domain;
+       entry = iommu_make_pasid_array_entry(domain, handle);
 
-       ret = xa_insert(&group->pasid_array, pasid, handle, GFP_KERNEL);
+       ret = xa_insert(&group->pasid_array, pasid, entry, GFP_KERNEL);
        if (ret)
                goto out_unlock;
 
@@ -3454,13 +3468,17 @@ struct iommu_attach_handle *
 iommu_attach_handle_get(struct iommu_group *group, ioasid_t pasid, unsigned int type)
 {
        struct iommu_attach_handle *handle;
+       void *entry;
 
        xa_lock(&group->pasid_array);
-       handle = xa_load(&group->pasid_array, pasid);
-       if (!handle)
+       entry = xa_load(&group->pasid_array, pasid);
+       if (!entry || xa_pointer_tag(entry) != IOMMU_PASID_ARRAY_HANDLE) {
                handle = ERR_PTR(-ENOENT);
-       else if (type && handle->domain->type != type)
-               handle = ERR_PTR(-EBUSY);
+       } else {
+               handle = xa_untag_pointer(entry);
+               if (type && handle->domain->type != type)
+                       handle = ERR_PTR(-EBUSY);
+       }
        xa_unlock(&group->pasid_array);
 
        return handle;
@@ -3483,14 +3501,15 @@ int iommu_attach_group_handle(struct iommu_domain *domain,
                              struct iommu_group *group,
                              struct iommu_attach_handle *handle)
 {
+       void *entry;
        int ret;
 
        if (!handle)
                return -EINVAL;
 
        mutex_lock(&group->mutex);
-       handle->domain = domain;
-       ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
+       entry = iommu_make_pasid_array_entry(domain, handle);
+       ret = xa_insert(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
        if (ret)
                goto err_unlock;
 
@@ -3543,14 +3562,14 @@ int iommu_replace_group_handle(struct iommu_group *group,
                               struct iommu_domain *new_domain,
                               struct iommu_attach_handle *handle)
 {
-       void *curr;
+       void *curr, *entry;
        int ret;
 
        if (!new_domain || !handle)
                return -EINVAL;
 
        mutex_lock(&group->mutex);
-       handle->domain = new_domain;
+       entry = iommu_make_pasid_array_entry(new_domain, handle);
        ret = xa_reserve(&group->pasid_array, IOMMU_NO_PASID, GFP_KERNEL);
        if (ret)
                goto err_unlock;
@@ -3559,7 +3578,7 @@ int iommu_replace_group_handle(struct iommu_group *group,
        if (ret)
                goto err_release;
 
-       curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, handle, GFP_KERNEL);
+       curr = xa_store(&group->pasid_array, IOMMU_NO_PASID, entry, GFP_KERNEL);
        WARN_ON(xa_is_err(curr));
 
        mutex_unlock(&group->mutex);
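
With both attach paths storing a tagged entry, any reader of group->pasid_array
can now recover the attached domain regardless of whether a handle was supplied
at attach time. A hypothetical helper illustrating such a lookup (the function
name is invented and not part of this patch; the caller is assumed to hold
group->mutex or the array's xa_lock):

static struct iommu_domain *
example_pasid_to_domain(struct iommu_group *group, ioasid_t pasid)
{
	void *entry = xa_load(&group->pasid_array, pasid);

	if (!entry)
		return NULL;

	/* A tagged handle carries the domain inside it... */
	if (xa_pointer_tag(entry) == IOMMU_PASID_ARRAY_HANDLE) {
		struct iommu_attach_handle *handle = xa_untag_pointer(entry);

		return handle->domain;
	}

	/* ...otherwise the entry is the tagged domain pointer itself. */
	return xa_untag_pointer(entry);
}
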