int id;
struct iommu_domain *default_domain;
struct iommu_domain *blocking_domain;
- /*
- * During a group device reset, @resetting_domain points to the physical
- * domain, while @domain points to the attached domain before the reset.
- */
- struct iommu_domain *resetting_domain;
struct iommu_domain *domain;
struct list_head entry;
unsigned int owner_cnt;
+ /*
+ * Number of devices in the group undergoing or awaiting recovery.
+ * If non-zero, concurrent domain attachments are rejected.
+ */
+ unsigned int recovery_cnt;
void *owner;
};
struct list_head list;
struct device *dev;
char *name;
+ /*
+ * Device is blocked for a pending recovery while its group->domain is
+ * retained. This can happen when:
+ * - Device is undergoing a reset
+ */
+ bool blocked;
};
/* Iterate over each struct group_device in a struct iommu_group */
#define for_each_group_device(group, pos) \
list_for_each_entry(pos, &(group)->devices, list)
+/*
+ * Look up the struct group_device for @dev within its iommu_group's device
+ * list. The caller must hold group->mutex (asserted via lockdep below).
+ *
+ * Returns the matching group_device, or NULL if @dev is not (or is no
+ * longer) listed in the group.
+ */
+static struct group_device *__dev_to_gdev(struct device *dev)
+{
+	struct iommu_group *group = dev->iommu_group;
+	struct group_device *gdev;
+
+	lockdep_assert_held(&group->mutex);
+
+	for_each_group_device(group, gdev) {
+		if (gdev->dev == dev)
+			return gdev;
+	}
+	return NULL;
+}
+
struct iommu_group_attribute {
struct attribute attr;
ssize_t (*show)(struct iommu_group *group, char *buf);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
+ struct group_device *gdev;
+
/*
* This is called on the dma mapping fast path so avoid locking. This is
* racy, but we have an expectation that the driver will setup its DMAs
guard(mutex)(&dev->iommu_group->mutex);
+ gdev = __dev_to_gdev(dev);
+ if (WARN_ON(!gdev))
+ return -ENODEV;
+
/*
- * This is a concurrent attach during a device reset. Reject it until
+ * This is a concurrent attach during device recovery. Reject it until
* pci_dev_reset_iommu_done() attaches the device to group->domain.
*
* Note that this might fail the iommu_dma_map(). But there's nothing
* more we can do here.
*/
- if (dev->iommu_group->resetting_domain)
+ if (gdev->blocked)
return -EBUSY;
return __iommu_attach_device(domain, dev, NULL);
}
struct iommu_domain *iommu_driver_get_domain_for_dev(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
+ struct group_device *gdev;
lockdep_assert_held(&group->mutex);
+ gdev = __dev_to_gdev(dev);
+ if (WARN_ON(!gdev))
+ return NULL;
+
/*
* Driver handles the low-level __iommu_attach_device(), including the
* one invoked by pci_dev_reset_iommu_done() re-attaching the device to
* the cached group->domain. In this case, the driver must get the old
- * domain from group->resetting_domain rather than group->domain. This
+ * domain from group->blocking_domain rather than group->domain. This
* prevents it from re-attaching the device from group->domain (old) to
* group->domain (new).
*/
- if (group->resetting_domain)
- return group->resetting_domain;
+ if (gdev->blocked)
+ return group->blocking_domain;
return group->domain;
}
return -EINVAL;
/*
- * This is a concurrent attach during a device reset. Reject it until
+ * This is a concurrent attach during device recovery. Reject it until
* pci_dev_reset_iommu_done() attaches the device to group->domain.
*/
- if (group->resetting_domain)
+ if (group->recovery_cnt)
return -EBUSY;
/*
mutex_lock(&group->mutex);
/*
- * This is a concurrent attach during a device reset. Reject it until
+ * This is a concurrent attach during device recovery. Reject it until
* pci_dev_reset_iommu_done() attaches the device to group->domain.
*/
- if (group->resetting_domain) {
+ if (group->recovery_cnt) {
ret = -EBUSY;
goto out_unlock;
}
mutex_lock(&group->mutex);
/*
- * This is a concurrent attach during a device reset. Reject it until
+ * This is a concurrent attach during device recovery. Reject it until
* pci_dev_reset_iommu_done() attaches the device to group->domain.
*/
- if (group->resetting_domain) {
+ if (group->recovery_cnt) {
ret = -EBUSY;
goto out_unlock;
}
* routine wants to block any IOMMU activity: translation and ATS invalidation.
*
* This function attaches the device's RID/PASID(s) the group->blocking_domain,
- * setting the group->resetting_domain. This allows the IOMMU driver pausing any
+ * incrementing the group->recovery_cnt, allowing the IOMMU driver to pause any
* IOMMU activity while leaving the group->domain pointer intact. Later when the
* reset is finished, pci_dev_reset_iommu_done() can restore everything.
*
* Caller must use pci_dev_reset_iommu_prepare() with pci_dev_reset_iommu_done()
- * before/after the core-level reset routine, to unset the resetting_domain.
+ * before/after the core-level reset routine, to decrement the recovery_cnt.
*
* Return: 0 on success or negative error code if the preparation failed.
*
int pci_dev_reset_iommu_prepare(struct pci_dev *pdev)
{
struct iommu_group *group = pdev->dev.iommu_group;
+ struct group_device *gdev;
unsigned long pasid;
void *entry;
int ret;
guard(mutex)(&group->mutex);
+ gdev = __dev_to_gdev(&pdev->dev);
+ if (WARN_ON(!gdev))
+ return -ENODEV;
+
/* Re-entry is not allowed */
- if (WARN_ON(group->resetting_domain))
+ if (WARN_ON(gdev->blocked))
return -EBUSY;
ret = __iommu_group_alloc_blocking_domain(group);
return ret;
}
+ /*
+ * Update gdev->blocked upon the domain change, as it is used to return
+ * the correct domain in iommu_driver_get_domain_for_dev() that might be
+ * called in a set_dev_pasid callback function.
+ */
+ gdev->blocked = true;
+
/*
* Stage PASID domains at blocking_domain while retaining pasid_array.
*
iommu_remove_dev_pasid(&pdev->dev, pasid,
pasid_array_entry_to_domain(entry));
- group->resetting_domain = group->blocking_domain;
+ group->recovery_cnt++;
return ret;
}
EXPORT_SYMBOL_GPL(pci_dev_reset_iommu_prepare);
void pci_dev_reset_iommu_done(struct pci_dev *pdev)
{
struct iommu_group *group = pdev->dev.iommu_group;
+ struct group_device *gdev;
unsigned long pasid;
void *entry;
	guard(mutex)(&group->mutex);
-	/* pci_dev_reset_iommu_prepare() was bypassed for the device */
-	if (!group->resetting_domain)
+	gdev = __dev_to_gdev(&pdev->dev);
+	if (WARN_ON(!gdev))
+		return;
+
+	/* pci_dev_reset_iommu_prepare() was bypassed for the device */
+	if (!gdev->blocked)
		return;
-	/* pci_dev_reset_iommu_prepare() was not successfully called */
+	/* pci_dev_reset_iommu_prepare() was not successfully called */
	if (WARN_ON(!group->blocking_domain))
	return;
group->blocking_domain));
}
+ /*
+ * Update gdev->blocked upon the domain change, as it is used to return
+ * the correct domain in iommu_driver_get_domain_for_dev() that might be
+ * called in a set_dev_pasid callback function.
+ */
+ gdev->blocked = false;
+
/*
* Re-attach PASID domains back to the domains retained in pasid_array.
*
pasid_array_entry_to_domain(entry), group, pasid,
group->blocking_domain));
- group->resetting_domain = NULL;
+ if (!WARN_ON(group->recovery_cnt == 0))
+ group->recovery_cnt--;
}
EXPORT_SYMBOL_GPL(pci_dev_reset_iommu_done);