}
EXPORT_SYMBOL_GPL(dma_iova_destroy);
-void iommu_setup_dma_ops(struct device *dev)
+void iommu_setup_dma_ops(struct device *dev, struct iommu_domain *domain)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-
if (dev_is_pci(dev))
dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
#ifdef CONFIG_IOMMU_DMA
-void iommu_setup_dma_ops(struct device *dev);
+void iommu_setup_dma_ops(struct device *dev, struct iommu_domain *domain);
int iommu_get_dma_cookie(struct iommu_domain *domain);
void iommu_put_dma_cookie(struct iommu_domain *domain);
#else /* CONFIG_IOMMU_DMA */
-static inline void iommu_setup_dma_ops(struct device *dev)
+static inline void iommu_setup_dma_ops(struct device *dev,
+ struct iommu_domain *domain)
{
}
}
if (group->default_domain)
- iommu_setup_dma_ops(dev);
+ iommu_setup_dma_ops(dev, group->default_domain);
mutex_unlock(&group->mutex);
return ret;
}
for_each_group_device(group, gdev)
- iommu_setup_dma_ops(gdev->dev);
+ iommu_setup_dma_ops(gdev->dev, group->default_domain);
mutex_unlock(&group->mutex);
/*
 /* Make sure dma_ops is appropriately set */
for_each_group_device(group, gdev)
- iommu_setup_dma_ops(gdev->dev);
+ iommu_setup_dma_ops(gdev->dev, group->default_domain);
out_unlock:
mutex_unlock(&group->mutex);