 }
 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
 
-static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
-			    phys_addr_t msi_addr);
-
 /* Number of entries per flush queue */
 #define IOVA_DEFAULT_FQ_SIZE	256
 #define IOVA_SINGLE_FQ_SIZE	32768
 	mutex_init(&cookie->mutex);
 	INIT_LIST_HEAD(&cookie->msi_page_list);
-	iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
 	domain->cookie_type = IOMMU_COOKIE_DMA_IOVA;
 	domain->iova_cookie = cookie;
 	return 0;
 	cookie->msi_iova = base;
 	INIT_LIST_HEAD(&cookie->msi_page_list);
-	iommu_domain_set_sw_msi(domain, iommu_dma_sw_msi);
 	domain->cookie_type = IOMMU_COOKIE_DMA_MSI;
 	domain->msi_cookie = cookie;
 	return 0;
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iommu_dma_msi_page *msi, *tmp;
 
-#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
-	if (domain->sw_msi != iommu_dma_sw_msi)
-		return;
-#endif
-
 	if (cookie->iovad.granule) {
 		iommu_dma_free_fq(cookie);
 		put_iova_domain(&cookie->iovad);
 	return NULL;
 }
 
-static int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
-			    phys_addr_t msi_addr)
+int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+		     phys_addr_t msi_addr)
 {
 	struct device *dev = msi_desc_to_dev(desc);
 	const struct iommu_dma_msi_page *msi_page;
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
+int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
+		     phys_addr_t msi_addr);
+
 extern bool iommu_dma_forcedac;
 
 #else /* CONFIG_IOMMU_DMA */
 {
 }
 
+static inline int iommu_dma_sw_msi(struct iommu_domain *domain,
+				   struct msi_desc *desc, phys_addr_t msi_addr)
+{
+	return -ENODEV;
+}
+
 #endif /* CONFIG_IOMMU_DMA */
 
 #endif /* __DMA_IOMMU_H */
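The stub just added mirrors the pattern used throughout this header: the cookie-type switch introduced in iommu_dma_prepare_msi() below references iommu_dma_sw_msi() even in builds with CONFIG_IOMMU_DMA disabled, so a static inline returning -ENODEV keeps that call site compiling while the DMA cookie cases stay unreachable in such a configuration. A minimal sketch of the pattern, with hypothetical names standing in for the real config option and function:

/*
 * Hypothetical illustration of the header-stub pattern; CONFIG_FEATURE_X
 * and feature_x_prepare() are stand-ins, not kernel symbols.
 */
#ifdef CONFIG_FEATURE_X
int feature_x_prepare(struct device *dev);	/* real version in feature_x.c */
#else
static inline int feature_x_prepare(struct device *dev)
{
	return -ENODEV;		/* feature compiled out; callers still build */
}
#endif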
 #include <linux/errno.h>
 #include <linux/host1x_context_bus.h>
 #include <linux/iommu.h>
+#include <linux/iommufd.h>
 #include <linux/idr.h>
 #include <linux/err.h>
 #include <linux/pci.h>
 		return 0;
 
 	mutex_lock(&group->mutex);
-	if (group->domain && group->domain->sw_msi)
-		ret = group->domain->sw_msi(group->domain, desc, msi_addr);
+	/* An IDENTITY domain must pass through */
+	if (group->domain && group->domain->type != IOMMU_DOMAIN_IDENTITY) {
+		switch (group->domain->cookie_type) {
+		case IOMMU_COOKIE_DMA_MSI:
+		case IOMMU_COOKIE_DMA_IOVA:
+			ret = iommu_dma_sw_msi(group->domain, desc, msi_addr);
+			break;
+		case IOMMU_COOKIE_IOMMUFD:
+			ret = iommufd_sw_msi(group->domain, desc, msi_addr);
+			break;
+		default:
+			ret = -EOPNOTSUPP;
+			break;
+		}
+	}
 	mutex_unlock(&group->mutex);
 	return ret;
 }
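Pieced together with its unchanged surroundings, iommu_dma_prepare_msi() should read roughly as below once the hunk is applied; the lines outside the hunk are reconstructed from the visible context and may differ slightly from the tree:

/*
 * Sketch of iommu_dma_prepare_msi() after this patch; lines outside the
 * hunk above are assumptions based on its context.
 */
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_group *group = dev->iommu_group;
	int ret = 0;

	if (!group)
		return 0;

	mutex_lock(&group->mutex);
	/* An IDENTITY domain must pass through */
	if (group->domain && group->domain->type != IOMMU_DOMAIN_IDENTITY) {
		switch (group->domain->cookie_type) {
		case IOMMU_COOKIE_DMA_MSI:
		case IOMMU_COOKIE_DMA_IOVA:
			ret = iommu_dma_sw_msi(group->domain, desc, msi_addr);
			break;
		case IOMMU_COOKIE_IOMMUFD:
			ret = iommufd_sw_msi(group->domain, desc, msi_addr);
			break;
		default:
			ret = -EOPNOTSUPP;
			break;
		}
	}
	mutex_unlock(&group->mutex);
	return ret;
}

Dispatching on cookie_type here is what lets the per-domain sw_msi function pointer (removed from struct iommu_domain further down) go away: the cookie type already identifies which MSI-mapping implementation owns the domain.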
 	}
 	hwpt->domain->iommufd_hwpt = hwpt;
 	hwpt->domain->cookie_type = IOMMU_COOKIE_IOMMUFD;
-	iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
 
 	/*
 	 * Set the coherency mode before we do iopt_table_add_domain() as some
 	hwpt->domain->owner = ops;
 	hwpt->domain->iommufd_hwpt = hwpt;
 	hwpt->domain->cookie_type = IOMMU_COOKIE_IOMMUFD;
-	iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
 
 	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
 		rc = -EINVAL;
 	hwpt->domain->iommufd_hwpt = hwpt;
 	hwpt->domain->owner = viommu->iommu_dev->ops;
 	hwpt->domain->cookie_type = IOMMU_COOKIE_IOMMUFD;
-	iommu_domain_set_sw_msi(hwpt->domain, iommufd_sw_msi);
 
 	if (WARN_ON_ONCE(hwpt->domain->type != IOMMU_DOMAIN_NESTED)) {
 		rc = -EINVAL;
 	struct iommu_domain_geometry geometry;
 	int (*iopf_handler)(struct iopf_group *group);
-#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
-	int (*sw_msi)(struct iommu_domain *domain, struct msi_desc *desc,
-		      phys_addr_t msi_addr);
-#endif
-
 	union { /* cookie */
 		struct iommu_dma_cookie *iova_cookie;
 		struct iommu_dma_msi_cookie *msi_cookie;
 	};
 };
-static inline void iommu_domain_set_sw_msi(
-	struct iommu_domain *domain,
-	int (*sw_msi)(struct iommu_domain *domain, struct msi_desc *desc,
-		      phys_addr_t msi_addr))
-{
-#if IS_ENABLED(CONFIG_IRQ_MSI_IOMMU)
-	domain->sw_msi = sw_msi;
-#endif
-}
-
 static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
 {
 	return domain->type & __IOMMU_DOMAIN_DMA_API;
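After the removals above, the cookie handling in struct iommu_domain reduces to a type tag plus a union. A sketch of the relevant members follows; everything not visible in the hunks (the exact declaration of cookie_type and the neighbouring fields) is an assumption:

/*
 * Sketch of the post-patch cookie members; only iopf_handler and the
 * union are confirmed by the hunks above, the rest is assumed.
 */
struct iommu_domain {
	unsigned type;
	enum iommu_cookie_type cookie_type;	/* tags the union below */
	/* ... */
	int (*iopf_handler)(struct iopf_group *group);

	union { /* cookie, interpreted according to cookie_type */
		struct iommu_dma_cookie *iova_cookie;	 /* IOMMU_COOKIE_DMA_IOVA */
		struct iommu_dma_msi_cookie *msi_cookie; /* IOMMU_COOKIE_DMA_MSI */
	};
	/* ... */
};

Compared with a per-domain function pointer, the tagged union removes a writable function pointer from a long-lived kernel structure and drops the CONFIG_IRQ_MSI_IOMMU conditional from the struct definition entirely.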