return amdr_ivrs_remap_support;
case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
return true;
- case IOMMU_CAP_DEFERRED_FLUSH:
- return true;
case IOMMU_CAP_DIRTY_TRACKING: {
struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
+#include <linux/generic_pt/iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
}
}
+/*
+ * iommu_domain_supports_fq - can @domain use a DMA flush queue for @dev?
+ *
+ * Generic page-table (iommupt) domains are treated as always supporting
+ * deferred flush; for every other domain, defer to the driver's
+ * IOMMU_CAP_DEFERRED_FLUSH capability via device_iommu_capable().
+ * NOTE(review): assumes iommupt_from_domain() returns non-NULL only for
+ * iommupt-backed domains — confirm against the generic_pt/iommu.h helper.
+ */
+static bool iommu_domain_supports_fq(struct device *dev,
+ struct iommu_domain *domain)
+{
+ /* iommupt always supports DMA-FQ */
+ if (iommupt_from_domain(domain))
+ return true;
+ return device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH);
+}
+
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
/* If the FQ fails we can simply fall back to strict mode */
if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
- (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
+ (!iommu_domain_supports_fq(dev, domain) ||
+ iommu_dma_init_fq(domain)))
domain->type = IOMMU_DOMAIN_DMA;
return iova_reserve_iommu_regions(dev, domain);
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
- case IOMMU_CAP_DEFERRED_FLUSH:
- return true;
case IOMMU_CAP_PRE_BOOT_PROTECTION:
return dmar_platform_optin();
case IOMMU_CAP_ENFORCE_CACHE_COHERENCY: