static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
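+/*
+ * Required write-buffer flushing: either the IOMMU advertises it via the
+ * RWBF capability bit, or the Tylersburg isoch erratum applies (rwbf_quirk,
+ * set by check_tylersburg_isoch()).
+ */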
+#define rwbf_required(iommu) (rwbf_quirk || cap_rwbf((iommu)->cap))
+
/*
* set to 1 to panic kernel if can't successfully enable VT-d
* (used when kernel is launched w/ TXT)
__pa(pgd), flags, old);
}
-static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- if (cap_caching_mode(iommu->cap) && intel_domain_is_ss_paging(domain))
- return true;
-
- if (rwbf_quirk || cap_rwbf(iommu->cap))
- return true;
-
- return false;
-}
-
static int dmar_domain_attach_device(struct dmar_domain *domain,
struct device *dev)
{
if (ret)
goto out_block_translation;
- domain->iotlb_sync_map |= domain_need_iotlb_sync_map(domain, iommu);
-
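+ /*
+ * iotlb_sync_map is decided once at domain allocation time; the
+ * paging domain compatibility checks reject any iommu whose flush
+ * requirements the domain does not meet.
+ */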
return 0;
out_block_translation:
return ERR_CAST(dmar_domain);
dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
+ /*
+ * An iotlb sync on map is only needed on legacy implementations that
+ * require an explicit write-buffer flush before in-memory page-table
+ * updates become visible to the hardware.
+ */
+ if (rwbf_required(iommu))
+ dmar_domain->iotlb_sync_map = true;
+
return &dmar_domain->domain;
}
if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
dmar_domain->domain.dirty_ops = &intel_dirty_ops;
+ /*
+ * Besides the write-buffer flush, caching mode also requires an iotlb
+ * sync on map: a virtual IOMMU implementing legacy nested translation
+ * shadows the page tables and may cache not-present entries, so it
+ * must be notified of every new mapping.
+ */
+ if (rwbf_required(iommu) || cap_caching_mode(iommu->cap))
+ dmar_domain->iotlb_sync_map = true;
+
return &dmar_domain->domain;
}
if (!cap_fl1gp_support(iommu->cap) &&
(dmar_domain->domain.pgsize_bitmap & SZ_1G))
return -EINVAL;
+
+ /*
+ * Reject an iommu that requires an iotlb sync on map if the domain
+ * was allocated without it. The reverse case is harmless: a domain
+ * that syncs on map merely does redundant flushes on an iommu that
+ * does not need them.
+ */
+ if (rwbf_required(iommu) && !dmar_domain->iotlb_sync_map)
+ return -EINVAL;
+
return 0;
}
return -EINVAL;
if (!(sslps & BIT(1)) && (dmar_domain->domain.pgsize_bitmap & SZ_1G))
return -EINVAL;
+
+ /*
+ * Reject an iommu that requires an iotlb sync on map, whether for
+ * write-buffer flushing or caching mode, if the domain was allocated
+ * without it.
+ */
+ if ((rwbf_required(iommu) || cap_caching_mode(iommu->cap)) &&
+ !dmar_domain->iotlb_sync_map)
+ return -EINVAL;
+
return 0;
}
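
For reference, a minimal sketch of a map-path consumer under the new scheme;
the helper name and the second-stage invalidation details are illustrative
assumptions, not part of this patch:

	/* Hypothetical helper: flush after map only when the domain needs it. */
	static void example_iotlb_sync_on_map(struct dmar_domain *domain,
					      struct intel_iommu *iommu)
	{
		if (!domain->iotlb_sync_map)
			return;

		/* Legacy write-buffer hardware: make the new PTEs visible. */
		if (rwbf_required(iommu))
			iommu_flush_write_buffer(iommu);

		/*
		 * Under caching mode a vIOMMU may have cached not-present
		 * entries; a real implementation would also issue an IOTLB
		 * invalidation for the mapped range here.
		 */
	}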