domain->qi_batch);
}
-static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_tag *tag)
-{
- struct intel_iommu *iommu = tag->iommu;
- struct device_domain_info *info;
- u16 sid;
-
- info = dev_iommu_priv_get(tag->dev);
- sid = PCI_DEVID(info->bus, info->devfn);
-
- qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
- MAX_AGAW_PFN_WIDTH, domain->qi_batch);
- if (info->dtlb_extra_inval)
- qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
- MAX_AGAW_PFN_WIDTH, domain->qi_batch);
-}
-
/*
* Invalidates a range of IOVA from @start (inclusive) to @end (inclusive)
* when the memory mappings in the target domain have been modified.
break;
case CACHE_TAG_DEVTLB:
case CACHE_TAG_NESTING_DEVTLB:
- cache_tag_flush_devtlb_all(domain, tag);
+ cache_tag_flush_devtlb_psi(domain, tag, 0, MAX_AGAW_PFN_WIDTH);
break;
}
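
Note on the change above: the deleted helper only duplicated the page-selective
dev-IOTLB path with a fixed (addr = 0, mask = MAX_AGAW_PFN_WIDTH) pair, and that
pair already describes the entire IOVA space, so the CACHE_TAG_DEVTLB /
CACHE_TAG_NESTING_DEVTLB arm can reuse cache_tag_flush_devtlb_psi() directly.
The sketch below is assumed from the names visible in this diff, not the
verbatim cache.c implementation (the real helper also covers PASID-tagged
caches, which is omitted here):

/*
 * Sketch only: a page-selective dev-IOTLB flush parameterized by (addr, mask).
 * Calling it with addr = 0 and mask = MAX_AGAW_PFN_WIDTH queues the same
 * qi_batch_add_dev_iotlb() requests as the deleted cache_tag_flush_devtlb_all().
 */
static void cache_tag_flush_devtlb_psi_sketch(struct dmar_domain *domain,
					      struct cache_tag *tag,
					      unsigned long addr,
					      unsigned long mask)
{
	struct intel_iommu *iommu = tag->iommu;
	struct device_domain_info *info = dev_iommu_priv_get(tag->dev);
	u16 sid = PCI_DEVID(info->bus, info->devfn);

	qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
			       addr, mask, domain->qi_batch);
	if (info->dtlb_extra_inval)
		qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
				       addr, mask, domain->qi_batch);
}

With that shape, "flush all" is just the degenerate full-range case of the
range flush, so one near-duplicate helper can be dropped.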