struct context_entry *context;
int ret;
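+	/*
+	 * Legacy-mode context entries reference second-stage page tables
+	 * only, so only an SS paging domain may take this path.
+	 */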
+ if (WARN_ON(!intel_domain_is_ss_paging(domain)))
+ return -EINVAL;
+
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
- if (cap_caching_mode(iommu->cap) && !domain->use_first_level)
+ if (cap_caching_mode(iommu->cap) && intel_domain_is_ss_paging(domain))
return true;
if (rwbf_quirk || cap_rwbf(iommu->cap))
if (!sm_supported(iommu))
ret = domain_context_mapping(domain, dev);
- else if (domain->use_first_level)
+ else if (intel_domain_is_fs_paging(domain))
ret = domain_setup_first_level(iommu, domain, dev,
IOMMU_NO_PASID, NULL);
- else
+ else if (intel_domain_is_ss_paging(domain))
ret = domain_setup_second_level(iommu, domain, dev,
IOMMU_NO_PASID, NULL);
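+	/* A paging domain is either FS or SS; anything else is a bug. */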
+ else if (WARN_ON(true))
+ ret = -EINVAL;
if (ret)
goto out_block_translation;
domain->use_first_level = first_stage;
domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
- domain->domain.ops = intel_iommu_ops.default_domain_ops;
/* calculate the address width */
addr_width = agaw_to_width(iommu->agaw);
dmar_domain = paging_domain_alloc(dev, true);
if (IS_ERR(dmar_domain))
return ERR_CAST(dmar_domain);
+
+ dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
return &dmar_domain->domain;
}
if (IS_ERR(dmar_domain))
return ERR_CAST(dmar_domain);
+ dmar_domain->domain.ops = &intel_ss_paging_domain_ops;
dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
if (ret)
goto out_remove_dev_pasid;
- if (dmar_domain->use_first_level)
+ if (intel_domain_is_fs_paging(dmar_domain))
ret = domain_setup_first_level(iommu, dmar_domain,
dev, pasid, old);
- else
+ else if (intel_domain_is_ss_paging(dmar_domain))
ret = domain_setup_second_level(iommu, dmar_domain,
dev, pasid, old);
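+	/* Same invariant as the attach path: only FS or SS is valid here. */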
+ else if (WARN_ON(true))
+ ret = -EINVAL;
+
if (ret)
goto out_unwind_iopf;
},
};
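+/*
+ * The FS and SS ops tables start out identical; keeping them separate
+ * lets the translation stage of a paging domain be identified from
+ * domain->ops and gives the two implementations room to diverge.
+ */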
+const struct iommu_domain_ops intel_fs_paging_domain_ops = {
+ .attach_dev = intel_iommu_attach_device,
+ .set_dev_pasid = intel_iommu_set_dev_pasid,
+ .map_pages = intel_iommu_map_pages,
+ .unmap_pages = intel_iommu_unmap_pages,
+ .iotlb_sync_map = intel_iommu_iotlb_sync_map,
+ .flush_iotlb_all = intel_flush_iotlb_all,
+ .iotlb_sync = intel_iommu_tlb_sync,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .free = intel_iommu_domain_free,
+ .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
+};
+
+const struct iommu_domain_ops intel_ss_paging_domain_ops = {
+ .attach_dev = intel_iommu_attach_device,
+ .set_dev_pasid = intel_iommu_set_dev_pasid,
+ .map_pages = intel_iommu_map_pages,
+ .unmap_pages = intel_iommu_unmap_pages,
+ .iotlb_sync_map = intel_iommu_iotlb_sync_map,
+ .flush_iotlb_all = intel_flush_iotlb_all,
+ .iotlb_sync = intel_iommu_tlb_sync,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .free = intel_iommu_domain_free,
+ .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
+};
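With the two tables in place, the stage of a paging domain can be read
directly off its ops pointer. The intel_domain_is_fs_paging() and
intel_domain_is_ss_paging() helpers used above are not part of this
excerpt; a minimal sketch, assuming they simply compare against these
tables:

static inline bool intel_domain_is_fs_paging(struct dmar_domain *domain)
{
	return domain->domain.ops == &intel_fs_paging_domain_ops;
}

static inline bool intel_domain_is_ss_paging(struct dmar_domain *domain)
{
	return domain->domain.ops == &intel_ss_paging_domain_ops;
}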
+
const struct iommu_ops intel_iommu_ops = {
.blocked_domain = &blocking_domain,
.release_domain = &blocking_domain,
.def_domain_type = device_def_domain_type,
.pgsize_bitmap = SZ_4K,
.page_response = intel_iommu_page_response,
- .default_domain_ops = &(const struct iommu_domain_ops) {
- .attach_dev = intel_iommu_attach_device,
- .set_dev_pasid = intel_iommu_set_dev_pasid,
- .map_pages = intel_iommu_map_pages,
- .unmap_pages = intel_iommu_unmap_pages,
- .iotlb_sync_map = intel_iommu_iotlb_sync_map,
- .flush_iotlb_all = intel_flush_iotlb_all,
- .iotlb_sync = intel_iommu_tlb_sync,
- .iova_to_phys = intel_iommu_iova_to_phys,
- .free = intel_iommu_domain_free,
- .enforce_cache_coherency = intel_iommu_enforce_cache_coherency,
- }
};
static void quirk_iommu_igfx(struct pci_dev *dev)