From: Joerg Roedel Date: Thu, 4 Jul 2019 15:26:48 +0000 (+0200) Subject: Merge branches 'x86/vt-d', 'x86/amd', 'arm/smmu', 'arm/omap', 'generic-dma-ops' and... X-Git-Tag: v5.3-rc1~172^2 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=d95c3885865b71e56d8d60c8617f2ce1f0fa079d;p=thirdparty%2Fkernel%2Flinux.git Merge branches 'x86/vt-d', 'x86/amd', 'arm/smmu', 'arm/omap', 'generic-dma-ops' and 'core' into next --- d95c3885865b71e56d8d60c8617f2ce1f0fa079d diff --cc drivers/iommu/dma-iommu.c index 129c4badf9ae6,129c4badf9ae6,379318266468c,77aabe637a601,0ba108edc5199,749e3251ee858..f802255219d3a --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@@@@@@ -222,31 -222,31 -211,31 -221,6 -208,31 -222,31 +208,31 @@@@@@@ static int iova_reserve_pci_windows(str hi = iova_pfn(iovad, window->res->end - window->offset); reserve_iova(iovad, lo, hi); } + + /* Get reserved DMA windows from host bridge */ + resource_list_for_each_entry(window, &bridge->dma_ranges) { + end = window->res->start - window->offset; + resv_iova: + if (end > start) { + lo = iova_pfn(iovad, start); + hi = iova_pfn(iovad, end); + reserve_iova(iovad, lo, hi); + } else { + /* dma_ranges list should be sorted */ + dev_err(&dev->dev, "Failed to reserve IOVA\n"); + return -EINVAL; + } + + start = window->res->end - window->offset + 1; + /* If window is last entry */ + if (window->node.next == &bridge->dma_ranges && --- - end != ~(dma_addr_t)0) { --- - end = ~(dma_addr_t)0; +++++ end != ~(phys_addr_t)0) { +++++ end = ~(phys_addr_t)0; + goto resv_iova; + } + } + + return 0; } static int iova_reserve_iommu_regions(struct device *dev, @@@@@@@ -645,46 -645,46 -634,46 -616,56 -648,86 -645,46 +648,86 @@@@@@@ out_free_pages * Maps the pages of the buffer in @pages into @vma. The caller is responsible * for verifying the correct size and protection of @vma beforehand. 
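The iova_reserve_pci_windows() change in the hunk above reserves every IOVA range that falls outside the host bridge's dma_ranges, so the allocator can never hand out an address the bridge cannot forward upstream. Below is a minimal userspace model of that walk, not code from this merge: struct window, reserve() and the inclusive-range arithmetic are simplified stand-ins for the kernel's resource list and reserve_iova() machinery, and the real code's granule rounding and sorting corner cases are not reproduced exactly.

#include <stdint.h>
#include <stdio.h>

/* One addressable DMA window, inclusive bounds, sorted by start. */
struct window {
	uint64_t start;
	uint64_t end;
};

/* Stand-in for reserve_iova(): just report the reserved range. */
static void reserve(uint64_t lo, uint64_t hi)
{
	printf("reserve [%#llx - %#llx]\n",
	       (unsigned long long)lo, (unsigned long long)hi);
}

/* Reserve every address that is not covered by one of the windows. */
static int reserve_gaps(const struct window *w, int n)
{
	uint64_t start = 0, end;

	for (int i = 0; i < n; i++) {
		end = w[i].start;
		if (end < start)
			return -1;		/* overlapping or unsorted list */
		if (end > start)
			reserve(start, end - 1);/* gap below this window */
		start = w[i].end + 1;		/* next gap begins above it */
	}
	if (start)				/* tail gap up to the top of the space */
		reserve(start, UINT64_MAX);
	return 0;
}

int main(void)
{
	const struct window w[] = {
		{ 0x0080000000ULL, 0x00bfffffffULL },
		{ 0x0100000000ULL, 0x01ffffffffULL },
	};

	return reserve_gaps(w, 2);
}

The real code additionally converts resource addresses to bus addresses via window->offset and rounds to IOVA granule page frames before reserving.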
*/ --- - --- -int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma) ++++ +static int __iommu_dma_mmap(struct page **pages, size_t size, ++++ + struct vm_area_struct *vma) + { + return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT); + } --- -static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, --- - size_t size, int prot, struct iommu_domain *domain) - int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma) ++++ +static void iommu_dma_sync_single_for_cpu(struct device *dev, ++++ + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) { --- - struct iommu_dma_cookie *cookie = domain->iova_cookie; --- - size_t iova_off = 0; --- - dma_addr_t iova; - unsigned long uaddr = vma->vm_start; - unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT; - int ret = -ENXIO; ++++ + phys_addr_t phys; --- - if (cookie->type == IOMMU_DMA_IOVA_COOKIE) { --- - iova_off = iova_offset(&cookie->iovad, phys); --- - size = iova_align(&cookie->iovad, size + iova_off); - for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) { - ret = vm_insert_page(vma, uaddr, pages[i]); - if (ret) - break; - uaddr += PAGE_SIZE; ---- - } - return ret; ++++ + if (dev_is_dma_coherent(dev)) ++++ + return; + --- - iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); --- - if (!iova) --- - return DMA_MAPPING_ERROR; ++++ + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); ++++ + arch_sync_dma_for_cpu(dev, phys, size, dir); +++ +} --- - if (iommu_map(domain, iova, phys - iova_off, size, prot)) { --- - iommu_dma_free_iova(cookie, iova, size); --- - return DMA_MAPPING_ERROR; --- - } --- - return iova + iova_off; - static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, - size_t size, int prot, struct iommu_domain *domain) ++++ +static void iommu_dma_sync_single_for_device(struct device *dev, ++++ + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) +++ +{ - struct iommu_dma_cookie *cookie = domain->iova_cookie; - size_t iova_off = 0; - dma_addr_t iova; ++++ + phys_addr_t phys; +++ + - if (cookie->type == IOMMU_DMA_IOVA_COOKIE) { - iova_off = iova_offset(&cookie->iovad, phys); - size = iova_align(&cookie->iovad, size + iova_off); - } ++++ + if (dev_is_dma_coherent(dev)) ++++ + return; +++ + - iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); - if (!iova) - return DMA_MAPPING_ERROR; ++++ + phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); ++++ + arch_sync_dma_for_device(dev, phys, size, dir); ++++ +} +++ + - if (iommu_map(domain, iova, phys - iova_off, size, prot)) { - iommu_dma_free_iova(cookie, iova, size); - return DMA_MAPPING_ERROR; - } - return iova + iova_off; ++++ +static void iommu_dma_sync_sg_for_cpu(struct device *dev, ++++ + struct scatterlist *sgl, int nelems, ++++ + enum dma_data_direction dir) ++++ +{ ++++ + struct scatterlist *sg; ++++ + int i; ++++ + ++++ + if (dev_is_dma_coherent(dev)) ++++ + return; ++++ + ++++ + for_each_sg(sgl, sg, nelems, i) ++++ + arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir); ++++ +} ++++ + ++++ +static void iommu_dma_sync_sg_for_device(struct device *dev, ++++ + struct scatterlist *sgl, int nelems, ++++ + enum dma_data_direction dir) ++++ +{ ++++ + struct scatterlist *sg; ++++ + int i; ++++ + ++++ + if (dev_is_dma_coherent(dev)) ++++ + return; ++++ + ++++ + for_each_sg(sgl, sg, nelems, i) ++++ + arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir); } ---- -dma_addr_t iommu_dma_map_page(struct 
device *dev, struct page *page, ---- - unsigned long offset, size_t size, int prot) ++++ +static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, ++++ + unsigned long offset, size_t size, enum dma_data_direction dir, ++++ + unsigned long attrs) { ---- - return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot, ---- - iommu_get_dma_domain(dev)); ++++ + phys_addr_t phys = page_to_phys(page) + offset; ++++ + bool coherent = dev_is_dma_coherent(dev); ++++ + int prot = dma_info_to_prot(dir, coherent, attrs); ++++ + dma_addr_t dma_handle; ++++ + ++++ + dma_handle =__iommu_dma_map(dev, phys, size, prot); ++++ + if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && ++++ + dma_handle != DMA_MAPPING_ERROR) ++++ + arch_sync_dma_for_device(dev, phys, size, dir); ++++ + return dma_handle; } ---- -void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, ---- - enum dma_data_direction dir, unsigned long attrs) ++++ +static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, ++++ + size_t size, enum dma_data_direction dir, unsigned long attrs) { ---- - __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size); ++++ + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) ++++ + iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir); ++++ + __iommu_dma_unmap(dev, dma_handle, size); } /* @@@@@@@ -931,26 -931,26 -920,26 -911,19 -1192,32 -931,26 +1192,32 @@@@@@@ int iommu_dma_prepare_msi(struct msi_de msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); spin_unlock_irqrestore(&cookie->msi_lock, flags); - if (WARN_ON(!msi_page)) { - /* - * We're called from a void callback, so the best we can do is - * 'fail' by filling the message with obviously bogus values. - * Since we got this far due to an IOMMU being present, it's - * not like the existing address would have worked anyway... 
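The new static iommu_dma_map_page()/iommu_dma_unmap_page() above fold cache maintenance into the map and unmap paths for non-coherent devices: clean caches after a successful map so the device reads current data, invalidate before unmap so the CPU sees what the device wrote, and skip both when the device is coherent or the caller passed DMA_ATTR_SKIP_CPU_SYNC. The sketch below is a userspace model of just that ownership hand-off; sync_for_device()/sync_for_cpu() are printf stand-ins for arch_sync_dma_for_device()/arch_sync_dma_for_cpu(), and the fixed IOVA is obviously fake.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static void sync_for_device(void *buf, size_t len)
{
	printf("clean caches      %p + %zu\n", buf, len);
}

static void sync_for_cpu(void *buf, size_t len)
{
	printf("invalidate caches %p + %zu\n", buf, len);
}

/* Map: hand the buffer to the device, cleaning caches if needed. */
static unsigned long map_page(void *buf, size_t len, bool coherent, bool skip_sync)
{
	unsigned long iova = 0x1000;	/* pretend the IOVA allocator returned this */

	if (!coherent && !skip_sync)
		sync_for_device(buf, len);
	return iova;
}

/* Unmap: take the buffer back from the device, invalidating caches first. */
static void unmap_page(void *buf, unsigned long iova, size_t len,
		       bool coherent, bool skip_sync)
{
	if (!coherent && !skip_sync)
		sync_for_cpu(buf, len);
	printf("unmap iova %#lx\n", iova);
}

int main(void)
{
	char buf[64];
	unsigned long iova;

	iova = map_page(buf, sizeof(buf), false, false);
	unmap_page(buf, iova, sizeof(buf), false, false);
	return 0;
}

The same pattern repeats per segment for scatterlists in the iommu_dma_sync_sg_for_cpu()/_for_device() helpers added above.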
- */ - msg->address_hi = ~0U; - msg->address_lo = ~0U; - msg->data = ~0U; - } else { - msg->address_hi = upper_32_bits(msi_page->iova); - msg->address_lo &= cookie_msi_granule(cookie) - 1; - msg->address_lo += lower_32_bits(msi_page->iova); - } + msi_desc_set_iommu_cookie(desc, msi_page); + + if (!msi_page) + return -ENOMEM; + return 0; + } + + void iommu_dma_compose_msi_msg(struct msi_desc *desc, + struct msi_msg *msg) + { + struct device *dev = msi_desc_to_dev(desc); + const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + const struct iommu_dma_msi_page *msi_page; + + msi_page = msi_desc_get_iommu_cookie(desc); + + if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) + return; + + msg->address_hi = upper_32_bits(msi_page->iova); + msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; + msg->address_lo += lower_32_bits(msi_page->iova); + } ++++ + ++++ +static int iommu_dma_init(void) ++++ +{ ++++ + return iova_cache_get(); +++ +} ++++ +arch_initcall(iommu_dma_init); diff --cc drivers/iommu/intel-iommu.c index ca0a1d5d2983a,a209199f3af64,162b3236e72c3,28cb713d728ce,162b3236e72c3,a209199f3af64..ac4172c022448 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@@@@@@ -2274,23 -2341,33 -2333,33 -2341,32 -2333,33 -2341,33 +2266,23 @@@@@@@ static int __domain_mapping(struct dmar } static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, - struct scatterlist *sg, unsigned long phys_pfn, - unsigned long nr_pages, int prot) - { - int ret; - struct intel_iommu *iommu; - - /* Do the real mapping first */ - ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot); - if (ret) - return ret; - - /* Notify about the new mapping */ - if (domain_type_is_vm(domain)) { - /* VM typed domains can have more than one IOMMUs */ - int iommu_id; - for_each_domain_iommu(iommu_id, domain) { - iommu = g_iommus[iommu_id]; - __mapping_notify_one(iommu, domain, iov_pfn, nr_pages); - } - } else { - /* General domains only have one IOMMU */ - iommu = domain_get_iommu(domain); - __mapping_notify_one(iommu, domain, iov_pfn, nr_pages); - } - - return 0; + struct scatterlist *sg, unsigned long phys_pfn, + unsigned long nr_pages, int prot) + { -- -- int ret; +++++ int iommu_id, ret; + struct intel_iommu *iommu; + + /* Do the real mapping first */ + ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot); + if (ret) + return ret; + -- -- /* Notify about the new mapping */ -- -- if (domain_type_is_vm(domain)) { -- -- /* VM typed domains can have more than one IOMMUs */ -- -- int iommu_id; -- -- -- -- for_each_domain_iommu(iommu_id, domain) { -- -- iommu = g_iommus[iommu_id]; -- -- __mapping_notify_one(iommu, domain, iov_pfn, nr_pages); -- -- } -- -- } else { -- -- /* General domains only have one IOMMU */ -- -- iommu = domain_get_iommu(domain); +++++ for_each_domain_iommu(iommu_id, domain) { +++++ iommu = g_iommus[iommu_id]; + __mapping_notify_one(iommu, domain, iov_pfn, nr_pages); + } + + return 0; } static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, @@@@@@@ -2918,38 -2979,113 -2971,114 -2976,113 -2971,114 -2979,113 +2910,38 @@@@@@@ static int device_def_domain_type(struc */ if (!pci_is_pcie(pdev)) { if (!pci_is_root_bus(pdev->bus)) ----- return 0; +++++ return IOMMU_DOMAIN_DMA; if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) ----- return 0; +++++ return IOMMU_DOMAIN_DMA; } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) ----- return 0; +++++ return IOMMU_DOMAIN_DMA; } else { if 
(device_has_rmrr(dev)) ----- return 0; - - } - - - - /* - - * At boot time, we don't yet know if devices will be 64-bit capable. - - * Assume that they will — if they turn out not to be, then we can - - * take them out of the 1:1 domain later. - - */ - - if (!startup) { - - /* - - * If the device's dma_mask is less than the system's memory - - * size then this is not a candidate for identity mapping. - - */ - - u64 dma_mask = *dev->dma_mask; - - - - if (dev->coherent_dma_mask && - - dev->coherent_dma_mask < dma_mask) - - dma_mask = dev->coherent_dma_mask; - - - - return dma_mask >= dma_get_required_mask(dev); - - } - - - - return 1; - - } - - - - static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw) - - { - - int ret; - - - - if (!iommu_should_identity_map(dev, 1)) - - return 0; - - - - ret = domain_add_dev_info(si_domain, dev); - - if (!ret) - - dev_info(dev, "%s identity mapping\n", - - hw ? "Hardware" : "Software"); - - else if (ret == -ENODEV) - - /* device not associated with an iommu */ - - ret = 0; - - - - return ret; - - } - - - - - - static int __init iommu_prepare_static_identity_mapping(int hw) - - { - - struct pci_dev *pdev = NULL; - - struct dmar_drhd_unit *drhd; - - /* To avoid a -Wunused-but-set-variable warning. */ - - struct intel_iommu *iommu __maybe_unused; - - struct device *dev; - - int i; - - int ret = 0; - - - - for_each_pci_dev(pdev) { - - ret = dev_prepare_static_identity_mapping(&pdev->dev, hw); - - if (ret) - - return ret; +++++ return IOMMU_DOMAIN_DMA; } - - /* - - * At boot time, we don't yet know if devices will be 64-bit capable. - - * Assume that they will — if they turn out not to be, then we can - - * take them out of the 1:1 domain later. - - */ - - if (!startup) { - - /* - - * If the device's dma_mask is less than the system's memory - - * size then this is not a candidate for identity mapping. - - */ - - u64 dma_mask = *dev->dma_mask; - - - - if (dev->coherent_dma_mask && - - dev->coherent_dma_mask < dma_mask) - - dma_mask = dev->coherent_dma_mask; - - - - return dma_mask >= dma_get_required_mask(dev); - - } - - - - return 1; - -} - - - -static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw) - -{ - - int ret; - - - - if (!iommu_should_identity_map(dev, 1)) - - return 0; - - - - ret = domain_add_dev_info(si_domain, dev); - - if (!ret) - - dev_info(dev, "%s identity mapping\n", - - hw ? 
"Hardware" : "Software"); - - else if (ret == -ENODEV) - - /* device not associated with an iommu */ - - ret = 0; - - - - return ret; - -} - - - - - -static int __init iommu_prepare_static_identity_mapping(int hw) - -{ - - struct pci_dev *pdev = NULL; - - struct dmar_drhd_unit *drhd; - - struct intel_iommu *iommu; - - struct device *dev; - - int i; - - int ret = 0; - - - - for_each_pci_dev(pdev) { - - ret = dev_prepare_static_identity_mapping(&pdev->dev, hw); - - if (ret) - - return ret; - - } - - -- -- for_each_active_iommu(iommu, drhd) -- -- for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) { -- -- struct acpi_device_physical_node *pn; -- -- struct acpi_device *adev; -- -- -- -- if (dev->bus != &acpi_bus_type) -- -- continue; -- -- -- -- adev= to_acpi_device(dev); -- -- mutex_lock(&adev->physical_node_lock); -- -- list_for_each_entry(pn, &adev->physical_node_list, node) { -- -- ret = dev_prepare_static_identity_mapping(pn->dev, hw); -- -- if (ret) -- -- break; -- -- } -- -- mutex_unlock(&adev->physical_node_lock); -- -- if (ret) -- -- return ret; -- -- } -- -- -- -- return 0; +++++ return (iommu_identity_mapping & IDENTMAP_ALL) ? +++++ IOMMU_DOMAIN_IDENTITY : 0; + } + + static void intel_iommu_init_qi(struct intel_iommu *iommu) + { /* - * At boot time, we don't yet know if devices will be 64-bit capable. - * Assume that they will — if they turn out not to be, then we can - * take them out of the 1:1 domain later. + * Start from the sane iommu hardware state. + * If the queued invalidation is already initialized by us + * (for example, while enabling interrupt-remapping) then + * we got the things already rolling from a sane state. */ - if (!startup) { + if (!iommu->qi) { /* - * If the device's dma_mask is less than the system's memory - * size then this is not a candidate for identity mapping. + * Clear any previous faults. */ - u64 dma_mask = *dev->dma_mask; - - if (dev->coherent_dma_mask && - dev->coherent_dma_mask < dma_mask) - dma_mask = dev->coherent_dma_mask; - - return dma_mask >= dma_get_required_mask(dev); - } - - return 1; - } - - static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw) - { - int ret; - - if (!iommu_should_identity_map(dev, 1)) - return 0; - - ret = domain_add_dev_info(si_domain, dev); - if (!ret) - dev_info(dev, "%s identity mapping\n", - hw ? "Hardware" : "Software"); - else if (ret == -ENODEV) - /* device not associated with an iommu */ - ret = 0; - - return ret; - } - - - static int __init iommu_prepare_static_identity_mapping(int hw) - { - struct pci_dev *pdev = NULL; - struct dmar_drhd_unit *drhd; - struct intel_iommu *iommu; - struct device *dev; - int i; - int ret = 0; - - for_each_pci_dev(pdev) { - ret = dev_prepare_static_identity_mapping(&pdev->dev, hw); - if (ret) - return ret; - } - - for_each_active_iommu(iommu, drhd) - for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) { - struct acpi_device_physical_node *pn; - struct acpi_device *adev; - - if (dev->bus != &acpi_bus_type) - continue; - - adev= to_acpi_device(dev); - mutex_lock(&adev->physical_node_lock); - list_for_each_entry(pn, &adev->physical_node_list, node) { - ret = dev_prepare_static_identity_mapping(pn->dev, hw); - if (ret) - break; - } - mutex_unlock(&adev->physical_node_lock); - if (ret) - return ret; - } - - return 0; - } - - static void intel_iommu_init_qi(struct intel_iommu *iommu) - { - /* - * Start from the sane iommu hardware state. 
- * If the queued invalidation is already initialized by us - * (for example, while enabling interrupt-remapping) then - * we got the things already rolling from a sane state. - */ - if (!iommu->qi) { - /* - * Clear any previous faults. - */ - dmar_fault(-1, iommu); - /* - * Disable queued invalidation if supported and already enabled - * before OS handover. - */ - dmar_disable_qi(iommu); + dmar_fault(-1, iommu); + /* + * Disable queued invalidation if supported and already enabled + * before OS handover. + */ + dmar_disable_qi(iommu); } if (dmar_enable_qi(iommu)) { @@@@@@@ -3280,17 -3415,70 -3408,70 -3412,67 -3408,70 -3415,70 +3272,17 @@@@@@@ static int __init init_dmars(void iommu_identity_mapping |= IDENTMAP_ALL; #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA - iommu_identity_mapping |= IDENTMAP_GFX; + dmar_map_gfx = 0; #endif - check_tylersburg_isoch(); - - if (iommu_identity_mapping) { - ret = si_domain_init(hw_pass_through); - if (ret) - goto free_iommu; - } - - - /* - * If we copied translations from a previous kernel in the kdump - * case, we can not assign the devices to domains now, as that - * would eliminate the old mappings. So skip this part and defer - * the assignment to device driver initialization time. - */ - if (copied_tables) - goto domains_done; - - /* - * If pass through is not set or not enabled, setup context entries for - * identity mappings for rmrr, gfx, and isa and may fall back to static - * identity mapping if iommu_identity_mapping is set. - */ - if (iommu_identity_mapping) { - ret = iommu_prepare_static_identity_mapping(hw_pass_through); - if (ret) { - pr_crit("Failed to setup IOMMU pass-through\n"); - goto free_iommu; - } - } - /* - * For each rmrr - * for each dev attached to rmrr - * do - * locate drhd for dev, alloc domain for dev - * allocate free domain - * allocate page table entries for rmrr - * if context not allocated for bus - * allocate and init context - * set present in root table for this bus - * init context with domain, translation etc - * endfor - * endfor - */ - pr_info("Setting RMRR:\n"); - for_each_rmrr_units(rmrr) { - /* some BIOS lists non-exist devices in DMAR table. */ - for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, - i, dev) { - ret = iommu_prepare_rmrr_dev(rmrr, dev); - if (ret) - pr_err("Mapping reserved region failed\n"); - } - } + if (!dmar_map_gfx) + iommu_identity_mapping |= IDENTMAP_GFX; - iommu_prepare_isa(); + check_tylersburg_isoch(); -- -- if (iommu_identity_mapping) { -- -- ret = si_domain_init(hw_pass_through); -- -- if (ret) -- -- goto free_iommu; -- -- } -- -- -- -- -- -- /* -- -- * If we copied translations from a previous kernel in the kdump -- -- * case, we can not assign the devices to domains now, as that -- -- * would eliminate the old mappings. So skip this part and defer -- -- * the assignment to device driver initialization time. -- -- */ -- -- if (copied_tables) -- -- goto domains_done; -- -- -- -- /* -- -- * If pass through is not set or not enabled, setup context entries for -- -- * identity mappings for rmrr, gfx, and isa and may fall back to static -- -- * identity mapping if iommu_identity_mapping is set. 
-- -- */ -- -- if (iommu_identity_mapping) { -- -- ret = iommu_prepare_static_identity_mapping(hw_pass_through); -- -- if (ret) { -- -- pr_crit("Failed to setup IOMMU pass-through\n"); -- -- goto free_iommu; -- -- } -- -- } -- -- /* -- -- * For each rmrr -- -- * for each dev attached to rmrr -- -- * do -- -- * locate drhd for dev, alloc domain for dev -- -- * allocate free domain -- -- * allocate page table entries for rmrr -- -- * if context not allocated for bus -- -- * allocate and init context -- -- * set present in root table for this bus -- -- * init context with domain, translation etc -- -- * endfor -- -- * endfor -- -- */ -- -- pr_info("Setting RMRR:\n"); -- -- for_each_rmrr_units(rmrr) { -- -- /* some BIOS lists non-exist devices in DMAR table. */ -- -- for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, -- -- i, dev) { -- -- ret = iommu_prepare_rmrr_dev(rmrr, dev); -- -- if (ret) -- -- pr_err("Mapping reserved region failed\n"); -- -- } -- -- } -- -- -- -- iommu_prepare_isa(); -- -- -----domains_done: +++++ ret = si_domain_init(hw_pass_through); +++++ if (ret) +++++ goto free_iommu; /* * for each drhd @@@@@@@ -3426,45 -3618,40 -3611,40 -3606,45 -3611,40 -3618,40 +3418,45 @@@@@@@ out } /* Check if the dev needs to go through non-identity map and unmap process.*/ - static int iommu_no_mapping(struct device *dev) + static bool iommu_need_mapping(struct device *dev) { ----- int found; +++++ int ret; if (iommu_dummy(dev)) - return 1; + return false; ----- if (!iommu_identity_mapping) -- -- return true; - return 0; +++++ ret = identity_mapping(dev); +++++ if (ret) { +++++ u64 dma_mask = *dev->dma_mask; + -- -- found = identity_mapping(dev); -- -- if (found) { -- -- if (iommu_should_identity_map(dev, 0)) +++++ if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask) +++++ dma_mask = dev->coherent_dma_mask; +++++ +++++ if (dma_mask >= dma_get_required_mask(dev)) + return false; - found = identity_mapping(dev); - if (found) { - if (iommu_should_identity_map(dev, 0)) - return 1; - else { - /* - * 32 bit DMA is removed from si_domain and fall back - * to non-identity mapping. - */ - dmar_remove_one_dev_info(dev); - dev_info(dev, "32bit DMA uses non-identity mapping\n"); - return 0; - } - } else { /* - * In case of a detached 64 bit DMA device from vm, the device - * is put into si_domain for identity mapping. + * 32 bit DMA is removed from si_domain and fall back to + * non-identity mapping. */ - if (iommu_should_identity_map(dev, 0)) { - int ret; - ret = domain_add_dev_info(si_domain, dev); - if (!ret) { - dev_info(dev, "64bit DMA uses identity mapping\n"); - return 1; + dmar_remove_one_dev_info(dev); -- -- dev_info(dev, "32bit DMA uses non-identity mapping\n"); -- -- } else { -- -- /* -- -- * In case of a detached 64 bit DMA device from vm, the device -- -- * is put into si_domain for identity mapping. 
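iommu_need_mapping() above replaces the old iommu_no_mapping()/iommu_should_identity_map() pair: a device already in the identity domain keeps its pass-through mapping only if its DMA mask reaches the highest address the platform might hand it; otherwise it is moved to a translated DMA domain. A compact model of that single decision follows; required_mask stands in for dma_get_required_mask() and the numeric values are illustrative, not from this commit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Keep the pass-through (identity) mapping only if the device can address
 * everything the platform may ask it to DMA to.  The coherent mask, when
 * set and smaller, is the limiting factor.
 */
static bool keep_identity_mapping(uint64_t dma_mask, uint64_t coherent_mask,
				  uint64_t required_mask)
{
	if (coherent_mask && coherent_mask < dma_mask)
		dma_mask = coherent_mask;
	return dma_mask >= required_mask;
}

int main(void)
{
	/* A 32-bit-only device on a machine with RAM above 4 GiB. */
	uint64_t dev_mask = 0xffffffffULL;
	uint64_t required = 0xfffffffffULL;	/* roughly 36 bits of populated RAM */

	printf("%s\n", keep_identity_mapping(dev_mask, 0, required) ?
	       "stays in identity domain" : "moved to translated DMA domain");
	return 0;
}

When the check fails, the new code removes the device from si_domain and asks the core for a DMA default domain via iommu_request_dma_domain_for_dev(), falling back to a private domain if that request is refused.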
-- -- */ -- -- if (iommu_should_identity_map(dev, 0) && -- -- !domain_add_dev_info(si_domain, dev)) { -- -- dev_info(dev, "64bit DMA uses identity mapping\n"); -- -- return false; +++++ ret = iommu_request_dma_domain_for_dev(dev); +++++ if (ret) { +++++ struct iommu_domain *domain; +++++ struct dmar_domain *dmar_domain; +++++ +++++ domain = iommu_get_domain_for_dev(dev); +++++ if (domain) { +++++ dmar_domain = to_dmar_domain(domain); +++++ dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN; ++ ++ } +++++ get_private_domain_for_dev(dev); } +++++ +++++ dev_info(dev, "32bit DMA uses non-identity mapping\n"); } - return 0; + return true; } static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, @@@@@@@ -3480,7 -3667,7 -3660,7 -3660,10 -3660,7 -3667,7 +3472,7 @@@@@@@ BUG_ON(dir == DMA_NONE); - if (iommu_no_mapping(dev)) - return paddr; - ----- domain = get_valid_domain_for_dev(dev); +++++ domain = find_domain(dev); if (!domain) return DMA_MAPPING_ERROR; @@@@@@@ -3692,10 -3879,10 -3872,10 -3878,10 -3872,10 -3879,10 +3684,10 @@@@@@@ static int intel_map_sg(struct device * struct intel_iommu *iommu; BUG_ON(dir == DMA_NONE); - if (iommu_no_mapping(dev)) - return intel_nontranslate_map_sg(dev, sglist, nelems, dir); + if (!iommu_need_mapping(dev)) + return dma_direct_map_sg(dev, sglist, nelems, dir, attrs); ----- domain = get_valid_domain_for_dev(dev); +++++ domain = find_domain(dev); if (!domain) return 0; @@@@@@@ -4842,185 -5039,167 -5032,167 -5037,63 -5032,167 -5039,167 +4834,185 @@@@@@@ static struct iommu_domain *intel_iommu struct dmar_domain *dmar_domain; struct iommu_domain *domain; ----- if (type != IOMMU_DOMAIN_UNMANAGED) ----- return NULL; +++++ switch (type) { +++++ case IOMMU_DOMAIN_DMA: +++++ /* fallthrough */ +++++ case IOMMU_DOMAIN_UNMANAGED: +++++ dmar_domain = alloc_domain(0); +++++ if (!dmar_domain) { +++++ pr_err("Can't allocate dmar_domain\n"); +++++ return NULL; +++++ } +++++ if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { +++++ pr_err("Domain initialization failed\n"); +++++ domain_exit(dmar_domain); +++++ return NULL; +++++ } ----- dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE); ----- if (!dmar_domain) { ----- pr_err("Can't allocate dmar_domain\n"); ----- return NULL; ----- } ----- if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { ----- pr_err("Domain initialization failed\n"); ----- domain_exit(dmar_domain); +++++ if (type == IOMMU_DOMAIN_DMA && +++++ init_iova_flush_queue(&dmar_domain->iovad, +++++ iommu_flush_iova, iova_entry_free)) { +++++ pr_warn("iova flush queue initialization failed\n"); +++++ intel_iommu_strict = 1; +++++ } +++++ +++++ domain_update_iommu_cap(dmar_domain); +++++ +++++ domain = &dmar_domain->domain; +++++ domain->geometry.aperture_start = 0; +++++ domain->geometry.aperture_end = +++++ __DOMAIN_MAX_ADDR(dmar_domain->gaw); +++++ domain->geometry.force_aperture = true; +++++ +++++ return domain; +++++ case IOMMU_DOMAIN_IDENTITY: +++++ return &si_domain->domain; +++++ default: return NULL; } ----- domain_update_iommu_cap(dmar_domain); --- --- domain = &dmar_domain->domain; --- domain->geometry.aperture_start = 0; --- domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw); --- domain->geometry.force_aperture = true; - - domain = &dmar_domain->domain; - - domain->geometry.aperture_start = 0; - - domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw); - - domain->geometry.force_aperture = true; - - ----- return domain; +++++ return NULL; } static void 
intel_iommu_domain_free(struct iommu_domain *domain) { ----- domain_exit(to_dmar_domain(domain)); +++++ if (domain != &si_domain->domain) +++++ domain_exit(to_dmar_domain(domain)); } - static int intel_iommu_attach_device(struct iommu_domain *domain, - struct device *dev) + /* + * Check whether a @domain could be attached to the @dev through the + * aux-domain attach/detach APIs. + */ + static inline bool + is_aux_domain(struct device *dev, struct iommu_domain *domain) { - struct dmar_domain *dmar_domain = to_dmar_domain(domain); - struct intel_iommu *iommu; - int addr_width; - u8 bus, devfn; + struct device_domain_info *info = dev->archdata.iommu; - if (device_is_rmrr_locked(dev)) { - dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n"); - return -EPERM; - } + return info && info->auxd_enabled && + domain->type == IOMMU_DOMAIN_UNMANAGED; + } - /* normally dev is not mapped */ - if (unlikely(domain_context_mapped(dev))) { - struct dmar_domain *old_domain; + static void auxiliary_link_device(struct dmar_domain *domain, + struct device *dev) + { + struct device_domain_info *info = dev->archdata.iommu; - old_domain = find_domain(dev); - if (old_domain) { - rcu_read_lock(); - dmar_remove_one_dev_info(dev); - rcu_read_unlock(); + assert_spin_locked(&device_domain_lock); + if (WARN_ON(!info)) + return; + + domain->auxd_refcnt++; + list_add(&domain->auxd, &info->auxiliary_domains); + } + + static void auxiliary_unlink_device(struct dmar_domain *domain, + struct device *dev) + { + struct device_domain_info *info = dev->archdata.iommu; + + assert_spin_locked(&device_domain_lock); + if (WARN_ON(!info)) + return; + + list_del(&domain->auxd); + domain->auxd_refcnt--; + + if (!domain->auxd_refcnt && domain->default_pasid > 0) + intel_pasid_free_id(domain->default_pasid); + } + + static int aux_domain_add_dev(struct dmar_domain *domain, + struct device *dev) + { + int ret; + u8 bus, devfn; + unsigned long flags; + struct intel_iommu *iommu; - if (!domain_type_is_vm_or_si(old_domain) && - list_empty(&old_domain->devices)) - domain_exit(old_domain); + iommu = device_to_iommu(dev, &bus, &devfn); + if (!iommu) + return -ENODEV; + + if (domain->default_pasid <= 0) { + int pasid; + + pasid = intel_pasid_alloc_id(domain, PASID_MIN, + pci_max_pasids(to_pci_dev(dev)), + GFP_KERNEL); + if (pasid <= 0) { + pr_err("Can't allocate default pasid\n"); + return -ENODEV; } + domain->default_pasid = pasid; } + spin_lock_irqsave(&device_domain_lock, flags); + /* + * iommu->lock must be held to attach domain to iommu and setup the + * pasid entry for second level translation. 
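aux_domain_add_dev() and auxiliary_unlink_device() above tie the domain's default PASID to a reference count: the first auxiliary attach allocates it, the last detach frees it. Below is a small userspace model of that lifetime only; alloc_pasid()/free_pasid() are stand-ins for intel_pasid_alloc_id()/intel_pasid_free_id(), the starting PASID value is arbitrary, and all locking is omitted.

#include <stdio.h>

struct aux_domain {
	int auxd_refcnt;	/* devices attached to this domain via a PASID */
	int default_pasid;	/* 0 means "not allocated yet" */
};

static int next_pasid = 2;	/* start above zero; the real floor comes from PASID_MIN */

static int alloc_pasid(void)
{
	return next_pasid++;
}

static void free_pasid(int pasid)
{
	printf("freed pasid %d\n", pasid);
}

static int aux_attach(struct aux_domain *d)
{
	if (d->default_pasid <= 0)
		d->default_pasid = alloc_pasid();	/* first user allocates */
	d->auxd_refcnt++;
	return d->default_pasid;
}

static void aux_detach(struct aux_domain *d)
{
	if (--d->auxd_refcnt == 0 && d->default_pasid > 0) {
		free_pasid(d->default_pasid);		/* last user releases */
		d->default_pasid = 0;
	}
}

int main(void)
{
	struct aux_domain d = { 0, 0 };

	printf("attach -> pasid %d\n", aux_attach(&d));
	printf("attach -> pasid %d\n", aux_attach(&d));	/* same PASID, refcount 2 */
	aux_detach(&d);
	aux_detach(&d);					/* drops to zero, PASID freed */
	return 0;
}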
+ */ + spin_lock(&iommu->lock); + ret = domain_attach_iommu(domain, iommu); + if (ret) + goto attach_failed; + + /* Setup the PASID entry for mediated devices: */ + ret = intel_pasid_setup_second_level(iommu, domain, dev, + domain->default_pasid); + if (ret) + goto table_failed; + spin_unlock(&iommu->lock); + + auxiliary_link_device(domain, dev); + + spin_unlock_irqrestore(&device_domain_lock, flags); + + return 0; + + table_failed: + domain_detach_iommu(domain, iommu); + attach_failed: + spin_unlock(&iommu->lock); + spin_unlock_irqrestore(&device_domain_lock, flags); + if (!domain->auxd_refcnt && domain->default_pasid > 0) + intel_pasid_free_id(domain->default_pasid); + + return ret; + } + + static void aux_domain_remove_dev(struct dmar_domain *domain, + struct device *dev) + { + struct device_domain_info *info; + struct intel_iommu *iommu; + unsigned long flags; + + if (!is_aux_domain(dev, &domain->domain)) + return; + + spin_lock_irqsave(&device_domain_lock, flags); + info = dev->archdata.iommu; + iommu = info->iommu; + + auxiliary_unlink_device(domain, dev); + + spin_lock(&iommu->lock); + intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid); + domain_detach_iommu(domain, iommu); + spin_unlock(&iommu->lock); + + spin_unlock_irqrestore(&device_domain_lock, flags); + } + + static int prepare_domain_attach_device(struct iommu_domain *domain, + struct device *dev) + { + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + struct intel_iommu *iommu; + int addr_width; + u8 bus, devfn; + iommu = device_to_iommu(dev, &bus, &devfn); if (!iommu) return -ENODEV; @@@@@@@ -5053,52 -5232,58 -5225,58 -5126,7 -5225,58 -5232,58 +5045,52 @@@@@@@ dmar_domain->agaw--; } - return domain_add_dev_info(dmar_domain, dev); + return 0; + } + + static int intel_iommu_attach_device(struct iommu_domain *domain, + struct device *dev) + { + int ret; + -- -- if (device_is_rmrr_locked(dev)) { +++++ if (domain->type == IOMMU_DOMAIN_UNMANAGED && +++++ device_is_rmrr_locked(dev)) { + dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. 
Contact your platform vendor.\n"); + return -EPERM; + } + + if (is_aux_domain(dev, domain)) + return -EPERM; + + /* normally dev is not mapped */ + if (unlikely(domain_context_mapped(dev))) { + struct dmar_domain *old_domain; + + old_domain = find_domain(dev); -- -- if (old_domain) { -- -- rcu_read_lock(); +++++ if (old_domain) + dmar_remove_one_dev_info(dev); -- -- rcu_read_unlock(); -- -- -- -- if (!domain_type_is_vm_or_si(old_domain) && -- -- list_empty(&old_domain->devices)) -- -- domain_exit(old_domain); -- -- } + } + + ret = prepare_domain_attach_device(domain, dev); + if (ret) + return ret; + + return domain_add_dev_info(to_dmar_domain(domain), dev); + } + + static int intel_iommu_aux_attach_device(struct iommu_domain *domain, + struct device *dev) + { + int ret; + + if (!is_aux_domain(dev, domain)) + return -EPERM; + + ret = prepare_domain_attach_device(domain, dev); + if (ret) + return ret; + + return aux_domain_add_dev(to_dmar_domain(domain), dev); } static void intel_iommu_detach_device(struct iommu_domain *domain, @@@@@@@ -5388,11 -5500,13 -5493,13 -5301,14 -5493,13 -5500,13 +5380,11 @@@@@@@ static void intel_iommu_put_resv_region { struct iommu_resv_region *entry, *next; ----- list_for_each_entry_safe(entry, next, head, list) { ----- if (entry->type == IOMMU_RESV_MSI) ----- kfree(entry); ----- } +++++ list_for_each_entry_safe(entry, next, head, list) +++++ kfree(entry); } - #ifdef CONFIG_INTEL_IOMMU_SVM - int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev) + int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) { struct device_domain_info *info; struct context_entry *context; @@@@@@@ -5401,7 -5515,7 -5508,7 -5317,7 -5508,7 -5515,7 +5393,7 @@@@@@@ u64 ctx_lo; int ret; -- -- domain = get_valid_domain_for_dev(dev); - domain = get_valid_domain_for_dev(sdev->dev); +++++ domain = find_domain(dev); if (!domain) return -EINVAL; @@@@@@@ -5443,20 -5557,7 -5550,7 -5366,6 -5550,7 -5557,7 +5435,20 @@@@@@@ return ret; } +++++static void intel_iommu_apply_resv_region(struct device *dev, +++++ struct iommu_domain *domain, +++++ struct iommu_resv_region *region) +++++{ +++++ struct dmar_domain *dmar_domain = to_dmar_domain(domain); +++++ unsigned long start, end; +++++ +++++ start = IOVA_PFN(region->start); +++++ end = IOVA_PFN(region->start + region->length - 1); +++++ +++++ WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end)); +++++} +++++ + #ifdef CONFIG_INTEL_IOMMU_SVM struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) { struct intel_iommu *iommu; @@@@@@@ -5478,139 -5579,133 -5572,133 -5387,6 -5572,133 -5579,133 +5470,139 @@@@@@@ } #endif /* CONFIG_INTEL_IOMMU_SVM */ + static int intel_iommu_enable_auxd(struct device *dev) + { + struct device_domain_info *info; + struct intel_iommu *iommu; + unsigned long flags; + u8 bus, devfn; + int ret; + + iommu = device_to_iommu(dev, &bus, &devfn); + if (!iommu || dmar_disabled) + return -EINVAL; + + if (!sm_supported(iommu) || !pasid_supported(iommu)) + return -EINVAL; + + ret = intel_iommu_enable_pasid(iommu, dev); + if (ret) + return -ENODEV; + + spin_lock_irqsave(&device_domain_lock, flags); + info = dev->archdata.iommu; + info->auxd_enabled = 1; + spin_unlock_irqrestore(&device_domain_lock, flags); + + return 0; + } + + static int intel_iommu_disable_auxd(struct device *dev) + { + struct device_domain_info *info; + unsigned long flags; + + spin_lock_irqsave(&device_domain_lock, flags); + info = dev->archdata.iommu; + if (!WARN_ON(!info)) + info->auxd_enabled = 0; + 
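With intel_iommu_aux_attach_device() and the dev_enable_feat/aux_get_pasid hooks wired up above, a vendor driver for a mediated device consumes them through the generic wrappers in include/linux/iommu.h. The snippet below is a hedged sketch of that call sequence, not code from this merge: the function name and messages are illustrative, error handling is trimmed to the essentials, and it assumes the physical device really advertises Scalable IOV.

#include <linux/device.h>
#include <linux/iommu.h>

static int example_assign_aux_domain(struct device *dev)
{
	struct iommu_domain *domain;
	int ret, pasid;

	/* Opt the physical device in to auxiliary domains (scalable mode). */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_AUX);
	if (ret)
		return ret;

	/* An unmanaged domain becomes the mediated device's private address space. */
	domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
		return -ENOMEM;
	}

	/* Attach as an aux domain: backed by a PASID rather than the RID. */
	ret = iommu_aux_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* The PASID to program into the device for this domain's DMA. */
	pasid = iommu_aux_get_pasid(domain, dev);
	dev_info(dev, "aux domain bound to PASID %d\n", pasid);
	return 0;

out_free:
	iommu_domain_free(domain);
	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_AUX);
	return ret;
}

Teardown is the reverse: iommu_aux_detach_device(), iommu_domain_free(), then iommu_dev_disable_feature().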
spin_unlock_irqrestore(&device_domain_lock, flags); + + return 0; + } + + /* + * A PCI express designated vendor specific extended capability is defined + * in the section 3.7 of Intel scalable I/O virtualization technical spec + * for system software and tools to detect endpoint devices supporting the + * Intel scalable IO virtualization without host driver dependency. + * + * Returns the address of the matching extended capability structure within + * the device's PCI configuration space or 0 if the device does not support + * it. + */ + static int siov_find_pci_dvsec(struct pci_dev *pdev) + { + int pos; + u16 vendor, id; + + pos = pci_find_next_ext_capability(pdev, 0, 0x23); + while (pos) { + pci_read_config_word(pdev, pos + 4, &vendor); + pci_read_config_word(pdev, pos + 8, &id); + if (vendor == PCI_VENDOR_ID_INTEL && id == 5) + return pos; + + pos = pci_find_next_ext_capability(pdev, pos, 0x23); + } + + return 0; + } + + static bool + intel_iommu_dev_has_feat(struct device *dev, enum iommu_dev_features feat) + { + if (feat == IOMMU_DEV_FEAT_AUX) { + int ret; + + if (!dev_is_pci(dev) || dmar_disabled || + !scalable_mode_support() || !iommu_pasid_support()) + return false; + + ret = pci_pasid_features(to_pci_dev(dev)); + if (ret < 0) + return false; + + return !!siov_find_pci_dvsec(to_pci_dev(dev)); + } + + return false; + } + + static int + intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat) + { + if (feat == IOMMU_DEV_FEAT_AUX) + return intel_iommu_enable_auxd(dev); + + return -ENODEV; + } + + static int + intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) + { + if (feat == IOMMU_DEV_FEAT_AUX) + return intel_iommu_disable_auxd(dev); + + return -ENODEV; + } + + static bool + intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat) + { + struct device_domain_info *info = dev->archdata.iommu; + + if (feat == IOMMU_DEV_FEAT_AUX) + return scalable_mode_support() && info && info->auxd_enabled; + + return false; + } + + static int + intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) + { + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + + return dmar_domain->default_pasid > 0 ? 
+ dmar_domain->default_pasid : -EINVAL; + } + +++++static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, +++++ struct device *dev) +++++{ +++++ return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO; +++++} +++++ const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, @@@@@@@ -5627,13 -5722,11 -5715,11 -5400,7 -5715,11 -5722,11 +5619,13 @@@@@@@ .remove_device = intel_iommu_remove_device, .get_resv_regions = intel_iommu_get_resv_regions, .put_resv_regions = intel_iommu_put_resv_regions, +++++ .apply_resv_region = intel_iommu_apply_resv_region, .device_group = pci_device_group, + .dev_has_feat = intel_iommu_dev_has_feat, + .dev_feat_enabled = intel_iommu_dev_feat_enabled, + .dev_enable_feat = intel_iommu_dev_enable_feat, + .dev_disable_feat = intel_iommu_dev_disable_feat, +++++ .is_attach_deferred = intel_iommu_is_attach_deferred, .pgsize_bitmap = INTEL_IOMMU_PGSIZES, }; diff --cc drivers/iommu/iommu.c index 46a06ff46e472,67ee6623f9b2a,9f0a2844371cf,109de67d5d727,9f0a2844371cf,ac1f29c19e594..0c674d80c37fd --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@@@@@@ -111,12 -110,12 -98,12 -114,12 -98,12 -132,18 +121,18 @@@@@@@ static void iommu_free_dev_param(struc int iommu_probe_device(struct device *dev) { const struct iommu_ops *ops = dev->bus->iommu_ops; ----- int ret = -EINVAL; +++++ int ret; WARN_ON(dev->iommu_group); +++++ if (!ops) +++++ return -EINVAL; + - --- if (ops) - --- ret = ops->add_device(dev); +++++ if (!iommu_get_dev_param(dev)) +++++ return -ENOMEM; + +++ - if (ops) - ret = ops->add_device(dev); +++++ ret = ops->add_device(dev); +++++ if (ret) +++++ iommu_free_dev_param(dev); return ret; } diff --cc drivers/iommu/omap-iommu-debug.c index 4abc0ef522a8e,4abc0ef522a8e,ff31bddba60ac,f87a72821bad0,ff31bddba60ac,4abc0ef522a8e..8e19bfa94121e --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@@@@@@ -257,23 -257,23 -254,23 -246,13 -254,23 -257,23 +243,13 @@@@@@@ void omap_iommu_debugfs_add(struct omap if (!iommu_debug_root) return; --- -- obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root); --- -- if (!obj->debug_dir) -- - return; -- - -- - d = debugfs_create_u32("nr_tlb_entries", 0400, obj->debug_dir, -- - &obj->nr_tlb_entries); -- - if (!d) --- -- return; -- - -- - DEBUG_ADD_FILE_RO(regs); -- - DEBUG_ADD_FILE_RO(tlb); -- - DEBUG_ADD_FILE_RO(pagetable); +++ ++ d = debugfs_create_dir(obj->name, iommu_debug_root); +++ ++ obj->debug_dir = d; - - d = debugfs_create_u32("nr_tlb_entries", 0400, obj->debug_dir, - - &obj->nr_tlb_entries); - - if (!d) - - return; - - - - DEBUG_ADD_FILE_RO(regs); - - DEBUG_ADD_FILE_RO(tlb); - - DEBUG_ADD_FILE_RO(pagetable); - - --- -- return; --- -- --- --err: --- -- debugfs_remove_recursive(obj->debug_dir); +++ ++ debugfs_create_u32("nr_tlb_entries", 0400, d, &obj->nr_tlb_entries); +++ ++ debugfs_create_file("regs", 0400, d, obj, ®s_fops); +++ ++ debugfs_create_file("tlb", 0400, d, obj, &tlb_fops); +++ ++ debugfs_create_file("pagetable", 0400, d, obj, &pagetable_fops); } void omap_iommu_debugfs_remove(struct omap_iommu *obj) diff --cc include/linux/iommu.h index ab7a1c85af75f,a815cf6f6f47a,86b4e0a75a97a,ffbbc7e39ceeb,e552c3b63f6f0,76c8cda61dfd4..fdc355ccc5701 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@@@@@@ -48,7 -48,7 -42,7 -48,6 -36,7 -49,8 +43,8 @@@@@@@ struct bus_type struct device; struct iommu_domain; struct notifier_block; + struct iommu_sva; +++++ struct iommu_fault_event; /* iommu fault flags */ #define 
IOMMU_FAULT_READ 0x0 @@@@@@@ -56,8 -56,8 -50,8 -55,6 -44,8 -58,9 +52,9 @@@@@@@ typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, unsigned long, int, void *); + typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *, + void *); +++++ typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *); struct iommu_domain_geometry { dma_addr_t aperture_start; /* First address that can be mapped */ @@@@@@@ -222,14 -216,14 -210,14 -186,6 -204,14 -219,15 +219,15 @@@@@@@ struct iommu_sva_ops * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) + * @dev_has/enable/disable_feat: per device entries to check/enable/disable + * iommu specific features. + * @dev_feat_enabled: check enabled feature + * @aux_attach/detach_dev: aux-domain specific attach/detach entries. + * @aux_get_pasid: get the pasid given an aux-domain + * @sva_bind: Bind process address space to device + * @sva_unbind: Unbind process address space from device + * @sva_get_pasid: Get PASID associated to a SVA handle +++++ * @page_response: handle page request response * @pgsize_bitmap: bitmap of all possible supported page sizes */ struct iommu_ops { @@@@@@@ -274,22 -268,22 -262,22 -230,6 -256,22 -272,26 +272,26 @@@@@@@ int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); + /* Per device IOMMU features */ + bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f); + bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f); + int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f); + int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f); + + /* Aux-domain specific attach/detach entries */ + int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev); + void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev); + int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev); + + struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm, + void *drvdata); + void (*sva_unbind)(struct iommu_sva *handle); + int (*sva_get_pasid)(struct iommu_sva *handle); + +++++ int (*page_response)(struct device *dev, +++++ struct iommu_fault_event *evt, +++++ struct iommu_page_response *msg); +++++ unsigned long pgsize_bitmap; }; @@@@@@@ -629,15 -622,10 -616,10 -540,10 -610,10 -684,10 +685,15 @@@@@@@ static inline int iommu_get_group_resv_ } static inline int iommu_request_dm_for_dev(struct device *dev) +{ + return -ENODEV; +} + +++++static inline int iommu_request_dma_domain_for_dev(struct device *dev) ++++ { ++++ return -ENODEV; ++++ } ++++ static inline int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) {
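The omap-iommu-debug.c hunk above drops all error checking around debugfs: debugfs_create_dir() may return an error pointer, but the debugfs core accepts that as a parent and simply skips creating the children, so there is normally nothing useful to unwind. A minimal sketch of the resulting pattern, with illustrative names rather than the driver's own:

#include <linux/debugfs.h>

struct example_obj {
	const char *name;
	u32 nr_entries;
	struct dentry *debug_dir;
};

static void example_debugfs_add(struct example_obj *obj, struct dentry *root)
{
	struct dentry *d;

	d = debugfs_create_dir(obj->name, root);
	obj->debug_dir = d;

	/* No error checks: even if 'd' is an ERR_PTR this call is harmless. */
	debugfs_create_u32("nr_entries", 0400, d, &obj->nr_entries);
}

static void example_debugfs_remove(struct example_obj *obj)
{
	debugfs_remove_recursive(obj->debug_dir);	/* NULL/ERR_PTR safe */
}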