From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Thu, 26 Jul 2018 13:58:31 +0000 (+0200)
Subject: 4.17-stable patches
X-Git-Tag: v3.18.117~20
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=657541666cbc91c5c3eee29f0cbde8c1bfe5c027;p=thirdparty%2Fkernel%2Fstable-queue.git

4.17-stable patches

added patches:
	revert-iommu-intel-iommu-enable-config_dma_direct_ops-y-and-clean-up-intel_-alloc-free-_coherent.patch
---

diff --git a/queue-4.17/revert-iommu-intel-iommu-enable-config_dma_direct_ops-y-and-clean-up-intel_-alloc-free-_coherent.patch b/queue-4.17/revert-iommu-intel-iommu-enable-config_dma_direct_ops-y-and-clean-up-intel_-alloc-free-_coherent.patch
new file mode 100644
index 00000000000..7dfa49d1a5c
--- /dev/null
+++ b/queue-4.17/revert-iommu-intel-iommu-enable-config_dma_direct_ops-y-and-clean-up-intel_-alloc-free-_coherent.patch
@@ -0,0 +1,124 @@
+From 7ec916f82c48dcfc115eee2e3e0e6d400e310fc5 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Thu, 5 Jul 2018 13:29:55 -0600
+Subject: Revert "iommu/intel-iommu: Enable CONFIG_DMA_DIRECT_OPS=y and clean up intel_{alloc,free}_coherent()"
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 7ec916f82c48dcfc115eee2e3e0e6d400e310fc5 upstream.
+
+This commit may cause a less than required dma mask to be used for
+some allocations, which apparently leads to module load failures for
+iwlwifi sometimes.
+
+This reverts commit d657c5c73ca987214a6f9436e435b34fc60f332a.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reported-by: Fabio Coatti <fabio.coatti@gmail.com>
+Tested-by: Fabio Coatti <fabio.coatti@gmail.com>
+Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iommu/Kconfig       |    1 
+ drivers/iommu/intel-iommu.c |   64 ++++++++++++++++++++++++++++++++------------
+ 2 files changed, 47 insertions(+), 18 deletions(-)
+
+--- a/drivers/iommu/Kconfig
++++ b/drivers/iommu/Kconfig
+@@ -142,7 +142,6 @@ config DMAR_TABLE
+ config INTEL_IOMMU
+ 	bool "Support for Intel IOMMU using DMA Remapping Devices"
+ 	depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
+-	select DMA_DIRECT_OPS
+ 	select IOMMU_API
+ 	select IOMMU_IOVA
+ 	select DMAR_TABLE
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -31,7 +31,6 @@
+ #include <linux/pci.h>
+ #include <linux/dmar.h>
+ #include <linux/dma-mapping.h>
+-#include <linux/dma-direct.h>
+ #include <linux/mempool.h>
+ #include <linux/memory.h>
+ #include <linux/cpu.h>
+@@ -3709,30 +3708,61 @@ static void *intel_alloc_coherent(struct
+ 				  dma_addr_t *dma_handle, gfp_t flags,
+ 				  unsigned long attrs)
+ {
+-	void *vaddr;
++	struct page *page = NULL;
++	int order;
+ 
+-	vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
+-	if (iommu_no_mapping(dev) || !vaddr)
+-		return vaddr;
+-
+-	*dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
+-			PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
+-			dev->coherent_dma_mask);
+-	if (!*dma_handle)
+-		goto out_free_pages;
+-	return vaddr;
++	size = PAGE_ALIGN(size);
++	order = get_order(size);
++
++	if (!iommu_no_mapping(dev))
++		flags &= ~(GFP_DMA | GFP_DMA32);
++	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
++		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
++			flags |= GFP_DMA;
++		else
++			flags |= GFP_DMA32;
++	}
++
++	if (gfpflags_allow_blocking(flags)) {
++		unsigned int count = size >> PAGE_SHIFT;
++
++		page = dma_alloc_from_contiguous(dev, count, order, flags);
++		if (page && iommu_no_mapping(dev) &&
++		    page_to_phys(page) + size > dev->coherent_dma_mask) {
++			dma_release_from_contiguous(dev, page, count);
++			page = NULL;
++		}
++	}
++
++	if (!page)
++		page = alloc_pages(flags, order);
++	if (!page)
++		return NULL;
++	memset(page_address(page), 0, size);
++
++	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
++					 DMA_BIDIRECTIONAL,
++					 dev->coherent_dma_mask);
++	if (*dma_handle)
++		return page_address(page);
++	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
++		__free_pages(page, order);
+ 
+-out_free_pages:
+-	dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
+ 	return NULL;
+ }
+ 
+ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
+ 				dma_addr_t dma_handle, unsigned long attrs)
+ {
+-	if (!iommu_no_mapping(dev))
+-		intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
+-	dma_direct_free(dev, size, vaddr, dma_handle, attrs);
++	int order;
++	struct page *page = virt_to_page(vaddr);
++
++	size = PAGE_ALIGN(size);
++	order = get_order(size);
++
++	intel_unmap(dev, dma_handle, size);
++	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
++		__free_pages(page, order);
+ }
+ 
+ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
diff --git a/queue-4.17/series b/queue-4.17/series
index 82e2d4a64ef..997f9122c60 100644
--- a/queue-4.17/series
+++ b/queue-4.17/series
@@ -1 +1,2 @@
 kvm-vmx-support-msr_ia32_arch_capabilities-as-a-feature-msr.patch
+revert-iommu-intel-iommu-enable-config_dma_direct_ops-y-and-clean-up-intel_-alloc-free-_coherent.patch
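
The zone-selection logic this revert restores is the heart of the fix: when a
device bypasses the IOMMU, intel_alloc_coherent() must keep the allocation
inside the device's coherent DMA mask, so a mask narrower than the platform's
required mask forces GFP_DMA or GFP_DMA32. The standalone C model below is a
sketch of just that decision, under stated assumptions: pick_gfp_zone() is a
hypothetical helper, not a kernel function, and the masks in main() are
made-up examples; the kernel makes the same decision inline by or-ing
GFP_DMA / GFP_DMA32 into the allocation flags.

/*
 * Sketch of the GFP zone selection restored in intel_alloc_coherent().
 * pick_gfp_zone() is a hypothetical helper for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's DMA_BIT_MASK(); (1ULL << 64) would be undefined. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

enum gfp_zone { ZONE_NORMAL, ZONE_DMA32, ZONE_DMA };

static enum gfp_zone pick_gfp_zone(uint64_t coherent_mask,
				   uint64_t required_mask)
{
	/* The mask covers every address the platform can hand out. */
	if (coherent_mask >= required_mask)
		return ZONE_NORMAL;
	/* Narrower than 32 bits: only the small legacy DMA zone is safe. */
	if (coherent_mask < DMA_BIT_MASK(32))
		return ZONE_DMA;
	/* 32 bits or wider, but short of required: stay below 4 GiB. */
	return ZONE_DMA32;
}

int main(void)
{
	/* Made-up example: a platform whose RAM needs 36-bit addressing. */
	uint64_t required = DMA_BIT_MASK(36);

	printf("%d\n", pick_gfp_zone(DMA_BIT_MASK(30), required)); /* 2 = ZONE_DMA */
	printf("%d\n", pick_gfp_zone(DMA_BIT_MASK(32), required)); /* 1 = ZONE_DMA32 */
	printf("%d\n", pick_gfp_zone(DMA_BIT_MASK(64), required)); /* 0 = ZONE_NORMAL */
	return 0;
}

The reverted commit handed such allocations to dma_direct_alloc() without this
steering, which is how devices like iwlwifi could end up with memory outside
their coherent mask and fail to load.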