From 7ec916f82c48dcfc115eee2e3e0e6d400e310fc5 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 5 Jul 2018 13:29:55 -0600
Subject: Revert "iommu/intel-iommu: Enable CONFIG_DMA_DIRECT_OPS=y and clean up intel_{alloc,free}_coherent()"

From: Christoph Hellwig <hch@lst.de>

commit 7ec916f82c48dcfc115eee2e3e0e6d400e310fc5 upstream.
This commit may cause a less than required dma mask to be used for
some allocations, which apparently leads to module load failures for
iwlwifi sometimes.

This reverts commit d657c5c73ca987214a6f9436e435b34fc60f332a.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Fabio Coatti <fabio.coatti@gmail.com>
Tested-by: Fabio Coatti <fabio.coatti@gmail.com>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/iommu/Kconfig       |    1 
 drivers/iommu/intel-iommu.c |   64 ++++++++++++++++++++++++++++++++------------
 2 files changed, 47 insertions(+), 18 deletions(-)
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -142,7 +142,6 @@ config DMAR_TABLE
 config INTEL_IOMMU
 	bool "Support for Intel IOMMU using DMA Remapping Devices"
 	depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
-	select DMA_DIRECT_OPS
 	select IOMMU_API
 	select IOMMU_IOVA
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/cpu.h>
@@ -3709,30 +3708,61 @@ static void *intel_alloc_coherent(struct
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  unsigned long attrs)
 {
-	void *vaddr;
+	struct page *page = NULL;
+	int order;
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
 
-	vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-	if (iommu_no_mapping(dev) || !vaddr)
-		return vaddr;
+	if (!iommu_no_mapping(dev))
+		flags &= ~(GFP_DMA | GFP_DMA32);
+	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+			flags |= GFP_DMA;
+		else
+			flags |= GFP_DMA32;
+	}
 
-	*dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
-			PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
-			dev->coherent_dma_mask);
-	if (!*dma_handle)
-		goto out_free_pages;
-	return vaddr;
+	if (gfpflags_allow_blocking(flags)) {
+		unsigned int count = size >> PAGE_SHIFT;
+
+		page = dma_alloc_from_contiguous(dev, count, order, flags);
+		if (page && iommu_no_mapping(dev) &&
+		    page_to_phys(page) + size > dev->coherent_dma_mask) {
+			dma_release_from_contiguous(dev, page, count);
+			page = NULL;
+		}
+	}
+
+	if (!page)
+		page = alloc_pages(flags, order);
+	if (!page)
+		return NULL;
+	memset(page_address(page), 0, size);
+
+	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+					 DMA_BIDIRECTIONAL,
+					 dev->coherent_dma_mask);
+	if (*dma_handle)
+		return page_address(page);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 
-out_free_pages:
-	dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
 	return NULL;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_handle, unsigned long attrs)
 {
-	if (!iommu_no_mapping(dev))
-		intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
-	dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+	int order;
+	struct page *page = virt_to_page(vaddr);
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	intel_unmap(dev, dma_handle, size);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
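
As a standalone illustration of what this revert restores: intel_alloc_coherent() again steers identity-mapped ("iommu_no_mapping") allocations into ZONE_DMA or ZONE_DMA32 whenever the device's coherent_dma_mask is narrower than the mask needed to address all of memory, instead of delegating to dma_direct_alloc(). The sketch below mirrors only that gfp-zone decision in plain userspace C; struct fake_device, required_mask() and pick_zone() are simplified stand-ins invented for the example, not kernel interfaces.

/*
 * Minimal sketch, not part of the patch: the zone-selection decision
 * restored in intel_alloc_coherent().  Types and helpers are simplified
 * stand-ins for the kernel ones.
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

struct fake_device {
	uint64_t coherent_dma_mask;	/* highest DMA address the device can reach */
	int iommu_mapping;		/* 1: IOMMU translation, 0: identity-mapped */
};

/* Stand-in for dma_get_required_mask(): smallest mask covering all RAM. */
static uint64_t required_mask(uint64_t max_phys_addr)
{
	uint64_t mask = 1;

	while (mask < max_phys_addr)
		mask = (mask << 1) | 1;
	return mask;
}

/* Mirrors the gfp-flag selection in the restored intel_alloc_coherent(). */
static const char *pick_zone(const struct fake_device *dev, uint64_t max_phys_addr)
{
	if (dev->iommu_mapping)
		return "normal zone (IOMMU remaps, physical placement irrelevant)";
	if (dev->coherent_dma_mask < required_mask(max_phys_addr)) {
		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
			return "ZONE_DMA (GFP_DMA)";
		return "ZONE_DMA32 (GFP_DMA32)";
	}
	return "normal zone (device can address all of RAM)";
}

int main(void)
{
	uint64_t ram_top = DMA_BIT_MASK(36);	/* pretend ~64 GiB of RAM */
	struct fake_device wifi = { DMA_BIT_MASK(31), 0 };	/* narrow mask, no translation */
	struct fake_device nvme = { DMA_BIT_MASK(64), 0 };	/* full 64-bit mask */
	struct fake_device gpu  = { DMA_BIT_MASK(32), 1 };	/* behind IOMMU translation */

	printf("wifi: %s\n", pick_zone(&wifi, ram_top));
	printf("nvme: %s\n", pick_zone(&nvme, ram_top));
	printf("gpu:  %s\n", pick_zone(&gpu, ram_top));
	return 0;
}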