From 7ec916f82c48dcfc115eee2e3e0e6d400e310fc5 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Thu, 5 Jul 2018 13:29:55 -0600
Subject: Revert "iommu/intel-iommu: Enable CONFIG_DMA_DIRECT_OPS=y and clean up intel_{alloc,free}_coherent()"

From: Christoph Hellwig <hch@lst.de>

commit 7ec916f82c48dcfc115eee2e3e0e6d400e310fc5 upstream.

The reverted commit can cause a smaller-than-required DMA mask to be
used for some allocations, which apparently leads to intermittent
module load failures for iwlwifi.

This reverts commit d657c5c73ca987214a6f9436e435b34fc60f332a.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Fabio Coatti <fabio.coatti@gmail.com>
Tested-by: Fabio Coatti <fabio.coatti@gmail.com>
Cc: "Jason A. Donenfeld" <Jason@zx2c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

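As background for the mask handling this revert restores: when a device
bypasses the IOMMU (iommu_no_mapping()), intel_alloc_coherent() must pick
GFP zone flags from the device's coherent DMA mask so the buffer lands in
memory the device can actually address. Below is a minimal userspace
sketch of that zone-selection decision, not kernel code; pick_gfp_zone()
and the example mask widths are illustrative only:

/*
 * Userspace model of the zone selection restored in
 * intel_alloc_coherent(); mirrors the branch order of the kernel code
 * but is not the kernel implementation itself.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t dma_bit_mask(unsigned int n)
{
	/* Mirrors the kernel's DMA_BIT_MASK(): low n bits set. */
	return n >= 64 ? ~0ULL : (1ULL << n) - 1;
}

static const char *pick_gfp_zone(uint64_t coherent_mask,
				 uint64_t required_mask)
{
	/* Mask covers everything the platform needs: no zone limit. */
	if (coherent_mask >= required_mask)
		return "no zone restriction";
	/* Device cannot even address full 32-bit space: force ZONE_DMA. */
	if (coherent_mask < dma_bit_mask(32))
		return "GFP_DMA";
	/* Device handles 32-bit addresses but not the required mask. */
	return "GFP_DMA32";
}

int main(void)
{
	/* Hypothetical 30-bit device on a platform needing 36 bits. */
	printf("%s\n", pick_gfp_zone(dma_bit_mask(30), dma_bit_mask(36)));
	/* Hypothetical 32-bit device on the same platform. */
	printf("%s\n", pick_gfp_zone(dma_bit_mask(32), dma_bit_mask(36)));
	return 0;
}

Compiled with any C compiler, this prints GFP_DMA for the 30-bit device
and GFP_DMA32 for the 32-bit one, matching the branch order in the
restored intel_alloc_coherent() below.
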
---
 drivers/iommu/Kconfig       |    1 
 drivers/iommu/intel-iommu.c |   64 ++++++++++++++++++++++++++++++++------------
 2 files changed, 47 insertions(+), 18 deletions(-)

--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -142,7 +142,6 @@ config DMAR_TABLE
 config INTEL_IOMMU
 	bool "Support for Intel IOMMU using DMA Remapping Devices"
 	depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
-	select DMA_DIRECT_OPS
 	select IOMMU_API
 	select IOMMU_IOVA
 	select DMAR_TABLE
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -31,7 +31,6 @@
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/cpu.h>
@@ -3709,30 +3708,61 @@ static void *intel_alloc_coherent(struct
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  unsigned long attrs)
 {
-	void *vaddr;
+	struct page *page = NULL;
+	int order;
 
-	vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-	if (iommu_no_mapping(dev) || !vaddr)
-		return vaddr;
-
-	*dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
-			PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
-			dev->coherent_dma_mask);
-	if (!*dma_handle)
-		goto out_free_pages;
-	return vaddr;
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	if (!iommu_no_mapping(dev))
+		flags &= ~(GFP_DMA | GFP_DMA32);
+	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+			flags |= GFP_DMA;
+		else
+			flags |= GFP_DMA32;
+	}
+
+	if (gfpflags_allow_blocking(flags)) {
+		unsigned int count = size >> PAGE_SHIFT;
+
+		page = dma_alloc_from_contiguous(dev, count, order, flags);
+		if (page && iommu_no_mapping(dev) &&
+		    page_to_phys(page) + size > dev->coherent_dma_mask) {
+			dma_release_from_contiguous(dev, page, count);
+			page = NULL;
+		}
+	}
+
+	if (!page)
+		page = alloc_pages(flags, order);
+	if (!page)
+		return NULL;
+	memset(page_address(page), 0, size);
+
+	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+					 DMA_BIDIRECTIONAL,
+					 dev->coherent_dma_mask);
+	if (*dma_handle)
+		return page_address(page);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 
-out_free_pages:
-	dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
 	return NULL;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_handle, unsigned long attrs)
 {
-	if (!iommu_no_mapping(dev))
-		intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
-	dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+	int order;
+	struct page *page = virt_to_page(vaddr);
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	intel_unmap(dev, dma_handle, size);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,