// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Intel Corporation
 * Copyright 2018 Google LLC.
 *
 * Author: Tomasz Figa <tfiga@chromium.org>
 * Author: Yong Zhi <yong.zhi@intel.com>
 */
#include <linux/vmalloc.h>

#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-mmu.h"
#include "ipu3-dmamap.h"
18 * Free a buffer allocated by imgu_dmamap_alloc_buffer()
20 static void imgu_dmamap_free_buffer(struct page
**pages
,
23 int count
= size
>> PAGE_SHIFT
;
26 __free_page(pages
[count
]);
31 * Based on the implementation of __iommu_dma_alloc_pages()
32 * defined in drivers/iommu/dma-iommu.c
34 static struct page
**imgu_dmamap_alloc_buffer(size_t size
, gfp_t gfp
)
37 unsigned int i
= 0, count
= size
>> PAGE_SHIFT
;
38 unsigned int order_mask
= 1;
39 const gfp_t high_order_gfp
= __GFP_NOWARN
| __GFP_NORETRY
;
41 /* Allocate mem for array of page ptrs */
42 pages
= kvmalloc_array(count
, sizeof(*pages
), GFP_KERNEL
);
47 gfp
|= __GFP_HIGHMEM
| __GFP_ZERO
;
50 struct page
*page
= NULL
;
51 unsigned int order_size
;
53 for (order_mask
&= (2U << __fls(count
)) - 1;
54 order_mask
; order_mask
&= ~order_size
) {
55 unsigned int order
= __fls(order_mask
);
57 order_size
= 1U << order
;
58 page
= alloc_pages((order_mask
- order_size
) ?
59 gfp
| high_order_gfp
: gfp
, order
);
64 if (!PageCompound(page
)) {
65 split_page(page
, order
);
69 __free_pages(page
, order
);
72 imgu_dmamap_free_buffer(pages
, i
<< PAGE_SHIFT
);
84 * imgu_dmamap_alloc - allocate and map a buffer into KVA
85 * @imgu: struct device pointer
86 * @map: struct to store mapping variables
93 void *imgu_dmamap_alloc(struct imgu_device
*imgu
, struct imgu_css_map
*map
,
96 unsigned long shift
= iova_shift(&imgu
->iova_domain
);
97 struct device
*dev
= &imgu
->pci_dev
->dev
;
98 size_t size
= PAGE_ALIGN(len
);
99 int count
= size
>> PAGE_SHIFT
;
105 dev_dbg(dev
, "%s: allocating %zu\n", __func__
, size
);
107 iova
= alloc_iova(&imgu
->iova_domain
, size
>> shift
,
108 imgu
->mmu
->aperture_end
>> shift
, 0);
112 pages
= imgu_dmamap_alloc_buffer(size
, GFP_KERNEL
);
116 /* Call IOMMU driver to setup pgt */
117 iovaddr
= iova_dma_addr(&imgu
->iova_domain
, iova
);
118 for (i
= 0; i
< count
; ++i
) {
119 rval
= imgu_mmu_map(imgu
->mmu
, iovaddr
,
120 page_to_phys(pages
[i
]), PAGE_SIZE
);
124 iovaddr
+= PAGE_SIZE
;
127 map
->vaddr
= vmap(pages
, count
, VM_USERMAP
, PAGE_KERNEL
);
133 map
->daddr
= iova_dma_addr(&imgu
->iova_domain
, iova
);
135 dev_dbg(dev
, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__
,
136 size
, &map
->daddr
, map
->vaddr
);
141 imgu_dmamap_free_buffer(pages
, size
);
142 imgu_mmu_unmap(imgu
->mmu
, iova_dma_addr(&imgu
->iova_domain
, iova
),
146 __free_iova(&imgu
->iova_domain
, iova
);
151 void imgu_dmamap_unmap(struct imgu_device
*imgu
, struct imgu_css_map
*map
)
155 iova
= find_iova(&imgu
->iova_domain
,
156 iova_pfn(&imgu
->iova_domain
, map
->daddr
));
160 imgu_mmu_unmap(imgu
->mmu
, iova_dma_addr(&imgu
->iova_domain
, iova
),
161 iova_size(iova
) << iova_shift(&imgu
->iova_domain
));
163 __free_iova(&imgu
->iova_domain
, iova
);
167 * Counterpart of imgu_dmamap_alloc
169 void imgu_dmamap_free(struct imgu_device
*imgu
, struct imgu_css_map
*map
)
171 dev_dbg(&imgu
->pci_dev
->dev
, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
172 __func__
, map
->size
, &map
->daddr
, map
->vaddr
);
177 imgu_dmamap_unmap(imgu
, map
);
180 imgu_dmamap_free_buffer(map
->pages
, map
->size
);
184 int imgu_dmamap_map_sg(struct imgu_device
*imgu
, struct scatterlist
*sglist
,
185 int nents
, struct imgu_css_map
*map
)
187 unsigned long shift
= iova_shift(&imgu
->iova_domain
);
188 struct scatterlist
*sg
;
193 for_each_sg(sglist
, sg
, nents
, i
) {
197 if (i
!= nents
- 1 && !PAGE_ALIGNED(sg
->length
))
203 size
= iova_align(&imgu
->iova_domain
, size
);
204 dev_dbg(&imgu
->pci_dev
->dev
, "dmamap: mapping sg %d entries, %zu pages\n",
205 nents
, size
>> shift
);
207 iova
= alloc_iova(&imgu
->iova_domain
, size
>> shift
,
208 imgu
->mmu
->aperture_end
>> shift
, 0);
212 dev_dbg(&imgu
->pci_dev
->dev
, "dmamap: iova low pfn %lu, high pfn %lu\n",
213 iova
->pfn_lo
, iova
->pfn_hi
);
215 if (imgu_mmu_map_sg(imgu
->mmu
, iova_dma_addr(&imgu
->iova_domain
, iova
),
216 sglist
, nents
) < size
)
219 memset(map
, 0, sizeof(*map
));
220 map
->daddr
= iova_dma_addr(&imgu
->iova_domain
, iova
);
226 __free_iova(&imgu
->iova_domain
, iova
);
231 int imgu_dmamap_init(struct imgu_device
*imgu
)
233 unsigned long order
, base_pfn
;
234 int ret
= iova_cache_get();
239 order
= __ffs(IPU3_PAGE_SIZE
);
240 base_pfn
= max_t(unsigned long, 1, imgu
->mmu
->aperture_start
>> order
);
241 init_iova_domain(&imgu
->iova_domain
, 1UL << order
, base_pfn
);
246 void imgu_dmamap_exit(struct imgu_device
*imgu
)
248 put_iova_domain(&imgu
->iova_domain
);