/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

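/*
 * Unmap a page-aligned range from the GPU MMU domain, one 4K page at a
 * time, stopping early if the domain's unmap callback makes no progress.
 */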
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

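/*
 * Map a page-aligned physical range into the GPU MMU domain in 4K steps.
 * On failure any pages mapped so far are unmapped again, so the mapping
 * is applied all-or-nothing.
 */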
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_domain_unmap(domain, orig_iova, orig_size - size);

	return ret;
}

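/*
 * Map a scatterlist into the GPU address space at the given IOVA.  Each
 * entry is mapped contiguously after the previous one; on error all
 * entries mapped so far are torn down again.
 */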
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct etnaviv_iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_domain_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

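/*
 * Tear down the GPU mapping of a scatterlist, walking it the same way
 * etnaviv_iommu_map() laid the entries out contiguously.
 */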
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct etnaviv_iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_domain_unmap(domain, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}

static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

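/*
 * Find a free IOVA range of the requested size.  If the address space is
 * full, scan the current mappings for unpinned entries, evict enough of
 * them to make room, and retry the allocation in eviction mode.
 */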
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&mmu->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent mapping_get from
		 * finding this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

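/*
 * Map a GEM object into the GPU address space.  On v1 MMUs a contiguous
 * buffer that already lies inside the 2 GiB linear window above
 * memory_base can be used directly without page table entries; otherwise
 * an IOVA range is allocated and the object's pages are mapped into it.
 */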
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (mmu->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mmu->need_flush = true;
unlock:
	mutex_unlock(&mmu->lock);

	return ret;
}

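/*
 * Remove a GEM object's mapping from the GPU address space.  The mapping
 * must no longer be in use (pinned) when this is called.
 */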
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mmu->need_flush = true;
	mutex_unlock(&mmu->lock);
}

void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	mmu->domain->ops->free(mmu->domain);
	kfree(mmu);
}

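/*
 * Allocate the MMU context for a GPU.  The MMU_VERSION feature bit in
 * minor_features1 selects between the v1 and v2 page table formats.
 */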
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version;
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
		mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!mmu->domain) {
		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
		kfree(mmu);
		return ERR_PTR(-ENOMEM);
	}

	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);

	return mmu;
}

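/* Reprogram the MMU configuration into the hardware, e.g. after a GPU reset. */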
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
	if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
		etnaviv_iommuv1_restore(gpu);
	else
		etnaviv_iommuv2_restore(gpu);
}

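/*
 * Get a GPU-visible address for a kernel-internal suballocation (such as
 * the command buffer region).  On v1 MMUs the buffer is already visible
 * at a fixed offset from memory_base; on v2 an IOVA range is allocated
 * and a read-only mapping is installed.
 */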
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
				  struct drm_mm_node *vram_node, size_t size,
				  u32 *iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V1) {
		*iova = paddr - gpu->memory_base;
		return 0;
	} else {
		int ret;

		mutex_lock(&mmu->lock);
		ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
		if (ret < 0) {
			mutex_unlock(&mmu->lock);
			return ret;
		}
		ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
					 size, ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(vram_node);
			mutex_unlock(&mmu->lock);
			return ret;
		}
		gpu->mmu->need_flush = true;
		mutex_unlock(&mmu->lock);

		*iova = (u32)vram_node->start;
		return 0;
	}
}

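/*
 * Release a suballocation address again.  Only v2 MMUs have anything to
 * undo here, since v1 never created a mapping.
 */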
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
				   struct drm_mm_node *vram_node, size_t size,
				   u32 iova)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V2) {
		mutex_lock(&mmu->lock);
		etnaviv_domain_unmap(mmu->domain, iova, size);
		drm_mm_remove_node(vram_node);
		mutex_unlock(&mmu->lock);
	}
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	return iommu->domain->ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	iommu->domain->ops->dump(iommu->domain, buf);
}