]>
Commit | Line | Data |
---|---|---|
f6ffbd4f | 1 | // SPDX-License-Identifier: GPL-2.0 |
a8c21a54 | 2 | /* |
f6ffbd4f | 3 | * Copyright (C) 2015-2018 Etnaviv Project |
a8c21a54 T |
4 | */ |
5 | ||
27b67278 | 6 | #include <linux/dma-mapping.h> |
6eae41fe SR |
7 | #include <linux/scatterlist.h> |
8 | ||
dd34bb96 | 9 | #include "common.xml.h" |
ea1f5729 | 10 | #include "etnaviv_cmdbuf.h" |
a8c21a54 T |
11 | #include "etnaviv_drv.h" |
12 | #include "etnaviv_gem.h" | |
13 | #include "etnaviv_gpu.h" | |
14 | #include "etnaviv_mmu.h" | |
15 | ||
27b67278 | 16 | static void etnaviv_context_unmap(struct etnaviv_iommu_context *context, |
50073cf9 LS |
17 | unsigned long iova, size_t size) |
18 | { | |
19 | size_t unmapped_page, unmapped = 0; | |
20 | size_t pgsize = SZ_4K; | |
21 | ||
22 | if (!IS_ALIGNED(iova | size, pgsize)) { | |
ba5a4219 | 23 | pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n", |
50073cf9 LS |
24 | iova, size, pgsize); |
25 | return; | |
26 | } | |
27 | ||
28 | while (unmapped < size) { | |
27b67278 LS |
29 | unmapped_page = context->global->ops->unmap(context, iova, |
30 | pgsize); | |
50073cf9 LS |
31 | if (!unmapped_page) |
32 | break; | |
33 | ||
34 | iova += unmapped_page; | |
35 | unmapped += unmapped_page; | |
36 | } | |
37 | } | |
38 | ||
27b67278 | 39 | static int etnaviv_context_map(struct etnaviv_iommu_context *context, |
b6709083 LS |
40 | unsigned long iova, phys_addr_t paddr, |
41 | size_t size, int prot) | |
50073cf9 LS |
42 | { |
43 | unsigned long orig_iova = iova; | |
44 | size_t pgsize = SZ_4K; | |
45 | size_t orig_size = size; | |
46 | int ret = 0; | |
47 | ||
48 | if (!IS_ALIGNED(iova | paddr | size, pgsize)) { | |
ba5a4219 | 49 | pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n", |
50073cf9 LS |
50 | iova, &paddr, size, pgsize); |
51 | return -EINVAL; | |
52 | } | |
53 | ||
54 | while (size) { | |
27b67278 LS |
55 | ret = context->global->ops->map(context, iova, paddr, pgsize, |
56 | prot); | |
50073cf9 LS |
57 | if (ret) |
58 | break; | |
59 | ||
60 | iova += pgsize; | |
61 | paddr += pgsize; | |
62 | size -= pgsize; | |
63 | } | |
64 | ||
65 | /* unroll mapping in case something went wrong */ | |
66 | if (ret) | |
27b67278 | 67 | etnaviv_context_unmap(context, orig_iova, orig_size - size); |
50073cf9 LS |
68 | |
69 | return ret; | |
70 | } | |
71 | ||
27b67278 | 72 | static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, |
9e2e8a51 | 73 | struct sg_table *sgt, int prot) |
27b67278 | 74 | { struct scatterlist *sg; |
a8c21a54 | 75 | unsigned int da = iova; |
182354a5 | 76 | unsigned int i; |
a8c21a54 T |
77 | int ret; |
78 | ||
27b67278 | 79 | if (!context || !sgt) |
a8c21a54 T |
80 | return -EINVAL; |
81 | ||
182354a5 | 82 | for_each_sgtable_dma_sg(sgt, sg, i) { |
d37c120b | 83 | phys_addr_t pa = sg_dma_address(sg) - sg->offset; |
a8c21a54 T |
84 | size_t bytes = sg_dma_len(sg) + sg->offset; |
85 | ||
d37c120b | 86 | VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes); |
a8c21a54 | 87 | |
27b67278 | 88 | ret = etnaviv_context_map(context, da, pa, bytes, prot); |
a8c21a54 T |
89 | if (ret) |
90 | goto fail; | |
91 | ||
92 | da += bytes; | |
93 | } | |
94 | ||
9247fcca LS |
95 | context->flush_seq++; |
96 | ||
a8c21a54 T |
97 | return 0; |
98 | ||
99 | fail: | |
182354a5 | 100 | etnaviv_context_unmap(context, iova, da - iova); |
a8c21a54 T |
101 | return ret; |
102 | } | |
103 | ||
27b67278 | 104 | static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova, |
27d38062 | 105 | struct sg_table *sgt, unsigned len) |
a8c21a54 | 106 | { |
a8c21a54 T |
107 | struct scatterlist *sg; |
108 | unsigned int da = iova; | |
109 | int i; | |
110 | ||
182354a5 | 111 | for_each_sgtable_dma_sg(sgt, sg, i) { |
a8c21a54 | 112 | size_t bytes = sg_dma_len(sg) + sg->offset; |
a8c21a54 | 113 | |
27b67278 | 114 | etnaviv_context_unmap(context, da, bytes); |
a8c21a54 T |
115 | |
116 | VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); | |
117 | ||
118 | BUG_ON(!PAGE_ALIGNED(bytes)); | |
119 | ||
120 | da += bytes; | |
121 | } | |
9247fcca LS |
122 | |
123 | context->flush_seq++; | |
a8c21a54 T |
124 | } |
125 | ||
/*
 * Unmap a GEM object's pages from @context and give its drm_mm address
 * space node back.  Caller must hold context->lock.
 */
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}
137 | ||
/*
 * Reap an idle mapping: unmap it, drop the context reference it held and
 * unlink it from the context's mapping list, clearing mapping->context so
 * other paths can see it is gone.  Caller must hold context->lock, and
 * the mapping must not be pinned (use count zero).
 */
void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_iommu_context *context = mapping->context;

	lockdep_assert_held(&context->lock);
	WARN_ON(mapping->use);	/* only idle mappings may be reaped */

	etnaviv_iommu_remove_mapping(context, mapping);
	etnaviv_iommu_context_put(mapping->context);
	mapping->context = NULL;
	list_del_init(&mapping->mmu_node);
}
150 | ||
/*
 * Find free GPU address space of @size bytes in @context and store it in
 * @node.  First tries a plain insertion; on -ENOSPC it runs a drm_mm
 * eviction scan over the unpinned mappings, reaps the blocks the scan
 * selected, and retries with DRM_MM_INSERT_EVICT.  Caller must hold
 * context->lock.  Returns 0 on success or a negative errno (-ENOSPC if
 * even eviction cannot make room).
 */
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
	struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = 0;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_reap_mapping(m);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}
232 | ||
/*
 * Insert @node at the exact GPU address @va (softpin).  If the range is
 * blocked by existing mappings, any idle (unpinned) mappings covering it
 * are reaped and the insertion is retried once; a pinned blocker means
 * the address is genuinely in use and we return -ENOSPC.  Caller must
 * hold context->lock.
 */
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
	struct drm_mm_node *node, size_t size, u64 va)
{
	struct etnaviv_vram_mapping *m, *n;
	struct drm_mm_node *scan_node;
	LIST_HEAD(scan_list);
	int ret;

	lockdep_assert_held(&context->lock);

	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					  va + size, DRM_MM_INSERT_LOWEST);
	if (ret != -ENOSPC)
		return ret;

	/*
	 * When we can't insert the node, due to a existing mapping blocking
	 * the address space, there are two possible reasons:
	 * 1. Userspace genuinely messed up and tried to reuse address space
	 * before the last job using this VMA has finished executing.
	 * 2. The existing buffer mappings are idle, but the buffers are not
	 * destroyed yet (likely due to being referenced by another context) in
	 * which case the mappings will not be cleaned up and we must reap them
	 * here to make space for the new mapping.
	 */

	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
		m = container_of(scan_node, struct etnaviv_vram_mapping,
				 vram_node);

		/* a pinned blocker can't be evicted: case 1 above */
		if (m->use)
			return -ENOSPC;

		list_add(&m->scan_node, &scan_list);
	}

	/* case 2: reap the idle blockers to clear the requested range */
	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
		etnaviv_iommu_reap_mapping(m);
		list_del_init(&m->scan_node);
	}

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}
277 | ||
/*
 * Map a GEM object into @context, either at the softpin address @va (if
 * non-zero) or at any free address.  On success the mapping holds a
 * reference on the context and is linked into context->mappings.
 * Caller must hold etnaviv_obj->lock.  Returns 0 or a negative errno.
 */
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		/* whole buffer must fit below the 2GiB linear window limit */
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			mapping->context = etnaviv_iommu_context_get(context);
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		/* give the address space back if the pagetable update failed */
		drm_mm_remove_node(node);
		goto unlock;
	}

	mapping->context = etnaviv_iommu_context_get(context);
	list_add_tail(&mapping->mmu_node, &context->mappings);
unlock:
	mutex_unlock(&context->lock);

	return ret;
}
332 | ||
/*
 * Undo etnaviv_iommu_map_gem(): unmap the object, remove it from the
 * context's mapping list and drop the mapping's context reference.
 * Safe against a concurrent reap — if mapping->context was already
 * cleared by etnaviv_iommu_reap_mapping() this is a no-op.
 */
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);	/* must not still be pinned */

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&context->lock);
	etnaviv_iommu_context_put(context);
}
354 | ||
/*
 * kref release callback: tear down the context's cmdbuf suballocator
 * mapping, then hand the context to the version-specific free routine.
 */
static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}
/* Drop a context reference; frees the context when it hits zero. */
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}
368 | ||
/*
 * Allocate and initialise a new IOMMU context for @global, mapping the
 * shared cmdbuf suballocator into it.  For MMUv1 the cmdbuf mapping must
 * land inside the 2GiB linear window, otherwise the context is rejected.
 * Returns the new context or NULL on failure.
 */
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}
404 | ||
/* Program @context's pagetables into the GPU via the version-specific op. */
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}
410 | ||
/*
 * Get a GPU address for the cmdbuf suballoc region in @context, taking a
 * use reference on @mapping.  If the region is already mapped only the
 * use count is bumped.  Returns 0 on success or a negative errno.
 */
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	/* already mapped: just take another use reference */
	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		/* cmdbufs are only ever read by the GPU */
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}
461 | ||
/*
 * Drop a use reference on the suballoc mapping; when the last reference
 * goes away (and the MMU actually has pagetables, i.e. not MMUv1) the
 * region is unmapped and its address space node released.
 */
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	/* MMUv1 mappings are manufactured, nothing to tear down */
	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}
479 | ||
/* Size in bytes needed by etnaviv_iommu_dump() for this context. */
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}
484 | ||
/* Dump the context's pagetables into @buf (sized per dump_size above). */
void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}
db82a043 | 489 | |
/*
 * Set up (or take a reference on) the per-DRM-device global MMU state:
 * detect the MMU version from the GPU features, allocate the shared bad
 * page (and the PTA for MMUv2) and select the version-specific ops.
 * Fails with -ENXIO if a second GPU reports a different MMU version than
 * the already-initialised global state.
 */
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	/* global state already exists: validate version and share it */
	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	/* poison pattern so stray reads through the bad page are recognisable */
	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
						  &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}
550 | ||
/*
 * Drop a reference on the global MMU state; when the last user is gone,
 * free the PTA and bad page DMA buffers and the global structure itself.
 * Safe to call when global state was never set up (NULL check).
 */
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (!global)
		return;

	if (--global->use > 0)
		return;

	/* pta_cpu is only allocated for MMUv2 */
	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}