/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * GK20A does not have dedicated video memory, and to accurately represent
 * this fact Nouveau will not create a RAM device for it. Therefore its
 * instmem implementation must be done directly on top of system memory,
 * while preserving coherency for read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
 *    pages contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed through a write-combined
 * mapping. The GPU L2 cache must thus be flushed/invalidated when required.
 * To be conservative we do this every time we acquire or release an instobj,
 * but ideally L2 management should be handled at a higher level.
 *
 * To improve performance, CPU mappings are not removed upon instobj release.
 * Instead they are placed into an LRU list to be recycled when the mapped
 * space goes beyond a certain threshold. At the moment this limit is 1MB.
 */
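
/*
 * A rough lifecycle sketch from a client's point of view (illustrative
 * only; the helper names below are the usual nvkm accessors from
 * <core/memory.h>):
 *
 *	struct nvkm_memory *memory;
 *	nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, align, false,
 *			&memory);	-> dispatches to gk20a_instobj_new()
 *	nvkm_kmap(memory);		-> acquire: create/revive CPU mapping, flush L2
 *	nvkm_wo32(memory, 0x00, data);	-> CPU write through vaddr
 *	nvkm_done(memory);		-> release: wmb() + L2 invalidate
 */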
#include "priv.h"

#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>

struct gk20a_instobj {
	struct nvkm_memory memory;
	struct nvkm_mem mem;
	struct gk20a_instmem *imem;

	/* CPU mapping */
	u32 *vaddr;
};
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)

/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
	struct gk20a_instobj base;

	dma_addr_t handle;
	struct nvkm_mm_node r;
};
#define gk20a_instobj_dma(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)

/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
	struct gk20a_instobj base;

	/* to link into gk20a_instmem::vaddr_lru */
	struct list_head vaddr_node;
	/* how many clients are using vaddr? */
	u32 use_cpt;

	/* will point to the higher half of pages */
	dma_addr_t *dma_addrs;
	/* array of base.mem.size pages (+ dma_addr_ts) */
	struct page *pages[];
};
#define gk20a_instobj_iommu(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)

struct gk20a_instmem {
	struct nvkm_instmem base;

	/* protects vaddr_* and gk20a_instobj::vaddr* */
	struct mutex lock;

	/* CPU mappings LRU */
	unsigned int vaddr_use;
	unsigned int vaddr_max;
	struct list_head vaddr_lru;

	/* Only used if an IOMMU is present */
	struct mutex *mm_mutex;
	struct nvkm_mm *mm;
	struct iommu_domain *domain;
	unsigned long iommu_pgshift;
	u16 iommu_bit;

	/* Only used by DMA API */
	unsigned long attrs;
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)

static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_NCOH;
}

static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
	return gk20a_instobj(memory)->mem.offset;
}

static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
	return (u64)gk20a_instobj(memory)->mem.size << 12;
}

/*
 * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held.
 */
static void
gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
{
	struct gk20a_instmem *imem = obj->base.imem;

	/* there should not be any user left... */
	WARN_ON(obj->use_cpt);
	list_del(&obj->vaddr_node);
	vunmap(obj->base.vaddr);
	obj->base.vaddr = NULL;
	imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
		   imem->vaddr_max);
}

/*
 * Must be called while holding gk20a_instmem::lock.
 */
static void
gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
{
	while (imem->vaddr_use + size > imem->vaddr_max) {
		/* no candidate that can be unmapped, abort... */
		if (list_empty(&imem->vaddr_lru))
			break;

		gk20a_instobj_iommu_recycle_vaddr(
				list_first_entry(&imem->vaddr_lru,
				struct gk20a_instobj_iommu, vaddr_node));
	}
}

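/*
 * DMA-backed objects keep the write-combined CPU mapping returned by
 * dma_alloc_attrs() for their entire lifetime, so acquiring one only
 * requires flushing the GPU L2 before the CPU accesses it.
 */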
static void __iomem *
gk20a_instobj_acquire_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	nvkm_ltc_flush(ltc);

	return node->vaddr;
}

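/*
 * IOMMU-backed objects are vmap()ed on demand: revive a still-cached
 * mapping from the LRU if one exists, otherwise garbage-collect old
 * mappings until the new one fits under vaddr_max and create it.
 */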
static void __iomem *
gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	const u64 size = nvkm_memory_size(memory);

	nvkm_ltc_flush(ltc);

	mutex_lock(&imem->lock);

	if (node->base.vaddr) {
		if (!node->use_cpt) {
			/* remove from LRU list since mapping in use again */
			list_del(&node->vaddr_node);
		}
		goto out;
	}

	/* try to free some address space if we reached the limit */
	gk20a_instmem_vaddr_gc(imem, size);

	/* map the pages */
	node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
				pgprot_writecombine(PAGE_KERNEL));
	if (!node->base.vaddr) {
		nvkm_error(&imem->base.subdev, "cannot map instobj - "
			   "this is not going to end well...\n");
		goto out;
	}

	imem->vaddr_use += size;
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
		   imem->vaddr_use, imem->vaddr_max);

out:
	node->use_cpt++;
	mutex_unlock(&imem->lock);

	return node->base.vaddr;
}

static void
gk20a_instobj_release_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	/* in case we got a write-combined mapping */
	wmb();
	nvkm_ltc_invalidate(ltc);
}

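/*
 * Drop one user of the CPU mapping; the mapping itself is kept and only
 * parked on the LRU list, so a later acquire can revive it cheaply.
 */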
static void
gk20a_instobj_release_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	mutex_lock(&imem->lock);

	/* we should at least have one user to release... */
	if (WARN_ON(node->use_cpt == 0))
		goto out;

	/* add unused objs to the LRU list to recycle their mapping */
	if (--node->use_cpt == 0)
		list_add_tail(&node->vaddr_node, &imem->vaddr_lru);

out:
	mutex_unlock(&imem->lock);

	wmb();
	nvkm_ltc_invalidate(ltc);
}

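/*
 * rd32/wr32 go straight through the cached CPU mapping; callers must
 * hold the object acquired, which guarantees vaddr is valid.
 */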
static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	return node->vaddr[offset / 4];
}

static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	node->vaddr[offset / 4] = data;
}

static void
gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	nvkm_vm_map_at(vma, offset, &node->mem);
}

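/*
 * dma_free_attrs() tears down both the CPU mapping and the backing
 * memory in one call, matching the dma_alloc_attrs() in the constructor.
 */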
static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;

	if (unlikely(!node->base.vaddr))
		goto out;

	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT,
		       node->base.vaddr, node->handle, imem->attrs);

out:
	return node;
}

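/*
 * Teardown mirrors construction: recycle the vmap if it is still cached,
 * clear the IOMMU bit from the GPU address, unmap every page from the
 * IOMMU and the DMA API, then return the area to the GPU address space
 * allocator.
 */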
static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;
	struct nvkm_mm_node *r = node->base.mem.mem;
	int i;

	if (unlikely(!r))
		goto out;

	mutex_lock(&imem->lock);

	/* recycle the vaddr, unless it has already been recycled */
	if (node->base.vaddr)
		gk20a_instobj_iommu_recycle_vaddr(node);

	mutex_unlock(&imem->lock);

	/* clear IOMMU bit to unmap pages */
	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);

	/* Unmap pages from GPU address space and free them */
	for (i = 0; i < node->base.mem.size; i++) {
		iommu_unmap(imem->domain,
			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	/* Release area from GPU address space */
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

out:
	return node;
}

static const struct nvkm_memory_func
gk20a_instobj_func_dma = {
	.dtor = gk20a_instobj_dtor_dma,
	.target = gk20a_instobj_target,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_dma,
	.release = gk20a_instobj_release_dma,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_func
gk20a_instobj_func_iommu = {
	.dtor = gk20a_instobj_dtor_iommu,
	.target = gk20a_instobj_target,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_iommu,
	.release = gk20a_instobj_release_iommu,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_ptrs
gk20a_instobj_ptrs = {
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
};

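/*
 * The DMA path allocates one physically contiguous, write-combined
 * buffer and wraps it in a single hand-built nvkm_mm_node (node->r) so
 * the rest of nvkm can map it using small (4K) pages.
 */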
static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
		       struct gk20a_instobj **_node)
{
	struct gk20a_instobj_dma *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;

	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;

	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
	node->base.memory.ptrs = &gk20a_instobj_ptrs;

	node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					   &node->handle, GFP_KERNEL,
					   imem->attrs);
	if (!node->base.vaddr) {
		nvkm_error(subdev, "cannot allocate DMA memory\n");
		return -ENOMEM;
	}

	/* alignment check */
	if (unlikely(node->handle & (align - 1)))
		nvkm_warn(subdev,
			  "memory not aligned as requested: %pad (0x%x)\n",
			  &node->handle, align);

	/* present memory for being mapped using small pages */
	node->r.type = 12;
	node->r.offset = node->handle >> 12;
	node->r.length = (npages << PAGE_SHIFT) >> 12;

	node->base.mem.offset = node->handle;
	node->base.mem.mem = &node->r;
	return 0;
}

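/*
 * The IOMMU path allocates discontiguous system pages, DMA-maps each of
 * them, and stitches them into a range that is contiguous from the GPU's
 * point of view. The final GPU address carries the IOMMU bit; e.g.,
 * assuming iommu_bit = 34 and iommu_pgshift = 12 (typical GK20A values),
 * the address presented to the GPU becomes:
 *
 *	gaddr = (r->offset | BIT(34 - 12)) << 12
 *
 * which the GPU MMU resolves through the IOMMU rather than as a raw
 * physical address.
 */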
static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
			 struct gk20a_instobj **_node)
{
	struct gk20a_instobj_iommu *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;
	struct nvkm_mm_node *r;
	int ret;
	int i;

	/*
	 * despite their variable size, instmem allocations are small enough
	 * (< 1 page) to be handled by kzalloc
	 */
	if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
			     sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;
	node->dma_addrs = (void *)(node->pages + npages);

	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
	node->base.memory.ptrs = &gk20a_instobj_ptrs;

	/* Allocate backing memory */
	for (i = 0; i < npages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		dma_addr_t dma_adr;

		if (p == NULL) {
			ret = -ENOMEM;
			goto free_pages;
		}
		node->pages[i] = p;
		dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_adr)) {
			nvkm_error(subdev, "DMA mapping error!\n");
			ret = -ENOMEM;
			goto free_pages;
		}
		node->dma_addrs[i] = dma_adr;
	}

	mutex_lock(imem->mm_mutex);
	/* Reserve area from GPU address space */
	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
			   align >> imem->iommu_pgshift, &r);
	mutex_unlock(imem->mm_mutex);
	if (ret) {
		nvkm_error(subdev, "IOMMU space is full!\n");
		goto free_pages;
	}

	/* Map into GPU address space */
	for (i = 0; i < npages; i++) {
		u32 offset = (r->offset + i) << imem->iommu_pgshift;

		ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
		if (ret < 0) {
			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);

			while (i-- > 0) {
				offset -= PAGE_SIZE;
				iommu_unmap(imem->domain, offset, PAGE_SIZE);
			}
			goto release_area;
		}
	}

	/* the IOMMU bit tells the GPU an address must be resolved through the IOMMU */
	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);

	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;
	node->base.mem.mem = r;
	return 0;

release_area:
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

free_pages:
	for (i = 0; i < npages && node->pages[i] != NULL; i++) {
		dma_addr_t dma_addr = node->dma_addrs[i];

		if (dma_addr)
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	return ret;
}

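/*
 * Common allocation entry point: round size and alignment up to page
 * granularity, then dispatch to the IOMMU or DMA constructor depending
 * on what was probed at instmem creation time.
 */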
static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		  struct nvkm_memory **pmemory)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct gk20a_instobj *node = NULL;
	int ret;

	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
		   imem->domain ? "IOMMU" : "DMA", size, align);

	/* Round size and align to page bounds */
	size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
	align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);

	if (imem->domain)
		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
					       align, &node);
	else
		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
					     align, &node);
	*pmemory = node ? &node->memory : NULL;
	if (ret)
		return ret;

	node->imem = imem;

	/* present memory for being mapped using small pages */
	node->mem.size = size >> 12;
	node->mem.memtype = 0;
	node->mem.page_shift = 12;

	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
		   size, align, node->mem.offset);

	return 0;
}

static void *
gk20a_instmem_dtor(struct nvkm_instmem *base)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);

	/* perform some sanity checks... */
	if (!list_empty(&imem->vaddr_lru))
		nvkm_warn(&base->subdev, "instobj LRU not empty!\n");

	if (imem->vaddr_use != 0)
		nvkm_warn(&base->subdev, "instobj vmap area not empty! "
			  "0x%x bytes still mapped\n", imem->vaddr_use);

	return imem;
}

static const struct nvkm_instmem_func
gk20a_instmem = {
	.dtor = gk20a_instmem_dtor,
	.memory_new = gk20a_instobj_new,
	.persistent = true,
	.zero = false,
};

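/*
 * Instmem constructor: if the Tegra device exposes an IOMMU domain we
 * take the (preferred) IOMMU path, otherwise fall back to physically
 * contiguous allocations through the DMA API.
 */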
int
gk20a_instmem_new(struct nvkm_device *device, int index,
		  struct nvkm_instmem **pimem)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gk20a_instmem *imem;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
	mutex_init(&imem->lock);
	*pimem = &imem->base;

	/* do not allow more than 1MB of CPU-mapped instmem */
	imem->vaddr_use = 0;
	imem->vaddr_max = 0x100000;
	INIT_LIST_HEAD(&imem->vaddr_lru);

	if (tdev->iommu.domain) {
		imem->mm_mutex = &tdev->iommu.mutex;
		imem->mm = &tdev->iommu.mm;
		imem->domain = tdev->iommu.domain;
		imem->iommu_pgshift = tdev->iommu.pgshift;
		imem->iommu_bit = tdev->func->iommu_bit;

		nvkm_info(&imem->base.subdev, "using IOMMU\n");
	} else {
		imem->attrs = DMA_ATTR_NON_CONSISTENT |
			      DMA_ATTR_WEAK_ORDERING |
			      DMA_ATTR_WRITE_COMBINE;

		nvkm_info(&imem->base.subdev, "using DMA API\n");
	}

	return 0;
}