/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * GK20A does not have dedicated video memory, and to accurately represent this
 * fact Nouveau will not create a RAM device for it. Therefore its instmem
 * implementation must be done directly on top of system memory, while
 * preserving coherency for read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
 *    pages contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed by creating a write-combined
 * mapping. The GPU L2 cache must thus be flushed/invalidated when required. To
 * be conservative we do this every time we acquire or release an instobj, but
 * ideally L2 management should be handled at a higher level.
 *
 * To improve performance, CPU mappings are not removed upon instobj release.
 * Instead they are placed into an LRU list to be recycled when the mapped space
 * goes beyond a certain threshold. At the moment this limit is 1MB.
 */
#include "priv.h"

#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>
#include <subdev/mmu.h>

struct gk20a_instobj {
	struct nvkm_memory memory;
	struct nvkm_mm_node *mn;
	struct gk20a_instmem *imem;

	/* CPU mapping */
	u32 *vaddr;
};
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)

/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
	struct gk20a_instobj base;

	dma_addr_t handle;
	struct nvkm_mm_node r;
};
#define gk20a_instobj_dma(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)

/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
	struct gk20a_instobj base;

	/* to link into gk20a_instmem::vaddr_lru */
	struct list_head vaddr_node;
	/* how many clients are using vaddr? */
	u32 use_cpt;

	/* will point to the higher half of pages */
	dma_addr_t *dma_addrs;
	/* array of base.mn->length pages (+ dma_addr_ts) */
	struct page *pages[];
};
#define gk20a_instobj_iommu(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)

struct gk20a_instmem {
	struct nvkm_instmem base;

	/* protects vaddr_* and gk20a_instobj::vaddr* */
	struct mutex lock;

	/* CPU mappings LRU */
	unsigned int vaddr_use;
	unsigned int vaddr_max;
	struct list_head vaddr_lru;

	/* Only used if an IOMMU is present */
	struct mutex *mm_mutex;
	struct nvkm_mm *mm;
	struct iommu_domain *domain;
	unsigned long iommu_pgshift;
	u16 iommu_bit;

	/* Only used by DMA API */
	unsigned long attrs;
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)

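/*
 * nvkm_memory accessors: instobjs live in (non-coherent) system memory, always
 * use 4KiB (shift 12) GPU pages, and report their address and size in those
 * units from the underlying mm node.
 */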
static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_NCOH;
}

static u8
gk20a_instobj_page(struct nvkm_memory *memory)
{
	return 12;
}

static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
	return (u64)gk20a_instobj(memory)->mn->offset << 12;
}

static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
	return (u64)gk20a_instobj(memory)->mn->length << 12;
}

/*
 * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held.
 */
static void
gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
{
	struct gk20a_instmem *imem = obj->base.imem;
	/* there should not be any user left... */
	WARN_ON(obj->use_cpt);
	list_del(&obj->vaddr_node);
	vunmap(obj->base.vaddr);
	obj->base.vaddr = NULL;
	imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
		   imem->vaddr_max);
}

/*
 * Must be called while holding gk20a_instmem::lock
 */
static void
gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
{
	while (imem->vaddr_use + size > imem->vaddr_max) {
		/* no candidate that can be unmapped, abort... */
		if (list_empty(&imem->vaddr_lru))
			break;

		gk20a_instobj_iommu_recycle_vaddr(
				list_first_entry(&imem->vaddr_lru,
				struct gk20a_instobj_iommu, vaddr_node));
	}
}

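/*
 * The DMA-backed variant keeps the CPU mapping obtained from dma_alloc_attrs()
 * for the object's whole lifetime, so acquiring it only needs to flush the GPU
 * L2 cache before the CPU touches the write-combined mapping.
 */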
static void __iomem *
gk20a_instobj_acquire_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	nvkm_ltc_flush(ltc);

	return node->vaddr;
}

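/*
 * IOMMU-backed objects are vmap()ed on demand: reuse an existing mapping if
 * there is one (taking it off the LRU if it was idle), otherwise garbage
 * collect the LRU until the vmap budget can accommodate this object and create
 * a new write-combined mapping. use_cpt counts the active CPU users.
 */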
static void __iomem *
gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	const u64 size = nvkm_memory_size(memory);

	nvkm_ltc_flush(ltc);

	mutex_lock(&imem->lock);

	if (node->base.vaddr) {
		if (!node->use_cpt) {
			/* remove from LRU list since mapping in use again */
			list_del(&node->vaddr_node);
		}
		goto out;
	}

	/* try to free some address space if we reached the limit */
	gk20a_instmem_vaddr_gc(imem, size);

	/* map the pages */
	node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
				pgprot_writecombine(PAGE_KERNEL));
	if (!node->base.vaddr) {
		nvkm_error(&imem->base.subdev, "cannot map instobj - "
			   "this is not going to end well...\n");
		goto out;
	}

	imem->vaddr_use += size;
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
		   imem->vaddr_use, imem->vaddr_max);

out:
	node->use_cpt++;
	mutex_unlock(&imem->lock);

	return node->base.vaddr;
}

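/*
 * Releasing only needs to make CPU writes visible to the GPU: a write barrier
 * to drain the write-combine buffer, then an L2 invalidate so the GPU does not
 * read stale cache lines.
 */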
static void
gk20a_instobj_release_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	/* in case we got a write-combined mapping */
	wmb();
	nvkm_ltc_invalidate(ltc);
}

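/*
 * Drop one CPU user of an IOMMU-backed object. The vmap is not torn down here;
 * once the last user is gone the object is queued on the LRU so its mapping
 * can be recycled later by gk20a_instmem_vaddr_gc().
 */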
static void
gk20a_instobj_release_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	mutex_lock(&imem->lock);

	/* we should at least have one user to release... */
	if (WARN_ON(node->use_cpt == 0))
		goto out;

	/* add unused objs to the LRU list to recycle their mapping */
	if (--node->use_cpt == 0)
		list_add_tail(&node->vaddr_node, &imem->vaddr_lru);

out:
	mutex_unlock(&imem->lock);

	wmb();
	nvkm_ltc_invalidate(ltc);
}

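/*
 * rd32/wr32 go straight through the CPU mapping; callers are expected to have
 * acquired the object first so that vaddr is valid.
 */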
static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	return node->vaddr[offset / 4];
}

static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	node->vaddr[offset / 4] = data;
}

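/*
 * Map the object into a GPU virtual address space. The vma->vm branch covers
 * the legacy nvkm_vm path; otherwise the request is forwarded to the new
 * nvkm_vmm interface with the mm node describing the backing pages.
 */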
static int
gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		  struct nvkm_vma *vma, void *argv, u32 argc)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct nvkm_vmm_map map = {
		.memory = &node->memory,
		.offset = offset,
		.mem = node->mn,
	};

	if (vma->vm) {
		struct nvkm_mem mem = {
			.mem = node->mn,
			.memory = &node->memory,
		};
		nvkm_vm_map_at(vma, 0, &mem);
		return 0;
	}

	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}

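/*
 * Destructors return the allocation to be freed by the caller. The DMA variant
 * simply hands the contiguous buffer back to dma_free_attrs().
 */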
static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;

	if (unlikely(!node->base.vaddr))
		goto out;

	dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
		       node->base.vaddr, node->handle, imem->attrs);

out:
	return node;
}

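/*
 * The IOMMU variant undoes the constructor in reverse order: recycle any live
 * CPU mapping, clear the IOMMU address bit, unmap and free each page, then
 * give the GPU address range back to the allocator.
 */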
static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;
	struct nvkm_mm_node *r = node->base.mn;
	int i;

	if (unlikely(!r))
		goto out;

	mutex_lock(&imem->lock);

	/* recycle the CPU mapping unless it has already been reclaimed */
	if (node->base.vaddr)
		gk20a_instobj_iommu_recycle_vaddr(node);

	mutex_unlock(&imem->lock);

	/* clear IOMMU bit to unmap pages */
	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);

	/* Unmap pages from GPU address space and free them */
	for (i = 0; i < node->base.mn->length; i++) {
		iommu_unmap(imem->domain,
			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	/* Release area from GPU address space */
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

out:
	return node;
}

static const struct nvkm_memory_func
gk20a_instobj_func_dma = {
	.dtor = gk20a_instobj_dtor_dma,
	.target = gk20a_instobj_target,
	.page = gk20a_instobj_page,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_dma,
	.release = gk20a_instobj_release_dma,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_func
gk20a_instobj_func_iommu = {
	.dtor = gk20a_instobj_dtor_iommu,
	.target = gk20a_instobj_target,
	.page = gk20a_instobj_page,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_iommu,
	.release = gk20a_instobj_release_iommu,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_ptrs
gk20a_instobj_ptrs = {
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
};

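/*
 * The DMA constructor allocates the whole object as one physically contiguous,
 * write-combined buffer and describes it with a locally-embedded nvkm_mm_node
 * so that both backends can be handled uniformly through gk20a_instobj::mn.
 */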
static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
		       struct gk20a_instobj **_node)
{
	struct gk20a_instobj_dma *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;

	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;

	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);
	node->base.memory.ptrs = &gk20a_instobj_ptrs;

	node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					   &node->handle, GFP_KERNEL,
					   imem->attrs);
	if (!node->base.vaddr) {
		nvkm_error(subdev, "cannot allocate DMA memory\n");
		return -ENOMEM;
	}

	/* alignment check */
	if (unlikely(node->handle & (align - 1)))
		nvkm_warn(subdev,
			  "memory not aligned as requested: %pad (0x%x)\n",
			  &node->handle, align);

	/* present memory for being mapped using small pages */
	node->r.type = 12;
	node->r.offset = node->handle >> 12;
	node->r.length = (npages << PAGE_SHIFT) >> 12;

	node->base.mn = &node->r;
	return 0;
}

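/*
 * The IOMMU constructor allocates one page at a time, DMA-maps each page, then
 * carves a contiguous range out of the GPU IOMMU space and maps the pages into
 * it, so the GPU sees a single contiguous object even though the backing pages
 * are scattered.
 */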
static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
			 struct gk20a_instobj **_node)
{
	struct gk20a_instobj_iommu *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;
	struct nvkm_mm_node *r;
	int ret;
	int i;

	/*
	 * despite their variable size, instmem allocations are small enough
	 * (< 1 page) to be handled by kzalloc
	 */
	if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
			     sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;
	node->dma_addrs = (void *)(node->pages + npages);

	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);
	node->base.memory.ptrs = &gk20a_instobj_ptrs;

	/* Allocate backing memory */
	for (i = 0; i < npages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		dma_addr_t dma_adr;

		if (p == NULL) {
			ret = -ENOMEM;
			goto free_pages;
		}
		node->pages[i] = p;
		dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_adr)) {
			nvkm_error(subdev, "DMA mapping error!\n");
			ret = -ENOMEM;
			goto free_pages;
		}
		node->dma_addrs[i] = dma_adr;
	}

	mutex_lock(imem->mm_mutex);
	/* Reserve area from GPU address space */
	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
			   align >> imem->iommu_pgshift, &r);
	mutex_unlock(imem->mm_mutex);
	if (ret) {
		nvkm_error(subdev, "IOMMU space is full!\n");
		goto free_pages;
	}

	/* Map into GPU address space */
	for (i = 0; i < npages; i++) {
		u32 offset = (r->offset + i) << imem->iommu_pgshift;

		ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
		if (ret < 0) {
			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);

			while (i-- > 0) {
				offset -= PAGE_SIZE;
				iommu_unmap(imem->domain, offset, PAGE_SIZE);
			}
			goto release_area;
		}
	}

	/* IOMMU bit tells that an address is to be resolved through the IOMMU */
	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);

	node->base.mn = r;
	return 0;

release_area:
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

free_pages:
	for (i = 0; i < npages && node->pages[i] != NULL; i++) {
		dma_addr_t dma_addr = node->dma_addrs[i];
		if (dma_addr)
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	return ret;
}

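/*
 * Common entry point for instobj allocation: round size and alignment up to
 * page granularity and dispatch to the IOMMU or DMA constructor depending on
 * whether an IOMMU domain was found at probe time.
 */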
static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		  struct nvkm_memory **pmemory)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct gk20a_instobj *node = NULL;
	int ret;

	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
		   imem->domain ? "IOMMU" : "DMA", size, align);

	/* Round size and align to page bounds */
	size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
	align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);

	if (imem->domain)
		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
					       align, &node);
	else
		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
					     align, &node);
	*pmemory = node ? &node->memory : NULL;
	if (ret)
		return ret;

	node->imem = imem;

	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
		   size, align, (u64)node->mn->offset << 12);

	return 0;
}

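/*
 * On subdev teardown all instobjs should already be gone: a non-empty LRU or
 * non-zero vmap usage indicates a leaked CPU mapping, so warn about it.
 */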
static void *
gk20a_instmem_dtor(struct nvkm_instmem *base)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);

	/* perform some sanity checks... */
	if (!list_empty(&imem->vaddr_lru))
		nvkm_warn(&base->subdev, "instobj LRU not empty!\n");

	if (imem->vaddr_use != 0)
		nvkm_warn(&base->subdev, "instobj vmap area not empty! "
			  "0x%x bytes still mapped\n", imem->vaddr_use);

	return imem;
}

static const struct nvkm_instmem_func
gk20a_instmem = {
	.dtor = gk20a_instmem_dtor,
	.memory_new = gk20a_instobj_new,
	.zero = false,
};

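/*
 * Probe-time constructor: pick the IOMMU path when the Tegra layer exposes an
 * IOMMU domain, otherwise fall back to the DMA API with write-combined,
 * weakly-ordered, non-consistent allocations.
 */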
int
gk20a_instmem_new(struct nvkm_device *device, int index,
		  struct nvkm_instmem **pimem)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gk20a_instmem *imem;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
	mutex_init(&imem->lock);
	*pimem = &imem->base;

	/* do not allow more than 1MB of CPU-mapped instmem */
	imem->vaddr_use = 0;
	imem->vaddr_max = 0x100000;
	INIT_LIST_HEAD(&imem->vaddr_lru);

	if (tdev->iommu.domain) {
		imem->mm_mutex = &tdev->iommu.mutex;
		imem->mm = &tdev->iommu.mm;
		imem->domain = tdev->iommu.domain;
		imem->iommu_pgshift = tdev->iommu.pgshift;
		imem->iommu_bit = tdev->func->iommu_bit;

		nvkm_info(&imem->base.subdev, "using IOMMU\n");
	} else {
		imem->attrs = DMA_ATTR_NON_CONSISTENT |
			      DMA_ATTR_WEAK_ORDERING |
			      DMA_ATTR_WRITE_COMBINE;

		nvkm_info(&imem->base.subdev, "using DMA API\n");
	}

	return 0;
}