/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * GK20A does not have dedicated video memory, and to accurately represent this
 * fact Nouveau will not create a RAM device for it. Therefore its instmem
 * implementation must be done directly on top of system memory, while
 * preserving coherency for read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
 *    pages contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed by creating a write-combined
 * mapping. The GPU L2 cache must thus be flushed/invalidated when required. To
 * be conservative we do this every time we acquire or release an instobj, but
 * ideally L2 management should be handled at a higher level.
 *
 * To improve performance, CPU mappings are not removed upon instobj release.
 * Instead they are placed into an LRU list to be recycled when the mapped space
 * goes beyond a certain threshold. At the moment this limit is 1MB.
 */
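/*
 * Object lifecycle, as implemented below: gk20a_instobj_new() builds either a
 * DMA- or IOMMU-backed object, acquire() returns a CPU pointer (flushing the
 * GPU L2 first), rd32()/wr32() access the object through that mapping,
 * release() invalidates the L2 so the GPU picks up the CPU writes, and dtor()
 * frees the backing memory.
 */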
#include "priv.h"

#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>

struct gk20a_instobj {
	struct nvkm_memory memory;
	struct nvkm_mem mem;
	struct gk20a_instmem *imem;

	/* CPU mapping */
	u32 *vaddr;
};
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)

/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
	struct gk20a_instobj base;

	dma_addr_t handle;
	struct nvkm_mm_node r;
};
#define gk20a_instobj_dma(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)

/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
	struct gk20a_instobj base;

	/* to link into gk20a_instmem::vaddr_lru */
	struct list_head vaddr_node;
	/* how many clients are using vaddr? */
	u32 use_cpt;

	/* will point to the higher half of pages */
	dma_addr_t *dma_addrs;
	/* array of base.mem->size pages (+ dma_addr_ts) */
	struct page *pages[];
};
#define gk20a_instobj_iommu(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)

struct gk20a_instmem {
	struct nvkm_instmem base;

	/* protects vaddr_* and gk20a_instobj::vaddr* */
	spinlock_t lock;

	/* CPU mappings LRU */
	unsigned int vaddr_use;
	unsigned int vaddr_max;
	struct list_head vaddr_lru;

	/* Only used if an IOMMU is present */
	struct mutex *mm_mutex;
	struct nvkm_mm *mm;
	struct iommu_domain *domain;
	unsigned long iommu_pgshift;
	u16 iommu_bit;

	/* Only used by DMA API */
	struct dma_attrs attrs;
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)

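/*
 * nvkm_memory accessors: instobj memory always lives in host (system) memory,
 * and the object's size is tracked in 4KiB GPU pages, hence the << 12 in
 * gk20a_instobj_size().
 */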
static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_HOST;
}

static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
	return gk20a_instobj(memory)->mem.offset;
}

static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
	return (u64)gk20a_instobj(memory)->mem.size << 12;
}

/*
 * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held.
 */
static void
gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
{
	struct gk20a_instmem *imem = obj->base.imem;
	/* there should not be any user left... */
	WARN_ON(obj->use_cpt);
	list_del(&obj->vaddr_node);
	vunmap(obj->base.vaddr);
	obj->base.vaddr = NULL;
	imem->vaddr_use -= nvkm_memory_size(&obj->base.memory);
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
		   imem->vaddr_max);
}

/*
 * Must be called while holding gk20a_instmem::lock
 */
static void
gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
{
	while (imem->vaddr_use + size > imem->vaddr_max) {
		/* no candidate that can be unmapped, abort... */
		if (list_empty(&imem->vaddr_lru))
			break;

		gk20a_instobj_iommu_recycle_vaddr(
				list_first_entry(&imem->vaddr_lru,
				struct gk20a_instobj_iommu, vaddr_node));
	}
}

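/*
 * DMA-backed objects keep the kernel mapping obtained from dma_alloc_attrs()
 * for their whole lifetime, so acquiring them only requires flushing the GPU
 * L2 cache so the CPU does not read stale data.
 */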
static void __iomem *
gk20a_instobj_acquire_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	nvkm_ltc_flush(ltc);

	return node->vaddr;
}

static void __iomem *
gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	const u64 size = nvkm_memory_size(memory);
	unsigned long flags;

	nvkm_ltc_flush(ltc);

	spin_lock_irqsave(&imem->lock, flags);

	if (node->base.vaddr) {
		if (!node->use_cpt) {
			/* remove from LRU list since mapping in use again */
			list_del(&node->vaddr_node);
		}
		goto out;
	}

	/* try to free some address space if we reached the limit */
	gk20a_instmem_vaddr_gc(imem, size);

	/* map the pages */
	node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
				pgprot_writecombine(PAGE_KERNEL));
	if (!node->base.vaddr) {
		nvkm_error(&imem->base.subdev, "cannot map instobj - "
			   "this is not going to end well...\n");
		goto out;
	}

	imem->vaddr_use += size;
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
		   imem->vaddr_use, imem->vaddr_max);

out:
	node->use_cpt++;
	spin_unlock_irqrestore(&imem->lock, flags);

	return node->base.vaddr;
}

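/*
 * On release, CPU writes must become visible to the GPU: wmb() orders the
 * preceding stores through the write-combined mapping, and invalidating the
 * GPU L2 makes the GPU fetch the updated object from memory.
 */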
static void
gk20a_instobj_release_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	/* in case we got a write-combined mapping */
	wmb();
	nvkm_ltc_invalidate(ltc);
}

static void
gk20a_instobj_release_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	unsigned long flags;

	spin_lock_irqsave(&imem->lock, flags);

	/* we should at least have one user to release... */
	if (WARN_ON(node->use_cpt == 0))
		goto out;

	/* add unused objs to the LRU list to recycle their mapping */
	if (--node->use_cpt == 0)
		list_add_tail(&node->vaddr_node, &imem->vaddr_lru);

out:
	spin_unlock_irqrestore(&imem->lock, flags);

	wmb();
	nvkm_ltc_invalidate(ltc);
}

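/*
 * rd32/wr32 access the object through its CPU mapping; they are expected to
 * be used between acquire() and release(), which guarantee the mapping exists
 * and perform the GPU L2 maintenance.
 */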
static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	return node->vaddr[offset / 4];
}

static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	node->vaddr[offset / 4] = data;
}

static void
gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	nvkm_vm_map_at(vma, offset, &node->mem);
}

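/*
 * The destructors undo their respective constructors: the DMA variant frees
 * the contiguous allocation, the IOMMU variant unmaps the range from the GPU
 * IOMMU space and releases the individual pages.
 */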
static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;

	if (unlikely(!node->base.vaddr))
		goto out;

	dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
		       node->handle, &imem->attrs);

out:
	return node;
}

static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;
	struct nvkm_mm_node *r;
	unsigned long flags;
	int i;

	if (unlikely(list_empty(&node->base.mem.regions)))
		goto out;

	spin_lock_irqsave(&imem->lock, flags);

	/* recycle the CPU mapping unless it has already been recycled */
	if (node->base.vaddr)
		gk20a_instobj_iommu_recycle_vaddr(node);

	spin_unlock_irqrestore(&imem->lock, flags);

	r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
			     rl_entry);

	/* clear IOMMU bit to unmap pages */
	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);

	/* Unmap pages from GPU address space and free them */
	for (i = 0; i < node->base.mem.size; i++) {
		iommu_unmap(imem->domain,
			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	/* Release area from GPU address space */
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

out:
	return node;
}

static const struct nvkm_memory_func
gk20a_instobj_func_dma = {
	.dtor = gk20a_instobj_dtor_dma,
	.target = gk20a_instobj_target,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_dma,
	.release = gk20a_instobj_release_dma,
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_func
gk20a_instobj_func_iommu = {
	.dtor = gk20a_instobj_dtor_iommu,
	.target = gk20a_instobj_target,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_iommu,
	.release = gk20a_instobj_release_iommu,
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
	.map = gk20a_instobj_map,
};

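/*
 * Allocate a physically contiguous, write-combined buffer with the DMA API
 * and describe it as a single nvkm_mm_node so it can be mapped into the GPU
 * VM using small (4KiB) pages.
 */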
static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
		       struct gk20a_instobj **_node)
{
	struct gk20a_instobj_dma *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;

	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;

	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);

	node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					   &node->handle, GFP_KERNEL,
					   &imem->attrs);
	if (!node->base.vaddr) {
		nvkm_error(subdev, "cannot allocate DMA memory\n");
		return -ENOMEM;
	}

	/* alignment check */
	if (unlikely(node->handle & (align - 1)))
		nvkm_warn(subdev,
			  "memory not aligned as requested: %pad (0x%x)\n",
			  &node->handle, align);

	/* present memory for being mapped using small pages */
	node->r.type = 12;
	node->r.offset = node->handle >> 12;
	node->r.length = (npages << PAGE_SHIFT) >> 12;

	node->base.mem.offset = node->handle;

	INIT_LIST_HEAD(&node->base.mem.regions);
	list_add_tail(&node->r.rl_entry, &node->base.mem.regions);

	return 0;
}

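/*
 * Allocate individual pages, DMA-map each of them, reserve a contiguous
 * region of the GPU IOMMU space, and map the pages there so the GPU sees a
 * single contiguous allocation.
 */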
static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
			 struct gk20a_instobj **_node)
{
	struct gk20a_instobj_iommu *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;
	struct nvkm_mm_node *r;
	int ret;
	int i;

	/*
	 * despite their variable size, instmem allocations are small enough
	 * (< 1 page) to be handled by kzalloc
	 */
	if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
			     sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;
	node->dma_addrs = (void *)(node->pages + npages);

	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);

	/* Allocate backing memory */
	for (i = 0; i < npages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		dma_addr_t dma_adr;

		if (p == NULL) {
			ret = -ENOMEM;
			goto free_pages;
		}
		node->pages[i] = p;
		dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_adr)) {
			nvkm_error(subdev, "DMA mapping error!\n");
			ret = -ENOMEM;
			goto free_pages;
		}
		node->dma_addrs[i] = dma_adr;
	}

	mutex_lock(imem->mm_mutex);
	/* Reserve area from GPU address space */
	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
			   align >> imem->iommu_pgshift, &r);
	mutex_unlock(imem->mm_mutex);
	if (ret) {
		nvkm_error(subdev, "IOMMU space is full!\n");
		goto free_pages;
	}

	/* Map into GPU address space */
	for (i = 0; i < npages; i++) {
		u32 offset = (r->offset + i) << imem->iommu_pgshift;

		ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
		if (ret < 0) {
			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);

			while (i-- > 0) {
				offset -= PAGE_SIZE;
				iommu_unmap(imem->domain, offset, PAGE_SIZE);
			}
			goto release_area;
		}
	}

	/* IOMMU bit tells that an address is to be resolved through the IOMMU */
	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);

	node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;

	INIT_LIST_HEAD(&node->base.mem.regions);
	list_add_tail(&r->rl_entry, &node->base.mem.regions);

	return 0;

release_area:
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

free_pages:
	for (i = 0; i < npages && node->pages[i] != NULL; i++) {
		dma_addr_t dma_addr = node->dma_addrs[i];
		if (dma_addr)
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	return ret;
}

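/*
 * Entry point for instobj allocation: round size and alignment up to page
 * granularity, then pick the IOMMU or DMA constructor depending on whether an
 * IOMMU domain was probed for this device.
 */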
static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		  struct nvkm_memory **pmemory)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct gk20a_instobj *node = NULL;
	int ret;

	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
		   imem->domain ? "IOMMU" : "DMA", size, align);

	/* Round size and align to page bounds */
	size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
	align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);

	if (imem->domain)
		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
					       align, &node);
	else
		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
					     align, &node);
	*pmemory = node ? &node->memory : NULL;
	if (ret)
		return ret;

	node->imem = imem;

	/* present memory for being mapped using small pages */
	node->mem.size = size >> 12;
	node->mem.memtype = 0;
	node->mem.page_shift = 12;

	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
		   size, align, node->mem.offset);

	return 0;
}

static void *
gk20a_instmem_dtor(struct nvkm_instmem *base)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);

	/* perform some sanity checks... */
	if (!list_empty(&imem->vaddr_lru))
		nvkm_warn(&base->subdev, "instobj LRU not empty!\n");

	if (imem->vaddr_use != 0)
		nvkm_warn(&base->subdev, "instobj vmap area not empty! "
			  "0x%x bytes still mapped\n", imem->vaddr_use);

	return imem;
}

static const struct nvkm_instmem_func
gk20a_instmem = {
	.dtor = gk20a_instmem_dtor,
	.memory_new = gk20a_instobj_new,
	.persistent = true,
	.zero = false,
};

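/*
 * Instmem constructor: either inherit the Tegra IOMMU state (domain, mm,
 * page shift, and the address bit that flags IOMMU translation) or set up the
 * DMA attributes (non-consistent, weakly ordered, write-combined) used for
 * contiguous allocations.
 */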
int
gk20a_instmem_new(struct nvkm_device *device, int index,
		  struct nvkm_instmem **pimem)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gk20a_instmem *imem;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
	spin_lock_init(&imem->lock);
	*pimem = &imem->base;

	/* do not allow more than 1MB of CPU-mapped instmem */
	imem->vaddr_use = 0;
	imem->vaddr_max = 0x100000;
	INIT_LIST_HEAD(&imem->vaddr_lru);

	if (tdev->iommu.domain) {
		imem->mm_mutex = &tdev->iommu.mutex;
		imem->mm = &tdev->iommu.mm;
		imem->domain = tdev->iommu.domain;
		imem->iommu_pgshift = tdev->iommu.pgshift;
		imem->iommu_bit = tdev->func->iommu_bit;

		nvkm_info(&imem->base.subdev, "using IOMMU\n");
	} else {
		init_dma_attrs(&imem->attrs);
		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
		dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);

		nvkm_info(&imem->base.subdev, "using DMA API\n");
	}

	return 0;
}