/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * GK20A does not have dedicated video memory, and to accurately represent
 * this fact Nouveau will not create a RAM device for it. Therefore its
 * instmem implementation must be done directly on top of system memory, while
 * preserving coherency for read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
 *    pages contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed through a write-combined
 * mapping. The GPU L2 cache must thus be flushed/invalidated when required.
 * To be conservative we do this every time we acquire or release an instobj,
 * but ideally L2 management should be handled at a higher level.
 *
 * To improve performance, CPU mappings are not removed upon instobj release.
 * Instead they are placed into an LRU list to be recycled when the mapped
 * space goes beyond a certain threshold. At the moment this limit is 1MB.
 */
#include "priv.h"

#include <core/memory.h>
#include <core/tegra.h>
#include <subdev/ltc.h>
#include <subdev/mmu.h>

struct gk20a_instobj {
	struct nvkm_instobj base;
	struct nvkm_mm_node *mn;
	struct gk20a_instmem *imem;

	/* CPU mapping */
	u32 *vaddr;
};
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, base.memory)

/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
	struct gk20a_instobj base;

	dma_addr_t handle;
	struct nvkm_mm_node r;
};
#define gk20a_instobj_dma(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)

/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
	struct gk20a_instobj base;

	/* to link into gk20a_instmem::vaddr_lru */
	struct list_head vaddr_node;
	/* how many clients are using vaddr? */
	u32 use_cpt;

	/* will point to the higher half of pages */
	dma_addr_t *dma_addrs;
	/* array of base.mn->length pages (+ dma_addr_ts) */
	struct page *pages[];
};
#define gk20a_instobj_iommu(p) \
	container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)

struct gk20a_instmem {
	struct nvkm_instmem base;

	/* protects vaddr_* and gk20a_instobj::vaddr* */
	struct mutex lock;

	/* CPU mappings LRU */
	unsigned int vaddr_use;
	unsigned int vaddr_max;
	struct list_head vaddr_lru;

	/* Only used if an IOMMU is present */
	struct mutex *mm_mutex;
	struct nvkm_mm *mm;
	struct iommu_domain *domain;
	unsigned long iommu_pgshift;
	u16 iommu_bit;

	/* Only used by DMA API */
	unsigned long attrs;
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)

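/*
 * Common nvkm_memory accessors: instobjs always live in non-coherent system
 * memory, are managed in 4KiB (1 << 12) pages, and expose the GPU address and
 * size recorded in the nvkm_mm_node set up at construction time.
 */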
static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
	return NVKM_MEM_TARGET_NCOH;
}

static u8
gk20a_instobj_page(struct nvkm_memory *memory)
{
	return 12;
}

static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
	return (u64)gk20a_instobj(memory)->mn->offset << 12;
}

static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
	return (u64)gk20a_instobj(memory)->mn->length << 12;
}

/*
 * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held.
 */
static void
gk20a_instobj_iommu_recycle_vaddr(struct gk20a_instobj_iommu *obj)
{
	struct gk20a_instmem *imem = obj->base.imem;
	/* there should not be any user left... */
	WARN_ON(obj->use_cpt);
	list_del(&obj->vaddr_node);
	vunmap(obj->base.vaddr);
	obj->base.vaddr = NULL;
	imem->vaddr_use -= nvkm_memory_size(&obj->base.base.memory);
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
		   imem->vaddr_max);
}

/*
 * Must be called while holding gk20a_instmem::lock
 */
static void
gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
{
	while (imem->vaddr_use + size > imem->vaddr_max) {
		/* no candidate that can be unmapped, abort... */
		if (list_empty(&imem->vaddr_lru))
			break;

		gk20a_instobj_iommu_recycle_vaddr(
				list_first_entry(&imem->vaddr_lru,
				struct gk20a_instobj_iommu, vaddr_node));
	}
}

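/*
 * The DMA-path CPU mapping is created at construction time and kept for the
 * whole lifetime of the object, so acquiring it only requires flushing the
 * GPU L2 cache before the CPU accesses the data.
 */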
static void __iomem *
gk20a_instobj_acquire_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	nvkm_ltc_flush(ltc);

	return node->vaddr;
}

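/*
 * IOMMU-path objects are vmap'd on demand: reuse an existing CPU mapping when
 * there is one (taking it off the LRU list if it was unused), otherwise
 * recycle LRU mappings until the new write-combined vmap fits below the
 * vaddr_max limit.
 */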
static void __iomem *
gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
	const u64 size = nvkm_memory_size(memory);

	nvkm_ltc_flush(ltc);

	mutex_lock(&imem->lock);

	if (node->base.vaddr) {
		if (!node->use_cpt) {
			/* remove from LRU list since mapping in use again */
			list_del(&node->vaddr_node);
		}
		goto out;
	}

	/* try to free some address space if we reached the limit */
	gk20a_instmem_vaddr_gc(imem, size);

	/* map the pages */
	node->base.vaddr = vmap(node->pages, size >> PAGE_SHIFT, VM_MAP,
				pgprot_writecombine(PAGE_KERNEL));
	if (!node->base.vaddr) {
		nvkm_error(&imem->base.subdev, "cannot map instobj - "
			   "this is not going to end well...\n");
		goto out;
	}

	imem->vaddr_use += size;
	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
		   imem->vaddr_use, imem->vaddr_max);

out:
	node->use_cpt++;
	mutex_unlock(&imem->lock);

	return node->base.vaddr;
}

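/*
 * Nothing to unmap on the DMA path; just make sure writes done through the
 * write-combined mapping have reached memory, and invalidate the GPU L2 cache
 * so the GPU reads up-to-date data.
 */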
static void
gk20a_instobj_release_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct gk20a_instmem *imem = node->imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	/* in case we got a write-combined mapping */
	wmb();
	nvkm_ltc_invalidate(ltc);
}

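/*
 * Drop one user of the CPU mapping; once the count reaches zero the mapping
 * is placed on the LRU list so it can be recycled later instead of being torn
 * down immediately.
 */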
static void
gk20a_instobj_release_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;

	mutex_lock(&imem->lock);

	/* we should at least have one user to release... */
	if (WARN_ON(node->use_cpt == 0))
		goto out;

	/* add unused objs to the LRU list to recycle their mapping */
	if (--node->use_cpt == 0)
		list_add_tail(&node->vaddr_node, &imem->vaddr_lru);

out:
	mutex_unlock(&imem->lock);

	wmb();
	nvkm_ltc_invalidate(ltc);
}

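/*
 * rd32/wr32 access the object through its CPU mapping (vaddr); on the IOMMU
 * path this requires the object to have been acquired first.
 */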
static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	return node->vaddr[offset / 4];
}

static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);

	node->vaddr[offset / 4] = data;
}

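/*
 * GPU mapping is delegated to the VMM: describe the object through its
 * nvkm_mm_node and let nvkm_vmm_map program the page tables.
 */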
static int
gk20a_instobj_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
		  struct nvkm_vma *vma, void *argv, u32 argc)
{
	struct gk20a_instobj *node = gk20a_instobj(memory);
	struct nvkm_vmm_map map = {
		.memory = &node->base.memory,
		.offset = offset,
		.mem = node->mn,
	};

	return nvkm_vmm_map(vmm, vma, argv, argc, &map);
}

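/*
 * DMA-path destructor: the backing memory was obtained from a single
 * dma_alloc_attrs() call, so one dma_free_attrs() releases both the memory
 * and its CPU mapping.
 */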
static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{
	struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;

	if (unlikely(!node->base.vaddr))
		goto out;

	dma_free_attrs(dev, (u64)node->base.mn->length << PAGE_SHIFT,
		       node->base.vaddr, node->handle, imem->attrs);

out:
	return node;
}

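/*
 * IOMMU-path destructor: recycle any remaining CPU mapping, unmap each page
 * from the IOMMU and the DMA API, free the pages, and return the reserved
 * range to the GPU address space allocator.
 */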
static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{
	struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
	struct gk20a_instmem *imem = node->base.imem;
	struct device *dev = imem->base.subdev.device->dev;
	struct nvkm_mm_node *r = node->base.mn;
	int i;

	if (unlikely(!r))
		goto out;

	mutex_lock(&imem->lock);

	/* recycle the vaddr unless it has already been recycled */
	if (node->base.vaddr)
		gk20a_instobj_iommu_recycle_vaddr(node);

	mutex_unlock(&imem->lock);

	/* clear IOMMU bit to unmap pages */
	r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);

	/* Unmap pages from GPU address space and free them */
	for (i = 0; i < node->base.mn->length; i++) {
		iommu_unmap(imem->domain,
			    (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
		dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	/* Release area from GPU address space */
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

out:
	return node;
}

static const struct nvkm_memory_func
gk20a_instobj_func_dma = {
	.dtor = gk20a_instobj_dtor_dma,
	.target = gk20a_instobj_target,
	.page = gk20a_instobj_page,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_dma,
	.release = gk20a_instobj_release_dma,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_func
gk20a_instobj_func_iommu = {
	.dtor = gk20a_instobj_dtor_iommu,
	.target = gk20a_instobj_target,
	.page = gk20a_instobj_page,
	.addr = gk20a_instobj_addr,
	.size = gk20a_instobj_size,
	.acquire = gk20a_instobj_acquire_iommu,
	.release = gk20a_instobj_release_iommu,
	.map = gk20a_instobj_map,
};

static const struct nvkm_memory_ptrs
gk20a_instobj_ptrs = {
	.rd32 = gk20a_instobj_rd32,
	.wr32 = gk20a_instobj_wr32,
};

static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
		       struct gk20a_instobj **_node)
{
	struct gk20a_instobj_dma *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;

	if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;

	nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.base.memory);
	node->base.base.memory.ptrs = &gk20a_instobj_ptrs;

	node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
					   &node->handle, GFP_KERNEL,
					   imem->attrs);
	if (!node->base.vaddr) {
		nvkm_error(subdev, "cannot allocate DMA memory\n");
		return -ENOMEM;
	}

	/* alignment check */
	if (unlikely(node->handle & (align - 1)))
		nvkm_warn(subdev,
			  "memory not aligned as requested: %pad (0x%x)\n",
			  &node->handle, align);

	/* present memory for being mapped using small pages */
	node->r.type = 12;
	node->r.offset = node->handle >> 12;
	node->r.length = (npages << PAGE_SHIFT) >> 12;

	node->base.mn = &node->r;
	return 0;
}

static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
			 struct gk20a_instobj **_node)
{
	struct gk20a_instobj_iommu *node;
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct device *dev = subdev->device->dev;
	struct nvkm_mm_node *r;
	int ret;
	int i;

	/*
	 * despite their variable size, instmem allocations are small enough
	 * (< 1 page) to be handled by kzalloc
	 */
	if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
			sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
		return -ENOMEM;
	*_node = &node->base;
	node->dma_addrs = (void *)(node->pages + npages);

	nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.base.memory);
	node->base.base.memory.ptrs = &gk20a_instobj_ptrs;

	/* Allocate backing memory */
	for (i = 0; i < npages; i++) {
		struct page *p = alloc_page(GFP_KERNEL);
		dma_addr_t dma_adr;

		if (p == NULL) {
			ret = -ENOMEM;
			goto free_pages;
		}
		node->pages[i] = p;
		dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_adr)) {
			nvkm_error(subdev, "DMA mapping error!\n");
			ret = -ENOMEM;
			goto free_pages;
		}
		node->dma_addrs[i] = dma_adr;
	}

	mutex_lock(imem->mm_mutex);
	/* Reserve area from GPU address space */
	ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
			   align >> imem->iommu_pgshift, &r);
	mutex_unlock(imem->mm_mutex);
	if (ret) {
		nvkm_error(subdev, "IOMMU space is full!\n");
		goto free_pages;
	}

	/* Map into GPU address space */
	for (i = 0; i < npages; i++) {
		u32 offset = (r->offset + i) << imem->iommu_pgshift;

		ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
				PAGE_SIZE, IOMMU_READ | IOMMU_WRITE,
				GFP_KERNEL);
		if (ret < 0) {
			nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);

			while (i-- > 0) {
				offset -= PAGE_SIZE;
				iommu_unmap(imem->domain, offset, PAGE_SIZE);
			}
			goto release_area;
		}
	}

	/* the IOMMU bit marks addresses that are to be resolved through the IOMMU */
	r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);

	node->base.mn = r;
	return 0;

release_area:
	mutex_lock(imem->mm_mutex);
	nvkm_mm_free(imem->mm, &r);
	mutex_unlock(imem->mm_mutex);

free_pages:
	for (i = 0; i < npages && node->pages[i] != NULL; i++) {
		dma_addr_t dma_addr = node->dma_addrs[i];
		if (dma_addr)
			dma_unmap_page(dev, dma_addr, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		__free_page(node->pages[i]);
	}

	return ret;
}

static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
		  struct nvkm_memory **pmemory)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);
	struct nvkm_subdev *subdev = &imem->base.subdev;
	struct gk20a_instobj *node = NULL;
	int ret;

	nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
		   imem->domain ? "IOMMU" : "DMA", size, align);

	/* Round size and align to page bounds */
	size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
	align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);

	if (imem->domain)
		ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
					       align, &node);
	else
		ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
					     align, &node);
	*pmemory = node ? &node->base.memory : NULL;
	if (ret)
		return ret;

	node->imem = imem;

	nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
		   size, align, (u64)node->mn->offset << 12);

	return 0;
}

static void *
gk20a_instmem_dtor(struct nvkm_instmem *base)
{
	struct gk20a_instmem *imem = gk20a_instmem(base);

	/* perform some sanity checks... */
	if (!list_empty(&imem->vaddr_lru))
		nvkm_warn(&base->subdev, "instobj LRU not empty!\n");

	if (imem->vaddr_use != 0)
		nvkm_warn(&base->subdev, "instobj vmap area not empty! "
			  "0x%x bytes still mapped\n", imem->vaddr_use);

	return imem;
}

static const struct nvkm_instmem_func
gk20a_instmem = {
	.dtor = gk20a_instmem_dtor,
	.suspend = nv04_instmem_suspend,
	.resume = nv04_instmem_resume,
	.memory_new = gk20a_instobj_new,
	.zero = false,
};

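/*
 * Probe-time constructor: use the Tegra IOMMU when one is available,
 * otherwise fall back to the DMA API with write-combined, weakly-ordered
 * allocations.
 */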
int
gk20a_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
		  struct nvkm_instmem **pimem)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gk20a_instmem *imem;

	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_instmem_ctor(&gk20a_instmem, device, type, inst, &imem->base);
	mutex_init(&imem->lock);
	*pimem = &imem->base;

	/* do not allow more than 1MB of CPU-mapped instmem */
	imem->vaddr_use = 0;
	imem->vaddr_max = 0x100000;
	INIT_LIST_HEAD(&imem->vaddr_lru);

	if (tdev->iommu.domain) {
		imem->mm_mutex = &tdev->iommu.mutex;
		imem->mm = &tdev->iommu.mm;
		imem->domain = tdev->iommu.domain;
		imem->iommu_pgshift = tdev->iommu.pgshift;
		imem->iommu_bit = tdev->func->iommu_bit;

		nvkm_info(&imem->base.subdev, "using IOMMU\n");
	} else {
		imem->attrs = DMA_ATTR_WEAK_ORDERING |
			      DMA_ATTR_WRITE_COMBINE;

		nvkm_info(&imem->base.subdev, "using DMA API\n");
	}

	return 0;
}