/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_dmem.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
#include "nouveau_svm.h"

#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>

#include <linux/sched/mm.h>
#include <linux/hmm.h>

/*
 * FIXME: this is ugly. Right now we are using TTM to allocate vram and we pin
 * it in vram while in use. We likely want to overhaul memory management for
 * nouveau to be more page-like (not necessarily with the system page size but
 * a bigger page size) at the lowest level and have some shim layer on top
 * that provides the same functionality as TTM.
 */
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)

enum nouveau_aper {
	NOUVEAU_APER_VIRT,
	NOUVEAU_APER_VRAM,
	NOUVEAU_APER_HOST,
};

typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
				      enum nouveau_aper, u64 dst_addr,
				      enum nouveau_aper, u64 src_addr);
typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
				    enum nouveau_aper, u64 dst_addr);

struct nouveau_dmem_chunk {
	struct list_head list;
	struct nouveau_bo *bo;
	struct nouveau_drm *drm;
	unsigned long callocated;
	struct dev_pagemap pagemap;
};

struct nouveau_dmem_migrate {
	nouveau_migrate_copy_t copy_func;
	nouveau_clear_page_t clear_func;
	struct nouveau_channel *chan;
};

struct nouveau_dmem {
	struct nouveau_drm *drm;
	struct nouveau_dmem_migrate migrate;
	struct list_head chunks;
	struct mutex mutex;
	struct page *free_pages;
	spinlock_t lock;
};

static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
	return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
}

static struct nouveau_drm *page_to_drm(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);

	return chunk->drm;
}

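/*
 * Translate a device-private struct page into the VRAM address backing it:
 * the page's offset within the chunk's physical address range, added to the
 * offset of the chunk's buffer object.
 */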
static unsigned long nouveau_dmem_page_addr(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
				chunk->pagemap.res.start;

	return chunk->bo->bo.offset + off;
}

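/*
 * dev_pagemap page_free() callback: return the page to the per-device free
 * list and drop the owning chunk's allocation count.
 */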
static void nouveau_dmem_page_free(struct page *page)
{
	struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
	struct nouveau_dmem *dmem = chunk->drm->dmem;

	spin_lock(&dmem->lock);
	page->zone_device_data = dmem->free_pages;
	dmem->free_pages = page;

	WARN_ON(!chunk->callocated);
	chunk->callocated--;
	/*
	 * FIXME when chunk->callocated reaches 0 we should add the chunk to
	 * a reclaim list so that it can be freed in case of memory pressure.
	 */
	spin_unlock(&dmem->lock);
}

static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
{
	if (fence) {
		nouveau_fence_wait(*fence, true, false);
		nouveau_fence_unref(fence);
	} else {
		/*
		 * FIXME wait for the channel to be IDLE before finalizing
		 * the hmem object.
		 */
	}
}

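/*
 * Copy a single page back from VRAM into a freshly allocated, DMA-mapped
 * system page on CPU fault. Returns 0 if the page is not migratable or the
 * copy was queued, VM_FAULT_SIGBUS on allocation, mapping or copy failure.
 */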
static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm,
		struct vm_fault *vmf, struct migrate_vma *args,
		dma_addr_t *dma_addr)
{
	struct device *dev = drm->dev->dev;
	struct page *dpage, *spage;

	spage = migrate_pfn_to_page(args->src[0]);
	if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
		return 0;

	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
	if (!dpage)
		return VM_FAULT_SIGBUS;
	lock_page(dpage);

	*dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr))
		goto error_free_page;

	if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
			NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage)))
		goto error_dma_unmap;

	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	return 0;

error_dma_unmap:
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
error_free_page:
	__free_page(dpage);
	return VM_FAULT_SIGBUS;
}

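/*
 * dev_pagemap migrate_to_ram() callback: migrate one device-private page
 * back to system memory when the CPU faults on it, waiting on the copy
 * fence before the destination page is unmapped.
 */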
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct nouveau_drm *drm = page_to_drm(vmf->page);
	struct nouveau_dmem *dmem = drm->dmem;
	struct nouveau_fence *fence;
	unsigned long src = 0, dst = 0;
	dma_addr_t dma_addr = 0;
	vm_fault_t ret;
	struct migrate_vma args = {
		.vma = vmf->vma,
		.start = vmf->address,
		.end = vmf->address + PAGE_SIZE,
		.src = &src,
		.dst = &dst,
		.src_owner = drm->dev,
	};

	/*
	 * FIXME what we really want is to find some heuristic to migrate more
	 * than just one page on CPU fault. When such a fault happens it is
	 * very likely that more surrounding pages will CPU fault too.
	 */
	if (migrate_vma_setup(&args) < 0)
		return VM_FAULT_SIGBUS;
	if (!args.cpages)
		return 0;

	ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
	if (ret || dst == 0)
		goto done;

	nouveau_fence_new(dmem->migrate.chan, false, &fence);
	migrate_vma_pages(&args);
	nouveau_dmem_fence_done(&fence);
	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
done:
	migrate_vma_finalize(&args);
	return ret;
}

static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
	.page_free = nouveau_dmem_page_free,
	.migrate_to_ram = nouveau_dmem_migrate_to_ram,
};

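/*
 * Allocate a new DMEM_CHUNK_SIZE (2MB) chunk of device memory: reserve an
 * unused physical address range for the device-private struct pages,
 * allocate and pin a VRAM buffer object backing it, register the pagemap,
 * and put all but the last page on the free list. The remaining page is
 * handed back to the caller as an already allocated page.
 */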
static int
nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
	struct nouveau_dmem_chunk *chunk;
	struct resource *res;
	struct page *page;
	void *ptr;
	unsigned long i, pfn_first;
	int ret;

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (chunk == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	/* Allocate unused physical address space for device private pages. */
	res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
				      "nouveau_dmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out_free;
	}

	chunk->drm = drm;
	chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
	chunk->pagemap.res = *res;
	chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
	chunk->pagemap.owner = drm->dev;

	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
			     TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
			     &chunk->bo);
	if (ret)
		goto out_release;

	ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
	if (ret)
		goto out_bo_free;

	ptr = memremap_pages(&chunk->pagemap, numa_node_id());
	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		goto out_bo_unpin;
	}

	mutex_lock(&drm->dmem->mutex);
	list_add(&chunk->list, &drm->dmem->chunks);
	mutex_unlock(&drm->dmem->mutex);

	pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
	page = pfn_to_page(pfn_first);
	spin_lock(&drm->dmem->lock);
	for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
		page->zone_device_data = drm->dmem->free_pages;
		drm->dmem->free_pages = page;
	}
	*ppage = page;
	chunk->callocated++;
	spin_unlock(&drm->dmem->lock);

	NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
		DMEM_CHUNK_SIZE >> 20);

	return 0;

out_bo_unpin:
	nouveau_bo_unpin(chunk->bo);
out_bo_free:
	nouveau_bo_ref(NULL, &chunk->bo);
out_release:
	release_mem_region(chunk->pagemap.res.start,
			   resource_size(&chunk->pagemap.res));
out_free:
	kfree(chunk);
out:
	return ret;
}

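/*
 * Take a device page off the free list, or allocate a new chunk if the list
 * is empty. The returned page is referenced and locked.
 */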
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	struct page *page = NULL;
	int ret;

	spin_lock(&drm->dmem->lock);
	if (drm->dmem->free_pages) {
		page = drm->dmem->free_pages;
		drm->dmem->free_pages = page->zone_device_data;
		chunk = nouveau_page_to_chunk(page);
		chunk->callocated++;
		spin_unlock(&drm->dmem->lock);
	} else {
		spin_unlock(&drm->dmem->lock);
		ret = nouveau_dmem_chunk_alloc(drm, &page);
		if (ret)
			return NULL;
	}

	get_page(page);
	lock_page(page);
	return page;
}

static void
nouveau_dmem_page_free_locked(struct nouveau_drm *drm, struct page *page)
{
	unlock_page(page);
	put_page(page);
}

void
nouveau_dmem_resume(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;
	int ret;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list) {
		ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
		/* FIXME handle pin failure */
		WARN_ON(ret);
	}
	mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_suspend(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);
	list_for_each_entry(chunk, &drm->dmem->chunks, list)
		nouveau_bo_unpin(chunk->bo);
	mutex_unlock(&drm->dmem->mutex);
}

void
nouveau_dmem_fini(struct nouveau_drm *drm)
{
	struct nouveau_dmem_chunk *chunk, *tmp;

	if (drm->dmem == NULL)
		return;

	mutex_lock(&drm->dmem->mutex);

	list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
		nouveau_bo_unpin(chunk->bo);
		nouveau_bo_ref(NULL, &chunk->bo);
		list_del(&chunk->list);
		memunmap_pages(&chunk->pagemap);
		release_mem_region(chunk->pagemap.res.start,
				   resource_size(&chunk->pagemap.res));
		kfree(chunk);
	}

	mutex_unlock(&drm->dmem->mutex);
}

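/*
 * Queue a copy of npages pages on the copy-engine channel. Source and
 * destination can each be VRAM, host memory or a virtual address; for the
 * physical apertures the 0x0260/0x0264 methods select VRAM vs. host before
 * the launch descriptor is written at 0x0300.
 */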
static int
nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
		    enum nouveau_aper dst_aper, u64 dst_addr,
		    enum nouveau_aper src_aper, u64 src_addr)
{
	struct nouveau_channel *chan = drm->dmem->migrate.chan;
	u32 launch_dma = (1 << 9) /* MULTI_LINE_ENABLE. */ |
			 (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
			 (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
			 (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
			 (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
	int ret;

	ret = RING_SPACE(chan, 13);
	if (ret)
		return ret;

	if (src_aper != NOUVEAU_APER_VIRT) {
		switch (src_aper) {
		case NOUVEAU_APER_VRAM:
			BEGIN_IMC0(chan, NvSubCopy, 0x0260, 0);
			break;
		case NOUVEAU_APER_HOST:
			BEGIN_IMC0(chan, NvSubCopy, 0x0260, 1);
			break;
		default:
			return -EINVAL;
		}
		launch_dma |= 0x00001000; /* SRC_TYPE_PHYSICAL. */
	}

	if (dst_aper != NOUVEAU_APER_VIRT) {
		switch (dst_aper) {
		case NOUVEAU_APER_VRAM:
			BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
			break;
		case NOUVEAU_APER_HOST:
			BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
			break;
		default:
			return -EINVAL;
		}
		launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
	}

	BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
	OUT_RING (chan, upper_32_bits(src_addr));
	OUT_RING (chan, lower_32_bits(src_addr));
	OUT_RING (chan, upper_32_bits(dst_addr));
	OUT_RING (chan, lower_32_bits(dst_addr));
	OUT_RING (chan, PAGE_SIZE);
	OUT_RING (chan, PAGE_SIZE);
	OUT_RING (chan, PAGE_SIZE);
	OUT_RING (chan, npages);
	BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
	OUT_RING (chan, launch_dma);
	return 0;
}

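/*
 * Queue a clear of 'length' bytes at dst_addr using the copy engine's remap
 * feature: both remap constants are written as zero and each element is two
 * four-byte components, so the destination is filled with zeroes eight bytes
 * at a time (hence the length >> 3 line length).
 */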
static int
nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
		     enum nouveau_aper dst_aper, u64 dst_addr)
{
	struct nouveau_channel *chan = drm->dmem->migrate.chan;
	u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
			 (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
			 (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
			 (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
			 (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
	u32 remap = (4 << 0) /* DST_X_CONST_A */ |
		    (5 << 4) /* DST_Y_CONST_B */ |
		    (3 << 16) /* COMPONENT_SIZE_FOUR */ |
		    (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
	int ret;

	ret = RING_SPACE(chan, 12);
	if (ret)
		return ret;

	switch (dst_aper) {
	case NOUVEAU_APER_VRAM:
		BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
		break;
	case NOUVEAU_APER_HOST:
		BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
		break;
	default:
		return -EINVAL;
	}
	launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */

	BEGIN_NVC0(chan, NvSubCopy, 0x0700, 3);
	OUT_RING(chan, 0);
	OUT_RING(chan, 0);
	OUT_RING(chan, remap);
	BEGIN_NVC0(chan, NvSubCopy, 0x0408, 2);
	OUT_RING(chan, upper_32_bits(dst_addr));
	OUT_RING(chan, lower_32_bits(dst_addr));
	BEGIN_NVC0(chan, NvSubCopy, 0x0418, 1);
	OUT_RING(chan, length >> 3);
	BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
	OUT_RING(chan, launch_dma);
	return 0;
}

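/*
 * Bind the copy/clear helpers to the TTM copy-engine channel. Migration is
 * only wired up for the Pascal, Volta and Turing copy classes; any other
 * class returns -ENODEV and nouveau_dmem_init() tears dmem back down.
 */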
static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
	switch (drm->ttm.copy.oclass) {
	case PASCAL_DMA_COPY_A:
	case PASCAL_DMA_COPY_B:
	case VOLTA_DMA_COPY_A:
	case TURING_DMA_COPY_A:
		drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
		drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
		drm->dmem->migrate.chan = drm->ttm.chan;
		return 0;
	default:
		break;
	}
	return -ENODEV;
}

void
nouveau_dmem_init(struct nouveau_drm *drm)
{
	int ret;

	/* This only makes sense on PASCAL or newer */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_PASCAL)
		return;

	if (!(drm->dmem = kzalloc(sizeof(*drm->dmem), GFP_KERNEL)))
		return;

	drm->dmem->drm = drm;
	mutex_init(&drm->dmem->mutex);
	INIT_LIST_HEAD(&drm->dmem->chunks);
	spin_lock_init(&drm->dmem->lock);

	/* Initialize migration dma helpers before registering memory */
	ret = nouveau_dmem_migrate_init(drm);
	if (ret) {
		kfree(drm->dmem);
		drm->dmem = NULL;
	}
}

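/*
 * Migrate one system page into a freshly allocated device-private page:
 * DMA-map the source and queue a host-to-VRAM copy, or queue a clear when
 * there is no source page. Also builds the nvif PFN entry later used to
 * update the GPU page tables.
 */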
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
		unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
{
	struct device *dev = drm->dev->dev;
	struct page *dpage, *spage;
	unsigned long paddr;

	spage = migrate_pfn_to_page(src);
	if (!(src & MIGRATE_PFN_MIGRATE))
		goto out;

	dpage = nouveau_dmem_page_alloc_locked(drm);
	if (!dpage)
		goto out;

	paddr = nouveau_dmem_page_addr(dpage);
	if (spage) {
		*dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, *dma_addr))
			goto out_free_page;
		if (drm->dmem->migrate.copy_func(drm, page_size(spage),
			NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
			goto out_dma_unmap;
	} else {
		*dma_addr = DMA_MAPPING_ERROR;
		if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
			NOUVEAU_APER_VRAM, paddr))
			goto out_free_page;
	}

	*pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
		((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
	if (src & MIGRATE_PFN_WRITE)
		*pfn |= NVIF_VMM_PFNMAP_V0_W;
	return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;

out_dma_unmap:
	dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
	nouveau_dmem_page_free_locked(drm, dpage);
out:
	*pfn = NVIF_VMM_PFNMAP_V0_NONE;
	return 0;
}

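/*
 * Migrate one batch of pages set up by migrate_vma_setup(): queue the
 * per-page copies, commit the migration with migrate_vma_pages(), wait on
 * the fence, update the GPU page tables via nouveau_pfns_map(), then unmap
 * the DMA addresses and finalize.
 */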
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
		struct nouveau_svmm *svmm, struct migrate_vma *args,
		dma_addr_t *dma_addrs, u64 *pfns)
{
	struct nouveau_fence *fence;
	unsigned long addr = args->start, nr_dma = 0, i;

	for (i = 0; addr < args->end; i++) {
		args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
				dma_addrs + nr_dma, pfns + i);
		if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
			nr_dma++;
		addr += PAGE_SIZE;
	}

	nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
	migrate_vma_pages(args);
	nouveau_dmem_fence_done(&fence);
	nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);

	while (nr_dma--) {
		dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
				DMA_BIDIRECTIONAL);
	}
	migrate_vma_finalize(args);
}

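/*
 * Migrate a user VMA range into device memory on behalf of an SVM client.
 * The range is processed in batches of at most SG_MAX_SINGLE_ALLOC pages;
 * the src/dst pfn arrays, DMA addresses and nvif PFN entries are allocated
 * once and reused for every batch.
 */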
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
			 struct nouveau_svmm *svmm,
			 struct vm_area_struct *vma,
			 unsigned long start,
			 unsigned long end)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
	dma_addr_t *dma_addrs;
	struct migrate_vma args = {
		.vma = vma,
		.start = start,
	};
	unsigned long i;
	u64 *pfns;
	int ret = -ENOMEM;

	if (drm->dmem == NULL)
		return -ENODEV;

	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
	if (!args.src)
		goto out;
	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
	if (!args.dst)
		goto out_free_src;

	dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
	if (!dma_addrs)
		goto out_free_dst;

	pfns = nouveau_pfns_alloc(max);
	if (!pfns)
		goto out_free_dma;

	for (i = 0; i < npages; i += max) {
		/* Clamp the batch to the end of the requested range. */
		args.end = min(end, args.start + (max << PAGE_SHIFT));
		ret = migrate_vma_setup(&args);
		if (ret)
			goto out_free_pfns;

		if (args.cpages)
			nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
						   pfns);
		args.start = args.end;
	}

	ret = 0;
out_free_pfns:
	nouveau_pfns_free(pfns);
out_free_dma:
	kfree(dma_addrs);
out_free_dst:
	kfree(args.dst);
out_free_src:
	kfree(args.src);
out:
	return ret;
}

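/*
 * Rewrite the HMM pfn array for a snapshotted range: entries that point at
 * one of our device-private pages are converted to the VRAM address expected
 * by the nvif page-table code and flagged as VRAM.
 */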
void
nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
			 struct hmm_range *range)
{
	unsigned long i, npages;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		struct page *page;
		uint64_t addr;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (!is_device_private_page(page))
			continue;

		addr = nouveau_dmem_page_addr(page);
		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
		range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM;
	}
}