/*
 * Copyright (C) 2008 Ben Skeggs.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_gem_ttm_helper.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/push206e.h>
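
/*
 * Page-fault handler for mmap()ed GEM objects.  This wraps the generic
 * TTM fault path with nouveau's I/O reserve LRU bookkeeping, so that
 * mappings can be recycled when I/O space runs short.
 */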
static vm_fault_t
nouveau_ttm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        pgprot_t prot;
        vm_fault_t ret;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        ret = nouveau_ttm_fault_reserve_notify(bo);
        if (ret)
                goto error;

        nouveau_bo_del_io_reserve_lru(bo);
        prot = vm_get_page_prot(vma->vm_flags);
        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
        nouveau_bo_add_io_reserve_lru(bo);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;

error:
        dma_resv_unlock(bo->base.resv);
        return ret;
}

static const struct vm_operations_struct nouveau_ttm_vm_ops = {
        .fault = nouveau_ttm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access
};
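
/*
 * Final unreference of a GEM object.  A runtime-PM reference is held
 * across teardown so the device stays awake for any cleanup work.
 */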
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (WARN_ON(ret < 0 && ret != -EACCES)) {
                pm_runtime_put_autosuspend(dev);
                return;
        }

        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);

        ttm_bo_put(&nvbo->bo);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}
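
/*
 * Called for each DRM file that gains a handle to this object.  On NV50+
 * (outside of the VM_BIND uvmm path) this creates, or takes a reference
 * on, the per-client VMA mapping of the buffer.
 */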
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
        struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
        struct nouveau_vma *vma;
        int ret;

        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                return 0;

        if (nvbo->no_share && uvmm && &uvmm->resv != nvbo->bo.base.resv)
                return -EPERM;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0 && ret != -EACCES) {
                pm_runtime_put_autosuspend(dev);
                goto out;
        }

        /* only create a VMA on binding */
        if (!nouveau_cli_uvmm(cli))
                ret = nouveau_vma_new(nvbo, vmm, &vma);
        else
                ret = 0;

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
out:
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}
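
/*
 * VMA teardown can't always happen synchronously: if a fence is still
 * outstanding, the unmap is queued as client work and runs once the
 * fence signals (see nouveau_gem_object_unmap() below).
 */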
struct nouveau_gem_object_unmap {
        struct nouveau_cli_work work;
        struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
        nouveau_fence_unref(&vma->fence);
        nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
        struct nouveau_gem_object_unmap *work =
                container_of(w, typeof(*work), work);
        nouveau_gem_object_delete(work->vma);
        kfree(work);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
        struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
        struct nouveau_gem_object_unmap *work;

        list_del_init(&vma->head);

        if (!fence) {
                nouveau_gem_object_delete(vma);
                return;
        }

        if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
                WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
                nouveau_gem_object_delete(vma);
                return;
        }

        work->work.func = nouveau_gem_object_delete_work;
        work->vma = vma;
        nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
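
/*
 * Called when a DRM file drops its handle to this object; releases the
 * per-client VMA reference taken in nouveau_gem_object_open().
 */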
static void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
        struct nouveau_vma *vma;
        int ret;

        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                return;

        if (nouveau_cli_uvmm(cli))
                return;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return;

        vma = nouveau_vma_find(nvbo, vmm);
        if (vma) {
                if (--vma->refs == 0) {
                        ret = pm_runtime_get_sync(dev);
                        if (!WARN_ON(ret < 0 && ret != -EACCES)) {
                                nouveau_gem_object_unmap(nvbo, vma);
                                pm_runtime_mark_last_busy(dev);
                        }
                        pm_runtime_put_autosuspend(dev);
                }
        }
        ttm_bo_unreserve(&nvbo->bo);
}

const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
        .free = nouveau_gem_object_del,
        .open = nouveau_gem_object_open,
        .close = nouveau_gem_object_close,
        .export = nouveau_gem_prime_export,
        .pin = nouveau_gem_prime_pin,
        .unpin = nouveau_gem_prime_unpin,
        .get_sg_table = nouveau_gem_prime_get_sg_table,
        .vmap = drm_gem_ttm_vmap,
        .vunmap = drm_gem_ttm_vunmap,
        .mmap = drm_gem_ttm_mmap,
        .vm_ops = &nouveau_ttm_vm_ops,
};
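
/*
 * Allocate a new buffer object.  NOUVEAU_GEM_DOMAIN_NO_SHARE objects
 * share the client's uvmm reservation object instead of getting their
 * own, which is why the shared resv is locked around init below.
 */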
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
                struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = cli->drm;
        struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli);
        struct dma_resv *resv = NULL;
        struct nouveau_bo *nvbo;
        int ret;

        if (domain & NOUVEAU_GEM_DOMAIN_NO_SHARE) {
                if (unlikely(!uvmm))
                        return -EINVAL;

                resv = &uvmm->resv;
        }

        if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
                domain |= NOUVEAU_GEM_DOMAIN_CPU;

        nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
                                tile_flags, false);
        if (IS_ERR(nvbo))
                return PTR_ERR(nvbo);

        nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
        nvbo->no_share = domain & NOUVEAU_GEM_DOMAIN_NO_SHARE;

        /* Initialize the embedded gem-object. We return a single gem-reference
         * to the caller, instead of a normal nouveau_bo ttm reference. */
        ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
        if (ret) {
                drm_gem_object_release(&nvbo->bo.base);
                kfree(nvbo);
                return ret;
        }

        if (resv)
                dma_resv_lock(resv, NULL);

        ret = nouveau_bo_init(nvbo, size, align, domain, NULL, resv);

        if (resv)
                dma_resv_unlock(resv);

        if (ret)
                return ret;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                nvbo->valid_domains &= domain;

        *pnvbo = nvbo;
        return 0;
}
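
/*
 * Fill in the userspace-visible info for a GEM object: domain, offset
 * (the per-client VMA address on NV50+), size, mmap handle and tiling.
 */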
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
                 struct drm_nouveau_gem_info *rep)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vmm *vmm = nouveau_cli_vmm(cli);
        struct nouveau_vma *vma;

        if (is_power_of_2(nvbo->valid_domains))
                rep->domain = nvbo->valid_domains;
        else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
        rep->offset = nvbo->offset;
        if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50 &&
            !nouveau_cli_uvmm(cli)) {
                vma = nouveau_vma_find(nvbo, vmm);
                if (!vma)
                        return -EINVAL;

                rep->offset = vma->addr;
        }

        rep->size = nvbo->bo.base.size;
        rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
        rep->tile_mode = nvbo->mode;
        rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
                rep->tile_flags |= nvbo->kind << 8;
        else
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
        else
                rep->tile_flags |= nvbo->zeta;

        return 0;
}
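
/*
 * GEM_NEW ioctl: create a buffer object and return a handle plus its
 * initial placement info to userspace.
 */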
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        /* If uvmm wasn't initialized until now, disable it completely to
         * prevent userspace from mixing up UAPIs.
         */
        nouveau_cli_disable_uvmm_noinit(cli);

        ret = nouveau_gem_new(cli, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
                                    &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(&nvbo->bo.base);
        return ret;
}
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_domains = 0;

        if (!domains)
                return -EINVAL;

        valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->resource->mem_type == TTM_PL_VRAM)
                pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->resource->mem_type == TTM_PL_TT)
                pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

        else
                pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

        nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);

        return 0;
}
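
/*
 * Pushbuf submission walks a userspace-supplied buffer list: reserve
 * every BO (validate_init), validate placements and sync with pending
 * fences (validate_list), then fence and unreserve everything
 * (validate_fini).
 */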
struct validate_op {
        struct list_head list;
        struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
                        struct nouveau_fence *fence,
                        struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        struct nouveau_bo *nvbo;
        struct drm_nouveau_gem_pushbuf_bo *b;

        while (!list_empty(&op->list)) {
                nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
                b = &pbbo[nvbo->pbbo_index];

                if (likely(fence)) {
                        nouveau_bo_fence(nvbo, fence, !!b->write_domains);

                        if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                                struct nouveau_vma *vma =
                                        (void *)(unsigned long)b->user_priv;
                                nouveau_fence_unref(&vma->fence);
                                dma_fence_get(&fence->base);
                                vma->fence = fence;
                        }
                }

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
                drm_gem_object_put(&nvbo->bo.base);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
              struct nouveau_fence *fence,
              struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        validate_fini_no_ticket(op, chan, fence, pbbo);
        ww_acquire_fini(&op->ticket);
}

static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int trycnt = 0;
        int ret = -EINVAL, i;
        struct nouveau_bo *res_bo = NULL;
        LIST_HEAD(gart_list);
        LIST_HEAD(vram_list);
        LIST_HEAD(both_list);

        ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
        if (++trycnt > 100000) {
                NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(file_priv, b->handle);
                if (!gem) {
                        NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
                        ret = -ENOENT;
                        break;
                }
                nvbo = nouveau_gem_object(gem);
                if (nvbo == res_bo) {
                        res_bo = NULL;
                        drm_gem_object_put(gem);
                        continue;
                }

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_PRINTK(err, cli, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        drm_gem_object_put(gem);
                        ret = -EINVAL;
                        break;
                }

                ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
                if (ret) {
                        list_splice_tail_init(&vram_list, &op->list);
                        list_splice_tail_init(&gart_list, &op->list);
                        list_splice_tail_init(&both_list, &op->list);
                        validate_fini_no_ticket(op, chan, NULL, NULL);
                        if (unlikely(ret == -EDEADLK)) {
                                ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                                              &op->ticket);
                                if (!ret)
                                        res_bo = nvbo;
                        }
                        if (unlikely(ret)) {
                                if (ret != -ERESTARTSYS)
                                        NV_PRINTK(err, cli, "fail reserve\n");
                                break;
                        }
                }

                if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                        struct nouveau_vmm *vmm = chan->vmm;
                        struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
                        if (!vma) {
                                NV_PRINTK(err, cli, "vma not found!\n");
                                ret = -EINVAL;
                                break;
                        }

                        b->user_priv = (uint64_t)(unsigned long)vma;
                } else {
                        b->user_priv = (uint64_t)(unsigned long)nvbo;
                }

                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &gart_list);
                else {
                        NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &both_list);
                        ret = -EINVAL;
                        break;
                }
                if (nvbo == res_bo)
                        goto retry;
        }

        ww_acquire_done(&op->ticket);
        list_splice_tail(&vram_list, &op->list);
        list_splice_tail(&gart_list, &op->list);
        list_splice_tail(&both_list, &op->list);
        if (ret)
                validate_fini(op, chan, NULL, NULL);
        return ret;
}
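
/*
 * On pre-NV50 chips userspace relocations depend on the presumed offset;
 * validate_list() invalidates the presumed state of any buffer whose
 * placement changed, and returns the number of such buffers so the
 * caller knows whether relocations must be applied.
 */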
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
              struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        struct nouveau_drm *drm = chan->drm;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_PRINTK(err, cli, "fail set_domain\n");
                        return ret;
                }

                ret = nouveau_bo_validate(nvbo, true, false);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail ttm_validate\n");
                        return ret;
                }

                ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail post-validate sync\n");
                        return ret;
                }

                if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                        if (nvbo->offset == b->presumed.offset &&
                            ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                             (nvbo->bo.resource->mem_type == TTM_PL_TT &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                                continue;

                        if (nvbo->bo.resource->mem_type == TTM_PL_TT)
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                        else
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                        b->presumed.offset = nvbo->offset;
                        b->presumed.valid = 0;
                        relocs++;
                }
        }

        return relocs;
}
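
/*
 * Top-level helper for the pushbuf ioctl: reserve and validate the whole
 * buffer list, reporting via *apply_relocs whether relocations are needed.
 */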
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             int nr_buffers,
                             struct validate_op *op, bool *apply_relocs)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int ret;

        INIT_LIST_HEAD(&op->list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, cli, &op->list, pbbo);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validating bo list\n");
                validate_fini(op, chan, NULL, NULL);
                return ret;
        } else if (ret > 0) {
                *apply_relocs = true;
        }

        return 0;
}
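
/*
 * Legacy relocation path: patch presumed GPU addresses directly into the
 * (CPU-mapped) push buffer for any buffer whose placement changed.
 */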
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_reloc *reloc,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        int ret = 0;
        unsigned i;

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;
                long lret;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.base.size)) {
                        NV_PRINTK(err, cli, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size),
                                          &nvbo->kmap);
                        if (ret) {
                                NV_PRINTK(err, cli, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
                                             DMA_RESV_USAGE_BOOKKEEP,
                                             false, 15 * HZ);
                if (!lret)
                        ret = -EBUSY;
                else if (lret > 0)
                        ret = 0;
                else
                        ret = lret;

                if (ret) {
                        NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n",
                                  ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        return ret;
}
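
/*
 * GEM_PUSHBUF ioctl: copy in the push/buffer/reloc arrays, validate all
 * buffers, emit the pushes via the channel's submission method (IB ring,
 * CALL on >=nv25, or JUMP otherwise), then fence the submission.
 */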
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_abi16_chan *temp;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan = NULL;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0;
        bool do_reloc = false, sync = false;

        if (unlikely(!abi16))
                return -ENOMEM;

        if (unlikely(nouveau_cli_uvmm(cli)))
                return nouveau_abi16_put(abi16, -ENOSYS);

        list_for_each_entry(temp, &abi16->channels, head) {
                if (temp->chan->chid == req->channel) {
                        chan = temp->chan;
                        break;
                }
        }

        if (!chan)
                return nouveau_abi16_put(abi16, -ENOENT);
        if (unlikely(atomic_read(&chan->killed)))
                return nouveau_abi16_put(abi16, -ENODEV);

        sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

        req->vram_available = drm->gem.vram_available;
        req->gart_available = drm->gem.gart_available;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push))
                return nouveau_abi16_put(abi16, PTR_ERR(push));

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                u_free(push);
                return nouveau_abi16_put(abi16, PTR_ERR(bo));
        }

        /* Ensure all push buffers are on validate list */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out_prevalid;
                }
        }

        /* Validate buffer list */
revalidate:
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate: %d\n", ret);
                goto out_prevalid;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                if (!reloc) {
                        validate_fini(&op, chan, NULL, bo);
                        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
                        if (IS_ERR(reloc)) {
                                ret = PTR_ERR(reloc);
                                goto out_prevalid;
                        }

                        goto revalidate;
                }

                ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
                if (ret) {
                        NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
                if (ret) {
                        NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_vma *vma = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        u64 addr = vma->addr + push[i].offset;
                        u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
                        bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;

                        nv50_dma_push(chan, addr, length, no_prefetch);
                }
        } else
        if (drm->client.device.info.chipset >= 0x25) {
                ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
                if (ret) {
                        NV_PRINTK(err, cli, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
                        PUSH_DATA(chan->chan.push, 0);
                }
        } else {
                ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        uint32_t cmd;

                        cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          PFN_UP(nvbo->bo.base.size),
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
                        PUSH_DATA(chan->chan.push, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                PUSH_DATA(chan->chan.push, 0);
                }
        }

        ret = nouveau_fence_new(&fence, chan);
        if (ret) {
                NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

        if (sync) {
                if (!(ret = nouveau_fence_wait(fence, false, false))) {
                        if ((ret = dma_fence_get_status(&fence->base)) == 1)
                                ret = 0;
                }
        }

out:
        validate_fini(&op, chan, fence, bo);
        nouveau_fence_unref(&fence);

        if (do_reloc) {
                struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                                                u64_to_user_ptr(req->buffers);

                for (i = 0; i < req->nr_buffers; i++) {
                        if (bo[i].presumed.valid)
                                continue;

                        if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
                                         sizeof(bo[i].presumed))) {
                                ret = -EFAULT;
                                break;
                        }
                }
        }
out_prevalid:
        if (!IS_ERR(reloc))
                u_free(reloc);
        u_free(bo);
        u_free(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (drm->client.device.info.chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->push.addr + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        return nouveau_abi16_put(abi16, ret);
}
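
/*
 * CPU_PREP ioctl: wait for pending GPU work on the object (or just poll
 * when NOWAIT is set) and sync the buffer for CPU access.
 */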
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
        long lret;
        int ret;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        lret = dma_resv_wait_timeout(nvbo->bo.base.resv,
                                     dma_resv_usage_rw(write), true,
                                     no_wait ? 0 : 30 * HZ);
        if (!lret)
                ret = -EBUSY;
        else if (lret > 0)
                ret = 0;
        else
                ret = lret;

        nouveau_bo_sync_for_cpu(nvbo);
        drm_gem_object_put(gem);

        return ret;
}
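
/*
 * CPU_FINI ioctl: hand the object back to the GPU, syncing the buffer
 * for device access.
 */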
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_fini *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        nouveau_bo_sync_for_device(nvbo);
        drm_gem_object_put(gem);
        return 0;
}
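
/*
 * GEM_INFO ioctl: look up a handle and report the object's current state.
 */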
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;

        ret = nouveau_gem_info(file_priv, gem, req);
        drm_gem_object_put(gem);
        return ret;
}