/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <linux/sync_file.h>

#include "virtgpu_drv.h"
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}
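
/*
 * Illustrative userspace sketch (not part of the driver): VIRTGPU_MAP only
 * returns a fake offset into the DRM fd's mmap space; the mapping itself is
 * an ordinary mmap() on that fd. "fd", "bo_handle" and "bo_size" are assumed
 * to come from the caller; drmIoctl() is libdrm.
 *
 *	struct drm_virtgpu_map map = { .handle = bo_handle };
 *	void *ptr = MAP_FAILED;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map) == 0)
 *		ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, map.offset);
 */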
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}
static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_put_unlocked(&qobj->gem_base);
	}
}
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *out_fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	exbuf->fence_fd = -1;

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		buflist = kvmalloc_array(exbuf->num_bo_handles,
					 sizeof(struct ttm_validate_buffer),
					 GFP_KERNEL | __GFP_ZERO);
		if (!bo_handles || !buflist) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
			if (!gobj) {
				ret = -ENOENT;
				goto out_unused_fd;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = memdup_user((void __user *)(uintptr_t)exbuf->command,
			  exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_memdup;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_memdup;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, out_fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);

	/* fence the command bo */
	virtio_gpu_unref_list(&validate_list);
	kvfree(buflist);
	return 0;

out_memdup:
	kfree(buf);
out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
out_unused_fd:
	kvfree(bo_handles);
	kvfree(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
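
/*
 * Illustrative userspace sketch (not part of the driver): submitting a
 * command stream with an out-fence through DRM_IOCTL_VIRTGPU_EXECBUFFER.
 * "fd", "cmds", "cmds_size", "handles" and "nr_handles" are assumed to come
 * from the caller; drmIoctl() is libdrm.
 *
 *	struct drm_virtgpu_execbuffer eb = {
 *		.flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
 *		.size = cmds_size,
 *		.command = (uintptr_t)cmds,
 *		.bo_handles = (uintptr_t)handles,
 *		.num_bo_handles = nr_handles,
 *		.fence_fd = -1,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb) == 0) {
 *		// eb.fence_fd now holds a sync_file fd; close() it when done
 *	}
 */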
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d == true ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int))) {
		return -EFAULT;
	}
	return 0;
}
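
/*
 * Illustrative userspace sketch (not part of the driver): probing for virgl
 * 3D support before using any of the 3D ioctls. Note the kernel writes
 * sizeof(int) bytes through param->value.
 *
 *	int has_3d = 0;
 *	struct drm_virtgpu_getparam gp = {
 *		.param = VIRTGPU_PARAM_3D_FEATURES,
 *		.value = (uintptr_t)&has_3d,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0 && has_3d) {
 *		// execbuffer and the 3D transfer ioctls are available
 *	}
 */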
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (vgdev->has_virgl_3d == false) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj))
		return PTR_ERR(qobj);
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_get(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(qobj->hw_res_handle);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence) {
			ret = -ENOMEM;
			goto fail_backoff;
		}

		virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
		ret = virtio_gpu_object_attach(vgdev, qobj, fence);
		if (ret) {
			virtio_gpu_fence_cleanup(fence);
			goto fail_backoff;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			dma_fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
	return 0;
fail_backoff:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		dma_fence_put(&fence->f);
	}
//fail_obj:
//	drm_gem_object_handle_unreference_unlocked(obj);
	return ret;
}
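
/*
 * Illustrative userspace sketch (not part of the driver): creating a 64x64
 * resource on a 2D-only (non-virgl) device, which is why target is 2 and
 * depth/array_size stay at 1. The format value is an example following the
 * VIRTIO_GPU_FORMAT_* enum; other fields are left zero.
 *
 *	struct drm_virtgpu_resource_create rc = {
 *		.target = 2,
 *		.format = 1,	// e.g. VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM
 *		.width = 64,
 *		.height = 64,
 *		.depth = 1,
 *		.array_size = 1,
 *		.size = 64 * 64 * 4,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &rc) == 0) {
 *		// rc.bo_handle is the GEM handle, rc.res_handle the host id
 *	}
 */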
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct ttm_operation_ctx ctx = { true, false };
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (vgdev->has_virgl_3d == false)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unres;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	dma_fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct ttm_operation_ctx ctx = { true, false };
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence) {
			ret = -ENOMEM;
			goto out_unres;
		}
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		dma_fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}
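
/*
 * Illustrative userspace sketch (not part of the driver): flushing a 64x64
 * region of a BO to the host resource after writing through its mmap. On a
 * 2D device only box.x/y/w/h are used; level and offset matter for 3D.
 *
 *	struct drm_virtgpu_3d_transfer_to_host xfer = {
 *		.bo_handle = bo_handle,
 *		.box = { .x = 0, .y = 0, .z = 0, .w = 64, .h = 64, .d = 1 },
 *		.level = 0,
 *		.offset = 0,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
 */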
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_put_unlocked(gobj);
	return ret;
}
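
/*
 * Illustrative userspace sketch (not part of the driver): a non-blocking
 * busy check. With VIRTGPU_WAIT_NOWAIT the ioctl fails with EBUSY instead
 * of sleeping while the host is still using the object.
 *
 *	struct drm_virtgpu_3d_wait wait = {
 *		.handle = bo_handle,
 *		.flags = VIRTGPU_WAIT_NOWAIT,
 *	};
 *
 *	bool busy = drmIoctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait) != 0 &&
 *		    errno == EBUSY;
 */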
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}
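
/*
 * Illustrative userspace sketch (not part of the driver): fetching the base
 * virgl capset (id 1). The buffer size is an example; the kernel copies
 * min(args->size, host capset size) bytes.
 *
 *	char caps[308];
 *	struct drm_virtgpu_get_caps gc = {
 *		.cap_set_id = 1,	// VIRTIO_GPU_CAPSET_VIRGL
 *		.cap_set_ver = 1,
 *		.addr = (uintptr_t)caps,
 *		.size = sizeof(caps),
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &gc) == 0) {
 *		// caps[] now holds the (possibly truncated) capset
 *	}
 */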
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
};