/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"
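/*
 * Create a virtio-gpu backed GEM object from @params and a handle for it in
 * @file.  On success *obj_p and *handle_p are filled in; the reference taken
 * at allocation is dropped once the handle owns the object.
 */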
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
	if (ret < 0)
		return ret;

	ret = drm_gem_handle_create(file, &obj->base.base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base.base);
		return ret;
	}

	*obj_p = &obj->base.base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&obj->base.base);

	*handle_p = handle;
	return 0;
}
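/*
 * DRM "dumb buffer" creation: buffers are always 32bpp XRGB8888 (in host
 * byte order), the pitch is width * 4, and the size is rounded up to a
 * whole page.
 */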
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		return ret;

	args->pitch = pitch;
	return 0;
}
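/*
 * Look up the dumb-buffer handle and return the fake mmap offset userspace
 * should pass to mmap() to map the buffer.
 */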
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
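/*
 * GEM open hook, called whenever @file obtains a handle to @obj.  On 3D
 * capable devices the backing resource is attached to the file's rendering
 * context on the host.
 */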
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return 0;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return -ENOMEM;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, objs);
	virtio_gpu_notify(vgdev);
	return 0;
}
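/*
 * GEM close hook, the counterpart of virtio_gpu_gem_object_open(): detach
 * the resource from the file's context when a handle is released.
 */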
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object_array *objs;

	if (!vgdev->has_virgl_3d)
		return;

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, obj);

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id, objs);
	virtio_gpu_notify(vgdev);
}
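/*
 * A virtio_gpu_object_array gathers the GEM objects referenced by a single
 * command so that their references, reservation locks and fences can be
 * handled as one group.  Allocate room for @nents entries; the array starts
 * out empty.
 */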
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;
	size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;

	objs = kmalloc(size, GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}
static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}
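/*
 * Build an object array from an array of userspace GEM handles.  If any
 * lookup fails, the objects gathered so far are released and NULL is
 * returned.
 */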
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles,
			      u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}
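/*
 * Lock the reservation objects of all entries.  A single entry takes its
 * dma_resv lock directly; multiple entries go through
 * drm_gem_lock_reservations(), which uses the array's ww_acquire ticket to
 * avoid deadlocks.
 */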
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	return ret;
}
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}
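/*
 * Install @fence as the exclusive fence on every object in the array; the
 * reservation locks are expected to be held (see
 * virtio_gpu_array_lock_resv()).
 */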
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	int i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
}
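/* Drop the object references held by the array, then free the array. */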
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put_unlocked(objs->objs[i]);
	virtio_gpu_array_free(objs);
}
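/*
 * Queue the array on vgdev->obj_free_list and kick obj_free_work, so the
 * object references are dropped later from the workqueue rather than in
 * the caller's context.
 */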
void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_object_array *objs)
{
	spin_lock(&vgdev->obj_free_lock);
	list_add_tail(&objs->next, &vgdev->obj_free_list);
	spin_unlock(&vgdev->obj_free_lock);
	schedule_work(&vgdev->obj_free_work);
}
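/*
 * Worker for obj_free_work: drain vgdev->obj_free_list, releasing each
 * queued array with the spinlock dropped around the actual puts.
 */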
void virtio_gpu_array_put_free_work(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device, obj_free_work);
	struct virtio_gpu_object_array *objs;

	spin_lock(&vgdev->obj_free_lock);
	while (!list_empty(&vgdev->obj_free_list)) {
		objs = list_first_entry(&vgdev->obj_free_list,
					struct virtio_gpu_object_array, next);
		list_del(&objs->next);
		spin_unlock(&vgdev->obj_free_lock);
		virtio_gpu_array_put_free(objs);
		spin_lock(&vgdev->obj_free_lock);
	}
	spin_unlock(&vgdev->obj_free_lock);
}