]> git.ipfire.org Git - thirdparty/qemu.git/blame - hw/display/virtio-gpu.c
virtio-gpu: fix potential divide-by-zero regression
[thirdparty/qemu.git] / hw / display / virtio-gpu.c
CommitLineData
62232bf4
GH
1/*
2 * Virtio GPU Device
3 *
4 * Copyright Red Hat, Inc. 2013-2014
5 *
6 * Authors:
7 * Dave Airlie <airlied@redhat.com>
8 * Gerd Hoffmann <kraxel@redhat.com>
9 *
2e252145 10 * This work is licensed under the terms of the GNU GPL, version 2 or later.
62232bf4
GH
11 * See the COPYING file in the top-level directory.
12 */
13
9b8bfe21 14#include "qemu/osdep.h"
f0353b0d 15#include "qemu/units.h"
62232bf4
GH
16#include "qemu/iov.h"
17#include "ui/console.h"
18#include "trace.h"
8da132a5 19#include "sysemu/dma.h"
2f780b6a 20#include "sysemu/sysemu.h"
62232bf4 21#include "hw/virtio/virtio.h"
ca77ee28 22#include "migration/qemu-file-types.h"
62232bf4 23#include "hw/virtio/virtio-gpu.h"
ad08e67a 24#include "hw/virtio/virtio-gpu-bswap.h"
83a7d3c0 25#include "hw/virtio/virtio-gpu-pixman.h"
62232bf4 26#include "hw/virtio/virtio-bus.h"
a27bd6c7 27#include "hw/qdev-properties.h"
03dd024f 28#include "qemu/log.h"
0b8fa32f 29#include "qemu/module.h"
5e3d741c 30#include "qapi/error.h"
50d8e25e 31#include "qemu/error-report.h"
62232bf4 32
0c244e50
GH
33#define VIRTIO_GPU_VM_VERSION 1
34
62232bf4
GH
35static struct virtio_gpu_simple_resource*
36virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
25c001a4
VK
37static struct virtio_gpu_simple_resource *
38virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
39 bool require_backing,
40 const char *caller, uint32_t *error);
62232bf4 41
3bb68f79
GH
42static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
43 struct virtio_gpu_simple_resource *res);
b8e23926 44
2c267d66
GH
/*
 * Copy the cursor pixel data for @resource_id into the scanout's
 * current cursor.
 *
 * The resource may be backed by a blob (res->blob) or by a pixman
 * image (res->image).  A blob only needs to be large enough (4 bytes
 * per pixel); an image must match the cursor dimensions exactly.
 * Mismatches are silently ignored (guest-triggerable, no response
 * path from here).
 */
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    /* backing pages not required; blob/image presence is checked below */
    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        /* blob-backed: bounds-check the byte size (4 bytes per pixel) */
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        /* image-backed: dimensions must match the cursor exactly */
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}
77
/*
 * Handle a VIRTIO_GPU_CMD_UPDATE_CURSOR / VIRTIO_GPU_CMD_MOVE_CURSOR
 * request from the cursor queue.  An update (re)defines the cursor
 * image and hotspot; a move only changes the position.  Requests for
 * out-of-range scanout ids are dropped.
 */
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            /* virtio-gpu cursors are a fixed 64x64 pixels */
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            /* class hook so e.g. virgl can source the pixels differently */
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    /* resource_id == 0 means "hide the cursor" */
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}
116
62232bf4
GH
117static struct virtio_gpu_simple_resource *
118virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
119{
120 struct virtio_gpu_simple_resource *res;
121
122 QTAILQ_FOREACH(res, &g->reslist, next) {
123 if (res->resource_id == resource_id) {
124 return res;
125 }
126 }
127 return NULL;
128}
129
25c001a4
VK
/*
 * Look up a resource and validate it on behalf of @caller.
 *
 * On failure a LOG_GUEST_ERROR is emitted and, when @error is
 * non-NULL, a VIRTIO_GPU_RESP_ERR_* code is stored there.
 *
 * @require_backing: additionally demand that guest pages are attached
 * (res->iov) and that host-side storage exists (image or blob).
 */
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}
160
62232bf4
GH
/*
 * Push a response for @cmd into its virtqueue element and notify the
 * guest.  When the request carried VIRTIO_GPU_FLAG_FENCE the fence
 * and context ids are echoed back in the response header.  Marks the
 * command finished.
 */
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    /* header is in guest byte order from here on */
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}
184
185void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
186 struct virtio_gpu_ctrl_command *cmd,
187 enum virtio_gpu_ctrl_type type)
188{
189 struct virtio_gpu_ctrl_hdr resp;
190
191 memset(&resp, 0, sizeof(resp));
192 resp.type = type;
193 virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
194}
195
62232bf4
GH
196void virtio_gpu_get_display_info(VirtIOGPU *g,
197 struct virtio_gpu_ctrl_command *cmd)
198{
199 struct virtio_gpu_resp_display_info display_info;
200
201 trace_virtio_gpu_cmd_get_display_info();
202 memset(&display_info, 0, sizeof(display_info));
203 display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
50d8e25e 204 virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
62232bf4
GH
205 virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
206 sizeof(display_info));
207}
208
1ed2cb32
GH
/*
 * VIRTIO_GPU_CMD_GET_EDID: return the EDID blob for one scanout.
 * Out-of-range scanout ids get ERR_INVALID_PARAMETER.
 */
void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
230
c53f5b89
TW
231static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
232 uint32_t width, uint32_t height)
233{
234 /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
235 * pixman_image_create_bits will fail in case it overflow.
236 */
237
238 int bpp = PIXMAN_FORMAT_BPP(pformat);
239 int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
240 return height * stride;
241}
242
9462ff46
MAL
#ifdef WIN32
/*
 * pixman destroy hook for images whose bits live in a win32 file
 * mapping: release the view together with the handle stashed in @data.
 */
static void
win32_pixman_image_destroy(pixman_image_t *image, void *data)
{
    qemu_win32_map_free(pixman_image_get_data(image), (HANDLE)data,
                        &error_warn);
}
#endif
252
62232bf4
GH
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host pixman image for
 * a new guest resource.  Rejects resource id 0, duplicate ids and
 * formats pixman cannot represent.  Allocation is skipped entirely
 * when it would exceed the configured max_hostmem; that case then
 * surfaces as ERR_OUT_OF_MEMORY through the !res->image check below.
 */
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        void *bits = NULL;
#ifdef WIN32
        bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
        if (!bits) {
            goto end;
        }
#endif
        /* guard against divide-by-zero when the guest sends height == 0 */
        res->image = pixman_image_create_bits(
            pformat,
            c2d.width,
            c2d.height,
            bits, c2d.height ? res->hostmem / c2d.height : 0);
#ifdef WIN32
        if (res->image) {
            pixman_image_set_destroy_function(res->image, win32_pixman_image_destroy, res->handle);
        }
#endif
    }

#ifdef WIN32
end:
#endif
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}
333
e0933d91
VK
/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: create a resource backed
 * directly by guest memory pages (exported via udmabuf) rather than
 * by a host pixman image.
 */
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    /*
     * Only guest-memory blobs are handled here.  NOTE(review): the &&
     * means a non-GUEST blob_mem is accepted as long as blob_flags is
     * USE_SHAREABLE -- confirm against the virtio-gpu spec whether
     * this was meant to be ||.
     */
    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    /* map the guest's scatter-gather list of backing pages */
    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}
383
da566a18
GH
384static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
385{
50d8e25e 386 struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
da566a18 387 struct virtio_gpu_simple_resource *res;
da566a18
GH
388
389 if (scanout->resource_id == 0) {
390 return;
391 }
392
393 res = virtio_gpu_find_resource(g, scanout->resource_id);
394 if (res) {
395 res->scanout_bitmask &= ~(1 << scanout_id);
396 }
397
ed8f3fe6 398 dpy_gfx_replace_surface(scanout->con, NULL);
da566a18
GH
399 scanout->resource_id = 0;
400 scanout->ds = NULL;
401 scanout->width = 0;
402 scanout->height = 0;
403}
404
62232bf4
GH
/*
 * Free a resource: disable every scanout it is displayed on, release
 * the pixman image and guest mappings, unlink it from the resource
 * list and return its host memory to the accounting pool.
 */
static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}
424
425static void virtio_gpu_resource_unref(VirtIOGPU *g,
426 struct virtio_gpu_ctrl_command *cmd)
427{
428 struct virtio_gpu_simple_resource *res;
429 struct virtio_gpu_resource_unref unref;
430
431 VIRTIO_GPU_FILL_CMD(unref);
1715d6b5 432 virtio_gpu_bswap_32(&unref, sizeof(unref));
62232bf4
GH
433 trace_virtio_gpu_cmd_res_unref(unref.resource_id);
434
435 res = virtio_gpu_find_resource(g, unref.resource_id);
436 if (!res) {
437 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
438 __func__, unref.resource_id);
439 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
440 return;
441 }
442 virtio_gpu_resource_destroy(g, res);
443}
444
/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy the rectangle t2d.r from
 * the guest's backing pages into the host pixman image.  Blob
 * resources have no separate host copy, so they are skipped silently.
 */
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    /* the x+width / y+height checks also catch unsigned wrap-around */
    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        /* partial-width rectangle: copy row by row */
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        /* full-width rectangle: rows are contiguous, copy in one go */
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}
501
/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: propagate updated resource contents
 * to every scanout showing it.  Blob resources displayed on a GL
 * console go through dpy_gl_update(); otherwise the flush rectangle
 * is clipped against each scanout and dpy_gfx_update() is issued per
 * console.
 */
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    pixman_region16_t flush_region;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        /* any overlap with a GL scanout triggers a full-scanout update */
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
        /* non-GL console showing a blob: fall through to the 2D path */
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->parent_obj.scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}
599
fa06e5cb
GH
600static void virtio_unref_resource(pixman_image_t *image, void *data)
601{
602 pixman_image_unref(data);
603}
604
81cd9f71
VK
605static void virtio_gpu_update_scanout(VirtIOGPU *g,
606 uint32_t scanout_id,
607 struct virtio_gpu_simple_resource *res,
608 struct virtio_gpu_rect *r)
609{
610 struct virtio_gpu_simple_resource *ores;
611 struct virtio_gpu_scanout *scanout;
612
613 scanout = &g->parent_obj.scanout[scanout_id];
614 ores = virtio_gpu_find_resource(g, scanout->resource_id);
615 if (ores) {
616 ores->scanout_bitmask &= ~(1 << scanout_id);
617 }
618
619 res->scanout_bitmask |= (1 << scanout_id);
620 scanout->resource_id = res->resource_id;
621 scanout->x = r->x;
622 scanout->y = r->y;
623 scanout->width = r->width;
624 scanout->height = r->height;
625}
626
e64d4b6a
VK
/*
 * Common tail of SET_SCANOUT and SET_SCANOUT_BLOB: validate the
 * rectangle against the framebuffer description, then either hand a
 * dmabuf to a GL console (blob path) or (re)create a display surface
 * wrapping the resource pixels and install it on the console.
 *
 * On failure a VIRTIO_GPU_RESP_ERR_* code is stored in @error.
 */
static void virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    /* reject tiny (<16px) or out-of-framebuffer rectangles */
    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            /* zero-copy path: display the blob via dmabuf */
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
            }
            return;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            /* keep the backing image alive as long as this view exists */
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
#ifdef WIN32
        qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, fb->offset);
#endif

        /* the surface holds its own reference now */
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, r);
}
707
708static void virtio_gpu_set_scanout(VirtIOGPU *g,
709 struct virtio_gpu_ctrl_command *cmd)
710{
711 struct virtio_gpu_simple_resource *res;
712 struct virtio_gpu_framebuffer fb = { 0 };
713 struct virtio_gpu_set_scanout ss;
714
715 VIRTIO_GPU_FILL_CMD(ss);
716 virtio_gpu_bswap_32(&ss, sizeof(ss));
717 trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
718 ss.r.width, ss.r.height, ss.r.x, ss.r.y);
719
4fa7b4cc
GH
720 if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
721 qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
722 __func__, ss.scanout_id);
723 cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
724 return;
725 }
726
e64d4b6a
VK
727 if (ss.resource_id == 0) {
728 virtio_gpu_disable_scanout(g, ss.scanout_id);
729 return;
c806cfa0
GH
730 }
731
e64d4b6a
VK
732 res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
733 __func__, &cmd->error);
734 if (!res) {
735 return;
736 }
737
738 fb.format = pixman_image_get_format(res->image);
739 fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
740 fb.width = pixman_image_get_width(res->image);
741 fb.height = pixman_image_get_height(res->image);
742 fb.stride = pixman_image_get_stride(res->image);
743 fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;
744
745 virtio_gpu_do_set_scanout(g, ss.scanout_id,
746 &fb, res, &ss.r, &cmd->error);
62232bf4
GH
747}
748
32db3c63
VK
/*
 * VIRTIO_GPU_CMD_SET_SCANOUT_BLOB: like SET_SCANOUT but the guest
 * supplies the framebuffer layout (format, size, stride, offset) for
 * a blob resource.  Verifies the described rectangle fits inside the
 * blob before handing off to virtio_gpu_do_set_scanout().
 */
static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;
    uint64_t fbend;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = virtio_gpu_get_pixman_format(ss.format);
    if (!fb.format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = ss.width;
    fb.height = ss.height;
    /* only plane 0 is used for 2D scanout */
    fb.stride = ss.strides[0];
    fb.offset = ss.offsets[0] + ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    /* byte offset just past the last pixel of the rectangle */
    fbend = fb.offset;
    fbend += fb.stride * (ss.r.height - 1);
    fbend += fb.bytes_pp * ss.r.width;
    if (fbend > res->blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}
810
3bb68f79 811int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
70d37662 812 uint32_t nr_entries, uint32_t offset,
62232bf4 813 struct virtio_gpu_ctrl_command *cmd,
9049f8bc
GH
814 uint64_t **addr, struct iovec **iov,
815 uint32_t *niov)
62232bf4
GH
816{
817 struct virtio_gpu_mem_entry *ents;
818 size_t esize, s;
9049f8bc 819 int e, v;
62232bf4 820
70d37662 821 if (nr_entries > 16384) {
62232bf4 822 qemu_log_mask(LOG_GUEST_ERROR,
2c84167b 823 "%s: nr_entries is too big (%d > 16384)\n",
70d37662 824 __func__, nr_entries);
62232bf4
GH
825 return -1;
826 }
827
70d37662 828 esize = sizeof(*ents) * nr_entries;
62232bf4
GH
829 ents = g_malloc(esize);
830 s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
70d37662 831 offset, ents, esize);
62232bf4
GH
832 if (s != esize) {
833 qemu_log_mask(LOG_GUEST_ERROR,
834 "%s: command data size incorrect %zu vs %zu\n",
835 __func__, s, esize);
836 g_free(ents);
837 return -1;
838 }
839
9049f8bc 840 *iov = NULL;
0c244e50 841 if (addr) {
9049f8bc
GH
842 *addr = NULL;
843 }
70d37662 844 for (e = 0, v = 0; e < nr_entries; e++) {
9049f8bc
GH
845 uint64_t a = le64_to_cpu(ents[e].addr);
846 uint32_t l = le32_to_cpu(ents[e].length);
847 hwaddr len;
848 void *map;
849
850 do {
851 len = l;
a1d4b0a3
PMD
852 map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
853 DMA_DIRECTION_TO_DEVICE,
854 MEMTXATTRS_UNSPECIFIED);
9049f8bc
GH
855 if (!map) {
856 qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
70d37662 857 " element %d\n", __func__, e);
9049f8bc
GH
858 virtio_gpu_cleanup_mapping_iov(g, *iov, v);
859 g_free(ents);
860 *iov = NULL;
861 if (addr) {
862 g_free(*addr);
863 *addr = NULL;
864 }
865 return -1;
866 }
867
868 if (!(v % 16)) {
b21e2380 869 *iov = g_renew(struct iovec, *iov, v + 16);
9049f8bc 870 if (addr) {
b21e2380 871 *addr = g_renew(uint64_t, *addr, v + 16);
9049f8bc 872 }
eb398a54 873 }
9049f8bc
GH
874 (*iov)[v].iov_base = map;
875 (*iov)[v].iov_len = len;
0c244e50 876 if (addr) {
9049f8bc 877 (*addr)[v] = a;
0c244e50 878 }
9049f8bc
GH
879
880 a += len;
881 l -= len;
882 v += 1;
883 } while (l > 0);
62232bf4 884 }
9049f8bc
GH
885 *niov = v;
886
62232bf4
GH
887 g_free(ents);
888 return 0;
889}
890
3bb68f79
GH
891void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
892 struct iovec *iov, uint32_t count)
62232bf4
GH
893{
894 int i;
895
896 for (i = 0; i < count; i++) {
8da132a5
GH
897 dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
898 iov[i].iov_base, iov[i].iov_len,
899 DMA_DIRECTION_TO_DEVICE,
900 iov[i].iov_len);
62232bf4 901 }
7f3be0f2 902 g_free(iov);
62232bf4
GH
903}
904
3bb68f79
GH
/*
 * Release all guest backing state of @res: the dma mappings, the
 * saved guest addresses and (for blob resources) the udmabuf.
 */
static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}
918
/*
 * VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: attach guest memory pages
 * to an existing resource.  Fails if backing is already attached.
 */
static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    /* attaching twice would leak the first mapping */
    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}
951
952static void
953virtio_gpu_resource_detach_backing(VirtIOGPU *g,
954 struct virtio_gpu_ctrl_command *cmd)
955{
956 struct virtio_gpu_simple_resource *res;
957 struct virtio_gpu_resource_detach_backing detach;
958
959 VIRTIO_GPU_FILL_CMD(detach);
1715d6b5 960 virtio_gpu_bswap_32(&detach, sizeof(detach));
62232bf4
GH
961 trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);
962
25c001a4
VK
963 res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
964 __func__, &cmd->error);
965 if (!res) {
62232bf4
GH
966 return;
967 }
3bb68f79 968 virtio_gpu_cleanup_mapping(g, res);
62232bf4
GH
969}
970
2f47691a
GH
/*
 * Decode and dispatch one control-queue command; this is the default
 * (2D-only) VirtIOGPUClass::process_cmd implementation.  If the
 * handler did not already send a response (cmd->finished), a
 * header-only OK/error response is sent here -- unless the renderer
 * is blocked, in which case the response is deferred.
 */
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        /* blob commands require the blob=on device property */
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}
1030
1031static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
1032{
1033 VirtIOGPU *g = VIRTIO_GPU(vdev);
1034 qemu_bh_schedule(g->ctrl_bh);
1035}
1036
1037static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
1038{
1039 VirtIOGPU *g = VIRTIO_GPU(vdev);
1040 qemu_bh_schedule(g->cursor_bh);
1041}
1042
/*
 * Drain the queued control commands.  The processing_cmdq flag guards
 * against re-entrancy (a handler may indirectly retrigger processing);
 * commands a handler leaves unfinished are parked on fenceq until a
 * response can be sent (see virtio_gpu_process_fenceq).
 */
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        /* stop draining; gl_flushed will resume once unblocked */
        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            /* response not sent yet: keep the command on the fence queue */
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}
1082
b3a5dfde
VK
1083static void virtio_gpu_process_fenceq(VirtIOGPU *g)
1084{
1085 struct virtio_gpu_ctrl_command *cmd, *tmp;
1086
1087 QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
1088 trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
1089 virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
1090 QTAILQ_REMOVE(&g->fenceq, cmd, next);
1091 g_free(cmd);
1092 g->inflight--;
1093 if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
1094 fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
1095 }
1096 }
1097}
1098
1099static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
1100{
1101 VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);
1102
1103 virtio_gpu_process_fenceq(g);
1104 virtio_gpu_process_cmdq(g);
1105}
1106
62232bf4
GH
1107static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
1108{
1109 VirtIOGPU *g = VIRTIO_GPU(vdev);
1110 struct virtio_gpu_ctrl_command *cmd;
1111
1112 if (!virtio_queue_ready(vq)) {
1113 return;
1114 }
1115
51b19ebe
PB
1116 cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
1117 while (cmd) {
62232bf4
GH
1118 cmd->vq = vq;
1119 cmd->error = 0;
1120 cmd->finished = false;
3eb769fd 1121 QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
51b19ebe 1122 cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
62232bf4 1123 }
9d9e1521 1124
3eb769fd 1125 virtio_gpu_process_cmdq(g);
62232bf4
GH
1126}
1127
1128static void virtio_gpu_ctrl_bh(void *opaque)
1129{
1130 VirtIOGPU *g = opaque;
cabbe8e5
GH
1131 VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
1132
1133 vgc->handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
62232bf4
GH
1134}
1135
1136static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
1137{
1138 VirtIOGPU *g = VIRTIO_GPU(vdev);
51b19ebe 1139 VirtQueueElement *elem;
62232bf4
GH
1140 size_t s;
1141 struct virtio_gpu_update_cursor cursor_info;
1142
1143 if (!virtio_queue_ready(vq)) {
1144 return;
1145 }
51b19ebe
PB
1146 for (;;) {
1147 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
1148 if (!elem) {
1149 break;
1150 }
1151
1152 s = iov_to_buf(elem->out_sg, elem->out_num, 0,
62232bf4
GH
1153 &cursor_info, sizeof(cursor_info));
1154 if (s != sizeof(cursor_info)) {
1155 qemu_log_mask(LOG_GUEST_ERROR,
1156 "%s: cursor size incorrect %zu vs %zu\n",
1157 __func__, s, sizeof(cursor_info));
1158 } else {
1715d6b5 1159 virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
62232bf4
GH
1160 update_cursor(g, &cursor_info);
1161 }
51b19ebe 1162 virtqueue_push(vq, elem, 0);
62232bf4 1163 virtio_notify(vdev, vq);
51b19ebe 1164 g_free(elem);
62232bf4
GH
1165 }
1166}
1167
1168static void virtio_gpu_cursor_bh(void *opaque)
1169{
1170 VirtIOGPU *g = opaque;
50d8e25e 1171 virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
62232bf4
GH
1172}
1173
0c244e50
GH
/*
 * Migration state of one scanout: geometry, backing resource id and the
 * cursor attached to it.  Field order is part of the migration wire
 * format — do not reorder.
 */
static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
1191
/*
 * Migration state of the scanout array.  max_outputs must match between
 * source and destination (VMSTATE_UINT32_EQUAL), otherwise the load fails.
 */
static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};
1206
/*
 * Ad-hoc .put handler for the device section (see vmstate_virtio_gpu):
 * serializes each 2D resource (id, geometry, format, guest backing page
 * addresses/lengths and the raw pixel contents), terminated by a zero
 * resource id, followed by the scanout state.  virtio_gpu_load() is the
 * exact counterpart and must read in the same order.
 */
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}
1234
2c21ee76 1235static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
03fee66f 1236 const VMStateField *field)
0c244e50
GH
1237{
1238 VirtIOGPU *g = opaque;
0c244e50
GH
1239 struct virtio_gpu_simple_resource *res;
1240 struct virtio_gpu_scanout *scanout;
1241 uint32_t resource_id, pformat;
9462ff46 1242 void *bits = NULL;
8a502efd 1243 int i;
0c244e50 1244
039aa5db
PM
1245 g->hostmem = 0;
1246
0c244e50
GH
1247 resource_id = qemu_get_be32(f);
1248 while (resource_id != 0) {
b0ee78ff
LQ
1249 res = virtio_gpu_find_resource(g, resource_id);
1250 if (res) {
1251 return -EINVAL;
1252 }
1253
0c244e50
GH
1254 res = g_new0(struct virtio_gpu_simple_resource, 1);
1255 res->resource_id = resource_id;
1256 res->width = qemu_get_be32(f);
1257 res->height = qemu_get_be32(f);
1258 res->format = qemu_get_be32(f);
1259 res->iov_cnt = qemu_get_be32(f);
1260
1261 /* allocate */
83a7d3c0 1262 pformat = virtio_gpu_get_pixman_format(res->format);
0c244e50 1263 if (!pformat) {
c84f0f25 1264 g_free(res);
0c244e50
GH
1265 return -EINVAL;
1266 }
9462ff46
MAL
1267
1268 res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
1269#ifdef WIN32
1270 bits = qemu_win32_map_alloc(res->hostmem, &res->handle, &error_warn);
1271 if (!bits) {
1272 g_free(res);
1273 return -EINVAL;
1274 }
1275#endif
9c18a923
MAL
1276 res->image = pixman_image_create_bits(
1277 pformat,
1278 res->width, res->height,
1279 bits, res->height ? res->hostmem / res->height : 0);
0c244e50 1280 if (!res->image) {
c84f0f25 1281 g_free(res);
0c244e50
GH
1282 return -EINVAL;
1283 }
1284
039aa5db 1285
0c244e50
GH
1286 res->addrs = g_new(uint64_t, res->iov_cnt);
1287 res->iov = g_new(struct iovec, res->iov_cnt);
1288
1289 /* read data */
1290 for (i = 0; i < res->iov_cnt; i++) {
1291 res->addrs[i] = qemu_get_be64(f);
1292 res->iov[i].iov_len = qemu_get_be32(f);
1293 }
1294 qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
1295 pixman_image_get_stride(res->image) * res->height);
1296
1297 /* restore mapping */
1298 for (i = 0; i < res->iov_cnt; i++) {
1299 hwaddr len = res->iov[i].iov_len;
1300 res->iov[i].iov_base =
a1d4b0a3
PMD
1301 dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
1302 DMA_DIRECTION_TO_DEVICE,
1303 MEMTXATTRS_UNSPECIFIED);
3bb68f79 1304
0c244e50 1305 if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
c84f0f25
PM
1306 /* Clean up the half-a-mapping we just created... */
1307 if (res->iov[i].iov_base) {
8da132a5
GH
1308 dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
1309 res->iov[i].iov_base,
a7f85e03 1310 len,
8da132a5 1311 DMA_DIRECTION_TO_DEVICE,
a7f85e03 1312 0);
c84f0f25
PM
1313 }
1314 /* ...and the mappings for previous loop iterations */
1315 res->iov_cnt = i;
3bb68f79 1316 virtio_gpu_cleanup_mapping(g, res);
c84f0f25
PM
1317 pixman_image_unref(res->image);
1318 g_free(res);
0c244e50
GH
1319 return -EINVAL;
1320 }
1321 }
1322
1323 QTAILQ_INSERT_HEAD(&g->reslist, res, next);
039aa5db 1324 g->hostmem += res->hostmem;
0c244e50
GH
1325
1326 resource_id = qemu_get_be32(f);
1327 }
1328
1329 /* load & apply scanout state */
1330 vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
50d8e25e 1331 for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
529969b8 1332 /* FIXME: should take scanout.r.{x,y} into account */
50d8e25e 1333 scanout = &g->parent_obj.scanout[i];
0c244e50
GH
1334 if (!scanout->resource_id) {
1335 continue;
1336 }
1337 res = virtio_gpu_find_resource(g, scanout->resource_id);
1338 if (!res) {
1339 return -EINVAL;
1340 }
1341 scanout->ds = qemu_create_displaysurface_pixman(res->image);
1342 if (!scanout->ds) {
1343 return -EINVAL;
1344 }
9462ff46
MAL
1345#ifdef WIN32
1346 qemu_displaysurface_win32_set_handle(scanout->ds, res->handle, 0);
1347#endif
0c244e50
GH
1348
1349 dpy_gfx_replace_surface(scanout->con, scanout->ds);
91155f8b 1350 dpy_gfx_update_full(scanout->con);
10750ee0
GH
1351 if (scanout->cursor.resource_id) {
1352 update_cursor(g, &scanout->cursor);
1353 }
0c244e50
GH
1354 res->scanout_bitmask |= (1 << i);
1355 }
1356
1357 return 0;
1358}
1359
/*
 * Realize the 2D virtio-gpu device: validate the blob-resource
 * configuration, realize the base device (which creates the virtqueues),
 * then set up the per-queue bottom halves and internal lists.
 */
void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    /* blob resources require host udmabuf support and exclude virgl */
    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_have_udmabuf()) {
            error_setg(errp, "cannot enable blob resources without udmabuf");
            return;
        }

        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "blobs and virgl are not compatible (yet)");
            return;
        }
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    /* queue 0 = control, queue 1 = cursor (per the virtio-gpu spec) */
    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    /* guarded BHs protect against re-entrancy through memory accesses */
    g->ctrl_bh = qemu_bh_new_guarded(virtio_gpu_ctrl_bh, g,
                                     &qdev->mem_reentrancy_guard);
    g->cursor_bh = qemu_bh_new_guarded(virtio_gpu_cursor_bh, g,
                                       &qdev->mem_reentrancy_guard);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}
1394
/*
 * VirtIODevice reset: destroy every resource and drop all queued and
 * in-flight commands without responding (the virtqueues are being reset
 * as well), then reset the base-class state.
 */
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    struct virtio_gpu_ctrl_command *cmd;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        /* keep the inflight counter consistent with the emptied fenceq */
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}
1420
1421static void
1422virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
1423{
1424 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
1425
1426 memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
1427}
1428
1429static void
1430virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
1431{
1432 VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
1433 const struct virtio_gpu_config *vgconfig =
1434 (const struct virtio_gpu_config *)config;
1435
1436 if (vgconfig->events_clear) {
1437 g->virtio_config.events_read &= ~vgconfig->events_clear;
1438 }
62232bf4
GH
1439}
1440
8a502efd
HP
/*
 * For historical reasons virtio_gpu does not adhere to virtio migration
 * scheme as described in doc/virtio-migration.txt, in a sense that no
 * save/load callback are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd: the device section is a single
 * opaque field whose .get/.put handlers (virtio_gpu_load/save) hand-roll
 * the resource and scanout serialization.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};
0fc07498 1467
/* qdev properties of the 2D device, on top of the base-class ones. */
static Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    /* presumably caps g->hostmem resource accounting — confirm at callers */
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    /* enables blob resources; validated in virtio_gpu_device_realize() */
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_END_OF_LIST(),
};
1476
/*
 * Class initializer for TYPE_VIRTIO_GPU: installs the 2D command
 * processing hooks, the virtio config accessors, reset, realize and the
 * special migration vmsd.
 */
static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    /* 2D variant: subclasses (e.g. virgl) override these hooks */
    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    /* resume fence/command processing once the display backend unblocks */
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}
1497
/* QOM type registration: concrete 2D device derived from the GPU base. */
static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);
62232bf4
GH
1507
/* Register the virtio-gpu type with the QOM type system at startup. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)