/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu-common.h"
#include "qemu/iov.h"
#include "ui/console.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-bus.h"
#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res);

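/*
 * Byte-swap helpers: guest-visible structures are little-endian on the
 * wire; these convert them to host byte order (no-ops on little-endian
 * hosts).
 */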
static void
virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
{
    le32_to_cpus(&hdr->type);
    le32_to_cpus(&hdr->flags);
    le64_to_cpus(&hdr->fence_id);
    le32_to_cpus(&hdr->ctx_id);
    le32_to_cpus(&hdr->padding);
}

static void virtio_gpu_bswap_32(void *ptr,
                                size_t size)
{
#ifdef HOST_WORDS_BIGENDIAN

    size_t i;
    struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;

    virtio_gpu_ctrl_hdr_bswap(hdr);

    i = sizeof(struct virtio_gpu_ctrl_hdr);
    while (i < size) {
        le32_to_cpus((uint32_t *)(ptr + i));
        i = i + sizeof(uint32_t);
    }

#endif
}

static void
virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
{
    virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
    le32_to_cpus(&t2d->r.x);
    le32_to_cpus(&t2d->r.y);
    le32_to_cpus(&t2d->r.width);
    le32_to_cpus(&t2d->r.height);
    le64_to_cpus(&t2d->offset);
    le32_to_cpus(&t2d->resource_id);
    le32_to_cpus(&t2d->padding);
}

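/*
 * Dispatch helper: route a call to the virgl (3D) implementation when the
 * virgl renderer is in use, otherwise to the simple 2D implementation.
 */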
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        if (_g->use_virgl_renderer) {                   \
            _virgl(__VA_ARGS__);                        \
        } else {                                        \
            _simple(__VA_ARGS__);                       \
        }                                               \
    } while (0)
#else
#define VIRGL(_g, _virgl, _simple, ...)                 \
    do {                                                \
        _simple(__VA_ARGS__);                           \
    } while (0)
#endif

static void update_cursor_data_simple(VirtIOGPU *g,
                                      struct virtio_gpu_scanout *s,
                                      uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        return;
    }

    if (pixman_image_get_width(res->image) != s->current_cursor->width ||
        pixman_image_get_height(res->image) != s->current_cursor->height) {
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data,
           pixman_image_get_data(res->image),
           pixels * sizeof(uint32_t));
}

#ifdef CONFIG_VIRGL

static void update_cursor_data_virgl(VirtIOGPU *g,
                                     struct virtio_gpu_scanout *s,
                                     uint32_t resource_id)
{
    uint32_t width, height;
    uint32_t pixels, *data;

    data = virgl_renderer_get_cursor_data(resource_id, &width, &height);
    if (!data) {
        return;
    }

    if (width != s->current_cursor->width ||
        height != s->current_cursor->height) {
        free(data);
        return;
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t));
    free(data);
}

#endif

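/*
 * Handle a cursor command: MOVE_CURSOR only repositions the pointer, while
 * UPDATE_CURSOR also refreshes the cursor image from the given resource.
 */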
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->conf.max_outputs) {
        return;
    }
    s = &g->scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple,
                  g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
                  cursor->resource_id ? 1 : 0);
}

static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_config vgconfig;

    memcpy(&vgconfig, config, sizeof(g->virtio_config));

    if (vgconfig.events_clear) {
        g->virtio_config.events_read &= ~vgconfig.events_clear;
    }
}

static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    if (virtio_gpu_virgl_enabled(g->conf)) {
        features |= (1 << VIRTIO_GPU_F_VIRGL);
    }
    return features;
}

static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
{
    static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
    VirtIOGPU *g = VIRTIO_GPU(vdev);

    g->use_virgl_renderer = ((features & virgl) == virgl);
    trace_virtio_gpu_features(g->use_virgl_renderer);
}

static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
{
    g->virtio_config.events_read |= event_type;
    virtio_notify_config(&g->parent_obj);
}

static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

static void
virtio_gpu_fill_display_info(VirtIOGPU *g,
                             struct virtio_gpu_resp_display_info *dpy_info)
{
    int i;

    for (i = 0; i < g->conf.max_outputs; i++) {
        if (g->enabled_output_bitmask & (1 << i)) {
            dpy_info->pmodes[i].enabled = 1;
            dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
            dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
        }
    }
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_fill_display_info(g, &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
{
    switch (virtio_gpu_format) {
    case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
        return PIXMAN_BE_b8g8r8x8;
    case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
        return PIXMAN_BE_b8g8r8a8;
    case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
        return PIXMAN_BE_x8r8g8b8;
    case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
        return PIXMAN_BE_a8r8g8b8;
    case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
        return PIXMAN_BE_r8g8b8x8;
    case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
        return PIXMAN_BE_r8g8b8a8;
    case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
        return PIXMAN_BE_x8b8g8r8;
    case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
        return PIXMAN_BE_a8b8g8r8;
    default:
        return 0;
    }
}

static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skip integer overflow check.
     * pixman_image_create_bits will fail in case it overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

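/*
 * Create a host-side 2D resource: validate the resource id, map the virtio
 * format to a pixman format, and allocate the backing image unless the
 * max_hostmem limit would be exceeded.
 */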
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
        res->image = pixman_image_create_bits(pformat,
                                              c2d.width,
                                              c2d.height,
                                              NULL, 0);
    }

    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;
    DisplaySurface *ds = NULL;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    if (scanout_id == 0) {
        /* primary head */
        ds = qemu_create_message_surface(scanout->width ?: 640,
                                         scanout->height ?: 480,
                                         "Guest disabled display.");
    }
    dpy_gfx_replace_surface(scanout->con, ds);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_resource_destroy(g, res);
}

static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h;
    uint32_t src_offset, dst_offset, stride;
    int bpp;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_resource(g, t2d.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, t2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);

    if (t2d.offset || t2d.r.x || t2d.r.y ||
        t2d.r.width != pixman_image_get_width(res->image)) {
        void *img_data = pixman_image_get_data(res->image);
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data
                       + dst_offset, t2d.r.width * bpp);
        }
    } else {
        iov_to_buf(res->iov, res->iov_cnt, 0,
                   pixman_image_get_data(res->image),
                   pixman_image_get_stride(res->image)
                   * pixman_image_get_height(res->image));
    }
}

static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    pixman_region16_t flush_region;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_resource(g, rf.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, rf.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (rf.r.x > res->width ||
        rf.r.y > res->height ||
        rf.r.width > res->width ||
        rf.r.height > res->height ||
        rf.r.x + rf.r.width > res->width ||
        rf.r.y + rf.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    pixman_region_init_rect(&flush_region,
                            rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->conf.max_outputs; i++) {
        struct virtio_gpu_scanout *scanout;
        pixman_region16_t region, finalregion;
        pixman_box16_t *extents;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->scanout[i];

        pixman_region_init(&finalregion);
        pixman_region_init_rect(&region, scanout->x, scanout->y,
                                scanout->width, scanout->height);

        pixman_region_intersect(&finalregion, &flush_region, &region);
        pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
        extents = pixman_region_extents(&finalregion);
        /* work out the area we need to update for each console */
        dpy_gfx_update(g->scanout[i].con,
                       extents->x1, extents->y1,
                       extents->x2 - extents->x1,
                       extents->y2 - extents->y1);

        pixman_region_fini(&region);
        pixman_region_fini(&finalregion);
    }
    pixman_region_fini(&flush_region);
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res, *ores;
    struct virtio_gpu_scanout *scanout;
    pixman_format_code_t format;
    uint32_t offset;
    int bpp;
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    g->enable = 1;
    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    /* create a surface for this scanout */
    res = virtio_gpu_find_resource(g, ss.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ss.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (ss.r.x > res->width ||
        ss.r.y > res->height ||
        ss.r.width > res->width ||
        ss.r.height > res->height ||
        ss.r.x + ss.r.width > res->width ||
        ss.r.y + ss.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, (%d,%d)+%d,%d vs %d %d\n",
                      __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
                      ss.r.width, ss.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    scanout = &g->scanout[ss.scanout_id];

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image);
    if (!scanout->ds || surface_data(scanout->ds)
        != ((uint8_t *)pixman_image_get_data(res->image) + offset) ||
        scanout->width != ss.r.width ||
        scanout->height != ss.r.height) {
        pixman_image_t *rect;
        void *ptr = (uint8_t *)pixman_image_get_data(res->image) + offset;
        rect = pixman_image_create_bits(format, ss.r.width, ss.r.height, ptr,
                                        pixman_image_get_stride(res->image));
        pixman_image_ref(res->image);
        pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                          res->image);
        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        if (!scanout->ds) {
            cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            return;
        }
        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
    }

    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << ss.scanout_id);
    }

    res->scanout_bitmask |= (1 << ss.scanout_id);
    scanout->resource_id = ss.resource_id;
    scanout->x = ss.r.x;
    scanout->y = ss.r.y;
    scanout->width = ss.r.width;
    scanout->height = ss.r.height;
}

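/*
 * Translate the guest-provided memory entries of an attach_backing command
 * into a host iovec array, mapping each entry through the device's DMA
 * address space.  Returns 0 on success, -1 (with all mappings undone) on
 * failure.
 */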
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  struct virtio_gpu_resource_attach_backing *ab,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int i;

    if (ab->nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, ab->nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * ab->nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   sizeof(*ab), ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
    if (addr) {
        *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
    }
    for (i = 0; i < ab->nr_entries; i++) {
        uint64_t a = le64_to_cpu(ents[i].addr);
        uint32_t l = le32_to_cpu(ents[i].length);
        hwaddr len = l;
        (*iov)[i].iov_len = l;
        (*iov)[i].iov_base = dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                                            a, &len, DMA_DIRECTION_TO_DEVICE);
        if (addr) {
            (*addr)[i] = a;
        }
        if (!(*iov)[i].iov_base || len != l) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                          " resource %d element %d\n",
                          __func__, ab->resource_id, i);
            virtio_gpu_cleanup_mapping_iov(g, *iov, i);
            g_free(ents);
            *iov = NULL;
            if (addr) {
                g_free(*addr);
                *addr = NULL;
            }
            return -1;
        }
    }
    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                       struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, &ab, cmd, &res->addrs, &res->iov);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    res->iov_cnt = ab.nr_entries;
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_resource(g, detach.resource_id);
    if (!res || !res->iov) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, detach.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                        VIRTIO_GPU_RESP_OK_NODATA);
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

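/*
 * Drain queued control commands.  Processing stops while the renderer is
 * blocked; fenced commands that do not finish immediately stay on the
 * fence queue and are counted as in flight.
 */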
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        cmd->waiting = g->renderer_blocked;
        if (cmd->waiting) {
            break;
        }

        /* process command */
        VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd,
              g, cmd);

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

#ifdef CONFIG_VIRGL
    if (!g->renderer_inited && g->use_virgl_renderer) {
        virtio_gpu_virgl_init(g);
        g->renderer_inited = true;
    }
#endif

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        cmd->waiting = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_fence_poll(g);
    }
#endif
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
}

static void virtio_gpu_invalidate_display(void *opaque)
{
}

static void virtio_gpu_update_display(void *opaque)
{
}

static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
{
}

static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOGPU *g = opaque;

    if (idx >= g->conf.max_outputs) {
        return -1;
    }

    g->req_state[idx].x = info->xoff;
    g->req_state[idx].y = info->yoff;
    g->req_state[idx].width = info->width;
    g->req_state[idx].height = info->height;

    if (info->width && info->height) {
        g->enabled_output_bitmask |= (1 << idx);
    } else {
        g->enabled_output_bitmask &= ~(1 << idx);
    }

    /* send event to guest */
    virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
    return 0;
}

static void virtio_gpu_gl_block(void *opaque, bool block)
{
    VirtIOGPU *g = opaque;

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        virtio_gpu_process_cmdq(g);
    }
}

const GraphicHwOps virtio_gpu_ops = {
    .invalidate = virtio_gpu_invalidate_display,
    .gfx_update = virtio_gpu_update_display,
    .text_update = virtio_gpu_text_update,
    .ui_info = virtio_gpu_ui_info,
    .gl_block = virtio_gpu_gl_block,
};

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
                                     conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

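/*
 * Migration format: each resource is written as id, width, height, format,
 * iov count, the guest address and length of each backing entry, and the
 * raw pixel data; a resource id of 0 terminates the list.  Scanout state
 * follows via vmstate_virtio_gpu_scanouts.
 */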
static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, QJSON *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_scanout *scanout;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }
        res->image = pixman_image_create_bits(pformat,
                                              res->width, res->height,
                                              NULL, 0);
        if (!res->image) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        /* restore mapping */
        for (i = 0; i < res->iov_cnt; i++) {
            hwaddr len = res->iov[i].iov_len;
            res->iov[i].iov_base =
                dma_memory_map(VIRTIO_DEVICE(g)->dma_as,
                               res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE);

            if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
                /* Clean up the half-a-mapping we just created... */
                if (res->iov[i].iov_base) {
                    dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                                     res->iov[i].iov_base,
                                     res->iov[i].iov_len,
                                     DMA_DIRECTION_TO_DEVICE,
                                     res->iov[i].iov_len);
                }
                /* ...and the mappings for previous loop iterations */
                res->iov_cnt = i;
                virtio_gpu_cleanup_mapping(g, res);
                pixman_image_unref(res->image);
                g_free(res);
                return -EINVAL;
            }
        }

        QTAILQ_INSERT_HEAD(&g->reslist, res, next);
        g->hostmem += res->hostmem;

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
    for (i = 0; i < g->conf.max_outputs; i++) {
        scanout = &g->scanout[i];
        if (!scanout->resource_id) {
            continue;
        }
        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }
        scanout->ds = qemu_create_displaysurface_pixman(res->image);
        if (!scanout->ds) {
            return -EINVAL;
        }

        dpy_gfx_replace_surface(scanout->con, scanout->ds);
        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

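/*
 * Realize: validate max_outputs, add a migration blocker when virgl is
 * enabled, create the control and cursor virtqueues (a larger control
 * queue in 3D mode), and register one console per scanout.
 */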
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    bool have_virgl;
    Error *local_err = NULL;
    int i;

    if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
        error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
        return;
    }

    g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
    have_virgl = false;
#else
    have_virgl = display_opengl;
#endif
    if (!have_virgl) {
        g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
    }

    if (virtio_gpu_virgl_enabled(g->conf)) {
        error_setg(&g->migration_blocker, "virgl is not yet migratable");
        migrate_add_blocker(g->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(g->migration_blocker);
            return;
        }
    }

    g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
    virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
                sizeof(struct virtio_gpu_config));

    g->req_state[0].width = g->conf.xres;
    g->req_state[0].height = g->conf.yres;

    if (virtio_gpu_virgl_enabled(g->conf)) {
        /* use larger control queue in 3d mode */
        g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);

#if defined(CONFIG_VIRGL)
        g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
#else
        g->virtio_config.num_capsets = 0;
#endif
    } else {
        g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
        g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
    }

    g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
    g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);

    g->enabled_output_bitmask = 1;

    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].con =
            graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
        if (i > 0) {
            dpy_gfx_replace_surface(g->scanout[i].con, NULL);
        }
    }
}

static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);
    if (g->migration_blocker) {
        migrate_del_blocker(g->migration_blocker);
        error_free(g->migration_blocker);
    }
}

static void virtio_gpu_instance_init(Object *obj)
{
}

void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_simple_resource *res, *tmp;
    int i;

    g->enable = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        virtio_gpu_resource_destroy(g, res);
    }
    for (i = 0; i < g->conf.max_outputs; i++) {
        g->scanout[i].resource_id = 0;
        g->scanout[i].width = 0;
        g->scanout[i].height = 0;
        g->scanout[i].x = 0;
        g->scanout[i].y = 0;
        g->scanout[i].ds = NULL;
    }

#ifdef CONFIG_VIRGL
    if (g->use_virgl_renderer) {
        virtio_gpu_virgl_reset(g);
        g->use_virgl_renderer = 0;
    }
#endif
}

/*
 * For historical reasons virtio_gpu does not adhere to the virtio migration
 * scheme as described in doc/virtio-migration.txt, in the sense that no
 * save/load callbacks are provided to the core. Instead the device data
 * is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_gpu_properties[] = {
    DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB),
#ifdef CONFIG_VIRGL
    DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
    DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
                    VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
    DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
    DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;
    vdc->get_features = virtio_gpu_get_features;
    vdc->set_features = virtio_gpu_set_features;

    vdc->reset = virtio_gpu_reset;

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_properties;
    dc->vmsd = &vmstate_virtio_gpu;
    dc->hotpluggable = false;
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOGPU),
    .instance_init = virtio_gpu_instance_init,
    .class_init = virtio_gpu_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);

QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);