/*
 * Virtio GPU Device
 *
 * Copyright Red Hat, Inc. 2013-2014
 *
 * Authors:
 *     Dave Airlie <airlied@redhat.com>
 *     Gerd Hoffmann <kraxel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/iov.h"
#include "system/cpus.h"
#include "ui/console.h"
#include "ui/rect.h"
#include "trace.h"
#include "system/dma.h"
#include "system/system.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "hw/virtio/virtio-gpu.h"
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/memfd.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "qemu/error-report.h"

#define VIRTIO_GPU_VM_VERSION 1

static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error);

static void virtio_gpu_reset_bh(void *opaque);

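/*
 * Copy the pixel data for @resource_id into the scanout's current
 * cursor, after checking that the backing blob or pixman image is
 * large enough for the cursor dimensions.
 */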
void virtio_gpu_update_cursor_data(VirtIOGPU *g,
                                   struct virtio_gpu_scanout *s,
                                   uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;
    uint32_t pixels;
    void *data;

    res = virtio_gpu_find_check_resource(g, resource_id, false,
                                         __func__, NULL);
    if (!res) {
        return;
    }

    if (res->blob_size) {
        if (res->blob_size < (s->current_cursor->width *
                              s->current_cursor->height * 4)) {
            return;
        }
        data = res->blob;
    } else {
        if (pixman_image_get_width(res->image) != s->current_cursor->width ||
            pixman_image_get_height(res->image) != s->current_cursor->height) {
            return;
        }
        data = pixman_image_get_data(res->image);
    }

    pixels = s->current_cursor->width * s->current_cursor->height;
    memcpy(s->current_cursor->data, data,
           pixels * sizeof(uint32_t));
}

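/*
 * Handle VIRTIO_GPU_CMD_UPDATE_CURSOR and VIRTIO_GPU_CMD_MOVE_CURSOR:
 * a move only updates the stored position, while an update (re)defines
 * the cursor shape from the referenced resource and hands it to the
 * console via dpy_cursor_define().
 */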
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
    struct virtio_gpu_scanout *s;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;

    if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
        return;
    }
    s = &g->parent_obj.scanout[cursor->pos.scanout_id];

    trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
                                   cursor->pos.x,
                                   cursor->pos.y,
                                   move ? "move" : "update",
                                   cursor->resource_id);

    if (!move) {
        if (!s->current_cursor) {
            s->current_cursor = cursor_alloc(64, 64);
        }

        s->current_cursor->hot_x = cursor->hot_x;
        s->current_cursor->hot_y = cursor->hot_y;

        if (cursor->resource_id > 0) {
            vgc->update_cursor_data(g, s, cursor->resource_id);
        }
        dpy_cursor_define(s->con, s->current_cursor);

        s->cursor = *cursor;
    } else {
        s->cursor.pos.x = cursor->pos.x;
        s->cursor.pos.y = cursor->pos.y;
    }
    dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y, cursor->resource_id);
}

struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
    struct virtio_gpu_simple_resource *res;

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->resource_id == resource_id) {
            return res;
        }
    }
    return NULL;
}

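/*
 * Like virtio_gpu_find_resource(), but logs a guest error and
 * optionally reports an error code when the resource is missing or,
 * with @require_backing, has no attached backing storage.
 */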
static struct virtio_gpu_simple_resource *
virtio_gpu_find_check_resource(VirtIOGPU *g, uint32_t resource_id,
                               bool require_backing,
                               const char *caller, uint32_t *error)
{
    struct virtio_gpu_simple_resource *res;

    res = virtio_gpu_find_resource(g, resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid resource specified %d\n",
                      caller, resource_id);
        if (error) {
            *error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        }
        return NULL;
    }

    if (require_backing) {
        if (!res->iov || (!res->image && !res->blob)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: no backing storage %d\n",
                          caller, resource_id);
            if (error) {
                *error = VIRTIO_GPU_RESP_ERR_UNSPEC;
            }
            return NULL;
        }
    }

    return res;
}

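/*
 * Write @resp back into the request's in-sg and push it onto the
 * virtqueue. If the request carried a fence, the fence id and context
 * are propagated into the response header first.
 */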
void virtio_gpu_ctrl_response(VirtIOGPU *g,
                              struct virtio_gpu_ctrl_command *cmd,
                              struct virtio_gpu_ctrl_hdr *resp,
                              size_t resp_len)
{
    size_t s;

    if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
        resp->flags |= VIRTIO_GPU_FLAG_FENCE;
        resp->fence_id = cmd->cmd_hdr.fence_id;
        resp->ctx_id = cmd->cmd_hdr.ctx_id;
    }
    virtio_gpu_ctrl_hdr_bswap(resp);
    s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
    if (s != resp_len) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: response size incorrect %zu vs %zu\n",
                      __func__, s, resp_len);
    }
    virtqueue_push(cmd->vq, &cmd->elem, s);
    virtio_notify(VIRTIO_DEVICE(g), cmd->vq);
    cmd->finished = true;
}

void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
                                     struct virtio_gpu_ctrl_command *cmd,
                                     enum virtio_gpu_ctrl_type type)
{
    struct virtio_gpu_ctrl_hdr resp;

    memset(&resp, 0, sizeof(resp));
    resp.type = type;
    virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}

void virtio_gpu_get_display_info(VirtIOGPU *g,
                                 struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_display_info display_info;

    trace_virtio_gpu_cmd_get_display_info();
    memset(&display_info, 0, sizeof(display_info));
    display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
    virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
    virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
                             sizeof(display_info));
}

void virtio_gpu_get_edid(VirtIOGPU *g,
                         struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_resp_edid edid;
    struct virtio_gpu_cmd_get_edid get_edid;
    VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);

    VIRTIO_GPU_FILL_CMD(get_edid);
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));

    if (get_edid.scanout >= b->conf.max_outputs) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
    memset(&edid, 0, sizeof(edid));
    edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
    virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}

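/*
 * Host memory accounting for a width x height image: the row size in
 * bits is rounded up to a 32-bit boundary and converted to bytes.
 * For example, a 32 bpp 1024x768 image gets stride 4096 and
 * 4096 * 768 = 3 MiB of host memory.
 */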
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
                                   uint32_t width, uint32_t height)
{
    /* Copied from pixman/pixman-bits-image.c, skipping the integer
     * overflow check. pixman_image_create_bits will fail in case it
     * overflows.
     */

    int bpp = PIXMAN_FORMAT_BPP(pformat);
    int stride = ((width * bpp + 0x1f) >> 5) * sizeof(uint32_t);
    return height * stride;
}

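/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: allocate a host pixman image for
 * the resource, provided the guest format is supported and the new
 * allocation stays within the max_hostmem accounting limit.
 */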
static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
                                          struct virtio_gpu_ctrl_command *cmd)
{
    pixman_format_code_t pformat;
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_2d c2d;

    VIRTIO_GPU_FILL_CMD(c2d);
    virtio_gpu_bswap_32(&c2d, sizeof(c2d));
    trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
                                       c2d.width, c2d.height);

    if (c2d.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = virtio_gpu_find_resource(g, c2d.resource_id);
    if (res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, c2d.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);

    res->width = c2d.width;
    res->height = c2d.height;
    res->format = c2d.format;
    res->resource_id = c2d.resource_id;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
    if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
        if (!qemu_pixman_image_new_shareable(
                &res->image,
                &res->share_handle,
                "virtio-gpu res",
                pformat,
                c2d.width,
                c2d.height,
                c2d.height ? res->hostmem / c2d.height : 0,
                &error_warn)) {
            goto end;
        }
    }

end:
    if (!res->image) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: resource creation failed %d %d %d\n",
                      __func__, c2d.resource_id, c2d.width, c2d.height);
        g_free(res);
        cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
        return;
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
}

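/*
 * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: the resource is backed by
 * guest memory mapped into an iov instead of a host pixman image.
 * Requests that neither use guest memory nor carry the USE_SHAREABLE
 * flag are rejected.
 */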
static void virtio_gpu_resource_create_blob(VirtIOGPU *g,
                                            struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_create_blob cblob;
    int ret;

    VIRTIO_GPU_FILL_CMD(cblob);
    virtio_gpu_create_blob_bswap(&cblob);
    trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size);

    if (cblob.resource_id == 0) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_GUEST &&
        cblob.blob_flags != VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid memory type\n",
                      __func__);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    if (virtio_gpu_find_resource(g, cblob.resource_id)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n",
                      __func__, cblob.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    res = g_new0(struct virtio_gpu_simple_resource, 1);
    res->resource_id = cblob.resource_id;
    res->blob_size = cblob.size;

    ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob),
                                        cmd, &res->addrs, &res->iov,
                                        &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        g_free(res);
        return;
    }

    virtio_gpu_init_udmabuf(res);
    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
}

void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
    struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
    struct virtio_gpu_simple_resource *res;

    if (scanout->resource_id == 0) {
        return;
    }

    res = virtio_gpu_find_resource(g, scanout->resource_id);
    if (res) {
        res->scanout_bitmask &= ~(1 << scanout_id);
    }

    dpy_gfx_replace_surface(scanout->con, NULL);
    scanout->resource_id = 0;
    scanout->ds = NULL;
    scanout->width = 0;
    scanout->height = 0;
}

static void virtio_gpu_resource_destroy(VirtIOGPU *g,
                                        struct virtio_gpu_simple_resource *res,
                                        Error **errp)
{
    int i;

    if (res->scanout_bitmask) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            if (res->scanout_bitmask & (1 << i)) {
                virtio_gpu_disable_scanout(g, i);
            }
        }
    }

    qemu_pixman_image_unref(res->image);
    virtio_gpu_cleanup_mapping(g, res);
    QTAILQ_REMOVE(&g->reslist, res, next);
    g->hostmem -= res->hostmem;
    g_free(res);
}

static void virtio_gpu_resource_unref(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_unref unref;

    VIRTIO_GPU_FILL_CMD(unref);
    virtio_gpu_bswap_32(&unref, sizeof(unref));
    trace_virtio_gpu_cmd_res_unref(unref.resource_id);

    res = virtio_gpu_find_resource(g, unref.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, unref.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }
    /*
     * virtio_gpu_resource_destroy does not set any errors, so pass a NULL errp
     * to ignore them.
     */
    virtio_gpu_resource_destroy(g, res, NULL);
}

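/*
 * VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: copy a rectangle of pixel data
 * from the guest-provided backing iov into the host pixman image.
 * Full-width transfers starting at x == 0 are done as a single copy;
 * otherwise the rectangle is copied row by row.
 */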
static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
                                           struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    int h, bpp;
    uint32_t src_offset, dst_offset, stride;
    pixman_format_code_t format;
    struct virtio_gpu_transfer_to_host_2d t2d;
    void *img_data;

    VIRTIO_GPU_FILL_CMD(t2d);
    virtio_gpu_t2d_bswap(&t2d);
    trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);

    res = virtio_gpu_find_check_resource(g, t2d.resource_id, true,
                                         __func__, &cmd->error);
    if (!res || res->blob) {
        return;
    }

    if (t2d.r.x > res->width ||
        t2d.r.y > res->height ||
        t2d.r.width > res->width ||
        t2d.r.height > res->height ||
        t2d.r.x + t2d.r.width > res->width ||
        t2d.r.y + t2d.r.height > res->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
                      t2d.r.width, t2d.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    format = pixman_image_get_format(res->image);
    bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
    stride = pixman_image_get_stride(res->image);
    img_data = pixman_image_get_data(res->image);

    if (t2d.r.x || t2d.r.width != pixman_image_get_width(res->image)) {
        for (h = 0; h < t2d.r.height; h++) {
            src_offset = t2d.offset + stride * h;
            dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);

            iov_to_buf(res->iov, res->iov_cnt, src_offset,
                       (uint8_t *)img_data + dst_offset,
                       t2d.r.width * bpp);
        }
    } else {
        src_offset = t2d.offset;
        dst_offset = t2d.r.y * stride + t2d.r.x * bpp;
        iov_to_buf(res->iov, res->iov_cnt, src_offset,
                   (uint8_t *)img_data + dst_offset,
                   stride * t2d.r.height);
    }
}

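/*
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH: propagate updated pixels to the
 * display. Blob resources shown on a GL console are flushed via
 * dpy_gl_update(); otherwise the flush rectangle is intersected with
 * each scanout and dpy_gfx_update() is called per console.
 */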
static void virtio_gpu_resource_flush(VirtIOGPU *g,
                                      struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_flush rf;
    struct virtio_gpu_scanout *scanout;
    QemuRect flush_rect;
    bool within_bounds = false;
    bool update_submitted = false;
    int i;

    VIRTIO_GPU_FILL_CMD(rf);
    virtio_gpu_bswap_32(&rf, sizeof(rf));
    trace_virtio_gpu_cmd_res_flush(rf.resource_id,
                                   rf.r.width, rf.r.height, rf.r.x, rf.r.y);

    res = virtio_gpu_find_check_resource(g, rf.resource_id, false,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (res->blob) {
        for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
            scanout = &g->parent_obj.scanout[i];
            if (scanout->resource_id == res->resource_id &&
                rf.r.x < scanout->x + scanout->width &&
                rf.r.x + rf.r.width >= scanout->x &&
                rf.r.y < scanout->y + scanout->height &&
                rf.r.y + rf.r.height >= scanout->y) {
                within_bounds = true;

                if (console_has_gl(scanout->con)) {
                    dpy_gl_update(scanout->con, 0, 0, scanout->width,
                                  scanout->height);
                    update_submitted = true;
                }
            }
        }

        if (update_submitted) {
            return;
        }
        if (!within_bounds) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside scanouts"
                          " bounds for flush %d: %d %d %d %d\n",
                          __func__, rf.resource_id, rf.r.x, rf.r.y,
                          rf.r.width, rf.r.height);
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            return;
        }
    }

    if (!res->blob &&
        (rf.r.x > res->width ||
         rf.r.y > res->height ||
         rf.r.width > res->width ||
         rf.r.height > res->height ||
         rf.r.x + rf.r.width > res->width ||
         rf.r.y + rf.r.height > res->height)) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource"
                      " bounds for resource %d: %d %d %d %d vs %d %d\n",
                      __func__, rf.resource_id, rf.r.x, rf.r.y,
                      rf.r.width, rf.r.height, res->width, res->height);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    qemu_rect_init(&flush_rect, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        QemuRect rect;

        if (!(res->scanout_bitmask & (1 << i))) {
            continue;
        }
        scanout = &g->parent_obj.scanout[i];

        qemu_rect_init(&rect, scanout->x, scanout->y,
                       scanout->width, scanout->height);

        /* work out the area we need to update for each console */
        if (qemu_rect_intersect(&flush_rect, &rect, &rect)) {
            qemu_rect_translate(&rect, -scanout->x, -scanout->y);
            dpy_gfx_update(g->parent_obj.scanout[i].con,
                           rect.x, rect.y, rect.width, rect.height);
        }
    }
}

static void virtio_unref_resource(pixman_image_t *image, void *data)
{
    pixman_image_unref(data);
}

void virtio_gpu_update_scanout(VirtIOGPU *g,
                               uint32_t scanout_id,
                               struct virtio_gpu_simple_resource *res,
                               struct virtio_gpu_framebuffer *fb,
                               struct virtio_gpu_rect *r)
{
    struct virtio_gpu_simple_resource *ores;
    struct virtio_gpu_scanout *scanout;

    scanout = &g->parent_obj.scanout[scanout_id];
    ores = virtio_gpu_find_resource(g, scanout->resource_id);
    if (ores) {
        ores->scanout_bitmask &= ~(1 << scanout_id);
    }

    res->scanout_bitmask |= (1 << scanout_id);
    scanout->resource_id = res->resource_id;
    scanout->x = r->x;
    scanout->y = r->y;
    scanout->width = r->width;
    scanout->height = r->height;
    scanout->fb = *fb;
}

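/*
 * Common scanout setup for VIRTIO_GPU_CMD_SET_SCANOUT and
 * VIRTIO_GPU_CMD_SET_SCANOUT_BLOB: validate the rectangle against the
 * framebuffer, then either hand a blob off to the GL console as a
 * dmabuf or (re)create a display surface over the resource pixels.
 */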
static bool virtio_gpu_do_set_scanout(VirtIOGPU *g,
                                      uint32_t scanout_id,
                                      struct virtio_gpu_framebuffer *fb,
                                      struct virtio_gpu_simple_resource *res,
                                      struct virtio_gpu_rect *r,
                                      uint32_t *error)
{
    struct virtio_gpu_scanout *scanout;
    uint8_t *data;

    scanout = &g->parent_obj.scanout[scanout_id];

    if (r->x > fb->width ||
        r->y > fb->height ||
        r->width < 16 ||
        r->height < 16 ||
        r->width > fb->width ||
        r->height > fb->height ||
        r->x + r->width > fb->width ||
        r->y + r->height > fb->height) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for"
                      " resource %d, rect (%d,%d)+%d,%d, fb %d %d\n",
                      __func__, scanout_id, res->resource_id,
                      r->x, r->y, r->width, r->height,
                      fb->width, fb->height);
        *error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return false;
    }

    g->parent_obj.enable = 1;

    if (res->blob) {
        if (console_has_gl(scanout->con)) {
            if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) {
                virtio_gpu_update_scanout(g, scanout_id, res, fb, r);
            } else {
                *error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
                return false;
            }
            return true;
        }

        data = res->blob;
    } else {
        data = (uint8_t *)pixman_image_get_data(res->image);
    }

    /* create a surface for this scanout */
    if ((res->blob && !console_has_gl(scanout->con)) ||
        !scanout->ds ||
        surface_data(scanout->ds) != data + fb->offset ||
        scanout->width != r->width ||
        scanout->height != r->height) {
        pixman_image_t *rect;
        void *ptr = data + fb->offset;
        rect = pixman_image_create_bits(fb->format, r->width, r->height,
                                        ptr, fb->stride);

        if (res->image) {
            pixman_image_ref(res->image);
            pixman_image_set_destroy_function(rect, virtio_unref_resource,
                                              res->image);
        }

        /* realloc the surface ptr */
        scanout->ds = qemu_create_displaysurface_pixman(rect);
        qemu_displaysurface_set_share_handle(scanout->ds, res->share_handle, fb->offset);

        pixman_image_unref(rect);
        dpy_gfx_replace_surface(g->parent_obj.scanout[scanout_id].con,
                                scanout->ds);
    }

    virtio_gpu_update_scanout(g, scanout_id, res, fb, r);
    return true;
}

static void virtio_gpu_set_scanout(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_bswap_32(&ss, sizeof(ss));
    trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
                                     ss.r.width, ss.r.height, ss.r.x, ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    fb.format = pixman_image_get_format(res->image);
    fb.bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb.format), 8);
    fb.width = pixman_image_get_width(res->image);
    fb.height = pixman_image_get_height(res->image);
    fb.stride = pixman_image_get_stride(res->image);
    fb.offset = ss.r.x * fb.bytes_pp + ss.r.y * fb.stride;

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

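/*
 * Translate SET_SCANOUT_BLOB parameters into a framebuffer layout and
 * check that the rectangle's rows still fit inside the blob.
 */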
bool virtio_gpu_scanout_blob_to_fb(struct virtio_gpu_framebuffer *fb,
                                   struct virtio_gpu_set_scanout_blob *ss,
                                   uint64_t blob_size)
{
    uint64_t fbend;

    fb->format = virtio_gpu_get_pixman_format(ss->format);
    if (!fb->format) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, ss->format);
        return false;
    }

    fb->bytes_pp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(fb->format), 8);
    fb->width = ss->width;
    fb->height = ss->height;
    fb->stride = ss->strides[0];
    fb->offset = ss->offsets[0] + ss->r.x * fb->bytes_pp + ss->r.y * fb->stride;

    fbend = fb->offset;
    fbend += (uint64_t) fb->stride * ss->r.height;

    if (fbend > blob_size) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: fb end out of range\n",
                      __func__);
        return false;
    }

    return true;
}

static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
                                        struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_framebuffer fb = { 0 };
    struct virtio_gpu_set_scanout_blob ss;

    VIRTIO_GPU_FILL_CMD(ss);
    virtio_gpu_scanout_blob_bswap(&ss);
    trace_virtio_gpu_cmd_set_scanout_blob(ss.scanout_id, ss.resource_id,
                                          ss.r.width, ss.r.height, ss.r.x,
                                          ss.r.y);

    if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
                      __func__, ss.scanout_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
        return;
    }

    if (ss.resource_id == 0) {
        virtio_gpu_disable_scanout(g, ss.scanout_id);
        return;
    }

    res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }

    if (!virtio_gpu_scanout_blob_to_fb(&fb, &ss, res->blob_size)) {
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }

    virtio_gpu_do_set_scanout(g, ss.scanout_id,
                              &fb, res, &ss.r, &cmd->error);
}

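/*
 * Read up to @nr_entries virtio_gpu_mem_entry structs that follow the
 * command header and dma-map each guest range, growing *iov (and
 * optionally *addr) in batches of 16. A single entry may occupy
 * several iov slots when dma_memory_map() returns a shorter length
 * than requested.
 */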
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
                                  uint32_t nr_entries, uint32_t offset,
                                  struct virtio_gpu_ctrl_command *cmd,
                                  uint64_t **addr, struct iovec **iov,
                                  uint32_t *niov)
{
    struct virtio_gpu_mem_entry *ents;
    size_t esize, s;
    int e, v;

    if (nr_entries > 16384) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: nr_entries is too big (%d > 16384)\n",
                      __func__, nr_entries);
        return -1;
    }

    esize = sizeof(*ents) * nr_entries;
    ents = g_malloc(esize);
    s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
                   offset, ents, esize);
    if (s != esize) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: command data size incorrect %zu vs %zu\n",
                      __func__, s, esize);
        g_free(ents);
        return -1;
    }

    *iov = NULL;
    if (addr) {
        *addr = NULL;
    }
    for (e = 0, v = 0; e < nr_entries; e++) {
        uint64_t a = le64_to_cpu(ents[e].addr);
        uint32_t l = le32_to_cpu(ents[e].length);
        hwaddr len;
        void *map;

        do {
            len = l;
            map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len,
                                 DMA_DIRECTION_TO_DEVICE,
                                 MEMTXATTRS_UNSPECIFIED);
            if (!map) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
                              " element %d\n", __func__, e);
                virtio_gpu_cleanup_mapping_iov(g, *iov, v);
                g_free(ents);
                *iov = NULL;
                if (addr) {
                    g_free(*addr);
                    *addr = NULL;
                }
                return -1;
            }

            if (!(v % 16)) {
                *iov = g_renew(struct iovec, *iov, v + 16);
                if (addr) {
                    *addr = g_renew(uint64_t, *addr, v + 16);
                }
            }
            (*iov)[v].iov_base = map;
            (*iov)[v].iov_len = len;
            if (addr) {
                (*addr)[v] = a;
            }

            a += len;
            l -= len;
            v += 1;
        } while (l > 0);
    }
    *niov = v;

    g_free(ents);
    return 0;
}

void virtio_gpu_cleanup_mapping_iov(VirtIOGPU *g,
                                    struct iovec *iov, uint32_t count)
{
    int i;

    for (i = 0; i < count; i++) {
        dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as,
                         iov[i].iov_base, iov[i].iov_len,
                         DMA_DIRECTION_TO_DEVICE,
                         iov[i].iov_len);
    }
    g_free(iov);
}

void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
                                struct virtio_gpu_simple_resource *res)
{
    virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt);
    res->iov = NULL;
    res->iov_cnt = 0;
    g_free(res->addrs);
    res->addrs = NULL;

    if (res->blob) {
        virtio_gpu_fini_udmabuf(res);
    }
}

static void
virtio_gpu_resource_attach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_attach_backing ab;
    int ret;

    VIRTIO_GPU_FILL_CMD(ab);
    virtio_gpu_bswap_32(&ab, sizeof(ab));
    trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);

    res = virtio_gpu_find_resource(g, ab.resource_id);
    if (!res) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n",
                      __func__, ab.resource_id);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
        return;
    }

    if (res->iov) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }

    ret = virtio_gpu_create_mapping_iov(g, ab.nr_entries, sizeof(ab), cmd,
                                        &res->addrs, &res->iov, &res->iov_cnt);
    if (ret != 0) {
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        return;
    }
}

static void
virtio_gpu_resource_detach_backing(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    struct virtio_gpu_simple_resource *res;
    struct virtio_gpu_resource_detach_backing detach;

    VIRTIO_GPU_FILL_CMD(detach);
    virtio_gpu_bswap_32(&detach, sizeof(detach));
    trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);

    res = virtio_gpu_find_check_resource(g, detach.resource_id, true,
                                         __func__, &cmd->error);
    if (!res) {
        return;
    }
    virtio_gpu_cleanup_mapping(g, res);
}

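/*
 * Dispatch a single control-queue command to its handler. Unless the
 * handler already completed the request (cmd->finished), a NODATA
 * response carrying cmd->error is sent once the renderer is not
 * blocked.
 */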
void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
                                   struct virtio_gpu_ctrl_command *cmd)
{
    VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
    virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);

    switch (cmd->cmd_hdr.type) {
    case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
        virtio_gpu_get_display_info(g, cmd);
        break;
    case VIRTIO_GPU_CMD_GET_EDID:
        virtio_gpu_get_edid(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
        virtio_gpu_resource_create_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_resource_create_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_UNREF:
        virtio_gpu_resource_unref(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
        virtio_gpu_resource_flush(g, cmd);
        break;
    case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
        virtio_gpu_transfer_to_host_2d(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT:
        virtio_gpu_set_scanout(g, cmd);
        break;
    case VIRTIO_GPU_CMD_SET_SCANOUT_BLOB:
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
            cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
            break;
        }
        virtio_gpu_set_scanout_blob(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
        virtio_gpu_resource_attach_backing(g, cmd);
        break;
    case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
        virtio_gpu_resource_detach_backing(g, cmd);
        break;
    default:
        cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
        break;
    }
    if (!cmd->finished) {
        if (!g->parent_obj.renderer_blocked) {
            virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error :
                                            VIRTIO_GPU_RESP_OK_NODATA);
        }
    }
}

static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->ctrl_bh);
}

static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    qemu_bh_schedule(g->cursor_bh);
}

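/*
 * Drain the queued control commands. Processing stops early when the
 * renderer is blocked or a non-fenced command suspends; fenced
 * commands that did not finish are parked on the fence queue and
 * completed later from virtio_gpu_process_fenceq().
 */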
void virtio_gpu_process_cmdq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    if (g->processing_cmdq) {
        return;
    }
    g->processing_cmdq = true;
    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);

        if (g->parent_obj.renderer_blocked) {
            break;
        }

        /* process command */
        vgc->process_cmd(g, cmd);

        /* command suspended */
        if (!cmd->finished && !(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
            trace_virtio_gpu_cmd_suspended(cmd->cmd_hdr.type);
            break;
        }

        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            g->stats.requests++;
        }

        if (!cmd->finished) {
            QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
            g->inflight++;
            if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
                if (g->stats.max_inflight < g->inflight) {
                    g->stats.max_inflight = g->inflight;
                }
                trace_virtio_gpu_inc_inflight_fences(g->inflight);
            }
        } else {
            g_free(cmd);
        }
    }
    g->processing_cmdq = false;
}

static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
    struct virtio_gpu_ctrl_command *cmd, *tmp;

    QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
        trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id);
        virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g_free(cmd);
        g->inflight--;
        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
            trace_virtio_gpu_dec_inflight_fences(g->inflight);
        }
    }
}

static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
    VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);

    virtio_gpu_process_fenceq(g);
    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (!virtio_queue_ready(vq)) {
        return;
    }

    cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    while (cmd) {
        cmd->vq = vq;
        cmd->error = 0;
        cmd->finished = false;
        QTAILQ_INSERT_TAIL(&g->cmdq, cmd, next);
        cmd = virtqueue_pop(vq, sizeof(struct virtio_gpu_ctrl_command));
    }

    virtio_gpu_process_cmdq(g);
}

static void virtio_gpu_ctrl_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);

    vgc->handle_ctrl(VIRTIO_DEVICE(g), g->ctrl_vq);
}

static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    VirtQueueElement *elem;
    size_t s;
    struct virtio_gpu_update_cursor cursor_info;

    if (!virtio_queue_ready(vq)) {
        return;
    }
    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        s = iov_to_buf(elem->out_sg, elem->out_num, 0,
                       &cursor_info, sizeof(cursor_info));
        if (s != sizeof(cursor_info)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: cursor size incorrect %zu vs %zu\n",
                          __func__, s, sizeof(cursor_info));
        } else {
            virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
            update_cursor(g, &cursor_info);
        }
        virtqueue_push(vq, elem, 0);
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}

static void virtio_gpu_cursor_bh(void *opaque)
{
    VirtIOGPU *g = opaque;
    virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}

static bool scanout_vmstate_after_v2(void *opaque, int version)
{
    struct VirtIOGPUBase *base = container_of(opaque, VirtIOGPUBase, scanout);
    struct VirtIOGPU *gpu = container_of(base, VirtIOGPU, parent_obj);

    return gpu->scanout_vmstate_version >= 2;
}

static const VMStateDescription vmstate_virtio_gpu_scanout = {
    .name = "virtio-gpu-one-scanout",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(width, struct virtio_gpu_scanout),
        VMSTATE_UINT32(height, struct virtio_gpu_scanout),
        VMSTATE_INT32(x, struct virtio_gpu_scanout),
        VMSTATE_INT32(y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
        VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
        VMSTATE_UINT32_TEST(fb.format, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.bytes_pp, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.width, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.height, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.stride, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_UINT32_TEST(fb.offset, struct virtio_gpu_scanout,
                            scanout_vmstate_after_v2),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_gpu_scanouts = {
    .name = "virtio-gpu-scanouts",
    .version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
        VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
                             struct VirtIOGPU, NULL),
        VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
                                     parent_obj.conf.max_outputs, 1,
                                     vmstate_virtio_gpu_scanout,
                                     struct virtio_gpu_scanout),
        VMSTATE_END_OF_LIST()
    },
};

static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (res->blob_size) {
            continue;
        }
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->width);
        qemu_put_be32(f, res->height);
        qemu_put_be32(f, res->format);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
        qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);
    }
    qemu_put_be32(f, 0); /* end of list */

    return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
}

static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g,
                                            struct virtio_gpu_simple_resource *res)
{
    int i;

    for (i = 0; i < res->iov_cnt; i++) {
        hwaddr len = res->iov[i].iov_len;
        res->iov[i].iov_base =
            dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len,
                           DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED);

        if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
            /* Clean up the half-a-mapping we just created... */
            if (res->iov[i].iov_base) {
                dma_memory_unmap(VIRTIO_DEVICE(g)->dma_as, res->iov[i].iov_base,
                                 len, DMA_DIRECTION_TO_DEVICE, 0);
            }
            /* ...and the mappings for previous loop iterations */
            res->iov_cnt = i;
            virtio_gpu_cleanup_mapping(g, res);
            return false;
        }
    }

    QTAILQ_INSERT_HEAD(&g->reslist, res, next);
    g->hostmem += res->hostmem;
    return true;
}

static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
                           const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id, pformat;
    int i;

    g->hostmem = 0;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->width = qemu_get_be32(f);
        res->height = qemu_get_be32(f);
        res->format = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);

        /* allocate */
        pformat = virtio_gpu_get_pixman_format(res->format);
        if (!pformat) {
            g_free(res);
            return -EINVAL;
        }

        res->hostmem = calc_image_hostmem(pformat, res->width, res->height);
        if (!qemu_pixman_image_new_shareable(&res->image,
                                             &res->share_handle,
                                             "virtio-gpu res",
                                             pformat,
                                             res->width,
                                             res->height,
                                             res->height ? res->hostmem / res->height : 0,
                                             &error_warn)) {
            g_free(res);
            return -EINVAL;
        }

        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }
        qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
                        pixman_image_get_stride(res->image) * res->height);

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            pixman_image_unref(res->image);
            g_free(res);
            return -EINVAL;
        }

        resource_id = qemu_get_be32(f);
    }

    /* load & apply scanout state */
    vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);

    return 0;
}

static int virtio_gpu_blob_save(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field, JSONWriter *vmdesc)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    int i;

    /* in 2d mode we should never find unprocessed commands here */
    assert(QTAILQ_EMPTY(&g->cmdq));

    QTAILQ_FOREACH(res, &g->reslist, next) {
        if (!res->blob_size) {
            continue;
        }
        assert(!res->image);
        qemu_put_be32(f, res->resource_id);
        qemu_put_be32(f, res->blob_size);
        qemu_put_be32(f, res->iov_cnt);
        for (i = 0; i < res->iov_cnt; i++) {
            qemu_put_be64(f, res->addrs[i]);
            qemu_put_be32(f, res->iov[i].iov_len);
        }
    }
    qemu_put_be32(f, 0); /* end of list */

    return 0;
}

static int virtio_gpu_blob_load(QEMUFile *f, void *opaque, size_t size,
                                const VMStateField *field)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_simple_resource *res;
    uint32_t resource_id;
    int i;

    resource_id = qemu_get_be32(f);
    while (resource_id != 0) {
        res = virtio_gpu_find_resource(g, resource_id);
        if (res) {
            return -EINVAL;
        }

        res = g_new0(struct virtio_gpu_simple_resource, 1);
        res->resource_id = resource_id;
        res->blob_size = qemu_get_be32(f);
        res->iov_cnt = qemu_get_be32(f);
        res->addrs = g_new(uint64_t, res->iov_cnt);
        res->iov = g_new(struct iovec, res->iov_cnt);

        /* read data */
        for (i = 0; i < res->iov_cnt; i++) {
            res->addrs[i] = qemu_get_be64(f);
            res->iov[i].iov_len = qemu_get_be32(f);
        }

        if (!virtio_gpu_load_restore_mapping(g, res)) {
            g_free(res);
            return -EINVAL;
        }

        virtio_gpu_init_udmabuf(res);

        resource_id = qemu_get_be32(f);
    }

    return 0;
}

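/*
 * After migration, re-establish each active scanout: with v2 scanout
 * state the framebuffer is rebuilt via virtio_gpu_do_set_scanout();
 * with legacy v1 state the display surface is recreated directly from
 * the resource's pixman image.
 */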
static int virtio_gpu_post_load(void *opaque, int version_id)
{
    VirtIOGPU *g = opaque;
    struct virtio_gpu_scanout *scanout;
    struct virtio_gpu_simple_resource *res;
    int i;

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        scanout = &g->parent_obj.scanout[i];
        if (!scanout->resource_id) {
            continue;
        }

        res = virtio_gpu_find_resource(g, scanout->resource_id);
        if (!res) {
            return -EINVAL;
        }

        if (scanout->fb.format != 0) {
            uint32_t error = 0;
            struct virtio_gpu_rect r = {
                .x = scanout->x,
                .y = scanout->y,
                .width = scanout->width,
                .height = scanout->height
            };

            if (!virtio_gpu_do_set_scanout(g, i, &scanout->fb, res, &r, &error)) {
                return -EINVAL;
            }
        } else {
            /* legacy v1 migration support */
            if (!res->image) {
                return -EINVAL;
            }
            scanout->ds = qemu_create_displaysurface_pixman(res->image);
            qemu_displaysurface_set_share_handle(scanout->ds, res->share_handle, 0);
            dpy_gfx_replace_surface(scanout->con, scanout->ds);
        }

        dpy_gfx_update_full(scanout->con);
        if (scanout->cursor.resource_id) {
            update_cursor(g, &scanout->cursor);
        }
        res->scanout_bitmask |= (1 << i);
    }

    return 0;
}

void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    if (virtio_gpu_blob_enabled(g->parent_obj.conf)) {
        if (!virtio_gpu_rutabaga_enabled(g->parent_obj.conf) &&
            !virtio_gpu_virgl_enabled(g->parent_obj.conf) &&
            !virtio_gpu_have_udmabuf()) {
            error_setg(errp, "need rutabaga or udmabuf for blob resources");
            return;
        }

#ifdef VIRGL_VERSION_MAJOR
#if VIRGL_VERSION_MAJOR < 1
        if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) {
            error_setg(errp, "old virglrenderer, blob resources unsupported");
            return;
        }
#endif
#endif
    }

    if (virtio_gpu_venus_enabled(g->parent_obj.conf)) {
#ifdef VIRGL_VERSION_MAJOR
#if VIRGL_VERSION_MAJOR >= 1
        if (!virtio_gpu_blob_enabled(g->parent_obj.conf) ||
            !virtio_gpu_hostmem_enabled(g->parent_obj.conf)) {
            error_setg(errp, "venus requires enabled blob and hostmem options");
            return;
        }
#else
        error_setg(errp, "old virglrenderer, venus unsupported");
        return;
#endif
#endif
    }

    if (!virtio_gpu_base_device_realize(qdev,
                                        virtio_gpu_handle_ctrl_cb,
                                        virtio_gpu_handle_cursor_cb,
                                        errp)) {
        return;
    }

    g->ctrl_vq = virtio_get_queue(vdev, 0);
    g->cursor_vq = virtio_get_queue(vdev, 1);
    g->ctrl_bh = virtio_bh_new_guarded(qdev, virtio_gpu_ctrl_bh, g);
    g->cursor_bh = virtio_bh_new_guarded(qdev, virtio_gpu_cursor_bh, g);
    g->reset_bh = qemu_bh_new(virtio_gpu_reset_bh, g);
    qemu_cond_init(&g->reset_cond);
    QTAILQ_INIT(&g->reslist);
    QTAILQ_INIT(&g->cmdq);
    QTAILQ_INIT(&g->fenceq);
}

static void virtio_gpu_device_unrealize(DeviceState *qdev)
{
    VirtIOGPU *g = VIRTIO_GPU(qdev);

    g_clear_pointer(&g->ctrl_bh, qemu_bh_delete);
    g_clear_pointer(&g->cursor_bh, qemu_bh_delete);
    g_clear_pointer(&g->reset_bh, qemu_bh_delete);
    qemu_cond_destroy(&g->reset_cond);
    virtio_gpu_base_device_unrealize(qdev);
}

static void virtio_gpu_reset_bh(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);
    VirtIOGPUClass *vgc = VIRTIO_GPU_GET_CLASS(g);
    struct virtio_gpu_simple_resource *res, *tmp;
    uint32_t resource_id;
    Error *local_err = NULL;
    int i = 0;

    QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
        resource_id = res->resource_id;
        vgc->resource_destroy(g, res, &local_err);
        if (local_err) {
            error_append_hint(&local_err, "%s: %s resource_destroy"
                              " for resource_id = %"PRIu32" failed.\n",
                              __func__, object_get_typename(OBJECT(g)),
                              resource_id);
            /* error_report_err frees the error object for us */
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
        dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
    }

    g->reset_finished = true;
    qemu_cond_signal(&g->reset_cond);
}

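/*
 * Device reset runs the resource teardown in a bottom half. When
 * called from a vCPU thread the BH is scheduled and the caller waits
 * on reset_cond under the BQL; otherwise the BH is invoked directly.
 * Pending command and fence queues are drained afterwards.
 */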
void virtio_gpu_reset(VirtIODevice *vdev)
{
    VirtIOGPU *g = VIRTIO_GPU(vdev);
    struct virtio_gpu_ctrl_command *cmd;

    if (qemu_in_vcpu_thread()) {
        g->reset_finished = false;
        qemu_bh_schedule(g->reset_bh);
        while (!g->reset_finished) {
            qemu_cond_wait_bql(&g->reset_cond);
        }
    } else {
        aio_bh_call(g->reset_bh);
    }

    while (!QTAILQ_EMPTY(&g->cmdq)) {
        cmd = QTAILQ_FIRST(&g->cmdq);
        QTAILQ_REMOVE(&g->cmdq, cmd, next);
        g_free(cmd);
    }

    while (!QTAILQ_EMPTY(&g->fenceq)) {
        cmd = QTAILQ_FIRST(&g->fenceq);
        QTAILQ_REMOVE(&g->fenceq, cmd, next);
        g->inflight--;
        g_free(cmd);
    }

    virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
}

static void
virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);

    memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
}

static void
virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
    const struct virtio_gpu_config *vgconfig =
        (const struct virtio_gpu_config *)config;

    if (vgconfig->events_clear) {
        g->virtio_config.events_read &= ~vgconfig->events_clear;
    }
}

static bool virtio_gpu_blob_state_needed(void *opaque)
{
    VirtIOGPU *g = VIRTIO_GPU(opaque);

    return virtio_gpu_blob_enabled(g->parent_obj.conf);
}

const VMStateDescription vmstate_virtio_gpu_blob_state = {
    .name = "virtio-gpu/blob",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .needed = virtio_gpu_blob_state_needed,
    .fields = (const VMStateField[]){
        {
            .name = "virtio-gpu/blob",
            .info = &(const VMStateInfo) {
                .name = "blob",
                .get = virtio_gpu_blob_load,
                .put = virtio_gpu_blob_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
};

/*
 * For historical reasons virtio_gpu does not adhere to the virtio
 * migration scheme as described in doc/virtio-migration.txt, in the
 * sense that no save/load callbacks are provided to the core. Instead
 * the device data is saved/loaded after the core data.
 *
 * Because of this we need a special vmsd.
 */
static const VMStateDescription vmstate_virtio_gpu = {
    .name = "virtio-gpu",
    .minimum_version_id = VIRTIO_GPU_VM_VERSION,
    .version_id = VIRTIO_GPU_VM_VERSION,
    .fields = (const VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE /* core */,
        {
            .name = "virtio-gpu",
            .info = &(const VMStateInfo) {
                .name = "virtio-gpu",
                .get = virtio_gpu_load,
                .put = virtio_gpu_save,
            },
            .flags = VMS_SINGLE,
        } /* device */,
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_virtio_gpu_blob_state,
        NULL
    },
    .post_load = virtio_gpu_post_load,
};

static const Property virtio_gpu_properties[] = {
    VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
    DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
                     256 * MiB),
    DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
                    VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
    DEFINE_PROP_SIZE("hostmem", VirtIOGPU, parent_obj.conf.hostmem, 0),
    DEFINE_PROP_UINT8("x-scanout-vmstate-version", VirtIOGPU,
                      scanout_vmstate_version, 2),
};

static void virtio_gpu_class_init(ObjectClass *klass, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    VirtIOGPUClass *vgc = VIRTIO_GPU_CLASS(klass);
    VirtIOGPUBaseClass *vgbc = &vgc->parent;

    vgc->handle_ctrl = virtio_gpu_handle_ctrl;
    vgc->process_cmd = virtio_gpu_simple_process_cmd;
    vgc->update_cursor_data = virtio_gpu_update_cursor_data;
    vgc->resource_destroy = virtio_gpu_resource_destroy;
    vgbc->gl_flushed = virtio_gpu_handle_gl_flushed;

    vdc->realize = virtio_gpu_device_realize;
    vdc->unrealize = virtio_gpu_device_unrealize;
    vdc->reset = virtio_gpu_reset;
    vdc->get_config = virtio_gpu_get_config;
    vdc->set_config = virtio_gpu_set_config;

    dc->vmsd = &vmstate_virtio_gpu;
    device_class_set_props(dc, virtio_gpu_properties);
}

static const TypeInfo virtio_gpu_info = {
    .name = TYPE_VIRTIO_GPU,
    .parent = TYPE_VIRTIO_GPU_BASE,
    .instance_size = sizeof(VirtIOGPU),
    .class_size = sizeof(VirtIOGPUClass),
    .class_init = virtio_gpu_class_init,
};
module_obj(TYPE_VIRTIO_GPU);
module_kconfig(VIRTIO_GPU);

static void virtio_register_types(void)
{
    type_register_static(&virtio_gpu_info);
}

type_init(virtio_register_types)