1 /*
2 * Vhost User library
3 *
4 * Copyright IBM, Corp. 2007
5 * Copyright (c) 2016 Red Hat, Inc.
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Marc-André Lureau <mlureau@redhat.com>
10 * Victor Kaplansky <victork@redhat.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2 or
13 * later. See the COPYING file in the top-level directory.
14 */
15
16 /* this code avoids the GLib dependency */
17 #include <stdlib.h>
18 #include <stdio.h>
19 #include <unistd.h>
20 #include <stdarg.h>
21 #include <errno.h>
22 #include <string.h>
23 #include <assert.h>
24 #include <inttypes.h>
25 #include <sys/types.h>
26 #include <sys/socket.h>
27 #include <sys/eventfd.h>
28 #include <sys/mman.h>
29 #include "qemu/compiler.h"
30
31 #if defined(__linux__)
32 #include <sys/syscall.h>
33 #include <fcntl.h>
34 #include <sys/ioctl.h>
35 #include <linux/vhost.h>
36
37 #ifdef __NR_userfaultfd
38 #include <linux/userfaultfd.h>
39 #endif
40
41 #endif
42
43 #include "qemu/atomic.h"
44 #include "qemu/osdep.h"
45 #include "qemu/memfd.h"
46
47 #include "libvhost-user.h"
48
49 /* usually provided by GLib */
50 #ifndef MIN
51 #define MIN(x, y) ({ \
52 typeof(x) _min1 = (x); \
53 typeof(y) _min2 = (y); \
54 (void) (&_min1 == &_min2); \
55 _min1 < _min2 ? _min1 : _min2; })
56 #endif
57
58 /* Round number down to multiple */
59 #define ALIGN_DOWN(n, m) ((n) / (m) * (m))
60
61 /* Round number up to multiple */
62 #define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
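/* For example, ALIGN_DOWN(10, 4) == 8 and ALIGN_UP(10, 4) == 12; both expect m > 0. */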
63
64 /* Align each region to cache line size in inflight buffer */
65 #define INFLIGHT_ALIGNMENT 64
66
67 /* The version of inflight buffer */
68 #define INFLIGHT_VERSION 1
69
70 #define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
71
72 /* The version of the protocol we support */
73 #define VHOST_USER_VERSION 1
74 #define LIBVHOST_USER_DEBUG 0
75
76 #define DPRINT(...) \
77 do { \
78 if (LIBVHOST_USER_DEBUG) { \
79 fprintf(stderr, __VA_ARGS__); \
80 } \
81 } while (0)
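/* Set LIBVHOST_USER_DEBUG to 1 above to get the DPRINT() trace output on stderr. */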
82
83 static inline
84 bool has_feature(uint64_t features, unsigned int fbit)
85 {
86 assert(fbit < 64);
87 return !!(features & (1ULL << fbit));
88 }
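/* e.g. has_feature(features, VIRTIO_RING_F_EVENT_IDX) tests bit 29 of the
 * 64-bit feature mask. */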
89
90 static inline
91 bool vu_has_feature(VuDev *dev,
92 unsigned int fbit)
93 {
94 return has_feature(dev->features, fbit);
95 }
96
97 static const char *
98 vu_request_to_string(unsigned int req)
99 {
100 #define REQ(req) [req] = #req
101 static const char *vu_request_str[] = {
102 REQ(VHOST_USER_NONE),
103 REQ(VHOST_USER_GET_FEATURES),
104 REQ(VHOST_USER_SET_FEATURES),
105 REQ(VHOST_USER_SET_OWNER),
106 REQ(VHOST_USER_RESET_OWNER),
107 REQ(VHOST_USER_SET_MEM_TABLE),
108 REQ(VHOST_USER_SET_LOG_BASE),
109 REQ(VHOST_USER_SET_LOG_FD),
110 REQ(VHOST_USER_SET_VRING_NUM),
111 REQ(VHOST_USER_SET_VRING_ADDR),
112 REQ(VHOST_USER_SET_VRING_BASE),
113 REQ(VHOST_USER_GET_VRING_BASE),
114 REQ(VHOST_USER_SET_VRING_KICK),
115 REQ(VHOST_USER_SET_VRING_CALL),
116 REQ(VHOST_USER_SET_VRING_ERR),
117 REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
118 REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
119 REQ(VHOST_USER_GET_QUEUE_NUM),
120 REQ(VHOST_USER_SET_VRING_ENABLE),
121 REQ(VHOST_USER_SEND_RARP),
122 REQ(VHOST_USER_NET_SET_MTU),
123 REQ(VHOST_USER_SET_SLAVE_REQ_FD),
124 REQ(VHOST_USER_IOTLB_MSG),
125 REQ(VHOST_USER_SET_VRING_ENDIAN),
126 REQ(VHOST_USER_GET_CONFIG),
127 REQ(VHOST_USER_SET_CONFIG),
128 REQ(VHOST_USER_POSTCOPY_ADVISE),
129 REQ(VHOST_USER_POSTCOPY_LISTEN),
130 REQ(VHOST_USER_POSTCOPY_END),
131 REQ(VHOST_USER_GET_INFLIGHT_FD),
132 REQ(VHOST_USER_SET_INFLIGHT_FD),
133 REQ(VHOST_USER_GPU_SET_SOCKET),
134 REQ(VHOST_USER_MAX),
135 };
136 #undef REQ
137
138 if (req < VHOST_USER_MAX) {
139 return vu_request_str[req];
140 } else {
141 return "unknown";
142 }
143 }
144
145 static void
146 vu_panic(VuDev *dev, const char *msg, ...)
147 {
148 char *buf = NULL;
149 va_list ap;
150
151 va_start(ap, msg);
152 if (vasprintf(&buf, msg, ap) < 0) {
153 buf = NULL;
154 }
155 va_end(ap);
156
157 dev->broken = true;
158 dev->panic(dev, buf);
159 free(buf);
160
161 /* FIXME: find a way to call virtio_error? */
162 }
163
164 /* Translate guest physical address to our virtual address. */
165 void *
166 vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
167 {
168 int i;
169
170 if (*plen == 0) {
171 return NULL;
172 }
173
174 /* Find matching memory region. */
175 for (i = 0; i < dev->nregions; i++) {
176 VuDevRegion *r = &dev->regions[i];
177
178 if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
179 if ((guest_addr + *plen) > (r->gpa + r->size)) {
180 *plen = r->gpa + r->size - guest_addr;
181 }
182 return (void *)(uintptr_t)
183 guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
184 }
185 }
186
187 return NULL;
188 }
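/*
 * Typical use: set *plen to the number of bytes the caller wants to touch,
 * call vu_gpa_to_va(), then re-check *plen; it is truncated when the range
 * runs past the end of the containing region.
 */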
189
190 /* Translate qemu virtual address to our virtual address. */
191 static void *
192 qva_to_va(VuDev *dev, uint64_t qemu_addr)
193 {
194 int i;
195
196 /* Find matching memory region. */
197 for (i = 0; i < dev->nregions; i++) {
198 VuDevRegion *r = &dev->regions[i];
199
200 if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
201 return (void *)(uintptr_t)
202 qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
203 }
204 }
205
206 return NULL;
207 }
208
209 static void
210 vmsg_close_fds(VhostUserMsg *vmsg)
211 {
212 int i;
213
214 for (i = 0; i < vmsg->fd_num; i++) {
215 close(vmsg->fds[i]);
216 }
217 }
218
219 /* A test to see if we have userfault available */
220 static bool
221 have_userfault(void)
222 {
223 #if defined(__linux__) && defined(__NR_userfaultfd) &&\
224 defined(UFFD_FEATURE_MISSING_SHMEM) &&\
225 defined(UFFD_FEATURE_MISSING_HUGETLBFS)
226 /* Now test that the kernel we're running on really has the features */
227 int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
228 struct uffdio_api api_struct;
229 if (ufd < 0) {
230 return false;
231 }
232
233 api_struct.api = UFFD_API;
234 api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
235 UFFD_FEATURE_MISSING_HUGETLBFS;
236 if (ioctl(ufd, UFFDIO_API, &api_struct)) {
237 close(ufd);
238 return false;
239 }
240 close(ufd);
241 return true;
242
243 #else
244 return false;
245 #endif
246 }
247
248 static bool
249 vu_message_read(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
250 {
251 char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
252 struct iovec iov = {
253 .iov_base = (char *)vmsg,
254 .iov_len = VHOST_USER_HDR_SIZE,
255 };
256 struct msghdr msg = {
257 .msg_iov = &iov,
258 .msg_iovlen = 1,
259 .msg_control = control,
260 .msg_controllen = sizeof(control),
261 };
262 size_t fd_size;
263 struct cmsghdr *cmsg;
264 int rc;
265
266 do {
267 rc = recvmsg(conn_fd, &msg, 0);
268 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
269
270 if (rc < 0) {
271 vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
272 return false;
273 }
274
275 vmsg->fd_num = 0;
276 for (cmsg = CMSG_FIRSTHDR(&msg);
277 cmsg != NULL;
278 cmsg = CMSG_NXTHDR(&msg, cmsg))
279 {
280 if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
281 fd_size = cmsg->cmsg_len - CMSG_LEN(0);
282 vmsg->fd_num = fd_size / sizeof(int);
283 memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
284 break;
285 }
286 }
287
288 if (vmsg->size > sizeof(vmsg->payload)) {
289 vu_panic(dev,
290 "Error: too big message request: %d, size: vmsg->size: %u, "
291 "while sizeof(vmsg->payload) = %zu\n",
292 vmsg->request, vmsg->size, sizeof(vmsg->payload));
293 goto fail;
294 }
295
296 if (vmsg->size) {
297 do {
298 rc = read(conn_fd, &vmsg->payload, vmsg->size);
299 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
300
301 if (rc <= 0) {
302 vu_panic(dev, "Error while reading: %s", strerror(errno));
303 goto fail;
304 }
305
306 assert(rc == vmsg->size);
307 }
308
309 return true;
310
311 fail:
312 vmsg_close_fds(vmsg);
313
314 return false;
315 }
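/*
 * On the wire each message is a VHOST_USER_HDR_SIZE header (request, flags,
 * size) optionally followed by vmsg->size bytes of payload; any file
 * descriptors arrive as SCM_RIGHTS ancillary data attached to the header read.
 */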
316
317 static bool
318 vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
319 {
320 int rc;
321 uint8_t *p = (uint8_t *)vmsg;
322 char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))] = { };
323 struct iovec iov = {
324 .iov_base = (char *)vmsg,
325 .iov_len = VHOST_USER_HDR_SIZE,
326 };
327 struct msghdr msg = {
328 .msg_iov = &iov,
329 .msg_iovlen = 1,
330 .msg_control = control,
331 };
332 struct cmsghdr *cmsg;
333
334 memset(control, 0, sizeof(control));
335 assert(vmsg->fd_num <= VHOST_MEMORY_MAX_NREGIONS);
336 if (vmsg->fd_num > 0) {
337 size_t fdsize = vmsg->fd_num * sizeof(int);
338 msg.msg_controllen = CMSG_SPACE(fdsize);
339 cmsg = CMSG_FIRSTHDR(&msg);
340 cmsg->cmsg_len = CMSG_LEN(fdsize);
341 cmsg->cmsg_level = SOL_SOCKET;
342 cmsg->cmsg_type = SCM_RIGHTS;
343 memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
344 } else {
345 msg.msg_controllen = 0;
346 }
347
348 do {
349 rc = sendmsg(conn_fd, &msg, 0);
350 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
351
352 if (vmsg->size) {
353 do {
354 if (vmsg->data) {
355 rc = write(conn_fd, vmsg->data, vmsg->size);
356 } else {
357 rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
358 }
359 } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
360 }
361
362 if (rc <= 0) {
363 vu_panic(dev, "Error while writing: %s", strerror(errno));
364 return false;
365 }
366
367 return true;
368 }
369
370 static bool
371 vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
372 {
373 /* Set the version in the flags when sending the reply */
374 vmsg->flags &= ~VHOST_USER_VERSION_MASK;
375 vmsg->flags |= VHOST_USER_VERSION;
376 vmsg->flags |= VHOST_USER_REPLY_MASK;
377
378 return vu_message_write(dev, conn_fd, vmsg);
379 }
380
381 static bool
382 vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
383 {
384 VhostUserMsg msg_reply;
385
386 if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
387 return true;
388 }
389
390 if (!vu_message_read(dev, dev->slave_fd, &msg_reply)) {
391 return false;
392 }
393
394 if (msg_reply.request != vmsg->request) {
395 DPRINT("Received unexpected msg type. Expected %d received %d",
396 vmsg->request, msg_reply.request);
397 return false;
398 }
399
400 return msg_reply.payload.u64 == 0;
401 }
402
403 /* Kick the log_call_fd if required. */
404 static void
405 vu_log_kick(VuDev *dev)
406 {
407 if (dev->log_call_fd != -1) {
408 DPRINT("Kicking the QEMU's log...\n");
409 if (eventfd_write(dev->log_call_fd, 1) < 0) {
410 vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
411 }
412 }
413 }
414
415 static void
416 vu_log_page(uint8_t *log_table, uint64_t page)
417 {
418 DPRINT("Logged dirty guest page: %"PRId64"\n", page);
419 atomic_or(&log_table[page / 8], 1 << (page % 8));
420 }
421
422 static void
423 vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
424 {
425 uint64_t page;
426
427 if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
428 !dev->log_table || !length) {
429 return;
430 }
431
432 assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));
433
434 page = address / VHOST_LOG_PAGE;
435 while (page * VHOST_LOG_PAGE < address + length) {
436 vu_log_page(dev->log_table, page);
437 page += 1;
438 }
439
440 vu_log_kick(dev);
441 }
442
443 static void
444 vu_kick_cb(VuDev *dev, int condition, void *data)
445 {
446 int index = (intptr_t)data;
447 VuVirtq *vq = &dev->vq[index];
448 int sock = vq->kick_fd;
449 eventfd_t kick_data;
450 ssize_t rc;
451
452 rc = eventfd_read(sock, &kick_data);
453 if (rc == -1) {
454 vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
455 dev->remove_watch(dev, dev->vq[index].kick_fd);
456 } else {
457 DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
458 kick_data, vq->handler, index);
459 if (vq->handler) {
460 vq->handler(dev, index);
461 }
462 }
463 }
464
465 static bool
466 vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
467 {
468 vmsg->payload.u64 =
469 1ULL << VHOST_F_LOG_ALL |
470 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
471
472 if (dev->iface->get_features) {
473 vmsg->payload.u64 |= dev->iface->get_features(dev);
474 }
475
476 vmsg->size = sizeof(vmsg->payload.u64);
477 vmsg->fd_num = 0;
478
479 DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
480
481 return true;
482 }
483
484 static void
485 vu_set_enable_all_rings(VuDev *dev, bool enabled)
486 {
487 int i;
488
489 for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
490 dev->vq[i].enable = enabled;
491 }
492 }
493
494 static bool
495 vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
496 {
497 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
498
499 dev->features = vmsg->payload.u64;
500
501 if (!vu_has_feature(dev, VHOST_USER_F_PROTOCOL_FEATURES)) {
502 vu_set_enable_all_rings(dev, true);
503 }
504
505 if (dev->iface->set_features) {
506 dev->iface->set_features(dev, dev->features);
507 }
508
509 return false;
510 }
511
512 static bool
513 vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
514 {
515 return false;
516 }
517
518 static void
519 vu_close_log(VuDev *dev)
520 {
521 if (dev->log_table) {
522 if (munmap(dev->log_table, dev->log_size) != 0) {
523 perror("close log munmap() error");
524 }
525
526 dev->log_table = NULL;
527 }
528 if (dev->log_call_fd != -1) {
529 close(dev->log_call_fd);
530 dev->log_call_fd = -1;
531 }
532 }
533
534 static bool
535 vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
536 {
537 vu_set_enable_all_rings(dev, false);
538
539 return false;
540 }
541
542 static bool
543 vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
544 {
545 int i;
546 VhostUserMemory m = vmsg->payload.memory, *memory = &m;
547 dev->nregions = memory->nregions;
548
549 DPRINT("Nregions: %d\n", memory->nregions);
550 for (i = 0; i < dev->nregions; i++) {
551 void *mmap_addr;
552 VhostUserMemoryRegion *msg_region = &memory->regions[i];
553 VuDevRegion *dev_region = &dev->regions[i];
554
555 DPRINT("Region %d\n", i);
556 DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
557 msg_region->guest_phys_addr);
558 DPRINT(" memory_size: 0x%016"PRIx64"\n",
559 msg_region->memory_size);
560 DPRINT(" userspace_addr 0x%016"PRIx64"\n",
561 msg_region->userspace_addr);
562 DPRINT(" mmap_offset 0x%016"PRIx64"\n",
563 msg_region->mmap_offset);
564
565 dev_region->gpa = msg_region->guest_phys_addr;
566 dev_region->size = msg_region->memory_size;
567 dev_region->qva = msg_region->userspace_addr;
568 dev_region->mmap_offset = msg_region->mmap_offset;
569
570 /* We don't use the offset argument of mmap() since the
571 * mapped address has to be page aligned, and we use huge
572 * pages.
573 * In postcopy we're using PROT_NONE here to catch anyone
574 * accessing the region before we userfault it.
575 */
576 mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
577 PROT_NONE, MAP_SHARED,
578 vmsg->fds[i], 0);
579
580 if (mmap_addr == MAP_FAILED) {
581 vu_panic(dev, "region mmap error: %s", strerror(errno));
582 } else {
583 dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
584 DPRINT(" mmap_addr: 0x%016"PRIx64"\n",
585 dev_region->mmap_addr);
586 }
587
588 /* Return the address to QEMU so that it can translate the ufd
589 * fault addresses back.
590 */
591 msg_region->userspace_addr = (uintptr_t)(mmap_addr +
592 dev_region->mmap_offset);
593 close(vmsg->fds[i]);
594 }
595
596 /* Send the message back to qemu with the addresses filled in */
597 vmsg->fd_num = 0;
598 if (!vu_send_reply(dev, dev->sock, vmsg)) {
599 vu_panic(dev, "failed to respond to set-mem-table for postcopy");
600 return false;
601 }
602
603 /* Wait for QEMU to confirm that it's registered the handler for the
604 * faults.
605 */
606 if (!vu_message_read(dev, dev->sock, vmsg) ||
607 vmsg->size != sizeof(vmsg->payload.u64) ||
608 vmsg->payload.u64 != 0) {
609 vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
610 return false;
611 }
612
613 /* OK, now we can go and register the memory and generate faults */
614 for (i = 0; i < dev->nregions; i++) {
615 VuDevRegion *dev_region = &dev->regions[i];
616 int ret;
617 #ifdef UFFDIO_REGISTER
618 /* We should already have an open ufd. Register each memory
619 * range with it.
620 * Discard any mapping we have here; note we can't use MADV_REMOVE
621 * or fallocate to make the hole since we don't want to lose
622 * data that has already arrived in the shared process.
623 * TODO: How to handle hugepages?
624 */
625 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
626 dev_region->size + dev_region->mmap_offset,
627 MADV_DONTNEED);
628 if (ret) {
629 fprintf(stderr,
630 "%s: Failed to madvise(DONTNEED) region %d: %s\n",
631 __func__, i, strerror(errno));
632 }
633 /* Turn off transparent hugepages so we don't lose wakeups
634 * in neighbouring pages.
635 * TODO: Turn this back on later.
636 */
637 ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
638 dev_region->size + dev_region->mmap_offset,
639 MADV_NOHUGEPAGE);
640 if (ret) {
641 /* Note: This can happen legally on kernels that are configured
642 * without madvise'able hugepages
643 */
644 fprintf(stderr,
645 "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
646 __func__, i, strerror(errno));
647 }
648 struct uffdio_register reg_struct;
649 reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
650 reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
651 reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
652
653 if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
654 vu_panic(dev, "%s: Failed to userfault region %d "
655 "@%p + size:%zx offset: %zx: (ufd=%d)%s\n",
656 __func__, i,
657 dev_region->mmap_addr,
658 dev_region->size, dev_region->mmap_offset,
659 dev->postcopy_ufd, strerror(errno));
660 return false;
661 }
662 if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
663 vu_panic(dev, "%s Region (%d) doesn't support COPY",
664 __func__, i);
665 return false;
666 }
667 DPRINT("%s: region %d: Registered userfault for %"
668 PRIx64 " + %" PRIx64 "\n", __func__, i,
669 (uint64_t)reg_struct.range.start,
670 (uint64_t)reg_struct.range.len);
671 /* Now that it's registered, we can let the client at it */
672 if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
673 dev_region->size + dev_region->mmap_offset,
674 PROT_READ | PROT_WRITE)) {
675 vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
676 i, strerror(errno));
677 return false;
678 }
679 /* TODO: Stash 'zero' support flags somewhere */
680 #endif
681 }
682
683 return false;
684 }
685
686 static bool
687 vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
688 {
689 int i;
690 VhostUserMemory m = vmsg->payload.memory, *memory = &m;
691
692 for (i = 0; i < dev->nregions; i++) {
693 VuDevRegion *r = &dev->regions[i];
694 void *m = (void *) (uintptr_t) r->mmap_addr;
695
696 if (m) {
697 munmap(m, r->size + r->mmap_offset);
698 }
699 }
700 dev->nregions = memory->nregions;
701
702 if (dev->postcopy_listening) {
703 return vu_set_mem_table_exec_postcopy(dev, vmsg);
704 }
705
706 DPRINT("Nregions: %d\n", memory->nregions);
707 for (i = 0; i < dev->nregions; i++) {
708 void *mmap_addr;
709 VhostUserMemoryRegion *msg_region = &memory->regions[i];
710 VuDevRegion *dev_region = &dev->regions[i];
711
712 DPRINT("Region %d\n", i);
713 DPRINT(" guest_phys_addr: 0x%016"PRIx64"\n",
714 msg_region->guest_phys_addr);
715 DPRINT(" memory_size: 0x%016"PRIx64"\n",
716 msg_region->memory_size);
717 DPRINT(" userspace_addr 0x%016"PRIx64"\n",
718 msg_region->userspace_addr);
719 DPRINT(" mmap_offset 0x%016"PRIx64"\n",
720 msg_region->mmap_offset);
721
722 dev_region->gpa = msg_region->guest_phys_addr;
723 dev_region->size = msg_region->memory_size;
724 dev_region->qva = msg_region->userspace_addr;
725 dev_region->mmap_offset = msg_region->mmap_offset;
726
727 /* We don't use the offset argument of mmap() since the
728 * mapped address has to be page aligned, and we use huge
729 * pages. */
730 mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
731 PROT_READ | PROT_WRITE, MAP_SHARED,
732 vmsg->fds[i], 0);
733
734 if (mmap_addr == MAP_FAILED) {
735 vu_panic(dev, "region mmap error: %s", strerror(errno));
736 } else {
737 dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
738 DPRINT(" mmap_addr: 0x%016"PRIx64"\n",
739 dev_region->mmap_addr);
740 }
741
742 close(vmsg->fds[i]);
743 }
744
745 return false;
746 }
747
748 static bool
749 vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
750 {
751 int fd;
752 uint64_t log_mmap_size, log_mmap_offset;
753 void *rc;
754
755 if (vmsg->fd_num != 1 ||
756 vmsg->size != sizeof(vmsg->payload.log)) {
757 vu_panic(dev, "Invalid log_base message");
758 return true;
759 }
760
761 fd = vmsg->fds[0];
762 log_mmap_offset = vmsg->payload.log.mmap_offset;
763 log_mmap_size = vmsg->payload.log.mmap_size;
764 DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
765 DPRINT("Log mmap_size: %"PRId64"\n", log_mmap_size);
766
767 rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
768 log_mmap_offset);
769 close(fd);
770 if (rc == MAP_FAILED) {
771 perror("log mmap error");
772 }
773
774 if (dev->log_table) {
775 munmap(dev->log_table, dev->log_size);
776 }
777 dev->log_table = rc;
778 dev->log_size = log_mmap_size;
779
780 vmsg->size = sizeof(vmsg->payload.u64);
781 vmsg->fd_num = 0;
782
783 return true;
784 }
785
786 static bool
787 vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
788 {
789 if (vmsg->fd_num != 1) {
790 vu_panic(dev, "Invalid log_fd message");
791 return false;
792 }
793
794 if (dev->log_call_fd != -1) {
795 close(dev->log_call_fd);
796 }
797 dev->log_call_fd = vmsg->fds[0];
798 DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);
799
800 return false;
801 }
802
803 static bool
804 vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
805 {
806 unsigned int index = vmsg->payload.state.index;
807 unsigned int num = vmsg->payload.state.num;
808
809 DPRINT("State.index: %d\n", index);
810 DPRINT("State.num: %d\n", num);
811 dev->vq[index].vring.num = num;
812
813 return false;
814 }
815
816 static bool
817 vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
818 {
819 struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
820 unsigned int index = vra->index;
821 VuVirtq *vq = &dev->vq[index];
822
823 DPRINT("vhost_vring_addr:\n");
824 DPRINT(" index: %d\n", vra->index);
825 DPRINT(" flags: %d\n", vra->flags);
826 DPRINT(" desc_user_addr: 0x%016" PRIx64 "\n", vra->desc_user_addr);
827 DPRINT(" used_user_addr: 0x%016" PRIx64 "\n", vra->used_user_addr);
828 DPRINT(" avail_user_addr: 0x%016" PRIx64 "\n", vra->avail_user_addr);
829 DPRINT(" log_guest_addr: 0x%016" PRIx64 "\n", vra->log_guest_addr);
830
831 vq->vring.flags = vra->flags;
832 vq->vring.desc = qva_to_va(dev, vra->desc_user_addr);
833 vq->vring.used = qva_to_va(dev, vra->used_user_addr);
834 vq->vring.avail = qva_to_va(dev, vra->avail_user_addr);
835 vq->vring.log_guest_addr = vra->log_guest_addr;
836
837 DPRINT("Setting virtq addresses:\n");
838 DPRINT(" vring_desc at %p\n", vq->vring.desc);
839 DPRINT(" vring_used at %p\n", vq->vring.used);
840 DPRINT(" vring_avail at %p\n", vq->vring.avail);
841
842 if (!(vq->vring.desc && vq->vring.used && vq->vring.avail)) {
843 vu_panic(dev, "Invalid vring_addr message");
844 return false;
845 }
846
847 vq->used_idx = vq->vring.used->idx;
848
849 if (vq->last_avail_idx != vq->used_idx) {
850 bool resume = dev->iface->queue_is_processed_in_order &&
851 dev->iface->queue_is_processed_in_order(dev, index);
852
853 DPRINT("Last avail index != used index: %u != %u%s\n",
854 vq->last_avail_idx, vq->used_idx,
855 resume ? ", resuming" : "");
856
857 if (resume) {
858 vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
859 }
860 }
861
862 return false;
863 }
864
865 static bool
866 vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
867 {
868 unsigned int index = vmsg->payload.state.index;
869 unsigned int num = vmsg->payload.state.num;
870
871 DPRINT("State.index: %d\n", index);
872 DPRINT("State.num: %d\n", num);
873 dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;
874
875 return false;
876 }
877
878 static bool
879 vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
880 {
881 unsigned int index = vmsg->payload.state.index;
882
883 DPRINT("State.index: %d\n", index);
884 vmsg->payload.state.num = dev->vq[index].last_avail_idx;
885 vmsg->size = sizeof(vmsg->payload.state);
886
887 dev->vq[index].started = false;
888 if (dev->iface->queue_set_started) {
889 dev->iface->queue_set_started(dev, index, false);
890 }
891
892 if (dev->vq[index].call_fd != -1) {
893 close(dev->vq[index].call_fd);
894 dev->vq[index].call_fd = -1;
895 }
896 if (dev->vq[index].kick_fd != -1) {
897 dev->remove_watch(dev, dev->vq[index].kick_fd);
898 close(dev->vq[index].kick_fd);
899 dev->vq[index].kick_fd = -1;
900 }
901
902 return true;
903 }
904
905 static bool
906 vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
907 {
908 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
909
910 if (index >= VHOST_MAX_NR_VIRTQUEUE) {
911 vmsg_close_fds(vmsg);
912 vu_panic(dev, "Invalid queue index: %u", index);
913 return false;
914 }
915
916 if (vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK ||
917 vmsg->fd_num != 1) {
918 vmsg_close_fds(vmsg);
919 vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
920 return false;
921 }
922
923 return true;
924 }
925
926 static int
927 inflight_desc_compare(const void *a, const void *b)
928 {
929 VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
930 *desc1 = (VuVirtqInflightDesc *)b;
931
932 if (desc1->counter > desc0->counter &&
933 (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
934 return 1;
935 }
936
937 return -1;
938 }
939
940 static int
941 vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
942 {
943 int i = 0;
944
945 if (!has_feature(dev->protocol_features,
946 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
947 return 0;
948 }
949
950 if (unlikely(!vq->inflight)) {
951 return -1;
952 }
953
954 if (unlikely(!vq->inflight->version)) {
955 /* initialize the buffer */
956 vq->inflight->version = INFLIGHT_VERSION;
957 return 0;
958 }
959
960 vq->used_idx = vq->vring.used->idx;
961 vq->resubmit_num = 0;
962 vq->resubmit_list = NULL;
963 vq->counter = 0;
964
965 if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
966 vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;
967
968 barrier();
969
970 vq->inflight->used_idx = vq->used_idx;
971 }
972
973 for (i = 0; i < vq->inflight->desc_num; i++) {
974 if (vq->inflight->desc[i].inflight == 1) {
975 vq->inuse++;
976 }
977 }
978
979 vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;
980
981 if (vq->inuse) {
982 vq->resubmit_list = malloc(sizeof(VuVirtqInflightDesc) * vq->inuse);
983 if (!vq->resubmit_list) {
984 return -1;
985 }
986
987 for (i = 0; i < vq->inflight->desc_num; i++) {
988 if (vq->inflight->desc[i].inflight) {
989 vq->resubmit_list[vq->resubmit_num].index = i;
990 vq->resubmit_list[vq->resubmit_num].counter =
991 vq->inflight->desc[i].counter;
992 vq->resubmit_num++;
993 }
994 }
995
996 if (vq->resubmit_num > 1) {
997 qsort(vq->resubmit_list, vq->resubmit_num,
998 sizeof(VuVirtqInflightDesc), inflight_desc_compare);
999 }
1000 vq->counter = vq->resubmit_list[0].counter + 1;
1001 }
1002
1003 /* in case of I/O hang after reconnecting */
1004 if (eventfd_write(vq->kick_fd, 1)) {
1005 return -1;
1006 }
1007
1008 return 0;
1009 }
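/*
 * After a reconnect, descriptors still marked inflight above are collected on
 * vq->resubmit_list, ordered by their per-descriptor counter, and are handed
 * back out by vu_queue_pop() before any new available-ring entries, so a
 * backend restart does not drop in-flight requests.
 */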
1010
1011 static bool
1012 vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
1013 {
1014 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1015
1016 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1017
1018 if (!vu_check_queue_msg_file(dev, vmsg)) {
1019 return false;
1020 }
1021
1022 if (dev->vq[index].kick_fd != -1) {
1023 dev->remove_watch(dev, dev->vq[index].kick_fd);
1024 close(dev->vq[index].kick_fd);
1025 dev->vq[index].kick_fd = -1;
1026 }
1027
1028 dev->vq[index].kick_fd = vmsg->fds[0];
1029 DPRINT("Got kick_fd: %d for vq: %d\n", vmsg->fds[0], index);
1030
1031 dev->vq[index].started = true;
1032 if (dev->iface->queue_set_started) {
1033 dev->iface->queue_set_started(dev, index, true);
1034 }
1035
1036 if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
1037 dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
1038 vu_kick_cb, (void *)(long)index);
1039
1040 DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
1041 dev->vq[index].kick_fd, index);
1042 }
1043
1044 if (vu_check_queue_inflights(dev, &dev->vq[index])) {
1045 vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
1046 }
1047
1048 return false;
1049 }
1050
1051 void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
1052 vu_queue_handler_cb handler)
1053 {
1054 int qidx = vq - dev->vq;
1055
1056 vq->handler = handler;
1057 if (vq->kick_fd >= 0) {
1058 if (handler) {
1059 dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
1060 vu_kick_cb, (void *)(long)qidx);
1061 } else {
1062 dev->remove_watch(dev, vq->kick_fd);
1063 }
1064 }
1065 }
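/*
 * Illustrative backend sketch (my_handler is a hypothetical callback, not part
 * of this library):
 *
 *     static void my_handler(VuDev *dev, int qidx) {
 *         // pop and process elements from vu_get_queue(dev, qidx)
 *     }
 *     ...
 *     vu_set_queue_handler(dev, vu_get_queue(dev, 0), my_handler);
 *
 * The handler then runs whenever the queue's kick eventfd fires.
 */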
1066
1067 bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
1068 int size, int offset)
1069 {
1070 int qidx = vq - dev->vq;
1071 int fd_num = 0;
1072 VhostUserMsg vmsg = {
1073 .request = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
1074 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1075 .size = sizeof(vmsg.payload.area),
1076 .payload.area = {
1077 .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
1078 .size = size,
1079 .offset = offset,
1080 },
1081 };
1082
1083 if (fd == -1) {
1084 vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
1085 } else {
1086 vmsg.fds[fd_num++] = fd;
1087 }
1088
1089 vmsg.fd_num = fd_num;
1090
1091 if (!has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
1092 return false;
1093 }
1094
1095 if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
1096 return false;
1097 }
1098
1099 return vu_process_message_reply(dev, &vmsg);
1100 }
1101
1102 static bool
1103 vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
1104 {
1105 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1106
1107 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1108
1109 if (!vu_check_queue_msg_file(dev, vmsg)) {
1110 return false;
1111 }
1112
1113 if (dev->vq[index].call_fd != -1) {
1114 close(dev->vq[index].call_fd);
1115 dev->vq[index].call_fd = -1;
1116 }
1117
1118 dev->vq[index].call_fd = vmsg->fds[0];
1119
1120 /* in case of I/O hang after reconnecting */
1121 if (eventfd_write(vmsg->fds[0], 1)) {
1122 return -1;
1123 }
1124
1125 DPRINT("Got call_fd: %d for vq: %d\n", vmsg->fds[0], index);
1126
1127 return false;
1128 }
1129
1130 static bool
1131 vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
1132 {
1133 int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1134
1135 DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);
1136
1137 if (!vu_check_queue_msg_file(dev, vmsg)) {
1138 return false;
1139 }
1140
1141 if (dev->vq[index].err_fd != -1) {
1142 close(dev->vq[index].err_fd);
1143 dev->vq[index].err_fd = -1;
1144 }
1145
1146 dev->vq[index].err_fd = vmsg->fds[0];
1147
1148 return false;
1149 }
1150
1151 static bool
1152 vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1153 {
1154 uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
1155 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ |
1156 1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
1157 1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD;
1158
1159 if (have_userfault()) {
1160 features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
1161 }
1162
1163 if (dev->iface->get_config && dev->iface->set_config) {
1164 features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
1165 }
1166
1167 if (dev->iface->get_protocol_features) {
1168 features |= dev->iface->get_protocol_features(dev);
1169 }
1170
1171 vmsg->payload.u64 = features;
1172 vmsg->size = sizeof(vmsg->payload.u64);
1173 vmsg->fd_num = 0;
1174
1175 return true;
1176 }
1177
1178 static bool
1179 vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
1180 {
1181 uint64_t features = vmsg->payload.u64;
1182
1183 DPRINT("u64: 0x%016"PRIx64"\n", features);
1184
1185 dev->protocol_features = vmsg->payload.u64;
1186
1187 if (dev->iface->set_protocol_features) {
1188 dev->iface->set_protocol_features(dev, features);
1189 }
1190
1191 return false;
1192 }
1193
1194 static bool
1195 vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
1196 {
1197 DPRINT("Function %s() not implemented yet.\n", __func__);
1198 return false;
1199 }
1200
1201 static bool
1202 vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
1203 {
1204 unsigned int index = vmsg->payload.state.index;
1205 unsigned int enable = vmsg->payload.state.num;
1206
1207 DPRINT("State.index: %d\n", index);
1208 DPRINT("State.enable: %d\n", enable);
1209
1210 if (index >= VHOST_MAX_NR_VIRTQUEUE) {
1211 vu_panic(dev, "Invalid vring_enable index: %u", index);
1212 return false;
1213 }
1214
1215 dev->vq[index].enable = enable;
1216 return false;
1217 }
1218
1219 static bool
1220 vu_set_slave_req_fd(VuDev *dev, VhostUserMsg *vmsg)
1221 {
1222 if (vmsg->fd_num != 1) {
1223 vu_panic(dev, "Invalid slave_req_fd message (%d fd's)", vmsg->fd_num);
1224 return false;
1225 }
1226
1227 if (dev->slave_fd != -1) {
1228 close(dev->slave_fd);
1229 }
1230 dev->slave_fd = vmsg->fds[0];
1231 DPRINT("Got slave_fd: %d\n", vmsg->fds[0]);
1232
1233 return false;
1234 }
1235
1236 static bool
1237 vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
1238 {
1239 int ret = -1;
1240
1241 if (dev->iface->get_config) {
1242 ret = dev->iface->get_config(dev, vmsg->payload.config.region,
1243 vmsg->payload.config.size);
1244 }
1245
1246 if (ret) {
1247 /* resize to zero to indicate an error to the master */
1248 vmsg->size = 0;
1249 }
1250
1251 return true;
1252 }
1253
1254 static bool
1255 vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
1256 {
1257 int ret = -1;
1258
1259 if (dev->iface->set_config) {
1260 ret = dev->iface->set_config(dev, vmsg->payload.config.region,
1261 vmsg->payload.config.offset,
1262 vmsg->payload.config.size,
1263 vmsg->payload.config.flags);
1264 if (ret) {
1265 vu_panic(dev, "Set virtio configuration space failed");
1266 }
1267 }
1268
1269 return false;
1270 }
1271
1272 static bool
1273 vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
1274 {
1275 dev->postcopy_ufd = -1;
1276 #ifdef UFFDIO_API
1277 struct uffdio_api api_struct;
1278
1279 dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
1280 vmsg->size = 0;
1281 #endif
1282
1283 if (dev->postcopy_ufd == -1) {
1284 vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
1285 goto out;
1286 }
1287
1288 #ifdef UFFDIO_API
1289 api_struct.api = UFFD_API;
1290 api_struct.features = 0;
1291 if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
1292 vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
1293 close(dev->postcopy_ufd);
1294 dev->postcopy_ufd = -1;
1295 goto out;
1296 }
1297 /* TODO: Stash feature flags somewhere */
1298 #endif
1299
1300 out:
1301 /* Return a ufd to QEMU */
1302 vmsg->fd_num = 1;
1303 vmsg->fds[0] = dev->postcopy_ufd;
1304 return true; /* = send a reply */
1305 }
1306
1307 static bool
1308 vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
1309 {
1310 vmsg->payload.u64 = -1;
1311 vmsg->size = sizeof(vmsg->payload.u64);
1312
1313 if (dev->nregions) {
1314 vu_panic(dev, "Regions already registered at postcopy-listen");
1315 return true;
1316 }
1317 dev->postcopy_listening = true;
1318
1319 vmsg->flags = VHOST_USER_VERSION | VHOST_USER_REPLY_MASK;
1320 vmsg->payload.u64 = 0; /* Success */
1321 return true;
1322 }
1323
1324 static bool
1325 vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
1326 {
1327 DPRINT("%s: Entry\n", __func__);
1328 dev->postcopy_listening = false;
1329 if (dev->postcopy_ufd > 0) {
1330 close(dev->postcopy_ufd);
1331 dev->postcopy_ufd = -1;
1332 DPRINT("%s: Done close\n", __func__);
1333 }
1334
1335 vmsg->fd_num = 0;
1336 vmsg->payload.u64 = 0;
1337 vmsg->size = sizeof(vmsg->payload.u64);
1338 vmsg->flags = VHOST_USER_VERSION | VHOST_USER_REPLY_MASK;
1339 DPRINT("%s: exit\n", __func__);
1340 return true;
1341 }
1342
1343 static inline uint64_t
1344 vu_inflight_queue_size(uint16_t queue_size)
1345 {
1346 return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
1347 sizeof(uint16_t), INFLIGHT_ALIGNMENT);
1348 }
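/*
 * Each queue's slice of the shared inflight buffer holds one VuDescStateSplit
 * per descriptor plus a little trailing bookkeeping, rounded up to
 * INFLIGHT_ALIGNMENT so that slices start on separate cache lines.
 */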
1349
1350 static bool
1351 vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1352 {
1353 int fd;
1354 void *addr;
1355 uint64_t mmap_size;
1356 uint16_t num_queues, queue_size;
1357
1358 if (vmsg->size != sizeof(vmsg->payload.inflight)) {
1359 vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
1360 vmsg->payload.inflight.mmap_size = 0;
1361 return true;
1362 }
1363
1364 num_queues = vmsg->payload.inflight.num_queues;
1365 queue_size = vmsg->payload.inflight.queue_size;
1366
1367 DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
1368 DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);
1369
1370 mmap_size = vu_inflight_queue_size(queue_size) * num_queues;
1371
1372 addr = qemu_memfd_alloc("vhost-inflight", mmap_size,
1373 F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
1374 &fd, NULL);
1375
1376 if (!addr) {
1377 vu_panic(dev, "Failed to alloc vhost inflight area");
1378 vmsg->payload.inflight.mmap_size = 0;
1379 return true;
1380 }
1381
1382 memset(addr, 0, mmap_size);
1383
1384 dev->inflight_info.addr = addr;
1385 dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
1386 dev->inflight_info.fd = vmsg->fds[0] = fd;
1387 vmsg->fd_num = 1;
1388 vmsg->payload.inflight.mmap_offset = 0;
1389
1390 DPRINT("send inflight mmap_size: %"PRId64"\n",
1391 vmsg->payload.inflight.mmap_size);
1392 DPRINT("send inflight mmap offset: %"PRId64"\n",
1393 vmsg->payload.inflight.mmap_offset);
1394
1395 return true;
1396 }
1397
1398 static bool
1399 vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
1400 {
1401 int fd, i;
1402 uint64_t mmap_size, mmap_offset;
1403 uint16_t num_queues, queue_size;
1404 void *rc;
1405
1406 if (vmsg->fd_num != 1 ||
1407 vmsg->size != sizeof(vmsg->payload.inflight)) {
1408 vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
1409 vmsg->size, vmsg->fd_num);
1410 return false;
1411 }
1412
1413 fd = vmsg->fds[0];
1414 mmap_size = vmsg->payload.inflight.mmap_size;
1415 mmap_offset = vmsg->payload.inflight.mmap_offset;
1416 num_queues = vmsg->payload.inflight.num_queues;
1417 queue_size = vmsg->payload.inflight.queue_size;
1418
1419 DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
1420 DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
1421 DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
1422 DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);
1423
1424 rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1425 fd, mmap_offset);
1426
1427 if (rc == MAP_FAILED) {
1428 vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
1429 return false;
1430 }
1431
1432 if (dev->inflight_info.fd) {
1433 close(dev->inflight_info.fd);
1434 }
1435
1436 if (dev->inflight_info.addr) {
1437 munmap(dev->inflight_info.addr, dev->inflight_info.size);
1438 }
1439
1440 dev->inflight_info.fd = fd;
1441 dev->inflight_info.addr = rc;
1442 dev->inflight_info.size = mmap_size;
1443
1444 for (i = 0; i < num_queues; i++) {
1445 dev->vq[i].inflight = (VuVirtqInflight *)rc;
1446 dev->vq[i].inflight->desc_num = queue_size;
1447 rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
1448 }
1449
1450 return false;
1451 }
1452
1453 static bool
1454 vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
1455 {
1456 int do_reply = 0;
1457
1458 /* Print out generic part of the request. */
1459 DPRINT("================ Vhost user message ================\n");
1460 DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
1461 vmsg->request);
1462 DPRINT("Flags: 0x%x\n", vmsg->flags);
1463 DPRINT("Size: %d\n", vmsg->size);
1464
1465 if (vmsg->fd_num) {
1466 int i;
1467 DPRINT("Fds:");
1468 for (i = 0; i < vmsg->fd_num; i++) {
1469 DPRINT(" %d", vmsg->fds[i]);
1470 }
1471 DPRINT("\n");
1472 }
1473
1474 if (dev->iface->process_msg &&
1475 dev->iface->process_msg(dev, vmsg, &do_reply)) {
1476 return do_reply;
1477 }
1478
1479 switch (vmsg->request) {
1480 case VHOST_USER_GET_FEATURES:
1481 return vu_get_features_exec(dev, vmsg);
1482 case VHOST_USER_SET_FEATURES:
1483 return vu_set_features_exec(dev, vmsg);
1484 case VHOST_USER_GET_PROTOCOL_FEATURES:
1485 return vu_get_protocol_features_exec(dev, vmsg);
1486 case VHOST_USER_SET_PROTOCOL_FEATURES:
1487 return vu_set_protocol_features_exec(dev, vmsg);
1488 case VHOST_USER_SET_OWNER:
1489 return vu_set_owner_exec(dev, vmsg);
1490 case VHOST_USER_RESET_OWNER:
1491 return vu_reset_device_exec(dev, vmsg);
1492 case VHOST_USER_SET_MEM_TABLE:
1493 return vu_set_mem_table_exec(dev, vmsg);
1494 case VHOST_USER_SET_LOG_BASE:
1495 return vu_set_log_base_exec(dev, vmsg);
1496 case VHOST_USER_SET_LOG_FD:
1497 return vu_set_log_fd_exec(dev, vmsg);
1498 case VHOST_USER_SET_VRING_NUM:
1499 return vu_set_vring_num_exec(dev, vmsg);
1500 case VHOST_USER_SET_VRING_ADDR:
1501 return vu_set_vring_addr_exec(dev, vmsg);
1502 case VHOST_USER_SET_VRING_BASE:
1503 return vu_set_vring_base_exec(dev, vmsg);
1504 case VHOST_USER_GET_VRING_BASE:
1505 return vu_get_vring_base_exec(dev, vmsg);
1506 case VHOST_USER_SET_VRING_KICK:
1507 return vu_set_vring_kick_exec(dev, vmsg);
1508 case VHOST_USER_SET_VRING_CALL:
1509 return vu_set_vring_call_exec(dev, vmsg);
1510 case VHOST_USER_SET_VRING_ERR:
1511 return vu_set_vring_err_exec(dev, vmsg);
1512 case VHOST_USER_GET_QUEUE_NUM:
1513 return vu_get_queue_num_exec(dev, vmsg);
1514 case VHOST_USER_SET_VRING_ENABLE:
1515 return vu_set_vring_enable_exec(dev, vmsg);
1516 case VHOST_USER_SET_SLAVE_REQ_FD:
1517 return vu_set_slave_req_fd(dev, vmsg);
1518 case VHOST_USER_GET_CONFIG:
1519 return vu_get_config(dev, vmsg);
1520 case VHOST_USER_SET_CONFIG:
1521 return vu_set_config(dev, vmsg);
1522 case VHOST_USER_NONE:
1523 /* if you need processing before exit, override iface->process_msg */
1524 exit(0);
1525 case VHOST_USER_POSTCOPY_ADVISE:
1526 return vu_set_postcopy_advise(dev, vmsg);
1527 case VHOST_USER_POSTCOPY_LISTEN:
1528 return vu_set_postcopy_listen(dev, vmsg);
1529 case VHOST_USER_POSTCOPY_END:
1530 return vu_set_postcopy_end(dev, vmsg);
1531 case VHOST_USER_GET_INFLIGHT_FD:
1532 return vu_get_inflight_fd(dev, vmsg);
1533 case VHOST_USER_SET_INFLIGHT_FD:
1534 return vu_set_inflight_fd(dev, vmsg);
1535 default:
1536 vmsg_close_fds(vmsg);
1537 vu_panic(dev, "Unhandled request: %d", vmsg->request);
1538 }
1539
1540 return false;
1541 }
1542
1543 bool
1544 vu_dispatch(VuDev *dev)
1545 {
1546 VhostUserMsg vmsg = { 0, };
1547 int reply_requested;
1548 bool success = false;
1549
1550 if (!vu_message_read(dev, dev->sock, &vmsg)) {
1551 goto end;
1552 }
1553
1554 reply_requested = vu_process_message(dev, &vmsg);
1555 if (!reply_requested) {
1556 success = true;
1557 goto end;
1558 }
1559
1560 if (!vu_send_reply(dev, dev->sock, &vmsg)) {
1561 goto end;
1562 }
1563
1564 success = true;
1565
1566 end:
1567 free(vmsg.data);
1568 return success;
1569 }
1570
1571 void
1572 vu_deinit(VuDev *dev)
1573 {
1574 int i;
1575
1576 for (i = 0; i < dev->nregions; i++) {
1577 VuDevRegion *r = &dev->regions[i];
1578 void *m = (void *) (uintptr_t) r->mmap_addr;
1579 if (m != MAP_FAILED) {
1580 munmap(m, r->size + r->mmap_offset);
1581 }
1582 }
1583 dev->nregions = 0;
1584
1585 for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
1586 VuVirtq *vq = &dev->vq[i];
1587
1588 if (vq->call_fd != -1) {
1589 close(vq->call_fd);
1590 vq->call_fd = -1;
1591 }
1592
1593 if (vq->kick_fd != -1) {
1594 close(vq->kick_fd);
1595 vq->kick_fd = -1;
1596 }
1597
1598 if (vq->err_fd != -1) {
1599 close(vq->err_fd);
1600 vq->err_fd = -1;
1601 }
1602
1603 if (vq->resubmit_list) {
1604 free(vq->resubmit_list);
1605 vq->resubmit_list = NULL;
1606 }
1607
1608 vq->inflight = NULL;
1609 }
1610
1611 if (dev->inflight_info.addr) {
1612 munmap(dev->inflight_info.addr, dev->inflight_info.size);
1613 dev->inflight_info.addr = NULL;
1614 }
1615
1616 if (dev->inflight_info.fd > 0) {
1617 close(dev->inflight_info.fd);
1618 dev->inflight_info.fd = -1;
1619 }
1620
1621 vu_close_log(dev);
1622 if (dev->slave_fd != -1) {
1623 close(dev->slave_fd);
1624 dev->slave_fd = -1;
1625 }
1626
1627 if (dev->sock != -1) {
1628 close(dev->sock);
1629 }
1630 }
1631
1632 void
1633 vu_init(VuDev *dev,
1634 int socket,
1635 vu_panic_cb panic,
1636 vu_set_watch_cb set_watch,
1637 vu_remove_watch_cb remove_watch,
1638 const VuDevIface *iface)
1639 {
1640 int i;
1641
1642 assert(socket >= 0);
1643 assert(set_watch);
1644 assert(remove_watch);
1645 assert(iface);
1646 assert(panic);
1647
1648 memset(dev, 0, sizeof(*dev));
1649
1650 dev->sock = socket;
1651 dev->panic = panic;
1652 dev->set_watch = set_watch;
1653 dev->remove_watch = remove_watch;
1654 dev->iface = iface;
1655 dev->log_call_fd = -1;
1656 dev->slave_fd = -1;
1657 for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
1658 dev->vq[i] = (VuVirtq) {
1659 .call_fd = -1, .kick_fd = -1, .err_fd = -1,
1660 .notification = true,
1661 };
1662 }
1663 }
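/*
 * Minimal usage sketch (the socket and event-loop plumbing belong to the
 * application, not to this library): accept the vhost-user UNIX socket
 * connection, call vu_init() with panic/set_watch/remove_watch callbacks,
 * call vu_dispatch() whenever the socket becomes readable, and call
 * vu_deinit() once the connection goes away.
 */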
1664
1665 VuVirtq *
1666 vu_get_queue(VuDev *dev, int qidx)
1667 {
1668 assert(qidx < VHOST_MAX_NR_VIRTQUEUE);
1669 return &dev->vq[qidx];
1670 }
1671
1672 bool
1673 vu_queue_enabled(VuDev *dev, VuVirtq *vq)
1674 {
1675 return vq->enable;
1676 }
1677
1678 bool
1679 vu_queue_started(const VuDev *dev, const VuVirtq *vq)
1680 {
1681 return vq->started;
1682 }
1683
1684 static inline uint16_t
1685 vring_avail_flags(VuVirtq *vq)
1686 {
1687 return vq->vring.avail->flags;
1688 }
1689
1690 static inline uint16_t
1691 vring_avail_idx(VuVirtq *vq)
1692 {
1693 vq->shadow_avail_idx = vq->vring.avail->idx;
1694
1695 return vq->shadow_avail_idx;
1696 }
1697
1698 static inline uint16_t
1699 vring_avail_ring(VuVirtq *vq, int i)
1700 {
1701 return vq->vring.avail->ring[i];
1702 }
1703
1704 static inline uint16_t
1705 vring_get_used_event(VuVirtq *vq)
1706 {
1707 return vring_avail_ring(vq, vq->vring.num);
1708 }
1709
1710 static int
1711 virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
1712 {
1713 uint16_t num_heads = vring_avail_idx(vq) - idx;
1714
1715 /* Check it isn't doing very strange things with descriptor numbers. */
1716 if (num_heads > vq->vring.num) {
1717 vu_panic(dev, "Guest moved used index from %u to %u",
1718 idx, vq->shadow_avail_idx);
1719 return -1;
1720 }
1721 if (num_heads) {
1722 /* On success, callers read a descriptor at vq->last_avail_idx.
1723 * Make sure descriptor read does not bypass avail index read. */
1724 smp_rmb();
1725 }
1726
1727 return num_heads;
1728 }
1729
1730 static bool
1731 virtqueue_get_head(VuDev *dev, VuVirtq *vq,
1732 unsigned int idx, unsigned int *head)
1733 {
1734 /* Grab the next descriptor number they're advertising, and increment
1735 * the index we've seen. */
1736 *head = vring_avail_ring(vq, idx % vq->vring.num);
1737
1738 /* If their number is silly, that's a fatal mistake. */
1739 if (*head >= vq->vring.num) {
1740 vu_panic(dev, "Guest says index %u is available", head);
1741 return false;
1742 }
1743
1744 return true;
1745 }
1746
1747 static int
1748 virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
1749 uint64_t addr, size_t len)
1750 {
1751 struct vring_desc *ori_desc;
1752 uint64_t read_len;
1753
1754 if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
1755 return -1;
1756 }
1757
1758 if (len == 0) {
1759 return -1;
1760 }
1761
1762 while (len) {
1763 read_len = len;
1764 ori_desc = vu_gpa_to_va(dev, &read_len, addr);
1765 if (!ori_desc) {
1766 return -1;
1767 }
1768
1769 memcpy(desc, ori_desc, read_len);
1770 len -= read_len;
1771 addr += read_len;
1772 desc += read_len;
1773 }
1774
1775 return 0;
1776 }
1777
1778 enum {
1779 VIRTQUEUE_READ_DESC_ERROR = -1,
1780 VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
1781 VIRTQUEUE_READ_DESC_MORE = 1, /* more buffers in chain */
1782 };
1783
1784 static int
1785 virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
1786 int i, unsigned int max, unsigned int *next)
1787 {
1788 /* If this descriptor says it doesn't chain, we're done. */
1789 if (!(desc[i].flags & VRING_DESC_F_NEXT)) {
1790 return VIRTQUEUE_READ_DESC_DONE;
1791 }
1792
1793 /* Check they're not leading us off end of descriptors. */
1794 *next = desc[i].next;
1795 /* Make sure compiler knows to grab that: we don't want it changing! */
1796 smp_wmb();
1797
1798 if (*next >= max) {
1799 vu_panic(dev, "Desc next is %u", next);
1800 return VIRTQUEUE_READ_DESC_ERROR;
1801 }
1802
1803 return VIRTQUEUE_READ_DESC_MORE;
1804 }
1805
1806 void
1807 vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
1808 unsigned int *out_bytes,
1809 unsigned max_in_bytes, unsigned max_out_bytes)
1810 {
1811 unsigned int idx;
1812 unsigned int total_bufs, in_total, out_total;
1813 int rc;
1814
1815 idx = vq->last_avail_idx;
1816
1817 total_bufs = in_total = out_total = 0;
1818 if (unlikely(dev->broken) ||
1819 unlikely(!vq->vring.avail)) {
1820 goto done;
1821 }
1822
1823 while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
1824 unsigned int max, desc_len, num_bufs, indirect = 0;
1825 uint64_t desc_addr, read_len;
1826 struct vring_desc *desc;
1827 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
1828 unsigned int i;
1829
1830 max = vq->vring.num;
1831 num_bufs = total_bufs;
1832 if (!virtqueue_get_head(dev, vq, idx++, &i)) {
1833 goto err;
1834 }
1835 desc = vq->vring.desc;
1836
1837 if (desc[i].flags & VRING_DESC_F_INDIRECT) {
1838 if (desc[i].len % sizeof(struct vring_desc)) {
1839 vu_panic(dev, "Invalid size for indirect buffer table");
1840 goto err;
1841 }
1842
1843 /* If we've got too many, that implies a descriptor loop. */
1844 if (num_bufs >= max) {
1845 vu_panic(dev, "Looped descriptor");
1846 goto err;
1847 }
1848
1849 /* loop over the indirect descriptor table */
1850 indirect = 1;
1851 desc_addr = desc[i].addr;
1852 desc_len = desc[i].len;
1853 max = desc_len / sizeof(struct vring_desc);
1854 read_len = desc_len;
1855 desc = vu_gpa_to_va(dev, &read_len, desc_addr);
1856 if (unlikely(desc && read_len != desc_len)) {
1857 /* Failed to use zero copy */
1858 desc = NULL;
1859 if (!virtqueue_read_indirect_desc(dev, desc_buf,
1860 desc_addr,
1861 desc_len)) {
1862 desc = desc_buf;
1863 }
1864 }
1865 if (!desc) {
1866 vu_panic(dev, "Invalid indirect buffer table");
1867 goto err;
1868 }
1869 num_bufs = i = 0;
1870 }
1871
1872 do {
1873 /* If we've got too many, that implies a descriptor loop. */
1874 if (++num_bufs > max) {
1875 vu_panic(dev, "Looped descriptor");
1876 goto err;
1877 }
1878
1879 if (desc[i].flags & VRING_DESC_F_WRITE) {
1880 in_total += desc[i].len;
1881 } else {
1882 out_total += desc[i].len;
1883 }
1884 if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1885 goto done;
1886 }
1887 rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
1888 } while (rc == VIRTQUEUE_READ_DESC_MORE);
1889
1890 if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1891 goto err;
1892 }
1893
1894 if (!indirect) {
1895 total_bufs = num_bufs;
1896 } else {
1897 total_bufs++;
1898 }
1899 }
1900 if (rc < 0) {
1901 goto err;
1902 }
1903 done:
1904 if (in_bytes) {
1905 *in_bytes = in_total;
1906 }
1907 if (out_bytes) {
1908 *out_bytes = out_total;
1909 }
1910 return;
1911
1912 err:
1913 in_total = out_total = 0;
1914 goto done;
1915 }
1916
1917 bool
1918 vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
1919 unsigned int out_bytes)
1920 {
1921 unsigned int in_total, out_total;
1922
1923 vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
1924 in_bytes, out_bytes);
1925
1926 return in_bytes <= in_total && out_bytes <= out_total;
1927 }
1928
1929 /* Fetch avail_idx from VQ memory only when we really need to know if
1930 * the guest has added some buffers. */
1931 bool
1932 vu_queue_empty(VuDev *dev, VuVirtq *vq)
1933 {
1934 if (unlikely(dev->broken) ||
1935 unlikely(!vq->vring.avail)) {
1936 return true;
1937 }
1938
1939 if (vq->shadow_avail_idx != vq->last_avail_idx) {
1940 return false;
1941 }
1942
1943 return vring_avail_idx(vq) == vq->last_avail_idx;
1944 }
1945
1946 static bool
1947 vring_notify(VuDev *dev, VuVirtq *vq)
1948 {
1949 uint16_t old, new;
1950 bool v;
1951
1952 /* We need to expose used array entries before checking used event. */
1953 smp_mb();
1954
1955 /* Always notify when the queue is empty (if the feature was acknowledged) */
1956 if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
1957 !vq->inuse && vu_queue_empty(dev, vq)) {
1958 return true;
1959 }
1960
1961 if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
1962 return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
1963 }
1964
1965 v = vq->signalled_used_valid;
1966 vq->signalled_used_valid = true;
1967 old = vq->signalled_used;
1968 new = vq->signalled_used = vq->used_idx;
1969 return !v || vring_need_event(vring_get_used_event(vq), new, old);
1970 }
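/*
 * With VIRTIO_RING_F_EVENT_IDX, vring_need_event() (from the standard virtio
 * ring header) only requests a notification when used_event falls in the
 * window (old, new], i.e. (uint16_t)(new - used_event - 1) < (uint16_t)(new - old)
 * using 16-bit wrap-around arithmetic.
 */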
1971
1972 void
1973 vu_queue_notify(VuDev *dev, VuVirtq *vq)
1974 {
1975 if (unlikely(dev->broken) ||
1976 unlikely(!vq->vring.avail)) {
1977 return;
1978 }
1979
1980 if (!vring_notify(dev, vq)) {
1981 DPRINT("skipped notify...\n");
1982 return;
1983 }
1984
1985 if (eventfd_write(vq->call_fd, 1) < 0) {
1986 vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
1987 }
1988 }
1989
1990 static inline void
1991 vring_used_flags_set_bit(VuVirtq *vq, int mask)
1992 {
1993 uint16_t *flags;
1994
1995 flags = (uint16_t *)((char*)vq->vring.used +
1996 offsetof(struct vring_used, flags));
1997 *flags |= mask;
1998 }
1999
2000 static inline void
2001 vring_used_flags_unset_bit(VuVirtq *vq, int mask)
2002 {
2003 uint16_t *flags;
2004
2005 flags = (uint16_t *)((char*)vq->vring.used +
2006 offsetof(struct vring_used, flags));
2007 *flags &= ~mask;
2008 }
2009
2010 static inline void
2011 vring_set_avail_event(VuVirtq *vq, uint16_t val)
2012 {
2013 if (!vq->notification) {
2014 return;
2015 }
2016
2017 *((uint16_t *) &vq->vring.used->ring[vq->vring.num]) = val;
2018 }
2019
2020 void
2021 vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
2022 {
2023 vq->notification = enable;
2024 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2025 vring_set_avail_event(vq, vring_avail_idx(vq));
2026 } else if (enable) {
2027 vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
2028 } else {
2029 vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
2030 }
2031 if (enable) {
2032 /* Expose avail event/used flags before caller checks the avail idx. */
2033 smp_mb();
2034 }
2035 }
2036
2037 static void
2038 virtqueue_map_desc(VuDev *dev,
2039 unsigned int *p_num_sg, struct iovec *iov,
2040 unsigned int max_num_sg, bool is_write,
2041 uint64_t pa, size_t sz)
2042 {
2043 unsigned num_sg = *p_num_sg;
2044
2045 assert(num_sg <= max_num_sg);
2046
2047 if (!sz) {
2048 vu_panic(dev, "virtio: zero sized buffers are not allowed");
2049 return;
2050 }
2051
2052 while (sz) {
2053 uint64_t len = sz;
2054
2055 if (num_sg == max_num_sg) {
2056 vu_panic(dev, "virtio: too many descriptors in indirect table");
2057 return;
2058 }
2059
2060 iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
2061 if (iov[num_sg].iov_base == NULL) {
2062 vu_panic(dev, "virtio: invalid address for buffers");
2063 return;
2064 }
2065 iov[num_sg].iov_len = len;
2066 num_sg++;
2067 sz -= len;
2068 pa += len;
2069 }
2070
2071 *p_num_sg = num_sg;
2072 }
2073
2074 static void *
2075 virtqueue_alloc_element(size_t sz,
2076 unsigned out_num, unsigned in_num)
2077 {
2078 VuVirtqElement *elem;
2079 size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
2080 size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
2081 size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
2082
2083 assert(sz >= sizeof(VuVirtqElement));
2084 elem = malloc(out_sg_end);
2085 elem->out_num = out_num;
2086 elem->in_num = in_num;
2087 elem->in_sg = (void *)elem + in_sg_ofs;
2088 elem->out_sg = (void *)elem + out_sg_ofs;
2089 return elem;
2090 }
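/*
 * The element and its in_sg[]/out_sg[] arrays share a single malloc(), so one
 * free() releases everything; sz may exceed sizeof(VuVirtqElement) so callers
 * can embed the element at the start of their own per-request structure.
 */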
2091
2092 static void *
2093 vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
2094 {
2095 struct vring_desc *desc = vq->vring.desc;
2096 uint64_t desc_addr, read_len;
2097 unsigned int desc_len;
2098 unsigned int max = vq->vring.num;
2099 unsigned int i = idx;
2100 VuVirtqElement *elem;
2101 unsigned int out_num = 0, in_num = 0;
2102 struct iovec iov[VIRTQUEUE_MAX_SIZE];
2103 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2104 int rc;
2105
2106 if (desc[i].flags & VRING_DESC_F_INDIRECT) {
2107 if (desc[i].len % sizeof(struct vring_desc)) {
2108 vu_panic(dev, "Invalid size for indirect buffer table");
2109 }
2110
2111 /* loop over the indirect descriptor table */
2112 desc_addr = desc[i].addr;
2113 desc_len = desc[i].len;
2114 max = desc_len / sizeof(struct vring_desc);
2115 read_len = desc_len;
2116 desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2117 if (unlikely(desc && read_len != desc_len)) {
2118 /* Failed to use zero copy */
2119 desc = NULL;
2120 if (!virtqueue_read_indirect_desc(dev, desc_buf,
2121 desc_addr,
2122 desc_len)) {
2123 desc = desc_buf;
2124 }
2125 }
2126 if (!desc) {
2127 vu_panic(dev, "Invalid indirect buffer table");
2128 return NULL;
2129 }
2130 i = 0;
2131 }
2132
2133 /* Collect all the descriptors */
2134 do {
2135 if (desc[i].flags & VRING_DESC_F_WRITE) {
2136 virtqueue_map_desc(dev, &in_num, iov + out_num,
2137 VIRTQUEUE_MAX_SIZE - out_num, true,
2138 desc[i].addr, desc[i].len);
2139 } else {
2140 if (in_num) {
2141 vu_panic(dev, "Incorrect order for descriptors");
2142 return NULL;
2143 }
2144 virtqueue_map_desc(dev, &out_num, iov,
2145 VIRTQUEUE_MAX_SIZE, false,
2146 desc[i].addr, desc[i].len);
2147 }
2148
2149 /* If we've got too many, that implies a descriptor loop. */
2150 if ((in_num + out_num) > max) {
2151 vu_panic(dev, "Looped descriptor");
return NULL;
2152 }
2153 rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
2154 } while (rc == VIRTQUEUE_READ_DESC_MORE);
2155
2156 if (rc == VIRTQUEUE_READ_DESC_ERROR) {
2157 vu_panic(dev, "read descriptor error");
2158 return NULL;
2159 }
2160
2161 /* Now copy what we have collected and mapped */
2162 elem = virtqueue_alloc_element(sz, out_num, in_num);
if (!elem) {
return NULL;
}
2163 elem->index = idx;
2164 for (i = 0; i < out_num; i++) {
2165 elem->out_sg[i] = iov[i];
2166 }
2167 for (i = 0; i < in_num; i++) {
2168 elem->in_sg[i] = iov[out_num + i];
2169 }
2170
2171 return elem;
2172 }
2173
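/*
 * In-flight tracking (VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD): the helpers
 * below record in the shared inflight region which descriptors are currently
 * being processed, so that a backend that disconnects and reconnects can
 * resubmit them instead of losing requests.
 */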
2174 static int
2175 vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
2176 {
2177 if (!has_feature(dev->protocol_features,
2178 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2179 return 0;
2180 }
2181
2182 if (unlikely(!vq->inflight)) {
2183 return -1;
2184 }
2185
2186 vq->inflight->desc[desc_idx].counter = vq->counter++;
2187 vq->inflight->desc[desc_idx].inflight = 1;
2188
2189 return 0;
2190 }
2191
2192 static int
2193 vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2194 {
2195 if (!has_feature(dev->protocol_features,
2196 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2197 return 0;
2198 }
2199
2200 if (unlikely(!vq->inflight)) {
2201 return -1;
2202 }
2203
2204 vq->inflight->last_batch_head = desc_idx;
2205
2206 return 0;
2207 }
2208
2209 static int
2210 vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
2211 {
2212 if (!has_feature(dev->protocol_features,
2213 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2214 return 0;
2215 }
2216
2217 if (unlikely(!vq->inflight)) {
2218 return -1;
2219 }
2220
2221 barrier();
2222
2223 vq->inflight->desc[desc_idx].inflight = 0;
2224
2225 barrier();
2226
2227 vq->inflight->used_idx = vq->used_idx;
2228
2229 return 0;
2230 }
2231
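/*
 * Pop the next available element, resubmitting any descriptors that a
 * previous incarnation of the backend left in flight first. Returns NULL if
 * the device is broken or the queue is empty; the caller owns the returned
 * element and must free() it when done.
 */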
2232 void *
2233 vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
2234 {
2235 int i;
2236 unsigned int head;
2237 VuVirtqElement *elem;
2238
2239 if (unlikely(dev->broken) ||
2240 unlikely(!vq->vring.avail)) {
2241 return NULL;
2242 }
2243
2244 if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
2245 i = (--vq->resubmit_num);
2246 elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);
2247
2248 if (!vq->resubmit_num) {
2249 free(vq->resubmit_list);
2250 vq->resubmit_list = NULL;
2251 }
2252
2253 return elem;
2254 }
2255
2256 if (vu_queue_empty(dev, vq)) {
2257 return NULL;
2258 }
2259 /*
2260 * Needed after vu_queue_empty(), see comment in
2261 * virtqueue_num_heads().
2262 */
2263 smp_rmb();
2264
2265 if (vq->inuse >= vq->vring.num) {
2266 vu_panic(dev, "Virtqueue size exceeded");
2267 return NULL;
2268 }
2269
2270 if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
2271 return NULL;
2272 }
2273
2274 if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
2275 vring_set_avail_event(vq, vq->last_avail_idx);
2276 }
2277
2278 elem = vu_queue_map_desc(dev, vq, head, sz);
2279
2280 if (!elem) {
2281 return NULL;
2282 }
2283
2284 vq->inuse++;
2285
2286 vu_queue_inflight_get(dev, vq, head);
2287
2288 return elem;
2289 }
2290
2291 static void
2292 vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2293 size_t len)
2294 {
2295 vq->inuse--;
2296 /* unmap, when DMA support is added */
2297 }
2298
2299 void
2300 vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
2301 size_t len)
2302 {
2303 vq->last_avail_idx--;
2304 vu_queue_detach_element(dev, vq, elem, len);
2305 }
2306
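/*
 * Undo the last num vu_queue_pop() operations that have not been pushed yet,
 * so the same elements will be popped again later.
 */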
2307 bool
2308 vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
2309 {
2310 if (num > vq->inuse) {
2311 return false;
2312 }
2313 vq->last_avail_idx -= num;
2314 vq->inuse -= num;
2315 return true;
2316 }
2317
2318 static inline
2319 void vring_used_write(VuDev *dev, VuVirtq *vq,
2320 struct vring_used_elem *uelem, int i)
2321 {
2322 struct vring_used *used = vq->vring.used;
2323
2324 used->ring[i] = *uelem;
2325 vu_log_write(dev, vq->vring.log_guest_addr +
2326 offsetof(struct vring_used, ring[i]),
2327 sizeof(used->ring[i]));
2328 }
2329
2330
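/*
 * Mark the guest pages written through this element as dirty in the
 * vhost-user log so that a migrating front-end sees the device writes.
 */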
2331 static void
2332 vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
2333 const VuVirtqElement *elem,
2334 unsigned int len)
2335 {
2336 struct vring_desc *desc = vq->vring.desc;
2337 unsigned int i, max, min, desc_len;
2338 uint64_t desc_addr, read_len;
2339 struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
2340 unsigned num_bufs = 0;
2341
2342 max = vq->vring.num;
2343 i = elem->index;
2344
2345 if (desc[i].flags & VRING_DESC_F_INDIRECT) {
2346 if (desc[i].len % sizeof(struct vring_desc)) {
2347 vu_panic(dev, "Invalid size for indirect buffer table");
return;
2348 }
2349
2350 /* loop over the indirect descriptor table */
2351 desc_addr = desc[i].addr;
2352 desc_len = desc[i].len;
2353 max = desc_len / sizeof(struct vring_desc);
2354 read_len = desc_len;
2355 desc = vu_gpa_to_va(dev, &read_len, desc_addr);
2356 if (unlikely(desc && read_len != desc_len)) {
2357 /* Failed to use zero copy */
2358 desc = NULL;
2359 if (!virtqueue_read_indirect_desc(dev, desc_buf,
2360 desc_addr,
2361 desc_len)) {
2362 desc = desc_buf;
2363 }
2364 }
2365 if (!desc) {
2366 vu_panic(dev, "Invalid indirect buffer table");
2367 return;
2368 }
2369 i = 0;
2370 }
2371
2372 do {
2373 if (++num_bufs > max) {
2374 vu_panic(dev, "Looped descriptor");
2375 return;
2376 }
2377
2378 if (desc[i].flags & VRING_DESC_F_WRITE) {
2379 min = MIN(desc[i].len, len);
2380 vu_log_write(dev, desc[i].addr, min);
2381 len -= min;
2382 }
2383
2384 } while (len > 0 &&
2385 (virtqueue_read_next_desc(dev, desc, i, max, &i)
2386 == VIRTQUEUE_READ_DESC_MORE));
2387 }
2388
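/*
 * Write a used ring entry for elem (len bytes written) at offset idx from the
 * current used index. The entry is not visible to the driver until
 * vu_queue_flush() updates used->idx.
 */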
2389 void
2390 vu_queue_fill(VuDev *dev, VuVirtq *vq,
2391 const VuVirtqElement *elem,
2392 unsigned int len, unsigned int idx)
2393 {
2394 struct vring_used_elem uelem;
2395
2396 if (unlikely(dev->broken) ||
2397 unlikely(!vq->vring.avail)) {
2398 return;
2399 }
2400
2401 vu_log_queue_fill(dev, vq, elem, len);
2402
2403 idx = (idx + vq->used_idx) % vq->vring.num;
2404
2405 uelem.id = elem->index;
2406 uelem.len = len;
2407 vring_used_write(dev, vq, &uelem, idx);
2408 }
2409
2410 static inline
2411 void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
2412 {
2413 vq->vring.used->idx = val;
2414 vu_log_write(dev,
2415 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
2416 sizeof(vq->vring.used->idx));
2417
2418 vq->used_idx = val;
2419 }
2420
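/*
 * Publish count previously filled elements by advancing used->idx; the write
 * barrier orders the buffer and used ring stores before the index update.
 */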
2421 void
2422 vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
2423 {
2424 uint16_t old, new;
2425
2426 if (unlikely(dev->broken) ||
2427 unlikely(!vq->vring.avail)) {
2428 return;
2429 }
2430
2431 /* Make sure buffer is written before we update index. */
2432 smp_wmb();
2433
2434 old = vq->used_idx;
2435 new = old + count;
2436 vring_used_idx_set(dev, vq, new);
2437 vq->inuse -= count;
2438 if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
2439 vq->signalled_used_valid = false;
2440 }
2441 }
2442
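/*
 * Fill and flush a single element, bracketing the used index update with the
 * in-flight bookkeeping needed for backend reconnection.
 */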
2443 void
2444 vu_queue_push(VuDev *dev, VuVirtq *vq,
2445 const VuVirtqElement *elem, unsigned int len)
2446 {
2447 vu_queue_fill(dev, vq, elem, len, 0);
2448 vu_queue_inflight_pre_put(dev, vq, elem->index);
2449 vu_queue_flush(dev, vq, 1);
2450 vu_queue_inflight_post_put(dev, vq, elem->index);
2451 }
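
/*
 * Illustrative sketch (not part of the original file): one way a vhost-user
 * backend might drain a queue when its kick fd fires, using the helpers
 * above. handle_kick() and process_request() are hypothetical names;
 * process_request() is assumed to consume the out_sg buffers and write its
 * reply into the in_sg buffers, returning the number of bytes written.
 *
 * static void handle_kick(VuDev *dev, VuVirtq *vq)
 * {
 *     for (;;) {
 *         VuVirtqElement *elem = vu_queue_pop(dev, vq, sizeof(*elem));
 *         if (!elem) {
 *             break;
 *         }
 *         unsigned int written = process_request(elem->out_sg, elem->out_num,
 *                                                elem->in_sg, elem->in_num);
 *         vu_queue_push(dev, vq, elem, written);
 *         free(elem);
 *     }
 *     vu_queue_notify(dev, vq);
 * }
 */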