/*
 * vhost support
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "qemu/atomic.h"
#include "qemu/range.h"
#include "qemu/error-report.h"
#include "qemu/memfd.h"
#include "qemu/log.h"
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/mem/memory-device.h"
#include "migration/blocker.h"
#include "migration/qemu-file-types.h"
#include "sysemu/dma.h"
#include "trace.h"

/* enabled until disconnected backend stabilizes */
#define _VHOST_DEBUG 1

#ifdef _VHOST_DEBUG
#define VHOST_OPS_DEBUG(retval, fmt, ...) \
    do { \
        error_report(fmt ": %s (%d)", ## __VA_ARGS__, \
                     strerror(-retval), -retval); \
    } while (0)
#else
#define VHOST_OPS_DEBUG(retval, fmt, ...) \
    do { } while (0)
#endif

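/*
 * Illustrative usage sketch (not part of the build): how the macro above
 * is meant to be used at a call site.  The specific op and the -EPERM
 * value are assumed purely for the example; retval follows the
 * negative-errno convention used throughout this file.
 *
 *     int r = dev->vhost_ops->vhost_set_owner(dev);
 *     if (r < 0) {
 *         VHOST_OPS_DEBUG(r, "vhost_set_owner failed");
 *         // with r == -EPERM this reports:
 *         // "vhost_set_owner failed: Operation not permitted (1)"
 *     }
 */
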
static struct vhost_log *vhost_log;
static struct vhost_log *vhost_log_shm;

/* Memslots used by backends that support private memslots (without an fd). */
static unsigned int used_memslots;

/* Memslots used by backends that only support shared memslots (with an fd). */
static unsigned int used_shared_memslots;

static QLIST_HEAD(, vhost_dev) vhost_devices =
    QLIST_HEAD_INITIALIZER(vhost_devices);

unsigned int vhost_get_max_memslots(void)
{
    unsigned int max = UINT_MAX;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        max = MIN(max, hdev->vhost_ops->vhost_backend_memslots_limit(hdev));
    }
    return max;
}

unsigned int vhost_get_free_memslots(void)
{
    unsigned int free = UINT_MAX;
    struct vhost_dev *hdev;

    QLIST_FOREACH(hdev, &vhost_devices, entry) {
        unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
        unsigned int cur_free;

        if (hdev->vhost_ops->vhost_backend_no_private_memslots &&
            hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) {
            cur_free = r - used_shared_memslots;
        } else {
            cur_free = r - used_memslots;
        }
        free = MIN(free, cur_free);
    }
    return free;
}

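/*
 * Worked example for the accounting above (numbers assumed for
 * illustration): with two devices registered, a vhost-kernel device whose
 * backend limit is 64 and a vhost-user device (shared memslots only)
 * whose limit is 32, and with used_memslots == 8 and
 * used_shared_memslots == 8, the loop computes MIN(64 - 8, 32 - 8) == 24
 * free memslots.
 */
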
static void vhost_dev_sync_region(struct vhost_dev *dev,
                                  MemoryRegionSection *section,
                                  uint64_t mfirst, uint64_t mlast,
                                  uint64_t rfirst, uint64_t rlast)
{
    vhost_log_chunk_t *dev_log = dev->log->log;

    uint64_t start = MAX(mfirst, rfirst);
    uint64_t end = MIN(mlast, rlast);
    vhost_log_chunk_t *from = dev_log + start / VHOST_LOG_CHUNK;
    vhost_log_chunk_t *to = dev_log + end / VHOST_LOG_CHUNK + 1;
    uint64_t addr = QEMU_ALIGN_DOWN(start, VHOST_LOG_CHUNK);

    if (end < start) {
        return;
    }
    assert(end / VHOST_LOG_CHUNK < dev->log_size);
    assert(start / VHOST_LOG_CHUNK < dev->log_size);

    for (; from < to; ++from) {
        vhost_log_chunk_t log;
        /* We first check with non-atomic: much cheaper,
         * and we expect non-dirty to be the common case. */
        if (!*from) {
            addr += VHOST_LOG_CHUNK;
            continue;
        }
        /* Data must be read atomically. We don't really need barrier semantics
         * but it's easier to use atomic_* than roll our own. */
        log = qatomic_xchg(from, 0);
        while (log) {
            int bit = ctzl(log);
            hwaddr page_addr;
            hwaddr section_offset;
            hwaddr mr_offset;
            page_addr = addr + bit * VHOST_LOG_PAGE;
            section_offset = page_addr - section->offset_within_address_space;
            mr_offset = section_offset + section->offset_within_region;
            memory_region_set_dirty(section->mr, mr_offset, VHOST_LOG_PAGE);
            log &= ~(0x1ull << bit);
        }
        addr += VHOST_LOG_CHUNK;
    }
}

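/*
 * Worked example for the chunk/bit arithmetic above, assuming
 * VHOST_LOG_PAGE == 0x1000 and a 64-bit vhost_log_chunk_t, i.e.
 * VHOST_LOG_CHUNK == 64 * 0x1000 == 0x40000: a page dirtied at guest
 * physical address 0x42000 lives in chunk 0x42000 / 0x40000 == 1, as bit
 * (0x42000 % 0x40000) / 0x1000 == 2.  Walking chunk 1, addr == 0x40000,
 * ctzl() returns 2, and page_addr == 0x40000 + 2 * VHOST_LOG_PAGE ==
 * 0x42000, the page that gets marked dirty.
 */
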
bool vhost_dev_has_iommu(struct vhost_dev *dev)
{
    VirtIODevice *vdev = dev->vdev;

    /*
     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports
     * an incremental memory mapping API via the IOTLB API. On platforms
     * without an IOMMU there is no need to enable this feature, which
     * may cause unnecessary IOTLB miss/update transactions.
     */
    if (vdev) {
        return virtio_bus_device_iommu_enabled(vdev) &&
            virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
    } else {
        return false;
    }
}

static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
                                   MemoryRegionSection *section,
                                   hwaddr first,
                                   hwaddr last)
{
    int i;
    hwaddr start_addr;
    hwaddr end_addr;

    if (!dev->log_enabled || !dev->started) {
        return 0;
    }
    start_addr = section->offset_within_address_space;
    end_addr = range_get_last(start_addr, int128_get64(section->size));
    start_addr = MAX(first, start_addr);
    end_addr = MIN(last, end_addr);

    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        vhost_dev_sync_region(dev, section, start_addr, end_addr,
                              reg->guest_phys_addr,
                              range_get_last(reg->guest_phys_addr,
                                             reg->memory_size));
    }
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (!vq->used_phys && !vq->used_size) {
            continue;
        }

        if (vhost_dev_has_iommu(dev)) {
            IOMMUTLBEntry iotlb;
            hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
            hwaddr phys, s, offset;

            while (used_size) {
                rcu_read_lock();
                iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                                      used_phys,
                                                      true,
                                                      MEMTXATTRS_UNSPECIFIED);
                rcu_read_unlock();

                if (!iotlb.target_as) {
                    qemu_log_mask(LOG_GUEST_ERROR, "translation "
                                  "failure for used_iova %"PRIx64"\n",
                                  used_phys);
                    return -EINVAL;
                }

                offset = used_phys & iotlb.addr_mask;
                phys = iotlb.translated_addr + offset;

                /*
                 * Distance from start of used ring until last byte of
                 * IOMMU page.
                 */
                s = iotlb.addr_mask - offset;
                /*
                 * Size of used ring, or of the part of it until end
                 * of IOMMU page. To avoid zero result, do the adding
                 * outside of MIN().
                 */
                s = MIN(s, used_size - 1) + 1;

                vhost_dev_sync_region(dev, section, start_addr, end_addr, phys,
                                      range_get_last(phys, s));
                used_size -= s;
                used_phys += s;
            }
        } else {
            vhost_dev_sync_region(dev, section, start_addr,
                                  end_addr, vq->used_phys,
                                  range_get_last(vq->used_phys, vq->used_size));
        }
    }
    return 0;
}

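/*
 * Worked example for the IOTLB walk above (an assumed 4 KiB IOMMU page,
 * so iotlb.addr_mask == 0xfff): for a used ring at used_phys == 0x1f80
 * with used_size == 0x200, offset == 0xf80 and s == 0xfff - 0xf80 == 0x7f
 * bytes remain in the page; s = MIN(0x7f, 0x1ff) + 1 == 0x80 syncs up to
 * the page end, and the next iteration handles the remaining 0x180 bytes
 * from the following IOMMU page.  Adding the 1 outside MIN() keeps s from
 * ever becoming zero and looping forever.
 */
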
static void vhost_log_sync(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL);
}

static void vhost_log_sync_range(struct vhost_dev *dev,
                                 hwaddr first, hwaddr last)
{
    int i;
    /* FIXME: this is N^2 in number of sections */
    for (i = 0; i < dev->n_mem_sections; ++i) {
        MemoryRegionSection *section = &dev->mem_sections[i];
        vhost_sync_dirty_bitmap(dev, section, first, last);
    }
}

static uint64_t vhost_get_log_size(struct vhost_dev *dev)
{
    uint64_t log_size = 0;
    int i;
    for (i = 0; i < dev->mem->nregions; ++i) {
        struct vhost_memory_region *reg = dev->mem->regions + i;
        uint64_t last = range_get_last(reg->guest_phys_addr,
                                       reg->memory_size);
        log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1);
    }
    return log_size;
}

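/*
 * Worked example, reusing the assumed VHOST_LOG_CHUNK == 0x40000 from
 * above: a single region with guest_phys_addr == 0 and memory_size ==
 * 0x100000 (1 MiB) has last byte 0xfffff, so the log must cover
 * 0xfffff / 0x40000 + 1 == 4 chunks; with several regions the MAX()
 * keeps the size of the largest requirement.
 */
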
static int vhost_set_backend_type(struct vhost_dev *dev,
                                  VhostBackendType backend_type)
{
    int r = 0;

    switch (backend_type) {
#ifdef CONFIG_VHOST_KERNEL
    case VHOST_BACKEND_TYPE_KERNEL:
        dev->vhost_ops = &kernel_ops;
        break;
#endif
#ifdef CONFIG_VHOST_USER
    case VHOST_BACKEND_TYPE_USER:
        dev->vhost_ops = &user_ops;
        break;
#endif
#ifdef CONFIG_VHOST_VDPA
    case VHOST_BACKEND_TYPE_VDPA:
        dev->vhost_ops = &vdpa_ops;
        break;
#endif
    default:
        error_report("Unknown vhost backend type");
        r = -1;
    }

    return r;
}

static struct vhost_log *vhost_log_alloc(uint64_t size, bool share)
{
    Error *err = NULL;
    struct vhost_log *log;
    uint64_t logsize = size * sizeof(*(log->log));
    int fd = -1;

    log = g_new0(struct vhost_log, 1);
    if (share) {
        log->log = qemu_memfd_alloc("vhost-log", logsize,
                                    F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                                    &fd, &err);
        if (err) {
            error_report_err(err);
            g_free(log);
            return NULL;
        }
        memset(log->log, 0, logsize);
    } else {
        log->log = g_malloc0(logsize);
    }

    log->size = size;
    log->refcnt = 1;
    log->fd = fd;

    return log;
}

static struct vhost_log *vhost_log_get(uint64_t size, bool share)
{
    struct vhost_log *log = share ? vhost_log_shm : vhost_log;

    if (!log || log->size != size) {
        log = vhost_log_alloc(size, share);
        if (share) {
            vhost_log_shm = log;
        } else {
            vhost_log = log;
        }
    } else {
        ++log->refcnt;
    }

    return log;
}

static void vhost_log_put(struct vhost_dev *dev, bool sync)
{
    struct vhost_log *log = dev->log;

    if (!log) {
        return;
    }

    --log->refcnt;
    if (log->refcnt == 0) {
        /* Sync only the range covered by the old log */
        if (dev->log_size && sync) {
            vhost_log_sync_range(dev, 0, dev->log_size * VHOST_LOG_CHUNK - 1);
        }

        if (vhost_log == log) {
            g_free(log->log);
            vhost_log = NULL;
        } else if (vhost_log_shm == log) {
            qemu_memfd_free(log->log, log->size * sizeof(*(log->log)),
                            log->fd);
            vhost_log_shm = NULL;
        }

        g_free(log);
    }

    dev->log = NULL;
    dev->log_size = 0;
}

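/*
 * Design note: vhost_log_get()/vhost_log_put() implement a refcounted
 * single-slot cache per log kind (one private log, one memfd-backed
 * shared log).  All devices that agree on the size share one allocation;
 * a device asking for a different size allocates a fresh log and repoints
 * the cache, while earlier holders keep their old log alive through its
 * refcount until the last of them drops it.
 */
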
static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
{
    return dev->vhost_ops->vhost_requires_shm_log &&
           dev->vhost_ops->vhost_requires_shm_log(dev);
}

static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
{
    struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev));
    uint64_t log_base = (uintptr_t)log->log;
    int r;

    /* Inform the backend of the log switch; this must be done before
       releasing the current log, to ensure no logging is lost. */
    r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_log_base failed");
    }

    vhost_log_put(dev, true);
    dev->log = log;
    dev->log_size = size;
}

static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
                              hwaddr *plen, bool is_write)
{
    if (!vhost_dev_has_iommu(dev)) {
        return cpu_physical_memory_map(addr, plen, is_write);
    } else {
        return (void *)(uintptr_t)addr;
    }
}

static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
                               hwaddr len, int is_write,
                               hwaddr access_len)
{
    if (!vhost_dev_has_iommu(dev)) {
        cpu_physical_memory_unmap(buffer, len, is_write, access_len);
    }
}

static int vhost_verify_ring_part_mapping(void *ring_hva,
                                          uint64_t ring_gpa,
                                          uint64_t ring_size,
                                          void *reg_hva,
                                          uint64_t reg_gpa,
                                          uint64_t reg_size)
{
    uint64_t hva_ring_offset;
    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
    uint64_t reg_last = range_get_last(reg_gpa, reg_size);

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;
    }
    /* check that the whole ring is mapped */
    if (ring_last > reg_last) {
        return -ENOMEM;
    }
    /* check that the ring's MemoryRegion wasn't replaced */
    hva_ring_offset = ring_gpa - reg_gpa;
    if (ring_hva != reg_hva + hva_ring_offset) {
        return -EBUSY;
    }

    return 0;
}

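/*
 * Illustrative case for the three checks above (numbers assumed): a used
 * ring at ring_gpa == 0x3000 with ring_size == 0x1000 checked against a
 * region covering reg_gpa == 0x0..0x3fff first passes the overlap test,
 * then the containment test (ring_last == 0x3fff <= reg_last == 0x3fff),
 * and finally must satisfy ring_hva == reg_hva + 0x3000; any other host
 * address means the backing MemoryRegion changed and yields -EBUSY.
 */
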
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
                                      void *reg_hva,
                                      uint64_t reg_gpa,
                                      uint64_t reg_size)
{
    int i, j;
    int r = 0;
    const char *part_name[] = {
        "descriptor table",
        "available ring",
        "used ring"
    };

    if (vhost_dev_has_iommu(dev)) {
        return 0;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_virtqueue *vq = dev->vqs + i;

        if (vq->desc_phys == 0) {
            continue;
        }

        j = 0;
        r = vhost_verify_ring_part_mapping(
                vq->desc, vq->desc_phys, vq->desc_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->avail, vq->avail_phys, vq->avail_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }

        j++;
        r = vhost_verify_ring_part_mapping(
                vq->used, vq->used_phys, vq->used_size,
                reg_hva, reg_gpa, reg_size);
        if (r) {
            break;
        }
    }

    if (r == -ENOMEM) {
        error_report("Unable to map %s for ring %d", part_name[j], i);
    } else if (r == -EBUSY) {
        error_report("%s relocated for ring %d", part_name[j], i);
    }
    return r;
}

/*
 * vhost_section: identify sections needed for vhost access
 *
 * We only care about RAM sections here (where virtqueue and guest
 * internals accessed by virtio might live).
 */
static bool vhost_section(struct vhost_dev *dev, MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (memory_region_is_ram(mr) && !memory_region_is_rom(mr)) {
        uint8_t dirty_mask = memory_region_get_dirty_log_mask(mr);
        uint8_t handled_dirty;

        /*
         * Kernel-based vhost doesn't handle any block which is doing
         * dirty-tracking other than migration, for which it has
         * specific logging support. However, for TCG the kernel never
         * gets involved anyway, so we can also ignore its
         * self-modifying code detection flags. A vhost-user client
         * could still confuse a TCG guest if it re-writes executable
         * memory that has already been translated.
         */
        handled_dirty = (1 << DIRTY_MEMORY_MIGRATION) |
            (1 << DIRTY_MEMORY_CODE);

        if (dirty_mask & ~handled_dirty) {
            trace_vhost_reject_section(mr->name, 1);
            return false;
        }

        /*
         * Some backends (like vhost-user) can only handle memory regions
         * that have an fd (can be mapped into a different process). Filter
         * the ones without an fd out, if requested.
         *
         * TODO: we might have to limit to MAP_SHARED as well.
         */
        if (memory_region_get_fd(section->mr) < 0 &&
            dev->vhost_ops->vhost_backend_no_private_memslots &&
            dev->vhost_ops->vhost_backend_no_private_memslots(dev)) {
            trace_vhost_reject_section(mr->name, 2);
            return false;
        }

        trace_vhost_section(mr->name);
        return true;
    } else {
        trace_vhost_reject_section(mr->name, 3);
        return false;
    }
}

static void vhost_begin(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    dev->tmp_sections = NULL;
    dev->n_tmp_sections = 0;
}

static void vhost_commit(MemoryListener *listener)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    MemoryRegionSection *old_sections;
    int n_old_sections;
    uint64_t log_size;
    size_t regions_size;
    int r;
    int i;
    bool changed = false;

    /* Note we can be called before the device is started, but then
     * starting the device calls set_mem_table, so we need to have
     * built the data structures.
     */
    old_sections = dev->mem_sections;
    n_old_sections = dev->n_mem_sections;
    dev->mem_sections = dev->tmp_sections;
    dev->n_mem_sections = dev->n_tmp_sections;

    if (dev->n_mem_sections != n_old_sections) {
        changed = true;
    } else {
        /* Same size, let's check the contents */
        for (i = 0; i < n_old_sections; i++) {
            if (!MemoryRegionSection_eq(&old_sections[i],
                                        &dev->mem_sections[i])) {
                changed = true;
                break;
            }
        }
    }

    trace_vhost_commit(dev->started, changed);
    if (!changed) {
        goto out;
    }

    /* Rebuild the regions list from the new sections list */
    regions_size = offsetof(struct vhost_memory, regions) +
                       dev->n_mem_sections * sizeof dev->mem->regions[0];
    dev->mem = g_realloc(dev->mem, regions_size);
    dev->mem->nregions = dev->n_mem_sections;

    if (dev->vhost_ops->vhost_backend_no_private_memslots &&
        dev->vhost_ops->vhost_backend_no_private_memslots(dev)) {
        used_shared_memslots = dev->mem->nregions;
    } else {
        used_memslots = dev->mem->nregions;
    }

    for (i = 0; i < dev->n_mem_sections; i++) {
        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
        struct MemoryRegionSection *mrs = dev->mem_sections + i;

        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
        cur_vmr->memory_size     = int128_get64(mrs->size);
        cur_vmr->userspace_addr  =
            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
            mrs->offset_within_region;
        cur_vmr->flags_padding   = 0;
    }

    if (!dev->started) {
        goto out;
    }

    for (i = 0; i < dev->mem->nregions; i++) {
        if (vhost_verify_ring_mappings(dev,
                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
                       dev->mem->regions[i].guest_phys_addr,
                       dev->mem->regions[i].memory_size)) {
            error_report("Verify ring failure on region %d", i);
            abort();
        }
    }

    if (!dev->log_enabled) {
        r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
        if (r < 0) {
            VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
        }
        goto out;
    }
    log_size = vhost_get_log_size(dev);
    /* We allocate an extra 4K bytes to log,
     * to reduce the number of reallocations. */
#define VHOST_LOG_BUFFER (0x1000 / sizeof *dev->log)
    /* To log more, must increase log size before table update. */
    if (dev->log_size < log_size) {
        vhost_dev_log_resize(dev, log_size + VHOST_LOG_BUFFER);
    }
    r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed");
    }
    /* To log less, can only decrease log size after table update. */
    if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
        vhost_dev_log_resize(dev, log_size);
    }

out:
    /* Deref the old list of sections, this must happen _after_ the
     * vhost_set_mem_table to ensure the client isn't still using the
     * section we're about to unref.
     */
    while (n_old_sections--) {
        memory_region_unref(old_sections[n_old_sections].mr);
    }
    g_free(old_sections);
    return;
}

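/*
 * Design note on the resize ordering in vhost_commit() above: the log is
 * grown *before* vhost_set_mem_table() so that pages of any newly added
 * region can be logged immediately, and only shrunk *after* the table
 * update, once the backend can no longer dirty the tail that is being
 * dropped.  The VHOST_LOG_BUFFER slack (4 KiB worth of chunks) keeps
 * small layout changes from reallocating the log every time.
 */
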
/* Adds the section data to the tmp_sections structure.
 * It relies on the listener calling us in memory address order
 * and for each region (via the _add and _nop methods) to
 * join neighbours.
 */
static void vhost_region_add_section(struct vhost_dev *dev,
                                     MemoryRegionSection *section)
{
    bool need_add = true;
    uint64_t mrs_size = int128_get64(section->size);
    uint64_t mrs_gpa = section->offset_within_address_space;
    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    RAMBlock *mrs_rb = section->mr->ram_block;

    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
                                   mrs_host);

    if (dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER) {
        /* Round the section to its page size */
        /* First align the start down to a page boundary */
        size_t mrs_page = qemu_ram_pagesize(mrs_rb);
        uint64_t alignage = mrs_host & (mrs_page - 1);
        if (alignage) {
            mrs_host -= alignage;
            mrs_size += alignage;
            mrs_gpa  -= alignage;
        }
        /* Now align the size up to a page boundary */
        alignage = mrs_size & (mrs_page - 1);
        if (alignage) {
            mrs_size += mrs_page - alignage;
        }
        trace_vhost_region_add_section_aligned(section->mr->name, mrs_gpa,
                                               mrs_size, mrs_host);
    }

    if (dev->n_tmp_sections && !section->unmergeable) {
        /* Since we already have at least one section, let's see if
         * this extends it; since we're scanning in order, we only
         * have to look at the last one, and the FlatView that calls
         * us shouldn't have overlaps.
         */
        MemoryRegionSection *prev_sec = dev->tmp_sections +
                                        (dev->n_tmp_sections - 1);
        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
        uint64_t prev_size = int128_get64(prev_sec->size);
        uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
        uint64_t prev_host_start =
            (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
            prev_sec->offset_within_region;
        uint64_t prev_host_end = range_get_last(prev_host_start, prev_size);

        if (mrs_gpa <= (prev_gpa_end + 1)) {
            /* OK, looks like overlapping/intersecting - it's possible that
             * the rounding to page sizes has made them overlap, but they
             * should match up in the same RAMBlock if they do.
             */
            if (mrs_gpa < prev_gpa_start) {
                error_report("%s:Section '%s' rounded to %"PRIx64
                             " prior to previous '%s' %"PRIx64,
                             __func__, section->mr->name, mrs_gpa,
                             prev_sec->mr->name, prev_gpa_start);
                /* A way to cleanly fail here would be better */
                return;
            }
            /* Offset from the start of the previous GPA to this GPA */
            size_t offset = mrs_gpa - prev_gpa_start;

            if (prev_host_start + offset == mrs_host &&
                section->mr == prev_sec->mr && !prev_sec->unmergeable) {
                uint64_t max_end = MAX(prev_host_end, mrs_host + mrs_size);
                need_add = false;
                prev_sec->offset_within_address_space =
                    MIN(prev_gpa_start, mrs_gpa);
                prev_sec->offset_within_region =
                    MIN(prev_host_start, mrs_host) -
                    (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr);
                prev_sec->size = int128_make64(max_end - MIN(prev_host_start,
                                                             mrs_host));
                trace_vhost_region_add_section_merge(section->mr->name,
                                        int128_get64(prev_sec->size),
                                        prev_sec->offset_within_address_space,
                                        prev_sec->offset_within_region);
            } else {
                /* adjoining regions are fine, but overlapping ones with
                 * different blocks/offsets shouldn't happen
                 */
                if (mrs_gpa != prev_gpa_end + 1) {
                    error_report("%s: Overlapping but not coherent sections "
                                 "at %"PRIx64,
                                 __func__, mrs_gpa);
                    return;
                }
            }
        }
    }

    if (need_add) {
        ++dev->n_tmp_sections;
        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
                                    dev->n_tmp_sections);
        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
        /* The flatview isn't stable and we don't use it, making it NULL
         * means we can memcmp the list.
         */
        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
        memory_region_ref(section->mr);
    }
}

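/*
 * Worked example for the merge logic above (an assumed 4 KiB RAMBlock
 * page size): a previous section covering GPA 0x0..0xffff and a new
 * vhost-user section starting at GPA 0x10800 is first rounded down to
 * mrs_gpa == 0x10000.  That abuts the previous end (prev_gpa_end + 1 ==
 * 0x10000), and if the host mapping lines up, i.e. prev_host_start +
 * 0x10000 == mrs_host, the two are collapsed into one section rather
 * than consuming a second backend memslot.
 */
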
/* Used for both add and nop callbacks */
static void vhost_region_addnop(MemoryListener *listener,
                                MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);

    if (!vhost_section(dev, section)) {
        return;
    }
    vhost_region_add_section(dev, section);
}

static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
    struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n);
    struct vhost_dev *hdev = iommu->hdev;
    hwaddr iova = iotlb->iova + iommu->iommu_offset;

    if (vhost_backend_invalidate_device_iotlb(hdev, iova,
                                              iotlb->addr_mask + 1)) {
        error_report("Failed to invalidate device iotlb");
    }
}

static void vhost_iommu_region_add(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;
    Int128 end;
    int iommu_idx;
    IOMMUMemoryRegion *iommu_mr;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    iommu_mr = IOMMU_MEMORY_REGION(section->mr);

    iommu = g_malloc0(sizeof(*iommu));
    end = int128_add(int128_make64(section->offset_within_region),
                     section->size);
    end = int128_sub(end, int128_one());
    iommu_idx = memory_region_iommu_attrs_to_index(iommu_mr,
                                                   MEMTXATTRS_UNSPECIFIED);
    iommu_notifier_init(&iommu->n, vhost_iommu_unmap_notify,
                        dev->vdev->device_iotlb_enabled ?
                            IOMMU_NOTIFIER_DEVIOTLB_UNMAP :
                            IOMMU_NOTIFIER_UNMAP,
                        section->offset_within_region,
                        int128_get64(end),
                        iommu_idx);
    iommu->mr = section->mr;
    iommu->iommu_offset = section->offset_within_address_space -
                          section->offset_within_region;
    iommu->hdev = dev;
    memory_region_register_iommu_notifier(section->mr, &iommu->n,
                                          &error_fatal);
    QLIST_INSERT_HEAD(&dev->iommu_list, iommu, iommu_next);
    /* TODO: can replay help performance here? */
}

static void vhost_iommu_region_del(MemoryListener *listener,
                                   MemoryRegionSection *section)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         iommu_listener);
    struct vhost_iommu *iommu;

    if (!memory_region_is_iommu(section->mr)) {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        if (iommu->mr == section->mr &&
            iommu->n.start == section->offset_within_region) {
            memory_region_unregister_iommu_notifier(iommu->mr,
                                                    &iommu->n);
            QLIST_REMOVE(iommu, iommu_next);
            g_free(iommu);
            break;
        }
    }
}

void vhost_toggle_device_iotlb(VirtIODevice *vdev)
{
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    struct vhost_dev *dev;
    struct vhost_iommu *iommu;

    if (vdev->vhost_started) {
        dev = vdc->get_vhost(vdev);
    } else {
        return;
    }

    QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
        memory_region_unregister_iommu_notifier(iommu->mr, &iommu->n);
        iommu->n.notifier_flags = vdev->device_iotlb_enabled ?
                IOMMU_NOTIFIER_DEVIOTLB_UNMAP : IOMMU_NOTIFIER_UNMAP;
        memory_region_register_iommu_notifier(iommu->mr, &iommu->n,
                                              &error_fatal);
    }
}

static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                    struct vhost_virtqueue *vq,
                                    unsigned idx, bool enable_log)
{
    struct vhost_vring_addr addr;
    int r;
    memset(&addr, 0, sizeof(struct vhost_vring_addr));

    if (dev->vhost_ops->vhost_vq_get_addr) {
        r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq);
        if (r < 0) {
            VHOST_OPS_DEBUG(r, "vhost_vq_get_addr failed");
            return r;
        }
    } else {
        addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc;
        addr.avail_user_addr = (uint64_t)(unsigned long)vq->avail;
        addr.used_user_addr = (uint64_t)(unsigned long)vq->used;
    }
    addr.index = idx;
    addr.log_guest_addr = vq->used_phys;
    addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0;
    r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_addr failed");
    }
    return r;
}

static int vhost_dev_set_features(struct vhost_dev *dev,
                                  bool enable_log)
{
    uint64_t features = dev->acked_features;
    int r;
    if (enable_log) {
        features |= 0x1ULL << VHOST_F_LOG_ALL;
    }
    if (!vhost_dev_has_iommu(dev)) {
        features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM);
    }
    if (dev->vhost_ops->vhost_force_iommu) {
        if (dev->vhost_ops->vhost_force_iommu(dev) == true) {
            features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM;
        }
    }
    r = dev->vhost_ops->vhost_set_features(dev, features);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_features failed");
        goto out;
    }
    if (dev->vhost_ops->vhost_set_backend_cap) {
        r = dev->vhost_ops->vhost_set_backend_cap(dev);
        if (r < 0) {
            VHOST_OPS_DEBUG(r, "vhost_set_backend_cap failed");
            goto out;
        }
    }

out:
    return r;
}

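/*
 * Illustrative bit arithmetic for the feature mangling above, assuming
 * the kernel's VHOST_F_LOG_ALL == 26 and VIRTIO_F_IOMMU_PLATFORM == 33:
 * with acked_features == (1ULL << 33) on a device without a vIOMMU and
 * enable_log == true, the backend is offered
 * ((1ULL << 33) | (1ULL << 26)) & ~(1ULL << 33) == 1ULL << 26,
 * i.e. logging on and IOMMU_PLATFORM stripped.
 */
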
static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log)
{
    int r, i, idx;
    hwaddr addr;

    r = vhost_dev_set_features(dev, enable_log);
    if (r < 0) {
        goto err_features;
    }
    for (i = 0; i < dev->nvqs; ++i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        addr = virtio_queue_get_desc_addr(dev->vdev, idx);
        if (!addr) {
            /*
             * The queue might not be ready for start. If this
             * is the case there is no reason to continue the process.
             * Similar logic is used by the vhost_virtqueue_start()
             * routine.
             */
            continue;
        }
        r = vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                     enable_log);
        if (r < 0) {
            goto err_vq;
        }
    }
    return 0;
err_vq:
    for (; i >= 0; --i) {
        idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i);
        addr = virtio_queue_get_desc_addr(dev->vdev, idx);
        if (!addr) {
            continue;
        }
        vhost_virtqueue_set_addr(dev, dev->vqs + i, idx,
                                 dev->log_enabled);
    }
    vhost_dev_set_features(dev, dev->log_enabled);
err_features:
    return r;
}

static int vhost_migration_log(MemoryListener *listener, bool enable)
{
    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                         memory_listener);
    int r;
    if (enable == dev->log_enabled) {
        return 0;
    }
    if (!dev->started) {
        dev->log_enabled = enable;
        return 0;
    }

    r = 0;
    if (!enable) {
        r = vhost_dev_set_log(dev, false);
        if (r < 0) {
            goto check_dev_state;
        }
        vhost_log_put(dev, false);
    } else {
        vhost_dev_log_resize(dev, vhost_get_log_size(dev));
        r = vhost_dev_set_log(dev, true);
        if (r < 0) {
            goto check_dev_state;
        }
    }

check_dev_state:
    dev->log_enabled = enable;
    /*
     * vhost-user-* devices could change their state during log
     * initialization due to disconnect. So check dev state after
     * vhost communication.
     */
    if (!dev->started) {
        /*
         * Since the device is in the stopped state, it is okay for
         * migration. Return success.
         */
        r = 0;
    }
    if (r) {
        /* An error occurred. */
        dev->log_enabled = false;
    }

    return r;
}

static void vhost_log_global_start(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, true);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_global_stop(MemoryListener *listener)
{
    int r;

    r = vhost_migration_log(listener, false);
    if (r < 0) {
        abort();
    }
}

static void vhost_log_start(MemoryListener *listener,
                            MemoryRegionSection *section,
                            int old, int new)
{
    /* FIXME: implement */
}

static void vhost_log_stop(MemoryListener *listener,
                           MemoryRegionSection *section,
                           int old, int new)
{
    /* FIXME: implement */
}

/* The vhost driver natively knows how to handle the vrings of non
 * cross-endian legacy devices and modern devices. Only legacy devices
 * exposed to a bi-endian guest may require the vhost driver to use a
 * specific endianness.
 */
static inline bool vhost_needs_vring_endian(VirtIODevice *vdev)
{
    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
        return false;
    }
#if HOST_BIG_ENDIAN
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_LITTLE;
#else
    return vdev->device_endian == VIRTIO_DEVICE_ENDIAN_BIG;
#endif
}

static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev,
                                                   bool is_big_endian,
                                                   int vhost_vq_index)
{
    int r;
    struct vhost_vring_state s = {
        .index = vhost_vq_index,
        .num = is_big_endian
    };

    r = dev->vhost_ops->vhost_set_vring_endian(dev, &s);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_endian failed");
    }
    return r;
}

static int vhost_memory_region_lookup(struct vhost_dev *hdev,
                                      uint64_t gpa, uint64_t *uaddr,
                                      uint64_t *len)
{
    int i;

    for (i = 0; i < hdev->mem->nregions; i++) {
        struct vhost_memory_region *reg = hdev->mem->regions + i;

        if (gpa >= reg->guest_phys_addr &&
            reg->guest_phys_addr + reg->memory_size > gpa) {
            *uaddr = reg->userspace_addr + gpa - reg->guest_phys_addr;
            *len = reg->guest_phys_addr + reg->memory_size - gpa;
            return 0;
        }
    }

    return -EFAULT;
}

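/*
 * Worked example for the lookup above (an assumed region with
 * guest_phys_addr == 0x40000000, memory_size == 0x10000000 and
 * userspace_addr == 0x7f0000000000): a miss at gpa == 0x40001000 falls
 * inside the region, so *uaddr == 0x7f0000001000 and *len == 0xffff000,
 * the bytes left between gpa and the region end.
 */
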
int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write)
{
    IOMMUTLBEntry iotlb;
    uint64_t uaddr, len;
    int ret = -EFAULT;

    RCU_READ_LOCK_GUARD();

    trace_vhost_iotlb_miss(dev, 1);

    iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
                                          iova, write,
                                          MEMTXATTRS_UNSPECIFIED);
    if (iotlb.target_as != NULL) {
        ret = vhost_memory_region_lookup(dev, iotlb.translated_addr,
                                         &uaddr, &len);
        if (ret) {
            trace_vhost_iotlb_miss(dev, 3);
            error_report("Failed to look up the translated address "
                         "%"PRIx64, iotlb.translated_addr);
            goto out;
        }

        len = MIN(iotlb.addr_mask + 1, len);
        iova = iova & ~iotlb.addr_mask;

        ret = vhost_backend_update_device_iotlb(dev, iova, uaddr,
                                                len, iotlb.perm);
        if (ret) {
            trace_vhost_iotlb_miss(dev, 4);
            error_report("Failed to update device iotlb");
            goto out;
        }
    }

    trace_vhost_iotlb_miss(dev, 2);

out:
    return ret;
}

int vhost_virtqueue_start(struct vhost_dev *dev,
                          struct VirtIODevice *vdev,
                          struct vhost_virtqueue *vq,
                          unsigned idx)
{
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
    VirtioBusState *vbus = VIRTIO_BUS(qbus);
    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus);
    hwaddr s, l, a;
    int r;
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_file file = {
        .index = vhost_vq_index
    };
    struct vhost_vring_state state = {
        .index = vhost_vq_index
    };
    struct VirtQueue *vvq = virtio_get_queue(vdev, idx);

    a = virtio_queue_get_desc_addr(vdev, idx);
    if (a == 0) {
        /* Queue might not be ready for start */
        return 0;
    }

    vq->num = state.num = virtio_queue_get_num(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_num(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_num failed");
        return r;
    }

    state.num = virtio_queue_get_last_avail_idx(vdev, idx);
    r = dev->vhost_ops->vhost_set_vring_base(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed");
        return r;
    }

    if (vhost_needs_vring_endian(vdev)) {
        r = vhost_virtqueue_set_vring_endian_legacy(dev,
                                                    virtio_is_big_endian(vdev),
                                                    vhost_vq_index);
        if (r) {
            return r;
        }
    }

    vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
    vq->desc_phys = a;
    vq->desc = vhost_memory_map(dev, a, &l, false);
    if (!vq->desc || l != s) {
        r = -ENOMEM;
        goto fail_alloc_desc;
    }
    vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
    vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
    vq->avail = vhost_memory_map(dev, a, &l, false);
    if (!vq->avail || l != s) {
        r = -ENOMEM;
        goto fail_alloc_avail;
    }
    vq->used_size = s = l = virtio_queue_get_used_size(vdev, idx);
    vq->used_phys = a = virtio_queue_get_used_addr(vdev, idx);
    vq->used = vhost_memory_map(dev, a, &l, true);
    if (!vq->used || l != s) {
        r = -ENOMEM;
        goto fail_alloc_used;
    }

    r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
    if (r < 0) {
        goto fail_alloc;
    }

    file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq));
    r = dev->vhost_ops->vhost_set_vring_kick(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed");
        goto fail_kick;
    }

    /* Clear and discard previous events if any. */
    event_notifier_test_and_clear(&vq->masked_notifier);

    /* Init vring in unmasked state, unless guest_notifier_mask
     * will do it later.
     */
    if (!vdev->use_guest_notifier_mask) {
        /* TODO: check and handle errors. */
        vhost_virtqueue_mask(dev, vdev, idx, false);
    }

    if (k->query_guest_notifiers &&
        k->query_guest_notifiers(qbus->parent) &&
        virtio_queue_vector(vdev, idx) == VIRTIO_NO_VECTOR) {
        file.fd = -1;
        r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
        if (r) {
            goto fail_vector;
        }
    }

    return 0;

fail_vector:
fail_kick:
fail_alloc:
    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       0, 0);
fail_alloc_used:
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, 0);
fail_alloc_avail:
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, 0);
fail_alloc_desc:
    return r;
}

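/*
 * Design note for vhost_virtqueue_start() above: each of the three vring
 * parts is mapped with l preset to the expected size s, and the mapping
 * only counts if the full length came back (l != s means the ring
 * straddles regions and cannot be handed to the backend as one pointer).
 * With a vIOMMU, vhost_memory_map() instead passes the IOVA straight
 * through and the backend resolves it later via IOTLB misses.
 */
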
void vhost_virtqueue_stop(struct vhost_dev *dev,
                          struct VirtIODevice *vdev,
                          struct vhost_virtqueue *vq,
                          unsigned idx)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, idx);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
    };
    int r;

    if (virtio_queue_get_desc_addr(vdev, idx) == 0) {
        /* Don't stop the virtqueue which might have not been started */
        return;
    }

    r = dev->vhost_ops->vhost_get_vring_base(dev, &state);
    if (r < 0) {
        VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore failed: %d", idx, r);
        /* Connection to the backend is broken, so let's sync internal
         * last avail idx to the device used idx.
         */
        virtio_queue_restore_last_avail_idx(vdev, idx);
    } else {
        virtio_queue_set_last_avail_idx(vdev, idx, state.num);
    }
    virtio_queue_invalidate_signalled_used(vdev, idx);
    virtio_queue_update_used_idx(vdev, idx);

    /* In the cross-endian case, we need to reset the vring endianness to
     * native, as legacy devices expect by default.
     */
    if (vhost_needs_vring_endian(vdev)) {
        vhost_virtqueue_set_vring_endian_legacy(dev,
                                                !virtio_is_big_endian(vdev),
                                                vhost_vq_index);
    }

    vhost_memory_unmap(dev, vq->used, virtio_queue_get_used_size(vdev, idx),
                       1, virtio_queue_get_used_size(vdev, idx));
    vhost_memory_unmap(dev, vq->avail, virtio_queue_get_avail_size(vdev, idx),
                       0, virtio_queue_get_avail_size(vdev, idx));
    vhost_memory_unmap(dev, vq->desc, virtio_queue_get_desc_size(vdev, idx),
                       0, virtio_queue_get_desc_size(vdev, idx));
}

static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev,
                                                int n, uint32_t timeout)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_state state = {
        .index = vhost_vq_index,
        .num = timeout,
    };
    int r;

    if (!dev->vhost_ops->vhost_set_vring_busyloop_timeout) {
        return -EINVAL;
    }

    r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_busyloop_timeout failed");
        return r;
    }

    return 0;
}

static void vhost_virtqueue_error_notifier(EventNotifier *n)
{
    struct vhost_virtqueue *vq = container_of(n, struct vhost_virtqueue,
                                              error_notifier);
    struct vhost_dev *dev = vq->dev;
    int index = vq - dev->vqs;

    if (event_notifier_test_and_clear(n) && dev->vdev) {
        VHOST_OPS_DEBUG(-EINVAL, "vhost vring error in virtqueue %d",
                        dev->vq_index + index);
    }
}

static int vhost_virtqueue_init(struct vhost_dev *dev,
                                struct vhost_virtqueue *vq, int n)
{
    int vhost_vq_index = dev->vhost_ops->vhost_get_vq_index(dev, n);
    struct vhost_vring_file file = {
        .index = vhost_vq_index,
    };
    int r = event_notifier_init(&vq->masked_notifier, 0);
    if (r < 0) {
        return r;
    }

    file.fd = event_notifier_get_wfd(&vq->masked_notifier);
    r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
    if (r) {
        VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed");
        goto fail_call;
    }

    vq->dev = dev;

    if (dev->vhost_ops->vhost_set_vring_err) {
        r = event_notifier_init(&vq->error_notifier, 0);
        if (r < 0) {
            goto fail_call;
        }

        file.fd = event_notifier_get_fd(&vq->error_notifier);
        r = dev->vhost_ops->vhost_set_vring_err(dev, &file);
        if (r) {
            VHOST_OPS_DEBUG(r, "vhost_set_vring_err failed");
            goto fail_err;
        }

        event_notifier_set_handler(&vq->error_notifier,
                                   vhost_virtqueue_error_notifier);
    }

    return 0;

fail_err:
    event_notifier_cleanup(&vq->error_notifier);
fail_call:
    event_notifier_cleanup(&vq->masked_notifier);
    return r;
}

static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
{
    event_notifier_cleanup(&vq->masked_notifier);
    if (vq->dev->vhost_ops->vhost_set_vring_err) {
        event_notifier_set_handler(&vq->error_notifier, NULL);
        event_notifier_cleanup(&vq->error_notifier);
    }
}

int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
                   VhostBackendType backend_type, uint32_t busyloop_timeout,
                   Error **errp)
{
    unsigned int used, reserved, limit;
    uint64_t features;
    int i, r, n_initialized_vqs = 0;

    hdev->vdev = NULL;
    hdev->migration_blocker = NULL;

    r = vhost_set_backend_type(hdev, backend_type);
    assert(r >= 0);

    r = hdev->vhost_ops->vhost_backend_init(hdev, opaque, errp);
    if (r < 0) {
        goto fail;
    }

    r = hdev->vhost_ops->vhost_set_owner(hdev);
    if (r < 0) {
        error_setg_errno(errp, -r, "vhost_set_owner failed");
        goto fail;
    }

    r = hdev->vhost_ops->vhost_get_features(hdev, &features);
    if (r < 0) {
        error_setg_errno(errp, -r, "vhost_get_features failed");
        goto fail;
    }

    limit = hdev->vhost_ops->vhost_backend_memslots_limit(hdev);
    if (limit < MEMORY_DEVICES_SAFE_MAX_MEMSLOTS &&
        memory_devices_memslot_auto_decision_active()) {
        error_setg(errp, "some memory device (like virtio-mem)"
                   " decided how many memory slots to use based on the overall"
                   " number of memory slots; this vhost backend would further"
                   " restrict the overall number of memory slots");
        error_append_hint(errp, "Try plugging this vhost backend before"
                          " plugging such memory devices.\n");
        r = -EINVAL;
        goto fail;
    }

    for (i = 0; i < hdev->nvqs; ++i, ++n_initialized_vqs) {
        r = vhost_virtqueue_init(hdev, hdev->vqs + i, hdev->vq_index + i);
        if (r < 0) {
            error_setg_errno(errp, -r, "Failed to initialize virtqueue %d", i);
            goto fail;
        }
    }

    if (busyloop_timeout) {
        for (i = 0; i < hdev->nvqs; ++i) {
            r = vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i,
                                                     busyloop_timeout);
            if (r < 0) {
                error_setg_errno(errp, -r, "Failed to set busyloop timeout");
                goto fail_busyloop;
            }
        }
    }

    hdev->features = features;

    hdev->memory_listener = (MemoryListener) {
        .name = "vhost",
        .begin = vhost_begin,
        .commit = vhost_commit,
        .region_add = vhost_region_addnop,
        .region_nop = vhost_region_addnop,
        .log_start = vhost_log_start,
        .log_stop = vhost_log_stop,
        .log_sync = vhost_log_sync,
        .log_global_start = vhost_log_global_start,
        .log_global_stop = vhost_log_global_stop,
        .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND
    };

    hdev->iommu_listener = (MemoryListener) {
        .name = "vhost-iommu",
        .region_add = vhost_iommu_region_add,
        .region_del = vhost_iommu_region_del,
    };

    if (hdev->migration_blocker == NULL) {
        if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
        } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) {
            error_setg(&hdev->migration_blocker,
                       "Migration disabled: failed to allocate shared memory");
        }
    }

    if (hdev->migration_blocker != NULL) {
        r = migrate_add_blocker(hdev->migration_blocker, errp);
        if (r < 0) {
            error_free(hdev->migration_blocker);
            goto fail_busyloop;
        }
    }

    hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
    hdev->n_mem_sections = 0;
    hdev->mem_sections = NULL;
    hdev->log = NULL;
    hdev->log_size = 0;
    hdev->log_enabled = false;
    hdev->started = false;
    memory_listener_register(&hdev->memory_listener, &address_space_memory);
    QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);

    /*
     * The listener we registered properly updated the corresponding counter.
     * So we can trust that these values are accurate.
     */
    if (hdev->vhost_ops->vhost_backend_no_private_memslots &&
        hdev->vhost_ops->vhost_backend_no_private_memslots(hdev)) {
        used = used_shared_memslots;
    } else {
        used = used_memslots;
    }
    /*
     * We assume that all reserved memslots actually require a real memslot
1559 | * in our vhost backend. This might not be true, for example, if the | |
1560 | * memslot were ROM. If ever relevant, we can optimize for that -- |
1561 | * but we'll need additional information about the reservations. | |
1562 | */ | |
1563 | reserved = memory_devices_get_reserved_memslots(); | |
766aa0a6 DH |
1564 | if (used + reserved > limit) { |
1565 | error_setg(errp, "vhost backend memory slots limit (%d) is less" | |
1566 | " than current number of used (%d) and reserved (%d)" | |
1567 | " memory slots for memory devices.", limit, used, reserved); | |
f2a6e6c4 | 1568 | r = -EINVAL; |
1d8d014e | 1569 | goto fail_busyloop; |
9e2a2a3e JZ |
1570 | } |
1571 | ||
d5970055 | 1572 | return 0; |
a06db3ec | 1573 | |
69e87b32 | 1574 | fail_busyloop: |
1d8d014e SH |
1575 | if (busyloop_timeout) { |
1576 | while (--i >= 0) { | |
1577 | vhost_virtqueue_set_busyloop_timeout(hdev, hdev->vq_index + i, 0); | |
1578 | } | |
69e87b32 | 1579 | } |
d5970055 | 1580 | fail: |
a06db3ec MAL |
1581 | hdev->nvqs = n_initialized_vqs; |
1582 | vhost_dev_cleanup(hdev); | |
d5970055 MT |
1583 | return r; |
1584 | } | |
1585 | ||
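Note how vhost_dev_init() only tears down what it actually set up: n_initialized_vqs counts the fully initialized virtqueues and is handed to vhost_dev_cleanup() on failure, while busyloop timeouts are unwound with while (--i >= 0). A self-contained sketch of that counting idea, with hypothetical vq_init()/vq_fini() helpers standing in for the real virtqueue code:

#include <stdio.h>

enum { NVQS = 4 };

static int vq_init(int i)
{
    if (i == 2) {
        return -1;              /* simulate a failure on the third queue */
    }
    printf("init vq %d\n", i);
    return 0;
}

static void vq_fini(int i)
{
    printf("fini vq %d\n", i);
}

static int dev_init(void)
{
    int i, r, n_initialized = 0;

    /* Count fully initialized queues so the failure path tears down
     * exactly that many, never more. */
    for (i = 0; i < NVQS; ++i, ++n_initialized) {
        r = vq_init(i);
        if (r < 0) {
            goto fail;
        }
    }
    return 0;

fail:
    while (--n_initialized >= 0) {
        vq_fini(n_initialized);
    }
    return r;
}

int main(void)
{
    return dev_init() ? 1 : 0;   /* prints: init 0, init 1, fini 1, fini 0 */
}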
1586 | void vhost_dev_cleanup(struct vhost_dev *hdev) | |
1587 | { | |
f56a1247 | 1588 | int i; |
e0547b59 | 1589 | |
a2761231 AB |
1590 | trace_vhost_dev_cleanup(hdev); |
1591 | ||
f56a1247 MT |
1592 | for (i = 0; i < hdev->nvqs; ++i) { |
1593 | vhost_virtqueue_cleanup(hdev->vqs + i); | |
1594 | } | |
5be5f9be MAL |
1595 | if (hdev->mem) { |
1596 | /* these are only safe after a successful init */ |
1597 | memory_listener_unregister(&hdev->memory_listener); | |
1598 | QLIST_REMOVE(hdev, entry); | |
1599 | } | |
7145872e MT |
1600 | if (hdev->migration_blocker) { |
1601 | migrate_del_blocker(hdev->migration_blocker); | |
1602 | error_free(hdev->migration_blocker); | |
1603 | } | |
7267c094 | 1604 | g_free(hdev->mem); |
2817b260 | 1605 | g_free(hdev->mem_sections); |
e0547b59 MAL |
1606 | if (hdev->vhost_ops) { |
1607 | hdev->vhost_ops->vhost_backend_cleanup(hdev); | |
1608 | } | |
7b527247 | 1609 | assert(!hdev->log); |
e0547b59 MAL |
1610 | |
1611 | memset(hdev, 0, sizeof(struct vhost_dev)); | |
d5970055 MT |
1612 | } |
1613 | ||
92099aa4 LV |
1614 | static void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev, |
1615 | VirtIODevice *vdev, | |
1616 | unsigned int nvqs) | |
1617 | { | |
1618 | BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); | |
1619 | int i, r; | |
1620 | ||
1621 | /* | |
1622 | * Batch all the host notifiers in a single transaction to avoid | |
1623 | * quadratic time complexity in address_space_update_ioeventfds(). | |
1624 | */ | |
1625 | memory_region_transaction_begin(); | |
1626 | ||
1627 | for (i = 0; i < nvqs; ++i) { | |
1628 | r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, | |
1629 | false); | |
1630 | if (r < 0) { | |
1631 | error_report("vhost VQ %d notifier cleanup failed: %d", i, -r); | |
1632 | } | |
1633 | assert(r >= 0); | |
1634 | } | |
1635 | ||
1636 | /* | |
1637 | * The transaction expects the ioeventfds to be open when it | |
1638 | * commits, so commit now, before the cleanup loop. |
1639 | */ | |
1640 | memory_region_transaction_commit(); | |
1641 | ||
1642 | for (i = 0; i < nvqs; ++i) { | |
1643 | virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i); | |
1644 | } | |
1645 | virtio_device_release_ioeventfd(vdev); | |
1646 | } | |
1647 | ||
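The transaction brackets above are purely a performance measure: without them, every notifier change would trigger a full ioeventfd update, making the loop quadratic. A generic sketch of the begin/commit batching idea follows; it is not QEMU's memory_region_transaction implementation, just the deferral scheme it relies on:

#include <stdio.h>

static int depth;   /* nesting level of open transactions */
static int dirty;   /* at least one change is pending */

static void rebuild(void)
{
    printf("expensive rebuild\n");
}

static void txn_begin(void)
{
    depth++;
}

static void touch_notifier(int i)
{
    printf("touch notifier %d\n", i);
    dirty = 1;
    if (depth == 0) {
        rebuild();              /* unbatched: rebuild per change */
        dirty = 0;
    }
}

static void txn_commit(void)
{
    if (--depth == 0 && dirty) {
        rebuild();              /* batched: one rebuild for everything */
        dirty = 0;
    }
}

int main(void)
{
    txn_begin();
    for (int i = 0; i < 4; i++) {
        touch_notifier(i);
    }
    txn_commit();               /* exactly one "expensive rebuild" */
    return 0;
}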
b0b3db79 MT |
1648 | /* Stop processing guest IO notifications in qemu. |
1649 | * Start processing them in vhost in the kernel. |
1650 | */ | |
1651 | int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) | |
1652 | { | |
1c819449 | 1653 | BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); |
8771589b | 1654 | int i, r; |
4afba631 | 1655 | |
310837de PB |
1656 | /* We will pass the notifiers to the kernel, make sure that QEMU |
1657 | * doesn't interfere. | |
1658 | */ | |
1659 | r = virtio_device_grab_ioeventfd(vdev); | |
1660 | if (r < 0) { | |
4afba631 | 1661 | error_report("binding does not support host notifiers"); |
8771589b | 1662 | return r; |
b0b3db79 MT |
1663 | } |
1664 | ||
0fdc6b85 LM |
1665 | /* |
1666 | * Batch all the host notifiers in a single transaction to avoid | |
1667 | * quadratic time complexity in address_space_update_ioeventfds(). | |
1668 | */ | |
1669 | memory_region_transaction_begin(); | |
1670 | ||
b0b3db79 | 1671 | for (i = 0; i < hdev->nvqs; ++i) { |
b1f0a33d CH |
1672 | r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i, |
1673 | true); | |
b0b3db79 | 1674 | if (r < 0) { |
4afba631 | 1675 | error_report("vhost VQ %d notifier binding failed: %d", i, -r); |
0fdc6b85 | 1676 | memory_region_transaction_commit(); |
92099aa4 | 1677 | vhost_dev_disable_notifiers_nvqs(hdev, vdev, i); |
8771589b | 1678 | return r; |
b0b3db79 MT |
1679 | } |
1680 | } | |
1681 | ||
0fdc6b85 LM |
1682 | memory_region_transaction_commit(); |
1683 | ||
b0b3db79 | 1684 | return 0; |
b0b3db79 MT |
1685 | } |
1686 | ||
1687 | /* Stop processing guest IO notifications in vhost. | |
1688 | * Start processing them in qemu. | |
1689 | * This might actually run the qemu handlers right away, | |
1690 | * so virtio in qemu must be completely set up when this is called. |
1691 | */ | |
1692 | void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev) | |
1693 | { | |
92099aa4 | 1694 | vhost_dev_disable_notifiers_nvqs(hdev, vdev, hdev->nvqs); |
b0b3db79 MT |
1695 | } |
1696 | ||
f56a1247 MT |
1697 | /* Test and clear event pending status. |
1698 | * Should be called after unmask to avoid losing events. | |
1699 | */ | |
1700 | bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n) | |
1701 | { | |
a9f98bb5 | 1702 | struct vhost_virtqueue *vq = hdev->vqs + n - hdev->vq_index; |
a9f98bb5 | 1703 | assert(n >= hdev->vq_index && n < hdev->vq_index + hdev->nvqs); |
f56a1247 MT |
1704 | return event_notifier_test_and_clear(&vq->masked_notifier); |
1705 | } | |
1706 | ||
1707 | /* Mask/unmask events from this vq. */ | |
1708 | void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, | |
1709 | bool mask) | |
1710 | { | |
1711 | struct VirtQueue *vvq = virtio_get_queue(vdev, n); | |
a9f98bb5 | 1712 | int r, index = n - hdev->vq_index; |
fc57fd99 | 1713 | struct vhost_vring_file file; |
f56a1247 | 1714 | |
8695de0f MAL |
1715 | /* should only be called after backend is connected */ |
1716 | assert(hdev->vhost_ops); | |
1717 | ||
f56a1247 | 1718 | if (mask) { |
5669655a | 1719 | assert(vdev->use_guest_notifier_mask); |
ff5eb77b | 1720 | file.fd = event_notifier_get_wfd(&hdev->vqs[index].masked_notifier); |
f56a1247 | 1721 | } else { |
ff5eb77b | 1722 | file.fd = event_notifier_get_wfd(virtio_queue_get_guest_notifier(vvq)); |
f56a1247 | 1723 | } |
fc57fd99 | 1724 | |
21e70425 MAL |
1725 | file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n); |
1726 | r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file); | |
162bba7f | 1727 | if (r < 0) { |
f9a09ca3 CL |
1728 | error_report("vhost_set_vring_call failed %d", -r); |
1729 | } | |
1730 | } | |
1731 | ||
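Masking does not stop the backend from signalling; it merely redirects the call fd to a QEMU-private notifier so that events accumulate there instead of interrupting the guest. A Linux-only sketch of that fd swap, where backend_set_call_fd() is a hypothetical stand-in for the vhost_set_vring_call backend op:

#include <sys/eventfd.h>
#include <unistd.h>

struct vq {
    int masked_fd;   /* QEMU-private; events park here while masked */
    int guest_fd;    /* delivers interrupts to the guest */
};

/* Hypothetical stand-in for vhost_set_vring_call(): tell the backend
 * which fd to signal when buffers are used. */
static int backend_set_call_fd(int fd)
{
    (void)fd;        /* in QEMU this is an ioctl/message to the backend */
    return 0;
}

static int vq_mask(struct vq *vq, int mask)
{
    return backend_set_call_fd(mask ? vq->masked_fd : vq->guest_fd);
}

int main(void)
{
    struct vq vq = {
        .masked_fd = eventfd(0, EFD_CLOEXEC),
        .guest_fd  = eventfd(0, EFD_CLOEXEC),
    };

    vq_mask(&vq, 1);   /* park notifications in the masked notifier */
    vq_mask(&vq, 0);   /* route them straight to the guest again */
    close(vq.masked_fd);
    close(vq.guest_fd);
    return 0;
}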
1732 | bool vhost_config_pending(struct vhost_dev *hdev) | |
1733 | { | |
1734 | assert(hdev->vhost_ops); | |
1735 | if (!hdev->started || |
1736 | !hdev->vhost_ops->vhost_set_config_call) { |
1737 | return false; | |
1738 | } | |
1739 | ||
1740 | EventNotifier *notifier = | |
1741 | &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier; | |
1742 | return event_notifier_test_and_clear(notifier); | |
1743 | } | |
1744 | ||
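vhost_config_pending() leans on the read-to-clear semantics of event notifiers: checking whether an event is pending also consumes it, which is why such checks must happen after unmasking. A self-contained Linux sketch of those semantics with a raw eventfd:

#include <sys/eventfd.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = eventfd(0, EFD_NONBLOCK);
    uint64_t val;
    int pending;

    eventfd_write(fd, 1);       /* signal: one event pending */

    /* A successful read returns the count and resets it to zero. */
    pending = read(fd, &val, sizeof(val)) == sizeof(val);
    printf("first check:  %d\n", pending);    /* 1 */

    pending = read(fd, &val, sizeof(val)) == sizeof(val);
    printf("second check: %d\n", pending);    /* 0: already cleared */

    close(fd);
    return 0;
}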
1745 | void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask) | |
1746 | { | |
1747 | int fd; | |
1748 | int r; | |
1749 | EventNotifier *notifier = | |
1750 | &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier; | |
1751 | EventNotifier *config_notifier = &vdev->config_notifier; | |
1752 | assert(hdev->vhost_ops); | |
1753 | ||
1754 | if (!hdev->started || |
1755 | !hdev->vhost_ops->vhost_set_config_call) { |
1756 | return; | |
1757 | } | |
1758 | if (mask) { | |
1759 | assert(vdev->use_guest_notifier_mask); | |
1760 | fd = event_notifier_get_fd(notifier); | |
1761 | } else { | |
1762 | fd = event_notifier_get_fd(config_notifier); | |
1763 | } | |
1764 | r = hdev->vhost_ops->vhost_set_config_call(hdev, fd); | |
1765 | if (r < 0) { | |
1766 | error_report("vhost_set_config_call failed %d", -r); | |
1767 | } | |
1768 | } | |
1769 | ||
1770 | static void vhost_stop_config_intr(struct vhost_dev *dev) | |
1771 | { | |
1772 | int fd = -1; | |
1773 | assert(dev->vhost_ops); | |
1774 | if (dev->vhost_ops->vhost_set_config_call) { | |
1775 | dev->vhost_ops->vhost_set_config_call(dev, fd); | |
1776 | } | |
1777 | } | |
1778 | ||
1779 | static void vhost_start_config_intr(struct vhost_dev *dev) | |
1780 | { | |
1781 | int r; | |
1782 | ||
1783 | assert(dev->vhost_ops); | |
1784 | int fd = event_notifier_get_fd(&dev->vdev->config_notifier); | |
1785 | if (dev->vhost_ops->vhost_set_config_call) { | |
1786 | r = dev->vhost_ops->vhost_set_config_call(dev, fd); | |
1787 | if (!r) { | |
1788 | event_notifier_set(&dev->vdev->config_notifier); | |
1789 | } | |
162bba7f | 1790 | } |
f56a1247 MT |
1791 | } |
1792 | ||
9a2ba823 CH |
1793 | uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits, |
1794 | uint64_t features) | |
2e6d46d7 NN |
1795 | { |
1796 | const int *bit = feature_bits; | |
1797 | while (*bit != VHOST_INVALID_FEATURE_BIT) { | |
9a2ba823 | 1798 | uint64_t bit_mask = (1ULL << *bit); |
2e6d46d7 NN |
1799 | if (!(hdev->features & bit_mask)) { |
1800 | features &= ~bit_mask; | |
1801 | } | |
1802 | bit++; | |
1803 | } | |
1804 | return features; | |
1805 | } | |
1806 | ||
1807 | void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits, | |
9a2ba823 | 1808 | uint64_t features) |
2e6d46d7 NN |
1809 | { |
1810 | const int *bit = feature_bits; | |
1811 | while (*bit != VHOST_INVALID_FEATURE_BIT) { | |
9a2ba823 | 1812 | uint64_t bit_mask = (1ULL << *bit); |
2e6d46d7 NN |
1813 | if (features & bit_mask) { |
1814 | hdev->acked_features |= bit_mask; | |
1815 | } | |
1816 | bit++; | |
1817 | } | |
1818 | } | |
1819 | ||
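Both feature helpers walk a VHOST_INVALID_FEATURE_BIT-terminated array of bit positions: vhost_get_features() clears the bits the backend cannot offer, and vhost_ack_features() records what was accepted. A standalone sketch of the filtering half, using illustrative constants rather than the real virtio feature bits:

#include <stdint.h>
#include <stdio.h>

#define INVALID_FEATURE_BIT (-1)   /* array terminator, illustrative */

/* Clear every listed feature bit that the backend does not offer. */
static uint64_t filter_features(uint64_t backend, const int *bits,
                                uint64_t requested)
{
    for (const int *bit = bits; *bit != INVALID_FEATURE_BIT; bit++) {
        uint64_t mask = 1ULL << *bit;

        if (!(backend & mask)) {
            requested &= ~mask;
        }
    }
    return requested;
}

int main(void)
{
    const int negotiable[] = { 0, 5, 63, INVALID_FEATURE_BIT };
    uint64_t backend = (1ULL << 0) | (1ULL << 63);
    uint64_t guest   = (1ULL << 0) | (1ULL << 5);

    printf("negotiated: %#llx\n",   /* 0x1: bit 5 dropped, backend lacks it */
           (unsigned long long)filter_features(backend, negotiable, guest));
    return 0;
}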
4c3e257b | 1820 | int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, |
50de5138 | 1821 | uint32_t config_len, Error **errp) |
4c3e257b CL |
1822 | { |
1823 | assert(hdev->vhost_ops); | |
1824 | ||
1825 | if (hdev->vhost_ops->vhost_get_config) { | |
66647ed4 MA |
1826 | return hdev->vhost_ops->vhost_get_config(hdev, config, config_len, |
1827 | errp); | |
4c3e257b CL |
1828 | } |
1829 | ||
50de5138 | 1830 | error_setg(errp, "vhost_get_config not implemented"); |
5d33ae4b | 1831 | return -ENOSYS; |
4c3e257b CL |
1832 | } |
1833 | ||
1834 | int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data, | |
1835 | uint32_t offset, uint32_t size, uint32_t flags) | |
1836 | { | |
1837 | assert(hdev->vhost_ops); | |
1838 | ||
1839 | if (hdev->vhost_ops->vhost_set_config) { | |
1840 | return hdev->vhost_ops->vhost_set_config(hdev, data, offset, | |
1841 | size, flags); | |
1842 | } | |
1843 | ||
5d33ae4b | 1844 | return -ENOSYS; |
4c3e257b CL |
1845 | } |
1846 | ||
1847 | void vhost_dev_set_config_notifier(struct vhost_dev *hdev, | |
1848 | const VhostDevConfigOps *ops) | |
1849 | { | |
4c3e257b CL |
1850 | hdev->config_ops = ops; |
1851 | } | |
1852 | ||
5ad204bf XY |
1853 | void vhost_dev_free_inflight(struct vhost_inflight *inflight) |
1854 | { | |
0ac2e635 | 1855 | if (inflight && inflight->addr) { |
5ad204bf XY |
1856 | qemu_memfd_free(inflight->addr, inflight->size, inflight->fd); |
1857 | inflight->addr = NULL; | |
1858 | inflight->fd = -1; | |
1859 | } | |
1860 | } | |
1861 | ||
1862 | static int vhost_dev_resize_inflight(struct vhost_inflight *inflight, | |
1863 | uint64_t new_size) | |
1864 | { | |
1865 | Error *err = NULL; | |
1866 | int fd = -1; | |
1867 | void *addr = qemu_memfd_alloc("vhost-inflight", new_size, | |
1868 | F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL, | |
1869 | &fd, &err); | |
1870 | ||
1871 | if (err) { | |
1872 | error_report_err(err); | |
5d33ae4b | 1873 | return -ENOMEM; |
5ad204bf XY |
1874 | } |
1875 | ||
1876 | vhost_dev_free_inflight(inflight); | |
1877 | inflight->offset = 0; | |
1878 | inflight->addr = addr; | |
1879 | inflight->fd = fd; | |
1880 | inflight->size = new_size; | |
1881 | ||
1882 | return 0; | |
1883 | } | |
1884 | ||
1885 | void vhost_dev_save_inflight(struct vhost_inflight *inflight, QEMUFile *f) | |
1886 | { | |
1887 | if (inflight->addr) { | |
1888 | qemu_put_be64(f, inflight->size); | |
1889 | qemu_put_be16(f, inflight->queue_size); | |
1890 | qemu_put_buffer(f, inflight->addr, inflight->size); | |
1891 | } else { | |
1892 | qemu_put_be64(f, 0); | |
1893 | } | |
1894 | } | |
1895 | ||
1896 | int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) | |
1897 | { | |
1898 | uint64_t size; | |
1899 | ||
1900 | size = qemu_get_be64(f); | |
1901 | if (!size) { | |
1902 | return 0; | |
1903 | } | |
1904 | ||
1905 | if (inflight->size != size) { | |
5d33ae4b RK |
1906 | int ret = vhost_dev_resize_inflight(inflight, size); |
1907 | if (ret < 0) { | |
1908 | return ret; | |
5ad204bf XY |
1909 | } |
1910 | } | |
1911 | inflight->queue_size = qemu_get_be16(f); | |
1912 | ||
1913 | qemu_get_buffer(f, inflight->addr, size); | |
1914 | ||
1915 | return 0; | |
1916 | } | |
1917 | ||
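Together, vhost_dev_save_inflight() and vhost_dev_load_inflight() define a simple stream layout: a big-endian 64-bit size (zero meaning no inflight region), a big-endian 16-bit queue size, then the raw buffer. A standalone sketch of the writer side, with put_be64()/put_be16() as hypothetical stand-ins for the QEMUFile helpers:

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins for the QEMUFile helpers. */
static void put_be64(uint8_t *p, uint64_t v)
{
    for (int i = 0; i < 8; i++) {
        p[i] = v >> (56 - 8 * i);
    }
}

static void put_be16(uint8_t *p, uint16_t v)
{
    p[0] = v >> 8;
    p[1] = v & 0xff;
}

/* Layout: be64 size (0 means "no inflight region"), be16 queue_size,
 * then the raw buffer. Returns the number of bytes written. */
static size_t save_inflight(uint8_t *out, const void *addr,
                            uint64_t size, uint16_t queue_size)
{
    if (!addr) {
        put_be64(out, 0);
        return 8;
    }
    put_be64(out, size);
    put_be16(out + 8, queue_size);
    memcpy(out + 10, addr, size);
    return 10 + size;
}

int main(void)
{
    uint8_t ring[4] = { 1, 2, 3, 4 };
    uint8_t wire[64];

    return save_inflight(wire, ring, sizeof(ring), 256) == 14 ? 0 : 1;
}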
1b0063b3 JY |
1918 | int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev) |
1919 | { | |
1920 | int r; | |
1921 | ||
1922 | if (hdev->vhost_ops->vhost_get_inflight_fd == NULL || | |
1923 | hdev->vhost_ops->vhost_set_inflight_fd == NULL) { | |
1924 | return 0; | |
1925 | } | |
1926 | ||
1927 | hdev->vdev = vdev; | |
1928 | ||
1929 | r = vhost_dev_set_features(hdev, hdev->log_enabled); | |
1930 | if (r < 0) { | |
5d33ae4b | 1931 | VHOST_OPS_DEBUG(r, "vhost_dev_prepare_inflight failed"); |
1b0063b3 JY |
1932 | return r; |
1933 | } | |
1934 | ||
1935 | return 0; | |
1936 | } | |
1937 | ||
5ad204bf XY |
1938 | int vhost_dev_set_inflight(struct vhost_dev *dev, |
1939 | struct vhost_inflight *inflight) | |
1940 | { | |
1941 | int r; | |
1942 | ||
1943 | if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) { | |
1944 | r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight); | |
1945 | if (r) { | |
5d33ae4b RK |
1946 | VHOST_OPS_DEBUG(r, "vhost_set_inflight_fd failed"); |
1947 | return r; | |
5ad204bf XY |
1948 | } |
1949 | } | |
1950 | ||
1951 | return 0; | |
1952 | } | |
1953 | ||
1954 | int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, | |
1955 | struct vhost_inflight *inflight) | |
1956 | { | |
1957 | int r; | |
1958 | ||
1959 | if (dev->vhost_ops->vhost_get_inflight_fd) { | |
1960 | r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight); | |
1961 | if (r) { | |
5d33ae4b RK |
1962 | VHOST_OPS_DEBUG(r, "vhost_get_inflight_fd failed"); |
1963 | return r; | |
5ad204bf XY |
1964 | } |
1965 | } | |
1966 | ||
1967 | return 0; | |
1968 | } | |
1969 | ||
4daa5054 SG |
1970 | static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable) |
1971 | { | |
1972 | if (!hdev->vhost_ops->vhost_set_vring_enable) { | |
1973 | return 0; | |
1974 | } | |
1975 | ||
1976 | /* | |
1977 | * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not | |
1978 | * been negotiated, the rings start directly in the enabled state, and | |
1980 | * the .vhost_set_vring_enable callback will fail since |
1980 | * VHOST_USER_SET_VRING_ENABLE is not supported. | |
1981 | */ | |
1982 | if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER && | |
1983 | !virtio_has_feature(hdev->backend_features, | |
1984 | VHOST_USER_F_PROTOCOL_FEATURES)) { | |
1985 | return 0; | |
1986 | } | |
1987 | ||
1988 | return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable); | |
1989 | } | |
1990 | ||
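The helper above treats an unusable backend op as a successful no-op in two cases: when the callback is absent, and when VHOST_USER_F_PROTOCOL_FEATURES was never negotiated so the rings are already enabled. A sketch of that gating pattern, with illustrative names in place of the real vhost-user constants:

#include <stdint.h>
#include <stddef.h>

#define F_PROTOCOL_FEATURES 30   /* illustrative stand-in bit number */

struct backend {
    uint64_t features;
    int (*set_vring_enable)(struct backend *b, int enable);  /* optional */
};

static int backend_set_vring_enable(struct backend *b, int enable)
{
    /* Missing callback: nothing to do, and not an error. */
    if (!b->set_vring_enable) {
        return 0;
    }
    /* Without the protocol feature the rings are already enabled and
     * the request would be rejected, so treat it as a no-op too. */
    if (!(b->features & (1ULL << F_PROTOCOL_FEATURES))) {
        return 0;
    }
    return b->set_vring_enable(b, enable);
}

int main(void)
{
    struct backend b = { .features = 0, .set_vring_enable = NULL };

    return backend_set_vring_enable(&b, 1);   /* no-op: returns 0 */
}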
b0b3db79 | 1991 | /* Host notifiers must be enabled at this point. */ |
4daa5054 | 1992 | int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) |
d5970055 MT |
1993 | { |
1994 | int i, r; | |
24f4fe34 | 1995 | |
8695de0f MAL |
1996 | /* should only be called after backend is connected */ |
1997 | assert(hdev->vhost_ops); | |
1998 | ||
4daa5054 | 1999 | trace_vhost_dev_start(hdev, vdev->name, vrings); |
a2761231 | 2000 | |
c255488d | 2001 | vdev->vhost_started = true; |
24f4fe34 | 2002 | hdev->started = true; |
c471ad0e | 2003 | hdev->vdev = vdev; |
24f4fe34 | 2004 | |
d5970055 MT |
2005 | r = vhost_dev_set_features(hdev, hdev->log_enabled); |
2006 | if (r < 0) { | |
54dd9321 | 2007 | goto fail_features; |
d5970055 | 2008 | } |
c471ad0e JW |
2009 | |
2010 | if (vhost_dev_has_iommu(hdev)) { | |
375f74f4 | 2011 | memory_listener_register(&hdev->iommu_listener, vdev->dma_as); |
c471ad0e JW |
2012 | } |
2013 | ||
21e70425 | 2014 | r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem); |
d5970055 | 2015 | if (r < 0) { |
5d33ae4b | 2016 | VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed"); |
54dd9321 | 2017 | goto fail_mem; |
d5970055 | 2018 | } |
d154e0ba | 2019 | for (i = 0; i < hdev->nvqs; ++i) { |
f56a1247 | 2020 | r = vhost_virtqueue_start(hdev, |
a9f98bb5 JW |
2021 | vdev, |
2022 | hdev->vqs + i, | |
2023 | hdev->vq_index + i); | |
d154e0ba MT |
2024 | if (r < 0) { |
2025 | goto fail_vq; | |
2026 | } | |
2027 | } | |
2028 | ||
f9a09ca3 CL |
2029 | r = event_notifier_init( |
2030 | &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier, 0); | |
2031 | if (r < 0) { | |
77ece20b PP |
2032 | VHOST_OPS_DEBUG(r, "event_notifier_init failed"); |
2033 | goto fail_vq; | |
f9a09ca3 CL |
2034 | } |
2035 | event_notifier_test_and_clear( | |
2036 | &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); | |
2037 | if (!vdev->use_guest_notifier_mask) { | |
2038 | vhost_config_mask(hdev, vdev, true); | |
2039 | } | |
d5970055 | 2040 | if (hdev->log_enabled) { |
e05ca820 MT |
2041 | uint64_t log_base; |
2042 | ||
d5970055 | 2043 | hdev->log_size = vhost_get_log_size(hdev); |
15324404 MAL |
2044 | hdev->log = vhost_log_get(hdev->log_size, |
2045 | vhost_dev_log_is_shared(hdev)); | |
309750fa | 2046 | log_base = (uintptr_t)hdev->log->log; |
c2bea314 | 2047 | r = hdev->vhost_ops->vhost_set_log_base(hdev, |
9a78a5dd MAL |
2048 | hdev->log_size ? log_base : 0, |
2049 | hdev->log); | |
d5970055 | 2050 | if (r < 0) { |
5d33ae4b | 2051 | VHOST_OPS_DEBUG(r, "vhost_set_log_base failed"); |
54dd9321 | 2052 | goto fail_log; |
d5970055 MT |
2053 | } |
2054 | } | |
4daa5054 SG |
2055 | if (vrings) { |
2056 | r = vhost_dev_set_vring_enable(hdev, true); | |
2057 | if (r) { | |
2058 | goto fail_log; | |
2059 | } | |
2060 | } | |
ca71db43 CL |
2061 | if (hdev->vhost_ops->vhost_dev_start) { |
2062 | r = hdev->vhost_ops->vhost_dev_start(hdev, true); | |
2063 | if (r) { | |
4daa5054 | 2064 | goto fail_start; |
ca71db43 CL |
2065 | } |
2066 | } | |
3f63b4c6 JW |
2067 | if (vhost_dev_has_iommu(hdev) && |
2068 | hdev->vhost_ops->vhost_set_iotlb_callback) { | |
2069 | hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true); | |
c471ad0e JW |
2070 | |
2071 | /* Update used ring information for IOTLB to work correctly; |
2072 | * the vhost-kernel code requires this. */ |
2073 | for (i = 0; i < hdev->nvqs; ++i) { | |
2074 | struct vhost_virtqueue *vq = hdev->vqs + i; | |
2075 | vhost_device_iotlb_miss(hdev, vq->used_phys, true); | |
2076 | } | |
2077 | } | |
f9a09ca3 | 2078 | vhost_start_config_intr(hdev); |
d5970055 | 2079 | return 0; |
4daa5054 SG |
2080 | fail_start: |
2081 | if (vrings) { | |
2082 | vhost_dev_set_vring_enable(hdev, false); | |
2083 | } | |
54dd9321 | 2084 | fail_log: |
24bfa207 | 2085 | vhost_log_put(hdev, false); |
d5970055 MT |
2086 | fail_vq: |
2087 | while (--i >= 0) { | |
f56a1247 | 2088 | vhost_virtqueue_stop(hdev, |
a9f98bb5 JW |
2089 | vdev, |
2090 | hdev->vqs + i, | |
2091 | hdev->vq_index + i); | |
d5970055 | 2092 | } |
c471ad0e | 2093 | |
54dd9321 | 2094 | fail_mem: |
1e3ffb34 PP |
2095 | if (vhost_dev_has_iommu(hdev)) { |
2096 | memory_listener_unregister(&hdev->iommu_listener); | |
2097 | } | |
54dd9321 | 2098 | fail_features: |
c255488d | 2099 | vdev->vhost_started = false; |
24f4fe34 | 2100 | hdev->started = false; |
d5970055 MT |
2101 | return r; |
2102 | } | |
2103 | ||
b0b3db79 | 2104 | /* Host notifiers must be enabled at this point. */ |
4daa5054 | 2105 | void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) |
d5970055 | 2106 | { |
a9f98bb5 | 2107 | int i; |
54dd9321 | 2108 | |
8695de0f MAL |
2109 | /* should only be called after backend is connected */ |
2110 | assert(hdev->vhost_ops); | |
f9a09ca3 CL |
2111 | event_notifier_test_and_clear( |
2112 | &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); | |
2113 | event_notifier_test_and_clear(&vdev->config_notifier); | |
18f2971c LF |
2114 | event_notifier_cleanup( |
2115 | &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); | |
8695de0f | 2116 | |
4daa5054 | 2117 | trace_vhost_dev_stop(hdev, vdev->name, vrings); |
a2761231 | 2118 | |
ca71db43 CL |
2119 | if (hdev->vhost_ops->vhost_dev_start) { |
2120 | hdev->vhost_ops->vhost_dev_start(hdev, false); | |
2121 | } | |
4daa5054 SG |
2122 | if (vrings) { |
2123 | vhost_dev_set_vring_enable(hdev, false); | |
2124 | } | |
d5970055 | 2125 | for (i = 0; i < hdev->nvqs; ++i) { |
f56a1247 | 2126 | vhost_virtqueue_stop(hdev, |
a9f98bb5 JW |
2127 | vdev, |
2128 | hdev->vqs + i, | |
2129 | hdev->vq_index + i); | |
d5970055 | 2130 | } |
c3716f26 EP |
2131 | if (hdev->vhost_ops->vhost_reset_status) { |
2132 | hdev->vhost_ops->vhost_reset_status(hdev); | |
2133 | } | |
54dd9321 | 2134 | |
c471ad0e | 2135 | if (vhost_dev_has_iommu(hdev)) { |
3f63b4c6 JW |
2136 | if (hdev->vhost_ops->vhost_set_iotlb_callback) { |
2137 | hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false); | |
2138 | } | |
375f74f4 | 2139 | memory_listener_unregister(&hdev->iommu_listener); |
c471ad0e | 2140 | } |
f9a09ca3 | 2141 | vhost_stop_config_intr(hdev); |
309750fa | 2142 | vhost_log_put(hdev, true); |
d5970055 | 2143 | hdev->started = false; |
c255488d | 2144 | vdev->vhost_started = false; |
c471ad0e | 2145 | hdev->vdev = NULL; |
d5970055 | 2146 | } |
950d94ba MAL |
2147 | |
2148 | int vhost_net_set_backend(struct vhost_dev *hdev, | |
2149 | struct vhost_vring_file *file) | |
2150 | { | |
2151 | if (hdev->vhost_ops->vhost_net_set_backend) { | |
2152 | return hdev->vhost_ops->vhost_net_set_backend(hdev, file); | |
2153 | } | |
2154 | ||
5d33ae4b | 2155 | return -ENOSYS; |
950d94ba | 2156 | } |
c0c4f147 SH |
2157 | |
2158 | int vhost_reset_device(struct vhost_dev *hdev) | |
2159 | { | |
2160 | if (hdev->vhost_ops->vhost_reset_device) { | |
2161 | return hdev->vhost_ops->vhost_reset_device(hdev); | |
2162 | } | |
2163 | ||
2164 | return -ENOSYS; | |
2165 | } |