/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later. See the COPYING.LIB file in the top-level directory.
 */

#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "qapi-event.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);

struct BlockBackend {
    char *name;
    int refcnt;
    BlockDriverState *bs;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link; /* for blk_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;

    /* the block size for which the guest device expects atomicity */
    int guest_block_size;

    /* I/O stats (display with "info blockstats"). */
    BlockAcctStats stats;

    BlockdevOnError on_read_error, on_write_error;
    bool iostatus_enabled;
    BlockDeviceIoStatus iostatus;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    BlockBackend *blk;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .get_aio_context = blk_aiocb_get_aio_context,
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All the BlockBackends (except for hidden ones) */
static QTAILQ_HEAD(, BlockBackend) blk_backends =
    QTAILQ_HEAD_INITIALIZER(blk_backends);

/*
 * Create a new BlockBackend with @name, with a reference count of one.
 * @name must not be null or empty.
 * Fail if a BlockBackend with this name already exists.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(const char *name, Error **errp)
{
    BlockBackend *blk;

    assert(name && name[0]);
    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return NULL;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return NULL;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return NULL;
    }

    blk = g_new0(BlockBackend, 1);
    blk->name = g_strdup(name);
    blk->refcnt = 1;
    QTAILQ_INSERT_TAIL(&blk_backends, blk, link);
    return blk;
}

/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(const char *name, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(name, errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->bs = bs;
    bs->blk = blk;
    return blk;
}

/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *name, const char *filename,
                           const char *reference, QDict *options, int flags,
                           Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(name, errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->bs, filename, reference, options, flags, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}

static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->dev);
    if (blk->bs) {
        assert(blk->bs->blk == blk);
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
        blk->bs = NULL;
    }
    /* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
    if (blk->name[0]) {
        QTAILQ_REMOVE(&blk_backends, blk, link);
    }
    g_free(blk->name);
    drive_info_del(blk->legacy_dinfo);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

/*
 * Return the BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link) : QTAILQ_FIRST(&blk_backends);
}

/*
 * Return @blk's name, a non-null string.
 * Wart: the name is empty iff @blk has been hidden with
 * blk_hide_on_behalf_of_hmp_drive_del().
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name;
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk;

    assert(name);
    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->bs;
}

/*
 * Changes the BlockDriverState attached to @blk
 */
void blk_set_bs(BlockBackend *blk, BlockDriverState *bs)
{
    bdrv_ref(bs);

    if (blk->bs) {
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
    }
    assert(bs->blk == NULL);

    blk->bs = bs;
    bs->blk = blk;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk;

    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Hide @blk.
 * @blk must not have been hidden already.
 * Make attached BlockDriverState, if any, anonymous.
 * Once hidden, @blk is invisible to all functions that don't receive
 * it as argument. For example, blk_by_name() won't return it.
 * Strictly for use by do_drive_del().
 * TODO get rid of it!
 */
void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
{
    QTAILQ_REMOVE(&blk_backends, blk, link);
    blk->name[0] = 0;
    if (blk->bs) {
        bdrv_make_anon(blk->bs);
    }
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    blk_iostatus_reset(blk);
    return 0;
}

/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    blk->guest_block_size = 512;
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}

/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_closed = !blk_dev_is_tray_open(blk);

        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              true, &error_abort);
        }
        if (load) {
            /* tray close */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              false, &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_tray_open) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

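/*
 * Enable I/O status tracking for @blk and reset it to "ok".
 */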
void blk_iostatus_enable(BlockBackend *blk)
{
    blk->iostatus_enabled = true;
    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
}

/* The I/O status is only enabled if the drive explicitly
 * enables it _and_ the VM is configured to stop on errors */
bool blk_iostatus_is_enabled(const BlockBackend *blk)
{
    return (blk->iostatus_enabled &&
            (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
             blk->on_write_error == BLOCKDEV_ON_ERROR_STOP ||
             blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
}

BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
{
    return blk->iostatus;
}

void blk_iostatus_disable(BlockBackend *blk)
{
    blk->iostatus_enabled = false;
}

void blk_iostatus_reset(BlockBackend *blk)
{
    if (blk_iostatus_is_enabled(blk)) {
        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
        if (blk->bs && blk->bs->job) {
            block_job_iostatus_reset(blk->bs->job);
        }
    }
}

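/*
 * Record I/O error @error in @blk's iostatus: ENOSPC becomes
 * BLOCK_DEVICE_IO_STATUS_NOSPACE, anything else BLOCK_DEVICE_IO_STATUS_FAILED.
 * Only the first error is recorded; later ones don't overwrite it.
 * The iostatus must be enabled.
 */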
void blk_iostatus_set_err(BlockBackend *blk, int error)
{
    assert(blk_iostatus_is_enabled(blk));
    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
                                          BLOCK_DEVICE_IO_STATUS_FAILED;
    }
}

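/*
 * Validate a byte-granularity request against @blk: reject sizes above
 * INT_MAX, a missing medium, negative offsets, and requests reaching
 * beyond the end of the device.
 * Return 0 if the request is acceptable, a negative errno otherwise.
 */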
static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_inserted(blk)) {
        return -ENOMEDIUM;
    }

    len = blk_getlength(blk);
    if (len < 0) {
        return len;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (offset > len || len - offset < size) {
        return -EIO;
    }

    return 0;
}

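/*
 * Same as blk_check_byte_request(), but for a sector-based request.
 * Additionally rejects sector offsets and counts that would overflow
 * when converted to bytes.
 */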
static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}

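/*
 * Synchronous sector-based I/O: each wrapper below validates the request
 * with blk_check_request(), then forwards it to the attached
 * BlockDriverState.
 */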
int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write_zeroes(BlockBackend *blk, int64_t sector_num,
                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

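/* Bottom half that delivers the deferred error completion set up by
 * abort_aio_request() */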
static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

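/*
 * Fail an AIO request without touching the BlockDriverState: allocate an
 * AIOCB carrying @ret and schedule a bottom half in @blk's AioContext so
 * that @cb is invoked asynchronously, like a real completion would be.
 */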
static BlockAIOCB *abort_aio_request(BlockBackend *blk, BlockCompletionFunc *cb,
                                     void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->blk = blk;
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
                                 cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pread(blk->bs, offset, buf, count);
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pwrite(blk->bs, offset, buf, count);
}

int64_t blk_getlength(BlockBackend *blk)
{
    return bdrv_getlength(blk->bs);
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    bdrv_get_geometry(blk->bs, nb_sectors_ptr);
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    return bdrv_nb_sectors(blk->bs);
}

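/*
 * Asynchronous I/O wrappers: requests with an offset and length are
 * validated first; validation failures are delivered through the
 * completion callback via abort_aio_request().
 */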
BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_flush(blk->bs, cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

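/*
 * Submit a batch of write requests: validate each request first, then hand
 * the whole array to bdrv_aio_multiwrite().
 * Return a negative errno if any request fails validation.
 */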
int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return bdrv_ioctl(blk->bs, req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    return bdrv_co_flush(blk->bs);
}

int blk_flush(BlockBackend *blk)
{
    return bdrv_flush(blk->bs);
}

int blk_flush_all(void)
{
    return bdrv_flush_all();
}

void blk_drain(BlockBackend *blk)
{
    bdrv_drain(blk->bs);
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                      BlockdevOnError on_write_error)
{
    blk->on_read_error = on_read_error;
    blk->on_write_error = on_write_error;
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return is_read ? blk->on_read_error : blk->on_write_error;
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    BlockdevOnError on_err = blk_get_on_error(blk, is_read);

    switch (on_err) {
    case BLOCKDEV_ON_ERROR_ENOSPC:
        return (error == ENOSPC) ?
               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_STOP:
        return BLOCK_ERROR_ACTION_STOP;
    case BLOCKDEV_ON_ERROR_REPORT:
        return BLOCK_ERROR_ACTION_REPORT;
    case BLOCKDEV_ON_ERROR_IGNORE:
        return BLOCK_ERROR_ACTION_IGNORE;
    default:
        abort();
    }
}

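/*
 * Emit a BLOCK_IO_ERROR QMP event for @blk describing the failed operation
 * (read or write), the action taken, and the error.
 */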
static void send_qmp_error_event(BlockBackend *blk,
                                 BlockErrorAction action,
                                 bool is_read, int error)
{
    IoOperationType optype;

    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
    qapi_event_send_block_io_error(blk_name(blk), optype, action,
                                   blk_iostatus_is_enabled(blk),
                                   error == ENOSPC, strerror(error),
                                   &error_abort);
}

/* This is done by device models because, while the block layer knows
 * about the error, it does not know whether an operation comes from
 * the device or the block layer (from a job, for example).
 */
void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    assert(error >= 0);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        /* First set the iostatus, so that "info block" returns an iostatus
         * that matches the events raised so far (an additional error iostatus
         * is fine, but not a lost one).
         */
        blk_iostatus_set_err(blk, error);

        /* Then raise the request to stop the VM and the event.
         * qemu_system_vmstop_request_prepare has two effects. First,
         * it ensures that the STOP event always comes after the
         * BLOCK_IO_ERROR event. Second, it ensures that even if management
         * can observe the STOP event and do a "cont" before the STOP
         * event is issued, the VM will not stop. In this case, vm_start()
         * also ensures that the STOP/RESUME pair of events is emitted.
         */
        qemu_system_vmstop_request_prepare();
        send_qmp_error_event(blk, action, is_read, error);
        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
    } else {
        send_qmp_error_event(blk, action, is_read, error);
    }
}

int blk_is_read_only(BlockBackend *blk)
{
    return bdrv_is_read_only(blk->bs);
}

int blk_is_sg(BlockBackend *blk)
{
    return bdrv_is_sg(blk->bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    return bdrv_enable_write_cache(blk->bs);
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    bdrv_set_enable_write_cache(blk->bs, wce);
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    bdrv_invalidate_cache(blk->bs, errp);
}

bool blk_is_inserted(BlockBackend *blk)
{
    return blk->bs && bdrv_is_inserted(blk->bs);
}

bool blk_is_available(BlockBackend *blk)
{
    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    bdrv_lock_medium(blk->bs, locked);
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    bdrv_eject(blk->bs, eject_flag);
}

int blk_get_flags(BlockBackend *blk)
{
    return bdrv_get_flags(blk->bs);
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    return blk->bs->bl.max_transfer_length;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    blk->guest_block_size = align;
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk->bs : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    return bdrv_op_is_blocked(blk->bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    bdrv_op_unblock(blk->bs, op, reason);
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    bdrv_op_block_all(blk->bs, reason);
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    bdrv_op_unblock_all(blk->bs, reason);
}

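/*
 * Return the AioContext @blk's I/O runs in: its BlockDriverState's context
 * if one is attached, the main loop context otherwise.
 */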
AioContext *blk_get_aio_context(BlockBackend *blk)
{
    if (blk->bs) {
        return bdrv_get_aio_context(blk->bs);
    } else {
        return qemu_get_aio_context();
    }
}

static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
{
    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
    return blk_get_aio_context(blk_acb->blk);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    bdrv_set_aio_context(blk->bs, new_context);
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
                                  detach_aio_context, opaque);
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
                                     detach_aio_context, opaque);
}

void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
{
    bdrv_add_close_notifier(blk->bs, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    bdrv_io_plug(blk->bs);
}

void blk_io_unplug(BlockBackend *blk)
{
    bdrv_io_unplug(blk->bs);
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return &blk->stats;
}

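/*
 * Allocate an AIOCB described by @aiocb_info for a request on @blk,
 * with completion callback @cb and opaque argument @opaque.
 */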
void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    return bdrv_truncate(blk->bs, offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk->bs, sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    return bdrv_save_vmstate(blk->bs, buf, pos, size);
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    return bdrv_load_vmstate(blk->bs, buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    return bdrv_probe_blocksizes(blk->bs, bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    return bdrv_probe_geometry(blk->bs, geo);
}