// SPDX-License-Identifier: GPL-2.0-or-later
/* Xenbus code for blkif backend
   Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
   Copyright (C) 2005 XenSource Ltd
*/

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"

/* On the XenBus the max length of 'ring-ref%u'. */
#define RINGREF_NAME_LEN	(20)
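
/*
 * Why 20: "ring-ref" is 8 characters, a 32-bit %u adds at most 10
 * decimal digits, plus the trailing NUL: 8 + 10 + 1 = 19, rounded up.
 */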

struct backend_info {
	struct xenbus_device	*dev;
	struct xen_blkif	*blkif;
	struct xenbus_watch	backend_watch;
	unsigned		major;
	unsigned		minor;
	char			*mode;
};

static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char *,
			    const char *);
static void xen_blkif_free(struct xen_blkif *blkif);
static void xen_vbd_free(struct xen_vbd *vbd);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}

/*
 * The last request could free the device from softirq context and
 * xen_blkif_free() can sleep.
 */
static void xen_blkif_deferred_free(struct work_struct *work)
{
	struct xen_blkif *blkif;

	blkif = container_of(work, struct xen_blkif, free_work);
	xen_blkif_free(blkif);
}

static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname = devpath;

	snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}
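
/*
 * Illustrative example (values assumed, not from this file): for
 * domid 3 backed by "/dev/sdb" the buffer becomes "3.sdb"; it is used
 * below as the "%s-%d" prefix for the per-ring xenblkd kthread names.
 */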

static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];
	struct xen_blkif_ring *ring;
	int i;

	/* Not ready to connect? */
	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_handle)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = sync_blockdev(blkif->vbd.bdev_handle->bdev);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(
			blkif->vbd.bdev_handle->bdev->bd_inode->i_mapping);

	for (i = 0; i < blkif->nr_rings; i++) {
		ring = &blkif->rings[i];
		ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d",
					    name, i);
		if (IS_ERR(ring->xenblkd)) {
			err = PTR_ERR(ring->xenblkd);
			ring->xenblkd = NULL;
			xenbus_dev_fatal(blkif->be->dev, err,
					 "start %s-%d xenblkd", name, i);
			goto out;
		}
	}
	return;

out:
	/* Stop the kthreads that did start before the failure. */
	while (--i >= 0) {
		ring = &blkif->rings[i];
		kthread_stop(ring->xenblkd);
	}
}

static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
{
	unsigned int r;

	blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
			       GFP_KERNEL);
	if (!blkif->rings)
		return -ENOMEM;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];

		spin_lock_init(&ring->blk_ring_lock);
		init_waitqueue_head(&ring->wq);
		INIT_LIST_HEAD(&ring->pending_free);
		INIT_LIST_HEAD(&ring->persistent_purge_list);
		INIT_WORK(&ring->persistent_purge_work,
			  xen_blkbk_unmap_purged_grants);
		gnttab_page_cache_init(&ring->free_pages);
		spin_lock_init(&ring->pending_free_lock);
		init_waitqueue_head(&ring->pending_free_wq);
		init_waitqueue_head(&ring->shutdown_wq);
		ring->blkif = blkif;
		ring->st_print = jiffies;
		ring->active = true;
	}

	return 0;
}

/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature");

static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	atomic_set(&blkif->refcnt, 1);
	init_completion(&blkif->drain_complete);

	/*
	 * Because freeing back to the cache may be deferred, it is not
	 * safe to unload the module (and hence destroy the cache) until
	 * this has completed. To prevent premature unloading, take an
	 * extra module reference here and release only when the object
	 * has been freed back to the cache.
	 */
	__module_get(THIS_MODULE);
	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);

	return blkif;
}

static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
			 unsigned int nr_grefs, unsigned int evtchn)
{
	int err;
	struct xen_blkif *blkif = ring->blkif;
	const struct blkif_common_sring *sring_common;
	RING_IDX rsp_prod, req_prod;
	unsigned int size;

	/* Already connected through? */
	if (ring->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
				     &ring->blk_ring);
	if (err < 0)
		return err;

	sring_common = (struct blkif_common_sring *)ring->blk_ring;
	rsp_prod = READ_ONCE(sring_common->rsp_prod);
	req_prod = READ_ONCE(sring_common->req_prod);

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring_native =
			(struct blkif_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.native, sring_native,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32 =
			(struct blkif_x86_32_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64 =
			(struct blkif_x86_64_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	default:
		BUG();
	}

	err = -EIO;
	if (req_prod - rsp_prod > size)
		goto fail;

	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->be->dev,
			evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
	if (err < 0)
		goto fail;
	ring->irq = err;

	return 0;

fail:
	xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
	ring->blk_rings.common.sring = NULL;
	return err;
}
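
/*
 * The req_prod/rsp_prod check above guards against a buggy or
 * malicious frontend. Worked example (illustrative): with one 4 KiB
 * page the native ring holds 32 requests, so any snapshot in which
 * req_prod - rsp_prod (unsigned RING_IDX arithmetic) exceeds 32 cannot
 * have been produced by a well-behaved frontend, and the mapping is
 * torn down rather than trusting the indices.
 */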

static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
	struct pending_req *req, *n;
	unsigned int j, r;
	bool busy = false;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];
		unsigned int i = 0;

		if (!ring->active)
			continue;

		if (ring->xenblkd) {
			kthread_stop(ring->xenblkd);
			ring->xenblkd = NULL;
			wake_up(&ring->shutdown_wq);
		}

		/* The above kthread_stop() guarantees that at this point we
		 * don't have any discard_io or other_io requests. So, checking
		 * for inflight IO is enough.
		 */
		if (atomic_read(&ring->inflight) > 0) {
			busy = true;
			continue;
		}

		if (ring->irq) {
			unbind_from_irqhandler(ring->irq, ring);
			ring->irq = 0;
		}

		if (ring->blk_rings.common.sring) {
			xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
			ring->blk_rings.common.sring = NULL;
		}

		/* Remove all persistent grants and the cache of ballooned pages. */
		xen_blkbk_free_caches(ring);

		/* Check that there is no request in use */
		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
			list_del(&req->free_list);

			for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
				kfree(req->segments[j]);

			for (j = 0; j < MAX_INDIRECT_PAGES; j++)
				kfree(req->indirect_pages[j]);

			kfree(req);
			i++;
		}

		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
		BUG_ON(!list_empty(&ring->persistent_purge_list));
		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
		BUG_ON(ring->free_pages.num_pages != 0);
		BUG_ON(ring->persistent_gnt_c != 0);
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
		ring->active = false;
	}
	if (busy)
		return -EBUSY;

	blkif->nr_ring_pages = 0;
	/*
	 * blkif->rings was allocated in connect_ring, so we should free it
	 * here.
	 */
	kfree(blkif->rings);
	blkif->rings = NULL;
	blkif->nr_rings = 0;

	return 0;
}

static void xen_blkif_free(struct xen_blkif *blkif)
{
	WARN_ON(xen_blkif_disconnect(blkif));
	xen_vbd_free(&blkif->vbd);
	kfree(blkif->be->mode);
	kfree(blkif->be);

	/* Make sure everything is drained before shutting down */
	kmem_cache_free(xen_blkif_cachep, blkif);
	module_put(THIS_MODULE);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

void xen_blkif_interface_fini(void)
{
	kmem_cache_destroy(xen_blkif_cachep);
	xen_blkif_cachep = NULL;
}

/*
 * sysfs interface for VBD I/O requests
 */

#define VBD_SHOW_ALLRING(name, format)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
		struct xen_blkif *blkif = be->blkif;			\
		unsigned int i;						\
		unsigned long long result = 0;				\
									\
		if (!blkif->rings)					\
			goto out;					\
									\
		for (i = 0; i < blkif->nr_rings; i++) {			\
			struct xen_blkif_ring *ring = &blkif->rings[i];	\
									\
			result += ring->st_##name;			\
		}							\
									\
out:									\
		return sprintf(buf, format, result);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW_ALLRING(oo_req,  "%llu\n");
VBD_SHOW_ALLRING(rd_req,  "%llu\n");
VBD_SHOW_ALLRING(wr_req,  "%llu\n");
VBD_SHOW_ALLRING(f_req,   "%llu\n");
VBD_SHOW_ALLRING(ds_req,  "%llu\n");
VBD_SHOW_ALLRING(rd_sect, "%llu\n");
VBD_SHOW_ALLRING(wr_sect, "%llu\n");

static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static const struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};
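
/*
 * These per-ring counters are summed over all rings and surface under
 * the backend device's "statistics" group; an illustrative path
 * (layout assumed, not taken from this file) is
 * /sys/bus/xen-backend/devices/vbd-<domid>-<handle>/statistics/rd_req.
 */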

#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;
	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;
	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}

static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev_handle)
		bdev_release(vbd->bdev_handle);
	vbd->bdev_handle = NULL;
}

static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct bdev_handle *bdev_handle;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev_handle = bdev_open_by_dev(vbd->pdevice, vbd->readonly ?
				 BLK_OPEN_READ : BLK_OPEN_WRITE, NULL, NULL);

	if (IS_ERR(bdev_handle)) {
		pr_warn("xen_vbd_create: device %08x could not be opened\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev_handle = bdev_handle;
	if (vbd->bdev_handle->bdev->bd_disk == NULL) {
		pr_warn("xen_vbd_create: device %08x doesn't exist\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (cdrom || disk_to_cdi(vbd->bdev_handle->bdev->bd_disk))
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev_handle->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	if (bdev_write_cache(bdev_handle->bdev))
		vbd->flush_support = true;
	if (bdev_max_secure_erase_sectors(bdev_handle->bdev))
		vbd->discard_secure = true;

	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
		 handle, blkif->domid);
	return 0;
}

static void xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	dev_set_drvdata(&dev->dev, NULL);

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);

		/* Put the reference we set in xen_blkif_alloc(). */
		xen_blkif_put(be->blkif);
	}
}

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

	return err;
}

static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	int err;
	int state = 0;
	struct block_device *bdev = be->blkif->vbd.bdev_handle->bdev;

	if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
		return;

	if (bdev_max_discard_sectors(bdev)) {
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-granularity", "%u",
				    bdev_discard_granularity(bdev));
		if (err) {
			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
			return;
		}
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-alignment", "%u",
				    bdev_discard_alignment(bdev));
		if (err) {
			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
			return;
		}
		state = 1;
		/* Optional. */
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-secure", "%d",
				    blkif->vbd.discard_secure);
		if (err) {
			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
			return;
		}
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}
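
/*
 * Illustrative resulting xenstore layout under the backend node
 * (values assumed for the example): feature-discard = "1",
 * discard-granularity = "512", discard-alignment = "0" and, when the
 * device supports secure erase, discard-secure = "1".
 */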

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

	return err;
}

/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers.  Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);

	/* match the pr_debug in xen_blkbk_remove */
	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-max-indirect-segments", "%u",
			    MAX_INDIRECT_SEGMENTS);
	if (err)
		dev_warn(&dev->dev,
			 "writing %s/feature-max-indirect-segments (%d)",
			 dev->nodename, err);

	/* Multi-queue: advertise how many queues are supported by us. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenblk_max_queues);
	if (err)
		pr_warn("Error writing multi-queue-max-queues\n");

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
				   backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
			    xen_blkif_max_ring_order);
	if (err)
		pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	xen_blkbk_remove(dev);
	return err;
}
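
/*
 * After a successful probe the backend node advertises, among others
 * (illustrative values, the real ones come from MAX_INDIRECT_SEGMENTS,
 * xenblk_max_queues and xen_blkif_max_ring_order):
 * feature-max-indirect-segments = "32", multi-queue-max-queues = "4",
 * max-ring-page-order = "4".
 */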

/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node.  Read it and the mode node, and create a vbd.  If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char *path, const char *token)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	unsigned long handle;
	char *device_type;

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this.  Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if (be->major | be->minor) {
		if (be->major != major || be->minor != minor)
			pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
				be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	/* Front end dir is a number, which is used as the handle. */
	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		return;
	}

	be->major = major;
	be->minor = minor;

	err = xen_vbd_create(be->blkif, handle, major, minor,
			     !strchr(be->mode, 'w'), cdrom);

	if (err)
		xenbus_dev_fatal(dev, err, "creating vbd structure");
	else {
		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
		}
	}

	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		be->major = 0;
		be->minor = 0;
	} else {
		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}
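
/*
 * Illustrative trigger (paths and values assumed, not from this file):
 * the hotplug script writes "physical-device" = "8:10" (hex
 * major:minor) and "mode" = "w" under the backend node, e.g.
 * /local/domain/0/backend/vbd/<domid>/<handle>, and this watch then
 * creates a writable vbd for MKDEV(0x8, 0x10).
 */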

/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info("%s: prepare for reconnect\n", dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		err = xen_blkif_disconnect(be->blkif);
		if (err) {
			xenbus_dev_fatal(dev, err, "pending I/O");
			break;
		}

		err = connect_ring(be);
		if (err) {
			/*
			 * Clean up so that memory resources can be used by
			 * other devices. connect_ring() has already reported
			 * the error.
			 */
			xen_blkif_disconnect(be->blkif);
			break;
		}
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough;
		/* if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

/* Once memory pressure is detected, squeeze free page pools for a while. */
static unsigned int buffer_squeeze_duration_ms = 10;
module_param_named(buffer_squeeze_duration_ms,
		buffer_squeeze_duration_ms, int, 0644);
MODULE_PARM_DESC(buffer_squeeze_duration_ms,
		"Duration in ms to squeeze the free page buffer when memory pressure is detected");

/*
 * Callback received when memory pressure is detected.
 */
static void reclaim_memory(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	if (!be)
		return;
	be->blkif->buffer_squeeze_end = jiffies +
		msecs_to_jiffies(buffer_squeeze_duration_ms);
}

/* ** Connection ** */

/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	pr_debug("%s %s\n", __func__, dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	/* If we can't advertise it is OK. */
	xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);

	xen_blkbk_discard(xbt, be);

	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
			    be->blkif->vbd.feature_gnt_persistent_parm);
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)bdev_logical_block_size(
					be->blkif->vbd.bdev_handle->bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
			    bdev_physical_block_size(
					be->blkif->vbd.bdev_handle->bdev));
	if (err)
		xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
				 dev->nodename);

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;
 abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Each ring may span multiple pages, depending on "ring-page-order".
 */
static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
{
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	struct pending_req *req, *n;
	int err, i, j;
	struct xen_blkif *blkif = ring->blkif;
	struct xenbus_device *dev = blkif->be->dev;
	unsigned int nr_grefs, evtchn;

	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir);
		return err;
	}

	nr_grefs = blkif->nr_ring_pages;

	if (unlikely(!nr_grefs)) {
		WARN_ON(true);
		return -EINVAL;
	}

	for (i = 0; i < nr_grefs; i++) {
		char ring_ref_name[RINGREF_NAME_LEN];

		if (blkif->multi_ref)
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
		else {
			WARN_ON(i != 0);
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
		}

		err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
				   "%u", &ring_ref[i]);
		if (err != 1) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/%s",
					 dir, ring_ref_name);
			return err;
		}
	}

	err = -ENOMEM;
	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			goto fail;
		list_add_tail(&req->free_list, &ring->pending_free);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			req->segments[j] = kzalloc(sizeof(*req->segments[0]), GFP_KERNEL);
			if (!req->segments[j])
				goto fail;
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
							 GFP_KERNEL);
			if (!req->indirect_pages[j])
				goto fail;
		}
	}

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
		goto fail;
	}

	return 0;

fail:
	list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
		list_del(&req->free_list);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			if (!req->segments[j])
				break;
			kfree(req->segments[j]);
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			if (!req->indirect_pages[j])
				break;
			kfree(req->indirect_pages[j]);
		}
		kfree(req);
	}
	return -ENOMEM;
}
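
/*
 * Illustrative xenstore layout read above (values assumed): with
 * ring-page-order = 2 and multi_ref set, "dir" contains event-channel
 * plus ring-ref0 .. ring-ref3, one grant reference per shared page; a
 * single-page frontend writes just "ring-ref" instead.
 */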

static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	char protocol[64] = "";
	int err, i;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
	unsigned int requested_num_queues = 0;
	unsigned int ring_page_order;

	pr_debug("%s %s\n", __func__, dev->otherend);

	blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
			   "%63s", protocol);
	if (err <= 0)
		strcpy(protocol, "unspecified, assuming default");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -ENOSYS;
	}

	blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
	blkif->vbd.feature_gnt_persistent =
		blkif->vbd.feature_gnt_persistent_parm &&
		xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);

	blkif->vbd.overflow_max_grants = 0;

	/*
	 * Read the number of hardware queues from frontend.
	 */
	requested_num_queues = xenbus_read_unsigned(dev->otherend,
						    "multi-queue-num-queues",
						    1);
	if (requested_num_queues > xenblk_max_queues
	    || requested_num_queues == 0) {
		/* Buggy or malicious guest. */
		xenbus_dev_fatal(dev, err,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenblk_max_queues);
		return -ENOSYS;
	}
	blkif->nr_rings = requested_num_queues;
	if (xen_blkif_alloc_rings(blkif))
		return -ENOMEM;

	pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
		blkif->nr_rings, blkif->blk_protocol, protocol,
		blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
			   &ring_page_order);
	if (err != 1) {
		blkif->nr_ring_pages = 1;
		blkif->multi_ref = false;
	} else if (ring_page_order <= xen_blkif_max_ring_order) {
		blkif->nr_ring_pages = 1 << ring_page_order;
		blkif->multi_ref = true;
	} else {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err,
				 "requested ring page order %d exceeds max: %d",
				 ring_page_order,
				 xen_blkif_max_ring_order);
		return err;
	}

	if (blkif->nr_rings == 1)
		return read_per_ring_refs(&blkif->rings[0], dev->otherend);

	xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
	xspath = kmalloc(xspathsize, GFP_KERNEL);
	if (!xspath) {
		xenbus_dev_fatal(dev, -ENOMEM, "reading ring references");
		return -ENOMEM;
	}

	for (i = 0; i < blkif->nr_rings; i++) {
		memset(xspath, 0, xspathsize);
		snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
		err = read_per_ring_refs(&blkif->rings[i], xspath);
		if (err) {
			kfree(xspath);
			return err;
		}
	}
	kfree(xspath);

	return 0;
}
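
/*
 * Multi-queue example (illustrative): with multi-queue-num-queues = 2
 * the per-ring nodes live under <otherend>/queue-0 and
 * <otherend>/queue-1, while a single queue keeps them directly in
 * <otherend>; that is why read_per_ring_refs() is handed different
 * base paths above.
 */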

static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver xen_blkbk_driver = {
	.ids  = xen_blkbk_ids,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed,
	.allow_rebind = true,
	.reclaim_memory = reclaim_memory,
};

int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk_driver);
}

void xen_blkif_xenbus_fini(void)
{
	xenbus_unregister_driver(&xen_blkbk_driver);
}