/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qstring.h"

/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct PersistentRegion {
    void *addr;
    int num;
};

typedef struct PersistentRegion PersistentRegion;

struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)  list;
    BlockAcctCookie     acct;
};

struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    bool                directiosafe;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_discard;
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    GSList              *persistent_regions;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockBackend        *blk;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;
    ioreq->num_unmap = 0;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}
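
/*
 * Example: int_cmp() is the branchless three-way compare used as the
 * key comparator of the persistent-grant GTree below. (ua > ub) - (ua < ub)
 * evaluates to -1, 0 or 1 and, unlike a plain "ua - ub", cannot overflow;
 * comparing grant refs 3 and 7 gives (0) - (1) = -1, so 3 sorts first.
 */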

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    xengnttab_handle *gnt = grant->blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xengnttab_unmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

static void remove_persistent_region(gpointer data, gpointer dev)
{
    PersistentRegion *region = data;
    struct XenBlkDev *blkdev = dev;
    xengnttab_handle *gnt = blkdev->xendev.gnttabdev;

    if (xengnttab_unmap(gnt, region->addr, region->num) != 0) {
        xen_be_printf(&blkdev->xendev, 0,
                      "xengnttab_unmap region %p failed: %s\n",
                      region->addr, strerror(errno));
    }
    xen_be_printf(&blkdev->xendev, 3,
                  "unmapped grant region %p with %d pages\n",
                  region->addr, region->num);
    g_free(region);
}

static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    case BLKIF_OP_DISCARD:
        return 0;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1)
            * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
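
/*
 * Worked example of the sanity checks above (illustration only): with
 * 512-byte sectors and an XC_PAGE_SIZE of 4096, a segment may cover at
 * most sectors 0..7 of its granted page. first_sect = 0, last_sect = 7
 * gives len = (7 - 0 + 1) * 512 = 4096, exactly one page, while
 * last_sect = 8 would trip the "page crossing" check since 8 * 512 >= 4096.
 */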

static void ioreq_unmap(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xengnttab_unmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "xengnttab_unmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xengnttab_unmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "xengnttab_unmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

static int ioreq_map(struct ioreq *ioreq)
{
    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    PersistentRegion *region;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                  GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                /* Add the grant to the list of grants that
                 * should be mapped
                 */
                domids[new_maps] = ioreq->domids[i];
                refs[new_maps] = ioreq->refs[i];
                page[i] = NULL;
                new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xengnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps) {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xengnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent && new_maps != 0 &&
        (!batch_maps || (ioreq->blkdev->persistent_gnt_count + new_maps <=
        ioreq->blkdev->max_grants))) {
        /*
         * If we are using persistent grants and batch mappings only
         * add the new maps to the list of persistent grants if the whole
         * area can be persistently mapped.
         */
        if (batch_maps) {
            region = g_malloc0(sizeof(*region));
            region->addr = ioreq->pages;
            region->num = new_maps;
            ioreq->blkdev->persistent_regions = g_slist_append(
                                            ioreq->blkdev->persistent_regions,
                                            region);
        }
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent this granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
        assert(!batch_maps || new_maps == 0);
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}
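
/*
 * Worked example of the ioreq_parse()/ioreq_map() split (illustration
 * only): for a segment with first_sect = 2, ioreq_parse() stores just the
 * intra-page offset 2 * 512 = 1024 in iov_base; ioreq_map() then adds the
 * granted page address on top, yielding page[i] + 1024. A ref already
 * cached in persistent_gnts reuses its existing mapping and never enters
 * the domids[]/refs[] arrays handed to xengnttab_map_grant_refs().
 */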

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    switch (ioreq->req.operation) {
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }
        /* fall through */
    case BLKIF_OP_READ:
        if (ioreq->status == BLKIF_RSP_OKAY) {
            block_acct_done(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        } else {
            block_acct_failed(blk_get_stats(ioreq->blkdev->blk), &ioreq->acct);
        }
        break;
    case BLKIF_OP_DISCARD:
    default:
        break;
    }
    qemu_bh_schedule(ioreq->blkdev->bh);
}
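
/*
 * Note on the presync path above: a BLKIF_OP_FLUSH_DISKCACHE request that
 * carries data segments is handled in two phases. ioreq_runio_qemu_aio()
 * first issues only blk_aio_flush() while presync is set; the completion
 * handler then clears presync and resubmits the ioreq, which takes the
 * ordinary write path. The frontend sees a single response once both
 * phases have finished.
 */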

static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        blk_aio_flush(ioreq->blkdev->blk, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size, BLOCK_ACCT_READ);
        ioreq->aio_inflight++;
        blk_aio_readv(blkdev->blk, ioreq->start / BLOCK_SIZE,
                      &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                      qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        block_acct_start(blk_get_stats(blkdev->blk), &ioreq->acct,
                         ioreq->v.size,
                         ioreq->req.operation == BLKIF_OP_WRITE ?
                         BLOCK_ACCT_WRITE : BLOCK_ACCT_FLUSH);
        ioreq->aio_inflight++;
        blk_aio_writev(blkdev->blk, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_DISCARD:
    {
        struct blkif_request_discard *discard_req = (void *)&ioreq->req;
        ioreq->aio_inflight++;
        blk_aio_discard(blkdev->blk,
                        discard_req->sector_number, discard_req->nr_sectors,
                        qemu_aio_complete, ioreq);
        break;
    }
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native,
                                blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {

            switch (ioreq->req.operation) {
            case BLKIF_OP_READ:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_READ);
                break;
            case BLKIF_OP_WRITE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_WRITE);
                break;
            case BLKIF_OP_FLUSH_DISKCACHE:
                block_acct_invalid(blk_get_stats(blkdev->blk),
                                   BLOCK_ACCT_FLUSH);
                break;
            default:
                break;
            };

            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
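
/*
 * Worked example (illustration only): the canonical blkif ABI allows
 * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11 segments per request, so with the
 * default max_requests = 32 the exact worst case above is
 * 32 * 11 + 31 * 10 + 1 = 663 grants, and the simplified bound
 * MAX_GRANTS(32, 11) = 2 * 32 * 11 = 704 safely covers it.
 */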

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xengnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xengnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static void blk_parse_discard(struct XenBlkDev *blkdev)
{
    int enable;

    blkdev->feature_discard = true;

    if (xenstore_read_be_int(&blkdev->xendev, "discard-enable", &enable) == 0) {
        blkdev->feature_discard = !!enable;
    }

    if (blkdev->feature_discard) {
        xenstore_write_be_int(&blkdev->xendev, "feature-discard", 1);
    }
}

static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;

        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            *h = 0;
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (!strcmp("vhd", blkdev->fileproto)) {
        blkdev->fileproto = "vpc";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    blkdev->file_blk = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    blk_parse_discard(blkdev);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* read-only ? */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }
    if (blkdev->feature_discard) {
        qflags |= BDRV_O_UNMAP;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
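    /*
     * Explanatory note: this reverses the Linux xvd device numbering,
     * where disks on major 202 start at dev = 202 * 256 and each disk
     * spans 16 minors, so xvda maps to index 0 and xvdb
     * (dev = 202 * 256 + 16) to index 1.
     */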
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        Error *local_err = NULL;
        QDict *options = NULL;

        if (strcmp(blkdev->fileproto, "<unset>")) {
            options = qdict_new();
            qdict_put(options, "driver", qstring_from_str(blkdev->fileproto));
        }

        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->blk = blk_new_open(blkdev->filename, NULL, options,
                                   qflags, &local_err);
        if (!blkdev->blk) {
            xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                          error_get_pretty(local_err));
            error_free(local_err);
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->blk = blk_by_legacy_dinfo(blkdev->dinfo);
        if (blk_is_read_only(blkdev->blk) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->blk = NULL;
            return -1;
        }
        /* blkdev->blk was not created by us, we get a reference
         * so we can blk_unref() unconditionally */
        blk_ref(blkdev->blk);
    }
    blk_attach_dev_nofail(blkdev->blk, blkdev);
    blkdev->file_size = blk_getlength(blkdev->blk);
    if (blkdev->file_size < 0) {
        BlockDriverState *bs = blk_bs(blkdev->blk);
        const char *drv_name = bs ? bdrv_get_format_name(bs) : NULL;
        xen_be_printf(&blkdev->xendev, 1, "blk_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      drv_name ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
                            blkdev->file_size / blkdev->file_blk);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
        blkdev->feature_persistent = FALSE;
    } else {
        blkdev->feature_persistent = !!pers;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xengnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    if (blkdev->feature_persistent) {
        /* Init persistent grants */
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
                                                  NULL, NULL,
                                                  batch_maps ?
                                                  (GDestroyNotify)g_free :
                                                  (GDestroyNotify)destroy_grant);
        blkdev->persistent_regions = NULL;
        blkdev->persistent_gnt_count = 0;
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->blk) {
        blk_detach_dev(blkdev->blk, blkdev);
        blk_unref(blkdev->blk);
        blkdev->blk = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xengnttab_unmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }

    /*
     * Unmap persistent grants before switching to the closed state
     * so the frontend can free them.
     *
     * In the !batch_maps case g_tree_destroy will take care of unmapping
     * the grant, but in the batch_maps case we need to iterate over every
     * region in persistent_regions and unmap it.
     */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
        assert(batch_maps || blkdev->persistent_gnt_count == 0);
        blkdev->persistent_gnt_count = 0;
        g_slist_foreach(blkdev->persistent_regions,
                        (GFunc)remove_persistent_region, blkdev);
        g_slist_free(blkdev->persistent_regions);
        blkdev->feature_persistent = false;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->blk || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};