/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */
#include <sys/ioctl.h>
#include <sys/types.h>

#include <xen/io/xenbus.h>

#include "qemu-char.h"
#include "xen_blkif.h"
#include "xen_backend.h"
/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
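/*
 * Per-request state (struct ioreq): one grant reference (and the domain it
 * belongs to) per segment, the pages those grants get mapped to, a back
 * pointer to the owning device, and the list linkage used to move the
 * request between the freelist, inflight and finished lists.
 */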
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;

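/*
 * Per-device state (struct XenBlkDev): the embedded XenDevice, xenstore
 * configuration, the shared ring, the request lists and the qemu block
 * driver state.
 */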
    struct XenDevice    xendev;  /* must be first */
    const char          *fileproto;

    blkif_back_rings_t  rings;

    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    BlockDriverState    *bs;

/* ------------------------------------------------------------- */
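/*
 * Grab a request slot: reuse an ioreq from the freelist if possible,
 * otherwise allocate a fresh one (bounded by max_requests), and queue it
 * on the inflight list.
 */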
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

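/* Move a completed request from the inflight list to the finished list. */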
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

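/*
 * Return a request to the freelist; 'finish' tells which counter the
 * request is currently accounted against (finished vs. inflight).
 */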
static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            ioreq->presync = 1;
            return 0;
        }
        ioreq->presync = ioreq->postsync = 1;
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

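/*
 * Undo the grant mappings of a request: a single munmap call for a batched
 * mapping, or one call per page otherwise.
 */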
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

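/*
 * Map the granted pages of a request into our address space and point the
 * iovec entries at them; with batch_maps set, all grants of the request
 * are mapped with a single xc_gnttab_map_grant_refs() call.
 */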
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        }
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    ioreq->mapped = 1;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

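/*
 * Completion callback for the qemu AIO requests: once the last outstanding
 * aio of a request (plus an optional post-write flush) has finished, the
 * blkif status is set and the bottom half is scheduled so the response can
 * be sent.
 */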
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

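/*
 * Submit a parsed request to the qemu block layer as asynchronous I/O;
 * write barriers are implemented with bdrv_aio_flush() before and/or after
 * the data phase (presync/postsync).
 */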
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

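/*
 * Build the blkif response for one request and put it on the shared ring;
 * returns whether the frontend needs to be notified.
 */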
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

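/*
 * Copy one request off the shared ring, converting from the 32/64-bit
 * frontend layouts to the native layout where necessary.
 */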
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

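/*
 * Main request loop: flush finished responses, then pull new requests off
 * the ring, parse them and start the I/O; reschedules the bottom half when
 * work was left over and the number of inflight requests is below
 * max_requests.
 */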
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
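/*
 * Worked example (assuming the usual BLKIF_MAX_SEGMENTS_PER_REQUEST of 11):
 * with the default max_requests of 32 this reserves 2 * 32 * 11 = 704 grant
 * frames, comfortably above the exact worst case of
 * 32 * 11 + 31 * 10 + 1 = 663.
 */
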
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

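/*
 * Read the backend configuration from xenstore, open (or look up) the qemu
 * block driver instance, and publish the disk size, sector size and
 * features back to xenstore.
 */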
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    qflags = BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NATIVE_AIO;
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
    } else {
        info |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info |= VDISK_CDROM;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                          bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}

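/*
 * Connect to the frontend: read ring-ref and event-channel from the
 * frontend's xenstore area, map the shared ring, initialize it for the
 * negotiated protocol and bind the event channel.
 */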
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

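/*
 * Tear down the connection: close the block driver instance if we created
 * it ourselves, unbind the event channel and unmap the shared ring.
 */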
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourself */
            bdrv_close(blkdev->bs);
            bdrv_detach_dev(blkdev->bs, blkdev);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->sring = NULL;
    }
}

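/*
 * Final cleanup: disconnect if still connected, free the cached ioreq
 * structs and the xenstore strings, and delete the bottom half.
 */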
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};