xen: Don't peek behind the BlockDriverState abstraction
hw/xen_disk.c
/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <xs.h>
#include <xenctrl.h>
#include <xen/io/xenbus.h>

#include "hw.h"
#include "qemu-char.h"
#include "xen_blkif.h"
#include "xen_backend.h"
#include "blockdev.h"

/* ------------------------------------------------------------- */

static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
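
/*
 * A struct ioreq tracks one frontend request from start to finish.
 * Requests move between three per-device lists: freelist (idle, reusable),
 * inflight (submitted to the block layer) and finished (waiting for the
 * response to be pushed back onto the shared ring).
 */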
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            ioreq->presync = 1;
            return 0;
        }
        ioreq->presync = ioreq->postsync = 1;
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}
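
/*
 * Map the frontend's grant references into our address space.  At this
 * point each iov_base still holds only the offset within its page (set up
 * by ioreq_parse()); after mapping it is rewritten to point into the mapped
 * grant page.  With batch_maps set, all segments are mapped by a single
 * xc_gnttab_map_grant_refs() call into one contiguous region.
 */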
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        }
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    ioreq->mapped = 1;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
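
/*
 * Completion callback shared by all AIO issued for a request.  Barrier
 * writes are handled as a chain: the presync flush completes here and
 * re-enters ioreq_runio_qemu_aio() to submit the data, then once all data
 * AIO has drained an optional postsync flush runs; only after that is the
 * request moved to the finished list and the bottom half kicked to send
 * the response.
 */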
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}
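
/*
 * Drain the shared ring: push out responses for anything already finished,
 * then pull, parse and submit each new request.  If the request pool is
 * exhausted, more_work is set so the bottom half gets rescheduled and the
 * remaining ring entries are picked up later.
 */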
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
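
/*
 * For example, assuming BLKIF_MAX_SEGMENTS_PER_REQUEST is 11 and the
 * default max_requests of 32: the exact worst case would be
 * 32 * 11 + 31 * 10 + 1 = 663 grants, while the simplified bound above
 * reserves 2 * 32 * 11 = 704.
 */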

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    qflags = BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NATIVE_AIO;
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
    } else {
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info  |= VDISK_CDROM;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                          bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}
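
/*
 * blk_connect() runs once the frontend is ready: it reads ring-ref and
 * event-channel from the frontend's xenstore area, maps the shared ring
 * page through the grant table, and initialises the back ring using
 * whichever layout (native, x86_32 or x86_64) the frontend advertised in
 * its "protocol" node.
 */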
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourself */
            bdrv_close(blkdev->bs);
            bdrv_detach_dev(blkdev->bs, blkdev);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};