/*  Xenbus code for blkif backend
    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    Copyright (C) 2005 XenSource Ltd

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"
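/* Per-device state for one virtual block device exported to a frontend. */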
struct backend_info {
	struct xenbus_device	*dev;
	struct xen_blkif	*blkif;
	struct xenbus_watch	backend_watch;
	unsigned		major;
	unsigned		minor;
	char			*mode;
};
static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
			    unsigned int);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}
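/*
 * Build the name used for the per-device I/O kthread:
 * "blkback.<domid>.<device name>".
 */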
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname = devpath;

	snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}
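/*
 * Connect to the frontend if both the ring and the underlying device are
 * ready, then start the per-device xenblkd kernel thread.
 */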
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];

	/* Not ready to connect? */
	if (!blkif->irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, name);
	if (IS_ERR(blkif->xenblkd)) {
		err = PTR_ERR(blkif->xenblkd);
		blkif->xenblkd = NULL;
		xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
	}
}
static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	memset(blkif, 0, sizeof(*blkif));
	blkif->domid = domid;
	spin_lock_init(&blkif->blk_ring_lock);
	atomic_set(&blkif->refcnt, 1);
	init_waitqueue_head(&blkif->wq);
	init_completion(&blkif->drain_complete);
	atomic_set(&blkif->drain, 0);
	blkif->st_print = jiffies;
	init_waitqueue_head(&blkif->waiting_to_free);

	return blkif;
}
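/*
 * Map the frontend's shared ring page and bind its event channel,
 * initialising the back ring in whichever protocol layout was negotiated.
 */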
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
			 unsigned int evtchn)
{
	int err;

	/* Already connected through? */
	if (blkif->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring);
	if (err < 0)
		return err;

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring;
		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
		break;
	}
	default:
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", blkif);
	if (err < 0) {
		xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
		blkif->blk_rings.common.sring = NULL;
		return err;
	}
	blkif->irq = err;

	return 0;
}
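/*
 * Tear down the connection to the frontend: stop the I/O thread, wait for
 * outstanding requests to drain, then unbind the irq and unmap the ring.
 */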
static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
	if (blkif->xenblkd) {
		kthread_stop(blkif->xenblkd);
		blkif->xenblkd = NULL;
	}

	atomic_dec(&blkif->refcnt);
	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
	atomic_inc(&blkif->refcnt);

	if (blkif->irq) {
		unbind_from_irqhandler(blkif->irq, blkif);
		blkif->irq = 0;
	}

	if (blkif->blk_rings.common.sring) {
		xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring);
		blkif->blk_rings.common.sring = NULL;
	}
}
void xen_blkif_free(struct xen_blkif *blkif)
{
	if (!atomic_dec_and_test(&blkif->refcnt))
		BUG();
	kmem_cache_free(xen_blkif_cachep, blkif);
}
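/* Create the slab cache used for xen_blkif allocations; called once at init. */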
int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}
/*
 * sysfs interface for VBD I/O requests
 */
#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
VBD_SHOW(f_req,   "%d\n", be->blkif->st_f_req);
VBD_SHOW(ds_req,  "%d\n", be->blkif->st_ds_req);
VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};
static struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};
VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);
int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}
void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}
static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}
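/*
 * Open the physical device backing this vbd and record its properties
 * (size, cdrom/removable flags, flush and secure-discard support).
 */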
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);
	if (IS_ERR(bdev)) {
		DPRINTK("xen_vbd_create: device %08x could not be opened.\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && q->flush_flags)
		vbd->flush_support = true;

	if (q && blk_queue_secdiscard(q))
		vbd->discard_secure = true;

	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
		handle, blkif->domid);
	return 0;
}
static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);
		xen_vbd_free(&be->blkif->vbd);
		xen_blkif_free(be->blkif);
		be->blkif = NULL;
	}

	kfree(be);
	dev_set_drvdata(&dev->dev, NULL);
	return 0;
}
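/*
 * Helpers that advertise optional features ("feature-flush-cache",
 * "feature-discard", "feature-barrier") to the frontend via xenstore.
 */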
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		xenbus_dev_fatal(dev, err, "writing feature-flush-cache");

	return err;
}
int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	char *type;
	int err;
	int state = 0;

	type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL);
	if (!IS_ERR(type)) {
		if (strncmp(type, "file", 4) == 0) {
			state = 1;
			blkif->blk_backend_type = BLKIF_BACKEND_FILE;
		}
		if (strncmp(type, "phy", 3) == 0) {
			struct block_device *bdev = be->blkif->vbd.bdev;
			struct request_queue *q = bdev_get_queue(bdev);
			if (blk_queue_discard(q)) {
				err = xenbus_printf(xbt, dev->nodename,
					"discard-granularity", "%u",
					q->limits.discard_granularity);
				if (err) {
					xenbus_dev_fatal(dev, err,
						"writing discard-granularity");
					goto kfree;
				}
				err = xenbus_printf(xbt, dev->nodename,
					"discard-alignment", "%u",
					q->limits.discard_alignment);
				if (err) {
					xenbus_dev_fatal(dev, err,
						"writing discard-alignment");
					goto kfree;
				}
				state = 1;
				blkif->blk_backend_type = BLKIF_BACKEND_PHY;
			}
			err = xenbus_printf(xbt, dev->nodename,
					    "discard-secure", "%d",
					    blkif->vbd.discard_secure);
			if (err) {
				xenbus_dev_fatal(dev, err,
						 "writing discard-secure");
				goto kfree;
			}
		}
	} else {
		err = PTR_ERR(type);
		xenbus_dev_fatal(dev, err, "reading type");
		goto out;
	}

kfree:
	kfree(type);
out:
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		xenbus_dev_fatal(dev, err, "writing feature-discard");

	return err;
}
int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		xenbus_dev_fatal(dev, err, "writing feature-barrier");

	return err;
}
/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers.  Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);
	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	xen_blkbk_remove(dev);
	return err;
}
/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node.  Read it and the mode node, and create a vbd.  If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char **vec, unsigned int len)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	char *device_type;

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this.  Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if ((be->major || be->minor) &&
	    ((be->major != major) || (be->minor != minor))) {
		pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
			be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	if (be->major == 0 && be->minor == 0) {
		/* Front end dir is a number, which is used as the handle. */
		unsigned long handle;
		char *p = strrchr(dev->otherend, '/') + 1;

		err = strict_strtoul(p, 0, &handle);

		be->major = major;
		be->minor = minor;

		err = xen_vbd_create(be->blkif, handle, major, minor,
				     (NULL == strchr(be->mode, 'w')), cdrom);
		if (err) {
			be->major = 0;
			be->minor = 0;
			xenbus_dev_fatal(dev, err, "creating vbd structure");
			return;
		}

		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			be->major = 0;
			be->minor = 0;
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
			return;
		}

		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}
/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	DPRINTK("%s", xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info(DRV_PFX "%s: prepare for reconnect\n",
				dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		xen_blkif_disconnect(be->blkif);

		err = connect_ring(be);
		if (err)
			break;
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
/* ** Connection ** */

/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	DPRINTK("%s", dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
	if (err)
		goto abort;

	err = xen_blkbk_discard(xbt, be);

	/* If we can't advertise it is OK. */
	err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)
			    bdev_logical_block_size(be->blkif->vbd.bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;

abort:
	xenbus_transaction_end(xbt, 1);
}
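/*
 * Read the ring reference, event channel and ring protocol published by the
 * frontend, then map the ring and bind the event channel.
 */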
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	unsigned long ring_ref;
	unsigned int evtchn;
	char protocol[64] = "";
	int err;

	DPRINTK("%s", dev->otherend);

	err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
			    &ring_ref, "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err,
				 "reading %s/ring-ref and event-channel",
				 dev->otherend);
		return err;
	}

	be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
			    "%63s", protocol, NULL);
	if (err)
		strcpy(protocol, "unspecified, assuming native");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -1;
	}
	pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s)\n",
		ring_ref, evtchn, be->blkif->blk_protocol, protocol);

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(be->blkif, ring_ref, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
				 ring_ref, evtchn);
		return err;
	}

	return 0;
}
/* ** Driver Registration ** */

static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};

static DEFINE_XENBUS_DRIVER(xen_blkbk, ,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed
);

int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk_driver);
}