/*
 *  drivers/s390/cio/device.c
 *  bus driver for ccw devices
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *			 IBM Corporation
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/timer.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}
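
/*
 * Illustrative sketch (not part of this file): the ID table that
 * ccw_bus_match() walks via ccw_device_id_match() is supplied by the
 * driver. The CCW_DEVICE and CCW_DEVICE_DEVTYPE helpers come from
 * <asm/ccwdev.h>; the 3990/3390 numbers below are a made-up DASD-like
 * example, not taken from this file.
 *
 *	static struct ccw_device_id my_ids[] = {
 *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
 *		{ CCW_DEVICE(0x3880, 0) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, my_ids);
 */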
/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
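
/*
 * For example (illustrative values only): a device with cu_type 0x3990,
 * cu_model 0xe9, dev_type 0x3390 and dev_model 0x0c yields the alias
 * "ccw:t3990mE9dt3390dm0C", while a device without sense-id data
 * (dev_type == 0) yields "ccw:t3990mE9dtdm".
 */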
/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}

struct bus_type ccw_bus_type;

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static int io_subchannel_notify(struct subchannel *, int);
static void io_subchannel_verify(struct subchannel *);
static void io_subchannel_ioterm(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);

static struct css_driver io_subchannel_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = SUBCHANNEL_TYPE_IO,
	.name = "io_subchannel",
	.irq = io_subchannel_irq,
	.notify = io_subchannel_notify,
	.verify = io_subchannel_verify,
	.termination = io_subchannel_ioterm,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
};

struct workqueue_struct *ccw_device_work;
struct workqueue_struct *ccw_device_notify_work;
wait_queue_head_t ccw_device_init_wq;
atomic_t ccw_device_init_count;

static void recovery_func(unsigned long data);
static int __init
init_ccw_bus_type (void)
{
	int ret;

	init_waitqueue_head(&ccw_device_init_wq);
	atomic_set(&ccw_device_init_count, 0);
	setup_timer(&recovery_timer, recovery_func, 0);

	ccw_device_work = create_singlethread_workqueue("cio");
	if (!ccw_device_work)
		return -ENOMEM; /* FIXME: better errno ? */
	ccw_device_notify_work = create_singlethread_workqueue("cio_notify");
	if (!ccw_device_notify_work) {
		ret = -ENOMEM; /* FIXME: better errno ? */
		goto out_err;
	}
	slow_path_wq = create_singlethread_workqueue("kslowcrw");
	if (!slow_path_wq) {
		ret = -ENOMEM; /* FIXME: better errno ? */
		goto out_err;
	}
	if ((ret = bus_register (&ccw_bus_type)))
		goto out_err;

	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		goto out_err;

	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	flush_workqueue(ccw_device_work);
	return 0;
out_err:
	if (ccw_device_work)
		destroy_workqueue(ccw_device_work);
	if (ccw_device_notify_work)
		destroy_workqueue(ccw_device_notify_work);
	if (slow_path_wq)
		destroy_workqueue(slow_path_wq);
	return ret;
}

static void __exit
cleanup_ccw_bus_type (void)
{
	css_driver_unregister(&io_subchannel_driver);
	bus_unregister(&ccw_bus_type);
	destroy_workqueue(ccw_device_notify_work);
	destroy_workqueue(ccw_device_work);
}

subsys_initcall(init_ccw_bus_type);
module_exit(cleanup_ccw_bus_type);
/************************ device handling **************************/

/*
 * A ccw_device has some interfaces in sysfs in addition to the
 * standard ones.
 * The following entries are designed to export the information that
 * resided in /proc/subchannels in 2.4. Subchannel and device number
 * are obvious, so they don't have an entry :)
 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
 */
static ssize_t
chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int chp;
	int mask;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf (buf+ret, "\n");
	return min((ssize_t)PAGE_SIZE, ret);
}
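
/*
 * Example (illustrative CHPIDs only): a subchannel with two installed
 * paths might report "fa fb 00 00 00 00 00 00\n" here; slots that are
 * not in the path mask always read "00".
 */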
static ssize_t
pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf (buf, "%02x %02x %02x\n",
			pmcw->pim, pmcw->pam, pmcw->pom);
}

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (test_and_clear_bit(1, &cdev->private->registered))
		device_del(&cdev->dev);
}

static void ccw_device_remove_orphan_cb(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	ccw_device_unregister(cdev);
	put_device(&cdev->dev);
}

static void ccw_device_remove_sch_cb(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	css_sch_device_unregister(sch);
	/* Reset intparm to zeroes. */
	sch->schib.pmcw.intparm = 0;
	cio_modify(sch);
	put_device(&sch->dev);
}
static void
ccw_device_remove_disconnected(struct ccw_device *cdev)
{
	unsigned long flags;
	int rc;

	/*
	 * Forced offline in disconnected state means
	 * 'throw away device'.
	 */
	if (ccw_device_is_orphan(cdev)) {
		/*
		 * Deregister ccw device.
		 * Unfortunately, we cannot do this directly from the
		 * attribute method.
		 */
		spin_lock_irqsave(cdev->ccwlock, flags);
		cdev->private->state = DEV_STATE_NOT_OPER;
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		rc = device_schedule_callback(&cdev->dev,
					      ccw_device_remove_orphan_cb);
		if (rc)
			CIO_MSG_EVENT(0, "Couldn't unregister orphan "
				      "0.%x.%04x\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		return;
	}
	/* Deregister subchannel, which will kill the ccw device. */
	rc = device_schedule_callback(cdev->dev.parent,
				      ccw_device_remove_sch_cb);
	if (rc)
		CIO_MSG_EVENT(0, "Couldn't unregister disconnected device "
			      "0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
}
/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	cdev->online = 0;
	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_offline(cdev);
	if (ret == -ENODEV) {
		if (cdev->private->state != DEV_STATE_NOT_OPER) {
			cdev->private->state = DEV_STATE_OFFLINE;
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		}
		spin_unlock_irq(cdev->ccwlock);
		return ret;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->online = 1;
	}
	return ret;
}
/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		return ret;
	}
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -ENODEV;
	if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
		cdev->online = 1;
		return 0;
	}
	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_offline(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
	return (ret == 0) ? -ENODEV : ret;
}
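
/*
 * Illustrative usage sketch (not part of this file): kernel code that
 * already holds a reference to a ccw device, e.g. a group driver bringing
 * its channel devices online, would use the pair of calls roughly like
 * this. Both must be made from a context that may sleep and without the
 * ccw device lock held, as documented above.
 *
 *	ret = ccw_device_set_online(cdev);
 *	if (ret)
 *		return ret;
 *	... perform I/O on the device ...
 *	ccw_device_set_offline(cdev);
 */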
static void online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED)
		ccw_device_remove_disconnected(cdev);
	else if (cdev->drv && cdev->drv->set_offline)
		ccw_device_set_offline(cdev);
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	int ret;

	/* Do device recognition, if needed. */
	if (cdev->id.cu_type == 0) {
		ret = ccw_device_recognition(cdev);
		if (ret) {
			CIO_MSG_EVENT(0, "Couldn't start recognition "
				      "for device 0.%x.%04x (ret=%d)\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			return ret;
		}
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
	}
	if (cdev->drv && cdev->drv->set_online)
		ccw_device_set_online(cdev);
	return 0;
}

static void online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret)
		return;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret) {
			dev_warn(&cdev->dev,
				 "ccw_device_stlck returned %d!\n", ret);
			return;
		}
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		online_store_recog_and_online(cdev);
	}
}
static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;

	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
		atomic_set(&cdev->private->onoff, 0);
		return -EINVAL;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = strict_strtoul(buf, 16, &i);
	}
	if (ret)
		goto out;
	switch (i) {
	case 0:
		online_store_handle_offline(cdev);
		ret = count;
		break;
	case 1:
		online_store_handle_online(cdev, force);
		ret = count;
		break;
	default:
		ret = -EINVAL;
	}
out:
	if (cdev->drv)
		module_put(cdev->drv->owner);
	atomic_set(&cdev->private->onoff, 0);
	return ret;
}
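
/*
 * From user space this attribute is driven through sysfs, for example
 * (device bus id made up for illustration):
 *
 *	echo 1     > /sys/bus/ccw/devices/0.0.1234/online
 *	echo 0     > /sys/bus/ccw/devices/0.0.1234/online
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online
 *
 * "force" additionally attempts to unbox a reserved device via
 * ccw_device_stlck() before setting it online.
 */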
static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}
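
/*
 * The "availability" attribute therefore reads as one of "good", "boxed",
 * "no path" or "no device", which user space can poll to judge whether a
 * device is currently usable.
 */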
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);

static struct attribute * subch_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

struct attribute_group *subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

/* this is a simple abstraction for device_register that sets the
 * correct bus type and adds the bus specific files */
static int ccw_device_register(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;
	int ret;

	dev->bus = &ccw_bus_type;

	if ((ret = device_add(dev)))
		return ret;

	set_bit(1, &cdev->private->registered);
	return ret;
}
struct match_data {
	struct ccw_dev_id dev_id;
	struct ccw_device * sibling;
};

static int
match_devno(struct device * dev, void * data)
{
	struct match_data * d = data;
	struct ccw_device * cdev;

	cdev = to_ccwdev(dev);
	if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
	    !ccw_device_is_orphan(cdev) &&
	    ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
	    (cdev != d->sibling))
		return 1;
	return 0;
}

static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
						     struct ccw_device *sibling)
{
	struct device *dev;
	struct match_data data;

	data.dev_id = *dev_id;
	data.sibling = sibling;
	dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);

	return dev ? to_ccwdev(dev) : NULL;
}

static int match_orphan(struct device *dev, void *data)
{
	struct ccw_dev_id *dev_id;
	struct ccw_device *cdev;

	dev_id = data;
	cdev = to_ccwdev(dev);
	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

static struct ccw_device *
get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
			      struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
				match_orphan);

	return dev ? to_ccwdev(dev) : NULL;
}

static void
ccw_device_add_changed(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	if (device_add(&cdev->dev)) {
		put_device(&cdev->dev);
		return;
	}
	set_bit(1, &cdev->private->registered);
}

void ccw_device_do_unreg_rereg(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);

	ccw_device_unregister(cdev);
	PREPARE_WORK(&cdev->private->kick_work,
		     ccw_device_add_changed);
	queue_work(ccw_device_work, &cdev->private->kick_work);
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	kfree(cdev->private);
	kfree(cdev);
}
static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (cdev) {
		cdev->private = kzalloc(sizeof(struct ccw_device_private),
					GFP_KERNEL | GFP_DMA);
		if (cdev->private)
			return cdev;
	}
	kfree(cdev);
	return ERR_PTR(-ENOMEM);
}

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	cdev->private->cdev = cdev;
	atomic_set(&cdev->private->onoff, 0);
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	INIT_WORK(&cdev->private->kick_work, NULL);
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	if (!get_device(&sch->dev)) {
		if (cdev->dev.release)
			cdev->dev.release(&cdev->dev);
		return -ENODEV;
	}
	return 0;
}

static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret) {
			kfree(cdev);
			cdev = ERR_PTR(ret);
		}
	}
	return cdev;
}
static int io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_attach_device(struct subchannel *sch,
			      struct ccw_device *cdev)
{
	css_update_ssd_info(sch);
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	cdev->private->schid = sch->schid;
	cdev->ccwlock = sch->lock;
	device_trigger_reprobe(sch);
	spin_unlock_irq(sch->lock);
}

static void sch_attach_disconnected_device(struct subchannel *sch,
					   struct ccw_device *cdev)
{
	struct subchannel *other_sch;
	int ret;

	other_sch = to_subchannel(get_device(cdev->dev.parent));
	ret = device_move(&cdev->dev, &sch->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		put_device(&other_sch->dev);
		return;
	}
	sch_set_cdev(other_sch, NULL);
	/* No need to keep a subchannel without ccw device around. */
	css_sch_device_unregister(other_sch);
	put_device(&other_sch->dev);
	sch_attach_device(sch, cdev);
}

static void sch_attach_orphaned_device(struct subchannel *sch,
				       struct ccw_device *cdev)
{
	int ret;

	/* Try to move the ccw device to its new subchannel. */
	ret = device_move(&cdev->dev, &sch->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
			      "failed (ret=%d)!\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		return;
	}
	sch_attach_device(sch, cdev);
}

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	/* Start recognition for the new ccw device. */
	if (io_subchannel_recog(cdev, sch)) {
		spin_lock_irq(sch->lock);
		sch_set_cdev(sch, NULL);
		spin_unlock_irq(sch->lock);
		if (cdev->dev.release)
			cdev->dev.release(&cdev->dev);
		css_sch_device_unregister(sch);
	}
}
void ccw_device_move_to_orphanage(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct ccw_device *replacing_cdev;
	struct subchannel *sch;
	int ret;
	struct channel_subsystem *css;
	struct ccw_dev_id dev_id;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	css = to_css(sch->dev.parent);
	dev_id.devno = sch->schib.pmcw.dev;
	dev_id.ssid = sch->schid.ssid;

	/*
	 * Move the orphaned ccw device to the orphanage so the replacing
	 * ccw device can take its place on the subchannel.
	 */
	ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		return;
	}
	cdev->ccwlock = css->pseudo_subchannel->lock;
	/*
	 * Search for the replacing ccw device
	 * - among the disconnected devices
	 * - in the orphanage
	 */
	replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
	if (replacing_cdev) {
		sch_attach_disconnected_device(sch, replacing_cdev);
		return;
	}
	replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
	if (replacing_cdev) {
		sch_attach_orphaned_device(sch, replacing_cdev);
		return;
	}
	sch_create_and_recog_new_device(sch);
}
/*
 * Register recognized device.
 */
static void
io_subchannel_register(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (klist_node_attached(&cdev->dev.knode_parent)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	sch->dev.uevent_suppress = 0;
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_register(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		put_device(&cdev->dev);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		kfree (cdev->private);
		kfree (cdev);
		put_device(&sch->dev);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return;
	}
	put_device(&cdev->dev);
out:
	cdev->private->flags.recog_done = 1;
	put_device(&sch->dev);
	wake_up(&cdev->private->wait_q);
	if (atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Reset intparm to zeroes. */
	sch->schib.pmcw.intparm = 0;
	cio_modify(sch);
	put_device(&cdev->dev);
	put_device(&sch->dev);
}
/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		if (!get_device(&cdev->dev))
			break;
		sch = to_subchannel(cdev->dev.parent);
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_call_sch_unregister);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		if (!get_device(&cdev->dev))
			break;
		PREPARE_WORK(&cdev->private->kick_work,
			     io_subchannel_register);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		break;
	}
}

static int
io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	int rc;
	struct ccw_device_private *priv;

	sch_set_cdev(sch, cdev);
	sch->driver = &io_subchannel_driver;
	cdev->ccwlock = sch->lock;

	/* Init private data. */
	priv = cdev->private;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;
	priv->schid = sch->schid;
	priv->state = DEV_STATE_NOT_OPER;
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	/* Set an initial name for the device. */
	snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
		  sch->schid.ssid, sch->schib.pmcw.dev);

	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	rc = ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	if (rc) {
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
	}
	return rc;
}
static void ccw_device_move_to_sch(struct work_struct *work)
{
	struct ccw_device_private *priv;
	int rc;
	struct subchannel *sch;
	struct ccw_device *cdev;
	struct subchannel *former_parent;

	priv = container_of(work, struct ccw_device_private, kick_work);
	sch = priv->sch;
	cdev = priv->cdev;
	former_parent = ccw_device_is_orphan(cdev) ?
		NULL : to_subchannel(get_device(cdev->dev.parent));
	mutex_lock(&sch->reg_mutex);
	/* Try to move the ccw device to its new subchannel. */
	rc = device_move(&cdev->dev, &sch->dev);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
			      "0.%x.%04x failed (ret=%d)!\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schid.sch_no, rc);
		css_sch_device_unregister(sch);
		goto out;
	}
	if (former_parent) {
		spin_lock_irq(former_parent->lock);
		sch_set_cdev(former_parent, NULL);
		spin_unlock_irq(former_parent->lock);
		css_sch_device_unregister(former_parent);
		/* Reset intparm to zeroes. */
		former_parent->schib.pmcw.intparm = 0;
		cio_modify(former_parent);
	}
	sch_attach_device(sch, cdev);
out:
	if (former_parent)
		put_device(&former_parent->dev);
	put_device(&cdev->dev);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(3, "IRQ");
	CIO_TRACE_EVENT(3, sch->dev.bus_id);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}
static int
io_subchannel_probe (struct subchannel *sch)
{
	struct ccw_device *cdev;
	int rc;
	unsigned long flags;
	struct ccw_dev_id dev_id;

	cdev = sch_get_cdev(sch);
	if (cdev) {
		/*
		 * This subchannel already has an associated ccw_device.
		 * Register it and exit. This happens for all early
		 * devices, e.g. the console.
		 */
		cdev->dev.groups = ccwdev_attr_groups;
		device_initialize(&cdev->dev);
		ccw_device_register(cdev);
		/*
		 * Check if the device is already online. If it is,
		 * the reference count needs to be corrected
		 * (see ccw_device_online and css_init_done for the
		 * ugly details).
		 */
		if (cdev->private->state != DEV_STATE_NOT_OPER &&
		    cdev->private->state != DEV_STATE_OFFLINE &&
		    cdev->private->state != DEV_STATE_BOXED)
			get_device(&cdev->dev);
		return 0;
	}
	/*
	 * First check if a fitting device may be found amongst the
	 * disconnected devices or in the orphanage.
	 */
	dev_id.devno = sch->schib.pmcw.dev;
	dev_id.ssid = sch->schid.ssid;
	/* Allocate I/O subchannel private data. */
	sch->private = kzalloc(sizeof(struct io_subchannel_private),
			       GFP_KERNEL | GFP_DMA);
	if (!sch->private)
		return -ENOMEM;
	cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
	if (!cdev)
		cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
						     &dev_id);
	if (cdev) {
		/*
		 * Schedule moving the device until we have a registered
		 * subchannel to move to and succeed the probe. We can
		 * unregister later again, when the probe is through.
		 */
		cdev->private->sch = sch;
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_move_to_sch);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		return 0;
	}
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		kfree(sch->private);
		return PTR_ERR(cdev);
	}
	rc = io_subchannel_recog(cdev, sch);
	if (rc) {
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		if (cdev->dev.release)
			cdev->dev.release(&cdev->dev);
		kfree(sch->private);
	}
	return rc;
}
static int
io_subchannel_remove (struct subchannel *sch)
{
	struct ccw_device *cdev;
	unsigned long flags;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return 0;
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irqsave(cdev->ccwlock, flags);
	sch_set_cdev(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	ccw_device_unregister(cdev);
	put_device(&cdev->dev);
	kfree(sch->private);
	return 0;
}

static int io_subchannel_notify(struct subchannel *sch, int event)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return 0;
	if (!cdev->drv)
		return 0;
	if (!cdev->online)
		return 0;
	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_ioterm(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	/* Internal I/O will be retried by the interrupt handler. */
	if (cdev->private->flags.intretry)
		return;
	cdev->private->state = DEV_STATE_CLEAR_VERIFY;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
io_subchannel_shutdown(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = sch_get_cdev(sch);

	if (cio_is_console(sch->schid))
		return;
	if (!sch->schib.pmcw.ena)
		/* Nothing to do. */
		return;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		/* Subchannel is disabled, we're done. */
		return;
	cdev->private->state = DEV_STATE_QUIESCE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, HZ/10);
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	}
	cio_disable_subchannel(sch);
}
#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;

static DEFINE_SPINLOCK(ccw_console_lock);

spinlock_t * cio_get_console_lock(void)
{
	return &ccw_console_lock;
}

static int
ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
{
	int rc;

	/* Attach subchannel private data. */
	sch->private = cio_get_console_priv();
	memset(sch->private, 0, sizeof(struct io_subchannel_private));
	/* Initialize the ccw_device structure. */
	cdev->dev.parent = &sch->dev;
	rc = io_subchannel_recog(cdev, sch);
	if (rc)
		return rc;

	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	rc = -EIO;
	if (cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;
	ccw_device_online(cdev);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out_unlock;
	rc = 0;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	return 0;
}

struct ccw_device *
ccw_device_probe_console(void)
{
	struct subchannel *sch;
	int ret;

	if (xchg(&console_cdev_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch = cio_probe_console();
	if (IS_ERR(sch)) {
		console_cdev_in_use = 0;
		return (void *) sch;
	}
	memset(&console_cdev, 0, sizeof(struct ccw_device));
	memset(&console_private, 0, sizeof(struct ccw_device_private));
	console_cdev.private = &console_private;
	console_private.cdev = &console_cdev;
	ret = ccw_device_console_enable(&console_cdev, sch);
	if (ret) {
		cio_release_console();
		console_cdev_in_use = 0;
		return ERR_PTR(ret);
	}
	console_cdev.online = 1;
	return &console_cdev;
}
#endif
/*
 * get ccw_device matching the busid, but only if owned by cdrv
 */
static int
__ccwdev_check_busid(struct device *dev, void *id)
{
	char *bus_id;

	bus_id = id;

	return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0);
}

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and it is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;
	struct device_driver *drv;

	drv = get_driver(&cdrv->driver);
	if (!drv)
		return NULL;

	dev = driver_find_device(drv, NULL, (void *)bus_id,
				 __ccwdev_check_busid);
	put_driver(drv);

	return dev ? to_ccwdev(dev) : NULL;
}
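
/*
 * Illustrative sketch (driver name and bus id made up): a caller that
 * looks up one of its own devices must drop the acquired reference again
 * when done with it:
 *
 *	struct ccw_device *cdev;
 *
 *	cdev = get_ccwdev_by_busid(&my_ccw_driver, "0.0.1234");
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */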
/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */

	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;

	if (ret) {
		cdev->drv = NULL;
		return ret;
	}

	return 0;
}

static int
ccw_device_remove (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);
	if (cdev->online) {
		cdev->online = 0;
		spin_lock_irq(cdev->ccwlock);
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	disable_cmf(cdev);
}

struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
};
/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;
	drv->name = cdriver->name;
	drv->owner = cdriver->owner;

	return driver_register(drv);
}
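
/*
 * Illustrative sketch (names and callbacks made up): a minimal driver
 * registering with this bus would look roughly like this; only .ids and
 * the callbacks the driver actually implements need to be filled in.
 *
 *	static struct ccw_driver my_ccw_driver = {
 *		.owner       = THIS_MODULE,
 *		.name        = "my_ccw",
 *		.ids         = my_ids,
 *		.probe       = my_probe,
 *		.remove      = my_remove,
 *		.set_online  = my_set_online,
 *		.set_offline = my_set_offline,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return ccw_driver_register(&my_ccw_driver);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		ccw_driver_unregister(&my_ccw_driver);
 *	}
 */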
/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}

/* Helper func for qdio. */
struct subchannel_id
ccw_device_get_subchannel_id(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	return sch->schid;
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(4, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}
static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(4, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(unsigned long data)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(4, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type);
EXPORT_SYMBOL(ccw_device_work);
EXPORT_SYMBOL(ccw_device_notify_work);
EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);