/*
 * drivers/s390/cio/device_fsm.c
 * finite state machine for device handling
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *    Author(s): Cornelia Huck (cohuck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>

#include <asm/ccwdev.h>

#include "cio_debug.h"
int
device_is_disconnected(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return 0;
	cdev = sch->dev.driver_data;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}
void
device_set_disconnected(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->state = DEV_STATE_DISCONNECTED;
}
void
device_set_waiting(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	ccw_device_set_timeout(cdev, 10*HZ);
	cdev->private->state = DEV_STATE_WAIT4IO;
}
/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
static void
ccw_device_timeout(unsigned long data)
{
	struct ccw_device *cdev;

	cdev = (struct ccw_device *) data;
	spin_lock_irq(cdev->ccwlock);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everything fails, panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = stsch(sch->irq, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
		/* Not operational or no activity -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	panic("Can't stop i/o on subchannel.\n");
}
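
/*
 * Illustration (not part of the original source): the timeout handlers below
 * drive this staged cancel/halt/clear ladder by calling it again on every
 * timer expiry until it stops returning -EBUSY, roughly:
 *
 *	ret = ccw_device_cancel_halt_clear(cdev);
 *	if (ret == -EBUSY)
 *		ccw_device_set_timeout(cdev, 3*HZ);	// still pending, retry later
 *	else if (ret == -ENODEV)
 *		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);	// device is gone
 *	else
 *		// i/o is stopped; report -ETIMEDOUT or finish the state change
 */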
static void
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register. Also check here for non-matching devno.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
	    cdev->private->devno != sch->schib.pmcw.dev) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return;
	}
	cdev->private->flags.donotify = 1;
}
/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE so we have to find out by magic (i. e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
	int mask, i;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->lpm & mask))
			continue;
		if (old_lpm & mask)
			continue;
		chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
	}
}
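
/*
 * Illustration (not from the original file): pim, pam, pom and lpm are 8-bit
 * path masks in which bit 0x80 corresponds to chpid[0] and bit 0x01 to
 * chpid[7]; that is why the loop above matches "0x80 >> i" against sch->lpm.
 * A path that is set in the freshly computed lpm but was not set in old_lpm
 * is reported via chpid_is_actually_online().
 */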
/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->irq, &sch->schib);
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		cdev->id = (struct ccw_device_id) {
			.cu_type   = cdev->private->senseid.cu_type,
			.cu_model  = cdev->private->senseid.cu_model,
			.dev_type  = cdev->private->senseid.dev_type,
			.dev_model = cdev->private->senseid.dev_model,
		};
		if (notify) {
			/* Get device online again. */
			cdev->private->state = DEV_STATE_OFFLINE;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
			  "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n", cdev->private->devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:		/* Sense id stopped by timeout. */
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
static void
ccw_device_oper_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_OPER) : 0;
	if (!ret)
		/* Driver doesn't want device back. */
		ccw_device_do_unreg_rereg((void *)cdev);
	else
		wake_up(&cdev->private->wait_q);
}
/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
			     (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
/*
 * Function called from device_pgid.c after sense path group id has completed.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case 0:
		/* Start Path Group verification. */
		sch->vpm = 0;	/* Start with no path groups set. */
		cdev->private->state = DEV_STATE_VERIFY;
		ccw_device_verify_start(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
/*
 * Start device recognition.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after a
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
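
/*
 * Sketch of the recognition flow just described (illustration only, not part
 * of the original file):
 *
 *	ccw_device_recognition()
 *	  -> state = DEV_STATE_SENSE_ID, sense id started, 60s timer armed
 *	  -> interrupts handled by ccw_device_sense_id_irq() (device_id.c)
 *	  -> completion reported via ccw_device_sense_id_done():
 *	       err == 0      -> ccw_device_recog_done(cdev, DEV_STATE_OFFLINE)
 *	       err == -ETIME -> ccw_device_recog_done(cdev, DEV_STATE_BOXED)
 *	       otherwise     -> ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER)
 */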
/*
 * Handle timeout in device recognition.
 */
static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}
static void
ccw_device_nopath_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
	if (!ret) {
		if (get_device(&sch->dev)) {
			/* Driver doesn't want to keep device. */
			cio_disable_subchannel(sch);
			if (get_device(&cdev->dev)) {
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister,
					     (void *)cdev);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			}
		}
	} else {
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
}
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	cdev->private->flags.doverify = 0;
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state online immediately. */
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cdev->private->state != DEV_STATE_ONLINE) {
		if (sch->schib.scsw.actl != 0)
			return -EBUSY;
		return -EINVAL;
	}
	if (sch->schib.scsw.actl != 0)
		return -EBUSY;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
/*
 * Handle timeout in device online/offline process.
 */
static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}
/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
/*
 * Handle not operational event while offline.
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	device_unregister(&sch->dev);
	sch->schib.pmcw.intparm = 0;
	wake_up(&cdev->private->wait_q);
}
/*
 * Handle not operational event while online.
 */
static void
ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (sch->driver->notify &&
	    sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
		ccw_device_set_timeout(cdev, 0);
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	cdev->private->state = DEV_STATE_NOT_OPER;
	cio_disable_subchannel(sch);
	if (sch->schib.scsw.actl != 0) {
		// FIXME: not-oper indication to device driver ?
		ccw_device_call_handler(cdev);
	}
	device_unregister(&sch->dev);
	sch->schib.pmcw.intparm = 0;
	wake_up(&cdev->private->wait_q);
}
/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (!cdev->private->options.pgroup)
		return;
	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (sch->schib.scsw.actl != 0 ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
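
/*
 * Summary of the decision above (illustration, not part of the original
 * source):
 *
 *	state == DEV_STATE_W4SENSE            -> flags.doverify = 1, defer
 *	subchannel still active (actl != 0)   -> flags.doverify = 1, defer
 *	final status not yet given to driver  -> flags.doverify = 1, defer
 *	otherwise                             -> state = DEV_STATE_VERIFY and
 *	                                         ccw_device_verify_start(cdev)
 */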
/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
	     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(irb, &cdev->private->irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
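
/*
 * Illustration (not part of the original file): an interrupt whose scsw
 * carries only status pending + alert status with cc == 0 is unsolicited.
 * If it also reports a unit check without concurrent sense data, a basic
 * sense is started and the device waits in DEV_STATE_W4SENSE; otherwise it
 * is handed to cdev->handler with intparm 0. Solicited interrupts are merged
 * into the accumulated status via ccw_device_accumulate_irb() first.
 */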
/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	} else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			printk("Huh? %s(%s): unsolicited interrupt...\n",
			       __FUNCTION__, cdev->dev.bus_id);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Accumulate status. We don't do basic sense. */
	ccw_device_accumulate_irb(cdev, irb);
	/* Try to start delayed device verification. */
	ccw_device_online_verify(cdev, 0);
	/* Note: Don't call handler for cio initiated clear! */
}
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	//FIXME: Can we get here?
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	struct subchannel *sch;

	irb = (struct irb *) __LC_IRB;
	/*
	 * Accumulate status and find out if a basic sense is needed.
	 * This is fine since we have already adapted the lpm.
	 */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}

	/* Iff device is idle, reset timeout. */
	sch = to_subchannel(cdev->dev.parent);
	if (!stsch(sch->irq, &sch->schib))
		if (sch->schib.scsw.actl == 0)
			ccw_device_set_timeout(cdev, 0);
	/* Call the handler. */
	ccw_device_call_handler(cdev);
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		ccw_device_online_verify(cdev, 0);
}
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
static void
ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* When the I/O has terminated, we have to start verification. */
	if (cdev->private->options.pgroup)
		cdev->private->flags.doverify = 1;
}
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((irb->scsw.stctl ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!irb->scsw.cc))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;
	unsigned long flags;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	spin_lock_irqsave(&sch->lock, flags);
	if (cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irqrestore(&sch->lock, flags);
		return;
	}
	/* Update some values. */
	if (stsch(sch->irq, &sch->schib)) {
		spin_unlock_irqrestore(&sch->lock, flags);
		return;
	}
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	/* We should also update ssd info, but this has to wait. */
	ccw_device_start_id(cdev, 0);
	spin_unlock_irqrestore(&sch->lock, flags);
}
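
/*
 * Illustration (not from the original source): both here and in
 * ccw_device_recog_done() the usable path mask is the intersection of the
 * installed, available and operational masks with the subchannel's opm:
 *
 *	sch->lpm = sch->schib.pmcw.pim
 *		 & sch->schib.pmcw.pam
 *		 & sch->schib.pmcw.pom
 *		 & sch->opm;
 */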
static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in state offline means a previous disable was not
	 * successful. Try again.
	 */
	cio_disable_subchannel(sch);
}
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	if (dev_event == DEV_EVENT_NOTOPER)
		cdev->private->state = DEV_STATE_NOT_OPER;
	else
		cdev->private->state = DEV_STATE_OFFLINE;
	wake_up(&cdev->private->wait_q);
}
static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		cdev->private->state = DEV_STATE_OFFLINE;
		wake_up(&cdev->private->wait_q);
		break;
	case -ENODEV:
		cdev->private->state = DEV_STATE_NOT_OPER;
		wake_up(&cdev->private->wait_q);
		break;
	default:
		ccw_device_set_timeout(cdev, HZ/10);
	}
}
/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * Bug operation action.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
	       cdev->private->state, dev_event);
}
/*
 * device statemachine
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_WAIT4IO] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_wait4io_verify,
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
};
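
/*
 * Illustration (not part of this file): dev_fsm_event(), defined in the cio
 * device header, presumably dispatches through this table along the lines of
 *
 *	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
 *
 * so every state has to provide an entry for all four events; "impossible"
 * combinations are wired to ccw_device_nop() or ccw_device_bug().
 */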
/*
 * io_subchannel_irq is called for "real" interrupts or for status
 * pending conditions on msch.
 */
void
io_subchannel_irq (struct device *pdev)
{
	struct ccw_device *cdev;

	cdev = to_subchannel(pdev)->dev.driver_data;

	CIO_TRACE_EVENT (3, "IRQ");
	CIO_TRACE_EVENT (3, pdev->bus_id);

	dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);