/*
 * drivers/s390/cio/device_ops.c
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
#include "chp.h"
int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL))
		return -EINVAL;
	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
	return 0;
}
int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     (flags & CCWDEV_REPORT_ALL)) ||
	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     cdev->private->options.repall) ||
	    ((flags & CCWDEV_REPORT_ALL) &&
	     cdev->private->options.fast))
		return -EINVAL;
	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
	return 0;
}
void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
}
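/*
 * Usage sketch (illustrative, not part of this file): a CCW device
 * driver would typically request its options from its set_online
 * callback, e.g.
 *
 *	rc = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
 *				    CCWDEV_EARLY_NOTIFICATION);
 *	if (rc)
 *		return rc;
 *
 * Combining CCWDEV_EARLY_NOTIFICATION with CCWDEV_REPORT_ALL fails
 * with -EINVAL, since the two interrupt-delivery policies conflict.
 */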
int
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	ret = cio_clear(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}
int
ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, __u8 key,
		     unsigned long flags)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_VERIFY ||
	    cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = 1;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options(sch, flags);
	if (ret)
		return ret;
	/* Adjust requested path mask to excluded varied off paths. */
	if (lpm) {
		lpm &= sch->opm;
		if (lpm == 0)
			return -EACCES;
	}
	ret = cio_start_key(sch, cpa, lpm, key);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}
int
ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
			     unsigned long intparm, __u8 lpm, __u8 key,
			     unsigned long flags, int expires)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
	if (ret != 0)
		ccw_device_set_timeout(cdev, 0);
	return ret;
}
int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		 unsigned long intparm, __u8 lpm, unsigned long flags)
{
	return ccw_device_start_key(cdev, cpa, intparm, lpm,
				    PAGE_DEFAULT_KEY, flags);
}
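/*
 * Usage sketch (illustrative; my_buf and my_req are placeholders, not
 * part of this file): a driver builds a channel program and starts it
 * while holding the ccwdev lock:
 *
 *	struct ccw1 ccw;
 *	unsigned long flags;
 *	int rc;
 *
 *	ccw.cmd_code = CCW_CMD_BASIC_SENSE;
 *	ccw.flags = CCW_FLAG_SLI;
 *	ccw.count = sizeof(my_buf);
 *	ccw.cda = (__u32) __pa(my_buf);
 *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *	rc = ccw_device_start(cdev, &ccw, (unsigned long) my_req, 0, 0);
 *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *
 * On success, the intparm (here my_req) is handed back to the driver's
 * interrupt handler together with the irb.
 */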
int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, unsigned long flags,
			 int expires)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
					    PAGE_DEFAULT_KEY, flags,
					    expires);
}
int
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	ret = cio_halt(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}
int
ccw_device_resume(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
		return -EINVAL;
	return cio_resume(sch);
}
/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
	struct subchannel *sch;
	unsigned int stctl;
	int ending_status;

	sch = to_subchannel(cdev->dev.parent);

	/*
	 * We pass the interrupt to the device driver's handler if
	 *  - we received ending status,
	 *  - the action handler requested to see all interrupts,
	 *  - we received an intermediate status,
	 *  - fast notification was requested (primary status), or
	 *  - this is an unsolicited interrupt.
	 */
	stctl = cdev->private->irb.scsw.stctl;
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	/* Clear pending timers for device driver initiated I/O. */
	if (ending_status)
		ccw_device_set_timeout(cdev, 0);
	/*
	 * Now we are ready to call the device driver interrupt handler.
	 */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);

	/*
	 * Clear the old and now useless interrupt response block.
	 */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	return 1;
}
/*
 * Search for CIW command in extended sense data.
 */
struct ciw *
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
	int ciw_cnt;

	if (cdev->private->flags.esid == 0)
		return NULL;
	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
			return cdev->private->senseid.ciw + ciw_cnt;
	return NULL;
}
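/*
 * Usage sketch (illustrative): read_conf_data_lpm() below uses this to
 * find the model-dependent Read Configuration Data command:
 *
 *	struct ciw *ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
 *	if (!ciw || ciw->cmd == 0)
 *		return -EOPNOTSUPP;
 *
 * ciw->cmd then holds the command code and ciw->count the required
 * buffer size.
 */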
__u8
ccw_device_get_path_mask(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return 0;
	else
		return sch->lpm;
}
static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{
	if (!ip)
		/* unsolicited interrupt */
		return;

	/* Abuse intparm for error reporting. */
	if (IS_ERR(irb))
		cdev->private->intparm = -EIO;
	else if (irb->scsw.cc == 1)
		/* Retry for deferred condition code. */
		cdev->private->intparm = -EAGAIN;
	else if ((irb->scsw.dstat !=
		  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
		 (irb->scsw.cstat != 0)) {
		/*
		 * We didn't get channel end / device end. Check if path
		 * verification has been started; we can retry after it has
		 * finished. We also retry unit checks except for command
		 * reject or intervention required. Also check for long busy
		 * conditions.
		 */
		if (cdev->private->flags.doverify ||
		    cdev->private->state == DEV_STATE_VERIFY)
			cdev->private->intparm = -EAGAIN;
		else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
			 !(irb->ecw[0] &
			   (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
			cdev->private->intparm = -EAGAIN;
		else if ((irb->scsw.dstat & DEV_STAT_ATTENTION) &&
			 (irb->scsw.dstat & DEV_STAT_DEV_END) &&
			 (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP))
			cdev->private->intparm = -EAGAIN;
		else
			cdev->private->intparm = -EIO;
	} else
		cdev->private->intparm = 0;
	wake_up(&cdev->private->wait_q);
}
static int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw,
			long magic, __u8 lpm)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	do {
		ccw_device_set_timeout(cdev, 60 * HZ);
		ret = cio_start(sch, ccw, lpm);
		if (ret != 0)
			ccw_device_set_timeout(cdev, 0);
		if (ret == -EBUSY) {
			/* Try again later. */
			spin_unlock_irq(sch->lock);
			msleep(10);
			spin_lock_irq(sch->lock);
			continue;
		}
		if (ret != 0)
			/* Non-retryable error. */
			break;
		/* Wait for end of request. */
		cdev->private->intparm = magic;
		spin_unlock_irq(sch->lock);
		wait_event(cdev->private->wait_q,
			   (cdev->private->intparm == -EIO) ||
			   (cdev->private->intparm == -EAGAIN) ||
			   (cdev->private->intparm == 0));
		spin_lock_irq(sch->lock);
		/* Check at least for channel end / device end */
		if (cdev->private->intparm == -EIO) {
			/* Non-retryable error. */
			ret = -EIO;
			break;
		}
		if (cdev->private->intparm == 0)
			/* Success. */
			break;
		/* Try again later. */
		spin_unlock_irq(sch->lock);
		msleep(10);
		spin_lock_irq(sch->lock);
	} while (1);

	return ret;
}
/**
 * read_dev_chars() - read device characteristics
 * @cdev: target ccw device
 * @buffer: pointer to buffer for rdc data
 * @length: size of rdc data
 *
 * Returns 0 for success, negative error value on failure.
 *
 * Context: called for an online device, lock not held.
 */
int
read_dev_chars(struct ccw_device *cdev, void **buffer, int length)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	int ret;
	struct ccw1 *rdc_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(4, "rddevch");
	CIO_TRACE_EVENT(4, sch->dev.bus_id);

	rdc_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rdc_ccw)
		return -ENOMEM;
	rdc_ccw->cmd_code = CCW_CMD_RDC;
	rdc_ccw->count = length;
	rdc_ccw->flags = CCW_FLAG_SLI;
	ret = set_normalized_cda(rdc_ccw, (*buffer));
	if (ret != 0) {
		kfree(rdc_ccw);
		return ret;
	}

	spin_lock_irq(sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C4C3 == ebcdic "RDC" */
		ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(sch->lock);

	clear_normalized_cda(rdc_ccw);
	kfree(rdc_ccw);

	return ret;
}
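/*
 * Usage sketch (illustrative; struct my_rdc_data is a placeholder):
 * callers pass a pre-allocated buffer through a void ** so that
 * set_normalized_cda() can substitute an IDAL when needed:
 *
 *	struct my_rdc_data rdc_data;
 *	void *rdc_ptr = &rdc_data;
 *	int rc;
 *
 *	rc = read_dev_chars(cdev, &rdc_ptr, sizeof(rdc_data));
 *	if (rc)
 *		dev_warn(&cdev->dev, "RDC failed: %d\n", rc);
 */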
/*
 * Read Configuration data using path mask
 */
int
read_conf_data_lpm(struct ccw_device *cdev, void **buffer, int *length,
		   __u8 lpm)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct ccw1 *rcd_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(4, "rdconf");
	CIO_TRACE_EVENT(4, sch->dev.bus_id);

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;

	/* Adjust requested path mask to excluded varied off paths. */
	if (lpm) {
		lpm &= sch->opm;
		if (lpm == 0)
			return -EACCES;
	}

	rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rcd_ccw)
		return -ENOMEM;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		kfree(rcd_ccw);
		return -ENOMEM;
	}
	rcd_ccw->cmd_code = ciw->cmd;
	rcd_ccw->cda = (__u32) __pa(rcd_buf);
	rcd_ccw->count = ciw->count;
	rcd_ccw->flags = CCW_FLAG_SLI;

	spin_lock_irq(sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C3C4 == ebcdic "RCD" */
		ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(sch->lock);

	/*
	 * On success we update the user input parameters.
	 */
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	kfree(rcd_ccw);

	return ret;
}
/*
 * Read Configuration data
 */
int
read_conf_data(struct ccw_device *cdev, void **buffer, int *length)
{
	return read_conf_data_lpm(cdev, buffer, length, 0);
}
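/*
 * Usage sketch (illustrative): unlike read_dev_chars(), the RCD buffer
 * is allocated here and, on success, handed to the caller, who owns it
 * afterwards:
 *
 *	void *rcd_buf = NULL;
 *	int len, rc;
 *
 *	rc = read_conf_data(cdev, &rcd_buf, &len);
 *	if (rc == 0) {
 *		... use len bytes at rcd_buf ...
 *		kfree(rcd_buf);
 *	}
 */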
/*
 * Try to break the lock on a boxed device.
 */
int
ccw_device_stlck(struct ccw_device *cdev)
{
	void *buf, *buf2;
	unsigned long flags;
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;

	if (cdev->drv && !cdev->private->options.force)
		return -EINVAL;

	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "stl lock");
	CIO_TRACE_EVENT(2, cdev->dev.bus_id);

	buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	if (!buf2) {
		kfree(buf);
		return -ENOMEM;
	}
	spin_lock_irqsave(sch->lock, flags);
	ret = cio_enable_subchannel(sch, 3);
	if (ret)
		goto out_unlock;
	/*
	 * Setup ccw. We chain an unconditional reserve and a release so we
	 * only break the lock.
	 */
	cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
	cdev->private->iccws[0].cda = (__u32) __pa(buf);
	cdev->private->iccws[0].count = 32;
	cdev->private->iccws[0].flags = CCW_FLAG_CC;
	cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
	cdev->private->iccws[1].cda = (__u32) __pa(buf2);
	cdev->private->iccws[1].count = 32;
	cdev->private->iccws[1].flags = 0;
	ret = cio_start(sch, cdev->private->iccws, 0);
	if (ret) {
		cio_disable_subchannel(sch); //FIXME: return code?
		goto out_unlock;
	}
	cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
	spin_unlock_irqrestore(sch->lock, flags);
	wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
	spin_lock_irqsave(sch->lock, flags);
	cio_disable_subchannel(sch); //FIXME: return code?
	if ((cdev->private->irb.scsw.dstat !=
	     (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
	    (cdev->private->irb.scsw.cstat != 0))
		ret = -EIO;
	/* Clear irb. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
	kfree(buf);
	kfree(buf2);
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}
void *ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
	struct subchannel *sch;
	struct chp_id chpid;

	sch = to_subchannel(cdev->dev.parent);
	chp_id_init(&chpid);
	chpid.id = sch->schib.pmcw.chpid[chp_no];
	return chp_get_chp_desc(chpid);
}
/**
 * ccw_device_get_id - obtain a ccw device id
 * @cdev: device to obtain the id for
 * @dev_id: where to fill in the values
 */
void ccw_device_get_id(struct ccw_device *cdev, struct ccw_dev_id *dev_id)
{
	*dev_id = cdev->private->dev_id;
}
EXPORT_SYMBOL(ccw_device_get_id);
// FIXME: these have to go:

int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
	return cdev->private->schid.sch_no;
}

int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
	return cdev->private->dev_id.devno;
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(read_conf_data_lpm);