/*
 *  drivers/s390/cio/device_ops.c
 *
 *   Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                      IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Cornelia Huck (cohuck@de.ibm.com)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
30 ccw_device_set_options(struct ccw_device
*cdev
, unsigned long flags
)
33 * The flag usage is mutal exclusive ...
35 if ((flags
& CCWDEV_EARLY_NOTIFICATION
) &&
36 (flags
& CCWDEV_REPORT_ALL
))
38 cdev
->private->options
.fast
= (flags
& CCWDEV_EARLY_NOTIFICATION
) != 0;
39 cdev
->private->options
.repall
= (flags
& CCWDEV_REPORT_ALL
) != 0;
40 cdev
->private->options
.pgroup
= (flags
& CCWDEV_DO_PATHGROUP
) != 0;
41 cdev
->private->options
.force
= (flags
& CCWDEV_ALLOW_FORCE
) != 0;
46 ccw_device_clear(struct ccw_device
*cdev
, unsigned long intparm
)
48 struct subchannel
*sch
;
53 if (cdev
->private->state
== DEV_STATE_NOT_OPER
)
55 if (cdev
->private->state
!= DEV_STATE_ONLINE
&&
56 cdev
->private->state
!= DEV_STATE_WAIT4IO
&&
57 cdev
->private->state
!= DEV_STATE_W4SENSE
)
59 sch
= to_subchannel(cdev
->dev
.parent
);
64 cdev
->private->intparm
= intparm
;
69 ccw_device_start_key(struct ccw_device
*cdev
, struct ccw1
*cpa
,
70 unsigned long intparm
, __u8 lpm
, __u8 key
,
73 struct subchannel
*sch
;
78 sch
= to_subchannel(cdev
->dev
.parent
);
81 if (cdev
->private->state
== DEV_STATE_NOT_OPER
)
83 if (cdev
->private->state
== DEV_STATE_VERIFY
) {
84 /* Remember to fake irb when finished. */
85 if (!cdev
->private->flags
.fake_irb
) {
86 cdev
->private->flags
.fake_irb
= 1;
87 cdev
->private->intparm
= intparm
;
90 /* There's already a fake I/O around. */
93 if (cdev
->private->state
!= DEV_STATE_ONLINE
||
94 ((sch
->schib
.scsw
.stctl
& SCSW_STCTL_PRIM_STATUS
) &&
95 !(sch
->schib
.scsw
.stctl
& SCSW_STCTL_SEC_STATUS
)) ||
96 cdev
->private->flags
.doverify
)
98 ret
= cio_set_options (sch
, flags
);
101 ret
= cio_start_key (sch
, cpa
, lpm
, key
);
103 cdev
->private->intparm
= intparm
;
109 ccw_device_start_timeout_key(struct ccw_device
*cdev
, struct ccw1
*cpa
,
110 unsigned long intparm
, __u8 lpm
, __u8 key
,
111 unsigned long flags
, int expires
)
117 ccw_device_set_timeout(cdev
, expires
);
118 ret
= ccw_device_start_key(cdev
, cpa
, intparm
, lpm
, key
, flags
);
120 ccw_device_set_timeout(cdev
, 0);
125 ccw_device_start(struct ccw_device
*cdev
, struct ccw1
*cpa
,
126 unsigned long intparm
, __u8 lpm
, unsigned long flags
)
128 return ccw_device_start_key(cdev
, cpa
, intparm
, lpm
,
129 PAGE_DEFAULT_KEY
, flags
);
133 ccw_device_start_timeout(struct ccw_device
*cdev
, struct ccw1
*cpa
,
134 unsigned long intparm
, __u8 lpm
, unsigned long flags
,
137 return ccw_device_start_timeout_key(cdev
, cpa
, intparm
, lpm
,
138 PAGE_DEFAULT_KEY
, flags
,
144 ccw_device_halt(struct ccw_device
*cdev
, unsigned long intparm
)
146 struct subchannel
*sch
;
151 if (cdev
->private->state
== DEV_STATE_NOT_OPER
)
153 if (cdev
->private->state
!= DEV_STATE_ONLINE
&&
154 cdev
->private->state
!= DEV_STATE_WAIT4IO
&&
155 cdev
->private->state
!= DEV_STATE_W4SENSE
)
157 sch
= to_subchannel(cdev
->dev
.parent
);
162 cdev
->private->intparm
= intparm
;
167 ccw_device_resume(struct ccw_device
*cdev
)
169 struct subchannel
*sch
;
173 sch
= to_subchannel(cdev
->dev
.parent
);
176 if (cdev
->private->state
== DEV_STATE_NOT_OPER
)
178 if (cdev
->private->state
!= DEV_STATE_ONLINE
||
179 !(sch
->schib
.scsw
.actl
& SCSW_ACTL_SUSPENDED
))
181 return cio_resume(sch
);
185 * Pass interrupt to device driver.
188 ccw_device_call_handler(struct ccw_device
*cdev
)
190 struct subchannel
*sch
;
194 sch
= to_subchannel(cdev
->dev
.parent
);
197 * we allow for the device action handler if .
198 * - we received ending status
199 * - the action handler requested to see all interrupts
200 * - we received an intermediate status
201 * - fast notification was requested (primary status)
202 * - unsolicited interrupts
204 stctl
= cdev
->private->irb
.scsw
.stctl
;
205 ending_status
= (stctl
& SCSW_STCTL_SEC_STATUS
) ||
206 (stctl
== (SCSW_STCTL_ALERT_STATUS
| SCSW_STCTL_STATUS_PEND
)) ||
207 (stctl
== SCSW_STCTL_STATUS_PEND
);
208 if (!ending_status
&&
209 !cdev
->private->options
.repall
&&
210 !(stctl
& SCSW_STCTL_INTER_STATUS
) &&
211 !(cdev
->private->options
.fast
&&
212 (stctl
& SCSW_STCTL_PRIM_STATUS
)))
216 * Now we are ready to call the device driver interrupt handler.
219 cdev
->handler(cdev
, cdev
->private->intparm
,
220 &cdev
->private->irb
);
223 * Clear the old and now useless interrupt response block.
225 memset(&cdev
->private->irb
, 0, sizeof(struct irb
));
231 * Search for CIW command in extended sense data.
234 ccw_device_get_ciw(struct ccw_device
*cdev
, __u32 ct
)
238 if (cdev
->private->flags
.esid
== 0)
240 for (ciw_cnt
= 0; ciw_cnt
< MAX_CIWS
; ciw_cnt
++)
241 if (cdev
->private->senseid
.ciw
[ciw_cnt
].ct
== ct
)
242 return cdev
->private->senseid
.ciw
+ ciw_cnt
;
247 ccw_device_get_path_mask(struct ccw_device
*cdev
)
249 struct subchannel
*sch
;
251 sch
= to_subchannel(cdev
->dev
.parent
);
259 ccw_device_wake_up(struct ccw_device
*cdev
, unsigned long ip
, struct irb
*irb
)
262 /* unsolicited interrupt */
265 /* Abuse intparm for error reporting. */
267 cdev
->private->intparm
= -EIO
;
268 else if ((irb
->scsw
.dstat
!=
269 (DEV_STAT_CHN_END
|DEV_STAT_DEV_END
)) ||
270 (irb
->scsw
.cstat
!= 0)) {
272 * We didn't get channel end / device end. Check if path
273 * verification has been started; we can retry after it has
274 * finished. We also retry unit checks except for command reject
275 * or intervention required.
277 if (cdev
->private->flags
.doverify
||
278 cdev
->private->state
== DEV_STATE_VERIFY
)
279 cdev
->private->intparm
= -EAGAIN
;
280 if ((irb
->scsw
.dstat
& DEV_STAT_UNIT_CHECK
) &&
282 (SNS0_CMD_REJECT
| SNS0_INTERVENTION_REQ
)))
283 cdev
->private->intparm
= -EAGAIN
;
285 cdev
->private->intparm
= -EIO
;
288 cdev
->private->intparm
= 0;
289 wake_up(&cdev
->private->wait_q
);
293 __ccw_device_retry_loop(struct ccw_device
*cdev
, struct ccw1
*ccw
, long magic
, __u8 lpm
)
296 struct subchannel
*sch
;
298 sch
= to_subchannel(cdev
->dev
.parent
);
300 ret
= cio_start (sch
, ccw
, lpm
);
301 if ((ret
== -EBUSY
) || (ret
== -EACCES
)) {
302 /* Try again later. */
303 spin_unlock_irq(&sch
->lock
);
305 spin_lock_irq(&sch
->lock
);
309 /* Non-retryable error. */
311 /* Wait for end of request. */
312 cdev
->private->intparm
= magic
;
313 spin_unlock_irq(&sch
->lock
);
314 wait_event(cdev
->private->wait_q
,
315 (cdev
->private->intparm
== -EIO
) ||
316 (cdev
->private->intparm
== -EAGAIN
) ||
317 (cdev
->private->intparm
== 0));
318 spin_lock_irq(&sch
->lock
);
319 /* Check at least for channel end / device end */
320 if (cdev
->private->intparm
== -EIO
) {
321 /* Non-retryable error. */
325 if (cdev
->private->intparm
== 0)
328 /* Try again later. */
329 spin_unlock_irq(&sch
->lock
);
331 spin_lock_irq(&sch
->lock
);
338 * read_dev_chars() - read device characteristics
339 * @param cdev target ccw device
340 * @param buffer pointer to buffer for rdc data
341 * @param length size of rdc data
342 * @returns 0 for success, negative error value on failure
345 * called for online device, lock not held
348 read_dev_chars (struct ccw_device
*cdev
, void **buffer
, int length
)
350 void (*handler
)(struct ccw_device
*, unsigned long, struct irb
*);
351 struct subchannel
*sch
;
353 struct ccw1
*rdc_ccw
;
357 if (!buffer
|| !length
)
359 sch
= to_subchannel(cdev
->dev
.parent
);
361 CIO_TRACE_EVENT (4, "rddevch");
362 CIO_TRACE_EVENT (4, sch
->dev
.bus_id
);
364 rdc_ccw
= kmalloc(sizeof(struct ccw1
), GFP_KERNEL
| GFP_DMA
);
367 memset(rdc_ccw
, 0, sizeof(struct ccw1
));
368 rdc_ccw
->cmd_code
= CCW_CMD_RDC
;
369 rdc_ccw
->count
= length
;
370 rdc_ccw
->flags
= CCW_FLAG_SLI
;
371 ret
= set_normalized_cda (rdc_ccw
, (*buffer
));
377 spin_lock_irq(&sch
->lock
);
378 /* Save interrupt handler. */
379 handler
= cdev
->handler
;
380 /* Temporarily install own handler. */
381 cdev
->handler
= ccw_device_wake_up
;
382 if (cdev
->private->state
!= DEV_STATE_ONLINE
)
384 else if (((sch
->schib
.scsw
.stctl
& SCSW_STCTL_PRIM_STATUS
) &&
385 !(sch
->schib
.scsw
.stctl
& SCSW_STCTL_SEC_STATUS
)) ||
386 cdev
->private->flags
.doverify
)
389 /* 0x00D9C4C3 == ebcdic "RDC" */
390 ret
= __ccw_device_retry_loop(cdev
, rdc_ccw
, 0x00D9C4C3, 0);
392 /* Restore interrupt handler. */
393 cdev
->handler
= handler
;
394 spin_unlock_irq(&sch
->lock
);
396 clear_normalized_cda (rdc_ccw
);
403 * Read Configuration data using path mask
406 read_conf_data_lpm (struct ccw_device
*cdev
, void **buffer
, int *length
, __u8 lpm
)
408 void (*handler
)(struct ccw_device
*, unsigned long, struct irb
*);
409 struct subchannel
*sch
;
413 struct ccw1
*rcd_ccw
;
417 if (!buffer
|| !length
)
419 sch
= to_subchannel(cdev
->dev
.parent
);
421 CIO_TRACE_EVENT (4, "rdconf");
422 CIO_TRACE_EVENT (4, sch
->dev
.bus_id
);
425 * scan for RCD command in extended SenseID data
427 ciw
= ccw_device_get_ciw(cdev
, CIW_TYPE_RCD
);
428 if (!ciw
|| ciw
->cmd
== 0)
431 rcd_ccw
= kmalloc(sizeof(struct ccw1
), GFP_KERNEL
| GFP_DMA
);
434 memset(rcd_ccw
, 0, sizeof(struct ccw1
));
435 rcd_buf
= kmalloc(ciw
->count
, GFP_KERNEL
| GFP_DMA
);
440 memset (rcd_buf
, 0, ciw
->count
);
441 rcd_ccw
->cmd_code
= ciw
->cmd
;
442 rcd_ccw
->cda
= (__u32
) __pa (rcd_buf
);
443 rcd_ccw
->count
= ciw
->count
;
444 rcd_ccw
->flags
= CCW_FLAG_SLI
;
446 spin_lock_irq(&sch
->lock
);
447 /* Save interrupt handler. */
448 handler
= cdev
->handler
;
449 /* Temporarily install own handler. */
450 cdev
->handler
= ccw_device_wake_up
;
451 if (cdev
->private->state
!= DEV_STATE_ONLINE
)
453 else if (((sch
->schib
.scsw
.stctl
& SCSW_STCTL_PRIM_STATUS
) &&
454 !(sch
->schib
.scsw
.stctl
& SCSW_STCTL_SEC_STATUS
)) ||
455 cdev
->private->flags
.doverify
)
458 /* 0x00D9C3C4 == ebcdic "RCD" */
459 ret
= __ccw_device_retry_loop(cdev
, rcd_ccw
, 0x00D9C3C4, lpm
);
461 /* Restore interrupt handler. */
462 cdev
->handler
= handler
;
463 spin_unlock_irq(&sch
->lock
);
466 * on success we update the user input parms
473 *length
= ciw
->count
;
/*
 *  Read Configuration data on all available paths (lpm == 0).
 *  See read_conf_data_lpm() for the buffer ownership contract.
 */
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
	return read_conf_data_lpm (cdev, buffer, length, 0);
}
491 * Try to break the lock on a boxed device.
494 ccw_device_stlck(struct ccw_device
*cdev
)
498 struct subchannel
*sch
;
504 if (cdev
->drv
&& !cdev
->private->options
.force
)
507 sch
= to_subchannel(cdev
->dev
.parent
);
509 CIO_TRACE_EVENT(2, "stl lock");
510 CIO_TRACE_EVENT(2, cdev
->dev
.bus_id
);
512 buf
= kmalloc(32*sizeof(char), GFP_DMA
|GFP_KERNEL
);
515 buf2
= kmalloc(32*sizeof(char), GFP_DMA
|GFP_KERNEL
);
520 spin_lock_irqsave(&sch
->lock
, flags
);
521 ret
= cio_enable_subchannel(sch
, 3);
525 * Setup ccw. We chain an unconditional reserve and a release so we
526 * only break the lock.
528 cdev
->private->iccws
[0].cmd_code
= CCW_CMD_STLCK
;
529 cdev
->private->iccws
[0].cda
= (__u32
) __pa(buf
);
530 cdev
->private->iccws
[0].count
= 32;
531 cdev
->private->iccws
[0].flags
= CCW_FLAG_CC
;
532 cdev
->private->iccws
[1].cmd_code
= CCW_CMD_RELEASE
;
533 cdev
->private->iccws
[1].cda
= (__u32
) __pa(buf2
);
534 cdev
->private->iccws
[1].count
= 32;
535 cdev
->private->iccws
[1].flags
= 0;
536 ret
= cio_start(sch
, cdev
->private->iccws
, 0);
538 cio_disable_subchannel(sch
); //FIXME: return code?
541 cdev
->private->irb
.scsw
.actl
|= SCSW_ACTL_START_PEND
;
542 spin_unlock_irqrestore(&sch
->lock
, flags
);
543 wait_event(cdev
->private->wait_q
, cdev
->private->irb
.scsw
.actl
== 0);
544 spin_lock_irqsave(&sch
->lock
, flags
);
545 cio_disable_subchannel(sch
); //FIXME: return code?
546 if ((cdev
->private->irb
.scsw
.dstat
!=
547 (DEV_STAT_CHN_END
|DEV_STAT_DEV_END
)) ||
548 (cdev
->private->irb
.scsw
.cstat
!= 0))
551 memset(&cdev
->private->irb
, 0, sizeof(struct irb
));
557 spin_unlock_irqrestore(&sch
->lock
, flags
);
562 ccw_device_get_chp_desc(struct ccw_device
*cdev
, int chp_no
)
564 struct subchannel
*sch
;
566 sch
= to_subchannel(cdev
->dev
.parent
);
567 return chsc_get_chp_desc(sch
, chp_no
);
570 // FIXME: these have to go:
573 _ccw_device_get_subchannel_number(struct ccw_device
*cdev
)
575 return cdev
->private->irq
;
579 _ccw_device_get_device_number(struct ccw_device
*cdev
)
581 return cdev
->private->devno
;
585 MODULE_LICENSE("GPL");
586 EXPORT_SYMBOL(ccw_device_set_options
);
587 EXPORT_SYMBOL(ccw_device_clear
);
588 EXPORT_SYMBOL(ccw_device_halt
);
589 EXPORT_SYMBOL(ccw_device_resume
);
590 EXPORT_SYMBOL(ccw_device_start_timeout
);
591 EXPORT_SYMBOL(ccw_device_start
);
592 EXPORT_SYMBOL(ccw_device_start_timeout_key
);
593 EXPORT_SYMBOL(ccw_device_start_key
);
594 EXPORT_SYMBOL(ccw_device_get_ciw
);
595 EXPORT_SYMBOL(ccw_device_get_path_mask
);
596 EXPORT_SYMBOL(read_conf_data
);
597 EXPORT_SYMBOL(read_dev_chars
);
598 EXPORT_SYMBOL(_ccw_device_get_subchannel_number
);
599 EXPORT_SYMBOL(_ccw_device_get_device_number
);
600 EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc
);
601 EXPORT_SYMBOL_GPL(read_conf_data_lpm
);