/*
 * drivers/s390/cio/device_ops.c
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                    IBM Corporation
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
int
ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * CCWDEV_EARLY_NOTIFICATION and CCWDEV_REPORT_ALL are mutually
	 * exclusive.
	 */
	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL))
		return -EINVAL;
	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
	return 0;
}
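
/*
 * Usage sketch (illustrative, not part of this driver): a CCW device driver
 * would typically set its notification options once, before starting any
 * I/O, e.g. from its probe or set_online callback. The callback name below
 * is hypothetical.
 *
 *	static int my_driver_set_online(struct ccw_device *cdev)
 *	{
 *		int ret;
 *
 *		ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
 *		if (ret)
 *			return ret;
 *		// ... bring the device online, start initial I/O ...
 *		return 0;
 *	}
 */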
int
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_WAIT4IO &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	ret = cio_clear(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}
int
ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, __u8 key,
		     unsigned long flags)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = 1;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options(sch, flags);
	if (ret)
		return ret;
	ret = cio_start_key(sch, cpa, lpm, key);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}
int
ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
			     unsigned long intparm, __u8 lpm, __u8 key,
			     unsigned long flags, int expires)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
	if (ret != 0)
		ccw_device_set_timeout(cdev, 0);
	return ret;
}
int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		 unsigned long intparm, __u8 lpm, unsigned long flags)
{
	return ccw_device_start_key(cdev, cpa, intparm, lpm,
				    PAGE_DEFAULT_KEY, flags);
}
int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, unsigned long flags,
			 int expires)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
					    PAGE_DEFAULT_KEY, flags,
					    expires);
}
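
/*
 * Usage sketch (illustrative, not part of this driver): a caller builds a
 * channel program from one or more struct ccw1 entries in DMA-capable
 * storage and starts it while holding the ccw device lock; completion is
 * delivered to the driver's interrupt handler together with the intparm
 * passed here. The buffer, command code and intparm value are hypothetical;
 * get_ccwdev_lock() is assumed to be the usual accessor from <asm/ccwdev.h>.
 *
 *	struct ccw1 *ccw;	// allocated with GFP_KERNEL | GFP_DMA
 *	unsigned long flags;
 *	int ret;
 *
 *	ccw->cmd_code = CCW_CMD_RDC;		// any device command
 *	ccw->flags = CCW_FLAG_SLI;
 *	ccw->count = 64;
 *	ccw->cda = (__u32) __pa(data_buf);	// data buffer, GFP_DMA
 *
 *	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
 *	ret = ccw_device_start(cdev, ccw, (unsigned long) data_buf, 0, 0);
 *	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 *	if (ret)
 *		// -ENODEV, -EBUSY or -EINVAL: the request was not started
 *		...;
 */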
int
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_WAIT4IO &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	ret = cio_halt(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}
int
ccw_device_resume(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
		return -EINVAL;
	return cio_resume(sch);
}
/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
	struct subchannel *sch;
	unsigned int stctl;
	int ending_status;

	sch = to_subchannel(cdev->dev.parent);

	/*
	 * We call the device driver's interrupt handler if one of the
	 * following holds:
	 *  - we received ending status
	 *  - the action handler requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - unsolicited interrupts
	 */
	stctl = cdev->private->irb.scsw.stctl;
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	/*
	 * Now we are ready to call the device driver interrupt handler.
	 */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);

	/*
	 * Clear the old and now useless interrupt response block.
	 */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	return 1;
}
/*
 * Search for CIW command in extended sense data.
 */
struct ciw *
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
	int ciw_cnt;

	if (cdev->private->flags.esid == 0)
		return NULL;
	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
			return cdev->private->senseid.ciw + ciw_cnt;
	return NULL;
}
__u8
ccw_device_get_path_mask(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return 0;
	else
		return sch->vpm;
}
static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{
	if (!ip)
		/* unsolicited interrupt */
		return;

	/* Abuse intparm for error reporting. */
	if (IS_ERR(irb))
		cdev->private->intparm = -EIO;
	else if ((irb->scsw.dstat !=
		  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
		 (irb->scsw.cstat != 0)) {
		/*
		 * We didn't get channel end / device end. Check if path
		 * verification has been started; we can retry after it has
		 * finished. We also retry unit checks except for command
		 * reject or intervention required.
		 */
		if (cdev->private->flags.doverify ||
		    cdev->private->state == DEV_STATE_VERIFY)
			cdev->private->intparm = -EAGAIN;
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !(irb->ecw[0] &
		      (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
			cdev->private->intparm = -EAGAIN;
		else
			cdev->private->intparm = -EIO;
	} else
		cdev->private->intparm = 0;
	wake_up(&cdev->private->wait_q);
}
static int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	do {
		ret = cio_start(sch, ccw, lpm);
		if ((ret == -EBUSY) || (ret == -EACCES)) {
			/* Try again later. */
			spin_unlock_irq(&sch->lock);
			msleep(10);
			spin_lock_irq(&sch->lock);
			continue;
		}
		if (ret != 0)
			/* Non-retryable error. */
			break;
		/* Wait for end of request. */
		cdev->private->intparm = magic;
		spin_unlock_irq(&sch->lock);
		wait_event(cdev->private->wait_q,
			   (cdev->private->intparm == -EIO) ||
			   (cdev->private->intparm == -EAGAIN) ||
			   (cdev->private->intparm == 0));
		spin_lock_irq(&sch->lock);
		/* Check at least for channel end / device end */
		if (cdev->private->intparm == -EIO) {
			/* Non-retryable error. */
			ret = -EIO;
			break;
		}
		if (cdev->private->intparm == 0)
			/* Success. */
			break;
		/* Try again later. */
		spin_unlock_irq(&sch->lock);
		msleep(10);
		spin_lock_irq(&sch->lock);
	} while (1);

	return ret;
}
/**
 * read_dev_chars() - read device characteristics
 * @param cdev    target ccw device
 * @param buffer  pointer to buffer for rdc data
 * @param length  size of rdc data
 * @returns 0 for success, negative error value on failure
 *
 * Context:
 *   called for online device, lock not held
 **/
int
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	int ret;
	struct ccw1 *rdc_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT (4, "rddevch");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);

	rdc_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rdc_ccw)
		return -ENOMEM;
	rdc_ccw->cmd_code = CCW_CMD_RDC;
	rdc_ccw->count = length;
	rdc_ccw->flags = CCW_FLAG_SLI;
	ret = set_normalized_cda (rdc_ccw, (*buffer));
	if (ret != 0) {
		kfree(rdc_ccw);
		return ret;
	}

	spin_lock_irq(&sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C4C3 == ebcdic "RDC" */
		ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(&sch->lock);

	clear_normalized_cda (rdc_ccw);
	kfree(rdc_ccw);

	return ret;
}
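
/*
 * Usage sketch (illustrative): read_dev_chars() is called for an online
 * device without the subchannel lock held. The caller supplies a DMA-capable
 * buffer of the requested length; the buffer name and the length of 64 bytes
 * below are hypothetical.
 *
 *	void *rdc_data;
 *	int ret;
 *
 *	rdc_data = kzalloc(64, GFP_KERNEL | GFP_DMA);
 *	if (!rdc_data)
 *		return -ENOMEM;
 *	ret = read_dev_chars(cdev, &rdc_data, 64);
 *	if (ret)
 *		printk(KERN_WARNING "RDC failed, rc=%d\n", ret);
 *	// ... evaluate the device characteristics ...
 *	kfree(rdc_data);
 */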
/*
 * Read Configuration data using path mask.
 */
int
read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct ccw1 *rcd_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT (4, "rdconf");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;

	rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rcd_ccw)
		return -ENOMEM;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		kfree(rcd_ccw);
		return -ENOMEM;
	}
	rcd_ccw->cmd_code = ciw->cmd;
	rcd_ccw->cda = (__u32) __pa (rcd_buf);
	rcd_ccw->count = ciw->count;
	rcd_ccw->flags = CCW_FLAG_SLI;

	spin_lock_irq(&sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C3C4 == ebcdic "RCD" */
		ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(&sch->lock);

	/*
	 * On success we update the user-supplied parameters.
	 */
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	kfree(rcd_ccw);

	return ret;
}
/*
 * Read Configuration data.
 */
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
	return read_conf_data_lpm (cdev, buffer, length, 0);
}
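
/*
 * Usage sketch (illustrative): unlike read_dev_chars(), read_conf_data() and
 * read_conf_data_lpm() allocate the result buffer themselves and return its
 * length; the caller owns the buffer afterwards and must kfree() it. The
 * variable names are hypothetical.
 *
 *	void *conf_data = NULL;
 *	int conf_len = 0;
 *	int ret;
 *
 *	ret = read_conf_data(cdev, &conf_data, &conf_len);
 *	if (ret == 0) {
 *		// ... parse conf_len bytes of configuration data ...
 *		kfree(conf_data);
 *	}
 */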
/*
 * Try to break the lock on a boxed device.
 */
int
ccw_device_stlck(struct ccw_device *cdev)
{
	void *buf, *buf2;
	unsigned long flags;
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;

	if (cdev->drv && !cdev->private->options.force)
		return -EINVAL;

	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "stl lock");
	CIO_TRACE_EVENT(2, cdev->dev.bus_id);

	buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	if (!buf2) {
		kfree(buf);
		return -ENOMEM;
	}
	spin_lock_irqsave(&sch->lock, flags);
	ret = cio_enable_subchannel(sch, 3);
	if (ret)
		goto out_unlock;
	/*
	 * Setup ccw. We chain an unconditional reserve and a release so we
	 * only break the lock.
	 */
	cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
	cdev->private->iccws[0].cda = (__u32) __pa(buf);
	cdev->private->iccws[0].count = 32;
	cdev->private->iccws[0].flags = CCW_FLAG_CC;
	cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
	cdev->private->iccws[1].cda = (__u32) __pa(buf2);
	cdev->private->iccws[1].count = 32;
	cdev->private->iccws[1].flags = 0;
	ret = cio_start(sch, cdev->private->iccws, 0);
	if (ret) {
		cio_disable_subchannel(sch); //FIXME: return code?
		goto out_unlock;
	}
	cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
	spin_unlock_irqrestore(&sch->lock, flags);
	wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
	spin_lock_irqsave(&sch->lock, flags);
	cio_disable_subchannel(sch); //FIXME: return code?
	if ((cdev->private->irb.scsw.dstat !=
	     (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
	    (cdev->private->irb.scsw.cstat != 0))
		ret = -EIO;
	/* Clear irb. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
	kfree(buf);
	kfree(buf2);
	spin_unlock_irqrestore(&sch->lock, flags);
	return ret;
}
void *
ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	return chsc_get_chp_desc(sch, chp_no);
}
// FIXME: these have to go:

int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
	return cdev->private->sch_no;
}

int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
	return cdev->private->devno;
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(read_conf_data_lpm);