/*
 *  drivers/s390/char/tape_core.c
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright IBM Corp. 2001,2006
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Michael Holzheu <holzheu@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>	     // for kernel parameters
#include <linux/kmod.h>	     // for requesting modules
#include <linux/spinlock.h>  // for locks
#include <linux/vmalloc.h>
#include <linux/list.h>

#include <asm/types.h>	     // for variable types

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define PRINTK_HEADER "TAPE_CORE: "
#define LONG_BUSY_TIMEOUT 180 /* seconds */

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(unsigned long data);
/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock.
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);
/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT  ",
	[TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS",	[TO_CRYPT_ON] = "CON",
	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
	[TO_KEKL_QUERY] = "KLQ", [TO_RDC] = "RDC",
};
static int devid_to_int(struct ccw_dev_id *dev_id)
{
	return dev_id->devno + (dev_id->ssid << 16);
}
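
/*
 * Example (illustrative values, not taken from a real configuration):
 * for a device with subchannel set id 1 and device number 0x0180,
 * devid_to_int() returns 0x0180 + (1 << 16) = 0x00010180, i.e. the ssid
 * lands in bits 16-31 and the devno in bits 0-15 of the integer id.
 */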
/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 *        replaced by a link to the cdev tree.
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);
static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);
static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);
static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;
	ssize_t rc;

	tdev = (struct tape_device *) dev->driver_data;
	if (tdev->first_minor < 0)
		return scnprintf(buf, PAGE_SIZE, "N/A\n");

	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = scnprintf(buf, PAGE_SIZE, "---\n");
	else {
		struct tape_request *req;

		req = list_entry(tdev->req_queue.next, struct tape_request,
			list);
		rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
	}
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
	return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);
static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;

	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);
static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,
	NULL
};

static struct attribute_group tape_attr_group = {
	.attrs = tape_attrs,
};
/*
 * Tape state functions
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
	const char *str;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "ts_set err: not oper\n");
		return;
	}
	DBF_EVENT(4, "ts. dev:	%x\n", device->first_minor);
	DBF_EVENT(4, "old ts:\t\n");
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	DBF_EVENT(4, "new ts:\t\n");
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	device->tape_state = newstate;
	wake_up(&device->state_change_wq);
}
void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
	if (device->medium_state == newstate)
		return;
	switch (newstate) {
	case MS_UNLOADED:
		device->tape_generic_status |= GMT_DR_OPEN(~0);
		PRINT_INFO("(%s): Tape is unloaded\n",
			   device->cdev->dev.bus_id);
		break;
	case MS_LOADED:
		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
		PRINT_INFO("(%s): Tape has been mounted\n",
			   device->cdev->dev.bus_id);
		break;
	default:
		// print nothing
		break;
	}
	device->medium_state = newstate;
	wake_up(&device->state_change_wq);
}
/*
 * Stop running ccw. Has to be called with the device lock held.
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			request->status = TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			request->status = TAPE_REQUEST_CANCEL;
			schedule_delayed_work(&device->tape_dnr, 0);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			BUG();
		}
	}

	return rc;
}
/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}
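
/*
 * Illustrative example (the value of TAPE_MINORS_PER_DEV is assumed here,
 * see tape.h for the real one): with TAPE_MINORS_PER_DEV == 2 the first
 * three registered drives get first_minor 0, 2 and 4. If the drive with
 * first_minor 2 goes away, the next call to tape_assign_minor() stops at
 * the first hole in the sorted list and hands out 2 again.
 */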
/* remove device from the list */
void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}
/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		   struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	init_timer(&device->lb_timeout);
	device->lb_timeout.function = tape_long_busy_timeout;

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		PRINT_ERR("Cannot get module. Module gone.\n");
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;
	rc = tapeblock_setup_device(device);
	if (rc)
		goto out_char;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

out_char:
	tapechar_cleanup_device(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out_minor:
	tape_remove_minor(device);
out:
	module_put(discipline->owner);
	return rc;
}
static int
tape_cleanup_device(struct tape_device *device)
{
	tapeblock_cleanup_device(device);
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);

	return 0;
}
/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct tape_device *device)
{
	if (!device) {
		PRINT_ERR("tape_generic_offline: no such device\n");
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		PRINT_WARN("(%s): Set offline failed "
			"- drive in use.\n",
			device->cdev->dev.bus_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}
/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		PRINT_INFO("can't allocate memory for "
			   "tape info structure\n");
		return ERR_PTR(-ENOMEM);
	}
	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		PRINT_INFO("can't allocate memory for modeset byte\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

	return device;
}
/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device_reference(struct tape_device *device)
{
	DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device,
		atomic_inc_return(&device->ref_count));

	return device;
}
/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero free the device structure.
 * The function returns a NULL pointer to be used by the caller
 * for clearing reference pointers.
 */
struct tape_device *
tape_put_device(struct tape_device *device)
{
	int remain;

	remain = atomic_dec_return(&device->ref_count);
	if (remain > 0) {
		DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);
	} else {
		if (remain < 0) {
			DBF_EVENT(4, "put device without reference\n");
			PRINT_ERR("put device without reference\n");
		} else {
			DBF_EVENT(4, "tape_free_device(%p)\n", device);
			kfree(device->modeset_byte);
			kfree(device);
		}
	}

	return NULL;
}
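
/*
 * Typical calling pattern for the two reference helpers above (sketch
 * only, this exact code does not appear in the driver): the caller drops
 * its pointer by assigning the NULL return value of tape_put_device().
 *
 *	struct tape_device *ref;
 *
 *	ref = tape_get_device_reference(device);
 *	// ... use ref while holding the reference ...
 *	ref = tape_put_device(ref);	// ref is NULL afterwards
 */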
/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_get_device(int devindex)
{
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device_reference(tmp);
			break;
		}
	}
	read_unlock(&tape_device_lock);
	return device;
}
/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;
	int ret;
	struct ccw_dev_id dev_id;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
	if (ret) {
		tape_put_device(device);
		PRINT_ERR("probe failed for tape device %s\n", cdev->dev.bus_id);
		return ret;
	}
	cdev->dev.driver_data = device;
	cdev->handler = __tape_do_irq;
	device->cdev = cdev;
	ccw_device_get_id(cdev, &dev_id);
	device->cdev_id = devid_to_int(&dev_id);
	PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
	return ret;
}
static void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request *	request;
	struct list_head *	l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = tape_put_device(device);
		request->rc = -EIO;
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}
/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *	device;

	device = cdev->dev.driver_data;
	if (!device) {
		PRINT_ERR("No device pointer in tape_generic_remove!\n");
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		PRINT_WARN("(%s): Drive in use vanished - "
			"expect trouble!\n",
			device->cdev->dev.bus_id);
		PRINT_WARN("State was %i\n", device->tape_state);
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	if (cdev->dev.driver_data != NULL) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
	}
}
/*
 * Allocate a new tape ccw request
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
		BUG();

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* allocate channel program */
	if (cplength > 0) {
		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
					  GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}
/*
 * Free tape ccw request
 */
void
tape_free_request (struct tape_request * request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device != NULL) {
		request->device = tape_put_device(request->device);
	}
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}
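
/*
 * Request lifecycle sketch (illustrative only; apart from the functions
 * and fields defined in this driver and in tape.h, everything here is a
 * made-up caller):
 *
 *	struct tape_request *request;
 *
 *	request = tape_alloc_request(1, 0);	// one CCW, no data buffer
 *	if (IS_ERR(request))
 *		return PTR_ERR(request);
 *	request->op = TO_NOP;
 *	// ... build the channel program in request->cpaddr ...
 *	// ... run it via tape_do_io() or tape_do_io_async() ...
 *	tape_free_request(request);
 */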
static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

#ifdef CONFIG_S390_TAPE_BLOCK
	if (request->op == TO_BLOCK)
		device->discipline->check_locate(device, request);
#endif
	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, 0);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}
static void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on request queue until one is
	 * started successfully.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;
		/*
		 * Request has already been stopped. We have to wait until
		 * the request is removed from the queue in the interrupt
		 * handling.
		 */
		if (request->status == TAPE_REQUEST_DONE)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);
		} else {
			rc = __tape_start_io(device, request);
		}
		if (rc == 0)
			return;

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback for notification of request completion. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}
static void
tape_delayed_next_request(struct work_struct *work)
{
	struct tape_device *device =
		container_of(work, struct tape_device, tape_dnr.work);

	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
static void tape_long_busy_timeout(unsigned long data)
{
	struct tape_request *request;
	struct tape_device *device;

	device = (struct tape_device *) data;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	request = list_entry(device->req_queue.next, struct tape_request, list);
	if (request->status != TAPE_REQUEST_LONG_BUSY)
		BUG();
	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
	__tape_start_next_request(device);
	device->lb_timeout.data = (unsigned long) tape_put_device(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
static void
__tape_end_request(
	struct tape_device *	device,
	struct tape_request *	request,
	int			rc)
{
	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
	if (request) {
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback for notification of request completion. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
}
/*
 * Write sense data to console/dbf
 */
void
tape_dump_sense(struct tape_device* device, struct tape_request *request,
		struct irb *irb)
{
	unsigned int *sptr;

	PRINT_INFO("-------------------------------------------------\n");
	PRINT_INFO("DSTAT : %02x  CSTAT: %02x  CPA: %04x\n",
		   irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
	PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
	if (request != NULL)
		PRINT_INFO("OP    : %s\n", tape_op_verbose[request->op]);

	sptr = (unsigned int *) irb->ecw;
	PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
		   sptr[0], sptr[1], sptr[2], sptr[3]);
	PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
		   sptr[4], sptr[5], sptr[6], sptr[7]);
	PRINT_INFO("--------------------------------------------------\n");
}
/*
 * Write sense data to dbf
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	unsigned int *sptr;
	const char *op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x   CSTAT: %02x\n",
		  irb->scsw.dstat, irb->scsw.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}
/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
	case TO_MSEN:
	case TO_ASSIGN:
	case TO_UNASSIGN:
	case TO_READ_ATTMSG:
	case TO_RDC:
		if (device->tape_state == TS_INIT)
			break;
		if (device->tape_state == TS_UNUSED)
			break;
	default:
		if (device->tape_state == TS_BLKUSE)
			break;
		if (device->tape_state != TS_IN_USE)
			return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device_reference(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p add to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}
/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptible for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}
int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	wait_queue_head_t wq;
	int rc;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &wq;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(wq, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}
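
/*
 * Sketch of a synchronous caller (illustrative only, this exact code is
 * not part of the driver; the discipline code normally does this from its
 * mtop handlers):
 *
 *	rc = tape_do_io(device, request);	// sleeps until completion
 *	if (rc == 0)
 *		DBF_LH(6, "residual count %i\n", request->rescnt);
 *	tape_free_request(request);
 */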
/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptible for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}
int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	wait_queue_head_t wq;
	int rc;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &wq;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(wq, (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt. */
		do {
			rc = wait_event_interruptible(
				wq,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}
/*
 * Stop running ccw.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	device = (struct tape_device *) cdev->dev.driver_data;
	if (device == NULL) {
		PRINT_ERR("could not get device structure for %s "
			  "in interrupt\n", cdev->dev.bus_id);
		return;
	}
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {
		case -ETIMEDOUT:
			PRINT_WARN("(%s): Request timed out\n",
				cdev->dev.bus_id);
		case -EIO:
			__tape_end_request(device, request, -EIO);
			break;
		default:
			PRINT_ERR("(%s): Unexpected i/o error %li\n",
				cdev->dev.bus_id,
				PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is a deferred error and the last start I/O did
	 * not succeed. At this point the condition that caused the deferred
	 * error might still apply. So we just schedule the request to be
	 * started later.
	 */
	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
	    (request->status == TAPE_REQUEST_IN_IO)) {
		DBF_EVENT(3, "(%08x): deferred cc=%i, fctl=%i. restarting\n",
			device->cdev_id, irb->scsw.cc, irb->scsw.fctl);
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, HZ);
		return;
	}

	/* May be an unsolicited irq */
	if (request != NULL)
		request->rescnt = irb->scsw.count;
	else if ((irb->scsw.dstat == 0x85 || irb->scsw.dstat == 0x80) &&
		 !list_empty(&device->req_queue)) {
		/* Not Ready to Ready after long busy ? */
		struct tape_request *req;
		req = list_entry(device->req_queue.next,
				 struct tape_request, list);
		if (req->status == TAPE_REQUEST_LONG_BUSY) {
			DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
			if (del_timer(&device->lb_timeout)) {
				device->lb_timeout.data = (unsigned long)
					tape_put_device(device);
				__tape_start_next_request(device);
			}
			return;
		}
	}
	if (irb->scsw.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3, "-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Requests that were canceled still come back with an interrupt.
	 * To detect these requests the state will be set to TAPE_REQUEST_DONE.
	 */
	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_LONG_BUSY:
		device->lb_timeout.data =
			(unsigned long) tape_get_device_reference(device);
		device->lb_timeout.expires = jiffies +
			LONG_BUSY_TIMEOUT * HZ;
		DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
		add_timer(&device->lb_timeout);
		request->status = TAPE_REQUEST_LONG_BUSY;
		break;
	case TAPE_IO_RETRY:
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_STOP:
		rc = __tape_cancel_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	default:
		if (rc > 0) {
			DBF_EVENT(6, "xunknownrc\n");
			PRINT_ERR("Invalid return code from discipline "
				  "interrupt function.\n");
			__tape_end_request(device, request, -EIO);
		} else {
			__tape_end_request(device, request, rc);
		}
		break;
	}
}
/*
 * Tape device open function used by tape_char & tape_block frontends.
 */
int
tape_open(struct tape_device *device)
{
	int rc;

	spin_lock(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "TAPE:nodev\n");
		rc = -ENODEV;
	} else if (device->tape_state == TS_IN_USE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->tape_state == TS_BLKUSE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->discipline != NULL &&
		   !try_module_get(device->discipline->owner)) {
		DBF_EVENT(6, "TAPE:nodisc\n");
		rc = -ENODEV;
	} else {
		tape_state_set(device, TS_IN_USE);
		rc = 0;
	}
	spin_unlock(get_ccwdev_lock(device->cdev));
	return rc;
}
/*
 * Tape device release function used by tape_char & tape_block frontends.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock(get_ccwdev_lock(device->cdev));
	return 0;
}
/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
	tape_mtop_fn fn;
	int rc;

	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg:  %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
		return -EINVAL;
	fn = device->discipline->mtop_array[mt_op];
	if (fn == NULL)
		return -EINVAL;

	/* We assume that the backends can handle count up to 500. */
	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
		rc = 0;
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)
				break;
		if (rc == 0)
			rc = fn(device, mt_count);
	} else
		rc = fn(device, mt_count);
	return rc;
}
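
/*
 * Example of the chunking above (arithmetic only): an MTFSR request with
 * mt_count == 1200 is executed as fn(device, 500), fn(device, 500) and
 * finally fn(device, 200), stopping early if any of the calls fails.
 */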
/*
 * Tape init function.
 */
static int
tape_init (void)
{
	TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif
	DBF_EVENT(3, "tape init\n");
	tape_proc_init();
	tapechar_init();
	tapeblock_init();
	return 0;
}

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tapeblock_exit();
	tape_proc_cleanup();
	debug_unregister (TAPE_DBF_AREA);
}
MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);
EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device_reference);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);