/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 */
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"

/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4
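/*
 * Note: DASD_CHANQ_MAX_SIZE caps how many requests
 * __dasd_process_blk_queue() will move from the block device request
 * queue to a device's ccw queue in one pass (see the nr_queued check
 * there).
 */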
/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
                   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_device * device);
static void dasd_setup_queue(struct dasd_device * device);
static void dasd_free_queue(struct dasd_device * device);
static void dasd_flush_request_queue(struct dasd_device *);
static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
static void dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
static void do_kick_device(void *data);
/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *
dasd_alloc_device(void)
{
        struct dasd_device *device;

        device = kzalloc(sizeof (struct dasd_device), GFP_ATOMIC);
        if (device == NULL)
                return ERR_PTR(-ENOMEM);
        /* open_count = 0 means device online but not in use */
        atomic_set(&device->open_count, -1);

        /* Get two pages for normal block device operations. */
        device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
        if (device->ccw_mem == NULL) {
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }
        /* Get one page for error recovery. */
        device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
        if (device->erp_mem == NULL) {
                free_pages((unsigned long) device->ccw_mem, 1);
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }

        dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
        dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
        spin_lock_init(&device->mem_lock);
        spin_lock_init(&device->request_queue_lock);
        atomic_set (&device->tasklet_scheduled, 0);
        tasklet_init(&device->tasklet,
                     (void (*)(unsigned long)) dasd_tasklet,
                     (unsigned long) device);
        INIT_LIST_HEAD(&device->ccw_queue);
        init_timer(&device->timer);
        INIT_WORK(&device->kick_work, do_kick_device, device);
        device->state = DASD_STATE_NEW;
        device->target = DASD_STATE_NEW;

        return device;
}
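/*
 * Note on the memory layout above: each device carries two private chunk
 * pools. The two ccw_mem pages back dasd_smalloc_request() for normal
 * block device requests, while the separate erp_mem page is set aside for
 * error recovery requests (allocated by the ERP code outside this file),
 * so recovery can still build a channel program under memory pressure.
 */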
/*
 * Free memory of a device structure.
 */
void
dasd_free_device(struct dasd_device *device)
{
        kfree(device->private);
        free_page((unsigned long) device->erp_mem);
        free_pages((unsigned long) device->ccw_mem, 1);
        kfree(device);
}
/*
 * Make a new device known to the system.
 */
static int
dasd_state_new_to_known(struct dasd_device *device)
{
        int rc;

        /*
         * As long as the device is not in state DASD_STATE_NEW we want to
         * keep the reference count > 0.
         */
        dasd_get_device(device);

        rc = dasd_alloc_queue(device);
        if (rc) {
                dasd_put_device(device);
                return rc;
        }

        device->state = DASD_STATE_KNOWN;
        return 0;
}
/*
 * Let the system forget about a device.
 */
static int
dasd_state_known_to_new(struct dasd_device * device)
{
        /* Disable extended error reporting for this device. */
        dasd_eer_disable(device);
        /* Forget the discipline information. */
        if (device->discipline)
                module_put(device->discipline->owner);
        device->discipline = NULL;
        if (device->base_discipline)
                module_put(device->base_discipline->owner);
        device->base_discipline = NULL;
        device->state = DASD_STATE_NEW;

        dasd_free_queue(device);

        /* Give up reference we took in dasd_state_new_to_known. */
        dasd_put_device(device);
        return 0;
}
/*
 * Request the irq line for the device.
 */
static int
dasd_state_known_to_basic(struct dasd_device * device)
{
        int rc;

        /* Allocate and register gendisk structure. */
        rc = dasd_gendisk_alloc(device);
        if (rc)
                return rc;

        /* register 'device' debug area, used for all DBF_DEV_XXX calls */
        device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
                                            8 * sizeof (long));
        debug_register_view(device->debug_area, &debug_sprintf_view);
        debug_set_level(device->debug_area, DBF_EMERG);
        DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

        device->state = DASD_STATE_BASIC;
        return 0;
}
/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int
dasd_state_basic_to_known(struct dasd_device * device)
{
        dasd_gendisk_free(device);
        dasd_flush_ccw_queue(device, 1);
        DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
        if (device->debug_area != NULL) {
                debug_unregister(device->debug_area);
                device->debug_area = NULL;
        }
        device->state = DASD_STATE_KNOWN;
        return 0;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int
dasd_state_basic_to_ready(struct dasd_device * device)
{
        int rc;

        rc = 0;
        if (device->discipline->do_analysis != NULL)
                rc = device->discipline->do_analysis(device);
        if (rc) {
                if (rc != -EAGAIN)
                        device->state = DASD_STATE_UNFMT;
                return rc;
        }
        /* make disk known with correct capacity */
        dasd_setup_queue(device);
        set_capacity(device->gdp, device->blocks << device->s2b_shift);
        device->state = DASD_STATE_READY;
        rc = dasd_scan_partitions(device);
        if (rc)
                device->state = DASD_STATE_BASIC;
        return rc;
}
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int
dasd_state_ready_to_basic(struct dasd_device * device)
{
        dasd_flush_ccw_queue(device, 0);
        dasd_destroy_partitions(device);
        dasd_flush_request_queue(device);
        device->blocks = 0;
        device->bp_block = 0;
        device->s2b_shift = 0;
        device->state = DASD_STATE_BASIC;
        return 0;
}
/*
 * Back to basic.
 */
static int
dasd_state_unfmt_to_basic(struct dasd_device * device)
{
        device->state = DASD_STATE_BASIC;
        return 0;
}
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
        device->state = DASD_STATE_ONLINE;
        dasd_schedule_bh(device);
        return 0;
}
/*
 * Stop the requeueing of requests again.
 */
static int
dasd_state_online_to_ready(struct dasd_device * device)
{
        device->state = DASD_STATE_READY;
        return 0;
}
/*
 * Device startup state changes.
 */
static int
dasd_increase_state(struct dasd_device *device)
{
        int rc;

        rc = 0;
        if (device->state == DASD_STATE_NEW &&
            device->target >= DASD_STATE_KNOWN)
                rc = dasd_state_new_to_known(device);

        if (!rc &&
            device->state == DASD_STATE_KNOWN &&
            device->target >= DASD_STATE_BASIC)
                rc = dasd_state_known_to_basic(device);

        if (!rc &&
            device->state == DASD_STATE_BASIC &&
            device->target >= DASD_STATE_READY)
                rc = dasd_state_basic_to_ready(device);

        if (!rc &&
            device->state == DASD_STATE_UNFMT &&
            device->target > DASD_STATE_UNFMT)
                rc = -EPERM;

        if (!rc &&
            device->state == DASD_STATE_READY &&
            device->target >= DASD_STATE_ONLINE)
                rc = dasd_state_ready_to_online(device);

        return rc;
}
/*
 * Device shutdown state changes.
 */
static int
dasd_decrease_state(struct dasd_device *device)
{
        if (device->state == DASD_STATE_ONLINE &&
            device->target <= DASD_STATE_READY)
                dasd_state_online_to_ready(device);

        if (device->state == DASD_STATE_READY &&
            device->target <= DASD_STATE_BASIC)
                dasd_state_ready_to_basic(device);

        if (device->state == DASD_STATE_UNFMT &&
            device->target <= DASD_STATE_BASIC)
                dasd_state_unfmt_to_basic(device);

        if (device->state == DASD_STATE_BASIC &&
            device->target <= DASD_STATE_KNOWN)
                dasd_state_basic_to_known(device);

        if (device->state == DASD_STATE_KNOWN &&
            device->target <= DASD_STATE_NEW)
                dasd_state_known_to_new(device);

        return 0;
}
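/*
 * Overview of the state machine driven by the two functions above:
 *
 *      NEW -> KNOWN -> BASIC -> READY -> ONLINE
 *                          \
 *                           -> UNFMT (analysis found an unformatted volume)
 *
 * dasd_increase_state() walks toward device->target one step at a time
 * (re-entered via dasd_change_state() after an -EAGAIN from the
 * discipline's do_analysis), and dasd_decrease_state() walks the same
 * chain back down during shutdown.
 */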
/*
 * This is the main startup/shutdown routine.
 */
static void
dasd_change_state(struct dasd_device *device)
{
        int rc;

        if (device->state == device->target)
                /* Already where we want to go today... */
                return;
        if (device->state < device->target)
                rc = dasd_increase_state(device);
        else
                rc = dasd_decrease_state(device);
        if (rc && rc != -EAGAIN)
                device->target = device->state;

        if (device->state == device->target)
                wake_up(&dasd_init_waitq);
}
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void
do_kick_device(void *data)
{
        struct dasd_device *device;

        device = (struct dasd_device *) data;
        dasd_change_state(device);
        dasd_schedule_bh(device);
        dasd_put_device(device);
}
void
dasd_kick_device(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to dasd_kick_device to the kernel event daemon. */
        schedule_work(&device->kick_work);
}
/*
 * Set the target state for a device and start the state change.
 */
void
dasd_set_target_state(struct dasd_device *device, int target)
{
        /* If we are in probeonly mode stop at DASD_STATE_READY. */
        if (dasd_probeonly && target > DASD_STATE_READY)
                target = DASD_STATE_READY;
        if (device->target != target) {
                if (device->state == target)
                        wake_up(&dasd_init_waitq);
                device->target = target;
        }
        if (device->state != device->target)
                dasd_change_state(device);
}
/*
 * Enable devices with device numbers in [from..to].
 */
static inline int
_wait_for_device(struct dasd_device *device)
{
        return (device->state == device->target);
}

void
dasd_enable_device(struct dasd_device *device)
{
        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN)
                /* No discipline for device found. */
                dasd_set_target_state(device, DASD_STATE_NEW);
        /* Now wait for the devices to come up. */
        wait_event(dasd_init_waitq, _wait_for_device(device));
}
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;
/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, device) \
{ \
        int index; \
        for (index = 0; index < 31 && value >> (2+index); index++); \
        dasd_global_profile.counter[index]++; \
        device->profile.counter[index]++; \
}
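/*
 * The loop in dasd_profile_counter() is a cheap logarithmic histogram:
 * it finds the smallest index (0..31) for which value >> (2+index) is
 * zero, i.e. roughly log2(value) - 2, clamped to 31. For example,
 * value = 100 stops at index 5 because 100 >> 7 == 0 while 100 >> 6 != 0.
 */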
/*
 * Add profiling information for cqr before execution.
 */
static inline void
dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req * cqr,
                   struct request *req)
{
        struct list_head *l;
        unsigned int counter;

        if (dasd_profile_level != DASD_PROFILE_ON)
                return;

        /* count the length of the chanq for statistics */
        counter = 0;
        list_for_each(l, &device->ccw_queue)
                if (++counter >= 31)
                        break;
        dasd_global_profile.dasd_io_nr_req[counter]++;
        device->profile.dasd_io_nr_req[counter]++;
}
/*
 * Add profiling information for cqr after execution.
 */
static void
dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req * cqr,
                 struct request *req)
{
        long strtime, irqtime, endtime, tottime;        /* in microseconds */
        long tottimeps, sectors;

        if (dasd_profile_level != DASD_PROFILE_ON)
                return;

        sectors = req->nr_sectors;
        if (!cqr->buildclk || !cqr->startclk ||
            !cqr->stopclk || !cqr->endclk ||
            !sectors)
                return;

        strtime = ((cqr->startclk - cqr->buildclk) >> 12);
        irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
        endtime = ((cqr->endclk - cqr->stopclk) >> 12);
        tottime = ((cqr->endclk - cqr->buildclk) >> 12);
        tottimeps = tottime / sectors;

        if (!dasd_global_profile.dasd_io_reqs)
                memset(&dasd_global_profile, 0,
                       sizeof (struct dasd_profile_info_t));
        dasd_global_profile.dasd_io_reqs++;
        dasd_global_profile.dasd_io_sects += sectors;

        if (!device->profile.dasd_io_reqs)
                memset(&device->profile, 0,
                       sizeof (struct dasd_profile_info_t));
        device->profile.dasd_io_reqs++;
        device->profile.dasd_io_sects += sectors;

        dasd_profile_counter(sectors, dasd_io_secs, device);
        dasd_profile_counter(tottime, dasd_io_times, device);
        dasd_profile_counter(tottimeps, dasd_io_timps, device);
        dasd_profile_counter(strtime, dasd_io_time1, device);
        dasd_profile_counter(irqtime, dasd_io_time2, device);
        dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
        dasd_profile_counter(endtime, dasd_io_time3, device);
}
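/*
 * Note on the ">> 12" conversions above: the S/390 TOD clock advances
 * bit 51 once per microsecond, so shifting a clock difference right by
 * 12 bits converts it to microseconds, matching the comment on the
 * variable declarations.
 */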
#else                           /* CONFIG_DASD_PROFILE */

#define dasd_profile_start(device, cqr, req) do {} while (0)
#define dasd_profile_end(device, cqr, req) do {} while (0)

#endif                          /* CONFIG_DASD_PROFILE */
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *
dasd_kmalloc_request(char *magic, int cplength, int datasize,
                     struct dasd_device * device)
{
        struct dasd_ccw_req *cqr;

        /* Sanity checks */
        BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
             (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

        cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
                                      GFP_ATOMIC | GFP_DMA);
                if (cqr->cpaddr == NULL) {
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
                if (cqr->data == NULL) {
                        kfree(cqr->cpaddr);
                        kfree(cqr);
                        return ERR_PTR(-ENOMEM);
                }
        }
        strncpy((char *) &cqr->magic, magic, 4);
        ASCEBC((char *) &cqr->magic, 4);
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}
struct dasd_ccw_req *
dasd_smalloc_request(char *magic, int cplength, int datasize,
                     struct dasd_device * device)
{
        unsigned long flags;
        struct dasd_ccw_req *cqr;
        char *data;
        int size;

        /* Sanity checks */
        BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
             (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

        size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
        spin_lock_irqsave(&device->mem_lock, flags);
        cqr = (struct dasd_ccw_req *)
                dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        if (cqr == NULL)
                return ERR_PTR(-ENOMEM);
        memset(cqr, 0, sizeof(struct dasd_ccw_req));
        data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
        cqr->cpaddr = NULL;
        if (cplength > 0) {
                cqr->cpaddr = (struct ccw1 *) data;
                data += cplength*sizeof(struct ccw1);
                memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
        }
        cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
        }
        strncpy((char *) &cqr->magic, magic, 4);
        ASCEBC((char *) &cqr->magic, 4);
        set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        dasd_get_device(device);
        return cqr;
}
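/*
 * The expression (x + 7L) & -8L used above rounds x up to the next
 * multiple of 8, e.g. 121 -> 128 and 128 -> 128. It keeps the cqr
 * header, the channel program and the data area 8-byte aligned within
 * the single chunk returned by dasd_alloc_chunk().
 */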
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void
dasd_kfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
#ifdef CONFIG_64BIT
        struct ccw1 *ccw;

        /* Clear any idals used for the request. */
        ccw = cqr->cpaddr;
        do {
                clear_normalized_cda(ccw);
        } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
        kfree(cqr->cpaddr);
        kfree(cqr->data);
        kfree(cqr);
        dasd_put_device(device);
}
void
dasd_sfree_request(struct dasd_ccw_req * cqr, struct dasd_device * device)
{
        unsigned long flags;

        spin_lock_irqsave(&device->mem_lock, flags);
        dasd_free_chunk(&device->ccw_chunks, cqr);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
}
/*
 * Check discipline magic in cqr.
 */
static inline int
dasd_check_cqr(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;

        if (cqr == NULL)
                return -EINVAL;
        device = cqr->device;
        if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
                DEV_MESSAGE(KERN_WARNING, device,
                            " dasd_ccw_req 0x%08x magic doesn't match"
                            " discipline 0x%08x",
                            cqr->magic,
                            *(unsigned int *) device->discipline->name);
                return -EINVAL;
        }
        return 0;
}
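/*
 * The magic comparison above works because both allocation functions
 * convert the caller's four-byte magic to EBCDIC with ASCEBC(), and
 * each discipline keeps the EBCDIC form of its name in ->ebcname.
 */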
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int
dasd_term_IO(struct dasd_ccw_req * cqr)
{
        struct dasd_device *device;
        int retries, rc;

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc)
                return rc;
        retries = 0;
        device = (struct dasd_device *) cqr->device;
        while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
                rc = ccw_device_clear(device->cdev, (long) cqr);
                switch (rc) {
                case 0: /* termination successful */
                        cqr->retries--;
                        cqr->status = DASD_CQR_CLEAR;
                        cqr->stopclk = get_clock();
                        DBF_DEV_EVENT(DBF_DEBUG, device,
                                      "terminate cqr %p successful",
                                      cqr);
                        break;
                case -ENODEV:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device gone, retry");
                        break;
                case -EIO:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "I/O error, retry");
                        break;
                case -EINVAL:
                case -EBUSY:
                        DBF_DEV_EVENT(DBF_ERR, device, "%s",
                                      "device busy, retry later");
                        break;
                default:
                        DEV_MESSAGE(KERN_ERR, device,
                                    "line %d unknown RC=%d, please "
                                    "report to linux390@de.ibm.com",
                                    __LINE__, rc);
                        BUG();
                        break;
                }
                retries++;
        }
        dasd_schedule_bh(device);
        return rc;
}
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int
dasd_start_IO(struct dasd_ccw_req * cqr)
{
        struct dasd_device *device;
        int rc;

        /* Check the cqr */
        rc = dasd_check_cqr(cqr);
        if (rc)
                return rc;
        device = (struct dasd_device *) cqr->device;
        if (cqr->retries < 0) {
                DEV_MESSAGE(KERN_DEBUG, device,
                            "start_IO: request %p (%02x/%i) - no retry left.",
                            cqr, cqr->status, cqr->retries);
                cqr->status = DASD_CQR_FAILED;
                return -EIO;
        }
        cqr->startclk = get_clock();
        cqr->starttime = jiffies;
        cqr->retries--;
        rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
                              cqr->lpm, 0);
        switch (rc) {
        case 0:
                cqr->status = DASD_CQR_IN_IO;
                DBF_DEV_EVENT(DBF_DEBUG, device,
                              "start_IO: request %p started successful",
                              cqr);
                break;
        case -EBUSY:
                DBF_DEV_EVENT(DBF_ERR, device, "%s",
                              "start_IO: device busy, retry later");
                break;
        case -ETIMEDOUT:
                DBF_DEV_EVENT(DBF_ERR, device, "%s",
                              "start_IO: request timeout, retry later");
                break;
        case -EACCES:
                /* -EACCES indicates that the request used only a
                 * subset of the available paths and all these
                 * paths are gone.
                 * Do a retry with all available paths.
                 */
                cqr->lpm = LPM_ANYPATH;
                DBF_DEV_EVENT(DBF_ERR, device, "%s",
                              "start_IO: selected paths gone,"
                              " retry on all paths");
                break;
        case -ENODEV:
        case -EIO:
                DBF_DEV_EVENT(DBF_ERR, device, "%s",
                              "start_IO: device gone, retry");
                break;
        default:
                DEV_MESSAGE(KERN_ERR, device,
                            "line %d unknown RC=%d, please report"
                            " to linux390@de.ibm.com", __LINE__, rc);
                BUG();
                break;
        }
        return rc;
}
/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void
dasd_timeout_device(unsigned long ptr)
{
        unsigned long flags;
        struct dasd_device *device;

        device = (struct dasd_device *) ptr;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        /* re-activate request queue */
        device->stopped &= ~DASD_STOPPED_PENDING;
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        dasd_schedule_bh(device);
}
/*
 * Setup timeout for a device in jiffies.
 */
void
dasd_set_timer(struct dasd_device *device, int expires)
{
        if (expires == 0) {
                if (timer_pending(&device->timer))
                        del_timer(&device->timer);
                return;
        }
        if (timer_pending(&device->timer)) {
                if (mod_timer(&device->timer, jiffies + expires))
                        return;
        }
        device->timer.function = dasd_timeout_device;
        device->timer.data = (unsigned long) device;
        device->timer.expires = jiffies + expires;
        add_timer(&device->timer);
}
/*
 * Clear timeout for a device.
 */
void
dasd_clear_timer(struct dasd_device *device)
{
        if (timer_pending(&device->timer))
                del_timer(&device->timer);
}
static void
dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
{
        struct dasd_ccw_req *cqr;
        struct dasd_device *device;

        cqr = (struct dasd_ccw_req *) intparm;
        if (cqr->status != DASD_CQR_IN_IO) {
                MESSAGE(KERN_DEBUG,
                        "invalid status in handle_killed_request: "
                        "bus_id %s, status %02x",
                        cdev->dev.bus_id, cqr->status);
                return;
        }

        device = (struct dasd_device *) cqr->device;
        if (device == NULL ||
            device != dasd_device_from_cdev(cdev) ||
            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
                        cdev->dev.bus_id);
                return;
        }

        /* Schedule request to be retried. */
        cqr->status = DASD_CQR_QUEUED;

        dasd_clear_timer(device);
        dasd_schedule_bh(device);
        dasd_put_device(device);
}
static void
dasd_handle_state_change_pending(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        struct list_head *l, *n;

        /* First of all start sense subsystem status request. */
        dasd_eer_snss(device);

        device->stopped &= ~DASD_STOPPED_PENDING;

        /* restart all 'running' IO on queue */
        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, list);
                if (cqr->status == DASD_CQR_IN_IO) {
                        cqr->status = DASD_CQR_QUEUED;
                }
        }
        dasd_clear_timer(device);
        dasd_schedule_bh(device);
}
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void
dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
                 struct irb *irb)
{
        struct dasd_ccw_req *cqr, *next;
        struct dasd_device *device;
        unsigned long long now;
        int expires;
        dasd_era_t era;
        char mask;

        if (IS_ERR(irb)) {
                switch (PTR_ERR(irb)) {
                case -EIO:
                        dasd_handle_killed_request(cdev, intparm);
                        break;
                case -ETIMEDOUT:
                        printk(KERN_WARNING"%s(%s): request timed out\n",
                               __FUNCTION__, cdev->dev.bus_id);
                        //FIXME - dasd uses own timeout interface...
                        break;
                default:
                        printk(KERN_WARNING"%s(%s): unknown error %ld\n",
                               __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
                }
                return;
        }

        now = get_clock();

        DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
                  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
                  (unsigned int) intparm);

        /* first of all check for state change pending interrupt */
        mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
        if ((irb->scsw.dstat & mask) == mask) {
                device = dasd_device_from_cdev(cdev);
                if (!IS_ERR(device)) {
                        dasd_handle_state_change_pending(device);
                        dasd_put_device(device);
                }
                return;
        }

        cqr = (struct dasd_ccw_req *) intparm;

        /* check for unsolicited interrupts */
        if (cqr == NULL) {
                MESSAGE(KERN_DEBUG,
                        "unsolicited interrupt received: bus_id %s",
                        cdev->dev.bus_id);
                return;
        }

        device = (struct dasd_device *) cqr->device;
        if (device == NULL ||
            strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
                MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
                        cdev->dev.bus_id);
                return;
        }

        /* Check for clear pending */
        if (cqr->status == DASD_CQR_CLEAR &&
            irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
                cqr->status = DASD_CQR_QUEUED;
                dasd_clear_timer(device);
                dasd_schedule_bh(device);
                return;
        }

        /* check status - the request might have been killed by dyn detach */
        if (cqr->status != DASD_CQR_IN_IO) {
                MESSAGE(KERN_DEBUG,
                        "invalid status: bus_id %s, status %02x",
                        cdev->dev.bus_id, cqr->status);
                return;
        }
        DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
                      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);

        /* Find out the appropriate era_action. */
        if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
                era = dasd_era_fatal;
        else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
                 irb->scsw.cstat == 0 &&
                 !irb->esw.esw0.erw.cons)
                era = dasd_era_none;
        else if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags))
                era = dasd_era_fatal; /* don't recover this request */
        else if (irb->esw.esw0.erw.cons)
                era = device->discipline->examine_error(cqr, irb);
        else
                era = dasd_era_recover;

        DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
        expires = 0;
        if (era == dasd_era_none) {
                cqr->status = DASD_CQR_DONE;
                cqr->stopclk = now;
                /* Start first request on queue if possible -> fast_io. */
                if (cqr->list.next != &device->ccw_queue) {
                        next = list_entry(cqr->list.next,
                                          struct dasd_ccw_req, list);
                        if ((next->status == DASD_CQR_QUEUED) &&
                            (!device->stopped)) {
                                if (device->discipline->start_IO(next) == 0)
                                        expires = next->expires;
                                else
                                        DEV_MESSAGE(KERN_DEBUG, device, "%s",
                                                    "Interrupt fastpath "
                                                    "failed!");
                        }
                }
        } else {                /* error */
                memcpy(&cqr->irb, irb, sizeof (struct irb));

                /* dump sense data */
                dasd_log_sense(cqr, irb);

                switch (era) {
                case dasd_era_fatal:
                        cqr->status = DASD_CQR_FAILED;
                        cqr->stopclk = now;
                        break;
                case dasd_era_recover:
                        cqr->status = DASD_CQR_ERROR;
                        break;
                default:
                        BUG();
                }
        }
        if (expires != 0)
                dasd_set_timer(device, expires);
        else
                dasd_clear_timer(device);
        dasd_schedule_bh(device);
}
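/*
 * Recap of the era decision above: a halted subchannel is always fatal;
 * a clean channel end/device end with no channel status and no
 * concurrent sense means no recovery is needed; requests that opted out
 * of ERP fail outright; when sense data is present the discipline's
 * examine_error() decides; anything else is retried as dasd_era_recover.
 */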
/*
 * posts the buffer_cache about a finalized request
 */
static inline void
dasd_end_request(struct request *req, int uptodate)
{
        if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
                BUG();
        add_disk_randomness(req->rq_disk);
        end_that_request_last(req, uptodate);
}
/*
 * Process finished error recovery ccw.
 */
static inline void
__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
{
        dasd_erp_fn_t erp_fn;

        if (cqr->status == DASD_CQR_DONE)
                DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
        else
                DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
        erp_fn = device->discipline->erp_postaction(cqr);
        erp_fn(cqr);
}
/*
 * Process ccw request queue.
 */
static inline void
__dasd_process_ccw_queue(struct dasd_device * device,
                         struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        dasd_erp_fn_t erp_fn;

restart:
        /* Process request with final status. */
        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, list);
                /* Stop list processing at the first non-final request. */
                if (cqr->status != DASD_CQR_DONE &&
                    cqr->status != DASD_CQR_FAILED &&
                    cqr->status != DASD_CQR_ERROR)
                        break;
                /* Process requests with DASD_CQR_ERROR */
                if (cqr->status == DASD_CQR_ERROR) {
                        if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
                                cqr->status = DASD_CQR_FAILED;
                                cqr->stopclk = get_clock();
                        } else {
                                if (cqr->irb.esw.esw0.erw.cons) {
                                        erp_fn = device->discipline->
                                                erp_action(cqr);
                                        erp_fn(cqr);
                                } else
                                        dasd_default_erp_action(cqr);
                        }
                        goto restart;
                }
                /* First of all call extended error reporting. */
                if (dasd_eer_enabled(device) &&
                    cqr->status == DASD_CQR_FAILED) {
                        dasd_eer_write(device, cqr, DASD_EER_FATALERROR);

                        /* restart request */
                        cqr->status = DASD_CQR_QUEUED;
                        cqr->retries = 255;
                        device->stopped |= DASD_STOPPED_QUIESCE;
                        goto restart;
                }

                /* Process finished ERP request. */
                if (cqr->refers) {
                        __dasd_process_erp(device, cqr);
                        goto restart;
                }

                /* Rechain finished requests to final queue */
                cqr->endclk = get_clock();
                list_move_tail(&cqr->list, final_queue);
        }
}
static void
dasd_end_request_cb(struct dasd_ccw_req * cqr, void *data)
{
        struct request *req;
        struct dasd_device *device;
        int status;

        req = (struct request *) data;
        device = cqr->device;
        dasd_profile_end(device, cqr, req);
        status = cqr->device->discipline->free_cp(cqr,req);
        spin_lock_irq(&device->request_queue_lock);
        dasd_end_request(req, status);
        spin_unlock_irq(&device->request_queue_lock);
}
/*
 * Fetch requests from the block device queue.
 */
static inline void
__dasd_process_blk_queue(struct dasd_device * device)
{
        request_queue_t *queue;
        struct request *req;
        struct dasd_ccw_req *cqr;
        int nr_queued;

        queue = device->request_queue;
        /* No queue ? Then there is nothing to do. */
        if (queue == NULL)
                return;

        /*
         * We requeue request from the block device queue to the ccw
         * queue only in two states. In state DASD_STATE_READY the
         * partition detection is done and we need to requeue requests
         * for that. State DASD_STATE_ONLINE is normal block device
         * operation.
         */
        if (device->state != DASD_STATE_READY &&
            device->state != DASD_STATE_ONLINE)
                return;
        nr_queued = 0;
        /* Now we try to fetch requests from the request queue */
        list_for_each_entry(cqr, &device->ccw_queue, list)
                if (cqr->status == DASD_CQR_QUEUED)
                        nr_queued++;
        while (!blk_queue_plugged(queue) &&
               elv_next_request(queue) &&
               nr_queued < DASD_CHANQ_MAX_SIZE) {
                req = elv_next_request(queue);

                if (device->features & DASD_FEATURE_READONLY &&
                    rq_data_dir(req) == WRITE) {
                        DBF_DEV_EVENT(DBF_ERR, device,
                                      "Rejecting write request %p",
                                      req);
                        blkdev_dequeue_request(req);
                        dasd_end_request(req, 0);
                        continue;
                }
                if (device->stopped & DASD_STOPPED_DC_EIO) {
                        blkdev_dequeue_request(req);
                        dasd_end_request(req, 0);
                        continue;
                }
                cqr = device->discipline->build_cp(device, req);
                if (IS_ERR(cqr)) {
                        if (PTR_ERR(cqr) == -ENOMEM)
                                break;  /* terminate request queue loop */
                        DBF_DEV_EVENT(DBF_ERR, device,
                                      "CCW creation failed (rc=%ld) "
                                      "on request %p",
                                      PTR_ERR(cqr), req);
                        blkdev_dequeue_request(req);
                        dasd_end_request(req, 0);
                        continue;
                }
                cqr->callback = dasd_end_request_cb;
                cqr->callback_data = (void *) req;
                cqr->status = DASD_CQR_QUEUED;
                blkdev_dequeue_request(req);
                list_add_tail(&cqr->list, &device->ccw_queue);
                dasd_profile_start(device, cqr, req);
                nr_queued++;
        }
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static inline void
__dasd_check_expire(struct dasd_device * device)
{
        struct dasd_ccw_req *cqr;

        if (list_empty(&device->ccw_queue))
                return;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
        if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
                if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
                        if (device->discipline->term_IO(cqr) != 0)
                                /* Hmpf, try again in 1/10 sec */
                                dasd_set_timer(device, 10);
                }
        }
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static inline void
__dasd_start_head(struct dasd_device * device)
{
        struct dasd_ccw_req *cqr;
        int rc;

        if (list_empty(&device->ccw_queue))
                return;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
        if (cqr->status != DASD_CQR_QUEUED)
                return;
        /* Non-temporary stop condition will trigger fail fast */
        if (device->stopped & ~DASD_STOPPED_PENDING &&
            test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
            (!dasd_eer_enabled(device))) {
                cqr->status = DASD_CQR_FAILED;
                dasd_schedule_bh(device);
                return;
        }
        /* Don't try to start requests if device is stopped */
        if (device->stopped)
                return;

        rc = device->discipline->start_IO(cqr);
        if (rc == 0)
                dasd_set_timer(device, cqr->expires);
        else if (rc == -EACCES) {
                dasd_schedule_bh(device);
        } else
                /* Hmpf, try again in 1/2 sec */
                dasd_set_timer(device, 50);
}
/*
 * Remove requests from the ccw queue.
 */
static void
dasd_flush_ccw_queue(struct dasd_device * device, int all)
{
        struct list_head flush_queue;
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        INIT_LIST_HEAD(&flush_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        list_for_each_safe(l, n, &device->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, list);
                /* Flush all requests or only block device requests? */
                if (all == 0 && cqr->callback == dasd_end_request_cb)
                        continue;
                if (cqr->status == DASD_CQR_IN_IO)
                        device->discipline->term_IO(cqr);
                if (cqr->status != DASD_CQR_DONE &&
                    cqr->status != DASD_CQR_FAILED) {
                        cqr->status = DASD_CQR_FAILED;
                        cqr->stopclk = get_clock();
                }
                /* Process finished ERP request. */
                if (cqr->refers) {
                        __dasd_process_erp(device, cqr);
                        continue;
                }
                /* Rechain request on device request queue */
                cqr->endclk = get_clock();
                list_move_tail(&cqr->list, &flush_queue);
        }
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        /* Now call the callback function of flushed requests */
        list_for_each_safe(l, n, &flush_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, list);
                if (cqr->callback != NULL)
                        (cqr->callback)(cqr, cqr->callback_data);
        }
}
/*
 * Acquire the device lock and process queues for the device.
 */
static void
dasd_tasklet(struct dasd_device * device)
{
        struct list_head final_queue;
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        atomic_set (&device->tasklet_scheduled, 0);
        INIT_LIST_HEAD(&final_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Check expire time of first request on the ccw queue. */
        __dasd_check_expire(device);
        /* Finish off requests on ccw queue */
        __dasd_process_ccw_queue(device, &final_queue);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        /* Now call the callback function of requests with final status */
        list_for_each_safe(l, n, &final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, list);
                list_del_init(&cqr->list);
                if (cqr->callback != NULL)
                        (cqr->callback)(cqr, cqr->callback_data);
        }
        spin_lock_irq(&device->request_queue_lock);
        spin_lock(get_ccwdev_lock(device->cdev));
        /* Get new request from the block device request queue */
        __dasd_process_blk_queue(device);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_start_head(device);
        spin_unlock(get_ccwdev_lock(device->cdev));
        spin_unlock_irq(&device->request_queue_lock);
        dasd_put_device(device);
}
/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void
dasd_schedule_bh(struct dasd_device * device)
{
        /* Protect against rescheduling. */
        if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
                return;
        dasd_get_device(device);
        tasklet_hi_schedule(&device->tasklet);
}
/*
 * Queue a request to the head of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_head(struct dasd_ccw_req *req)
{
        struct dasd_device *device;
        unsigned long flags;

        device = req->device;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        req->status = DASD_CQR_QUEUED;
        req->device = device;
        list_add(&req->list, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/*
 * Queue a request to the tail of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_tail(struct dasd_ccw_req *req)
{
        struct dasd_device *device;
        unsigned long flags;

        device = req->device;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        req->status = DASD_CQR_QUEUED;
        req->device = device;
        list_add_tail(&req->list, &device->ccw_queue);
        /* let the bh start the request to keep them in order */
        dasd_schedule_bh(device);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
static void
dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
        wake_up((wait_queue_head_t *) data);
}

static inline int
_wait_for_wakeup(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device;
        int rc;

        device = cqr->device;
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = ((cqr->status == DASD_CQR_DONE ||
               cqr->status == DASD_CQR_FAILED) &&
              list_empty(&cqr->list));
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return rc;
}
/*
 * Attempts to start a special ccw queue and waits for its completion.
 */
int
dasd_sleep_on(struct dasd_ccw_req * cqr)
{
        wait_queue_head_t wait_q;
        struct dasd_device *device;
        int rc;

        device = cqr->device;
        spin_lock_irq(get_ccwdev_lock(device->cdev));

        init_waitqueue_head (&wait_q);
        cqr->callback = dasd_wakeup_cb;
        cqr->callback_data = (void *) &wait_q;
        cqr->status = DASD_CQR_QUEUED;
        list_add_tail(&cqr->list, &device->ccw_queue);

        /* let the bh start the request to keep them in order */
        dasd_schedule_bh(device);

        spin_unlock_irq(get_ccwdev_lock(device->cdev));

        wait_event(wait_q, _wait_for_wakeup(cqr));

        /* Request status is either done or failed. */
        rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
        return rc;
}
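/*
 * Typical usage of the synchronous interface above, as seen from a
 * discipline (illustrative sketch only; the field values are
 * placeholders, not taken from this file):
 *
 *      cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *      if (IS_ERR(cqr))
 *              return PTR_ERR(cqr);
 *      cqr->device = device;
 *      cqr->retries = 5;
 *      cqr->expires = 10 * HZ;
 *      ... build the channel program in cqr->cpaddr ...
 *      cqr->buildclk = get_clock();
 *      cqr->status = DASD_CQR_FILLED;
 *      rc = dasd_sleep_on(cqr);
 *      dasd_sfree_request(cqr, device);
 */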
/*
 * Attempts to start a special ccw queue and wait interruptible
 * for its completion.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
{
        wait_queue_head_t wait_q;
        struct dasd_device *device;
        int rc, finished;

        device = cqr->device;
        spin_lock_irq(get_ccwdev_lock(device->cdev));

        init_waitqueue_head (&wait_q);
        cqr->callback = dasd_wakeup_cb;
        cqr->callback_data = (void *) &wait_q;
        cqr->status = DASD_CQR_QUEUED;
        list_add_tail(&cqr->list, &device->ccw_queue);

        /* let the bh start the request to keep them in order */
        dasd_schedule_bh(device);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));

        finished = 0;
        while (!finished) {
                rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
                if (rc != -ERESTARTSYS) {
                        /* Request is final (done or failed) */
                        rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
                        break;
                }
                spin_lock_irq(get_ccwdev_lock(device->cdev));
                switch (cqr->status) {
                case DASD_CQR_IN_IO:
                        /* terminate running cqr */
                        if (device->discipline->term_IO) {
                                cqr->retries = -1;
                                device->discipline->term_IO(cqr);
                                /*
                                 * wait (non-interruptible) for final status
                                 * because signal is still pending
                                 */
                                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                                wait_event(wait_q, _wait_for_wakeup(cqr));
                                spin_lock_irq(get_ccwdev_lock(device->cdev));
                                rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
                                finished = 1;
                        }
                        break;
                case DASD_CQR_QUEUED:
                        /* request was not started; remove it from the queue */
                        list_del_init(&cqr->list);
                        rc = -EIO;
                        finished = 1;
                        break;
                default:
                        /* cqr with 'non-interruptible' status - just wait */
                        break;
                }
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
        }
        return rc;
}
/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int
_dasd_term_running_cqr(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        int rc;

        if (list_empty(&device->ccw_queue))
                return 0;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
        rc = device->discipline->term_IO(cqr);
        if (rc == 0) {
                /* termination successful */
                cqr->status = DASD_CQR_QUEUED;
                cqr->startclk = cqr->stopclk = 0;
        }
        return rc;
}
int
dasd_sleep_on_immediatly(struct dasd_ccw_req * cqr)
{
        wait_queue_head_t wait_q;
        struct dasd_device *device;
        int rc;

        device = cqr->device;
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = _dasd_term_running_cqr(device);
        if (rc) {
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                return rc;
        }

        init_waitqueue_head (&wait_q);
        cqr->callback = dasd_wakeup_cb;
        cqr->callback_data = (void *) &wait_q;
        cqr->status = DASD_CQR_QUEUED;
        list_add(&cqr->list, &device->ccw_queue);

        /* let the bh start the request to keep them in order */
        dasd_schedule_bh(device);

        spin_unlock_irq(get_ccwdev_lock(device->cdev));

        wait_event(wait_q, _wait_for_wakeup(cqr));

        /* Request status is either done or failed. */
        rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
        return rc;
}
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device = cqr->device;
        unsigned long flags;
        int rc;

        rc = 0;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        switch (cqr->status) {
        case DASD_CQR_QUEUED:
                /* request was not started - just set to failed */
                cqr->status = DASD_CQR_FAILED;
                break;
        case DASD_CQR_IN_IO:
                /* request in IO - terminate IO and release again */
                if (device->discipline->term_IO(cqr) != 0)
                        /* what to do if unable to terminate ??????
                           e.g. not _IN_IO */
                        cqr->status = DASD_CQR_FAILED;
                cqr->stopclk = get_clock();
                rc = 1;
                break;
        case DASD_CQR_DONE:
        case DASD_CQR_FAILED:
                /* already finished - do nothing */
                break;
        default:
                DEV_MESSAGE(KERN_ALERT, device,
                            "invalid status %02x in request",
                            cqr->status);
                BUG();
        }
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        dasd_schedule_bh(device);
        return rc;
}
/*
 * SECTION: Block device operations (request queue, partitions, open, release).
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void
do_dasd_request(request_queue_t * queue)
{
        struct dasd_device *device;

        device = (struct dasd_device *) queue->queuedata;
        spin_lock(get_ccwdev_lock(device->cdev));
        /* Get new request from the block device request queue */
        __dasd_process_blk_queue(device);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_start_head(device);
        spin_unlock(get_ccwdev_lock(device->cdev));
}
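/*
 * Locking note: the block layer calls do_dasd_request() with
 * device->request_queue_lock already held (it is the lock passed to
 * blk_init_queue() below), so only the ccw device lock is taken here.
 * dasd_tasklet() acquires the same two locks in the same order.
 */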
/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int
dasd_alloc_queue(struct dasd_device * device)
{
        int rc;

        device->request_queue = blk_init_queue(do_dasd_request,
                                               &device->request_queue_lock);
        if (device->request_queue == NULL)
                return -ENOMEM;

        device->request_queue->queuedata = device;

        elevator_exit(device->request_queue->elevator);
        rc = elevator_init(device->request_queue, "deadline");
        if (rc) {
                blk_cleanup_queue(device->request_queue);
                return rc;
        }
        return 0;
}
/*
 * Allocate and initialize request queue.
 */
static void
dasd_setup_queue(struct dasd_device * device)
{
        int max;

        blk_queue_hardsect_size(device->request_queue, device->bp_block);
        max = device->discipline->max_blocks << device->s2b_shift;
        blk_queue_max_sectors(device->request_queue, max);
        blk_queue_max_phys_segments(device->request_queue, -1L);
        blk_queue_max_hw_segments(device->request_queue, -1L);
        blk_queue_max_segment_size(device->request_queue, -1L);
        blk_queue_segment_boundary(device->request_queue, -1L);
        blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
}
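/*
 * Passing -1L to the blk_queue_max_* limits above effectively removes
 * the limit: the value wraps to the maximum of the (unsigned) parameter
 * type of each setter.
 */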
/*
 * Deactivate and free request queue.
 */
static void
dasd_free_queue(struct dasd_device * device)
{
        if (device->request_queue) {
                blk_cleanup_queue(device->request_queue);
                device->request_queue = NULL;
        }
}
/*
 * Flush request on the request queue.
 */
static void
dasd_flush_request_queue(struct dasd_device * device)
{
        struct request *req;

        if (!device->request_queue)
                return;

        spin_lock_irq(&device->request_queue_lock);
        while (!list_empty(&device->request_queue->queue_head)) {
                req = elv_next_request(device->request_queue);
                if (req == NULL)
                        break;
                blkdev_dequeue_request(req);
                dasd_end_request(req, 0);
        }
        spin_unlock_irq(&device->request_queue_lock);
}
static int
dasd_open(struct inode *inp, struct file *filp)
{
        struct gendisk *disk = inp->i_bdev->bd_disk;
        struct dasd_device *device = disk->private_data;
        int rc;

        atomic_inc(&device->open_count);
        if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                rc = -ENODEV;
                goto unlock;
        }

        if (!try_module_get(device->discipline->owner)) {
                rc = -EINVAL;
                goto unlock;
        }

        if (dasd_probeonly) {
                DEV_MESSAGE(KERN_INFO, device, "%s",
                            "No access to device due to probeonly mode");
                rc = -EPERM;
                goto out;
        }

        if (device->state <= DASD_STATE_BASIC) {
                DBF_DEV_EVENT(DBF_ERR, device, " %s",
                              " Cannot open unrecognized device");
                rc = -ENODEV;
                goto out;
        }

        return 0;

out:
        module_put(device->discipline->owner);
unlock:
        atomic_dec(&device->open_count);
        return rc;
}
static int
dasd_release(struct inode *inp, struct file *filp)
{
        struct gendisk *disk = inp->i_bdev->bd_disk;
        struct dasd_device *device = disk->private_data;

        atomic_dec(&device->open_count);
        module_put(device->discipline->owner);
        return 0;
}
/*
 * Return disk geometry.
 */
static int
dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct dasd_device *device;

        device = bdev->bd_disk->private_data;
        if (!device)
                return -ENODEV;

        if (!device->discipline ||
            !device->discipline->fill_geometry)
                return -EINVAL;

        device->discipline->fill_geometry(device, geo);
        geo->start = get_start_sect(bdev) >> device->s2b_shift;
        return 0;
}
struct block_device_operations
dasd_device_operations = {
        .owner          = THIS_MODULE,
        .open           = dasd_open,
        .release        = dasd_release,
        .ioctl          = dasd_ioctl,
        .compat_ioctl   = dasd_compat_ioctl,
        .getgeo         = dasd_getgeo,
};
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
        dasd_proc_exit();
#endif
        dasd_eer_exit();
        if (dasd_page_cache != NULL) {
                kmem_cache_destroy(dasd_page_cache);
                dasd_page_cache = NULL;
        }
        dasd_gendisk_exit();
        dasd_devmap_exit();
        if (dasd_debug_area != NULL) {
                debug_unregister(dasd_debug_area);
                dasd_debug_area = NULL;
        }
}
/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int
dasd_generic_probe (struct ccw_device *cdev,
                    struct dasd_discipline *discipline)
{
        int ret;

        ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
        if (ret) {
                printk(KERN_WARNING
                       "dasd_generic_probe: could not set ccw-device options "
                       "for %s\n", cdev->dev.bus_id);
                return ret;
        }
        ret = dasd_add_sysfs_files(cdev);
        if (ret) {
                printk(KERN_WARNING
                       "dasd_generic_probe: could not add sysfs entries "
                       "for %s\n", cdev->dev.bus_id);
                return ret;
        }
        cdev->handler = &dasd_int_handler;

        /*
         * Automatically online either all dasd devices (dasd_autodetect)
         * or all devices specified with dasd= parameters during
         * initial probe.
         */
        if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
            (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
                ret = ccw_device_set_online(cdev);
        if (ret)
                printk(KERN_WARNING
                       "dasd_generic_probe: could not initially online "
                       "ccw-device %s\n", cdev->dev.bus_id);
        return ret;
}
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void
dasd_generic_remove (struct ccw_device *cdev)
{
        struct dasd_device *device;

        cdev->handler = NULL;

        dasd_remove_sysfs_files(cdev);
        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return;
        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                /* Already doing offline processing */
                dasd_put_device(device);
                return;
        }
        /*
         * This device is removed unconditionally. Set offline
         * flag to prevent dasd_open from opening it while it is
         * not quite down yet.
         */
        dasd_set_target_state(device, DASD_STATE_NEW);
        /* dasd_delete_device destroys the device reference. */
        dasd_delete_device(device);
}
/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int
dasd_generic_set_online (struct ccw_device *cdev,
                         struct dasd_discipline *base_discipline)
{
        struct dasd_discipline *discipline;
        struct dasd_device *device;
        int rc;

        /* first online clears initial online feature flag */
        dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
        device = dasd_create_device(cdev);
        if (IS_ERR(device))
                return PTR_ERR(device);

        discipline = base_discipline;
        if (device->features & DASD_FEATURE_USEDIAG) {
                if (!dasd_diag_discipline_pointer) {
                        printk (KERN_WARNING
                                "dasd_generic couldn't online device %s "
                                "- discipline DIAG not available\n",
                                cdev->dev.bus_id);
                        dasd_delete_device(device);
                        return -ENODEV;
                }
                discipline = dasd_diag_discipline_pointer;
        }
        if (!try_module_get(base_discipline->owner)) {
                dasd_delete_device(device);
                return -EINVAL;
        }
        if (!try_module_get(discipline->owner)) {
                module_put(base_discipline->owner);
                dasd_delete_device(device);
                return -EINVAL;
        }
        device->base_discipline = base_discipline;
        device->discipline = discipline;

        rc = discipline->check_device(device);
        if (rc) {
                printk (KERN_WARNING
                        "dasd_generic couldn't online device %s "
                        "with discipline %s rc=%i\n",
                        cdev->dev.bus_id, discipline->name, rc);
                module_put(discipline->owner);
                module_put(base_discipline->owner);
                dasd_delete_device(device);
                return rc;
        }

        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN) {
                printk (KERN_WARNING
                        "dasd_generic discipline not found for %s\n",
                        cdev->dev.bus_id);
                rc = -ENODEV;
                dasd_set_target_state(device, DASD_STATE_NEW);
                dasd_delete_device(device);
        } else
                pr_debug("dasd_generic device %s found\n",
                         cdev->dev.bus_id);

        /* FIXME: we have to wait for the root device but we don't want
         * to wait for each single device but for all at once. */
        wait_event(dasd_init_waitq, _wait_for_device(device));

        dasd_put_device(device);

        return rc;
}
int
dasd_generic_set_offline (struct ccw_device *cdev)
{
        struct dasd_device *device;
        int max_count, open_count;

        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return PTR_ERR(device);
        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                /* Already doing offline processing */
                dasd_put_device(device);
                return 0;
        }
        /*
         * We must make sure that this device is currently not in use.
         * The open_count is increased for every opener, that includes
         * the blkdev_get in dasd_scan_partitions. We are only interested
         * in the other openers.
         */
        max_count = device->bdev ? 0 : -1;
        open_count = (int) atomic_read(&device->open_count);
        if (open_count > max_count) {
                if (open_count > 0)
                        printk (KERN_WARNING "Can't offline dasd device with "
                                "open count = %i.\n",
                                open_count);
                else
                        printk (KERN_WARNING "%s",
                                "Can't offline dasd device due to internal "
                                "use\n");
                clear_bit(DASD_FLAG_OFFLINE, &device->flags);
                dasd_put_device(device);
                return -EBUSY;
        }
        dasd_set_target_state(device, DASD_STATE_NEW);
        /* dasd_delete_device destroys the device reference. */
        dasd_delete_device(device);

        return 0;
}
int
dasd_generic_notify(struct ccw_device *cdev, int event)
{
        struct dasd_device *device;
        struct dasd_ccw_req *cqr;
        unsigned long flags;
        int ret;

        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return 0;
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        ret = 0;
        switch (event) {
        case CIO_GONE:
        case CIO_NO_PATH:
                /* First of all call extended error reporting. */
                dasd_eer_write(device, NULL, DASD_EER_NOPATH);

                if (device->state < DASD_STATE_BASIC)
                        break;
                /* Device is active. We want to keep it. */
                if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
                        list_for_each_entry(cqr, &device->ccw_queue, list)
                                if (cqr->status == DASD_CQR_IN_IO)
                                        cqr->status = DASD_CQR_FAILED;
                        device->stopped |= DASD_STOPPED_DC_EIO;
                } else {
                        list_for_each_entry(cqr, &device->ccw_queue, list)
                                if (cqr->status == DASD_CQR_IN_IO) {
                                        cqr->status = DASD_CQR_QUEUED;
                                        cqr->retries++;
                                }
                        device->stopped |= DASD_STOPPED_DC_WAIT;
                        dasd_set_timer(device, 0);
                }
                dasd_schedule_bh(device);
                ret = 1;
                break;
        case CIO_OPER:
                /* FIXME: add a sanity check. */
                device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
                dasd_schedule_bh(device);
                ret = 1;
                break;
        }
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        dasd_put_device(device);
        return ret;
}
static int __init
dasd_init(void)
{
        int rc;

        init_waitqueue_head(&dasd_init_waitq);

        /* register 'common' DASD debug area, used for all DBF_XXX calls */
        dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
        if (dasd_debug_area == NULL) {
                rc = -ENOMEM;
                goto failed;
        }
        debug_register_view(dasd_debug_area, &debug_sprintf_view);
        debug_set_level(dasd_debug_area, DBF_EMERG);

        DBF_EVENT(DBF_EMERG, "%s", "debug area created");

        dasd_diag_discipline_pointer = NULL;

        rc = dasd_devmap_init();
        if (rc)
                goto failed;
        rc = dasd_gendisk_init();
        if (rc)
                goto failed;
        rc = dasd_parse();
        if (rc)
                goto failed;
        rc = dasd_eer_init();
        if (rc)
                goto failed;
#ifdef CONFIG_PROC_FS
        rc = dasd_proc_init();
        if (rc)
                goto failed;
#endif

        return 0;
failed:
        MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
        dasd_exit();
        return rc;
}
module_init(dasd_init);
module_exit(dasd_exit);
EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);