// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD		"dasd_diag_mod"
static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);
/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;
/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE * 2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);

	return device;
}
/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}
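
/*
 * Editor's note, usage sketch (illustrative, not part of the driver):
 * dasd_alloc_device() reports failure through an ERR_PTR-encoded
 * pointer, so callers are expected to pair it with dasd_free_device()
 * like this:
 *
 *	struct dasd_device *device;
 *
 *	device = dasd_alloc_device();
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *	...
 *	dasd_free_device(device);
 */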
/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);
/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}
static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}
/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}
/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}
static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;

		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct block_device *part;

	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(bdev_kobj(part), KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}
/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct block_device *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(bdev_kobj(part), KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}
/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}
/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}
/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);

	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);
/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);

	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);
/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);
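
/*
 * Editor's note, worked example: setting the target state drives the
 * whole ladder in dasd_increase_state()/dasd_decrease_state().  A call
 * such as
 *
 *	dasd_set_target_state(device, DASD_STATE_ONLINE);
 *
 * on a device in DASD_STATE_NEW walks NEW -> KNOWN -> BASIC -> READY ->
 * ONLINE, one transition per dasd_change_state() pass, and stops early
 * when a transition fails (or returns -EAGAIN, e.g. while the format
 * analysis is still pending).
 */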
/*
 * Enable devices with device numbers in [from..to].
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;
/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}
/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)			   \
{								   \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		;						   \
}
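
/*
 * Editor's note, worked example: dasd_profile_counter() computes a
 * logarithmic histogram bucket.  For value = 100 the loop tests
 * 100 >> 2 ... 100 >> 6 (all non-zero) and stops at 100 >> 7 == 0,
 * leaving index = 5.  In general bucket 0 collects values 0..3,
 * bucket i (i >= 1) collects values in [2^(i+1), 2^(i+2)), and the
 * "index < 31" bound makes bucket 31 catch everything larger.
 */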
static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}
void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	ktime_get_real_ts64(&data->starttod);
	spin_unlock_bh(&profile->lock);
}
int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}
void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}
char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}
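
/*
 * Editor's note: the returned buffer comes from vmalloc() (or is an
 * ERR_PTR on failure), so callers must release it with vfree(), e.g.:
 *
 *	buffer = dasd_get_user_string(user_buf, user_len);
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 *	...
 *	vfree(buffer);
 *
 * dasd_stats_write() below follows exactly this pattern.
 */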
static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}
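
/*
 * Editor's note, usage sketch: this handler backs the "statistics"
 * debugfs file created below, so (assuming debugfs is mounted at
 * /sys/kernel/debug) profiling can be toggled from the shell:
 *
 *	echo on    > /sys/kernel/debug/dasd/global/statistics
 *	echo reset > /sys/kernel/debug/dasd/global/statistics
 *	cat          /sys/kernel/debug/dasd/global/statistics
 *	echo off   > /sys/kernel/debug/dasd/global/statistics
 *
 * Per-device and per-block files with the same format live in the
 * corresponding subdirectories of /sys/kernel/debug/dasd/.
 */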
static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}
static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}
static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}
static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};
static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}
static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}
static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}
static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}
#else /* CONFIG_DASD_PROFILE */

#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif				/* CONFIG_DASD_PROFILE */
static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}
static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
{
	unsigned long flags;
	char *data, *chunk;
	int size = 0;

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;

	spin_lock_irqsave(&device->mem_lock, flags);
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);
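
/*
 * Editor's note, allocation sketch (illustrative; the magic value and
 * the CCW setup are discipline-specific and hypothetical here): a
 * discipline typically asks for the channel program and payload in one
 * chunk and wires the CCWs up afterwards:
 *
 *	struct dasd_ccw_req *cqr;
 *	struct ccw1 *ccw;
 *
 *	cqr = dasd_smalloc_request(magic, 1, 32, device, NULL);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	ccw = cqr->cpaddr;
 *	ccw->cmd_code = ...;               (discipline-specific command)
 *	ccw->count = 32;
 *	ccw->cda = (__u32)(addr_t) cqr->data;
 *	...
 *	dasd_sfree_request(cqr, device);   (returns chunk, drops reference)
 */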
struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);
void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);
void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);
/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					    ccw_device_get_path_mask(
						    device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);
/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_device *device;

	device = from_timer(device, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}
/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);
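
/*
 * Editor's note, worked example: the expires argument is in jiffies,
 * so callers scale with HZ.  dasd_device_set_timer(device, 5 * HZ)
 * arms a 5 second timeout (as in __dasd_device_check_expire() below),
 * while the dasd_device_set_timer(device, 50) used by the start-head
 * logic is half a second only under the traditional HZ=100 setting.
 */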
static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}
void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
static int dasd_check_hpf_error(struct irb *irb)
{
	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
	    (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
	     irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}
static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device = NULL;
	u8 *sense = NULL;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese)
		return 0;
	if (!device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
		!!(sense[1] & SNS1_FILE_PROTECTED) ||
		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}
static int dasd_ese_oos_cond(u8 *sense)
{
	return sense[0] & SNS0_EQUIPMENT_CHECK &&
		sense[1] & SNS1_PERM_ERR &&
		sense[1] & SNS1_WRITE_INHIBITED &&
		sense[25] == 0x01;
}
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next, *fcqr;
	struct dasd_device *device;
	unsigned long now;
	int nrf_suppressed = 0;
	int fp_suppressed = 0;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	if (dasd_ese_needs_format(cqr->block, irb)) {
		if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
			device->discipline->ese_read(cqr, irb);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr, irb);
		if (IS_ERR(fcqr)) {
			if (PTR_ERR(fcqr) == -EINVAL) {
				cqr->status = DASD_CQR_ERROR;
				return;
			}
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
			dasd_schedule_device_bh(device);
			return;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/* check for HPF error
		 * call discipline function to requeue all requests
		 * and disable HPF accordingly
		 */
		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
		    device->discipline->handle_hpf_error)
			device->discipline->handle_hpf_error(device, irb);
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == dasd_path_get_opm(device))
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = dasd_path_get_opm(device);
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);
enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}
/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			continue;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}
static void __dasd_process_cqr(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	char errorstring[ERRORLENGTH];

	switch (cqr->status) {
	case DASD_CQR_SUCCESS:
		cqr->status = DASD_CQR_DONE;
		break;
	case DASD_CQR_ERROR:
		cqr->status = DASD_CQR_NEED_ERP;
		break;
	case DASD_CQR_CLEARED:
		cqr->status = DASD_CQR_TERMINATED;
		break;
	default:
		/* internal error 12 - wrong cqr status*/
		snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
	}
	if (cqr->callback)
		cqr->callback(cqr, cqr->callback_data);
}
/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		if (!block) {
			__dasd_process_cqr(device, cqr);
		} else {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_cqr(device, cqr);
			spin_unlock_bh(&block->queue_lock);
		}
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			/*
			 * IO in safe offline processing should not
			 * run out of retries
			 */
			cqr->retries++;
		}
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}
/*
 * return 1 when device is not eligible for IO
 */
static int __dasd_device_is_unusable(struct dasd_device *device,
				     struct dasd_ccw_req *cqr)
{
	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC);

	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/*
		 * dasd is being set offline
		 * but it is no safe offline where we have to allow I/O
		 */
		return 1;
	}
	if (device->stopped) {
		if (device->stopped & mask) {
			/* stopped and CQR will not change that. */
			return 1;
		}
		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			/* CQR is not able to change device to
			 * operational. */
			return 1;
		}
		/* CQR required to get device operational. */
	}
	return 0;
}
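
/*
 * Editor's note, worked example: the mask above clears
 * DASD_STOPPED_DC_WAIT and DASD_STOPPED_NOSPC.  If the device is
 * stopped *only* for one of those two reasons, device->stopped & mask
 * is 0, and a path-verification request (DASD_CQR_VERIFY_PATH set) is
 * still allowed through, because it may be exactly what brings the
 * device back to an operational state.  Any other stop bit makes the
 * device unusable for all requests.
 */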
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* if device is not usable return request to upper layer */
	if (__dasd_device_is_unusable(device, cqr)) {
		cqr->intrc = -EAGAIN;
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}
static void __dasd_device_check_path_events(struct dasd_device *device)
{
	__u8 tbvpm, fcsecpm;
	int rc;

	tbvpm = dasd_path_get_tbvpm(device);
	fcsecpm = dasd_path_get_fcsecpm(device);

	if (!tbvpm && !fcsecpm)
		return;

	if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
		return;
	rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
	if (rc) {
		dasd_device_set_timer(device, 50);
	} else {
		dasd_path_clear_all_verify(device);
		dasd_path_clear_all_fcsec(device);
	}
}
/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called. In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_tod_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
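/*
 * Note: the DASD_CQR_* state encoding is ordered - final states such as
 * DASD_CQR_DONE and DASD_CQR_FAILED are numerically below DASD_CQR_QUEUED,
 * while the states owned by the device layer (QUEUED, IN_IO, CLEAR_PENDING,
 * CLEARED) sit above it. dasd_flush_block_queue() further down depends on
 * this ordering when it tests cqr->status >= DASD_CQR_QUEUED.
 */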
/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(unsigned long data)
{
	struct dasd_device *device = (struct dasd_device *) data;
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	__dasd_device_check_path_events(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (waitqueue_active(&shutdown_waitq))
		wake_up(&shutdown_waitq);
	dasd_put_device(device);
}
/*
 * Schedules a call to dasd_device_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}
EXPORT_SYMBOL(dasd_schedule_device_bh);
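/*
 * The dasd_get_device() above pairs with the dasd_put_device() at the end
 * of dasd_device_tasklet(), so the device cannot go away while a tasklet
 * run is pending; dasd_schedule_block_bh() below uses the same pattern for
 * the block tasklet.
 */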
void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);

void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
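/*
 * Illustrative usage sketch (no new code path): the stop bits are
 * manipulated under the ccw device lock, e.g.
 *
 *	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
 *	dasd_device_set_stop_bits(device, DASD_STOPPED_PENDING);
 *	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 *
 * as dasd_block_timeout() below does for the remove side. Clearing the
 * last stop bit wakes up any sleep_on() waiters via generic_waitq.
 */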
/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
EXPORT_SYMBOL(dasd_add_request_head);
/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
EXPORT_SYMBOL(dasd_add_request_tail);
/*
 * Wakeup helper for the 'sleep_on' functions.
 */
void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	cqr->callback_data = DASD_SLEEPON_END_TAG;
	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
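/*
 * The sleep_on machinery uses cqr->callback_data as a tiny hand-shake:
 * it is set to DASD_SLEEPON_START_TAG when the request is queued and
 * flipped to DASD_SLEEPON_END_TAG by dasd_wakeup_cb() once the request
 * reaches a final status. The wait helpers below simply test for the END
 * tag under the ccw device lock.
 */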
static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
/*
 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}
static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->refers) /* erp is not done yet */
			return 1;
		return ((cqr->status != DASD_CQR_DONE) &&
			(cqr->status != DASD_CQR_FAILED));
	} else
		return (cqr->status == DASD_CQR_FILLED);
}
static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -ENOLINK;
			continue;
		}
		/*
		 * Don't try to start requests if device is in
		 * offline processing, it might wait forever
		 */
		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -ENODEV;
			continue;
		}
		/*
		 * Don't try to start requests if device is stopped
		 * except path verification requests
		 */
		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			if (interruptible) {
				rc = wait_event_interruptible(
					generic_waitq, !(device->stopped));
				if (rc == -ERESTARTSYS) {
					cqr->status = DASD_CQR_FAILED;
					maincqr->intrc = rc;
					continue;
				}
			} else
				wait_event(generic_waitq, !(device->stopped));
		}

		cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_tod_clock();
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}
static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
{
	struct dasd_ccw_req *cqr;

	list_for_each_entry(cqr, ccw_queue, blocklist) {
		if (cqr->callback_data != DASD_SLEEPON_END_TAG)
			return 0;
	}

	return 1;
}
static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr, *n;
	u8 *sense = NULL;
	int rc;

retry:
	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
		device = cqr->startdev;
		if (cqr->status != DASD_CQR_FILLED) /*could be failed*/
			continue;

		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
		/*Non-temporary stop condition will trigger fail fast*/
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    !dasd_eer_enabled(device)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EAGAIN;
			continue;
		}

		/*Don't try to start requests if device is stopped*/
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !device->stopped);
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				cqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		if (!cqr->callback)
			cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
	}

	wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));

	rc = 0;
	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
		/*
		 * In some cases the 'File Protected' or 'Incorrect Length'
		 * error might be expected and error recovery would be
		 * unnecessary in these cases. Check if the corresponding
		 * suppress bit is set.
		 */
		sense = dasd_get_sense(&cqr->irb);
		if (sense && sense[1] & SNS1_FILE_PROTECTED &&
		    test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
			continue;
		if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
		    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
			continue;

		/*
		 * for alias devices simplify error recovery and
		 * return to upper layer
		 * do not skip ERP requests
		 */
		if (cqr->startdev != cqr->basedev && !cqr->refers &&
		    (cqr->status == DASD_CQR_TERMINATED ||
		     cqr->status == DASD_CQR_NEED_ERP))
			return -EAGAIN;

		/* normal recovery for basedev IO */
		if (__dasd_sleep_on_erp(cqr))
			/* handle erp first */
			goto retry;
	}

	return 0;
}
/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 0);
}
EXPORT_SYMBOL(dasd_sleep_on);

/*
 * Start requests from a ccw_queue and wait for their completion.
 */
int dasd_sleep_on_queue(struct list_head *ccw_queue)
{
	return _dasd_sleep_on_queue(ccw_queue, 0);
}
EXPORT_SYMBOL(dasd_sleep_on_queue);

/*
 * Start requests from a ccw_queue and wait interruptible for their completion.
 */
int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
{
	return _dasd_sleep_on_queue(ccw_queue, 1);
}
EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 1);
}
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
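/*
 * Usage sketch for the sleep_on interfaces (illustrative only; see
 * dasd_generic_read_dev_chars() further down for a complete in-tree
 * caller): allocate a request, let the sleep_on machinery drive it to a
 * final status, then free it.
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device, NULL);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	(set up the channel program, startdev, memdev, retries, ...)
 *	rc = dasd_sleep_on(cqr);
 *	dasd_sfree_request(cqr, cqr->memdev);
 */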
/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	rc = device->discipline->term_IO(cqr);
	if (!rc)
		/*
		 * CQR terminated because a more important request is pending.
		 * Undo decreasing of retry counter because this is
		 * not an error case.
		 */
		cqr->retries++;
	return rc;
}
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		cqr->status = DASD_CQR_FAILED;
		cqr->intrc = -EPERM;
		return -EIO;
	}
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = DASD_SLEEPON_START_TAG;
	cqr->status = DASD_CQR_QUEUED;
	/*
	 * add new request as second
	 * first the terminated cqr needs to be finished
	 */
	list_add(&cqr->devlist, device->ccw_queue.next);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;

	/* kick tasklets */
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);

	return rc;
}
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 0 if request termination was successful
 *	   negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	int rc = 0;

	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			cqr->stopclk = get_tod_clock();
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	dasd_schedule_device_bh(device);
	return rc;
}

int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	rc = __dasd_cancel_req(cqr);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return rc;
}
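/*
 * Because cancellation is asynchronous, callers must still wait for the
 * request to come back through its callback. _dasd_sleep_on() above shows
 * the pattern: dasd_cancel_req() on -ERESTARTSYS, followed by a
 * non-interruptible wait_event() for the final status.
 */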
/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_block *block;

	block = from_timer(block, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
	blk_mq_run_hw_queues(block->request_queue, true);
}
/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0)
		del_timer(&block->timer);
	else
		mod_timer(&block->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_block_set_timer);

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	del_timer(&block->timer);
}
EXPORT_SYMBOL(dasd_block_clear_timer);
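/*
 * 'expires' is given in jiffies. A typical (illustrative) call arms the
 * timer half a second out:
 *
 *	dasd_block_set_timer(block, HZ / 2);
 *
 * while dasd_block_set_timer(block, 0) simply deletes a pending timer.
 */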
/*
 * Process finished error recovery ccw.
 */
static void __dasd_process_erp(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}
static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	blk_status_t error = BLK_STS_OK;
	unsigned int proc_bytes;
	int status;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);

	proc_bytes = cqr->proc_bytes;
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status < 0)
		error = errno_to_blk_status(status);
	else if (status == 0) {
		switch (cqr->intrc) {
		case -EPERM:
			error = BLK_STS_NEXUS;
			break;
		case -ENOLINK:
			error = BLK_STS_TRANSPORT;
			break;
		case -ETIMEDOUT:
			error = BLK_STS_TIMEOUT;
			break;
		default:
			error = BLK_STS_IOERR;
			break;
		}
	}

	/*
	 * We need to take care for ETIMEDOUT errors here since the
	 * complete callback does not get called in this case.
	 * Take care of all errors here and avoid additional code to
	 * transfer the error value to the complete callback.
	 */
	if (error == BLK_STS_TIMEOUT) {
		blk_mq_end_request(req, error);
		blk_mq_run_hw_queues(req->q, true);
	} else {
		/*
		 * Partial completed requests can happen with ESE devices.
		 * During read we might have gotten a NRF error and have to
		 * complete a request partially.
		 */
		if (proc_bytes) {
			blk_update_request(req, BLK_STS_OK,
					   blk_rq_bytes(req) - proc_bytes);
			blk_mq_requeue_request(req, true);
		} else if (likely(!blk_should_fake_timeout(req->q))) {
			blk_mq_complete_request(req);
		}
	}
}
/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/*  Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			if (IS_ERR(erp_fn(cqr)))
				continue;
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_tod_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}
static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}
static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first requests on the queue, as some
	 * of previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -ENOLINK;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(unsigned long data)
{
	struct dasd_block *block = (struct dasd_block *) data;
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_queue *dq;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock_irq(&block->queue_lock);

	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		dq = cqr->dq;
		spin_lock_irq(&dq->lock);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&dq->lock);
	}

	spin_lock_irq(&block->queue_lock);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock_irq(&block->queue_lock);

	if (waitqueue_active(&shutdown_waitq))
		wake_up(&shutdown_waitq);
	dasd_put_device(block->base);
}
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}
/*
 * Requeue a request back to the block request queue
 * only works for block requests
 */
static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
	struct dasd_block *block = cqr->block;
	struct request *req;

	if (!block)
		return -EINVAL;
	/*
	 * If the request is an ERP request there is nothing to requeue.
	 * This will be done with the remaining original request.
	 */
	if (cqr->refers)
		return 0;
	spin_lock_irq(&cqr->dq->lock);
	req = (struct request *) cqr->callback_data;
	blk_mq_requeue_request(req, false);
	spin_unlock_irq(&cqr->dq->lock);

	return 0;
}
/*
 * Go through all request on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;
	unsigned long flags;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements
			 */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irqsave(&cqr->dq->lock, flags);
		cqr->endclk = get_tod_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irqrestore(&cqr->dq->lock, flags);
	}
	return rc;
}
/*
 * Schedules a call to dasd_block_tasklet over the block tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to its base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}
EXPORT_SYMBOL(dasd_schedule_block_bh);
/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from the block layer (blk-mq).
 */
static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *qd)
{
	struct dasd_block *block = hctx->queue->queuedata;
	struct dasd_queue *dq = hctx->driver_data;
	struct request *req = qd->rq;
	struct dasd_device *basedev;
	struct dasd_ccw_req *cqr;
	blk_status_t rc = BLK_STS_OK;

	basedev = block->base;
	spin_lock_irq(&dq->lock);
	if (basedev->state < DASD_STATE_READY) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "device not ready for request %p", req);
		rc = BLK_STS_IOERR;
		goto out;
	}

	/*
	 * if device is stopped do not fetch new requests
	 * except failfast is active which will let requests fail
	 * immediately in __dasd_block_start_head()
	 */
	if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "device stopped request %p", req);
		rc = BLK_STS_RESOURCE;
		goto out;
	}

	if (basedev->features & DASD_FEATURE_READONLY &&
	    rq_data_dir(req) == WRITE) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "Rejecting write request %p", req);
		rc = BLK_STS_IOERR;
		goto out;
	}

	if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
	    (basedev->features & DASD_FEATURE_FAILFAST ||
	     blk_noretry_request(req))) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "Rejecting failfast request %p", req);
		rc = BLK_STS_IOERR;
		goto out;
	}

	cqr = basedev->discipline->build_cp(basedev, block, req);
	if (IS_ERR(cqr)) {
		if (PTR_ERR(cqr) == -EBUSY ||
		    PTR_ERR(cqr) == -ENOMEM ||
		    PTR_ERR(cqr) == -EAGAIN) {
			rc = BLK_STS_RESOURCE;
			goto out;
		}
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "CCW creation failed (rc=%ld) on request %p",
			      PTR_ERR(cqr), req);
		rc = BLK_STS_IOERR;
		goto out;
	}
	/*
	 * Note: callback is set to dasd_return_cqr_cb in
	 * __dasd_block_start_head to cover erp requests as well
	 */
	cqr->callback_data = req;
	cqr->status = DASD_CQR_FILLED;
	cqr->dq = dq;

	blk_mq_start_request(req);
	spin_lock(&block->queue_lock);
	list_add_tail(&cqr->blocklist, &block->ccw_queue);
	INIT_LIST_HEAD(&cqr->devlist);
	dasd_profile_start(block, cqr, req);
	dasd_schedule_block_bh(block);
	spin_unlock(&block->queue_lock);

out:
	spin_unlock_irq(&dq->lock);
	return rc;
}
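/*
 * Lock ordering in the blk-mq paths: the per-hctx dasd_queue lock is taken
 * first, then block->queue_lock, and finally the ccw device lock;
 * dasd_times_out() below walks the full chain. Keeping this order
 * consistent is what makes the request/timeout races safe.
 */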
/*
 * Block timeout callback, called from the block layer
 *
 * Return values:
 * BLK_EH_RESET_TIMER if the request should be left running
 * BLK_EH_DONE if the request is handled or terminated
 *		      by the driver.
 */
enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
{
	struct dasd_block *block = req->q->queuedata;
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int rc = 0;

	cqr = blk_mq_rq_to_pdu(req);
	if (!cqr)
		return BLK_EH_DONE;

	spin_lock_irqsave(&cqr->dq->lock, flags);
	device = cqr->startdev ? cqr->startdev : block->base;
	if (!device->blk_timeout) {
		spin_unlock_irqrestore(&cqr->dq->lock, flags);
		return BLK_EH_RESET_TIMER;
	}
	DBF_DEV_EVENT(DBF_WARNING, device,
		      " dasd_times_out cqr %p status %x",
		      cqr, cqr->status);

	spin_lock(&block->queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	cqr->retries = -1;
	cqr->intrc = -ETIMEDOUT;
	if (cqr->status >= DASD_CQR_QUEUED) {
		rc = __dasd_cancel_req(cqr);
	} else if (cqr->status == DASD_CQR_FILLED ||
		   cqr->status == DASD_CQR_NEED_ERP) {
		cqr->status = DASD_CQR_TERMINATED;
	} else if (cqr->status == DASD_CQR_IN_ERP) {
		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;

		list_for_each_entry_safe(searchcqr, nextcqr,
					 &block->ccw_queue, blocklist) {
			tmpcqr = searchcqr;
			while (tmpcqr->refers)
				tmpcqr = tmpcqr->refers;
			if (tmpcqr != cqr)
				continue;
			/* searchcqr is an ERP request for cqr */
			searchcqr->retries = -1;
			searchcqr->intrc = -ETIMEDOUT;
			if (searchcqr->status >= DASD_CQR_QUEUED) {
				rc = __dasd_cancel_req(searchcqr);
			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
				searchcqr->status = DASD_CQR_TERMINATED;
				rc = 0;
			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
				/*
				 * Shouldn't happen; most recent ERP
				 * request is at the front of queue
				 */
				continue;
			}
			break;
		}
	}
	spin_unlock(get_ccwdev_lock(device->cdev));
	dasd_schedule_block_bh(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irqrestore(&cqr->dq->lock, flags);

	return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
}
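/*
 * blk_mq_rq_to_pdu() above works because dasd_alloc_queue() below sets
 * tag_set.cmd_size to sizeof(struct dasd_ccw_req): every block layer
 * request carries an embedded cqr as its driver payload.
 */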
static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int idx)
{
	struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);

	if (!dq)
		return -ENOMEM;

	spin_lock_init(&dq->lock);
	hctx->driver_data = dq;

	return 0;
}

static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}
static void dasd_request_done(struct request *req)
{
	blk_mq_end_request(req, 0);
	blk_mq_run_hw_queues(req->q, true);
}
static struct blk_mq_ops dasd_mq_ops = {
	.queue_rq = do_dasd_request,
	.complete = dasd_request_done,
	.timeout = dasd_times_out,
	.init_hctx = dasd_init_hctx,
	.exit_hctx = dasd_exit_hctx,
};
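/*
 * Wiring summary: .queue_rq feeds requests into the dasd_block layer,
 * .timeout lets the driver cancel or terminate overdue cqrs, and
 * .complete is invoked via blk_mq_complete_request() from
 * __dasd_cleanup_cqr() above.
 */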
/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->tag_set.ops = &dasd_mq_ops;
	block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
	block->tag_set.nr_hw_queues = nr_hw_queues;
	block->tag_set.queue_depth = queue_depth;
	block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	block->tag_set.numa_node = NUMA_NO_NODE;

	rc = blk_mq_alloc_tag_set(&block->tag_set);
	if (rc)
		return rc;

	block->request_queue = blk_mq_init_queue(&block->tag_set);
	if (IS_ERR(block->request_queue))
		return PTR_ERR(block->request_queue);

	block->request_queue->queuedata = block;

	return 0;
}
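/*
 * queue_depth and nr_hw_queues are the module parameters declared at the
 * top of this file. A sketch of how they might be tuned at load time
 * (assuming the usual dasd_mod module name):
 *
 *	modprobe dasd_mod queue_depth=64 nr_hw_queues=8
 */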
/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
	if (block->request_queue) {
		blk_cleanup_queue(block->request_queue);
		blk_mq_free_tag_set(&block->tag_set);
		block->request_queue = NULL;
	}
}
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
	struct dasd_device *base;
	int rc;

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;

	atomic_inc(&base->block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		dev_info(&base->cdev->dev,
			 "Accessing the DASD failed because it is in "
			 "probeonly mode\n");
		rc = -EPERM;
		goto out;
	}

	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	if ((mode & FMODE_WRITE) &&
	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
	     (base->features & DASD_FEATURE_READONLY))) {
		rc = -EROFS;
		goto out;
	}

	dasd_put_device(base);
	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&base->block->open_count);
	dasd_put_device(base);
	return rc;
}
static void dasd_release(struct gendisk *disk, fmode_t mode)
{
	struct dasd_device *base = dasd_device_from_gendisk(disk);
	if (base) {
		atomic_dec(&base->block->open_count);
		module_put(base->discipline->owner);
		dasd_put_device(base);
	}
}
/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_device *base;

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;

	if (!base->discipline ||
	    !base->discipline->fill_geometry) {
		dasd_put_device(base);
		return -EINVAL;
	}
	base->discipline->fill_geometry(base->block, geo);
	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
	dasd_put_device(base);
	return 0;
}
const struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_ioctl,
	.getgeo		= dasd_getgeo,
	.set_read_only	= dasd_set_read_only,
};

/*******************************************************************************
 * end of block device operations
 */

static void dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	kmem_cache_destroy(dasd_page_cache);
	dasd_page_cache = NULL;
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
	dasd_statistics_removeroot();
}
/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Is the device read-only?
 * Note that this function does not report the setting of the
 * readonly device attribute, but how it is configured in z/VM.
 */
int dasd_device_is_ro(struct dasd_device *device)
{
	struct ccw_dev_id dev_id;
	struct diag210 diag_data;
	int rc;

	if (!MACHINE_IS_VM)
		return 0;
	ccw_device_get_id(device->cdev, &dev_id);
	memset(&diag_data, 0, sizeof(diag_data));
	diag_data.vrdcdvno = dev_id.devno;
	diag_data.vrdclen = sizeof(diag_data);
	rc = diag210(&diag_data);
	if (rc == 0 || rc == 2) {
		return diag_data.vrdcvfla & 0x80;
	} else {
		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
			  dev_id.devno, rc);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(dasd_device_is_ro);
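/*
 * Illustrative use (the exact call site varies by discipline): a
 * discipline can mirror the z/VM read-only setting into the device flags
 * during setup, e.g.
 *
 *	if (dasd_device_is_ro(device))
 *		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
 *
 * which dasd_open() above then honors for FMODE_WRITE openers.
 */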
static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		pr_warn("%s: Setting the DASD online failed with rc=%d\n",
			dev_name(&cdev->dev), ret);
}
/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev)
{
	int ret;

	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_generic_probe: could not add "
				"sysfs entries");
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
		async_schedule(dasd_generic_auto_online, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_probe);
void dasd_generic_free_discipline(struct dasd_device *device)
{
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
		device->discipline = NULL;
	}
	if (device->base_discipline) {
		module_put(device->base_discipline->owner);
		device->base_discipline = NULL;
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	cdev->handler = NULL;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device)) {
		dasd_remove_sysfs_files(cdev);
		return;
	}
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		dasd_remove_sysfs_files(cdev);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);

	dasd_remove_sysfs_files(cdev);
}
EXPORT_SYMBOL_GPL(dasd_generic_remove);
/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			/* Try to load the required module. */
			rc = request_module(DASD_DIAG_MOD);
			if (rc) {
				pr_warn("%s Setting the DASD online failed "
					"because the required module %s "
					"could not be loaded (rc=%d)\n",
					dev_name(&cdev->dev), DASD_DIAG_MOD,
					rc);
				dasd_delete_device(device);
				return -ENODEV;
			}
		}
		/* Module init could have failed, so check again here after
		 * request_module(). */
		if (!dasd_diag_discipline_pointer) {
			pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
				dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
			dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
			dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));

	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count, rc;
	unsigned long flags;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return PTR_ERR(device);
	}

	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warn("%s: The DASD cannot be set offline with open count %i\n",
					dev_name(&cdev->dev), open_count);
			else
				pr_warn("%s: The DASD cannot be set offline while it is in use\n",
					dev_name(&cdev->dev));
			rc = -EBUSY;
			goto out_err;
		}
	}

	/*
	 * Test if the offline processing is already running and exit if so.
	 * If a safe offline is being processed this could only be a normal
	 * offline that should be able to overtake the safe offline and
	 * cancel any I/O we do not want to wait for any longer
	 */
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
				  &device->flags);
		} else {
			rc = -EBUSY;
			goto out_err;
		}
	}
	set_bit(DASD_FLAG_OFFLINE, &device->flags);

	/*
	 * if safe_offline is called set safe_offline_running flag and
	 * clear safe_offline so that a call to normal offline
	 * can overrun safe_offline processing
	 */
	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/* need to unlock here to wait for outstanding I/O */
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		/*
		 * If we want to set the device safe offline all IO operations
		 * should be finished before continuing the offline process
		 * so sync bdev first and then wait for our queues to become
		 * empty
		 */
		if (device->block) {
			rc = fsync_bdev(device->block->bdev);
			if (rc != 0)
				goto interrupted;
		}
		dasd_schedule_device_bh(device);
		rc = wait_event_interruptible(shutdown_waitq,
					      _wait_for_empty_queues(device));
		if (rc != 0)
			goto interrupted;

		/*
		 * check if a normal offline process overtook the offline
		 * processing in this case simply do nothing beside returning
		 * that we got interrupted
		 * otherwise mark safe offline as not running any longer and
		 * continue with normal offline
		 */
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			rc = -ERESTARTSYS;
			goto out_err;
		}
		clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);

	return 0;

interrupted:
	/* interrupted by signal */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
out_err:
	dasd_put_device(device);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
int dasd_generic_last_path_gone(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	dev_warn(&device->cdev->dev, "No operational channel path is left "
		 "for the device\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
	/* First of all call extended error reporting. */
	dasd_eer_write(device, NULL, DASD_EER_NOPATH);

	if (device->state < DASD_STATE_BASIC)
		return 0;
	/* Device is active. We want to keep it. */
	list_for_each_entry(cqr, &device->ccw_queue, devlist)
		if ((cqr->status == DASD_CQR_IN_IO) ||
		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries++;
		}
	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
int dasd_generic_path_operational(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "A channel path to the device has become "
		 "operational\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}

	if (!device->stopped)
		wake_up(&generic_waitq);

	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		dasd_path_no_path(device);
		ret = dasd_generic_last_path_gone(device);
		break;
	case CIO_OPER:
		ret = 1;
		if (dasd_path_get_opm(device))
			ret = dasd_generic_path_operational(device);
		break;
	}
	dasd_put_device(device);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_notify);
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	struct dasd_device *device;
	int chp, oldopm, hpfpm, ifccpm;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;

	oldopm = dasd_path_get_opm(device);
	for (chp = 0; chp < 8; chp++) {
		if (path_event[chp] & PE_PATH_GONE) {
			dasd_path_notoper(device, chp);
		}
		if (path_event[chp] & PE_PATH_AVAILABLE) {
			dasd_path_available(device, chp);
			dasd_schedule_device_bh(device);
		}
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
			if (!dasd_path_is_operational(device, chp) &&
			    !dasd_path_need_verify(device, chp)) {
				/*
				 * we can not establish a pathgroup on an
				 * unavailable path, so trigger a path
				 * verification first
				 */
				dasd_path_available(device, chp);
				dasd_schedule_device_bh(device);
			}
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
		if (path_event[chp] & PE_PATH_FCES_EVENT) {
			dasd_path_fcsec_update(device, chp);
			dasd_schedule_device_bh(device);
		}
	}
	hpfpm = dasd_path_get_hpfpm(device);
	ifccpm = dasd_path_get_ifccpm(device);
	if (!dasd_path_get_opm(device) && hpfpm) {
		/*
		 * device has no operational paths but at least one path is
		 * disabled due to HPF errors
		 * disable HPF at all and use the path(s) again
		 */
		if (device->discipline->disable_hpf)
			device->discipline->disable_hpf(device);
		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
		dasd_path_set_tbvpm(device, hpfpm);
		dasd_schedule_device_bh(device);
		dasd_schedule_requeue(device);
	} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * device has no operational paths but at least one path is
		 * disabled due to IFCC errors
		 * trigger path verification on paths with IFCC errors
		 */
		dasd_path_set_tbvpm(device, ifccpm);
		dasd_schedule_device_bh(device);
	}
	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
		dev_warn(&device->cdev->dev,
			 "No verified channel paths remain for the device\n");
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "%s", "last verified path gone");
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
		dasd_device_set_stop_bits(device,
					  DASD_STOPPED_DC_WAIT);
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);
int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!dasd_path_get_opm(device) && lpm) {
		dasd_path_set_opm(device, lpm);
		dasd_generic_path_operational(device);
	} else
		dasd_path_add_opm(device, lpm);

	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
void dasd_generic_space_exhaust(struct dasd_device *device,
				struct dasd_ccw_req *cqr)
{
	dasd_eer_write(device, NULL, DASD_EER_NOSPC);

	if (device->state < DASD_STATE_BASIC)
		return;

	if (cqr->status == DASD_CQR_IN_IO ||
	    cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_QUEUED;
		cqr->retries++;
	}
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
void dasd_generic_space_avail(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "Extent pool space is available\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");

	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_schedule_device_bh(device);

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue, true);
	}
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
/*
 * clear active requests and requeue them to block layer if possible
 */
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
	struct list_head requeue_queue;
	struct dasd_ccw_req *cqr, *n;
	struct dasd_ccw_req *refers;
	int rc;

	INIT_LIST_HEAD(&requeue_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &requeue_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));

		/*
		 * requeueing to the block layer will only work
		 * for block device requests
		 */
		if (_dasd_requeue_request(cqr))
			continue;

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}

		/*
		 * _dasd_requeue_request already checked for a valid
		 * blockdevice, no need to check again
		 * all erp requests (cqr->refers) have a cqr->block
		 * pointer copy from the original cqr
		 */
		list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}

	/*
	 * if requests remain then they are internal requests
	 * and go back to the device queue
	 */
	if (!list_empty(&requeue_queue)) {
		/* move freeze_queue to start of the ccw_queue */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		list_splice_tail(&requeue_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	dasd_schedule_device_bh(device);
	return rc;
}
static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed*/
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t) cqr->data;
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
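/*
 * Illustrative caller sketch (buffer names hypothetical): a discipline
 * typically reads its device characteristics into a private buffer during
 * device setup, e.g.
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
 *					 &private->rdc_data,
 *					 sizeof(private->rdc_data));
 *
 * The buffer contents are only valid if the function returned 0.
 */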
/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
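/*
 * Callers treat the returned pointer as a 32 byte sense buffer, e.g.
 *
 *	sense = dasd_get_sense(&cqr->irb);
 *	if (sense && (sense[1] & SNS1_FILE_PROTECTED))
 *		(handle the expected fault)
 *
 * as done in _dasd_sleep_on_queue() above; a NULL return means no sense
 * data is available.
 */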
void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);