/*
 * PAV alias management for the DASD ECKD discipline
 *
 * Copyright IBM Corporation, 2007
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/list.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif                          /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
/*
 * General concept of alias management:
 * - PAV and DASD alias management is specific to the eckd discipline.
 * - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
 * - The dasd_alias_add_device / dasd_alias_remove_device
 *   functions mark the point when a device is 'ready for service'.
 * - A summary unit check is a rare occasion, but it is mandatory to
 *   support it. It requires some complex recovery actions before the
 *   devices can be used again (see dasd_alias_handle_summary_unit_check).
 * - dasd_alias_get_start_dev will find an alias device that can be used
 *   instead of the base device and does some (very simple) load balancing.
 *   This is the function that gets called for each I/O, so when improving
 *   something, this function should get faster or better, the rest has just
 *   to be correct.
 *   (A typical call sequence is sketched in the comment below.)
 */
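/*
 * Typical call sequence, as seen from the eckd discipline (illustrative
 * sketch only; the actual callers live in dasd_eckd.c):
 *
 *      dasd_alias_make_device_known_to_lcu(device);    device is sensed
 *      dasd_alias_add_device(device);                  device ready for service
 *              ...
 *      startdev = dasd_alias_get_start_dev(base);      per I/O, may return NULL
 *              ...
 *      dasd_alias_remove_device(device);               device leaves service
 *      dasd_alias_disconnect_device_from_lcu(device);  before device deletion
 */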
static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

static struct alias_root aliastree = {
        .serverlist = LIST_HEAD_INIT(aliastree.serverlist),
        .lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
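/*
 * Lookup helpers for the static alias tree: a storage server is identified
 * by vendor and serial, an lcu within a server by its ssid, and a PAV group
 * within an lcu by its base unit address and vduit (in HyperPAV mode there
 * is only a single group per lcu). Callers serialize against aliastree.lock
 * respectively the lcu lock.
 */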
static struct alias_server *_find_server(struct dasd_uid *uid)
{
        struct alias_server *pos;
        list_for_each_entry(pos, &aliastree.serverlist, server) {
                if (!strncmp(pos->uid.vendor, uid->vendor,
                             sizeof(uid->vendor))
                    && !strncmp(pos->uid.serial, uid->serial,
                                sizeof(uid->serial)))
                        return pos;
        };
        return NULL;
}
static struct alias_lcu *_find_lcu(struct alias_server *server,
                                   struct dasd_uid *uid)
{
        struct alias_lcu *pos;
        list_for_each_entry(pos, &server->lculist, lcu) {
                if (pos->uid.ssid == uid->ssid)
                        return pos;
        };
        return NULL;
}
static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
                                           struct dasd_uid *uid)
{
        struct alias_pav_group *pos;
        __u8 search_unit_addr;

        /* for hyper pav there is only one group */
        if (lcu->pav == HYPER_PAV) {
                if (list_empty(&lcu->grouplist))
                        return NULL;
                else
                        return list_first_entry(&lcu->grouplist,
                                                struct alias_pav_group, group);
        }

        /* for base pav we have to find the group that matches the base */
        if (uid->type == UA_BASE_DEVICE)
                search_unit_addr = uid->real_unit_addr;
        else
                search_unit_addr = uid->base_unit_addr;
        list_for_each_entry(pos, &lcu->grouplist, group) {
                if (pos->uid.base_unit_addr == search_unit_addr &&
                    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
                        return pos;
        };
        return NULL;
}
static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
        struct alias_server *server;

        server = kzalloc(sizeof(*server), GFP_KERNEL);
        if (!server)
                return ERR_PTR(-ENOMEM);
        memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
        memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
        INIT_LIST_HEAD(&server->server);
        INIT_LIST_HEAD(&server->lculist);
        return server;
}
static void _free_server(struct alias_server *server)
{
        kfree(server);
}
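/*
 * Allocate an lcu together with the buffers it needs later on: the unit
 * address configuration (lcu->uac) and a preallocated 'reset summary unit
 * check' request (lcu->rsu_cqr, one CCW plus 16 bytes of data), so that no
 * memory allocation is needed when a summary unit check has to be handled.
 * The lcu starts out with NEED_UAC_UPDATE and UPDATE_PENDING set.
 */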
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
        struct alias_lcu *lcu;

        lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
        if (!lcu)
                return ERR_PTR(-ENOMEM);
        lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
        if (!lcu->uac)
                goto out_err1;
        lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
        if (!lcu->rsu_cqr)
                goto out_err2;
        lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
                                       GFP_KERNEL | GFP_DMA);
        if (!lcu->rsu_cqr->cpaddr)
                goto out_err3;
        lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
        if (!lcu->rsu_cqr->data)
                goto out_err4;

        memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
        memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
        lcu->uid.ssid = uid->ssid;
        lcu->pav = NO_PAV;
        lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
        INIT_LIST_HEAD(&lcu->lcu);
        INIT_LIST_HEAD(&lcu->inactive_devices);
        INIT_LIST_HEAD(&lcu->active_devices);
        INIT_LIST_HEAD(&lcu->grouplist);
        INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
        INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
        spin_lock_init(&lcu->lock);
        return lcu;

out_err4:
        kfree(lcu->rsu_cqr->cpaddr);
out_err3:
        kfree(lcu->rsu_cqr);
out_err2:
        kfree(lcu->uac);
out_err1:
        kfree(lcu);
        return ERR_PTR(-ENOMEM);
}
static void _free_lcu(struct alias_lcu *lcu)
{
        kfree(lcu->rsu_cqr->data);
        kfree(lcu->rsu_cqr->cpaddr);
        kfree(lcu->rsu_cqr);
        kfree(lcu->uac);
        kfree(lcu);
}
/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * If the return value is 1, the lcu was already known before, if it
 * is 0, this is a new lcu.
 * Negative return code indicates that something went wrong (e.g. -ENOMEM)
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        unsigned long flags;
        struct alias_server *server, *newserver;
        struct alias_lcu *lcu, *newlcu;
        int is_lcu_known;
        struct dasd_uid *uid;

        private = (struct dasd_eckd_private *) device->private;
        uid = &private->uid;
        spin_lock_irqsave(&aliastree.lock, flags);
        is_lcu_known = 1;
        server = _find_server(uid);
        if (!server) {
                spin_unlock_irqrestore(&aliastree.lock, flags);
                newserver = _allocate_server(uid);
                if (IS_ERR(newserver))
                        return PTR_ERR(newserver);
                spin_lock_irqsave(&aliastree.lock, flags);
                server = _find_server(uid);
                if (!server) {
                        list_add(&newserver->server, &aliastree.serverlist);
                        server = newserver;
                        is_lcu_known = 0;
                } else {
                        /* someone was faster */
                        _free_server(newserver);
                }
        }

        lcu = _find_lcu(server, uid);
        if (!lcu) {
                spin_unlock_irqrestore(&aliastree.lock, flags);
                newlcu = _allocate_lcu(uid);
                if (IS_ERR(newlcu))
                        return PTR_ERR(newlcu);
                spin_lock_irqsave(&aliastree.lock, flags);
                lcu = _find_lcu(server, uid);
                if (!lcu) {
                        list_add(&newlcu->lcu, &server->lculist);
                        lcu = newlcu;
                        is_lcu_known = 0;
                } else {
                        /* someone was faster */
                        _free_lcu(newlcu);
                }
        }
        spin_lock(&lcu->lock);
        list_add(&device->alias_list, &lcu->inactive_devices);
        private->lcu = lcu;
        spin_unlock(&lcu->lock);
        spin_unlock_irqrestore(&aliastree.lock, flags);

        return is_lcu_known;
}
/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        unsigned long flags;
        struct alias_lcu *lcu;
        struct alias_server *server;
        int was_pending;

        private = (struct dasd_eckd_private *) device->private;
        lcu = private->lcu;
        spin_lock_irqsave(&lcu->lock, flags);
        list_del_init(&device->alias_list);
        /* make sure that the workers don't use this device */
        if (device == lcu->suc_data.device) {
                spin_unlock_irqrestore(&lcu->lock, flags);
                cancel_work_sync(&lcu->suc_data.worker);
                spin_lock_irqsave(&lcu->lock, flags);
                if (device == lcu->suc_data.device)
                        lcu->suc_data.device = NULL;
        }
        was_pending = 0;
        if (device == lcu->ruac_data.device) {
                spin_unlock_irqrestore(&lcu->lock, flags);
                was_pending = 1;
                cancel_delayed_work_sync(&lcu->ruac_data.dwork);
                spin_lock_irqsave(&lcu->lock, flags);
                if (device == lcu->ruac_data.device)
                        lcu->ruac_data.device = NULL;
        }
        private->lcu = NULL;
        spin_unlock_irqrestore(&lcu->lock, flags);

        spin_lock_irqsave(&aliastree.lock, flags);
        spin_lock(&lcu->lock);
        if (list_empty(&lcu->grouplist) &&
            list_empty(&lcu->active_devices) &&
            list_empty(&lcu->inactive_devices)) {
                list_del(&lcu->lcu);
                spin_unlock(&lcu->lock);
                _free_lcu(lcu);
                lcu = NULL;
        } else {
                if (was_pending)
                        _schedule_lcu_update(lcu, NULL);
                spin_unlock(&lcu->lock);
        }
        server = _find_server(&private->uid);
        if (server && list_empty(&server->lculist)) {
                list_del(&server->server);
                _free_server(server);
        }
        spin_unlock_irqrestore(&aliastree.lock, flags);
}
/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 */
static int _add_device_to_lcu(struct alias_lcu *lcu,
                              struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_pav_group *group;
        struct dasd_uid *uid;

        private = (struct dasd_eckd_private *) device->private;
        uid = &private->uid;
        uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
        uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
        dasd_set_uid(device->cdev, &private->uid);

        /* if we have no PAV anyway, we don't need to bother with PAV groups */
        if (lcu->pav == NO_PAV) {
                list_move(&device->alias_list, &lcu->active_devices);
                return 0;
        }

        group = _find_group(lcu, uid);
        if (!group) {
                group = kzalloc(sizeof(*group), GFP_ATOMIC);
                if (!group)
                        return -ENOMEM;
                memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
                memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
                group->uid.ssid = uid->ssid;
                if (uid->type == UA_BASE_DEVICE)
                        group->uid.base_unit_addr = uid->real_unit_addr;
                else
                        group->uid.base_unit_addr = uid->base_unit_addr;
                memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
                INIT_LIST_HEAD(&group->group);
                INIT_LIST_HEAD(&group->baselist);
                INIT_LIST_HEAD(&group->aliaslist);
                list_add(&group->group, &lcu->grouplist);
        }
        if (uid->type == UA_BASE_DEVICE)
                list_move(&device->alias_list, &group->baselist);
        else
                list_move(&device->alias_list, &group->aliaslist);
        private->pavgroup = group;
        return 0;
};
static void _remove_device_from_lcu(struct alias_lcu *lcu,
                                    struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_pav_group *group;

        private = (struct dasd_eckd_private *) device->private;
        list_move(&device->alias_list, &lcu->inactive_devices);
        group = private->pavgroup;
        if (!group)
                return;
        private->pavgroup = NULL;
        if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
                list_del(&group->group);
                kfree(group);
                return;
        }
        if (group->next == device)
                group->next = NULL;
};
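/*
 * Read the unit address configuration of the lcu into lcu->uac, using a
 * two-CCW chain: a Perform Subsystem Function (PSF) that prepares for Read
 * Subsystem Data with suborder 0x0e (read unit address configuration),
 * followed by a Read Subsystem Data (RSSD) CCW that transfers the
 * configuration record. NEED_UAC_UPDATE is cleared before the request is
 * started and set again if the request fails.
 */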
static int read_unit_address_configuration(struct dasd_device *device,
                                           struct alias_lcu *lcu)
{
        struct dasd_psf_prssd_data *prssdp;
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        int rc;
        unsigned long flags;

        cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data)),
                                   device);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        cqr->startdev = device;
        cqr->memdev = device;
        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        cqr->retries = 10;
        cqr->expires = 20 * HZ;

        /* Prepare for Read Subsystem Data */
        prssdp = (struct dasd_psf_prssd_data *) cqr->data;
        memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
        prssdp->order = PSF_ORDER_PRSSD;
        prssdp->suborder = 0x0e;        /* Read unit address configuration */
        /* all other bytes of prssdp must be zero */

        ccw = cqr->cpaddr;
        ccw->cmd_code = DASD_ECKD_CCW_PSF;
        ccw->count = sizeof(struct dasd_psf_prssd_data);
        ccw->flags |= CCW_FLAG_CC;
        ccw->cda = (__u32)(addr_t) prssdp;

        /* Read Subsystem Data - unit address configuration */
        memset(lcu->uac, 0, sizeof(*(lcu->uac)));

        ccw++;
        ccw->cmd_code = DASD_ECKD_CCW_RSSD;
        ccw->count = sizeof(*(lcu->uac));
        ccw->cda = (__u32)(addr_t) lcu->uac;

        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;

        /* need to unset flag here to detect race with summary unit check */
        spin_lock_irqsave(&lcu->lock, flags);
        lcu->flags &= ~NEED_UAC_UPDATE;
        spin_unlock_irqrestore(&lcu->lock, flags);

        do {
                rc = dasd_sleep_on(cqr);
        } while (rc && (cqr->retries > 0));
        if (rc) {
                spin_lock_irqsave(&lcu->lock, flags);
                lcu->flags |= NEED_UAC_UPDATE;
                spin_unlock_irqrestore(&lcu->lock, flags);
        }
        dasd_kfree_request(cqr, cqr->memdev);
        return rc;
}
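/*
 * Rebuild the alias information of an lcu: dissolve all PAV groups and move
 * their devices back to the active_devices list, re-read the unit address
 * configuration, derive the PAV mode (NO_PAV, BASE_PAV or HYPER_PAV) from
 * the unit address types found, and finally sort the devices into groups
 * again via _add_device_to_lcu. refdev is the device used to run the I/O.
 */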
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
        unsigned long flags;
        struct alias_pav_group *pavgroup, *tempgroup;
        struct dasd_device *device, *tempdev;
        int i, rc;
        struct dasd_eckd_private *private;

        spin_lock_irqsave(&lcu->lock, flags);
        list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
                list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
                                         alias_list) {
                        list_move(&device->alias_list, &lcu->active_devices);
                        private = (struct dasd_eckd_private *) device->private;
                        private->pavgroup = NULL;
                }
                list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
                                         alias_list) {
                        list_move(&device->alias_list, &lcu->active_devices);
                        private = (struct dasd_eckd_private *) device->private;
                        private->pavgroup = NULL;
                }
                list_del(&pavgroup->group);
                kfree(pavgroup);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);

        rc = read_unit_address_configuration(refdev, lcu);
        if (rc)
                return rc;

        spin_lock_irqsave(&lcu->lock, flags);
        lcu->pav = NO_PAV;
        for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
                switch (lcu->uac->unit[i].ua_type) {
                case UA_BASE_PAV_ALIAS:
                        lcu->pav = BASE_PAV;
                        break;
                case UA_HYPER_PAV_ALIAS:
                        lcu->pav = HYPER_PAV;
                        break;
                }
                if (lcu->pav != NO_PAV)
                        break;
        }

        list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
                                 alias_list) {
                _add_device_to_lcu(lcu, device);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
        return 0;
}
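/*
 * Worker function behind lcu->ruac_data.dwork: runs _lcu_update and, if the
 * update failed or further update requests arrived in the meantime,
 * reschedules itself after 30 seconds; otherwise it clears UPDATE_PENDING.
 */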
static void lcu_update_work(struct work_struct *work)
{
        struct alias_lcu *lcu;
        struct read_uac_work_data *ruac_data;
        struct dasd_device *device;
        unsigned long flags;
        int rc;

        ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
        lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
        device = ruac_data->device;
        rc = _lcu_update(device, lcu);
        /*
         * Need to check flags again, as there could have been another
         * prepare_update or a new device while we were still
         * processing the data
         */
        spin_lock_irqsave(&lcu->lock, flags);
        if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
                            " alias data in lcu (rc = %d), retry later", rc);
                schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
        } else {
                lcu->ruac_data.device = NULL;
                lcu->flags &= ~UPDATE_PENDING;
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
}
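/*
 * Schedule the delayed worker that re-reads the unit address configuration.
 * Must be called with lcu->lock held. A device that is still connected to
 * the lcu is needed to run the I/O: prefer the given device, then the first
 * base or alias device of the first PAV group, then any active device. If
 * none is found the update is postponed until the next device is activated.
 */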
static int _schedule_lcu_update(struct alias_lcu *lcu,
                                struct dasd_device *device)
{
        struct dasd_device *usedev = NULL;
        struct alias_pav_group *group;

        lcu->flags |= NEED_UAC_UPDATE;
        if (lcu->ruac_data.device) {
                /* already scheduled or running */
                return 0;
        }
        if (device && !list_empty(&device->alias_list))
                usedev = device;

        if (!usedev && !list_empty(&lcu->grouplist)) {
                group = list_first_entry(&lcu->grouplist,
                                         struct alias_pav_group, group);
                if (!list_empty(&group->baselist))
                        usedev = list_first_entry(&group->baselist,
                                                  struct dasd_device,
                                                  alias_list);
                else if (!list_empty(&group->aliaslist))
                        usedev = list_first_entry(&group->aliaslist,
                                                  struct dasd_device,
                                                  alias_list);
        }
        if (!usedev && !list_empty(&lcu->active_devices)) {
                usedev = list_first_entry(&lcu->active_devices,
                                          struct dasd_device, alias_list);
        }
        /*
         * if we haven't found a proper device yet, give up for now, the next
         * device that will be set active will trigger an lcu update
         */
        if (!usedev)
                return -EINVAL;
        lcu->ruac_data.device = usedev;
        schedule_delayed_work(&lcu->ruac_data.dwork, 0);
        return 0;
}
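/*
 * Make a device 'ready for service': if the unit address configuration is
 * current, sort the device into its PAV group right away; otherwise park it
 * on the active_devices list and schedule an lcu update.
 */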
int dasd_alias_add_device(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_lcu *lcu;
        unsigned long flags;
        int rc;

        private = (struct dasd_eckd_private *) device->private;
        lcu = private->lcu;
        rc = 0;
        spin_lock_irqsave(&lcu->lock, flags);
        if (!(lcu->flags & UPDATE_PENDING)) {
                rc = _add_device_to_lcu(lcu, device);
                if (rc)
                        lcu->flags |= UPDATE_PENDING;
        }
        if (lcu->flags & UPDATE_PENDING) {
                list_move(&device->alias_list, &lcu->active_devices);
                _schedule_lcu_update(lcu, device);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
        return rc;
}
int dasd_alias_remove_device(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_lcu *lcu;
        unsigned long flags;

        private = (struct dasd_eckd_private *) device->private;
        lcu = private->lcu;
        spin_lock_irqsave(&lcu->lock, flags);
        _remove_device_from_lcu(lcu, device);
        spin_unlock_irqrestore(&lcu->lock, flags);
        return 0;
}
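/*
 * Select the device that should be used to start the next request for the
 * given base device. Aliases of the base device's PAV group are used round
 * robin via group->next; an alias is only returned if it currently has
 * fewer queued requests than the base device and is not stopped, otherwise
 * NULL is returned and the caller falls back to the base device.
 */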
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
        struct dasd_device *alias_device;
        struct alias_pav_group *group;
        struct alias_lcu *lcu;
        struct dasd_eckd_private *private, *alias_priv;
        unsigned long flags;

        private = (struct dasd_eckd_private *) base_device->private;
        group = private->pavgroup;
        lcu = private->lcu;
        if (!group || !lcu)
                return NULL;
        if (lcu->pav == NO_PAV ||
            lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
                return NULL;

        spin_lock_irqsave(&lcu->lock, flags);
        alias_device = group->next;
        if (!alias_device) {
                if (list_empty(&group->aliaslist)) {
                        spin_unlock_irqrestore(&lcu->lock, flags);
                        return NULL;
                } else {
                        alias_device = list_first_entry(&group->aliaslist,
                                                        struct dasd_device,
                                                        alias_list);
                }
        }
        if (list_is_last(&alias_device->alias_list, &group->aliaslist))
                group->next = list_first_entry(&group->aliaslist,
                                               struct dasd_device, alias_list);
        else
                group->next = list_first_entry(&alias_device->alias_list,
                                               struct dasd_device, alias_list);
        spin_unlock_irqrestore(&lcu->lock, flags);
        alias_priv = (struct dasd_eckd_private *) alias_device->private;
        if ((alias_priv->count < private->count) && !alias_device->stopped)
                return alias_device;
        else
                return NULL;
}
/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather than in dasd_eckd.c
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
                                    struct dasd_device *device,
                                    char reason)
{
        struct dasd_ccw_req *cqr;
        int rc = 0;
        struct ccw1 *ccw;

        cqr = lcu->rsu_cqr;
        strncpy((char *) &cqr->magic, "ECKD", 4);
        ASCEBC((char *) &cqr->magic, 4);
        ccw = cqr->cpaddr;
        ccw->cmd_code = DASD_ECKD_CCW_RSCK;
        ccw->flags = 0;
        ccw->count = 16;
        ccw->cda = (__u32)(addr_t) cqr->data;
        ((char *)cqr->data)[0] = reason;

        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        cqr->retries = 255;     /* set retry counter to enable basic ERP */
        cqr->startdev = device;
        cqr->memdev = device;
        cqr->block = NULL;
        cqr->expires = 5 * HZ;
        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;

        rc = dasd_sleep_on_immediatly(cqr);
        return rc;
}
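/*
 * Kick the block and device tasklets of all base devices on the lcu so that
 * requests that were on hold during summary unit check handling get started
 * again.
 */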
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *device;
        struct dasd_eckd_private *private;

        /* active and inactive list can contain alias as well as base devices */
        list_for_each_entry(device, &lcu->active_devices, alias_list) {
                private = (struct dasd_eckd_private *) device->private;
                if (private->uid.type != UA_BASE_DEVICE)
                        continue;
                dasd_schedule_block_bh(device->block);
                dasd_schedule_device_bh(device);
        }
        list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
                private = (struct dasd_eckd_private *) device->private;
                if (private->uid.type != UA_BASE_DEVICE)
                        continue;
                dasd_schedule_block_bh(device->block);
                dasd_schedule_device_bh(device);
        }
        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_for_each_entry(device, &pavgroup->baselist, alias_list) {
                        dasd_schedule_block_bh(device->block);
                        dasd_schedule_device_bh(device);
                }
        }
}
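/*
 * Flush all requests that are queued on alias devices of this lcu and move
 * those devices to the active_devices list, so that the subsequent lcu
 * update can sort them into PAV groups again.
 */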
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *device, *temp;
        struct dasd_eckd_private *private;
        int rc;
        unsigned long flags;
        LIST_HEAD(active);

        /*
         * Problem here is that dasd_flush_device_queue may wait
         * for termination of a request to complete. We can't keep
         * the lcu lock during that time, so we must assume that
         * the lists may have changed.
         * Idea: first gather all active alias devices in a separate list,
         * then flush the first element of this list unlocked, and afterwards
         * check if it is still on the list before moving it to the
         * active_devices list.
         */

        spin_lock_irqsave(&lcu->lock, flags);
        list_for_each_entry_safe(device, temp, &lcu->active_devices,
                                 alias_list) {
                private = (struct dasd_eckd_private *) device->private;
                if (private->uid.type == UA_BASE_DEVICE)
                        continue;
                list_move(&device->alias_list, &active);
        }

        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_splice_init(&pavgroup->aliaslist, &active);
        }
        while (!list_empty(&active)) {
                device = list_first_entry(&active, struct dasd_device,
                                          alias_list);
                spin_unlock_irqrestore(&lcu->lock, flags);
                rc = dasd_flush_device_queue(device);
                spin_lock_irqsave(&lcu->lock, flags);
                /*
                 * only move device around if it wasn't moved away while we
                 * were waiting for the flush
                 */
                if (device == list_first_entry(&active,
                                               struct dasd_device, alias_list))
                        list_move(&device->alias_list, &lcu->active_devices);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
}
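/*
 * Set the DASD_STOPPED_SU bit for one device. 'device' is the device whose
 * cdev lock is already held by the interrupt handler, so the lock is only
 * taken here when 'pos' is a different device.
 */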
static void __stop_device_on_lcu(struct dasd_device *device,
                                 struct dasd_device *pos)
{
        /* If pos == device then device is already locked! */
        if (pos == device) {
                pos->stopped |= DASD_STOPPED_SU;
                return;
        }
        spin_lock(get_ccwdev_lock(pos->cdev));
        pos->stopped |= DASD_STOPPED_SU;
        spin_unlock(get_ccwdev_lock(pos->cdev));
}
/*
 * This function is called in interrupt context, so the
 * cdev lock for device is already locked!
 */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
                                     struct dasd_device *device)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *pos;

        list_for_each_entry(pos, &lcu->active_devices, alias_list)
                __stop_device_on_lcu(device, pos);
        list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
                __stop_device_on_lcu(device, pos);
        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_for_each_entry(pos, &pavgroup->baselist, alias_list)
                        __stop_device_on_lcu(device, pos);
                list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
                        __stop_device_on_lcu(device, pos);
        }
}
static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *device;
        unsigned long flags;

        list_for_each_entry(device, &lcu->active_devices, alias_list) {
                spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                device->stopped &= ~DASD_STOPPED_SU;
                spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        }

        list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
                spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                device->stopped &= ~DASD_STOPPED_SU;
                spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        }

        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_for_each_entry(device, &pavgroup->baselist, alias_list) {
                        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                        device->stopped &= ~DASD_STOPPED_SU;
                        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
                                               flags);
                }
                list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
                        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                        device->stopped &= ~DASD_STOPPED_SU;
                        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
                                               flags);
                }
        }
}
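/*
 * Worker function behind lcu->suc_data.worker: flush the alias devices,
 * send the 'reset summary unit check' request on the device that received
 * the unit check, unstop and restart all devices of the lcu and finally
 * schedule an lcu update to read the new alias configuration.
 */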
static void summary_unit_check_handling_work(struct work_struct *work)
{
        struct alias_lcu *lcu;
        struct summary_unit_check_work_data *suc_data;
        unsigned long flags;
        struct dasd_device *device;

        suc_data = container_of(work, struct summary_unit_check_work_data,
                                worker);
        lcu = container_of(suc_data, struct alias_lcu, suc_data);
        device = suc_data->device;

        /* 1. flush alias devices */
        flush_all_alias_devices_on_lcu(lcu);

        /* 2. reset summary unit check */
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        reset_summary_unit_check(lcu, device, suc_data->reason);

        spin_lock_irqsave(&lcu->lock, flags);
        _unstop_all_devices_on_lcu(lcu);
        _restart_all_base_devices_on_lcu(lcu);
        /* 3. read new alias configuration */
        _schedule_lcu_update(lcu, device);
        lcu->suc_data.device = NULL;
        spin_unlock_irqrestore(&lcu->lock, flags);
}
/*
 * note: this will be called from int handler context (cdev locked)
 */
void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
                                          struct irb *irb)
{
        struct alias_lcu *lcu;
        char reason;
        struct dasd_eckd_private *private;
        char *sense;

        private = (struct dasd_eckd_private *) device->private;

        sense = dasd_get_sense(irb);
        if (sense) {
                reason = sense[8];
                DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
                              "eckd handle summary unit check: reason", reason);
        } else {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "eckd handle summary unit check:"
                              " no reason code available");
                return;
        }

        lcu = private->lcu;
        if (!lcu) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "device not ready to handle summary"
                              " unit check (no lcu structure)");
                return;
        }
        spin_lock(&lcu->lock);
        _stop_all_devices_on_lcu(lcu, device);
        /* prepare for lcu_update */
        private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
        /* If this device is about to be removed just return and wait for
         * the next interrupt on a different device
         */
        if (list_empty(&device->alias_list)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "device is in offline processing,"
                              " don't do summary unit check handling");
                spin_unlock(&lcu->lock);
                return;
        }
        if (lcu->suc_data.device) {
                /* already scheduled or running */
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "previous instance of summary unit check worker"
                              " still pending");
                spin_unlock(&lcu->lock);
                return;
        }
        lcu->suc_data.reason = reason;
        lcu->suc_data.device = device;
        spin_unlock(&lcu->lock);
        schedule_work(&lcu->suc_data.worker);
}