/*
 * PAV alias management for the DASD ECKD discipline
 *
 * Copyright IBM Corporation, 2007
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/list.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif	/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

/*
 * General concept of alias management:
 * - PAV and DASD alias management is specific to the eckd discipline.
 * - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
 * - The dasd_alias_add_device / dasd_alias_remove_device
 *   functions mark the point when a device is 'ready for service'.
 * - A summary unit check is a rare occasion, but it is mandatory to
 *   support it. It requires some complex recovery actions before the
 *   devices can be used again (see dasd_alias_handle_summary_unit_check).
 * - dasd_alias_get_start_dev will find an alias device that can be used
 *   instead of the base device and does some (very simple) load balancing.
 *   This is the function that gets called for each I/O, so when improving
 *   something, this function should get faster or better; the rest just has
 *   to be correct.
 */
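
/*
 * A rough lifecycle sketch (illustrative only, derived from the description
 * above; error handling and locking are omitted):
 *
 *	dasd_alias_make_device_known_to_lcu(device);	device detected
 *	dasd_alias_add_device(device);			ready for service
 *	stdev = dasd_alias_get_start_dev(device);	for each I/O
 *	dasd_alias_remove_device(device);		leaving service
 *	dasd_alias_disconnect_device_from_lcu(device);	before deletion
 */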

static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};

static struct alias_server *_find_server(struct dasd_uid *uid)
{
	struct alias_server *pos;
	list_for_each_entry(pos, &aliastree.serverlist, server) {
		if (!strncmp(pos->uid.vendor, uid->vendor,
			     sizeof(uid->vendor))
		    && !strncmp(pos->uid.serial, uid->serial,
				sizeof(uid->serial)))
			return pos;
	}
	return NULL;
}

static struct alias_lcu *_find_lcu(struct alias_server *server,
				   struct dasd_uid *uid)
{
	struct alias_lcu *pos;
	list_for_each_entry(pos, &server->lculist, lcu) {
		if (pos->uid.ssid == uid->ssid)
			return pos;
	}
	return NULL;
}

static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
					   struct dasd_uid *uid)
{
	struct alias_pav_group *pos;
	__u8 search_unit_addr;

	/* for hyper pav there is only one group */
	if (lcu->pav == HYPER_PAV) {
		if (list_empty(&lcu->grouplist))
			return NULL;
		else
			return list_first_entry(&lcu->grouplist,
						struct alias_pav_group, group);
	}

	/* for base pav we have to find the group that matches the base */
	if (uid->type == UA_BASE_DEVICE)
		search_unit_addr = uid->real_unit_addr;
	else
		search_unit_addr = uid->base_unit_addr;
	list_for_each_entry(pos, &lcu->grouplist, group) {
		if (pos->uid.base_unit_addr == search_unit_addr &&
		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
			return pos;
	}
	return NULL;
}

static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
	struct alias_server *server;

	server = kzalloc(sizeof(*server), GFP_KERNEL);
	if (!server)
		return ERR_PTR(-ENOMEM);
	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
	INIT_LIST_HEAD(&server->server);
	INIT_LIST_HEAD(&server->lculist);
	return server;
}

static void _free_server(struct alias_server *server)
{
	kfree(server);
}

static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}

static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}

/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * If the return value is 1, the lcu was already known before; if it
 * is 0, this is a new lcu.
 * A negative return code indicates that something went wrong (e.g. -ENOMEM).
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	int is_lcu_known;
	struct dasd_uid uid;

	private = (struct dasd_eckd_private *) device->private;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	is_lcu_known = 1;
	server = _find_server(&uid);
	if (!server) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
			is_lcu_known = 0;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
			is_lcu_known = 0;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
	}
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return is_lcu_known;
}
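
/*
 * Minimal caller sketch (illustrative only, not the actual eckd caller):
 * the return value distinguishes a new lcu from an already known one,
 * while negative values report errors.
 *
 *	is_known = dasd_alias_make_device_known_to_lcu(device);
 *	if (is_known < 0)
 *		return is_known;	(e.g. -ENOMEM)
 *	if (!is_known)
 *		...			(first device of a new lcu)
 */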

/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	list_del_init(&device->alias_list);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device)
			lcu->suc_data.device = NULL;
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device)
			lcu->ruac_data.device = NULL;
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}

/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 */
static int _add_device_to_lcu(struct alias_lcu *lcu,
			      struct dasd_device *device,
			      struct dasd_device *pos)
{
	struct dasd_eckd_private *private;
	struct alias_pav_group *group;
	struct dasd_uid uid;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;

	/* only lock if not already locked */
	if (device != pos)
		spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
					 CDEV_NESTED_SECOND);
	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
	private->uid.base_unit_addr =
		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
	uid = private->uid;
	if (device != pos)
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* if we have no PAV anyway, we don't need to bother with PAV groups */
	if (lcu->pav == NO_PAV) {
		list_move(&device->alias_list, &lcu->active_devices);
		return 0;
	}

	group = _find_group(lcu, &uid);
	if (!group) {
		group = kzalloc(sizeof(*group), GFP_ATOMIC);
		if (!group)
			return -ENOMEM;
		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
		group->uid.ssid = uid.ssid;
		if (uid.type == UA_BASE_DEVICE)
			group->uid.base_unit_addr = uid.real_unit_addr;
		else
			group->uid.base_unit_addr = uid.base_unit_addr;
		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
		INIT_LIST_HEAD(&group->group);
		INIT_LIST_HEAD(&group->baselist);
		INIT_LIST_HEAD(&group->aliaslist);
		list_add(&group->group, &lcu->grouplist);
	}
	if (uid.type == UA_BASE_DEVICE)
		list_move(&device->alias_list, &group->baselist);
	else
		list_move(&device->alias_list, &group->aliaslist);
	private->pavgroup = group;
	return 0;
}

static void _remove_device_from_lcu(struct alias_lcu *lcu,
				    struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_pav_group *group;

	private = (struct dasd_eckd_private *) device->private;
	list_move(&device->alias_list, &lcu->inactive_devices);
	group = private->pavgroup;
	if (!group)
		return;
	private->pavgroup = NULL;
	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
		list_del(&group->group);
		kfree(group);
		return;
	}
	if (group->next == device)
		group->next = NULL;
}

static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long flags;
	int rc;

	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - unit address configuration */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = (__u32)(addr_t) lcu->uac;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	do {
		rc = dasd_sleep_on(cqr);
	} while (rc && (cqr->retries > 0));
	if (rc) {
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
	dasd_kfree_request(cqr, cqr->memdev);
	return rc;
}

static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	struct dasd_eckd_private *private;
	unsigned long flags;
	int i, rc;

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = (struct dasd_eckd_private *) device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	/* need to take cdev lock before lcu lock */
	spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
				 CDEV_NESTED_FIRST);
	spin_lock(&lcu->lock);
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
	return 0;
}

static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			    " alias data in lcu (rc = %d), retry later", rc);
		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
	} else {
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}

static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	lcu->ruac_data.device = usedev;
	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
	return 0;
}

int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	rc = 0;

	/* need to take cdev lock before lcu lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	spin_lock(&lcu->lock);
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return rc;
}

int dasd_alias_update_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	private->lcu->flags |= UPDATE_PENDING;
	return dasd_alias_add_device(device);
}

int dasd_alias_remove_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct alias_lcu *lcu;
	unsigned long flags;

	private = (struct dasd_eckd_private *) device->private;
	lcu = private->lcu;
	/* nothing to do if already removed */
	if (!lcu)
		return 0;
	spin_lock_irqsave(&lcu->lock, flags);
	_remove_device_from_lcu(lcu, device);
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}

struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	struct alias_lcu *lcu;
	struct dasd_eckd_private *private, *alias_priv;
	unsigned long flags;

	private = (struct dasd_eckd_private *) base_device->private;
	group = private->pavgroup;
	lcu = private->lcu;
	if (!group || !lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV enabled but prefix not, very unlikely
		 * seems to be a lost pathgroup
		 * use base device to do IO
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}

	spin_lock_irqsave(&lcu->lock, flags);
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = (struct dasd_eckd_private *) alias_device->private;
	if ((alias_priv->count < private->count) && !alias_device->stopped)
		return alias_device;
	else
		return NULL;
}
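
/*
 * Typical use when starting an I/O (sketch only, the real caller in
 * dasd_eckd.c may differ): try an alias first, fall back to the base
 * device if no suitable alias is available.
 *
 *	startdev = dasd_alias_get_start_dev(base_device);
 *	if (!startdev)
 *		startdev = base_device;
 */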

/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather than in dasd_eckd.c
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	strncpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32)(addr_t) cqr->data;
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}

static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;
	unsigned long flags;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (private->uid.type != UA_BASE_DEVICE) {
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
			continue;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (private->uid.type != UA_BASE_DEVICE) {
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
			continue;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}

static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	unsigned long flags;
	int rc;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = (struct dasd_eckd_private *) device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		spin_unlock_irqrestore(&lcu->lock, flags);
		rc = dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list))
			list_move(&device->alias_list, &lcu->active_devices);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}

static void __stop_device_on_lcu(struct dasd_device *device,
				 struct dasd_device *pos)
{
	/* If pos == device then device is already locked! */
	if (pos == device) {
		dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
		return;
	}
	spin_lock(get_ccwdev_lock(pos->cdev));
	dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
	spin_unlock(get_ccwdev_lock(pos->cdev));
}

/*
 * This function is called in interrupt context, so the
 * cdev lock for device is already locked!
 */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
				     struct dasd_device *device)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *pos;

	list_for_each_entry(pos, &lcu->active_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
		__stop_device_on_lcu(device, pos);
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
			__stop_device_on_lcu(device, pos);
		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
			__stop_device_on_lcu(device, pos);
	}
}

static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	unsigned long flags;

	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}

	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
		}
		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
					       flags);
		}
	}
}

static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	lcu->suc_data.device = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);
}

/*
 * note: this will be called from int handler context (cdev locked)
 */
void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
					  struct irb *irb)
{
	struct alias_lcu *lcu;
	char reason;
	struct dasd_eckd_private *private;
	char *sense;

	private = (struct dasd_eckd_private *) device->private;

	sense = dasd_get_sense(irb);
	if (sense) {
		reason = sense[8];
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
			    "eckd handle summary unit check: reason", reason);
	} else {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "eckd handle summary unit check:"
			    " no reason code available");
		return;
	}

	lcu = private->lcu;
	if (!lcu) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "device not ready to handle summary"
			    " unit check (no lcu structure)");
		return;
	}
	spin_lock(&lcu->lock);
	_stop_all_devices_on_lcu(lcu, device);
	/* prepare for lcu_update */
	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
	/* If this device is about to be removed just return and wait for
	 * the next interrupt on a different device
	 */
	if (list_empty(&device->alias_list)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "device is in offline processing,"
			    " don't do summary unit check handling");
		spin_unlock(&lcu->lock);
		return;
	}
	if (lcu->suc_data.device) {
		/* already scheduled or running */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "previous instance of summary unit check worker"
			    " still pending");
		spin_unlock(&lcu->lock);
		return;
	}
	lcu->suc_data.reason = reason;
	lcu->suc_data.device = device;
	spin_unlock(&lcu->lock);
	schedule_work(&lcu->suc_data.worker);
}
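
/*
 * Caller sketch (illustrative only): the eckd discipline is expected to
 * detect the summary unit check in its unsolicited interrupt handler and,
 * still holding the cdev lock as noted above, simply call
 *
 *	dasd_alias_handle_summary_unit_check(device, irb);
 *
 * All further recovery is then driven by the worker scheduled here.
 */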