drivers/s390/block/dasd_alias.c
/*
 * PAV alias management for the DASD ECKD discipline
 *
 * Copyright IBM Corporation, 2007
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/list.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"
/*
 * General concept of alias management:
 * - PAV and DASD alias management is specific to the eckd discipline.
 * - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
 * - The dasd_alias_add_device / dasd_alias_remove_device
 *   functions mark the point when a device is 'ready for service'.
 * - A summary unit check is a rare occasion, but it is mandatory to
 *   support it. It requires some complex recovery actions before the
 *   devices can be used again (see dasd_alias_handle_summary_unit_check).
 * - dasd_alias_get_start_dev will find an alias device that can be used
 *   instead of the base device and does some (very simple) load balancing.
 *   This is the function that gets called for each I/O, so when improving
 *   something, this function should get faster or better, the rest has just
 *   to be correct.
 */
static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

static struct alias_root aliastree = {
        .serverlist = LIST_HEAD_INIT(aliastree.serverlist),
        .lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
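
/*
 * The alias tree is organized as server -> lcu -> pav group: servers
 * are matched by the vendor and serial of the uid, lcus by their ssid,
 * and pav groups by the unit address of their base device.  The
 * _find_* helpers below walk these lists; the callers in this file
 * hold aliastree.lock or lcu->lock while doing so.
 */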
static struct alias_server *_find_server(struct dasd_uid *uid)
{
        struct alias_server *pos;

        list_for_each_entry(pos, &aliastree.serverlist, server) {
                if (!strncmp(pos->uid.vendor, uid->vendor,
                             sizeof(uid->vendor))
                    && !strncmp(pos->uid.serial, uid->serial,
                                sizeof(uid->serial)))
                        return pos;
        }
        return NULL;
}
static struct alias_lcu *_find_lcu(struct alias_server *server,
                                   struct dasd_uid *uid)
{
        struct alias_lcu *pos;

        list_for_each_entry(pos, &server->lculist, lcu) {
                if (pos->uid.ssid == uid->ssid)
                        return pos;
        }
        return NULL;
}
static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
                                           struct dasd_uid *uid)
{
        struct alias_pav_group *pos;
        __u8 search_unit_addr;

        /* for hyper pav there is only one group */
        if (lcu->pav == HYPER_PAV) {
                if (list_empty(&lcu->grouplist))
                        return NULL;
                else
                        return list_first_entry(&lcu->grouplist,
                                                struct alias_pav_group, group);
        }

        /* for base pav we have to find the group that matches the base */
        if (uid->type == UA_BASE_DEVICE)
                search_unit_addr = uid->real_unit_addr;
        else
                search_unit_addr = uid->base_unit_addr;
        list_for_each_entry(pos, &lcu->grouplist, group) {
                if (pos->uid.base_unit_addr == search_unit_addr &&
                    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
                        return pos;
        }
        return NULL;
}
static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
        struct alias_server *server;

        server = kzalloc(sizeof(*server), GFP_KERNEL);
        if (!server)
                return ERR_PTR(-ENOMEM);
        memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
        memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
        INIT_LIST_HEAD(&server->server);
        INIT_LIST_HEAD(&server->lculist);
        return server;
}
static void _free_server(struct alias_server *server)
{
        kfree(server);
}
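
/*
 * Allocate an lcu structure including the buffer for the unit address
 * configuration and a pre-allocated request for resetting a summary
 * unit check.  Both are allocated with GFP_DMA because they are used
 * as CCW data; having the reset request ready up front means the
 * summary unit check handler does not need to allocate memory while
 * recovering.
 */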
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
        struct alias_lcu *lcu;

        lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
        if (!lcu)
                return ERR_PTR(-ENOMEM);
        lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
        if (!lcu->uac)
                goto out_err1;
        lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
        if (!lcu->rsu_cqr)
                goto out_err2;
        lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
                                       GFP_KERNEL | GFP_DMA);
        if (!lcu->rsu_cqr->cpaddr)
                goto out_err3;
        lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
        if (!lcu->rsu_cqr->data)
                goto out_err4;

        memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
        memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
        lcu->uid.ssid = uid->ssid;
        lcu->pav = NO_PAV;
        lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
        INIT_LIST_HEAD(&lcu->lcu);
        INIT_LIST_HEAD(&lcu->inactive_devices);
        INIT_LIST_HEAD(&lcu->active_devices);
        INIT_LIST_HEAD(&lcu->grouplist);
        INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
        INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
        spin_lock_init(&lcu->lock);
        return lcu;

out_err4:
        kfree(lcu->rsu_cqr->cpaddr);
out_err3:
        kfree(lcu->rsu_cqr);
out_err2:
        kfree(lcu->uac);
out_err1:
        kfree(lcu);
        return ERR_PTR(-ENOMEM);
}
static void _free_lcu(struct alias_lcu *lcu)
{
        kfree(lcu->rsu_cqr->data);
        kfree(lcu->rsu_cqr->cpaddr);
        kfree(lcu->rsu_cqr);
        kfree(lcu->uac);
        kfree(lcu);
}
/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * If the return value is 1, the lcu was already known before, if it
 * is 0, this is a new lcu.
 * Negative return code indicates that something went wrong (e.g. -ENOMEM)
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        unsigned long flags;
        struct alias_server *server, *newserver;
        struct alias_lcu *lcu, *newlcu;
        int is_lcu_known;
        struct dasd_uid *uid;

        private = (struct dasd_eckd_private *) device->private;
        uid = &private->uid;
        spin_lock_irqsave(&aliastree.lock, flags);
        is_lcu_known = 1;
        server = _find_server(uid);
        if (!server) {
                spin_unlock_irqrestore(&aliastree.lock, flags);
                newserver = _allocate_server(uid);
                if (IS_ERR(newserver))
                        return PTR_ERR(newserver);
                spin_lock_irqsave(&aliastree.lock, flags);
                server = _find_server(uid);
                if (!server) {
                        list_add(&newserver->server, &aliastree.serverlist);
                        server = newserver;
                        is_lcu_known = 0;
                } else {
                        /* someone was faster */
                        _free_server(newserver);
                }
        }

        lcu = _find_lcu(server, uid);
        if (!lcu) {
                spin_unlock_irqrestore(&aliastree.lock, flags);
                newlcu = _allocate_lcu(uid);
                if (IS_ERR(newlcu))
                        return PTR_ERR(newlcu);
                spin_lock_irqsave(&aliastree.lock, flags);
                lcu = _find_lcu(server, uid);
                if (!lcu) {
                        list_add(&newlcu->lcu, &server->lculist);
                        lcu = newlcu;
                        is_lcu_known = 0;
                } else {
                        /* someone was faster */
                        _free_lcu(newlcu);
                }
                is_lcu_known = 0;
        }
        spin_lock(&lcu->lock);
        list_add(&device->alias_list, &lcu->inactive_devices);
        private->lcu = lcu;
        spin_unlock(&lcu->lock);
        spin_unlock_irqrestore(&aliastree.lock, flags);

        return is_lcu_known;
}
/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        unsigned long flags;
        struct alias_lcu *lcu;
        struct alias_server *server;
        int was_pending;

        private = (struct dasd_eckd_private *) device->private;
        lcu = private->lcu;
        spin_lock_irqsave(&lcu->lock, flags);
        list_del_init(&device->alias_list);
        /* make sure that the workers don't use this device */
        if (device == lcu->suc_data.device) {
                spin_unlock_irqrestore(&lcu->lock, flags);
                cancel_work_sync(&lcu->suc_data.worker);
                spin_lock_irqsave(&lcu->lock, flags);
                if (device == lcu->suc_data.device)
                        lcu->suc_data.device = NULL;
        }
        was_pending = 0;
        if (device == lcu->ruac_data.device) {
                spin_unlock_irqrestore(&lcu->lock, flags);
                was_pending = 1;
                cancel_delayed_work_sync(&lcu->ruac_data.dwork);
                spin_lock_irqsave(&lcu->lock, flags);
                if (device == lcu->ruac_data.device)
                        lcu->ruac_data.device = NULL;
        }
        private->lcu = NULL;
        spin_unlock_irqrestore(&lcu->lock, flags);

        spin_lock_irqsave(&aliastree.lock, flags);
        spin_lock(&lcu->lock);
        if (list_empty(&lcu->grouplist) &&
            list_empty(&lcu->active_devices) &&
            list_empty(&lcu->inactive_devices)) {
                list_del(&lcu->lcu);
                spin_unlock(&lcu->lock);
                _free_lcu(lcu);
                lcu = NULL;
        } else {
                if (was_pending)
                        _schedule_lcu_update(lcu, NULL);
                spin_unlock(&lcu->lock);
        }
        server = _find_server(&private->uid);
        if (server && list_empty(&server->lculist)) {
                list_del(&server->server);
                _free_server(server);
        }
        spin_unlock_irqrestore(&aliastree.lock, flags);
}
/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 */
static int _add_device_to_lcu(struct alias_lcu *lcu,
                              struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_pav_group *group;
        struct dasd_uid *uid;

        private = (struct dasd_eckd_private *) device->private;
        uid = &private->uid;
        uid->type = lcu->uac->unit[uid->real_unit_addr].ua_type;
        uid->base_unit_addr = lcu->uac->unit[uid->real_unit_addr].base_ua;
        dasd_set_uid(device->cdev, &private->uid);

        /* if we have no PAV anyway, we don't need to bother with PAV groups */
        if (lcu->pav == NO_PAV) {
                list_move(&device->alias_list, &lcu->active_devices);
                return 0;
        }

        group = _find_group(lcu, uid);
        if (!group) {
                group = kzalloc(sizeof(*group), GFP_ATOMIC);
                if (!group)
                        return -ENOMEM;
                memcpy(group->uid.vendor, uid->vendor, sizeof(uid->vendor));
                memcpy(group->uid.serial, uid->serial, sizeof(uid->serial));
                group->uid.ssid = uid->ssid;
                if (uid->type == UA_BASE_DEVICE)
                        group->uid.base_unit_addr = uid->real_unit_addr;
                else
                        group->uid.base_unit_addr = uid->base_unit_addr;
                memcpy(group->uid.vduit, uid->vduit, sizeof(uid->vduit));
                INIT_LIST_HEAD(&group->group);
                INIT_LIST_HEAD(&group->baselist);
                INIT_LIST_HEAD(&group->aliaslist);
                list_add(&group->group, &lcu->grouplist);
        }
        if (uid->type == UA_BASE_DEVICE)
                list_move(&device->alias_list, &group->baselist);
        else
                list_move(&device->alias_list, &group->aliaslist);
        private->pavgroup = group;
        return 0;
}
static void _remove_device_from_lcu(struct alias_lcu *lcu,
                                    struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_pav_group *group;

        private = (struct dasd_eckd_private *) device->private;
        list_move(&device->alias_list, &lcu->inactive_devices);
        group = private->pavgroup;
        if (!group)
                return;
        private->pavgroup = NULL;
        if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
                list_del(&group->group);
                kfree(group);
                return;
        }
        if (group->next == device)
                group->next = NULL;
}
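
/*
 * Read the unit address configuration of the lcu into lcu->uac by
 * sending a Perform Subsystem Function / Read Subsystem Data channel
 * program (suborder 0x0e) to the given device.
 */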
static int read_unit_address_configuration(struct dasd_device *device,
                                           struct alias_lcu *lcu)
{
        struct dasd_psf_prssd_data *prssdp;
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;
        int rc;
        unsigned long flags;

        cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data)),
                                   device);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        cqr->startdev = device;
        cqr->memdev = device;
        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        cqr->retries = 10;
        cqr->expires = 20 * HZ;

        /* Prepare for Read Subsystem Data */
        prssdp = (struct dasd_psf_prssd_data *) cqr->data;
        memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
        prssdp->order = PSF_ORDER_PRSSD;
        prssdp->suborder = 0x0e;        /* Read unit address configuration */
        /* all other bytes of prssdp must be zero */

        ccw = cqr->cpaddr;
        ccw->cmd_code = DASD_ECKD_CCW_PSF;
        ccw->count = sizeof(struct dasd_psf_prssd_data);
        ccw->flags |= CCW_FLAG_CC;
        ccw->cda = (__u32)(addr_t) prssdp;

        /* Read Subsystem Data - unit address configuration */
        memset(lcu->uac, 0, sizeof(*(lcu->uac)));

        ccw++;
        ccw->cmd_code = DASD_ECKD_CCW_RSSD;
        ccw->count = sizeof(*(lcu->uac));
        ccw->cda = (__u32)(addr_t) lcu->uac;

        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;

        /* need to unset flag here to detect race with summary unit check */
        spin_lock_irqsave(&lcu->lock, flags);
        lcu->flags &= ~NEED_UAC_UPDATE;
        spin_unlock_irqrestore(&lcu->lock, flags);

        do {
                rc = dasd_sleep_on(cqr);
        } while (rc && (cqr->retries > 0));
        if (rc) {
                spin_lock_irqsave(&lcu->lock, flags);
                lcu->flags |= NEED_UAC_UPDATE;
                spin_unlock_irqrestore(&lcu->lock, flags);
        }
        dasd_kfree_request(cqr, cqr->memdev);
        return rc;
}
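
/*
 * Re-read the unit address configuration through refdev and rebuild
 * the alias topology of the lcu: all existing pav groups are dissolved
 * first, the PAV mode is determined from the new configuration, and
 * the devices on the active list are then sorted back into groups.
 */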
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
        unsigned long flags;
        struct alias_pav_group *pavgroup, *tempgroup;
        struct dasd_device *device, *tempdev;
        int i, rc;
        struct dasd_eckd_private *private;

        spin_lock_irqsave(&lcu->lock, flags);
        list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
                list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
                                         alias_list) {
                        list_move(&device->alias_list, &lcu->active_devices);
                        private = (struct dasd_eckd_private *) device->private;
                        private->pavgroup = NULL;
                }
                list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
                                         alias_list) {
                        list_move(&device->alias_list, &lcu->active_devices);
                        private = (struct dasd_eckd_private *) device->private;
                        private->pavgroup = NULL;
                }
                list_del(&pavgroup->group);
                kfree(pavgroup);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);

        rc = read_unit_address_configuration(refdev, lcu);
        if (rc)
                return rc;

        spin_lock_irqsave(&lcu->lock, flags);
        lcu->pav = NO_PAV;
        for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
                switch (lcu->uac->unit[i].ua_type) {
                case UA_BASE_PAV_ALIAS:
                        lcu->pav = BASE_PAV;
                        break;
                case UA_HYPER_PAV_ALIAS:
                        lcu->pav = HYPER_PAV;
                        break;
                }
                if (lcu->pav != NO_PAV)
                        break;
        }

        list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
                                 alias_list) {
                _add_device_to_lcu(lcu, device);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
        return 0;
}
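
/*
 * Worker callback for the delayed ruac work: perform the lcu update
 * and retry later if it failed or if another update was requested
 * while the data was being processed.
 */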
static void lcu_update_work(struct work_struct *work)
{
        struct alias_lcu *lcu;
        struct read_uac_work_data *ruac_data;
        struct dasd_device *device;
        unsigned long flags;
        int rc;

        ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
        lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
        device = ruac_data->device;
        rc = _lcu_update(device, lcu);
        /*
         * Need to check flags again, as there could have been another
         * prepare_update or a new device while we were still
         * processing the data
         */
        spin_lock_irqsave(&lcu->lock, flags);
        if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
                              " alias data in lcu (rc = %d), retry later", rc);
                schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
        } else {
                lcu->ruac_data.device = NULL;
                lcu->flags &= ~UPDATE_PENDING;
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
}
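
/*
 * Mark the lcu as needing an update and schedule the delayed worker.
 * A device is chosen to issue the update: preferably the given device,
 * otherwise the first base or alias device of the first group, or any
 * device on the active list.  Returns -EINVAL if no usable device is
 * available; the next device set active will trigger the update.
 */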
static int _schedule_lcu_update(struct alias_lcu *lcu,
                                struct dasd_device *device)
{
        struct dasd_device *usedev = NULL;
        struct alias_pav_group *group;

        lcu->flags |= NEED_UAC_UPDATE;
        if (lcu->ruac_data.device) {
                /* already scheduled or running */
                return 0;
        }
        if (device && !list_empty(&device->alias_list))
                usedev = device;

        if (!usedev && !list_empty(&lcu->grouplist)) {
                group = list_first_entry(&lcu->grouplist,
                                         struct alias_pav_group, group);
                if (!list_empty(&group->baselist))
                        usedev = list_first_entry(&group->baselist,
                                                  struct dasd_device,
                                                  alias_list);
                else if (!list_empty(&group->aliaslist))
                        usedev = list_first_entry(&group->aliaslist,
                                                  struct dasd_device,
                                                  alias_list);
        }
        if (!usedev && !list_empty(&lcu->active_devices)) {
                usedev = list_first_entry(&lcu->active_devices,
                                          struct dasd_device, alias_list);
        }
        /*
         * if we haven't found a proper device yet, give up for now, the next
         * device that will be set active will trigger an lcu update
         */
        if (!usedev)
                return -EINVAL;
        lcu->ruac_data.device = usedev;
        schedule_delayed_work(&lcu->ruac_data.dwork, 0);
        return 0;
}
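
/*
 * Called when a device becomes ready for service: sort it into its pav
 * group if the lcu data is current, otherwise park it on the active
 * list and schedule an lcu update.
 */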
int dasd_alias_add_device(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_lcu *lcu;
        unsigned long flags;
        int rc;

        private = (struct dasd_eckd_private *) device->private;
        lcu = private->lcu;
        rc = 0;
        spin_lock_irqsave(&lcu->lock, flags);
        if (!(lcu->flags & UPDATE_PENDING)) {
                rc = _add_device_to_lcu(lcu, device);
                if (rc)
                        lcu->flags |= UPDATE_PENDING;
        }
        if (lcu->flags & UPDATE_PENDING) {
                list_move(&device->alias_list, &lcu->active_devices);
                _schedule_lcu_update(lcu, device);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
        return rc;
}
int dasd_alias_remove_device(struct dasd_device *device)
{
        struct dasd_eckd_private *private;
        struct alias_lcu *lcu;
        unsigned long flags;

        private = (struct dasd_eckd_private *) device->private;
        lcu = private->lcu;
        spin_lock_irqsave(&lcu->lock, flags);
        _remove_device_from_lcu(lcu, device);
        spin_unlock_irqrestore(&lcu->lock, flags);
        return 0;
}
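
/*
 * Select the device that should be used to start the next I/O for the
 * given base device.  Alias devices of the pav group are handed out in
 * round-robin order via group->next; an alias is only returned if it
 * is not stopped and currently has a smaller request count
 * (private->count) than the base device, otherwise NULL is returned.
 */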
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
        struct dasd_device *alias_device;
        struct alias_pav_group *group;
        struct alias_lcu *lcu;
        struct dasd_eckd_private *private, *alias_priv;
        unsigned long flags;

        private = (struct dasd_eckd_private *) base_device->private;
        group = private->pavgroup;
        lcu = private->lcu;
        if (!group || !lcu)
                return NULL;
        if (lcu->pav == NO_PAV ||
            lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
                return NULL;

        spin_lock_irqsave(&lcu->lock, flags);
        alias_device = group->next;
        if (!alias_device) {
                if (list_empty(&group->aliaslist)) {
                        spin_unlock_irqrestore(&lcu->lock, flags);
                        return NULL;
                } else {
                        alias_device = list_first_entry(&group->aliaslist,
                                                        struct dasd_device,
                                                        alias_list);
                }
        }
        if (list_is_last(&alias_device->alias_list, &group->aliaslist))
                group->next = list_first_entry(&group->aliaslist,
                                               struct dasd_device, alias_list);
        else
                group->next = list_first_entry(&alias_device->alias_list,
                                               struct dasd_device, alias_list);
        spin_unlock_irqrestore(&lcu->lock, flags);
        alias_priv = (struct dasd_eckd_private *) alias_device->private;
        if ((alias_priv->count < private->count) && !alias_device->stopped)
                return alias_device;
        else
                return NULL;
}
/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather than in dasd_eckd.c
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
                                    struct dasd_device *device,
                                    char reason)
{
        struct dasd_ccw_req *cqr;
        int rc = 0;
        struct ccw1 *ccw;

        cqr = lcu->rsu_cqr;
        strncpy((char *) &cqr->magic, "ECKD", 4);
        ASCEBC((char *) &cqr->magic, 4);
        ccw = cqr->cpaddr;
        ccw->cmd_code = DASD_ECKD_CCW_RSCK;
        ccw->flags = 0;
        ccw->count = 16;
        ccw->cda = (__u32)(addr_t) cqr->data;
        ((char *)cqr->data)[0] = reason;

        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        cqr->retries = 255;     /* set retry counter to enable basic ERP */
        cqr->startdev = device;
        cqr->memdev = device;
        cqr->block = NULL;
        cqr->expires = 5 * HZ;
        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;

        rc = dasd_sleep_on_immediatly(cqr);
        return rc;
}
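
/*
 * Kick the request queues of all base devices on the lcu so that
 * requests that were blocked during summary unit check handling get a
 * chance to run again.
 */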
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *device;
        struct dasd_eckd_private *private;

        /* active and inactive list can contain alias as well as base devices */
        list_for_each_entry(device, &lcu->active_devices, alias_list) {
                private = (struct dasd_eckd_private *) device->private;
                if (private->uid.type != UA_BASE_DEVICE)
                        continue;
                dasd_schedule_block_bh(device->block);
                dasd_schedule_device_bh(device);
        }
        list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
                private = (struct dasd_eckd_private *) device->private;
                if (private->uid.type != UA_BASE_DEVICE)
                        continue;
                dasd_schedule_block_bh(device->block);
                dasd_schedule_device_bh(device);
        }
        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_for_each_entry(device, &pavgroup->baselist, alias_list) {
                        dasd_schedule_block_bh(device->block);
                        dasd_schedule_device_bh(device);
                }
        }
}
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *device, *temp;
        struct dasd_eckd_private *private;
        int rc;
        unsigned long flags;
        LIST_HEAD(active);

        /*
         * Problem here is that dasd_flush_device_queue may wait
         * for termination of a request to complete. We can't keep
         * the lcu lock during that time, so we must assume that
         * the lists may have changed.
         * Idea: first gather all active alias devices in a separate list,
         * then flush the first element of this list unlocked, and afterwards
         * check if it is still on the list before moving it to the
         * active_devices list.
         */

        spin_lock_irqsave(&lcu->lock, flags);
        list_for_each_entry_safe(device, temp, &lcu->active_devices,
                                 alias_list) {
                private = (struct dasd_eckd_private *) device->private;
                if (private->uid.type == UA_BASE_DEVICE)
                        continue;
                list_move(&device->alias_list, &active);
        }

        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_splice_init(&pavgroup->aliaslist, &active);
        }
        while (!list_empty(&active)) {
                device = list_first_entry(&active, struct dasd_device,
                                          alias_list);
                spin_unlock_irqrestore(&lcu->lock, flags);
                rc = dasd_flush_device_queue(device);
                spin_lock_irqsave(&lcu->lock, flags);
                /*
                 * only move device around if it wasn't moved away while we
                 * were waiting for the flush
                 */
                if (device == list_first_entry(&active,
                                               struct dasd_device, alias_list))
                        list_move(&device->alias_list, &lcu->active_devices);
        }
        spin_unlock_irqrestore(&lcu->lock, flags);
}
static void __stop_device_on_lcu(struct dasd_device *device,
                                 struct dasd_device *pos)
{
        /* If pos == device then device is already locked! */
        if (pos == device) {
                pos->stopped |= DASD_STOPPED_SU;
                return;
        }
        spin_lock(get_ccwdev_lock(pos->cdev));
        pos->stopped |= DASD_STOPPED_SU;
        spin_unlock(get_ccwdev_lock(pos->cdev));
}
/*
 * This function is called in interrupt context, so the
 * cdev lock for device is already locked!
 */
static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
                                     struct dasd_device *device)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *pos;

        list_for_each_entry(pos, &lcu->active_devices, alias_list)
                __stop_device_on_lcu(device, pos);
        list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
                __stop_device_on_lcu(device, pos);
        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_for_each_entry(pos, &pavgroup->baselist, alias_list)
                        __stop_device_on_lcu(device, pos);
                list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
                        __stop_device_on_lcu(device, pos);
        }
}
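
/*
 * Clear the DASD_STOPPED_SU bit on every device of the lcu so that
 * normal I/O processing can resume after the summary unit check has
 * been handled.
 */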
static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
        struct alias_pav_group *pavgroup;
        struct dasd_device *device;
        unsigned long flags;

        list_for_each_entry(device, &lcu->active_devices, alias_list) {
                spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                device->stopped &= ~DASD_STOPPED_SU;
                spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        }

        list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
                spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                device->stopped &= ~DASD_STOPPED_SU;
                spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        }

        list_for_each_entry(pavgroup, &lcu->grouplist, group) {
                list_for_each_entry(device, &pavgroup->baselist, alias_list) {
                        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                        device->stopped &= ~DASD_STOPPED_SU;
                        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
                                               flags);
                }
                list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
                        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
                        device->stopped &= ~DASD_STOPPED_SU;
                        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
                                               flags);
                }
        }
}
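
/*
 * Worker that performs the recovery after a summary unit check: flush
 * all alias devices, reset the summary unit check on the device that
 * reported it, unstop and restart the devices of the lcu and finally
 * schedule an update of the alias configuration.
 */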
static void summary_unit_check_handling_work(struct work_struct *work)
{
        struct alias_lcu *lcu;
        struct summary_unit_check_work_data *suc_data;
        unsigned long flags;
        struct dasd_device *device;

        suc_data = container_of(work, struct summary_unit_check_work_data,
                                worker);
        lcu = container_of(suc_data, struct alias_lcu, suc_data);
        device = suc_data->device;

        /* 1. flush alias devices */
        flush_all_alias_devices_on_lcu(lcu);

        /* 2. reset summary unit check */
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        device->stopped &= ~(DASD_STOPPED_SU | DASD_STOPPED_PENDING);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        reset_summary_unit_check(lcu, device, suc_data->reason);

        spin_lock_irqsave(&lcu->lock, flags);
        _unstop_all_devices_on_lcu(lcu);
        _restart_all_base_devices_on_lcu(lcu);
        /* 3. read new alias configuration */
        _schedule_lcu_update(lcu, device);
        lcu->suc_data.device = NULL;
        spin_unlock_irqrestore(&lcu->lock, flags);
}
/*
 * note: this will be called from int handler context (cdev locked)
 */
void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
                                          struct irb *irb)
{
        struct alias_lcu *lcu;
        char reason;
        struct dasd_eckd_private *private;
        char *sense;

        private = (struct dasd_eckd_private *) device->private;

        sense = dasd_get_sense(irb);
        if (sense) {
                reason = sense[8];
                DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
                              "eckd handle summary unit check: reason", reason);
        } else {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "eckd handle summary unit check:"
                              " no reason code available");
                return;
        }

        lcu = private->lcu;
        if (!lcu) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "device not ready to handle summary"
                              " unit check (no lcu structure)");
                return;
        }
        spin_lock(&lcu->lock);
        _stop_all_devices_on_lcu(lcu, device);
        /* prepare for lcu_update */
        private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
        /* If this device is about to be removed just return and wait for
         * the next interrupt on a different device
         */
        if (list_empty(&device->alias_list)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "device is in offline processing,"
                              " don't do summary unit check handling");
                spin_unlock(&lcu->lock);
                return;
        }
        if (lcu->suc_data.device) {
                /* already scheduled or running */
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "previous instance of summary unit check worker"
                              " still pending");
                spin_unlock(&lcu->lock);
                return;
        }
        lcu->suc_data.reason = reason;
        lcu->suc_data.device = device;
        spin_unlock(&lcu->lock);
        schedule_work(&lcu->suc_data.worker);
}