/*
 * LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>

#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20

/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_TIMEOUT	(60 * HZ)
#define RDAC_RETRIES	3

struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};

struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
	u8	reserved2[32];
	u8	reserved3;
	u8	reserved4;
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
	u8	reserved3;
	u8	reserved4;
};

struct c9_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC9 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;
	u8	path_prio;
	u8	reserved2[38];
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2
#define ARRAY_LABEL_LEN	31

struct c4_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC4 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	revision[4];
	u8	slot_id[SLOT_ID_LEN];
	u8	reserved[2];
};

#define UNIQUE_ID_LEN 16
struct c8_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC8 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "edid" */
	u8	reserved2[3];
	u8	vol_uniq_id_len;
	u8	vol_uniq_id[16];
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[UNIQUE_ID_LEN];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};

struct rdac_controller {
	u8			array_id[UNIQUE_ID_LEN];
	int			use_ms10;
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	union			{
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
	u8			index;
	u8			array_name[ARRAY_LABEL_LEN];
	struct Scsi_Host	*host;
	spinlock_t		ms_lock;
	int			ms_queued;
	struct work_struct	ms_work;
	struct scsi_device	*ms_sdev;
	struct list_head	ms_head;
};

struct c2_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC2 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "swr4" */
	u8	sw_version[3];
	u8	sw_date[3];
	u8	features_enabled;
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
};

struct rdac_dh_data {
	struct rdac_controller	*ctlr;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;

#define RDAC_MODE		0
#define RDAC_MODE_AVT		1
#define RDAC_MODE_IOSHIP	2
	unsigned char		mode;

#define RDAC_STATE_ACTIVE	0
#define RDAC_STATE_PASSIVE	1
	unsigned char		state;

#define RDAC_LUN_UNOWNED	0
#define RDAC_LUN_OWNED		1
	char			lun_state;

#define RDAC_PREFERRED		0
#define RDAC_NON_PREFERRED	1
	char			preferred;

	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
	union			{
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};

static const char *mode[] = {
	"RDAC",
	"AVT",
	"IOSHIP",
};
static const char *lun_state[] =
{
	"unowned",
	"owned",
};

struct rdac_queue_data {
	struct list_head	entry;
	struct rdac_dh_data	*h;
	activate_complete	callback_fn;
	void			*callback_data;
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static struct workqueue_struct *kmpath_rdacd;
static void send_mode_select(struct work_struct *work);

/*
 * Module parameter to enable rdac debug logging.
 * Two bits are used for each type of logging; only two types are
 * defined for now.  Can be extended later if required.
 */
static int rdac_logging = 1;
module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
		"Default is 1 - failover logging enabled, "
		"set it to 0xF to enable all the logs");

#define RDAC_LOG_FAILOVER	0
#define RDAC_LOG_SENSE		2

#define RDAC_LOG_BITS		2

#define RDAC_LOG_LEVEL(SHIFT)  \
	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))

#define RDAC_LOG(SHIFT, sdev, f, arg...) \
do { \
	if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
} while (0);
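
/*
 * Allocate a block layer packet command request for the handler's
 * internal commands, map @buffer into it and mark it failfast with
 * the RDAC timeout and retry limits.
 */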
static struct request *get_rdac_req(struct scsi_device *sdev,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;

	rq = blk_get_request(q, rw, GFP_NOIO);

	if (IS_ERR(rq)) {
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_get_request failed.\n");
		return NULL;
	}
	blk_rq_set_block_pc(rq);

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
		blk_put_request(rq);
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_rq_map_kern failed.\n");
		return NULL;
	}

	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->retries = RDAC_RETRIES;
	rq->timeout = RDAC_TIMEOUT;

	return rq;
}
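
/*
 * Build the MODE SELECT request used for failover: fill in either the
 * legacy or the expanded RDAC redundant-controller page (depending on
 * use_ms10) and flag every LUN queued on @list for transfer.
 */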
static struct request *rdac_failover_get(struct scsi_device *sdev,
			struct rdac_dh_data *h, struct list_head *list)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;
	struct rdac_queue_data *qdata;
	u8 *lun_table;

	if (h->ctlr->use_ms10) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		lun_table = rdac_pg->lun_table;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		lun_table = rdac_pg->lun_table;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	list_for_each_entry(qdata, list, entry) {
		lun_table[qdata->h->lun] = 0x81;
	}

	/* get request for block layer packet command */
	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq)
		return NULL;

	/* Prepare the command. */
	if (h->ctlr->use_ms10) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	return rq;
}
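
/*
 * kref release callback: unlink the controller from ctlr_list and
 * free it.  Called with list_lock held.
 */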
static void release_controller(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	list_del(&ctlr->node);
	kfree(ctlr);
}
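
/*
 * Look up an existing rdac_controller matching array id, controller
 * index and host, or allocate and initialize a new one.  Called with
 * list_lock held.
 */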
static struct rdac_controller *get_controller(int index, char *array_name,
			u8 *array_id, struct scsi_device *sdev)
{
	struct rdac_controller *ctlr, *tmp;

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
			  (tmp->index == index) &&
			  (tmp->host == sdev->host)) {
			kref_get(&tmp->kref);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		return NULL;

	/* initialize fields of controller */
	memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
	ctlr->index = index;
	ctlr->host = sdev->host;
	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);

	kref_init(&ctlr->kref);
	ctlr->use_ms10 = -1;
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_lock_init(&ctlr->ms_lock);
	INIT_WORK(&ctlr->ms_work, send_mode_select);
	INIT_LIST_HEAD(&ctlr->ms_head);
	list_add(&ctlr->node, &ctlr_list);

	return ctlr;
}
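
/*
 * Issue an EVPD INQUIRY for @page_code into h->inq and return a
 * SCSI_DH_* status.
 */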
static int submit_inquiry(struct scsi_device *sdev, int page_code,
			  unsigned int len, struct rdac_dh_data *h)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;
	int err = SCSI_DH_RES_TEMP_UNAVAIL;

	rq = get_rdac_req(sdev, &h->inq, len, READ);
	if (!rq)
		goto done;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	err = blk_execute_rq(q, NULL, rq, 1);
	if (err == -EIO)
		err = SCSI_DH_IO;

	blk_put_request(rq);
done:
	return err;
}
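
/*
 * Read the 0xC8 ("edid") inquiry page to obtain the LUN number, the
 * array name and the array unique id.
 */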
static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
			char *array_name, u8 *array_id)
{
	int err, i;
	struct c8_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c8;
		if (inqp->page_code != 0xc8)
			return SCSI_DH_NOSYS;
		if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
			return SCSI_DH_NOSYS;
		h->lun = inqp->lun[7]; /* Uses only the last byte */

		for(i=0; i<ARRAY_LABEL_LEN-1; ++i)
			*(array_name+i) = inqp->array_user_label[(2*i)+1];

		*(array_name+ARRAY_LABEL_LEN-1) = '\0';
		memset(array_id, 0, UNIQUE_ID_LEN);
		memcpy(array_id, inqp->array_unique_id, inqp->array_uniq_id_len);
	}
	return err;
}
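
/*
 * Read the 0xC9 ("vace") inquiry page to detect the operating mode
 * (RDAC/AVT/IOSHIP), the current ownership and the path priority, and
 * update the handler state accordingly.
 */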
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c9_inquiry *inqp;

	h->state = RDAC_STATE_ACTIVE;
	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c9;
		/* detect the operating mode */
		if ((inqp->avte_cvp >> 5) & 0x1)
			h->mode = RDAC_MODE_IOSHIP; /* LUN in IOSHIP mode */
		else if (inqp->avte_cvp >> 7)
			h->mode = RDAC_MODE_AVT; /* LUN in AVT mode */
		else
			h->mode = RDAC_MODE; /* LUN in RDAC mode */

		/* Update ownership */
		if (inqp->avte_cvp & 0x1)
			h->lun_state = RDAC_LUN_OWNED;
		else {
			h->lun_state = RDAC_LUN_UNOWNED;
			if (h->mode == RDAC_MODE)
				h->state = RDAC_STATE_PASSIVE;
		}

		/* Update path prio */
		if (inqp->path_prio & 0x1)
			h->preferred = RDAC_PREFERRED;
		else
			h->preferred = RDAC_NON_PREFERRED;
	}

	return err;
}
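
/*
 * Read the 0xC4 ("subs") inquiry page to determine the controller
 * index, then look up or allocate the shared rdac_controller.
 */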
static int initialize_controller(struct scsi_device *sdev,
		struct rdac_dh_data *h, char *array_name, u8 *array_id)
{
	int err, index;
	struct c4_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c4;
		/* get the controller index */
		if (inqp->slot_id[1] == 0x31)
			index = 0;
		else
			index = 1;

		spin_lock(&list_lock);
		h->ctlr = get_controller(index, array_name, array_id, sdev);
		if (!h->ctlr)
			err = SCSI_DH_RES_TEMP_UNAVAIL;
		spin_unlock(&list_lock);
	}
	return err;
}
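
/*
 * Read the 0xC2 ("swr4") inquiry page and decide whether MODE
 * SELECT(10) is needed, i.e. whether the array supports at least
 * MODE6_MAX_LUN LUNs.
 */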
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c2_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c2;
		/*
		 * If more than MODE6_MAX_LUN luns are supported, use
		 * mode select 10
		 */
		if (inqp->max_lun_supported >= MODE6_MAX_LUN)
			h->ctlr->use_ms10 = 1;
		else
			h->ctlr->use_ms10 = 0;
	}
	return err;
}
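
/*
 * Decode the sense data of a failed MODE SELECT and map it to a
 * SCSI_DH_* retry/failure code.
 */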
static int mode_select_handle_sense(struct scsi_device *sdev,
					unsigned char *sensebuf)
{
	struct scsi_sense_hdr sense_hdr;
	int err = SCSI_DH_IO, ret;
	struct rdac_dh_data *h = sdev->handler_data;

	ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
	if (!ret)
		goto done;

	switch (sense_hdr.sense_key) {
	case NO_SENSE:
	case ABORTED_COMMAND:
	case UNIT_ATTENTION:
		err = SCSI_DH_RETRY;
		break;
	case NOT_READY:
		if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
			/* LUN Not Ready and is in the Process of Becoming
			 * Ready
			 */
			err = SCSI_DH_RETRY;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
			/*
			 * Command Lock contention
			 */
			err = SCSI_DH_IMM_RETRY;
		break;
	default:
		break;
	}

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"MODE_SELECT returned with sense %02x/%02x/%02x",
		(char *) h->ctlr->array_name, h->ctlr->index,
		sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);

done:
	return err;
}
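
/*
 * Workqueue handler: take the queued activation requests off the
 * controller, issue the MODE SELECT (retrying on retryable sense) and
 * complete each queued request with the result.
 */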
static void send_mode_select(struct work_struct *work)
{
	struct rdac_controller *ctlr =
		container_of(work, struct rdac_controller, ms_work);
	struct request *rq;
	struct scsi_device *sdev = ctlr->ms_sdev;
	struct rdac_dh_data *h = sdev->handler_data;
	struct request_queue *q = sdev->request_queue;
	int err, retry_cnt = RDAC_RETRY_COUNT;
	struct rdac_queue_data *tmp, *qdata;
	LIST_HEAD(list);

	spin_lock(&ctlr->ms_lock);
	list_splice_init(&ctlr->ms_head, &list);
	ctlr->ms_queued = 0;
	ctlr->ms_sdev = NULL;
	spin_unlock(&ctlr->ms_lock);

retry:
	err = SCSI_DH_RES_TEMP_UNAVAIL;
	rq = rdac_failover_get(sdev, h, &list);
	if (!rq)
		goto done;

	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
		"%s MODE_SELECT command",
		(char *) h->ctlr->array_name, h->ctlr->index,
		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");

	err = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (err != SCSI_DH_OK) {
		err = mode_select_handle_sense(sdev, h->sense);
		if (err == SCSI_DH_RETRY && retry_cnt--)
			goto retry;
		if (err == SCSI_DH_IMM_RETRY)
			goto retry;
	}
	if (err == SCSI_DH_OK) {
		h->state = RDAC_STATE_ACTIVE;
		RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
				"MODE_SELECT completed",
				(char *) h->ctlr->array_name, h->ctlr->index);
	}

done:
	list_for_each_entry_safe(qdata, tmp, &list, entry) {
		list_del(&qdata->entry);
		if (err == SCSI_DH_OK)
			qdata->h->state = RDAC_STATE_ACTIVE;
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	return;
}
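
/*
 * Queue an activation request on the controller and kick the
 * kmpath_rdacd workqueue if a MODE SELECT is not already pending.
 */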
static int queue_mode_select(struct scsi_device *sdev,
				activate_complete fn, void *data)
{
	struct rdac_queue_data *qdata;
	struct rdac_controller *ctlr;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata)
		return SCSI_DH_RETRY;

	qdata->h = sdev->handler_data;
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	ctlr = qdata->h->ctlr;
	spin_lock(&ctlr->ms_lock);
	list_add_tail(&qdata->entry, &ctlr->ms_head);
	if (!ctlr->ms_queued) {
		ctlr->ms_queued = 1;
		ctlr->ms_sdev = sdev;
		queue_work(kmpath_rdacd, &ctlr->ms_work);
	}
	spin_unlock(&ctlr->ms_lock);
	return SCSI_DH_OK;
}
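
/*
 * scsi_dh activate callback: refresh the ownership state and, if this
 * path does not own the LUN (and, in IOSHIP mode, is the preferred
 * path), queue a MODE SELECT to transfer it; otherwise complete
 * immediately.
 */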
static int rdac_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct rdac_dh_data *h = sdev->handler_data;
	int err = SCSI_DH_OK;
	int act = 0;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto done;

	switch (h->mode) {
	case RDAC_MODE:
		if (h->lun_state == RDAC_LUN_UNOWNED)
			act = 1;
		break;
	case RDAC_MODE_IOSHIP:
		if ((h->lun_state == RDAC_LUN_UNOWNED) &&
		    (h->preferred == RDAC_PREFERRED))
			act = 1;
		break;
	default:
		break;
	}

	if (act) {
		err = queue_mode_select(sdev, fn, data);
		if (err == SCSI_DH_OK)
			return 0;
	}
done:
	if (fn)
		fn(data, err);
	return 0;
}
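
/*
 * scsi_dh prep callback: kill I/O quietly while the path is passive.
 */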
static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct rdac_dh_data *h = sdev->handler_data;
	int ret = BLKPREP_OK;

	if (h->state != RDAC_STATE_ACTIVE) {
		ret = BLKPREP_KILL;
		req->cmd_flags |= REQ_QUIET;
	}
	return ret;
}
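
/*
 * scsi_dh sense callback: retry, requeue or fail the path based on
 * array specific sense codes.
 */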
static int rdac_check_sense(struct scsi_device *sdev,
				struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = sdev->handler_data;

	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
			"I/O returned with sense %02x/%02x/%02x",
			(char *) h->ctlr->array_name, h->ctlr->index,
			sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);

	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
			/* LUN Not Ready - Logical Unit Not Ready and is in
			 * the process of becoming ready
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
			/* LUN Not Ready - Storage firmware incompatible
			 * Manual code synchronisation required.
			 *
			 * Nothing we can do here. Try to bypass the path.
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
			/* LUN Not Ready - Quiescence in progress
			 *
			 * Just retry and wait.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
			/* LUN Not Ready - Quiescence in progress
			 * or has been achieved
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
			/* Invalid Request - Current Logical Unit Ownership.
			 * Controller is not the current owner of the LUN,
			 * Fail the path, so that the other path can be used.
			 */
			h->state = RDAC_STATE_PASSIVE;
			return SUCCESS;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
			/*
			 * Quiescence in progress, just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}
	/* success just means we do not care what scsi-ml does */
	return SCSI_RETURN_NOT_HANDLED;
}
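
/*
 * Attach the handler to a device: read the identifying inquiry pages,
 * bind to (or create) the shared controller and determine ownership
 * and the mode select format to use.
 */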
static int rdac_bus_attach(struct scsi_device *sdev)
{
	struct rdac_dh_data *h;
	int err;
	char array_name[ARRAY_LABEL_LEN];
	char array_id[UNIQUE_ID_LEN];

	h = kzalloc(sizeof(*h) , GFP_KERNEL);
	if (!h)
		return -ENOMEM;
	h->lun = UNINITIALIZED_LUN;
	h->state = RDAC_STATE_ACTIVE;

	err = get_lun_info(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = initialize_controller(sdev, h, array_name, array_id);
	if (err != SCSI_DH_OK)
		goto failed;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	err = set_mode_select(sdev, h);
	if (err != SCSI_DH_OK)
		goto clean_ctlr;

	sdev_printk(KERN_NOTICE, sdev,
		    "%s: LUN %d (%s) (%s)\n",
		    RDAC_NAME, h->lun, mode[(int)h->mode],
		    lun_state[(int)h->lun_state]);

	sdev->handler_data = h;
	return 0;

clean_ctlr:
	spin_lock(&list_lock);
	kref_put(&h->ctlr->kref, release_controller);
	spin_unlock(&list_lock);

failed:
	kfree(h);
	return -EINVAL;
}
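
/*
 * Detach the handler: flush any pending mode select work, drop the
 * controller reference and free the per-device data.
 */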
static void rdac_bus_detach( struct scsi_device *sdev )
{
	struct rdac_dh_data *h = sdev->handler_data;

	if (h->ctlr && h->ctlr->ms_queued)
		flush_workqueue(kmpath_rdacd);

	spin_lock(&list_lock);
	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_controller);
	spin_unlock(&list_lock);
	sdev->handler_data = NULL;
	kfree(h);
}

static struct scsi_device_handler rdac_dh = {
	.name = RDAC_NAME,
	.module = THIS_MODULE,
	.prep_fn = rdac_prep_fn,
	.check_sense = rdac_check_sense,
	.attach = rdac_bus_attach,
	.detach = rdac_bus_detach,
	.activate = rdac_activate,
};

static int __init rdac_init(void)
{
	int r;

	r = scsi_register_device_handler(&rdac_dh);
	if (r != 0) {
		printk(KERN_ERR "Failed to register scsi device handler.");
		goto done;
	}

	/*
	 * Create workqueue to handle mode selects for rdac
	 */
	kmpath_rdacd = create_singlethread_workqueue("kmpath_rdacd");
	if (!kmpath_rdacd) {
		scsi_unregister_device_handler(&rdac_dh);
		printk(KERN_ERR "kmpath_rdacd creation failed.\n");

		r = -EINVAL;
	}
done:
	return r;
}

static void __exit rdac_exit(void)
{
	destroy_workqueue(kmpath_rdacd);
	scsi_unregister_device_handler(&rdac_dh);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_VERSION("01.00.0000.0000");
MODULE_LICENSE("GPL");