linux-2.6/linux-loongson.git: drivers/scsi/device_handler/scsi_dh_rdac.c
/*
 * Engenio/LSI RDAC SCSI Device Handler
 *
 * Copyright (C) 2005 Mike Christie. All rights reserved.
 * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>
#define RDAC_NAME "rdac"
#define RDAC_RETRY_COUNT 5

/*
 * LSI mode page stuff
 *
 * These struct definitions and the forming of the
 * mode page were taken from the LSI RDAC 2.4 GPL'd
 * driver, and then converted to Linux conventions.
 */
#define RDAC_QUIESCENCE_TIME 20
/*
 * Page Codes
 */
#define RDAC_PAGE_CODE_REDUNDANT_CONTROLLER 0x2c

/*
 * Controller modes definitions
 */
#define RDAC_MODE_TRANSFER_SPECIFIED_LUNS 0x02

/*
 * RDAC Options field
 */
#define RDAC_FORCED_QUIESENCE 0x02

#define RDAC_TIMEOUT	(60 * HZ)
#define RDAC_RETRIES	3
struct rdac_mode_6_hdr {
	u8	data_len;
	u8	medium_type;
	u8	device_params;
	u8	block_desc_len;
};

struct rdac_mode_10_hdr {
	u16	data_len;
	u8	medium_type;
	u8	device_params;
	u16	reserved;
	u16	block_desc_len;
};

struct rdac_mode_common {
	u8	controller_serial[16];
	u8	alt_controller_serial[16];
	u8	rdac_mode[2];
	u8	alt_rdac_mode[2];
	u8	quiescence_timeout;
	u8	rdac_options;
};
struct rdac_pg_legacy {
	struct rdac_mode_6_hdr hdr;
	u8	page_code;
	u8	page_len;
	struct rdac_mode_common common;
#define MODE6_MAX_LUN	32
	u8	lun_table[MODE6_MAX_LUN];
	u8	reserved2[32];
	u8	reserved3;
	u8	reserved4;
};

struct rdac_pg_expanded {
	struct rdac_mode_10_hdr hdr;
	u8	page_code;
	u8	subpage_code;
	u8	page_len[2];
	struct rdac_mode_common common;
	u8	lun_table[256];
	u8	reserved3;
	u8	reserved4;
};
struct c9_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC9 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "vace" */
	u8	avte_cvp;
	u8	path_prio;
	u8	reserved2[38];
};

#define SUBSYS_ID_LEN	16
#define SLOT_ID_LEN	2

struct c4_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC4 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "subs" */
	u8	subsys_id[SUBSYS_ID_LEN];
	u8	revision[4];
	u8	slot_id[SLOT_ID_LEN];
	u8	reserved[2];
};
struct rdac_controller {
	u8			subsys_id[SUBSYS_ID_LEN];
	u8			slot_id[SLOT_ID_LEN];
	int			use_ms10;
	struct kref		kref;
	struct list_head	node; /* list of all controllers */
	union			{
		struct rdac_pg_legacy legacy;
		struct rdac_pg_expanded expanded;
	} mode_select;
};

struct c8_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC8 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "edid" */
	u8	reserved2[3];
	u8	vol_uniq_id_len;
	u8	vol_uniq_id[16];
	u8	vol_user_label_len;
	u8	vol_user_label[60];
	u8	array_uniq_id_len;
	u8	array_unique_id[16];
	u8	array_user_label_len;
	u8	array_user_label[60];
	u8	lun[8];
};
struct c2_inquiry {
	u8	peripheral_info;
	u8	page_code;	/* 0xC2 */
	u8	reserved1;
	u8	page_len;
	u8	page_id[4];	/* "swr4" */
	u8	sw_version[3];
	u8	sw_date[3];
	u8	features_enabled;
	u8	max_lun_supported;
	u8	partitions[239]; /* Total allocation length should be 0xFF */
};
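
/*
 * Per-device handler state, kept in the buf[] area of the scsi_dh_data
 * that rdac_bus_attach() allocates and hangs off the scsi_device.
 */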
struct rdac_dh_data {
	struct rdac_controller	*ctlr;
#define UNINITIALIZED_LUN	(1 << 8)
	unsigned		lun;
#define RDAC_STATE_ACTIVE	0
#define RDAC_STATE_PASSIVE	1
	unsigned char		state;

#define RDAC_LUN_UNOWNED	0
#define RDAC_LUN_OWNED		1
#define RDAC_LUN_AVT		2
	char			lun_state;
	unsigned char		sense[SCSI_SENSE_BUFFERSIZE];
	union			{
		struct c2_inquiry c2;
		struct c4_inquiry c4;
		struct c8_inquiry c8;
		struct c9_inquiry c9;
	} inq;
};
static const char *lun_state[] =
{
	"unowned",
	"owned",
	"owned (AVT mode)",
};

static LIST_HEAD(ctlr_list);
static DEFINE_SPINLOCK(list_lock);
static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
	BUG_ON(scsi_dh_data == NULL);
	return ((struct rdac_dh_data *) scsi_dh_data->buf);
}
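
/*
 * Allocate a block-layer packet command (REQ_TYPE_BLOCK_PC) request, map
 * the supplied kernel buffer into it, and mark it failfast with the RDAC
 * retry count and timeout.
 */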
static struct request *get_rdac_req(struct scsi_device *sdev,
			void *buffer, unsigned buflen, int rw)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;

	rq = blk_get_request(q, rw, GFP_NOIO);

	if (!rq) {
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_get_request failed.\n");
		return NULL;
	}

	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {
		blk_put_request(rq);
		sdev_printk(KERN_INFO, sdev,
				"get_rdac_req: blk_rq_map_kern failed.\n");
		return NULL;
	}

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	rq->retries = RDAC_RETRIES;
	rq->timeout = RDAC_TIMEOUT;

	return rq;
}
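
/*
 * Build the MODE SELECT request used for failover: fill mode page 0x2c
 * (the legacy 6-byte layout, or the expanded 10-byte layout when
 * use_ms10 is set) and flag h->lun in the LUN table so that only the
 * specified LUN is transferred to this path's controller.
 */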
static struct request *rdac_failover_get(struct scsi_device *sdev,
					 struct rdac_dh_data *h)
{
	struct request *rq;
	struct rdac_mode_common *common;
	unsigned data_size;

	if (h->ctlr->use_ms10) {
		struct rdac_pg_expanded *rdac_pg;

		data_size = sizeof(struct rdac_pg_expanded);
		rdac_pg = &h->ctlr->mode_select.expanded;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40;
		rdac_pg->subpage_code = 0x1;
		rdac_pg->page_len[0] = 0x01;
		rdac_pg->page_len[1] = 0x28;
		rdac_pg->lun_table[h->lun] = 0x81;
	} else {
		struct rdac_pg_legacy *rdac_pg;

		data_size = sizeof(struct rdac_pg_legacy);
		rdac_pg = &h->ctlr->mode_select.legacy;
		memset(rdac_pg, 0, data_size);
		common = &rdac_pg->common;
		rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER;
		rdac_pg->page_len = 0x68;
		rdac_pg->lun_table[h->lun] = 0x81;
	}
	common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS;
	common->quiescence_timeout = RDAC_QUIESCENCE_TIME;
	common->rdac_options = RDAC_FORCED_QUIESENCE;

	/* get request for block layer packet command */
	rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE);
	if (!rq)
		return NULL;

	/* Prepare the command. */
	if (h->ctlr->use_ms10) {
		rq->cmd[0] = MODE_SELECT_10;
		rq->cmd[7] = data_size >> 8;
		rq->cmd[8] = data_size & 0xff;
	} else {
		rq->cmd[0] = MODE_SELECT;
		rq->cmd[4] = data_size;
	}
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	return rq;
}
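
/* kref release callback: unlink the controller from ctlr_list and free it. */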
static void release_controller(struct kref *kref)
{
	struct rdac_controller *ctlr;
	ctlr = container_of(kref, struct rdac_controller, kref);

	spin_lock(&list_lock);
	list_del(&ctlr->node);
	spin_unlock(&list_lock);
	kfree(ctlr);
}
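
/*
 * Find the rdac_controller matching this subsystem/slot ID and take a
 * reference, or allocate and register a new one. The controller object
 * is shared by all LUNs reached through the same controller.
 */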
static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
{
	struct rdac_controller *ctlr, *tmp;

	spin_lock(&list_lock);

	list_for_each_entry(tmp, &ctlr_list, node) {
		if ((memcmp(tmp->subsys_id, subsys_id, SUBSYS_ID_LEN) == 0) &&
			  (memcmp(tmp->slot_id, slot_id, SLOT_ID_LEN) == 0)) {
			kref_get(&tmp->kref);
			spin_unlock(&list_lock);
			return tmp;
		}
	}
	ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
	if (!ctlr)
		goto done;

	/* initialize fields of controller */
	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
	kref_init(&ctlr->kref);
	ctlr->use_ms10 = -1;
	list_add(&ctlr->node, &ctlr_list);
done:
	spin_unlock(&list_lock);
	return ctlr;
}
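
/*
 * Issue an EVPD INQUIRY for the given vendor page into h->inq and
 * translate the outcome into a SCSI_DH_* status code.
 */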
static int submit_inquiry(struct scsi_device *sdev, int page_code,
			  unsigned int len, struct rdac_dh_data *h)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;
	int err = SCSI_DH_RES_TEMP_UNAVAIL;

	rq = get_rdac_req(sdev, &h->inq, len, READ);
	if (!rq)
		goto done;

	/* Prepare the command. */
	rq->cmd[0] = INQUIRY;
	rq->cmd[1] = 1;
	rq->cmd[2] = page_code;
	rq->cmd[4] = len;
	rq->cmd_len = COMMAND_SIZE(INQUIRY);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	err = blk_execute_rq(q, NULL, rq, 1);
	if (err == -EIO)
		err = SCSI_DH_IO;

	blk_put_request(rq);
done:
	return err;
}
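
/*
 * Read the 0xC8 ("edid") inquiry page to learn the LUN number that is
 * later used to index the mode-select LUN table.
 */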
static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c8_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c8;
		if (inqp->page_code != 0xc8)
			return SCSI_DH_NOSYS;
		if (inqp->page_id[0] != 'e' || inqp->page_id[1] != 'd' ||
		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
			return SCSI_DH_NOSYS;
		h->lun = inqp->lun[7]; /* Uses only the last byte */
	}
	return err;
}
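
/*
 * Read the 0xC9 ("vace") inquiry page to determine whether this path's
 * controller currently owns the LUN (or whether AVT mode is enabled) and
 * set lun_state/state accordingly; an unowned LUN leaves the path passive.
 */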
static int check_ownership(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c9_inquiry *inqp;

	h->lun_state = RDAC_LUN_UNOWNED;
	h->state = RDAC_STATE_ACTIVE;
	err = submit_inquiry(sdev, 0xC9, sizeof(struct c9_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c9;
		if ((inqp->avte_cvp >> 7) == 0x1) {
			/* LUN in AVT mode */
			sdev_printk(KERN_NOTICE, sdev,
				    "%s: AVT mode detected\n",
				    RDAC_NAME);
			h->lun_state = RDAC_LUN_AVT;
		} else if ((inqp->avte_cvp & 0x1) != 0) {
			/* LUN was owned by the controller */
			h->lun_state = RDAC_LUN_OWNED;
		}
	}

	if (h->lun_state == RDAC_LUN_UNOWNED)
		h->state = RDAC_STATE_PASSIVE;

	return err;
}
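
/*
 * Read the 0xC4 ("subs") inquiry page and bind this device to the shared
 * rdac_controller identified by its subsystem and slot IDs.
 */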
static int initialize_controller(struct scsi_device *sdev,
				 struct rdac_dh_data *h)
{
	int err;
	struct c4_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c4;
		h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
		if (!h->ctlr)
			err = SCSI_DH_RES_TEMP_UNAVAIL;
	}
	return err;
}
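
/*
 * Read the 0xC2 ("swr4") feature page and decide whether MODE SELECT(10)
 * is required, i.e. whether the array supports more LUNs than fit in the
 * legacy mode-6 page.
 */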
static int set_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	int err;
	struct c2_inquiry *inqp;

	err = submit_inquiry(sdev, 0xC2, sizeof(struct c2_inquiry), h);
	if (err == SCSI_DH_OK) {
		inqp = &h->inq.c2;
		/*
		 * If more than MODE6_MAX_LUN luns are supported, use
		 * mode select 10
		 */
		if (inqp->max_lun_supported >= MODE6_MAX_LUN)
			h->ctlr->use_ms10 = 1;
		else
			h->ctlr->use_ms10 = 0;
	}
	return err;
}
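
/*
 * Decode the sense data of a failed MODE SELECT and decide whether the
 * failover attempt is worth retrying.
 */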
static int mode_select_handle_sense(struct scsi_device *sdev,
				    unsigned char *sensebuf)
{
	struct scsi_sense_hdr sense_hdr;
	int err = SCSI_DH_IO, ret;

	ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
	if (!ret)
		goto done;

	err = SCSI_DH_OK;

	switch (sense_hdr.sense_key) {
	case NO_SENSE:
	case ABORTED_COMMAND:
	case UNIT_ATTENTION:
		err = SCSI_DH_RETRY;
		break;
	case NOT_READY:
		if (sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x01)
			/* LUN Not Ready and is in the Process of Becoming
			 * Ready
			 */
			err = SCSI_DH_RETRY;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr.asc == 0x91 && sense_hdr.ascq == 0x36)
			/*
			 * Command Lock contention
			 */
			err = SCSI_DH_RETRY;
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "MODE_SELECT failed with sense %02x/%02x/%02x.\n",
			    sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
	}

done:
	return err;
}
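
/*
 * Issue the failover MODE SELECT built by rdac_failover_get(), retrying up
 * to RDAC_RETRY_COUNT times on retryable sense conditions. On success the
 * path is marked active.
 */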
static int send_mode_select(struct scsi_device *sdev, struct rdac_dh_data *h)
{
	struct request *rq;
	struct request_queue *q = sdev->request_queue;
	int err, retry_cnt = RDAC_RETRY_COUNT;

retry:
	err = SCSI_DH_RES_TEMP_UNAVAIL;
	rq = rdac_failover_get(sdev, h);
	if (!rq)
		goto done;

	sdev_printk(KERN_INFO, sdev, "%s MODE_SELECT command.\n",
		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");

	err = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (err != SCSI_DH_OK) {
		err = mode_select_handle_sense(sdev, h->sense);
		if (err == SCSI_DH_RETRY && retry_cnt--)
			goto retry;
	}
	if (err == SCSI_DH_OK)
		h->state = RDAC_STATE_ACTIVE;

done:
	return err;
}
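
/*
 * scsi_dh activate callback: re-check ownership, set up the shared
 * controller state on first use, and if the LUN is still unowned send the
 * MODE SELECT that pulls it over to this path.
 */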
static int rdac_activate(struct scsi_device *sdev)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	int err = SCSI_DH_OK;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto done;

	if (!h->ctlr) {
		err = initialize_controller(sdev, h);
		if (err != SCSI_DH_OK)
			goto done;
	}

	if (h->ctlr->use_ms10 == -1) {
		err = set_mode_select(sdev, h);
		if (err != SCSI_DH_OK)
			goto done;
	}
	if (h->lun_state == RDAC_LUN_UNOWNED)
		err = send_mode_select(sdev, h);
done:
	return err;
}
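
/* scsi_dh prep_fn callback: quietly kill requests while the path is passive. */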
static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	int ret = BLKPREP_OK;

	if (h->state != RDAC_STATE_ACTIVE) {
		ret = BLKPREP_KILL;
		req->cmd_flags |= REQ_QUIET;
	}
	return ret;
}
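
/*
 * scsi_dh check_sense callback: interpret the RDAC-specific sense codes,
 * retrying quiescence/reset conditions and failing the path when the
 * controller reports it does not own the LUN.
 */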
static int rdac_check_sense(struct scsi_device *sdev,
			    struct scsi_sense_hdr *sense_hdr)
{
	struct rdac_dh_data *h = get_rdac_data(sdev);
	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
			/* LUN Not Ready - Storage firmware incompatible
			 * Manual code synchronisation required.
			 *
			 * Nothing we can do here. Try to bypass the path.
			 */
			return SUCCESS;
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0xA1)
			/* LUN Not Ready - Quiescence in progress
			 *
			 * Just retry and wait.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0xA1 && sense_hdr->ascq == 0x02)
			/* LUN Not Ready - Quiescence in progress
			 * or has been achieved
			 * Just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	case ILLEGAL_REQUEST:
		if (sense_hdr->asc == 0x94 && sense_hdr->ascq == 0x01) {
			/* Invalid Request - Current Logical Unit Ownership.
			 * Controller is not the current owner of the LUN,
			 * Fail the path, so that the other path be used.
			 */
			h->state = RDAC_STATE_PASSIVE;
			return SUCCESS;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
			/*
			 * Power On, Reset, or Bus Device Reset, just retry.
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x8b && sense_hdr->ascq == 0x02)
			/*
			 * Quiescence in progress, just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}
	/* success just means we do not care what scsi-ml does */
	return SCSI_RETURN_NOT_HANDLED;
}
static const struct scsi_dh_devlist rdac_dev_list[] = {
	{"IBM", "1722"},
	{"IBM", "1724"},
	{"IBM", "1726"},
	{"IBM", "1742"},
	{"IBM", "1814"},
	{"IBM", "1815"},
	{"IBM", "1818"},
	{"IBM", "3526"},
	{"SGI", "TP9400"},
	{"SGI", "TP9500"},
	{"SGI", "IS"},
	{"STK", "OPENstorage D280"},
	{"SUN", "CSM200_R"},
	{"SUN", "LCSM100_F"},
	{"DELL", "MD3000"},
	{"DELL", "MD3000i"},
	{"LSI", "INF-01-00"},
	{"ENGENIO", "INF-01-00"},
	{NULL, NULL},
};
static int rdac_bus_attach(struct scsi_device *sdev);
static void rdac_bus_detach(struct scsi_device *sdev);

static struct scsi_device_handler rdac_dh = {
	.name = RDAC_NAME,
	.module = THIS_MODULE,
	.devlist = rdac_dev_list,
	.prep_fn = rdac_prep_fn,
	.check_sense = rdac_check_sense,
	.attach = rdac_bus_attach,
	.detach = rdac_bus_detach,
	.activate = rdac_activate,
};
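
/*
 * Attach callback: allocate the handler data, discover the LUN number and
 * current ownership, and publish the handler on the scsi_device.
 */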
static int rdac_bus_attach(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data;
	struct rdac_dh_data *h;
	unsigned long flags;
	int err;

	scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
			       + sizeof(*h) , GFP_KERNEL);
	if (!scsi_dh_data) {
		sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n",
			    RDAC_NAME);
		return 0;
	}

	scsi_dh_data->scsi_dh = &rdac_dh;
	h = (struct rdac_dh_data *) scsi_dh_data->buf;
	h->lun = UNINITIALIZED_LUN;
	h->state = RDAC_STATE_ACTIVE;

	err = get_lun(sdev, h);
	if (err != SCSI_DH_OK)
		goto failed;

	err = check_ownership(sdev, h);
	if (err != SCSI_DH_OK)
		goto failed;

	if (!try_module_get(THIS_MODULE))
		goto failed;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	sdev->scsi_dh_data = scsi_dh_data;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	sdev_printk(KERN_NOTICE, sdev,
		    "%s: LUN %d (%s)\n",
		    RDAC_NAME, h->lun, lun_state[(int)h->lun_state]);

	return 0;

failed:
	kfree(scsi_dh_data);
	sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
		    RDAC_NAME);
	return -EINVAL;
}
static void rdac_bus_detach(struct scsi_device *sdev)
{
	struct scsi_dh_data *scsi_dh_data;
	struct rdac_dh_data *h;
	unsigned long flags;

	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
	scsi_dh_data = sdev->scsi_dh_data;
	sdev->scsi_dh_data = NULL;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);

	h = (struct rdac_dh_data *) scsi_dh_data->buf;
	if (h->ctlr)
		kref_put(&h->ctlr->kref, release_controller);
	kfree(scsi_dh_data);
	module_put(THIS_MODULE);
	sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
}
static int __init rdac_init(void)
{
	int r;

	r = scsi_register_device_handler(&rdac_dh);
	if (r != 0)
		printk(KERN_ERR "Failed to register scsi device handler.\n");
	return r;
}
static void __exit rdac_exit(void)
{
	scsi_unregister_device_handler(&rdac_dh);
}

module_init(rdac_init);
module_exit(rdac_exit);

MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
MODULE_LICENSE("GPL");