1 /*
2 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
4 Written By: Adam Radford <linuxraid@lsi.com>
5 Modifications By: Tom Couch <linuxraid@lsi.com>
7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8 Copyright (C) 2010 LSI Corporation.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; version 2 of the License.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 NO WARRANTY
20 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 solely responsible for determining the appropriateness of using and
25 distributing the Program and assumes all risks associated with its
26 exercise of rights under this Agreement, including but not limited to
27 the risks and costs of program errors, damage to or loss of data,
28 programs or equipment, and unavailability or interruption of operations.
30 DISCLAIMER OF LIABILITY
31 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 You should have received a copy of the GNU General Public License
40 along with this program; if not, write to the Free Software
41 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
43 Bugs/Comments/Suggestions should be mailed to:
44 linuxraid@lsi.com
46    For more information, go to:
47 http://www.lsi.com
49 Note: This version of the driver does not contain a bundled firmware
50 image.
52 History
53 -------
54 2.26.02.000 - Driver cleanup for kernel submission.
55 2.26.02.001 - Replace schedule_timeout() calls with msleep().
56 2.26.02.002 - Add support for PAE mode.
57 Add lun support.
58 Fix twa_remove() to free irq handler/unregister_chrdev()
59 before shutting down card.
60 Change to new 'change_queue_depth' api.
61 Fix 'handled=1' ISR usage, remove bogus IRQ check.
62 Remove un-needed eh_abort handler.
63 Add support for embedded firmware error strings.
64 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
65 2.26.02.004 - Add support for 9550SX controllers.
66 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
67 2.26.02.006 - Fix 9550SX pchip reset timeout.
68 Add big endian support.
69 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
70 2.26.02.008 - Free irq handler in __twa_shutdown().
71 Serialize reset code.
72 Add support for 9650SE controllers.
73 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
74 2.26.02.010 - Add support for 9690SA controllers.
75 2.26.02.011 - Increase max AENs drained to 256.
76 Add MSI support and "use_msi" module parameter.
77 Fix bug in twa_get_param() on 4GB+.
78 Use pci_resource_len() for ioremap().
79 2.26.02.012 - Add power management support.
80 2.26.02.013 - Fix bug in twa_load_sgl().
81 2.26.02.014 - Force 60 second timeout default.
84 #include <linux/module.h>
85 #include <linux/reboot.h>
86 #include <linux/spinlock.h>
87 #include <linux/interrupt.h>
88 #include <linux/moduleparam.h>
89 #include <linux/errno.h>
90 #include <linux/types.h>
91 #include <linux/delay.h>
92 #include <linux/pci.h>
93 #include <linux/time.h>
94 #include <linux/mutex.h>
95 #include <linux/slab.h>
96 #include <asm/io.h>
97 #include <asm/irq.h>
98 #include <asm/uaccess.h>
99 #include <scsi/scsi.h>
100 #include <scsi/scsi_host.h>
101 #include <scsi/scsi_tcq.h>
102 #include <scsi/scsi_cmnd.h>
103 #include "3w-9xxx.h"
105 /* Globals */
106 #define TW_DRIVER_VERSION "2.26.02.014"
107 static DEFINE_MUTEX(twa_chrdev_mutex);
108 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
109 static unsigned int twa_device_extension_count;
110 static int twa_major = -1;
111 extern struct timezone sys_tz;
113 /* Module parameters */
114 MODULE_AUTHOR ("LSI");
115 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
116 MODULE_LICENSE("GPL");
117 MODULE_VERSION(TW_DRIVER_VERSION);
119 static int use_msi = 0;
120 module_param(use_msi, int, S_IRUGO);
121 MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
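/*
 * Example (hypothetical invocation): MSI can be enabled at module load time
 * with "modprobe 3w-9xxx use_msi=1".  The parameter is declared S_IRUGO, so
 * it is read-only via sysfs once the module is loaded.
 */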
123 /* Function prototypes */
124 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
125 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
126 static char *twa_aen_severity_lookup(unsigned char severity_code);
127 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
128 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
129 static int twa_chrdev_open(struct inode *inode, struct file *file);
130 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
131 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
132 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
133 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
134 u32 set_features, unsigned short current_fw_srl,
135 unsigned short current_fw_arch_id,
136 unsigned short current_fw_branch,
137 unsigned short current_fw_build,
138 unsigned short *fw_on_ctlr_srl,
139 unsigned short *fw_on_ctlr_arch_id,
140 unsigned short *fw_on_ctlr_branch,
141 unsigned short *fw_on_ctlr_build,
142 u32 *init_connect_result);
143 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
144 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
145 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
146 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
147 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
148 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
149 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
150 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
151 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
152 static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
154 /* Functions */
156 /* Show some statistics about the card */
157 static ssize_t twa_show_stats(struct device *dev,
158 struct device_attribute *attr, char *buf)
160 struct Scsi_Host *host = class_to_shost(dev);
161 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
162 unsigned long flags = 0;
163 ssize_t len;
165 spin_lock_irqsave(tw_dev->host->host_lock, flags);
166 len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
167 "Current commands posted: %4d\n"
168 "Max commands posted: %4d\n"
169 "Current pending commands: %4d\n"
170 "Max pending commands: %4d\n"
171 "Last sgl length: %4d\n"
172 "Max sgl length: %4d\n"
173 "Last sector count: %4d\n"
174 "Max sector count: %4d\n"
175 "SCSI Host Resets: %4d\n"
176 "AEN's: %4d\n",
177 TW_DRIVER_VERSION,
178 tw_dev->posted_request_count,
179 tw_dev->max_posted_request_count,
180 tw_dev->pending_request_count,
181 tw_dev->max_pending_request_count,
182 tw_dev->sgl_entries,
183 tw_dev->max_sgl_entries,
184 tw_dev->sector_count,
185 tw_dev->max_sector_count,
186 tw_dev->num_resets,
187 tw_dev->aen_count);
188 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
189 return len;
190 } /* End twa_show_stats() */
192 /* This function will set a device's queue depth */
193 static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
194 int reason)
196 if (reason != SCSI_QDEPTH_DEFAULT)
197 return -EOPNOTSUPP;
199 if (queue_depth > TW_Q_LENGTH-2)
200 queue_depth = TW_Q_LENGTH-2;
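	/* The depth is capped at TW_Q_LENGTH-2, presumably so that request ids
	   remain available for internal driver commands (AEN reads and chrdev
	   ioctls), which draw from the same request id pool. */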
201 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
202 return queue_depth;
203 } /* End twa_change_queue_depth() */
205 /* Create sysfs 'stats' entry */
206 static struct device_attribute twa_host_stats_attr = {
207 .attr = {
208 .name = "stats",
209 .mode = S_IRUGO,
211 .show = twa_show_stats
214 /* Host attributes initializer */
215 static struct device_attribute *twa_host_attrs[] = {
216 &twa_host_stats_attr,
217 NULL,
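/* With this attribute registered through twa_host_attrs, the statistics are
   readable from user space at /sys/class/scsi_host/host<N>/stats (path
   assumed from the standard scsi_host sysfs layout). */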
220 /* File operations struct for character device */
221 static const struct file_operations twa_fops = {
222 .owner = THIS_MODULE,
223 .unlocked_ioctl = twa_chrdev_ioctl,
224 .open = twa_chrdev_open,
225 .release = NULL,
226 .llseek = noop_llseek,
229 /* This function will complete an AEN request from the ISR */
230 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
232 TW_Command_Full *full_command_packet;
233 TW_Command *command_packet;
234 TW_Command_Apache_Header *header;
235 unsigned short aen;
236 int retval = 1;
238 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
239 tw_dev->posted_request_count--;
240 aen = le16_to_cpu(header->status_block.error);
241 full_command_packet = tw_dev->command_packet_virt[request_id];
242 command_packet = &full_command_packet->command.oldcommand;
244 /* First check for internal completion of set param for time sync */
245 if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
246 /* Keep reading the queue in case there are more aen's */
247 if (twa_aen_read_queue(tw_dev, request_id))
248 goto out2;
249 else {
250 retval = 0;
251 goto out;
255 switch (aen) {
256 case TW_AEN_QUEUE_EMPTY:
257 /* Quit reading the queue if this is the last one */
258 break;
259 case TW_AEN_SYNC_TIME_WITH_HOST:
260 twa_aen_sync_time(tw_dev, request_id);
261 retval = 0;
262 goto out;
263 default:
264 twa_aen_queue_event(tw_dev, header);
266 /* If there are more aen's, keep reading the queue */
267 if (twa_aen_read_queue(tw_dev, request_id))
268 goto out2;
269 else {
270 retval = 0;
271 goto out;
274 retval = 0;
275 out2:
276 tw_dev->state[request_id] = TW_S_COMPLETED;
277 twa_free_request_id(tw_dev, request_id);
278 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
279 out:
280 return retval;
281 } /* End twa_aen_complete() */
283 /* This function will drain the AEN queue */
284 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
286 int request_id = 0;
287 char cdb[TW_MAX_CDB_LEN];
288 TW_SG_Entry sglist[1];
289 int finished = 0, count = 0;
290 TW_Command_Full *full_command_packet;
291 TW_Command_Apache_Header *header;
292 unsigned short aen;
293 int first_reset = 0, queue = 0, retval = 1;
295 if (no_check_reset)
296 first_reset = 0;
297 else
298 first_reset = 1;
300 full_command_packet = tw_dev->command_packet_virt[request_id];
301 memset(full_command_packet, 0, sizeof(TW_Command_Full));
303 /* Initialize cdb */
304 memset(&cdb, 0, TW_MAX_CDB_LEN);
305 cdb[0] = REQUEST_SENSE; /* opcode */
306 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
308 /* Initialize sglist */
309 memset(&sglist, 0, sizeof(TW_SG_Entry));
310 sglist[0].length = TW_SECTOR_SIZE;
311 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
313 if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
314 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
315 goto out;
318 /* Mark internal command */
319 tw_dev->srb[request_id] = NULL;
321 do {
322 /* Send command to the board */
323 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
324 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
325 goto out;
328 /* Now poll for completion */
329 if (twa_poll_response(tw_dev, request_id, 30)) {
330 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
331 tw_dev->posted_request_count--;
332 goto out;
335 tw_dev->posted_request_count--;
336 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
337 aen = le16_to_cpu(header->status_block.error);
338 queue = 0;
339 count++;
341 switch (aen) {
342 case TW_AEN_QUEUE_EMPTY:
343 if (first_reset != 1)
344 goto out;
345 else
346 finished = 1;
347 break;
348 case TW_AEN_SOFT_RESET:
349 if (first_reset == 0)
350 first_reset = 1;
351 else
352 queue = 1;
353 break;
354 case TW_AEN_SYNC_TIME_WITH_HOST:
355 break;
356 default:
357 queue = 1;
360 		/* Now queue the event info */
361 if (queue)
362 twa_aen_queue_event(tw_dev, header);
363 } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
365 if (count == TW_MAX_AEN_DRAIN)
366 goto out;
368 retval = 0;
369 out:
370 tw_dev->state[request_id] = TW_S_INITIAL;
371 return retval;
372 } /* End twa_aen_drain_queue() */
374 /* This function will queue an event */
375 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
377 u32 local_time;
378 struct timeval time;
379 TW_Event *event;
380 unsigned short aen;
381 char host[16];
382 char *error_str;
384 tw_dev->aen_count++;
386 /* Fill out event info */
387 event = tw_dev->event_queue[tw_dev->error_index];
389 /* Check for clobber */
390 host[0] = '\0';
391 if (tw_dev->host) {
392 sprintf(host, " scsi%d:", tw_dev->host->host_no);
393 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
394 tw_dev->aen_clobber = 1;
397 aen = le16_to_cpu(header->status_block.error);
398 memset(event, 0, sizeof(TW_Event));
400 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
401 do_gettimeofday(&time);
402 local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
403 event->time_stamp_sec = local_time;
404 event->aen_code = aen;
405 event->retrieved = TW_AEN_NOT_RETRIEVED;
406 event->sequence_id = tw_dev->error_sequence_id;
407 tw_dev->error_sequence_id++;
409 /* Check for embedded error string */
410 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
412 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
413 event->parameter_len = strlen(header->err_specific_desc);
414 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
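	/* err_specific_desc may be followed, within the same buffer, by a second
	   NUL-terminated string carrying the firmware's own message text (used in
	   place of the twa_aen_table lookup when printing below); when it is
	   present, both strings are copied into the event's parameter data. */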
415 if (event->severity != TW_AEN_SEVERITY_DEBUG)
416 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
417 host,
418 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
419 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
420 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
421 header->err_specific_desc);
422 else
423 tw_dev->aen_count--;
425 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
426 tw_dev->event_queue_wrapped = 1;
427 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
428 } /* End twa_aen_queue_event() */
430 /* This function will read the AEN queue from the ISR */
431 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
433 char cdb[TW_MAX_CDB_LEN];
434 TW_SG_Entry sglist[1];
435 TW_Command_Full *full_command_packet;
436 int retval = 1;
438 full_command_packet = tw_dev->command_packet_virt[request_id];
439 memset(full_command_packet, 0, sizeof(TW_Command_Full));
441 /* Initialize cdb */
442 memset(&cdb, 0, TW_MAX_CDB_LEN);
443 cdb[0] = REQUEST_SENSE; /* opcode */
444 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
446 /* Initialize sglist */
447 memset(&sglist, 0, sizeof(TW_SG_Entry));
448 sglist[0].length = TW_SECTOR_SIZE;
449 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
451 /* Mark internal command */
452 tw_dev->srb[request_id] = NULL;
454 /* Now post the command packet */
455 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
456 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
457 goto out;
459 retval = 0;
460 out:
461 return retval;
462 } /* End twa_aen_read_queue() */
464 /* This function will look up an AEN severity string */
465 static char *twa_aen_severity_lookup(unsigned char severity_code)
467 char *retval = NULL;
469 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
470 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
471 goto out;
473 retval = twa_aen_severity_table[severity_code];
474 out:
475 return retval;
476 } /* End twa_aen_severity_lookup() */
478 /* This function will sync firmware time with the host time */
479 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
481 u32 schedulertime;
482 struct timeval utc;
483 TW_Command_Full *full_command_packet;
484 TW_Command *command_packet;
485 TW_Param_Apache *param;
486 u32 local_time;
488 /* Fill out the command packet */
489 full_command_packet = tw_dev->command_packet_virt[request_id];
490 memset(full_command_packet, 0, sizeof(TW_Command_Full));
491 command_packet = &full_command_packet->command.oldcommand;
492 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
493 command_packet->request_id = request_id;
494 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
495 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
496 command_packet->size = TW_COMMAND_SIZE;
497 command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
499 /* Setup the param */
500 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
501 memset(param, 0, TW_SECTOR_SIZE);
502 param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
503 param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
504 param->parameter_size_bytes = cpu_to_le16(4);
506 /* Convert system time in UTC to local time seconds since last
507 Sunday 12:00AM */
508 do_gettimeofday(&utc);
509 local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
510 schedulertime = local_time - (3 * 86400);
511 schedulertime = cpu_to_le32(schedulertime % 604800);
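	/* SchedulerTime is expressed as seconds since the previous Sunday 00:00
	   local time.  The Unix epoch fell on a Thursday, so subtracting three
	   days (equivalent to adding four, modulo one week) re-bases the count
	   on a Sunday boundary before taking the remainder of 604800 (seconds
	   per week). */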
513 memcpy(param->data, &schedulertime, sizeof(u32));
515 /* Mark internal command */
516 tw_dev->srb[request_id] = NULL;
518 /* Now post the command */
519 twa_post_command_packet(tw_dev, request_id, 1);
520 } /* End twa_aen_sync_time() */
522 /* This function will allocate memory and check if it is correctly aligned */
523 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
525 int i;
526 dma_addr_t dma_handle;
527 unsigned long *cpu_addr;
528 int retval = 1;
530 cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
531 if (!cpu_addr) {
532 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
533 goto out;
536 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
537 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
538 pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
539 goto out;
542 memset(cpu_addr, 0, size*TW_Q_LENGTH);
544 for (i = 0; i < TW_Q_LENGTH; i++) {
545 switch(which) {
546 case 0:
547 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
548 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
549 break;
550 case 1:
551 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
552 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
553 break;
556 retval = 0;
557 out:
558 return retval;
559 } /* End twa_allocate_memory() */
561 /* This function will check the status register for unexpected bits */
562 static int twa_check_bits(u32 status_reg_value)
564 int retval = 1;
566 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
567 goto out;
568 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
569 goto out;
571 retval = 0;
572 out:
573 return retval;
574 } /* End twa_check_bits() */
576 /* This function will check the SRL and decide if we are compatible */
577 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
579 int retval = 1;
580 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
581 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
582 u32 init_connect_result = 0;
584 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
585 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
586 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
587 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
588 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
589 &fw_on_ctlr_build, &init_connect_result)) {
590 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
591 goto out;
594 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
595 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
596 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
598 /* Try base mode compatibility */
599 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
600 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
601 TW_EXTENDED_INIT_CONNECT,
602 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
603 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
604 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
605 &fw_on_ctlr_branch, &fw_on_ctlr_build,
606 &init_connect_result)) {
607 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
608 goto out;
610 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
611 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
612 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
613 } else {
614 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
616 goto out;
618 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
619 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
620 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
623 /* Load rest of compatibility struct */
624 strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
625 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
626 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
627 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
628 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
629 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
630 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
631 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
632 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
633 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
635 retval = 0;
636 out:
637 return retval;
638 } /* End twa_check_srl() */
640 /* This function handles ioctl for the character device */
641 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
643 struct inode *inode = file->f_path.dentry->d_inode;
644 long timeout;
645 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
646 dma_addr_t dma_handle;
647 int request_id = 0;
648 unsigned int sequence_id = 0;
649 unsigned char event_index, start_index;
650 TW_Ioctl_Driver_Command driver_command;
651 TW_Ioctl_Buf_Apache *tw_ioctl;
652 TW_Lock *tw_lock;
653 TW_Command_Full *full_command_packet;
654 TW_Compatibility_Info *tw_compat_info;
655 TW_Event *event;
656 struct timeval current_time;
657 u32 current_time_ms;
658 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
659 int retval = TW_IOCTL_ERROR_OS_EFAULT;
660 void __user *argp = (void __user *)arg;
662 mutex_lock(&twa_chrdev_mutex);
664 /* Only let one of these through at a time */
665 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
666 retval = TW_IOCTL_ERROR_OS_EINTR;
667 goto out;
670 /* First copy down the driver command */
671 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
672 goto out2;
674 /* Check data buffer size */
675 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
676 retval = TW_IOCTL_ERROR_OS_EINVAL;
677 goto out2;
680 	/* Hardware can only do transfers that are a multiple of 512 bytes */
681 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
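	/* Worked example: a 100-byte request becomes (100 + 511) & ~511 == 512,
	   while an exact multiple such as 1024 is left unchanged. */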
683 /* Now allocate ioctl buf memory */
684 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
685 if (!cpu_addr) {
686 retval = TW_IOCTL_ERROR_OS_ENOMEM;
687 goto out2;
690 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
692 /* Now copy down the entire ioctl */
693 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
694 goto out3;
696 /* See which ioctl we are doing */
697 switch (cmd) {
698 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
699 spin_lock_irqsave(tw_dev->host->host_lock, flags);
700 twa_get_request_id(tw_dev, &request_id);
702 /* Flag internal command */
703 tw_dev->srb[request_id] = NULL;
705 /* Flag chrdev ioctl */
706 tw_dev->chrdev_request_id = request_id;
708 full_command_packet = &tw_ioctl->firmware_command;
710 /* Load request id and sglist for both command types */
711 twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
713 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
715 /* Now post the command packet to the controller */
716 twa_post_command_packet(tw_dev, request_id, 1);
717 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
719 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
721 /* Now wait for command to complete */
722 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
724 /* We timed out, and didn't get an interrupt */
725 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
726 /* Now we need to reset the board */
727 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
728 tw_dev->host->host_no, TW_DRIVER, 0x37,
729 cmd);
730 retval = TW_IOCTL_ERROR_OS_EIO;
731 twa_reset_device_extension(tw_dev);
732 goto out3;
735 /* Now copy in the command packet response */
736 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
738 /* Now complete the io */
739 spin_lock_irqsave(tw_dev->host->host_lock, flags);
740 tw_dev->posted_request_count--;
741 tw_dev->state[request_id] = TW_S_COMPLETED;
742 twa_free_request_id(tw_dev, request_id);
743 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
744 break;
745 case TW_IOCTL_GET_COMPATIBILITY_INFO:
746 tw_ioctl->driver_command.status = 0;
747 /* Copy compatibility struct into ioctl data buffer */
748 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
749 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
750 break;
751 case TW_IOCTL_GET_LAST_EVENT:
752 if (tw_dev->event_queue_wrapped) {
753 if (tw_dev->aen_clobber) {
754 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
755 tw_dev->aen_clobber = 0;
756 } else
757 tw_ioctl->driver_command.status = 0;
758 } else {
759 if (!tw_dev->error_index) {
760 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
761 break;
763 tw_ioctl->driver_command.status = 0;
765 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
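		/* Adding TW_Q_LENGTH before the modulo keeps the index in range when
		   error_index has wrapped back to 0 (avoids a negative remainder). */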
766 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
767 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
768 break;
769 case TW_IOCTL_GET_FIRST_EVENT:
770 if (tw_dev->event_queue_wrapped) {
771 if (tw_dev->aen_clobber) {
772 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
773 tw_dev->aen_clobber = 0;
774 } else
775 tw_ioctl->driver_command.status = 0;
776 event_index = tw_dev->error_index;
777 } else {
778 if (!tw_dev->error_index) {
779 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
780 break;
782 tw_ioctl->driver_command.status = 0;
783 event_index = 0;
785 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
786 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
787 break;
788 case TW_IOCTL_GET_NEXT_EVENT:
789 event = (TW_Event *)tw_ioctl->data_buffer;
790 sequence_id = event->sequence_id;
791 tw_ioctl->driver_command.status = 0;
793 if (tw_dev->event_queue_wrapped) {
794 if (tw_dev->aen_clobber) {
795 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
796 tw_dev->aen_clobber = 0;
798 start_index = tw_dev->error_index;
799 } else {
800 if (!tw_dev->error_index) {
801 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
802 break;
804 start_index = 0;
806 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
808 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
809 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
810 tw_dev->aen_clobber = 1;
811 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
812 break;
814 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
815 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
816 break;
817 case TW_IOCTL_GET_PREVIOUS_EVENT:
818 event = (TW_Event *)tw_ioctl->data_buffer;
819 sequence_id = event->sequence_id;
820 tw_ioctl->driver_command.status = 0;
822 if (tw_dev->event_queue_wrapped) {
823 if (tw_dev->aen_clobber) {
824 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
825 tw_dev->aen_clobber = 0;
827 start_index = tw_dev->error_index;
828 } else {
829 if (!tw_dev->error_index) {
830 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
831 break;
833 start_index = 0;
835 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
837 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
838 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
839 tw_dev->aen_clobber = 1;
840 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
841 break;
843 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
844 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
845 break;
846 case TW_IOCTL_GET_LOCK:
847 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
848 do_gettimeofday(&current_time);
849 current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
851 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
852 tw_dev->ioctl_sem_lock = 1;
853 tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
854 tw_ioctl->driver_command.status = 0;
855 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
856 } else {
857 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
858 tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
860 break;
861 case TW_IOCTL_RELEASE_LOCK:
862 if (tw_dev->ioctl_sem_lock == 1) {
863 tw_dev->ioctl_sem_lock = 0;
864 tw_ioctl->driver_command.status = 0;
865 } else {
866 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
868 break;
869 default:
870 retval = TW_IOCTL_ERROR_OS_ENOTTY;
871 goto out3;
874 /* Now copy the entire response to userspace */
875 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
876 retval = 0;
877 out3:
878 /* Now free ioctl buf memory */
879 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
880 out2:
881 mutex_unlock(&tw_dev->ioctl_lock);
882 out:
883 mutex_unlock(&twa_chrdev_mutex);
884 return retval;
885 } /* End twa_chrdev_ioctl() */
887 /* This function handles open for the character device */
888 /* NOTE that this function will race with remove. */
889 static int twa_chrdev_open(struct inode *inode, struct file *file)
891 unsigned int minor_number;
892 int retval = TW_IOCTL_ERROR_OS_ENODEV;
894 minor_number = iminor(inode);
895 if (minor_number >= twa_device_extension_count)
896 goto out;
897 retval = 0;
898 out:
899 return retval;
900 } /* End twa_chrdev_open() */
902 /* This function will print readable messages from status register errors */
903 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
905 int retval = 1;
907 /* Check for various error conditions and handle them appropriately */
908 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
909 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
910 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
913 if (status_reg_value & TW_STATUS_PCI_ABORT) {
914 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
915 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
916 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
919 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
920 if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
921 (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
922 (!test_bit(TW_IN_RESET, &tw_dev->flags)))
923 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
924 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
927 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
928 if (tw_dev->reset_print == 0) {
929 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
930 tw_dev->reset_print = 1;
932 goto out;
934 retval = 0;
935 out:
936 return retval;
937 } /* End twa_decode_bits() */
939 /* This function will empty the response queue */
940 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
942 u32 status_reg_value, response_que_value;
943 int count = 0, retval = 1;
945 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
947 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
948 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
949 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
950 count++;
952 if (count == TW_MAX_RESPONSE_DRAIN)
953 goto out;
955 retval = 0;
956 out:
957 return retval;
958 } /* End twa_empty_response_queue() */
960 /* This function will clear the pchip/response queue on 9550SX and newer controllers */
961 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
963 u32 response_que_value = 0;
964 unsigned long before;
965 int retval = 1;
967 if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
968 before = jiffies;
969 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
970 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
971 msleep(1);
972 if (time_after(jiffies, before + HZ * 30))
973 goto out;
975 /* P-chip settle time */
976 msleep(500);
977 retval = 0;
978 } else
979 retval = 0;
980 out:
981 return retval;
982 } /* End twa_empty_response_queue_large() */
984 /* This function passes sense data from the firmware to the SCSI layer */
985 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
987 TW_Command_Full *full_command_packet;
988 unsigned short error;
989 int retval = 1;
990 char *error_str;
992 full_command_packet = tw_dev->command_packet_virt[request_id];
994 /* Check for embedded error string */
995 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
997 /* Don't print error for Logical unit not supported during rollcall */
998 error = le16_to_cpu(full_command_packet->header.status_block.error);
999 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
1000 if (print_host)
1001 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1002 tw_dev->host->host_no,
1003 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1004 full_command_packet->header.status_block.error,
1005 error_str[0] == '\0' ?
1006 twa_string_lookup(twa_error_table,
1007 full_command_packet->header.status_block.error) : error_str,
1008 full_command_packet->header.err_specific_desc);
1009 else
1010 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1011 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1012 full_command_packet->header.status_block.error,
1013 error_str[0] == '\0' ?
1014 twa_string_lookup(twa_error_table,
1015 full_command_packet->header.status_block.error) : error_str,
1016 full_command_packet->header.err_specific_desc);
1019 if (copy_sense) {
1020 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1021 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1022 retval = TW_ISR_DONT_RESULT;
1023 goto out;
1025 retval = 0;
1026 out:
1027 return retval;
1028 } /* End twa_fill_sense() */
1030 /* This function will free up device extension resources */
1031 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1033 if (tw_dev->command_packet_virt[0])
1034 pci_free_consistent(tw_dev->tw_pci_dev,
1035 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1036 tw_dev->command_packet_virt[0],
1037 tw_dev->command_packet_phys[0]);
1039 if (tw_dev->generic_buffer_virt[0])
1040 pci_free_consistent(tw_dev->tw_pci_dev,
1041 TW_SECTOR_SIZE*TW_Q_LENGTH,
1042 tw_dev->generic_buffer_virt[0],
1043 tw_dev->generic_buffer_phys[0]);
1045 kfree(tw_dev->event_queue[0]);
1046 } /* End twa_free_device_extension() */
1048 /* This function will free a request id */
1049 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1051 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1052 tw_dev->state[request_id] = TW_S_FINISHED;
1053 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1054 } /* End twa_free_request_id() */
1056 /* This function will get parameter table entries from the firmware */
1057 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1059 TW_Command_Full *full_command_packet;
1060 TW_Command *command_packet;
1061 TW_Param_Apache *param;
1062 void *retval = NULL;
1064 /* Setup the command packet */
1065 full_command_packet = tw_dev->command_packet_virt[request_id];
1066 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1067 command_packet = &full_command_packet->command.oldcommand;
1069 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1070 command_packet->size = TW_COMMAND_SIZE;
1071 command_packet->request_id = request_id;
1072 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1074 /* Now setup the param */
1075 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1076 memset(param, 0, TW_SECTOR_SIZE);
1077 param->table_id = cpu_to_le16(table_id | 0x8000);
1078 param->parameter_id = cpu_to_le16(parameter_id);
1079 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1081 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1082 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1084 /* Post the command packet to the board */
1085 twa_post_command_packet(tw_dev, request_id, 1);
1087 /* Poll for completion */
1088 if (twa_poll_response(tw_dev, request_id, 30))
1089 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1090 else
1091 retval = (void *)&(param->data[0]);
1093 tw_dev->posted_request_count--;
1094 tw_dev->state[request_id] = TW_S_INITIAL;
1096 return retval;
1097 } /* End twa_get_param() */
1099 /* This function will assign an available request id */
1100 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1102 *request_id = tw_dev->free_queue[tw_dev->free_head];
1103 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1104 tw_dev->state[*request_id] = TW_S_STARTED;
1105 } /* End twa_get_request_id() */
1107 /* This function will send an initconnection command to the controller */
1108 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1109 u32 set_features, unsigned short current_fw_srl,
1110 unsigned short current_fw_arch_id,
1111 unsigned short current_fw_branch,
1112 unsigned short current_fw_build,
1113 unsigned short *fw_on_ctlr_srl,
1114 unsigned short *fw_on_ctlr_arch_id,
1115 unsigned short *fw_on_ctlr_branch,
1116 unsigned short *fw_on_ctlr_build,
1117 u32 *init_connect_result)
1119 TW_Command_Full *full_command_packet;
1120 TW_Initconnect *tw_initconnect;
1121 int request_id = 0, retval = 1;
1123 /* Initialize InitConnection command packet */
1124 full_command_packet = tw_dev->command_packet_virt[request_id];
1125 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1126 full_command_packet->header.header_desc.size_header = 128;
1128 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1129 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1130 tw_initconnect->request_id = request_id;
1131 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1132 tw_initconnect->features = set_features;
1134 /* Turn on 64-bit sgl support if we need to */
1135 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1137 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1139 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1140 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1141 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1142 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1143 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1144 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1145 } else
1146 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1148 /* Send command packet to the board */
1149 twa_post_command_packet(tw_dev, request_id, 1);
1151 /* Poll for completion */
1152 if (twa_poll_response(tw_dev, request_id, 30)) {
1153 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1154 } else {
1155 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1156 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1157 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1158 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1159 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1160 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1162 retval = 0;
1165 tw_dev->posted_request_count--;
1166 tw_dev->state[request_id] = TW_S_INITIAL;
1168 return retval;
1169 } /* End twa_initconnection() */
1171 /* This function will initialize the fields of a device extension */
1172 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1174 int i, retval = 1;
1176 /* Initialize command packet buffers */
1177 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1178 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1179 goto out;
1182 /* Initialize generic buffer */
1183 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1184 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1185 goto out;
1188 /* Allocate event info space */
1189 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1190 if (!tw_dev->event_queue[0]) {
1191 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1192 goto out;
1196 for (i = 0; i < TW_Q_LENGTH; i++) {
1197 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1198 tw_dev->free_queue[i] = i;
1199 tw_dev->state[i] = TW_S_INITIAL;
1202 tw_dev->pending_head = TW_Q_START;
1203 tw_dev->pending_tail = TW_Q_START;
1204 tw_dev->free_head = TW_Q_START;
1205 tw_dev->free_tail = TW_Q_START;
1206 tw_dev->error_sequence_id = 1;
1207 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1209 mutex_init(&tw_dev->ioctl_lock);
1210 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1212 retval = 0;
1213 out:
1214 return retval;
1215 } /* End twa_initialize_device_extension() */
1217 /* This function is the interrupt service routine */
1218 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1220 int request_id, error = 0;
1221 u32 status_reg_value;
1222 TW_Response_Queue response_que;
1223 TW_Command_Full *full_command_packet;
1224 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1225 int handled = 0;
1227 /* Get the per adapter lock */
1228 spin_lock(tw_dev->host->host_lock);
1230 /* Read the registers */
1231 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1233 /* Check if this is our interrupt, otherwise bail */
1234 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1235 goto twa_interrupt_bail;
1237 handled = 1;
1239 /* If we are resetting, bail */
1240 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1241 goto twa_interrupt_bail;
1243 /* Check controller for errors */
1244 if (twa_check_bits(status_reg_value)) {
1245 if (twa_decode_bits(tw_dev, status_reg_value)) {
1246 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1247 goto twa_interrupt_bail;
1251 /* Handle host interrupt */
1252 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1253 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1255 /* Handle attention interrupt */
1256 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1257 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1258 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1259 twa_get_request_id(tw_dev, &request_id);
1261 error = twa_aen_read_queue(tw_dev, request_id);
1262 if (error) {
1263 tw_dev->state[request_id] = TW_S_COMPLETED;
1264 twa_free_request_id(tw_dev, request_id);
1265 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1270 /* Handle command interrupt */
1271 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1272 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1273 /* Drain as many pending commands as we can */
1274 while (tw_dev->pending_request_count > 0) {
1275 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1276 if (tw_dev->state[request_id] != TW_S_PENDING) {
1277 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1278 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1279 goto twa_interrupt_bail;
1281 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1282 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1283 tw_dev->pending_request_count--;
1284 } else {
1285 /* If we get here, we will continue re-posting on the next command interrupt */
1286 break;
1291 /* Handle response interrupt */
1292 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1294 /* Drain the response queue from the board */
1295 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1296 /* Complete the response */
1297 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1298 request_id = TW_RESID_OUT(response_que.response_id);
1299 full_command_packet = tw_dev->command_packet_virt[request_id];
1300 error = 0;
1301 /* Check for command packet errors */
1302 if (full_command_packet->command.newcommand.status != 0) {
1303 if (tw_dev->srb[request_id] != NULL) {
1304 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1305 } else {
1306 /* Skip ioctl error prints */
1307 if (request_id != tw_dev->chrdev_request_id) {
1308 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1313 /* Check for correct state */
1314 if (tw_dev->state[request_id] != TW_S_POSTED) {
1315 if (tw_dev->srb[request_id] != NULL) {
1316 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1317 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1318 goto twa_interrupt_bail;
1322 /* Check for internal command completion */
1323 if (tw_dev->srb[request_id] == NULL) {
1324 if (request_id != tw_dev->chrdev_request_id) {
1325 if (twa_aen_complete(tw_dev, request_id))
1326 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1327 } else {
1328 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1329 wake_up(&tw_dev->ioctl_wqueue);
1331 } else {
1332 struct scsi_cmnd *cmd;
1334 cmd = tw_dev->srb[request_id];
1336 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1337 			/* If there was no error, the command was a success */
1338 if (error == 0) {
1339 cmd->result = (DID_OK << 16);
1342 /* If error, command failed */
1343 if (error == 1) {
1344 /* Ask for a host reset */
1345 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1348 /* Report residual bytes for single sgl */
1349 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1350 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1351 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1354 /* Now complete the io */
1355 tw_dev->state[request_id] = TW_S_COMPLETED;
1356 twa_free_request_id(tw_dev, request_id);
1357 tw_dev->posted_request_count--;
1358 tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1359 twa_unmap_scsi_data(tw_dev, request_id);
1362 /* Check for valid status after each drain */
1363 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1364 if (twa_check_bits(status_reg_value)) {
1365 if (twa_decode_bits(tw_dev, status_reg_value)) {
1366 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1367 goto twa_interrupt_bail;
1373 twa_interrupt_bail:
1374 spin_unlock(tw_dev->host->host_lock);
1375 return IRQ_RETVAL(handled);
1376 } /* End twa_interrupt() */
1378 /* This function will load the request id and various sgls for ioctls */
1379 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1381 TW_Command *oldcommand;
1382 TW_Command_Apache *newcommand;
1383 TW_SG_Entry *sgl;
1384 unsigned int pae = 0;
1386 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1387 pae = 1;
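	/* pae is set on 32-bit kernels built with a 64-bit dma_addr_t (e.g. PAE
	   with 64-bit DMA); in that configuration the old-style command packet
	   appears to need one extra 32-bit word, hence the adjustments to the
	   sgl offset and to oldcommand->size below. */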
1389 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1390 newcommand = &full_command_packet->command.newcommand;
1391 newcommand->request_id__lunl =
1392 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1393 if (length) {
1394 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1395 newcommand->sg_list[0].length = cpu_to_le32(length);
1397 newcommand->sgl_entries__lunh =
1398 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1399 } else {
1400 oldcommand = &full_command_packet->command.oldcommand;
1401 oldcommand->request_id = request_id;
1403 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1404 /* Load the sg list */
1405 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1406 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1407 else
1408 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1409 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1410 sgl->length = cpu_to_le32(length);
1412 oldcommand->size += pae;
1415 } /* End twa_load_sgl() */
1417 /* This function will perform a pci-dma mapping for a scatter gather list */
1418 static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1420 int use_sg;
1421 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1423 use_sg = scsi_dma_map(cmd);
1424 if (!use_sg)
1425 return 0;
1426 else if (use_sg < 0) {
1427 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1428 return 0;
1431 cmd->SCp.phase = TW_PHASE_SGLIST;
1432 cmd->SCp.have_data_in = use_sg;
1434 return use_sg;
1435 } /* End twa_map_scsi_sg_data() */
1437 /* This function will poll for a response interrupt of a request */
1438 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1440 int retval = 1, found = 0, response_request_id;
1441 TW_Response_Queue response_queue;
1442 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1444 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1445 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1446 response_request_id = TW_RESID_OUT(response_queue.response_id);
1447 if (request_id != response_request_id) {
1448 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1449 goto out;
1451 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1452 if (full_command_packet->command.newcommand.status != 0) {
1453 /* bad response */
1454 twa_fill_sense(tw_dev, request_id, 0, 0);
1455 goto out;
1457 found = 1;
1458 } else {
1459 if (full_command_packet->command.oldcommand.status != 0) {
1460 /* bad response */
1461 twa_fill_sense(tw_dev, request_id, 0, 0);
1462 goto out;
1464 found = 1;
1468 if (found)
1469 retval = 0;
1470 out:
1471 return retval;
1472 } /* End twa_poll_response() */
1474 /* This function will poll the status register for a flag */
1475 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1477 u32 status_reg_value;
1478 unsigned long before;
1479 int retval = 1;
1481 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1482 before = jiffies;
1484 if (twa_check_bits(status_reg_value))
1485 twa_decode_bits(tw_dev, status_reg_value);
1487 while ((status_reg_value & flag) != flag) {
1488 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1490 if (twa_check_bits(status_reg_value))
1491 twa_decode_bits(tw_dev, status_reg_value);
1493 if (time_after(jiffies, before + HZ * seconds))
1494 goto out;
1496 msleep(50);
1498 retval = 0;
1499 out:
1500 return retval;
1501 } /* End twa_poll_status() */
1503 /* This function will poll the status register for disappearance of a flag */
1504 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1506 u32 status_reg_value;
1507 unsigned long before;
1508 int retval = 1;
1510 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1511 before = jiffies;
1513 if (twa_check_bits(status_reg_value))
1514 twa_decode_bits(tw_dev, status_reg_value);
1516 while ((status_reg_value & flag) != 0) {
1517 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1518 if (twa_check_bits(status_reg_value))
1519 twa_decode_bits(tw_dev, status_reg_value);
1521 if (time_after(jiffies, before + HZ * seconds))
1522 goto out;
1524 msleep(50);
1526 retval = 0;
1527 out:
1528 return retval;
1529 } /* End twa_poll_status_gone() */
1531 /* This function will attempt to post a command packet to the board */
1532 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1534 u32 status_reg_value;
1535 dma_addr_t command_que_value;
1536 int retval = 1;
1538 command_que_value = tw_dev->command_packet_phys[request_id];
1540 /* For 9650SE write low 4 bytes first */
1541 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1542 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1543 command_que_value += TW_COMMAND_OFFSET;
1544 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
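	/* Note: on these controllers the 64-bit command address is written as
	   two 32-bit halves; the low half is staged here and the high half is
	   written only when the packet is actually posted further down, which
	   presumably is what triggers the controller to fetch the command. */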
1547 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1549 if (twa_check_bits(status_reg_value))
1550 twa_decode_bits(tw_dev, status_reg_value);
1552 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1554 /* Only pend internal driver commands */
1555 if (!internal) {
1556 retval = SCSI_MLQUEUE_HOST_BUSY;
1557 goto out;
1560 /* Couldn't post the command packet, so we do it later */
1561 if (tw_dev->state[request_id] != TW_S_PENDING) {
1562 tw_dev->state[request_id] = TW_S_PENDING;
1563 tw_dev->pending_request_count++;
1564 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1565 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1567 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1568 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1570 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1571 goto out;
1572 } else {
1573 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1574 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1575 /* Now write upper 4 bytes */
1576 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1577 } else {
1578 if (sizeof(dma_addr_t) > 4) {
1579 command_que_value += TW_COMMAND_OFFSET;
1580 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1581 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1582 } else {
1583 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1586 tw_dev->state[request_id] = TW_S_POSTED;
1587 tw_dev->posted_request_count++;
1588 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1589 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1592 retval = 0;
1593 out:
1594 return retval;
1595 } /* End twa_post_command_packet() */
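/*
 * Posting note (a reading of the code above, not hardware documentation):
 * on 9650SE/9690SA the 64-bit command packet address is split across two
 * 32-bit writes to the "large" command queue register -- the low dword is
 * written before the queue-full check and the high dword afterwards
 * (presumably the write that actually posts the command).  Other boards
 * with a 64-bit dma_addr_t write low/high back-to-back to the regular
 * command queue register, and 32-bit configurations post with a single
 * write of (TW_COMMAND_OFFSET + command_que_value).
 */
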
/* This function will reset a device extension */
static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
{
	int i = 0;
	int retval = 1;
	unsigned long flags = 0;

	set_bit(TW_IN_RESET, &tw_dev->flags);
	TW_DISABLE_INTERRUPTS(tw_dev);
	TW_MASK_COMMAND_INTERRUPT(tw_dev);
	spin_lock_irqsave(tw_dev->host->host_lock, flags);

	/* Abort all requests that are in progress */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		if ((tw_dev->state[i] != TW_S_FINISHED) &&
		    (tw_dev->state[i] != TW_S_INITIAL) &&
		    (tw_dev->state[i] != TW_S_COMPLETED)) {
			if (tw_dev->srb[i]) {
				tw_dev->srb[i]->result = (DID_RESET << 16);
				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
				twa_unmap_scsi_data(tw_dev, i);
			}
		}
	}

	/* Reset queues and counts */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->posted_request_count = 0;
	tw_dev->pending_request_count = 0;
	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->reset_print = 0;

	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

	if (twa_reset_sequence(tw_dev, 1))
		goto out;

	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
	clear_bit(TW_IN_RESET, &tw_dev->flags);
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	retval = 0;
out:
	return retval;
} /* End twa_reset_device_extension() */

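/*
 * Reset summary (as implemented above): outstanding commands are completed
 * back to the midlayer with DID_RESET and unmapped, the free/pending queues
 * and counters are reinitialized under host_lock, and then
 * twa_reset_sequence() is run with soft_reset=1 before interrupts are
 * re-enabled and the character device request id is marked free again.
 */
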
/* This function will reset a controller */
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
{
	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;

	while (tries < TW_MAX_RESET_TRIES) {
		if (do_soft_reset) {
			TW_SOFT_RESET(tw_dev);
			/* Clear pchip/response queue on 9550SX */
			if (twa_empty_response_queue_large(tw_dev)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
				do_soft_reset = 1;
				tries++;
				continue;
			}
		}

		/* Make sure controller is in a good state */
		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* Empty response queue */
		if (twa_empty_response_queue(tw_dev)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		flashed = 0;

		/* Check for compatibility/flash */
		if (twa_check_srl(tw_dev, &flashed)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		} else {
			if (flashed) {
				tries++;
				continue;
			}
		}

		/* Drain the AEN queue */
		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* If we got here, controller is in a good state */
		retval = 0;
		goto out;
	}
out:
	return retval;
} /* End twa_reset_sequence() */

/* This function returns unit geometry in cylinders/heads/sectors */
static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
{
	int heads, sectors, cylinders;
	TW_Device_Extension *tw_dev;

	tw_dev = (TW_Device_Extension *)sdev->host->hostdata;

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		cylinders = sector_div(capacity, heads * sectors);
	} else {
		heads = 64;
		sectors = 32;
		cylinders = sector_div(capacity, heads * sectors);
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
} /* End twa_scsi_biosparam() */

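/*
 * Geometry note: 0x200000 sectors is the 1GB boundary at 512 bytes per
 * sector (0x200000 * 512 = 1GiB), so units of 1GB and larger report the
 * conventional 255 head / 63 sectors-per-track BIOS translation and smaller
 * units report 64/32.  sector_div() is the kernel helper for dividing a
 * 64-bit sector_t (it modifies 'capacity' in place), which is why a plain
 * '/' is not used here.
 */
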
/* This is the new scsi eh reset function */
static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
{
	TW_Device_Extension *tw_dev = NULL;
	int retval = FAILED;

	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	tw_dev->num_resets++;

	sdev_printk(KERN_WARNING, SCpnt->device,
		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);

	/* Make sure we are not issuing an ioctl or resetting from ioctl */
	mutex_lock(&tw_dev->ioctl_lock);

	/* Now reset the card and some of the device extension data */
	if (twa_reset_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
		goto out;
	}

	retval = SUCCESS;
out:
	mutex_unlock(&tw_dev->ioctl_lock);
	return retval;
} /* End twa_scsi_eh_reset() */

/* This is the main scsi queue function to handle scsi opcodes */
static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	int request_id, retval;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	/* If we are resetting due to timed out ioctl, report as busy */
	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* Check if this FW supports luns */
	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
		SCpnt->result = (DID_BAD_TARGET << 16);
		done(SCpnt);
		retval = 0;
		goto out;
	}

	/* Save done function into scsi_cmnd struct */
	SCpnt->scsi_done = done;

	/* Get a free request id */
	twa_get_request_id(tw_dev, &request_id);

	/* Save the scsi command for use by the ISR */
	tw_dev->srb[request_id] = SCpnt;

	/* Initialize phase to zero */
	SCpnt->SCp.phase = TW_PHASE_INITIAL;

	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
	switch (retval) {
	case SCSI_MLQUEUE_HOST_BUSY:
		twa_free_request_id(tw_dev, request_id);
		twa_unmap_scsi_data(tw_dev, request_id);
		break;
	case 1:
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		twa_unmap_scsi_data(tw_dev, request_id);
		SCpnt->result = (DID_ERROR << 16);
		done(SCpnt);
		retval = 0;
	}
out:
	return retval;
} /* End twa_scsi_queue() */

static DEF_SCSI_QCMD(twa_scsi_queue)

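/*
 * DEF_SCSI_QCMD() (include/scsi/scsi_host.h) generates the twa_scsi_queue()
 * entry point used in the host template below: roughly, it takes the
 * Scsi_Host lock and calls twa_scsi_queue_lck() with cmd->scsi_done as the
 * completion callback, preserving the older locked queuecommand calling
 * convention this driver was written against.
 */
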
/* This function hands scsi cdb's to the firmware */
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
{
	TW_Command_Full *full_command_packet;
	TW_Command_Apache *command_packet;
	u32 num_sectors = 0x0;
	int i, sg_count;
	struct scsi_cmnd *srb = NULL;
	struct scatterlist *sglist = NULL, *sg;
	int retval = 1;

	if (tw_dev->srb[request_id]) {
		srb = tw_dev->srb[request_id];
		if (scsi_sglist(srb))
			sglist = scsi_sglist(srb);
	}

	/* Initialize command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	full_command_packet->header.header_desc.size_header = 128;
	full_command_packet->header.status_block.error = 0;
	full_command_packet->header.status_block.severity__reserved = 0;

	command_packet = &full_command_packet->command.newcommand;
	command_packet->status = 0;
	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);

	/* We forced 16 byte cdb use earlier */
	if (!cdb)
		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
	else
		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);

	if (srb) {
		command_packet->unit = srb->device->id;
		command_packet->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
	} else {
		command_packet->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
		command_packet->unit = 0;
	}

	command_packet->sgl_offset = 16;

	if (!sglistarg) {
		/* Map sglist from scsi layer to cmd packet */

		if (scsi_sg_count(srb)) {
			if ((scsi_sg_count(srb) == 1) &&
			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
				if (srb->sc_data_direction == DMA_TO_DEVICE ||
				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
					scsi_sg_copy_to_buffer(srb,
							       tw_dev->generic_buffer_virt[request_id],
							       TW_SECTOR_SIZE);
				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
			} else {
				sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
				if (sg_count == 0)
					goto out;

				scsi_for_each_sg(srb, sg, sg_count, i) {
					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
						goto out;
					}
				}
			}
			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
		}
	} else {
		/* Internal cdb post */
		for (i = 0; i < use_sg; i++) {
			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
			command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
				goto out;
			}
		}
		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
	}

	if (srb) {
		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
			num_sectors = (u32)srb->cmnd[4];

		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
	}

	/* Update sector statistic */
	tw_dev->sector_count = num_sectors;
	if (tw_dev->sector_count > tw_dev->max_sector_count)
		tw_dev->max_sector_count = tw_dev->sector_count;

	/* Update SG statistics */
	if (srb) {
		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
	}

	/* Now post the command to the board */
	if (srb) {
		retval = twa_post_command_packet(tw_dev, request_id, 0);
	} else {
		twa_post_command_packet(tw_dev, request_id, 1);
		retval = 0;
	}
out:
	return retval;
} /* End twa_scsiop_execute_scsi() */

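/*
 * Scatter-gather handling above, in short: a request with a single SG
 * element shorter than TW_MIN_SGL_LENGTH is bounced through the
 * pre-allocated generic_buffer for this request id (writes are copied in
 * here, reads are copied back in twa_scsiop_execute_scsi_complete() below)
 * and the SGL is padded to TW_MIN_SGL_LENGTH; everything else is DMA-mapped
 * normally, with each mapped segment checked against the controller's
 * TW_ALIGNMENT_9000_SGL address alignment requirement.
 */
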
/* This function completes an execute scsi operation */
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
{
	struct scsi_cmnd *cmd = tw_dev->srb[request_id];

	if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
		if (scsi_sg_count(cmd) == 1) {
			void *buf = tw_dev->generic_buffer_virt[request_id];

			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
		}
	}
} /* End twa_scsiop_execute_scsi_complete() */

/* This function tells the controller to shut down */
static void __twa_shutdown(TW_Device_Extension *tw_dev)
{
	/* Disable interrupts */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Free up the IRQ */
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
	} else {
		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
	}

	/* Clear all interrupts just before exit */
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
} /* End __twa_shutdown() */

/* Wrapper for __twa_shutdown */
static void twa_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	__twa_shutdown(tw_dev);
} /* End twa_shutdown() */

/* This function will look up a string */
static char *twa_string_lookup(twa_message_type *table, unsigned int code)
{
	int index;

	for (index = 0; ((code != table[index].code) &&
		(table[index].text != (char *)0)); index++);
	return(table[index].text);
} /* End twa_string_lookup() */

/* This function will perform a pci-dma unmap */
static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
{
	struct scsi_cmnd *cmd = tw_dev->srb[request_id];

	if (cmd->SCp.phase == TW_PHASE_SGLIST)
		scsi_dma_unmap(cmd);
} /* End twa_unmap_scsi_data() */

/* This function gets called when a disk is coming on-line */
static int twa_slave_configure(struct scsi_device *sdev)
{
	/* Force 60 second timeout */
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);

	return 0;
} /* End twa_slave_configure() */

/* scsi_host_template initializer */
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "3ware 9000 Storage Controller",
	.queuecommand		= twa_scsi_queue,
	.eh_host_reset_handler	= twa_scsi_eh_reset,
	.bios_param		= twa_scsi_biosparam,
	.change_queue_depth	= twa_change_queue_depth,
	.can_queue		= TW_Q_LENGTH-2,
	.slave_configure	= twa_slave_configure,
	.this_id		= -1,
	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
	.max_sectors		= TW_MAX_SECTORS,
	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= twa_host_attrs,
	.emulated		= 1
};

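/*
 * Template notes: can_queue is TW_Q_LENGTH-2, apparently leaving a couple of
 * request slots available for the driver's own internal commands (AEN and
 * ioctl traffic), while sg_tablesize and max_sectors are capped at
 * TW_APACHE_MAX_SGL_LENGTH and TW_MAX_SECTORS, presumably to match what the
 * firmware accepts per command.
 */
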
/* This function will probe and initialize a card */
static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host = NULL;
	TW_Device_Extension *tw_dev;
	unsigned long mem_addr, mem_len;
	int retval = -ENODEV;

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
			retval = -ENODEV;
			goto out_disable_device;
		}

	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
	if (!host) {
		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
		retval = -ENOMEM;
		goto out_disable_device;
	}
	tw_dev = (TW_Device_Extension *)host->hostdata;

	/* Save values to device extension */
	tw_dev->host = host;
	tw_dev->tw_pci_dev = pdev;

	if (twa_initialize_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
		goto out_free_device_extension;
	}

	/* Request IO regions */
	retval = pci_request_regions(pdev, "3w-9xxx");
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
		goto out_free_device_extension;
	}

	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
		mem_addr = pci_resource_start(pdev, 1);
		mem_len = pci_resource_len(pdev, 1);
	} else {
		mem_addr = pci_resource_start(pdev, 2);
		mem_len = pci_resource_len(pdev, 2);
	}

	/* Save base address */
	tw_dev->base_addr = ioremap(mem_addr, mem_len);
	if (!tw_dev->base_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
		goto out_release_mem_region;
	}

	/* Disable interrupts on the card */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0))
		goto out_iounmap;

	/* Set host specific parameters */
	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
		host->max_id = TW_MAX_UNITS_9650SE;
	else
		host->max_id = TW_MAX_UNITS;

	host->max_cmd_len = TW_MAX_CDB_LEN;

	/* Channels aren't supported by adapter */
	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
	host->max_channel = 0;

	/* Register the card with the kernel SCSI layer */
	retval = scsi_add_host(host, &pdev->dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
		goto out_iounmap;
	}

	pci_set_drvdata(pdev, host);

	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
	       host->host_no, mem_addr, pdev->irq);
	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
	       host->host_no,
	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));

	/* Try to enable MSI */
	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
	    !pci_enable_msi(pdev))
		set_bit(TW_USING_MSI, &tw_dev->flags);

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
		goto out_remove_host;
	}

	twa_device_extension_list[twa_device_extension_count] = tw_dev;
	twa_device_extension_count++;

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	/* Finally, scan the host */
	scsi_scan_host(host);

	if (twa_major == -1) {
		if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
	}
	return 0;

out_remove_host:
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);
	scsi_remove_host(host);
out_iounmap:
	iounmap(tw_dev->base_addr);
out_release_mem_region:
	pci_release_regions(pdev);
out_free_device_extension:
	twa_free_device_extension(tw_dev);
	scsi_host_put(host);
out_disable_device:
	pci_disable_device(pdev);

	return retval;
} /* End twa_probe() */

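/*
 * Probe order recap (mirrors the code above): enable the PCI device, try a
 * 64-bit DMA mask and fall back to 32-bit, allocate the Scsi_Host, map the
 * control registers (BAR 1 on the original 9000-series board, BAR 2 on the
 * later ones), run a non-soft reset sequence, register with the SCSI
 * midlayer, optionally enable MSI on non-9000 boards, hook up the shared
 * IRQ handler, and finally scan the host and register the "twa" character
 * device used by the ioctl interface.
 */
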
/* This function is called to remove a device */
static void twa_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	scsi_remove_host(tw_dev->host);

	/* Unregister character device */
	if (twa_major >= 0) {
		unregister_chrdev(twa_major, "twa");
		twa_major = -1;
	}

	/* Shutdown the card */
	__twa_shutdown(tw_dev);

	/* Disable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Free IO remapping */
	iounmap(tw_dev->base_addr);

	/* Free up the mem region */
	pci_release_regions(pdev);

	/* Free up device extension resources */
	twa_free_device_extension(tw_dev);

	scsi_host_put(tw_dev->host);
	pci_disable_device(pdev);
	twa_device_extension_count--;
} /* End twa_remove() */

#ifdef CONFIG_PM
/* This function is called on PCI suspend */
static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);

	TW_DISABLE_INTERRUPTS(tw_dev);
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
	} else {
		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
	}
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
} /* End twa_suspend() */

/* This function is called on PCI resume */
static int twa_resume(struct pci_dev *pdev)
{
	int retval = 0;
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
		return retval;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
			retval = -ENODEV;
			goto out_disable_device;
		}

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0)) {
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now enable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_enable_msi(pdev);

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
	return 0;

out_disable_device:
	scsi_remove_host(host);
	pci_disable_device(pdev);

	return retval;
} /* End twa_resume() */
#endif

/* PCI Devices supported by this driver */
static struct pci_device_id twa_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ }
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);

/* pci_driver initializer */
static struct pci_driver twa_driver = {
	.name		= "3w-9xxx",
	.id_table	= twa_pci_tbl,
	.probe		= twa_probe,
	.remove		= twa_remove,
#ifdef CONFIG_PM
	.suspend	= twa_suspend,
	.resume		= twa_resume,
#endif
	.shutdown	= twa_shutdown
};

/* This function is called on driver initialization */
static int __init twa_init(void)
{
	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);

	return pci_register_driver(&twa_driver);
} /* End twa_init() */

/* This function is called on driver exit */
static void __exit twa_exit(void)
{
	pci_unregister_driver(&twa_driver);
} /* End twa_exit() */

module_init(twa_init);
module_exit(twa_exit);