1 /*
2 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
4 Written By: Adam Radford <aradford@gmail.com>
5 Modifications By: Tom Couch
7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8 Copyright (C) 2010 LSI Corporation.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; version 2 of the License.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 NO WARRANTY
20 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 solely responsible for determining the appropriateness of using and
25 distributing the Program and assumes all risks associated with its
26 exercise of rights under this Agreement, including but not limited to
27 the risks and costs of program errors, damage to or loss of data,
28 programs or equipment, and unavailability or interruption of operations.
30 DISCLAIMER OF LIABILITY
31 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 You should have received a copy of the GNU General Public License
40 along with this program; if not, write to the Free Software
41 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
43 Bugs/Comments/Suggestions should be mailed to:
44 aradford@gmail.com
46 Note: This version of the driver does not contain a bundled firmware
47 image.
49 History
50 -------
51 2.26.02.000 - Driver cleanup for kernel submission.
52 2.26.02.001 - Replace schedule_timeout() calls with msleep().
53 2.26.02.002 - Add support for PAE mode.
54 Add lun support.
55 Fix twa_remove() to free irq handler/unregister_chrdev()
56 before shutting down card.
57 Change to new 'change_queue_depth' api.
58 Fix 'handled=1' ISR usage, remove bogus IRQ check.
59 Remove un-needed eh_abort handler.
60 Add support for embedded firmware error strings.
61 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
62 2.26.02.004 - Add support for 9550SX controllers.
63 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
64 2.26.02.006 - Fix 9550SX pchip reset timeout.
65 Add big endian support.
66 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
67 2.26.02.008 - Free irq handler in __twa_shutdown().
68 Serialize reset code.
69 Add support for 9650SE controllers.
70 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
71 2.26.02.010 - Add support for 9690SA controllers.
72 2.26.02.011 - Increase max AENs drained to 256.
73 Add MSI support and "use_msi" module parameter.
74 Fix bug in twa_get_param() on 4GB+.
75 Use pci_resource_len() for ioremap().
76 2.26.02.012 - Add power management support.
77 2.26.02.013 - Fix bug in twa_load_sgl().
78 2.26.02.014 - Force 60 second timeout default.
79 */
81 #include <linux/module.h>
82 #include <linux/reboot.h>
83 #include <linux/spinlock.h>
84 #include <linux/interrupt.h>
85 #include <linux/moduleparam.h>
86 #include <linux/errno.h>
87 #include <linux/types.h>
88 #include <linux/delay.h>
89 #include <linux/pci.h>
90 #include <linux/time.h>
91 #include <linux/mutex.h>
92 #include <linux/slab.h>
93 #include <asm/io.h>
94 #include <asm/irq.h>
95 #include <linux/uaccess.h>
96 #include <scsi/scsi.h>
97 #include <scsi/scsi_host.h>
98 #include <scsi/scsi_tcq.h>
99 #include <scsi/scsi_cmnd.h>
100 #include "3w-9xxx.h"
102 /* Globals */
103 #define TW_DRIVER_VERSION "2.26.02.014"
104 static DEFINE_MUTEX(twa_chrdev_mutex);
105 static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
106 static unsigned int twa_device_extension_count;
107 static int twa_major = -1;
108 extern struct timezone sys_tz;
110 /* Module parameters */
111 MODULE_AUTHOR ("LSI");
112 MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
113 MODULE_LICENSE("GPL");
114 MODULE_VERSION(TW_DRIVER_VERSION);
116 static int use_msi = 0;
117 module_param(use_msi, int, S_IRUGO);
118 MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
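/* Usage note (assumption, not from the original source): with the driver built as the 3w-9xxx module, MSI can be enabled at load time, e.g. "modprobe 3w-9xxx use_msi=1" */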
120 /* Function prototypes */
121 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
122 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
123 static char *twa_aen_severity_lookup(unsigned char severity_code);
124 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
125 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
126 static int twa_chrdev_open(struct inode *inode, struct file *file);
127 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
128 static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
129 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
130 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
131 u32 set_features, unsigned short current_fw_srl,
132 unsigned short current_fw_arch_id,
133 unsigned short current_fw_branch,
134 unsigned short current_fw_build,
135 unsigned short *fw_on_ctlr_srl,
136 unsigned short *fw_on_ctlr_arch_id,
137 unsigned short *fw_on_ctlr_branch,
138 unsigned short *fw_on_ctlr_build,
139 u32 *init_connect_result);
140 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
141 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
142 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
143 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
144 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
145 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
146 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
147 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
148 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
150 /* Functions */
152 /* Show some statistics about the card */
153 static ssize_t twa_show_stats(struct device *dev,
154 struct device_attribute *attr, char *buf)
155 {
156 struct Scsi_Host *host = class_to_shost(dev);
157 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
158 unsigned long flags = 0;
159 ssize_t len;
161 spin_lock_irqsave(tw_dev->host->host_lock, flags);
162 len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
163 "Current commands posted: %4d\n"
164 "Max commands posted: %4d\n"
165 "Current pending commands: %4d\n"
166 "Max pending commands: %4d\n"
167 "Last sgl length: %4d\n"
168 "Max sgl length: %4d\n"
169 "Last sector count: %4d\n"
170 "Max sector count: %4d\n"
171 "SCSI Host Resets: %4d\n"
172 "AEN's: %4d\n",
173 TW_DRIVER_VERSION,
174 tw_dev->posted_request_count,
175 tw_dev->max_posted_request_count,
176 tw_dev->pending_request_count,
177 tw_dev->max_pending_request_count,
178 tw_dev->sgl_entries,
179 tw_dev->max_sgl_entries,
180 tw_dev->sector_count,
181 tw_dev->max_sector_count,
182 tw_dev->num_resets,
183 tw_dev->aen_count);
184 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
185 return len;
186 } /* End twa_show_stats() */
188 /* Create sysfs 'stats' entry */
189 static struct device_attribute twa_host_stats_attr = {
190 .attr = {
191 .name = "stats",
192 .mode = S_IRUGO,
193 },
194 .show = twa_show_stats
195 };
197 /* Host attributes initializer */
198 static struct device_attribute *twa_host_attrs[] = {
199 &twa_host_stats_attr,
200 NULL,
201 };
203 /* File operations struct for character device */
204 static const struct file_operations twa_fops = {
205 .owner = THIS_MODULE,
206 .unlocked_ioctl = twa_chrdev_ioctl,
207 .open = twa_chrdev_open,
208 .release = NULL,
209 .llseek = noop_llseek,
210 };
212 /*
213 * The controllers use an inline buffer instead of a mapped SGL for small,
214 * single entry buffers. Note that we treat a zero-length transfer like
215 * a mapped SGL.
216 */
217 static bool twa_command_mapped(struct scsi_cmnd *cmd)
218 {
219 return scsi_sg_count(cmd) != 1 ||
220 scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
221 }
223 /* This function will complete an aen request from the isr */
224 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
225 {
226 TW_Command_Full *full_command_packet;
227 TW_Command *command_packet;
228 TW_Command_Apache_Header *header;
229 unsigned short aen;
230 int retval = 1;
232 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
233 tw_dev->posted_request_count--;
234 aen = le16_to_cpu(header->status_block.error);
235 full_command_packet = tw_dev->command_packet_virt[request_id];
236 command_packet = &full_command_packet->command.oldcommand;
238 /* First check for internal completion of set param for time sync */
239 if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
240 /* Keep reading the queue in case there are more aen's */
241 if (twa_aen_read_queue(tw_dev, request_id))
242 goto out2;
243 else {
244 retval = 0;
245 goto out;
249 switch (aen) {
250 case TW_AEN_QUEUE_EMPTY:
251 /* Quit reading the queue if this is the last one */
252 break;
253 case TW_AEN_SYNC_TIME_WITH_HOST:
254 twa_aen_sync_time(tw_dev, request_id);
255 retval = 0;
256 goto out;
257 default:
258 twa_aen_queue_event(tw_dev, header);
260 /* If there are more aen's, keep reading the queue */
261 if (twa_aen_read_queue(tw_dev, request_id))
262 goto out2;
263 else {
264 retval = 0;
265 goto out;
268 retval = 0;
269 out2:
270 tw_dev->state[request_id] = TW_S_COMPLETED;
271 twa_free_request_id(tw_dev, request_id);
272 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
273 out:
274 return retval;
275 } /* End twa_aen_complete() */
277 /* This function will drain aen queue */
278 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
279 {
280 int request_id = 0;
281 char cdb[TW_MAX_CDB_LEN];
282 TW_SG_Entry sglist[1];
283 int finished = 0, count = 0;
284 TW_Command_Full *full_command_packet;
285 TW_Command_Apache_Header *header;
286 unsigned short aen;
287 int first_reset = 0, queue = 0, retval = 1;
289 if (no_check_reset)
290 first_reset = 0;
291 else
292 first_reset = 1;
294 full_command_packet = tw_dev->command_packet_virt[request_id];
295 memset(full_command_packet, 0, sizeof(TW_Command_Full));
297 /* Initialize cdb */
298 memset(&cdb, 0, TW_MAX_CDB_LEN);
299 cdb[0] = REQUEST_SENSE; /* opcode */
300 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
302 /* Initialize sglist */
303 memset(&sglist, 0, sizeof(TW_SG_Entry));
304 sglist[0].length = TW_SECTOR_SIZE;
305 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
307 if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
308 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
309 goto out;
312 /* Mark internal command */
313 tw_dev->srb[request_id] = NULL;
315 do {
316 /* Send command to the board */
317 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
318 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
319 goto out;
322 /* Now poll for completion */
323 if (twa_poll_response(tw_dev, request_id, 30)) {
324 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
325 tw_dev->posted_request_count--;
326 goto out;
329 tw_dev->posted_request_count--;
330 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
331 aen = le16_to_cpu(header->status_block.error);
332 queue = 0;
333 count++;
335 switch (aen) {
336 case TW_AEN_QUEUE_EMPTY:
337 if (first_reset != 1)
338 goto out;
339 else
340 finished = 1;
341 break;
342 case TW_AEN_SOFT_RESET:
343 if (first_reset == 0)
344 first_reset = 1;
345 else
346 queue = 1;
347 break;
348 case TW_AEN_SYNC_TIME_WITH_HOST:
349 break;
350 default:
351 queue = 1;
354 /* Now queue an event info */
355 if (queue)
356 twa_aen_queue_event(tw_dev, header);
357 } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
359 if (count == TW_MAX_AEN_DRAIN)
360 goto out;
362 retval = 0;
363 out:
364 tw_dev->state[request_id] = TW_S_INITIAL;
365 return retval;
366 } /* End twa_aen_drain_queue() */
368 /* This function will queue an event */
369 static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
370 {
371 u32 local_time;
372 struct timeval time;
373 TW_Event *event;
374 unsigned short aen;
375 char host[16];
376 char *error_str;
378 tw_dev->aen_count++;
380 /* Fill out event info */
381 event = tw_dev->event_queue[tw_dev->error_index];
383 /* Check for clobber */
384 host[0] = '\0';
385 if (tw_dev->host) {
386 sprintf(host, " scsi%d:", tw_dev->host->host_no);
387 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
388 tw_dev->aen_clobber = 1;
391 aen = le16_to_cpu(header->status_block.error);
392 memset(event, 0, sizeof(TW_Event));
394 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
395 do_gettimeofday(&time);
396 local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
397 event->time_stamp_sec = local_time;
398 event->aen_code = aen;
399 event->retrieved = TW_AEN_NOT_RETRIEVED;
400 event->sequence_id = tw_dev->error_sequence_id;
401 tw_dev->error_sequence_id++;
403 /* Check for embedded error string */
404 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
406 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
407 event->parameter_len = strlen(header->err_specific_desc);
408 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
409 if (event->severity != TW_AEN_SEVERITY_DEBUG)
410 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
411 host,
412 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
413 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
414 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
415 header->err_specific_desc);
416 else
417 tw_dev->aen_count--;
419 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
420 tw_dev->event_queue_wrapped = 1;
421 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
422 } /* End twa_aen_queue_event() */
424 /* This function will read the aen queue from the isr */
425 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
426 {
427 char cdb[TW_MAX_CDB_LEN];
428 TW_SG_Entry sglist[1];
429 TW_Command_Full *full_command_packet;
430 int retval = 1;
432 full_command_packet = tw_dev->command_packet_virt[request_id];
433 memset(full_command_packet, 0, sizeof(TW_Command_Full));
435 /* Initialize cdb */
436 memset(&cdb, 0, TW_MAX_CDB_LEN);
437 cdb[0] = REQUEST_SENSE; /* opcode */
438 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
440 /* Initialize sglist */
441 memset(&sglist, 0, sizeof(TW_SG_Entry));
442 sglist[0].length = TW_SECTOR_SIZE;
443 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
445 /* Mark internal command */
446 tw_dev->srb[request_id] = NULL;
448 /* Now post the command packet */
449 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
450 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
451 goto out;
453 retval = 0;
454 out:
455 return retval;
456 } /* End twa_aen_read_queue() */
458 /* This function will look up an AEN severity string */
459 static char *twa_aen_severity_lookup(unsigned char severity_code)
460 {
461 char *retval = NULL;
463 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
464 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
465 goto out;
467 retval = twa_aen_severity_table[severity_code];
468 out:
469 return retval;
470 } /* End twa_aen_severity_lookup() */
472 /* This function will sync firmware time with the host time */
473 static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
474 {
475 u32 schedulertime;
476 struct timeval utc;
477 TW_Command_Full *full_command_packet;
478 TW_Command *command_packet;
479 TW_Param_Apache *param;
480 u32 local_time;
482 /* Fill out the command packet */
483 full_command_packet = tw_dev->command_packet_virt[request_id];
484 memset(full_command_packet, 0, sizeof(TW_Command_Full));
485 command_packet = &full_command_packet->command.oldcommand;
486 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
487 command_packet->request_id = request_id;
488 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
489 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
490 command_packet->size = TW_COMMAND_SIZE;
491 command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
493 /* Setup the param */
494 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
495 memset(param, 0, TW_SECTOR_SIZE);
496 param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
497 param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
498 param->parameter_size_bytes = cpu_to_le16(4);
500 /* Convert system time in UTC to local time seconds since last
501 Sunday 12:00AM */
502 do_gettimeofday(&utc);
503 local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
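/* The Unix epoch (Jan 1, 1970) was a Thursday; subtracting three days makes the weekly modulo below count from Sunday 12:00AM */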
504 schedulertime = local_time - (3 * 86400);
505 schedulertime = cpu_to_le32(schedulertime % 604800);
507 memcpy(param->data, &schedulertime, sizeof(u32));
509 /* Mark internal command */
510 tw_dev->srb[request_id] = NULL;
512 /* Now post the command */
513 twa_post_command_packet(tw_dev, request_id, 1);
514 } /* End twa_aen_sync_time() */
516 /* This function will allocate memory and check if it is correctly aligned */
517 static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
518 {
519 int i;
520 dma_addr_t dma_handle;
521 unsigned long *cpu_addr;
522 int retval = 1;
524 cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
525 if (!cpu_addr) {
526 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
527 goto out;
530 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
531 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
532 pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
533 goto out;
536 memset(cpu_addr, 0, size*TW_Q_LENGTH);
538 for (i = 0; i < TW_Q_LENGTH; i++) {
539 switch(which) {
540 case 0:
541 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
542 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
543 break;
544 case 1:
545 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
546 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
547 break;
550 retval = 0;
551 out:
552 return retval;
553 } /* End twa_allocate_memory() */
555 /* This function will check the status register for unexpected bits */
556 static int twa_check_bits(u32 status_reg_value)
557 {
558 int retval = 1;
560 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
561 goto out;
562 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
563 goto out;
565 retval = 0;
566 out:
567 return retval;
568 } /* End twa_check_bits() */
570 /* This function will check the srl and decide if we are compatible */
571 static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
572 {
573 int retval = 1;
574 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
575 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
576 u32 init_connect_result = 0;
578 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
579 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
580 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
581 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
582 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
583 &fw_on_ctlr_build, &init_connect_result)) {
584 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
585 goto out;
588 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
589 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
590 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
592 /* Try base mode compatibility */
593 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
594 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
595 TW_EXTENDED_INIT_CONNECT,
596 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
597 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
598 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
599 &fw_on_ctlr_branch, &fw_on_ctlr_build,
600 &init_connect_result)) {
601 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
602 goto out;
604 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
605 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
606 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
607 } else {
608 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
610 goto out;
612 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
613 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
614 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
617 /* Load rest of compatibility struct */
618 strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
619 sizeof(tw_dev->tw_compat_info.driver_version));
620 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
621 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
622 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
623 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
624 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
625 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
626 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
627 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
628 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
630 retval = 0;
631 out:
632 return retval;
633 } /* End twa_check_srl() */
635 /* This function handles ioctl for the character device */
636 static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
637 {
638 struct inode *inode = file_inode(file);
639 long timeout;
640 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
641 dma_addr_t dma_handle;
642 int request_id = 0;
643 unsigned int sequence_id = 0;
644 unsigned char event_index, start_index;
645 TW_Ioctl_Driver_Command driver_command;
646 TW_Ioctl_Buf_Apache *tw_ioctl;
647 TW_Lock *tw_lock;
648 TW_Command_Full *full_command_packet;
649 TW_Compatibility_Info *tw_compat_info;
650 TW_Event *event;
651 struct timeval current_time;
652 u32 current_time_ms;
653 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
654 int retval = TW_IOCTL_ERROR_OS_EFAULT;
655 void __user *argp = (void __user *)arg;
657 mutex_lock(&twa_chrdev_mutex);
659 /* Only let one of these through at a time */
660 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
661 retval = TW_IOCTL_ERROR_OS_EINTR;
662 goto out;
665 /* First copy down the driver command */
666 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
667 goto out2;
669 /* Check data buffer size */
670 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
671 retval = TW_IOCTL_ERROR_OS_EINVAL;
672 goto out2;
675 /* Hardware can only do multiple of 512 byte transfers */
676 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
678 /* Now allocate ioctl buf memory */
679 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
680 if (!cpu_addr) {
681 retval = TW_IOCTL_ERROR_OS_ENOMEM;
682 goto out2;
685 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
687 /* Now copy down the entire ioctl */
688 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
689 goto out3;
691 /* See which ioctl we are doing */
692 switch (cmd) {
693 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
694 spin_lock_irqsave(tw_dev->host->host_lock, flags);
695 twa_get_request_id(tw_dev, &request_id);
697 /* Flag internal command */
698 tw_dev->srb[request_id] = NULL;
700 /* Flag chrdev ioctl */
701 tw_dev->chrdev_request_id = request_id;
703 full_command_packet = &tw_ioctl->firmware_command;
705 /* Load request id and sglist for both command types */
706 twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
708 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
710 /* Now post the command packet to the controller */
711 twa_post_command_packet(tw_dev, request_id, 1);
712 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
714 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
716 /* Now wait for command to complete */
717 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
719 /* We timed out, and didn't get an interrupt */
720 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
721 /* Now we need to reset the board */
722 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
723 tw_dev->host->host_no, TW_DRIVER, 0x37,
724 cmd);
725 retval = TW_IOCTL_ERROR_OS_EIO;
726 twa_reset_device_extension(tw_dev);
727 goto out3;
730 /* Now copy in the command packet response */
731 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
733 /* Now complete the io */
734 spin_lock_irqsave(tw_dev->host->host_lock, flags);
735 tw_dev->posted_request_count--;
736 tw_dev->state[request_id] = TW_S_COMPLETED;
737 twa_free_request_id(tw_dev, request_id);
738 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
739 break;
740 case TW_IOCTL_GET_COMPATIBILITY_INFO:
741 tw_ioctl->driver_command.status = 0;
742 /* Copy compatibility struct into ioctl data buffer */
743 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
744 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
745 break;
746 case TW_IOCTL_GET_LAST_EVENT:
747 if (tw_dev->event_queue_wrapped) {
748 if (tw_dev->aen_clobber) {
749 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
750 tw_dev->aen_clobber = 0;
751 } else
752 tw_ioctl->driver_command.status = 0;
753 } else {
754 if (!tw_dev->error_index) {
755 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
756 break;
758 tw_ioctl->driver_command.status = 0;
760 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
761 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
762 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
763 break;
764 case TW_IOCTL_GET_FIRST_EVENT:
765 if (tw_dev->event_queue_wrapped) {
766 if (tw_dev->aen_clobber) {
767 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
768 tw_dev->aen_clobber = 0;
769 } else
770 tw_ioctl->driver_command.status = 0;
771 event_index = tw_dev->error_index;
772 } else {
773 if (!tw_dev->error_index) {
774 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
775 break;
777 tw_ioctl->driver_command.status = 0;
778 event_index = 0;
780 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
781 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
782 break;
783 case TW_IOCTL_GET_NEXT_EVENT:
784 event = (TW_Event *)tw_ioctl->data_buffer;
785 sequence_id = event->sequence_id;
786 tw_ioctl->driver_command.status = 0;
788 if (tw_dev->event_queue_wrapped) {
789 if (tw_dev->aen_clobber) {
790 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
791 tw_dev->aen_clobber = 0;
793 start_index = tw_dev->error_index;
794 } else {
795 if (!tw_dev->error_index) {
796 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
797 break;
799 start_index = 0;
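/* Event sequence ids are contiguous starting at start_index, so index by (sequence_id - base sequence id) and step one slot forward to reach the next event */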
801 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
803 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
804 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
805 tw_dev->aen_clobber = 1;
806 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
807 break;
809 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
810 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
811 break;
812 case TW_IOCTL_GET_PREVIOUS_EVENT:
813 event = (TW_Event *)tw_ioctl->data_buffer;
814 sequence_id = event->sequence_id;
815 tw_ioctl->driver_command.status = 0;
817 if (tw_dev->event_queue_wrapped) {
818 if (tw_dev->aen_clobber) {
819 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
820 tw_dev->aen_clobber = 0;
822 start_index = tw_dev->error_index;
823 } else {
824 if (!tw_dev->error_index) {
825 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
826 break;
828 start_index = 0;
830 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
832 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
833 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
834 tw_dev->aen_clobber = 1;
835 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
836 break;
838 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
839 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
840 break;
841 case TW_IOCTL_GET_LOCK:
842 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
843 do_gettimeofday(&current_time);
844 current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
846 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
847 tw_dev->ioctl_sem_lock = 1;
848 tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
849 tw_ioctl->driver_command.status = 0;
850 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
851 } else {
852 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
853 tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
855 break;
856 case TW_IOCTL_RELEASE_LOCK:
857 if (tw_dev->ioctl_sem_lock == 1) {
858 tw_dev->ioctl_sem_lock = 0;
859 tw_ioctl->driver_command.status = 0;
860 } else {
861 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
863 break;
864 default:
865 retval = TW_IOCTL_ERROR_OS_ENOTTY;
866 goto out3;
869 /* Now copy the entire response to userspace */
870 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
871 retval = 0;
872 out3:
873 /* Now free ioctl buf memory */
874 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
875 out2:
876 mutex_unlock(&tw_dev->ioctl_lock);
877 out:
878 mutex_unlock(&twa_chrdev_mutex);
879 return retval;
880 } /* End twa_chrdev_ioctl() */
882 /* This function handles open for the character device */
883 /* NOTE that this function will race with remove. */
884 static int twa_chrdev_open(struct inode *inode, struct file *file)
885 {
886 unsigned int minor_number;
887 int retval = TW_IOCTL_ERROR_OS_ENODEV;
889 minor_number = iminor(inode);
890 if (minor_number >= twa_device_extension_count)
891 goto out;
892 retval = 0;
893 out:
894 return retval;
895 } /* End twa_chrdev_open() */
897 /* This function will print readable messages from status register errors */
898 static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
899 {
900 int retval = 1;
902 /* Check for various error conditions and handle them appropriately */
903 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
904 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
905 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
908 if (status_reg_value & TW_STATUS_PCI_ABORT) {
909 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
910 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
911 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
914 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
915 if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
916 (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
917 (!test_bit(TW_IN_RESET, &tw_dev->flags)))
918 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
919 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
922 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
923 if (tw_dev->reset_print == 0) {
924 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
925 tw_dev->reset_print = 1;
927 goto out;
929 retval = 0;
930 out:
931 return retval;
932 } /* End twa_decode_bits() */
934 /* This function will empty the response queue */
935 static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
936 {
937 u32 status_reg_value, response_que_value;
938 int count = 0, retval = 1;
940 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
942 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
943 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
944 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
945 count++;
947 if (count == TW_MAX_RESPONSE_DRAIN)
948 goto out;
950 retval = 0;
951 out:
952 return retval;
953 } /* End twa_empty_response_queue() */
955 /* This function will clear the pchip/response queue on 9550SX */
956 static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
957 {
958 u32 response_que_value = 0;
959 unsigned long before;
960 int retval = 1;
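/* The drain below is skipped on the original 9000-series controller; per the driver history it applies to the 9550SX and later boards */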
962 if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
963 before = jiffies;
964 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
965 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
966 msleep(1);
967 if (time_after(jiffies, before + HZ * 30))
968 goto out;
970 /* P-chip settle time */
971 msleep(500);
972 retval = 0;
973 } else
974 retval = 0;
975 out:
976 return retval;
977 } /* End twa_empty_response_queue_large() */
979 /* This function passes sense keys from firmware to scsi layer */
980 static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
981 {
982 TW_Command_Full *full_command_packet;
983 unsigned short error;
984 int retval = 1;
985 char *error_str;
987 full_command_packet = tw_dev->command_packet_virt[request_id];
989 /* Check for embedded error string */
990 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
992 /* Don't print error for Logical unit not supported during rollcall */
993 error = le16_to_cpu(full_command_packet->header.status_block.error);
994 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
995 if (print_host)
996 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
997 tw_dev->host->host_no,
998 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
999 full_command_packet->header.status_block.error,
1000 error_str[0] == '\0' ?
1001 twa_string_lookup(twa_error_table,
1002 full_command_packet->header.status_block.error) : error_str,
1003 full_command_packet->header.err_specific_desc);
1004 else
1005 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1006 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1007 full_command_packet->header.status_block.error,
1008 error_str[0] == '\0' ?
1009 twa_string_lookup(twa_error_table,
1010 full_command_packet->header.status_block.error) : error_str,
1011 full_command_packet->header.err_specific_desc);
1014 if (copy_sense) {
1015 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1016 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1017 retval = TW_ISR_DONT_RESULT;
1018 goto out;
1020 retval = 0;
1021 out:
1022 return retval;
1023 } /* End twa_fill_sense() */
1025 /* This function will free up device extension resources */
1026 static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1027 {
1028 if (tw_dev->command_packet_virt[0])
1029 pci_free_consistent(tw_dev->tw_pci_dev,
1030 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1031 tw_dev->command_packet_virt[0],
1032 tw_dev->command_packet_phys[0]);
1034 if (tw_dev->generic_buffer_virt[0])
1035 pci_free_consistent(tw_dev->tw_pci_dev,
1036 TW_SECTOR_SIZE*TW_Q_LENGTH,
1037 tw_dev->generic_buffer_virt[0],
1038 tw_dev->generic_buffer_phys[0]);
1040 kfree(tw_dev->event_queue[0]);
1041 } /* End twa_free_device_extension() */
1043 /* This function will free a request id */
1044 static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1045 {
1046 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1047 tw_dev->state[request_id] = TW_S_FINISHED;
1048 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1049 } /* End twa_free_request_id() */
1051 /* This function will get parameter table entries from the firmware */
1052 static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1053 {
1054 TW_Command_Full *full_command_packet;
1055 TW_Command *command_packet;
1056 TW_Param_Apache *param;
1057 void *retval = NULL;
1059 /* Setup the command packet */
1060 full_command_packet = tw_dev->command_packet_virt[request_id];
1061 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1062 command_packet = &full_command_packet->command.oldcommand;
1064 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1065 command_packet->size = TW_COMMAND_SIZE;
1066 command_packet->request_id = request_id;
1067 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1069 /* Now setup the param */
1070 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1071 memset(param, 0, TW_SECTOR_SIZE);
1072 param->table_id = cpu_to_le16(table_id | 0x8000);
1073 param->parameter_id = cpu_to_le16(parameter_id);
1074 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1076 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1077 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1079 /* Post the command packet to the board */
1080 twa_post_command_packet(tw_dev, request_id, 1);
1082 /* Poll for completion */
1083 if (twa_poll_response(tw_dev, request_id, 30))
1084 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1085 else
1086 retval = (void *)&(param->data[0]);
1088 tw_dev->posted_request_count--;
1089 tw_dev->state[request_id] = TW_S_INITIAL;
1091 return retval;
1092 } /* End twa_get_param() */
1094 /* This function will assign an available request id */
1095 static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1096 {
1097 *request_id = tw_dev->free_queue[tw_dev->free_head];
1098 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1099 tw_dev->state[*request_id] = TW_S_STARTED;
1100 } /* End twa_get_request_id() */
1102 /* This function will send an initconnection command to controller */
1103 static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1104 u32 set_features, unsigned short current_fw_srl,
1105 unsigned short current_fw_arch_id,
1106 unsigned short current_fw_branch,
1107 unsigned short current_fw_build,
1108 unsigned short *fw_on_ctlr_srl,
1109 unsigned short *fw_on_ctlr_arch_id,
1110 unsigned short *fw_on_ctlr_branch,
1111 unsigned short *fw_on_ctlr_build,
1112 u32 *init_connect_result)
1113 {
1114 TW_Command_Full *full_command_packet;
1115 TW_Initconnect *tw_initconnect;
1116 int request_id = 0, retval = 1;
1118 /* Initialize InitConnection command packet */
1119 full_command_packet = tw_dev->command_packet_virt[request_id];
1120 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1121 full_command_packet->header.header_desc.size_header = 128;
1123 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1124 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1125 tw_initconnect->request_id = request_id;
1126 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1127 tw_initconnect->features = set_features;
1129 /* Turn on 64-bit sgl support if we need to */
1130 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1132 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1134 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1135 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1136 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1137 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1138 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1139 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1140 } else
1141 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1143 /* Send command packet to the board */
1144 twa_post_command_packet(tw_dev, request_id, 1);
1146 /* Poll for completion */
1147 if (twa_poll_response(tw_dev, request_id, 30)) {
1148 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1149 } else {
1150 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1151 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1152 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1153 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1154 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1155 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1157 retval = 0;
1160 tw_dev->posted_request_count--;
1161 tw_dev->state[request_id] = TW_S_INITIAL;
1163 return retval;
1164 } /* End twa_initconnection() */
1166 /* This function will initialize the fields of a device extension */
1167 static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1168 {
1169 int i, retval = 1;
1171 /* Initialize command packet buffers */
1172 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1173 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1174 goto out;
1177 /* Initialize generic buffer */
1178 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1179 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1180 goto out;
1183 /* Allocate event info space */
1184 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1185 if (!tw_dev->event_queue[0]) {
1186 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1187 goto out;
1191 for (i = 0; i < TW_Q_LENGTH; i++) {
1192 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1193 tw_dev->free_queue[i] = i;
1194 tw_dev->state[i] = TW_S_INITIAL;
1197 tw_dev->pending_head = TW_Q_START;
1198 tw_dev->pending_tail = TW_Q_START;
1199 tw_dev->free_head = TW_Q_START;
1200 tw_dev->free_tail = TW_Q_START;
1201 tw_dev->error_sequence_id = 1;
1202 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1204 mutex_init(&tw_dev->ioctl_lock);
1205 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1207 retval = 0;
1208 out:
1209 return retval;
1210 } /* End twa_initialize_device_extension() */
1212 /* This function is the interrupt service routine */
1213 static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1214 {
1215 int request_id, error = 0;
1216 u32 status_reg_value;
1217 TW_Response_Queue response_que;
1218 TW_Command_Full *full_command_packet;
1219 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1220 int handled = 0;
1222 /* Get the per adapter lock */
1223 spin_lock(tw_dev->host->host_lock);
1225 /* Read the registers */
1226 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1228 /* Check if this is our interrupt, otherwise bail */
1229 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1230 goto twa_interrupt_bail;
1232 handled = 1;
1234 /* If we are resetting, bail */
1235 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1236 goto twa_interrupt_bail;
1238 /* Check controller for errors */
1239 if (twa_check_bits(status_reg_value)) {
1240 if (twa_decode_bits(tw_dev, status_reg_value)) {
1241 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1242 goto twa_interrupt_bail;
1246 /* Handle host interrupt */
1247 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1248 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1250 /* Handle attention interrupt */
1251 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1252 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1253 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1254 twa_get_request_id(tw_dev, &request_id);
1256 error = twa_aen_read_queue(tw_dev, request_id);
1257 if (error) {
1258 tw_dev->state[request_id] = TW_S_COMPLETED;
1259 twa_free_request_id(tw_dev, request_id);
1260 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1265 /* Handle command interrupt */
1266 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1267 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1268 /* Drain as many pending commands as we can */
1269 while (tw_dev->pending_request_count > 0) {
1270 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1271 if (tw_dev->state[request_id] != TW_S_PENDING) {
1272 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1273 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1274 goto twa_interrupt_bail;
1276 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1277 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1278 tw_dev->pending_request_count--;
1279 } else {
1280 /* If we get here, we will continue re-posting on the next command interrupt */
1281 break;
1286 /* Handle response interrupt */
1287 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1289 /* Drain the response queue from the board */
1290 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1291 /* Complete the response */
1292 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1293 request_id = TW_RESID_OUT(response_que.response_id);
1294 full_command_packet = tw_dev->command_packet_virt[request_id];
1295 error = 0;
1296 /* Check for command packet errors */
1297 if (full_command_packet->command.newcommand.status != 0) {
1298 if (tw_dev->srb[request_id] != NULL) {
1299 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1300 } else {
1301 /* Skip ioctl error prints */
1302 if (request_id != tw_dev->chrdev_request_id) {
1303 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1308 /* Check for correct state */
1309 if (tw_dev->state[request_id] != TW_S_POSTED) {
1310 if (tw_dev->srb[request_id] != NULL) {
1311 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1312 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1313 goto twa_interrupt_bail;
1317 /* Check for internal command completion */
1318 if (tw_dev->srb[request_id] == NULL) {
1319 if (request_id != tw_dev->chrdev_request_id) {
1320 if (twa_aen_complete(tw_dev, request_id))
1321 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1322 } else {
1323 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1324 wake_up(&tw_dev->ioctl_wqueue);
1326 } else {
1327 struct scsi_cmnd *cmd;
1329 cmd = tw_dev->srb[request_id];
1331 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1332 /* If no error, the command was a success */
1333 if (error == 0) {
1334 cmd->result = (DID_OK << 16);
1337 /* If error, command failed */
1338 if (error == 1) {
1339 /* Ask for a host reset */
1340 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1343 /* Report residual bytes for single sgl */
1344 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1345 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1346 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1349 /* Now complete the io */
1350 if (twa_command_mapped(cmd))
1351 scsi_dma_unmap(cmd);
1352 cmd->scsi_done(cmd);
1353 tw_dev->state[request_id] = TW_S_COMPLETED;
1354 twa_free_request_id(tw_dev, request_id);
1355 tw_dev->posted_request_count--;
1358 /* Check for valid status after each drain */
1359 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1360 if (twa_check_bits(status_reg_value)) {
1361 if (twa_decode_bits(tw_dev, status_reg_value)) {
1362 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1363 goto twa_interrupt_bail;
1369 twa_interrupt_bail:
1370 spin_unlock(tw_dev->host->host_lock);
1371 return IRQ_RETVAL(handled);
1372 } /* End twa_interrupt() */
1374 /* This function will load the request id and various sgls for ioctls */
1375 static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1376 {
1377 TW_Command *oldcommand;
1378 TW_Command_Apache *newcommand;
1379 TW_SG_Entry *sgl;
1380 unsigned int pae = 0;
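/* A 32-bit kernel built with a 64-bit dma_addr_t (PAE) places the ioctl SGL one 32-bit word further into the command packet */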
1382 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1383 pae = 1;
1385 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1386 newcommand = &full_command_packet->command.newcommand;
1387 newcommand->request_id__lunl =
1388 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1389 if (length) {
1390 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1391 newcommand->sg_list[0].length = cpu_to_le32(length);
1393 newcommand->sgl_entries__lunh =
1394 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1395 } else {
1396 oldcommand = &full_command_packet->command.oldcommand;
1397 oldcommand->request_id = request_id;
1399 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1400 /* Load the sg list */
1401 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1402 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1403 else
1404 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1405 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1406 sgl->length = cpu_to_le32(length);
1408 oldcommand->size += pae;
1411 } /* End twa_load_sgl() */
1413 /* This function will poll for a response interrupt of a request */
1414 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1415 {
1416 int retval = 1, found = 0, response_request_id;
1417 TW_Response_Queue response_queue;
1418 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1420 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1421 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1422 response_request_id = TW_RESID_OUT(response_queue.response_id);
1423 if (request_id != response_request_id) {
1424 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1425 goto out;
1427 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1428 if (full_command_packet->command.newcommand.status != 0) {
1429 /* bad response */
1430 twa_fill_sense(tw_dev, request_id, 0, 0);
1431 goto out;
1433 found = 1;
1434 } else {
1435 if (full_command_packet->command.oldcommand.status != 0) {
1436 /* bad response */
1437 twa_fill_sense(tw_dev, request_id, 0, 0);
1438 goto out;
1440 found = 1;
1444 if (found)
1445 retval = 0;
1446 out:
1447 return retval;
1448 } /* End twa_poll_response() */
1450 /* This function will poll the status register for a flag */
1451 static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1452 {
1453 u32 status_reg_value;
1454 unsigned long before;
1455 int retval = 1;
1457 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1458 before = jiffies;
1460 if (twa_check_bits(status_reg_value))
1461 twa_decode_bits(tw_dev, status_reg_value);
1463 while ((status_reg_value & flag) != flag) {
1464 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1466 if (twa_check_bits(status_reg_value))
1467 twa_decode_bits(tw_dev, status_reg_value);
1469 if (time_after(jiffies, before + HZ * seconds))
1470 goto out;
1472 msleep(50);
1474 retval = 0;
1475 out:
1476 return retval;
1477 } /* End twa_poll_status() */
1479 /* This function will poll the status register for disappearance of a flag */
1480 static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1481 {
1482 u32 status_reg_value;
1483 unsigned long before;
1484 int retval = 1;
1486 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1487 before = jiffies;
1489 if (twa_check_bits(status_reg_value))
1490 twa_decode_bits(tw_dev, status_reg_value);
1492 while ((status_reg_value & flag) != 0) {
1493 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1494 if (twa_check_bits(status_reg_value))
1495 twa_decode_bits(tw_dev, status_reg_value);
1497 if (time_after(jiffies, before + HZ * seconds))
1498 goto out;
1500 msleep(50);
1502 retval = 0;
1503 out:
1504 return retval;
1505 } /* End twa_poll_status_gone() */
1507 /* This function will attempt to post a command packet to the board */
1508 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1509 {
1510 u32 status_reg_value;
1511 dma_addr_t command_que_value;
1512 int retval = 1;
1514 command_que_value = tw_dev->command_packet_phys[request_id];
1516 /* For 9650SE write low 4 bytes first */
1517 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1518 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1519 command_que_value += TW_COMMAND_OFFSET;
1520 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1523 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1525 if (twa_check_bits(status_reg_value))
1526 twa_decode_bits(tw_dev, status_reg_value);
1528 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1530 /* Only pend internal driver commands */
1531 if (!internal) {
1532 retval = SCSI_MLQUEUE_HOST_BUSY;
1533 goto out;
1536 /* Couldn't post the command packet, so we do it later */
1537 if (tw_dev->state[request_id] != TW_S_PENDING) {
1538 tw_dev->state[request_id] = TW_S_PENDING;
1539 tw_dev->pending_request_count++;
1540 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1541 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1543 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1544 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1546 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1547 goto out;
1548 } else {
1549 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1550 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1551 /* Now write upper 4 bytes */
1552 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1553 } else {
1554 if (sizeof(dma_addr_t) > 4) {
1555 command_que_value += TW_COMMAND_OFFSET;
1556 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1557 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1558 } else {
1559 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1562 tw_dev->state[request_id] = TW_S_POSTED;
1563 tw_dev->posted_request_count++;
1564 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1565 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1568 retval = 0;
1569 out:
1570 return retval;
1571 } /* End twa_post_command_packet() */
1573 /* This function will reset a device extension */
1574 static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1575 {
1576 int i = 0;
1577 int retval = 1;
1578 unsigned long flags = 0;
1580 set_bit(TW_IN_RESET, &tw_dev->flags);
1581 TW_DISABLE_INTERRUPTS(tw_dev);
1582 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1583 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1585 /* Abort all requests that are in progress */
1586 for (i = 0; i < TW_Q_LENGTH; i++) {
1587 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1588 (tw_dev->state[i] != TW_S_INITIAL) &&
1589 (tw_dev->state[i] != TW_S_COMPLETED)) {
1590 if (tw_dev->srb[i]) {
1591 struct scsi_cmnd *cmd = tw_dev->srb[i];
1593 cmd->result = (DID_RESET << 16);
1594 if (twa_command_mapped(cmd))
1595 scsi_dma_unmap(cmd);
1596 cmd->scsi_done(cmd);
1601 /* Reset queues and counts */
1602 for (i = 0; i < TW_Q_LENGTH; i++) {
1603 tw_dev->free_queue[i] = i;
1604 tw_dev->state[i] = TW_S_INITIAL;
1606 tw_dev->free_head = TW_Q_START;
1607 tw_dev->free_tail = TW_Q_START;
1608 tw_dev->posted_request_count = 0;
1609 tw_dev->pending_request_count = 0;
1610 tw_dev->pending_head = TW_Q_START;
1611 tw_dev->pending_tail = TW_Q_START;
1612 tw_dev->reset_print = 0;
1614 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1616 if (twa_reset_sequence(tw_dev, 1))
1617 goto out;
1619 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1620 clear_bit(TW_IN_RESET, &tw_dev->flags);
1621 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1623 retval = 0;
1624 out:
1625 return retval;
1626 } /* End twa_reset_device_extension() */
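/*
 * Recovery path summary (a reading of the code above): every request that
 * is neither INITIAL, FINISHED nor COMPLETED is failed back to the midlayer
 * with DID_RESET (unmapping its DMA first when it was mapped), the
 * free/pending rings and counters are reinitialised, and
 * twa_reset_sequence() is run with soft_reset = 1.  TW_IN_RESET stays set
 * for the duration, so twa_scsi_queue_lck() keeps answering
 * SCSI_MLQUEUE_HOST_BUSY until the controller is back.
 */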
1628 /* This function will reset a controller */
1629 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1631 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1633 while (tries < TW_MAX_RESET_TRIES) {
1634 if (do_soft_reset) {
1635 TW_SOFT_RESET(tw_dev);
1636 /* Clear pchip/response queue on 9550SX */
1637 if (twa_empty_response_queue_large(tw_dev)) {
1638 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1639 do_soft_reset = 1;
1640 tries++;
1641 continue;
1645 /* Make sure controller is in a good state */
1646 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1647 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1648 do_soft_reset = 1;
1649 tries++;
1650 continue;
1653 /* Empty response queue */
1654 if (twa_empty_response_queue(tw_dev)) {
1655 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1656 do_soft_reset = 1;
1657 tries++;
1658 continue;
1661 flashed = 0;
1663 /* Check for compatibility/flash */
1664 if (twa_check_srl(tw_dev, &flashed)) {
1665 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1666 do_soft_reset = 1;
1667 tries++;
1668 continue;
1669 } else {
1670 if (flashed) {
1671 tries++;
1672 continue;
1676 /* Drain the AEN queue */
1677 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1678 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1679 do_soft_reset = 1;
1680 tries++;
1681 continue;
1684 /* If we got here, controller is in a good state */
1685 retval = 0;
1686 goto out;
1688 out:
1689 return retval;
1690 } /* End twa_reset_sequence() */
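/*
 * The loop above retries the whole bring-up (soft reset, readiness poll,
 * queue drains, compatibility check, AEN drain) up to TW_MAX_RESET_TRIES
 * times; any failure forces a soft reset on the next pass.  When
 * twa_check_srl() reports that it flashed firmware (flashed != 0) the loop
 * also restarts, presumably so the freshly flashed image is taken through
 * the same checks before the controller is declared healthy.
 */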
1692 /* This function returns unit geometry in cylinders/heads/sectors */
1693 static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1695 int heads, sectors, cylinders;
1696 TW_Device_Extension *tw_dev;
1698 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1700 if (capacity >= 0x200000) {
1701 heads = 255;
1702 sectors = 63;
1703 cylinders = sector_div(capacity, heads * sectors);
1704 } else {
1705 heads = 64;
1706 sectors = 32;
1707 cylinders = sector_div(capacity, heads * sectors);
1710 geom[0] = heads;
1711 geom[1] = sectors;
1712 geom[2] = cylinders;
1714 return 0;
1715 } /* End twa_scsi_biosparam() */
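/*
 * Geometry convention used above: 0x200000 sectors is 2^21 * 512 bytes,
 * i.e. 1 GiB.  Units of 1 GiB or more advertise 255 heads x 63 sectors
 * (16065 sectors per cylinder); smaller units advertise 64 x 32 (2048
 * sectors per cylinder).  As a worked example, a hypothetical 2 GiB unit
 * (4194304 sectors) spans 4194304 / 16065, roughly 261 cylinders, under the
 * large-disk mapping.  Note that sector_div() divides its sector_t argument
 * in place and returns the remainder, so the quotient is left in 'capacity'
 * after the call.
 */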
1717 /* This is the new scsi eh reset function */
1718 static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1720 TW_Device_Extension *tw_dev = NULL;
1721 int retval = FAILED;
1723 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1725 tw_dev->num_resets++;
1727 sdev_printk(KERN_WARNING, SCpnt->device,
1728 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1729 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1731 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1732 mutex_lock(&tw_dev->ioctl_lock);
1734 /* Now reset the card and some of the device extension data */
1735 if (twa_reset_device_extension(tw_dev)) {
1736 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1737 goto out;
1740 retval = SUCCESS;
1741 out:
1742 mutex_unlock(&tw_dev->ioctl_lock);
1743 return retval;
1744 } /* End twa_scsi_eh_reset() */
1746 /* This is the main scsi queue function to handle scsi opcodes */
1747 static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1749 int request_id, retval;
1750 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1752 /* If we are resetting due to timed out ioctl, report as busy */
1753 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1754 retval = SCSI_MLQUEUE_HOST_BUSY;
1755 goto out;
1758 /* Check if this FW supports luns */
1759 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1760 SCpnt->result = (DID_BAD_TARGET << 16);
1761 done(SCpnt);
1762 retval = 0;
1763 goto out;
1766 /* Save done function into scsi_cmnd struct */
1767 SCpnt->scsi_done = done;
1769 /* Get a free request id */
1770 twa_get_request_id(tw_dev, &request_id);
1772 /* Save the scsi command for use by the ISR */
1773 tw_dev->srb[request_id] = SCpnt;
1775 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1776 switch (retval) {
1777 case SCSI_MLQUEUE_HOST_BUSY:
1778 if (twa_command_mapped(SCpnt))
1779 scsi_dma_unmap(SCpnt);
1780 twa_free_request_id(tw_dev, request_id);
1781 break;
1782 case 1:
1783 SCpnt->result = (DID_ERROR << 16);
1784 if (twa_command_mapped(SCpnt))
1785 scsi_dma_unmap(SCpnt);
1786 done(SCpnt);
1787 tw_dev->state[request_id] = TW_S_COMPLETED;
1788 twa_free_request_id(tw_dev, request_id);
1789 retval = 0;
1791 out:
1792 return retval;
1793 } /* End twa_scsi_queue_lck() */
1795 static DEF_SCSI_QCMD(twa_scsi_queue)
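/*
 * DEF_SCSI_QCMD() (from <scsi/scsi_host.h>) generates the real
 * .queuecommand entry point.  In kernels of this vintage it expands to
 * approximately the following (a sketch, not the literal macro text):
 *
 *	static int twa_scsi_queue(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 *	{
 *		unsigned long irq_flags;
 *		int rc;
 *
 *		spin_lock_irqsave(shost->host_lock, irq_flags);
 *		rc = twa_scsi_queue_lck(cmd, cmd->scsi_done);
 *		spin_unlock_irqrestore(shost->host_lock, irq_flags);
 *		return rc;
 *	}
 *
 * so twa_scsi_queue_lck() above always runs with the host lock held.
 */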
1797 /* This function hands scsi cdb's to the firmware */
1798 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1800 TW_Command_Full *full_command_packet;
1801 TW_Command_Apache *command_packet;
1802 u32 num_sectors = 0x0;
1803 int i, sg_count;
1804 struct scsi_cmnd *srb = NULL;
1805 struct scatterlist *sglist = NULL, *sg;
1806 int retval = 1;
1808 if (tw_dev->srb[request_id]) {
1809 srb = tw_dev->srb[request_id];
1810 if (scsi_sglist(srb))
1811 sglist = scsi_sglist(srb);
1814 /* Initialize command packet */
1815 full_command_packet = tw_dev->command_packet_virt[request_id];
1816 full_command_packet->header.header_desc.size_header = 128;
1817 full_command_packet->header.status_block.error = 0;
1818 full_command_packet->header.status_block.severity__reserved = 0;
1820 command_packet = &full_command_packet->command.newcommand;
1821 command_packet->status = 0;
1822 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1824 /* We forced 16-byte CDB use earlier */
1825 if (!cdb)
1826 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1827 else
1828 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1830 if (srb) {
1831 command_packet->unit = srb->device->id;
1832 command_packet->request_id__lunl =
1833 cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1834 } else {
1835 command_packet->request_id__lunl =
1836 cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1837 command_packet->unit = 0;
1840 command_packet->sgl_offset = 16;
1842 if (!sglistarg) {
1843 /* Map sglist from scsi layer to cmd packet */
1845 if (scsi_sg_count(srb)) {
1846 if (!twa_command_mapped(srb)) {
1847 if (srb->sc_data_direction == DMA_TO_DEVICE ||
1848 srb->sc_data_direction == DMA_BIDIRECTIONAL)
1849 scsi_sg_copy_to_buffer(srb,
1850 tw_dev->generic_buffer_virt[request_id],
1851 TW_SECTOR_SIZE);
1852 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1853 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1854 } else {
1855 sg_count = scsi_dma_map(srb);
1856 if (sg_count < 0)
1857 goto out;
1859 scsi_for_each_sg(srb, sg, sg_count, i) {
1860 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1861 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1862 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1863 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1864 goto out;
1868 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1870 } else {
1871 /* Internal cdb post */
1872 for (i = 0; i < use_sg; i++) {
1873 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1874 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1875 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1876 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1877 goto out;
1880 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1883 if (srb) {
1884 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1885 num_sectors = (u32)srb->cmnd[4];
1887 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1888 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1891 /* Update sector statistic */
1892 tw_dev->sector_count = num_sectors;
1893 if (tw_dev->sector_count > tw_dev->max_sector_count)
1894 tw_dev->max_sector_count = tw_dev->sector_count;
1896 /* Update SG statistics */
1897 if (srb) {
1898 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1899 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1900 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1903 /* Now post the command to the board */
1904 if (srb) {
1905 retval = twa_post_command_packet(tw_dev, request_id, 0);
1906 } else {
1907 twa_post_command_packet(tw_dev, request_id, 1);
1908 retval = 0;
1910 out:
1911 return retval;
1912 } /* End twa_scsiop_execute_scsi() */
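/*
 * Notes on the packet built above: TW_REQ_LUN_IN() packs the low LUN nibble
 * next to the request id and the high nibble next to the SG entry count, so
 * both halves of the LUN travel in the request_id__lunl and
 * sgl_entries__lunh fields.  Data is described either by the caller-supplied
 * sglistarg (for internal cdb posts) or by the scsi_cmnd's scatterlist.
 * Commands that twa_command_mapped() refuses to DMA-map directly are bounced
 * through a single generic_buffer SG entry: written data is copied into it
 * here, and read data is copied back out in
 * twa_scsiop_execute_scsi_complete() below.  Every SG address is checked
 * against TW_ALIGNMENT_9000_SGL, apparently because the firmware cannot take
 * unaligned addresses.  The sector statistics decode the transfer length
 * straight from the CDB: byte 4 for READ_6/WRITE_6, bytes 7-8 (big endian)
 * for READ_10/WRITE_10 -- e.g. cmnd[7] = 0x00, cmnd[8] = 0x80 means 128
 * sectors.
 */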
1914 /* This function completes an execute scsi operation */
1915 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1917 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1919 if (!twa_command_mapped(cmd) &&
1920 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1921 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1922 if (scsi_sg_count(cmd) == 1) {
1923 void *buf = tw_dev->generic_buffer_virt[request_id];
1925 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1928 } /* End twa_scsiop_execute_scsi_complete() */
1930 /* This function tells the controller to shut down */
1931 static void __twa_shutdown(TW_Device_Extension *tw_dev)
1933 /* Disable interrupts */
1934 TW_DISABLE_INTERRUPTS(tw_dev);
1936 /* Free up the IRQ */
1937 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1939 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1941 /* Tell the card we are shutting down */
1942 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1943 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1944 } else {
1945 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1948 /* Clear all interrupts just before exit */
1949 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1950 } /* End __twa_shutdown() */
1952 /* Wrapper for __twa_shutdown */
1953 static void twa_shutdown(struct pci_dev *pdev)
1955 struct Scsi_Host *host = pci_get_drvdata(pdev);
1956 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1958 __twa_shutdown(tw_dev);
1959 } /* End twa_shutdown() */
1961 /* This function will look up a string */
1962 static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1964 int index;
1966 for (index = 0; ((code != table[index].code) &&
1967 (table[index].text != (char *)0)); index++);
1968 return table[index].text;
1969 } /* End twa_string_lookup() */
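/*
 * The lookup above is a linear scan: it stops at the first entry whose code
 * matches, or at the table's terminating entry with a NULL .text pointer,
 * and returns that entry's .text.  A hypothetical call looks like
 *
 *	text = twa_string_lookup(message_table, code);
 *
 * where message_table is a placeholder name for any twa_message_type array
 * ending in a NULL-text sentinel; callers therefore have to cope with
 * whatever the sentinel entry carries for unknown codes.
 */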
1971 /* This function gets called when a disk is coming on-line */
1972 static int twa_slave_configure(struct scsi_device *sdev)
1974 /* Force 60 second timeout */
1975 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1977 return 0;
1978 } /* End twa_slave_configure() */
1980 /* scsi_host_template initializer */
1981 static struct scsi_host_template driver_template = {
1982 .module = THIS_MODULE,
1983 .name = "3ware 9000 Storage Controller",
1984 .queuecommand = twa_scsi_queue,
1985 .eh_host_reset_handler = twa_scsi_eh_reset,
1986 .bios_param = twa_scsi_biosparam,
1987 .change_queue_depth = scsi_change_queue_depth,
1988 .can_queue = TW_Q_LENGTH-2,
1989 .slave_configure = twa_slave_configure,
1990 .this_id = -1,
1991 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
1992 .max_sectors = TW_MAX_SECTORS,
1993 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
1994 .use_clustering = ENABLE_CLUSTERING,
1995 .shost_attrs = twa_host_attrs,
1996 .emulated = 1,
1997 .no_write_same = 1,
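/*
 * A few of the template fields above, with the reasoning hedged where the
 * code does not spell it out: can_queue is TW_Q_LENGTH - 2, presumably
 * keeping a couple of request slots back for internal/AEN traffic so the
 * midlayer can never exhaust the firmware queue; sg_tablesize and
 * max_sectors bound a single command at TW_APACHE_MAX_SGL_LENGTH scatter
 * entries and TW_MAX_SECTORS sectors; no_write_same stops the midlayer from
 * issuing WRITE SAME to these units; and emulated marks the host as a
 * translated (RAID) controller rather than a real SCSI bus.
 */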
2000 /* This function will probe and initialize a card */
2001 static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2003 struct Scsi_Host *host = NULL;
2004 TW_Device_Extension *tw_dev;
2005 unsigned long mem_addr, mem_len;
2006 int retval = -ENODEV;
2008 retval = pci_enable_device(pdev);
2009 if (retval) {
2010 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2011 goto out_disable_device;
2014 pci_set_master(pdev);
2015 pci_try_set_mwi(pdev);
2017 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2018 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2019 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2020 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2021 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2022 retval = -ENODEV;
2023 goto out_disable_device;
2026 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2027 if (!host) {
2028 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2029 retval = -ENOMEM;
2030 goto out_disable_device;
2032 tw_dev = (TW_Device_Extension *)host->hostdata;
2034 /* Save values to device extension */
2035 tw_dev->host = host;
2036 tw_dev->tw_pci_dev = pdev;
2038 if (twa_initialize_device_extension(tw_dev)) {
2039 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2040 goto out_free_device_extension;
2043 /* Request IO regions */
2044 retval = pci_request_regions(pdev, "3w-9xxx");
2045 if (retval) {
2046 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2047 goto out_free_device_extension;
2050 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2051 mem_addr = pci_resource_start(pdev, 1);
2052 mem_len = pci_resource_len(pdev, 1);
2053 } else {
2054 mem_addr = pci_resource_start(pdev, 2);
2055 mem_len = pci_resource_len(pdev, 2);
2058 /* Save base address */
2059 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2060 if (!tw_dev->base_addr) {
2061 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2062 goto out_release_mem_region;
2065 /* Disable interrupts on the card */
2066 TW_DISABLE_INTERRUPTS(tw_dev);
2068 /* Initialize the card */
2069 if (twa_reset_sequence(tw_dev, 0))
2070 goto out_iounmap;
2072 /* Set host specific parameters */
2073 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2074 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2075 host->max_id = TW_MAX_UNITS_9650SE;
2076 else
2077 host->max_id = TW_MAX_UNITS;
2079 host->max_cmd_len = TW_MAX_CDB_LEN;
2081 /* Channels aren't supported by adapter */
2082 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2083 host->max_channel = 0;
2085 /* Register the card with the kernel SCSI layer */
2086 retval = scsi_add_host(host, &pdev->dev);
2087 if (retval) {
2088 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2089 goto out_iounmap;
2092 pci_set_drvdata(pdev, host);
2094 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2095 host->host_no, mem_addr, pdev->irq);
2096 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2097 host->host_no,
2098 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2099 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2100 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2101 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2102 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2103 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2105 /* Try to enable MSI */
2106 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2107 !pci_enable_msi(pdev))
2108 set_bit(TW_USING_MSI, &tw_dev->flags);
2110 /* Now setup the interrupt handler */
2111 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2112 if (retval) {
2113 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2114 goto out_remove_host;
2117 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2118 twa_device_extension_count++;
2120 /* Re-enable interrupts on the card */
2121 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2123 /* Finally, scan the host */
2124 scsi_scan_host(host);
2126 if (twa_major == -1) {
2127 if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2128 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2130 return 0;
2132 out_remove_host:
2133 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2134 pci_disable_msi(pdev);
2135 scsi_remove_host(host);
2136 out_iounmap:
2137 iounmap(tw_dev->base_addr);
2138 out_release_mem_region:
2139 pci_release_regions(pdev);
2140 out_free_device_extension:
2141 twa_free_device_extension(tw_dev);
2142 scsi_host_put(host);
2143 out_disable_device:
2144 pci_disable_device(pdev);
2146 return retval;
2147 } /* End twa_probe() */
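/*
 * Probe order, as laid out above: enable the PCI device, try a 64-bit DMA
 * mask and fall back to 32-bit, allocate the Scsi_Host with the device
 * extension in hostdata, map the register BAR (BAR 1 on the original 9000,
 * BAR 2 on the later parts), run a full reset sequence, register with the
 * SCSI midlayer, optionally enable MSI (never on the original 9000), and
 * only after request_irq() succeeds are interrupts unmasked and the host
 * scanned.  The out_* labels unwind those steps in reverse, and the "twa"
 * character device major is registered once, on the first card that probes
 * successfully.
 */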
2149 /* This function is called to remove a device */
2150 static void twa_remove(struct pci_dev *pdev)
2152 struct Scsi_Host *host = pci_get_drvdata(pdev);
2153 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2155 scsi_remove_host(tw_dev->host);
2157 /* Unregister character device */
2158 if (twa_major >= 0) {
2159 unregister_chrdev(twa_major, "twa");
2160 twa_major = -1;
2163 /* Shutdown the card */
2164 __twa_shutdown(tw_dev);
2166 /* Disable MSI if enabled */
2167 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2168 pci_disable_msi(pdev);
2170 /* Free IO remapping */
2171 iounmap(tw_dev->base_addr);
2173 /* Free up the mem region */
2174 pci_release_regions(pdev);
2176 /* Free up device extension resources */
2177 twa_free_device_extension(tw_dev);
2179 scsi_host_put(tw_dev->host);
2180 pci_disable_device(pdev);
2181 twa_device_extension_count--;
2182 } /* End twa_remove() */
2184 #ifdef CONFIG_PM
2185 /* This function is called on PCI suspend */
2186 static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2188 struct Scsi_Host *host = pci_get_drvdata(pdev);
2189 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2191 printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2193 TW_DISABLE_INTERRUPTS(tw_dev);
2194 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2196 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2197 pci_disable_msi(pdev);
2199 /* Tell the card we are shutting down */
2200 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2201 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2202 } else {
2203 printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2205 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2207 pci_save_state(pdev);
2208 pci_disable_device(pdev);
2209 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2211 return 0;
2212 } /* End twa_suspend() */
2214 /* This function is called on PCI resume */
2215 static int twa_resume(struct pci_dev *pdev)
2217 int retval = 0;
2218 struct Scsi_Host *host = pci_get_drvdata(pdev);
2219 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2221 printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2222 pci_set_power_state(pdev, PCI_D0);
2223 pci_enable_wake(pdev, PCI_D0, 0);
2224 pci_restore_state(pdev);
2226 retval = pci_enable_device(pdev);
2227 if (retval) {
2228 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2229 return retval;
2232 pci_set_master(pdev);
2233 pci_try_set_mwi(pdev);
2235 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2236 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2237 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2238 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2239 TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2240 retval = -ENODEV;
2241 goto out_disable_device;
2244 /* Initialize the card */
2245 if (twa_reset_sequence(tw_dev, 0)) {
2246 retval = -ENODEV;
2247 goto out_disable_device;
2250 /* Now setup the interrupt handler */
2251 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2252 if (retval) {
2253 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2254 retval = -ENODEV;
2255 goto out_disable_device;
2258 /* Now enable MSI if enabled */
2259 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2260 pci_enable_msi(pdev);
2262 /* Re-enable interrupts on the card */
2263 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2265 printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2266 return 0;
2268 out_disable_device:
2269 scsi_remove_host(host);
2270 pci_disable_device(pdev);
2272 return retval;
2273 } /* End twa_resume() */
2274 #endif
2276 /* PCI Devices supported by this driver */
2277 static struct pci_device_id twa_pci_tbl[] = {
2278 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2279 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2280 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2281 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2282 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2283 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2284 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2285 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2288 MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
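/*
 * MODULE_DEVICE_TABLE(pci, ...) exports the ID list above in the module's
 * alias information, so udev/modprobe can autoload 3w-9xxx as soon as a
 * matching PCI_VENDOR_ID_3WARE function shows up.  The PCI_ANY_ID entries
 * wildcard the subsystem vendor/device IDs, so OEM-rebranded cards still
 * match.
 */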
2290 /* pci_driver initializer */
2291 static struct pci_driver twa_driver = {
2292 .name = "3w-9xxx",
2293 .id_table = twa_pci_tbl,
2294 .probe = twa_probe,
2295 .remove = twa_remove,
2296 #ifdef CONFIG_PM
2297 .suspend = twa_suspend,
2298 .resume = twa_resume,
2299 #endif
2300 .shutdown = twa_shutdown
2303 /* This function is called on driver initialization */
2304 static int __init twa_init(void)
2306 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2308 return pci_register_driver(&twa_driver);
2309 } /* End twa_init() */
2311 /* This function is called on driver exit */
2312 static void __exit twa_exit(void)
2314 pci_unregister_driver(&twa_driver);
2315 } /* End twa_exit() */
2317 module_init(twa_init);
2318 module_exit(twa_exit);