1 /******************************************************************************
2 * QLOGIC LINUX SOFTWARE
4 * QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
5 * Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
6 * Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
7 * Copyright (C) 2003-2004 Christoph Hellwig
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
12 * later version.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 ******************************************************************************/
20 #define QLA1280_VERSION "3.27.1"
21 /*****************************************************************************
22 Revision History:
23 Rev 3.27.1, February 8, 2010, Michael Reed
24 - Retain firmware image for error recovery.
25 Rev 3.27, February 10, 2009, Michael Reed
26 - General code cleanup.
27 - Improve error recovery.
28 Rev 3.26, January 16, 2006 Jes Sorensen
29 - Ditch all < 2.6 support
30 Rev 3.25.1, February 10, 2005 Christoph Hellwig
31 - use pci_map_single to map non-S/G requests
32 - remove qla1280_proc_info
33 Rev 3.25, September 28, 2004, Christoph Hellwig
34 - add support for ISP1020/1040
35 - don't include "scsi.h" anymore for 2.6.x
36 Rev 3.24.4 June 7, 2004 Christoph Hellwig
37 - restructure firmware loading, cleanup initialization code
38 - prepare support for ISP1020/1040 chips
39 Rev 3.24.3 January 19, 2004, Jes Sorensen
40 - Handle PCI DMA mask settings correctly
41 - Correct order of error handling in probe_one, free_irq should not
42 be called if request_irq failed
43 Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
44 - Big endian fixes (James)
45 - Remove bogus IOCB content on zero data transfer commands (Andrew)
46 Rev 3.24.1 January 5, 2004, Jes Sorensen
47 - Initialize completion queue to avoid OOPS on probe
48 - Handle interrupts during mailbox testing
49 Rev 3.24 November 17, 2003, Christoph Hellwig
50 - use struct list_head for completion queue
51 - avoid old Scsi_FOO typedefs
52 - cleanup 2.4 compat glue a bit
53 - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
54 - make initialization for memory mapped vs port I/O more similar
55 - remove broken pci config space manipulation
56 - kill more cruft
57 - this is an almost perfect 2.6 scsi driver now! ;)
58 Rev 3.23.39 December 17, 2003, Jes Sorensen
59    - Delete completion queue from srb if mailbox command failed,
60      to avoid qla1280_done completing qla1280_error_action's
61 obsolete context
62 - Reduce arguments for qla1280_done
63 Rev 3.23.38 October 18, 2003, Christoph Hellwig
64    - Convert to new-style hotpluggable driver for 2.6
65 - Fix missing scsi_unregister/scsi_host_put on HBA removal
66 - Kill some more cruft
67 Rev 3.23.37 October 1, 2003, Jes Sorensen
68 - Make MMIO depend on CONFIG_X86_VISWS instead of yet another
69 random CONFIG option
70 - Clean up locking in probe path
71 Rev 3.23.36 October 1, 2003, Christoph Hellwig
72 - queuecommand only ever receives new commands - clear flags
73 - Reintegrate lost fixes from Linux 2.5
74 Rev 3.23.35 August 14, 2003, Jes Sorensen
75 - Build against 2.6
76 Rev 3.23.34 July 23, 2003, Jes Sorensen
77 - Remove pointless TRUE/FALSE macros
78 - Clean up vchan handling
79 Rev 3.23.33 July 3, 2003, Jes Sorensen
80    - Don't define register access macros before the define determining MMIO.
81 This just happened to work out on ia64 but not elsewhere.
82    - Don't try to read from the card while it is in reset as
83 it won't respond and causes an MCA
84 Rev 3.23.32 June 23, 2003, Jes Sorensen
85 - Basic support for boot time arguments
86 Rev 3.23.31 June 8, 2003, Jes Sorensen
87 - Reduce boot time messages
88 Rev 3.23.30 June 6, 2003, Jes Sorensen
89 - Do not enable sync/wide/ppr before it has been determined
90 that the target device actually supports it
91 - Enable DMA arbitration for multi channel controllers
92 Rev 3.23.29 June 3, 2003, Jes Sorensen
93 - Port to 2.5.69
94 Rev 3.23.28 June 3, 2003, Jes Sorensen
95 - Eliminate duplicate marker commands on bus resets
96 - Handle outstanding commands appropriately on bus/device resets
97 Rev 3.23.27 May 28, 2003, Jes Sorensen
98 - Remove bogus input queue code, let the Linux SCSI layer do the work
99 - Clean up NVRAM handling, only read it once from the card
100 - Add a number of missing default nvram parameters
101 Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
102 - Use completion queue for mailbox commands instead of busy wait
103 Rev 3.23.25 Beta May 27, 2003, James Bottomley
104 - Migrate to use new error handling code
105 Rev 3.23.24 Beta May 21, 2003, James Bottomley
106 - Big endian support
107 - Cleanup data direction code
108 Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
109 - Switch to using MMIO instead of PIO
110 Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
111 - Fix PCI parity problem with 12160 during reset.
112 Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
113 - Use pci_map_page()/pci_unmap_page() instead of map_single version.
114 Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
115 - Remove < 2.4.x support
116 - Introduce HOST_LOCK to make the spin lock changes portable.
117 - Remove a bunch of idiotic and unnecessary typedef's
118 - Kill all leftovers of target-mode support which never worked anyway
119 Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
120 - Do qla1280_pci_config() before calling request_irq() and
121 request_region()
122 - Use pci_dma_hi32() to handle upper word of DMA addresses instead
123 of large shifts
124 - Hand correct arguments to free_irq() in case of failure
125 Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
126 - Run source through Lindent and clean up the output
127 Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
128 - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
129 Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
130 - Rely on mailbox commands generating interrupts - do not
131 run qla1280_isr() from ql1280_mailbox_command()
132 - Remove device_reg_t
133 - Integrate ql12160_set_target_parameters() with 1280 version
134 - Make qla1280_setup() non static
135 - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
136 sent to the card - this command pauses the firmware!!!
137 Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
138 - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
139 - Remove a pile of pointless and confusing (srb_t **) and
140 (scsi_lu_t *) typecasts
141    - Explicitly mark that we do not use the new error handling (for now)
142 - Remove scsi_qla_host_t and use 'struct' instead
143 - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
144 pci_64bit_slot flags which weren't used for anything anyway
145 - Grab host->host_lock while calling qla1280_isr() from abort()
146 - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
147 do not need to save/restore flags in the interrupt handler
148 - Enable interrupts early (before any mailbox access) in preparation
149 for cleaning up the mailbox handling
150 Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
151 - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
152 it with proper use of dprintk().
153 - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
154 a debug level argument to determine if data is to be printed
155 - Add KERN_* info to printk()
156 Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
157 - Significant cosmetic cleanups
158 - Change debug code to use dprintk() and remove #if mess
159 Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
160 - More cosmetic cleanups, fix places treating return as function
161 - use cpu_relax() in qla1280_debounce_register()
162 Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
163 - Make it compile under 2.5.5
164 Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
165    - Do not typecast short * to long * in QL1280BoardTbl, this
166 broke miserably on big endian boxes
167 Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
168 - Remove pre 2.2 hack for checking for reentrance in interrupt handler
169 - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
170 unsigned int to match the types from struct scsi_cmnd
171 Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
172 - Remove bogus timer_t typedef from qla1280.h
173 - Remove obsolete pre 2.2 PCI setup code, use proper #define's
174 for PCI_ values, call pci_set_master()
175 - Fix memleak of qla1280_buffer on module unload
176 - Only compile module parsing code #ifdef MODULE - should be
177 changed to use individual MODULE_PARM's later
178 - Remove dummy_buffer that was never modified nor printed
179 - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
180 #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
181 - Remove \r from print statements, this is Linux, not DOS
182 - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
183 dummy macros
184    - Remove C++ compile hack in header file as Linux drivers are not
185 supposed to be compiled as C++
186    - Kill MS_64BITS macro - the code is more readable without it
187 - Remove unnecessary flags.in_interrupts bit
188 Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
189    - Don't check for set flags on q->q_flag one by one in qla1280_next()
190 - Check whether the interrupt was generated by the QLA1280 before
191 doing any processing
192 - qla1280_status_entry(): Only zero out part of sense_buffer that
193 is not being copied into
194    - Remove more superfluous typecasts
195 - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
196 Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
197 - Don't walk the entire list in qla1280_putq_t() just to directly
198 grab the pointer to the last element afterwards
199 Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
200    - Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
201 Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
202 - Set dev->max_sectors to 1024
203 Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
204 - Provide compat macros for pci_enable_device(), pci_find_subsys()
205 and scsi_set_pci_device()
206 - Call scsi_set_pci_device() for all devices
207 - Reduce size of kernel version dependent device probe code
208 - Move duplicate probe/init code to separate function
209 - Handle error if qla1280_mem_alloc() fails
210 - Kill OFFSET() macro and use Linux's PCI definitions instead
211 - Kill private structure defining PCI config space (struct config_reg)
212 - Only allocate I/O port region if not in MMIO mode
213    - Remove duplicate (unused) sanity check of size of srb_t
214 Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
215 - Change home-brew memset() implementations to use memset()
216 - Remove all references to COMTRACE() - accessing a PC's COM2 serial
217 port directly is not legal under Linux.
218 Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
219 - Remove pre 2.2 kernel support
220 - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
221 - Fix MMIO access to use readl/writel instead of directly
222 dereferencing pointers
223 - Nuke MSDOS debugging code
224 - Change true/false data types to int from uint8_t
225 - Use int for counters instead of uint8_t etc.
226 - Clean up size & byte order conversion macro usage
227 Rev 3.23 Beta January 11, 2001 BN Qlogic
228 - Added check of device_id when handling non
229 QLA12160s during detect().
230 Rev 3.22 Beta January 5, 2001 BN Qlogic
231 - Changed queue_task() to schedule_task()
232 for kernels 2.4.0 and higher.
233 Note: 2.4.0-testxx kernels released prior to
234 the actual 2.4.0 kernel release on January 2001
235 will get compile/link errors with schedule_task().
236 Please update your kernel to released 2.4.0 level,
237 or comment lines in this file flagged with 3.22
238 to resolve compile/link error of schedule_task().
239 - Added -DCONFIG_SMP in addition to -D__SMP__
240 in Makefile for 2.4.0 builds of driver as module.
241 Rev 3.21 Beta January 4, 2001 BN Qlogic
242 - Changed criteria of 64/32 Bit mode of HBA
243 operation according to BITS_PER_LONG rather
244 than HBA's NVRAM setting of >4Gig memory bit;
245 so that the HBA auto-configures without the need
246 to setup each system individually.
247 Rev 3.20 Beta December 5, 2000 BN Qlogic
248 - Added priority handling to IA-64 onboard SCSI
249 ISP12160 chip for kernels greater than 2.3.18.
250 - Added irqrestore for qla1280_intr_handler.
251 - Enabled /proc/scsi/qla1280 interface.
252 - Clear /proc/scsi/qla1280 counters in detect().
253 Rev 3.19 Beta October 13, 2000 BN Qlogic
254 - Declare driver_template for new kernel
255 (2.4.0 and greater) scsi initialization scheme.
256 - Update /proc/scsi entry for 2.3.18 kernels and
257 above as qla1280
258 Rev 3.18 Beta October 10, 2000 BN Qlogic
259 - Changed scan order of adapters to map
260 the QLA12160 followed by the QLA1280.
261 Rev 3.17 Beta September 18, 2000 BN Qlogic
262 - Removed warnings for 32 bit 2.4.x compiles
263 - Corrected declared size for request and response
264 DMA addresses that are kept in each ha
265 Rev. 3.16 Beta August 25, 2000 BN Qlogic
266 - Corrected 64 bit addressing issue on IA-64
267 where the upper 32 bits were not properly
268 passed to the RISC engine.
269 Rev. 3.15 Beta August 22, 2000 BN Qlogic
270 - Modified qla1280_setup_chip to properly load
271    ISP firmware for greater than 4 Gig memory on IA-64
272 Rev. 3.14 Beta August 16, 2000 BN Qlogic
273 - Added setting of dma_mask to full 64 bit
274 if flags.enable_64bit_addressing is set in NVRAM
275 Rev. 3.13 Beta August 16, 2000 BN Qlogic
276 - Use new PCI DMA mapping APIs for 2.4.x kernel
277 Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
278 - Added check of pci_enable_device to detect() for 2.3.x
279 - Use pci_resource_start() instead of
280 pdev->resource[0].start in detect() for 2.3.x
281 - Updated driver version
282 Rev. 3.11 July 14, 2000 BN Qlogic
283 - Updated SCSI Firmware to following versions:
284 qla1x80: 8.13.08
285 qla1x160: 10.04.08
286 - Updated driver version to 3.11
287 Rev. 3.10 June 23, 2000 BN Qlogic
288 - Added filtering of AMI SubSys Vendor ID devices
289 Rev. 3.9
290 - DEBUG_QLA1280 undefined and new version BN Qlogic
291 Rev. 3.08b May 9, 2000 MD Dell
292 - Added logic to check against AMI subsystem vendor ID
293 Rev. 3.08 May 4, 2000 DG Qlogic
294 - Added logic to check for PCI subsystem ID.
295 Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
296 - Updated SCSI Firmware to following versions:
297 qla12160: 10.01.19
298 qla1280: 8.09.00
299 Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
300 - Internal revision; not released
301 Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
302 - Edit correction for virt_to_bus and PROC.
303 Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
304 - Merge changes from ia64 port.
305 Rev. 3.03 Mar 28, 2000 BN Qlogic
306 - Increase version to reflect new code drop with compile fix
307 of issue with inclusion of linux/spinlock for 2.3 kernels
308 Rev. 3.02 Mar 15, 2000 BN Qlogic
309 - Merge qla1280_proc_info from 2.10 code base
310 Rev. 3.01 Feb 10, 2000 BN Qlogic
311 - Corrected code to compile on a 2.2.x kernel.
312 Rev. 3.00 Jan 17, 2000 DG Qlogic
313 - Added 64-bit support.
314 Rev. 2.07 Nov 9, 1999 DG Qlogic
315 - Added new routine to set target parameters for ISP12160.
316 Rev. 2.06 Sept 10, 1999 DG Qlogic
317 - Added support for ISP12160 Ultra 3 chip.
318 Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
319 - Modified code to remove errors generated when compiling with
320 Cygnus IA64 Compiler.
321 - Changed conversion of pointers to unsigned longs instead of integers.
322 - Changed type of I/O port variables from uint32_t to unsigned long.
323 - Modified OFFSET macro to work with 64-bit as well as 32-bit.
324 - Changed sprintf and printk format specifiers for pointers to %p.
325 - Changed some int to long type casts where needed in sprintf & printk.
326 - Added l modifiers to sprintf and printk format specifiers for longs.
327 - Removed unused local variables.
328 Rev. 1.20 June 8, 1999 DG, Qlogic
329 Changes to support RedHat release 6.0 (kernel 2.2.5).
330 - Added SCSI exclusive access lock (io_request_lock) when accessing
331 the adapter.
332 - Added changes for the new LINUX interface template. Some new error
333 handling routines have been added to the template, but for now we
334 will use the old ones.
335 - Initial Beta Release.
336 *****************************************************************************/
339 #include <linux/module.h>
341 #include <linux/types.h>
342 #include <linux/string.h>
343 #include <linux/errno.h>
344 #include <linux/kernel.h>
345 #include <linux/ioport.h>
346 #include <linux/delay.h>
347 #include <linux/timer.h>
348 #include <linux/pci.h>
349 #include <linux/proc_fs.h>
350 #include <linux/stat.h>
351 #include <linux/pci_ids.h>
352 #include <linux/interrupt.h>
353 #include <linux/init.h>
354 #include <linux/dma-mapping.h>
355 #include <linux/firmware.h>
357 #include <asm/io.h>
358 #include <asm/irq.h>
359 #include <asm/byteorder.h>
360 #include <asm/processor.h>
361 #include <asm/types.h>
363 #include <scsi/scsi.h>
364 #include <scsi/scsi_cmnd.h>
365 #include <scsi/scsi_device.h>
366 #include <scsi/scsi_host.h>
367 #include <scsi/scsi_tcq.h>
369 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
370 #include <asm/sn/io.h>
371 #endif
375 * Compile time Options:
376 * 0 - Disable and 1 - Enable
378 #define DEBUG_QLA1280_INTR 0
379 #define DEBUG_PRINT_NVRAM 0
380 #define DEBUG_QLA1280 0
383 * The SGI VISWS is broken and doesn't support MMIO ;-(
385 #ifdef CONFIG_X86_VISWS
386 #define MEMORY_MAPPED_IO 0
387 #else
388 #define MEMORY_MAPPED_IO 1
389 #endif
391 #include "qla1280.h"
393 #ifndef BITS_PER_LONG
394 #error "BITS_PER_LONG not defined!"
395 #endif
396 #if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
397 #define QLA_64BIT_PTR 1
398 #endif
400 #ifdef QLA_64BIT_PTR
401 #define pci_dma_hi32(a) ((a >> 16) >> 16)
402 #else
403 #define pci_dma_hi32(a) 0
404 #endif
405 #define pci_dma_lo32(a) (a & 0xffffffff)
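/*
 * Illustrative sketch (not a quote of the code further down): a 64-bit
 * dma_addr_t handed to the RISC firmware is split into four 16-bit
 * mailbox words, the low half via pci_dma_lo32() and the high half via
 * pci_dma_hi32(), roughly:
 *
 *	mb[3] = pci_dma_lo32(dma) & 0xffff;
 *	mb[2] = (pci_dma_lo32(dma) >> 16) & 0xffff;
 *	mb[7] = pci_dma_hi32(dma) & 0xffff;
 *	mb[6] = pci_dma_hi32(dma) >> 16;
 *
 * On configurations without QLA_64BIT_PTR, pci_dma_hi32() evaluates
 * to 0, so the upper words simply end up zero.
 */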
407 #define NVRAM_DELAY() udelay(500) /* 500 microseconds */
409 #if defined(__ia64__) && !defined(ia64_platform_is)
410 #define ia64_platform_is(foo) (!strcmp(foo, platform_name))
411 #endif
414 #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
415 #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
416 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
417 #define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
418 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
421 static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
422 static void qla1280_remove_one(struct pci_dev *);
425 * QLogic Driver Support Function Prototypes.
427 static void qla1280_done(struct scsi_qla_host *);
428 static int qla1280_get_token(char *);
429 static int qla1280_setup(char *s) __init;
432 * QLogic ISP1280 Hardware Support Function Prototypes.
434 static int qla1280_load_firmware(struct scsi_qla_host *);
435 static int qla1280_init_rings(struct scsi_qla_host *);
436 static int qla1280_nvram_config(struct scsi_qla_host *);
437 static int qla1280_mailbox_command(struct scsi_qla_host *,
438 uint8_t, uint16_t *);
439 static int qla1280_bus_reset(struct scsi_qla_host *, int);
440 static int qla1280_device_reset(struct scsi_qla_host *, int, int);
441 static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
442 static int qla1280_abort_isp(struct scsi_qla_host *);
443 #ifdef QLA_64BIT_PTR
444 static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
445 #else
446 static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
447 #endif
448 static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
449 static void qla1280_poll(struct scsi_qla_host *);
450 static void qla1280_reset_adapter(struct scsi_qla_host *);
451 static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
452 static void qla1280_isp_cmd(struct scsi_qla_host *);
453 static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
454 static void qla1280_rst_aen(struct scsi_qla_host *);
455 static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
456 struct list_head *);
457 static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
458 struct list_head *);
459 static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
460 static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
461 static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
462 static request_t *qla1280_req_pkt(struct scsi_qla_host *);
463 static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
464 unsigned int);
465 static void qla1280_get_target_parameters(struct scsi_qla_host *,
466 struct scsi_device *);
467 static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
470 static struct qla_driver_setup driver_setup;
473 * convert scsi data direction to request_t control flags
475 static inline uint16_t
476 qla1280_data_direction(struct scsi_cmnd *cmnd)
478 switch(cmnd->sc_data_direction) {
479 case DMA_FROM_DEVICE:
480 return BIT_5;
481 case DMA_TO_DEVICE:
482 return BIT_6;
483 case DMA_BIDIRECTIONAL:
484 return BIT_5 | BIT_6;
486 * We could BUG() here if the data direction is none of the four
487 * cases, but then again if we receive something like that from the
488 * SCSI layer we have more serious problems. This shuts up GCC.
490 case DMA_NONE:
491 default:
492 return 0;
496 #if DEBUG_QLA1280
497 static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
498 static void __qla1280_dump_buffer(char *, int);
499 #endif
503 * insmod needs to find the variable and make it point to something
505 #ifdef MODULE
506 static char *qla1280;
508 /* insmod qla1280 options=verbose" */
509 module_param(qla1280, charp, 0);
510 #else
511 __setup("qla1280=", qla1280_setup);
512 #endif
516 * We use the scsi_pointer structure that's included with each scsi_command
517 * to overlay our struct srb over it. qla1280_init() checks that a srb is not
518 * bigger than a scsi_pointer.
521 #define CMD_SP(Cmnd) &Cmnd->SCp
522 #define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
523 #define CMD_CDBP(Cmnd) Cmnd->cmnd
524 #define CMD_SNSP(Cmnd) Cmnd->sense_buffer
525 #define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
526 #define CMD_RESULT(Cmnd) Cmnd->result
527 #define CMD_HANDLE(Cmnd) Cmnd->host_scribble
528 #define CMD_REQUEST(Cmnd) Cmnd->request->cmd
530 #define CMD_HOST(Cmnd) Cmnd->device->host
531 #define SCSI_BUS_32(Cmnd) Cmnd->device->channel
532 #define SCSI_TCN_32(Cmnd) Cmnd->device->id
533 #define SCSI_LUN_32(Cmnd) Cmnd->device->lun
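/*
 * Illustrative sketch of the overlay described above (an assumption,
 * not a quote of qla1280_init()): the srb is stored inside the
 * command's scsi_pointer, so a single size check is enough to keep
 * the overlay safe, e.g.:
 *
 *	struct srb *sp = (struct srb *)CMD_SP(cmd);
 *	BUILD_BUG_ON(sizeof(struct srb) > sizeof(struct scsi_pointer));
 */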
536 /*****************************************/
537 /* ISP Boards supported by this driver */
538 /*****************************************/
540 struct qla_boards {
541 char *name; /* Board ID String */
542 int numPorts; /* Number of SCSI ports */
543 int fw_index; /* index into qla1280_fw_tbl for firmware */
546 /* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
547 static struct pci_device_id qla1280_pci_tbl[] = {
548 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
549 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
550 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
551 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
552 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
553 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
554 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
555 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
556 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
557 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
558 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
559 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
560 {0,}
562 MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
564 DEFINE_MUTEX(qla1280_firmware_mutex);
566 struct qla_fw {
567 char *fwname;
568 const struct firmware *fw;
571 #define QL_NUM_FW_IMAGES 3
573 struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
574 {"qlogic/1040.bin", NULL}, /* image 0 */
575 {"qlogic/1280.bin", NULL}, /* image 1 */
576 {"qlogic/12160.bin", NULL}, /* image 2 */
579 /* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
580 static struct qla_boards ql1280_board_tbl[] = {
581 {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
582 {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
583 {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
584 {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
585 {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
586 {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
587 {.name = " ", .numPorts = 0, .fw_index = -1},
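/*
 * Illustrative sketch (assumption about the probe path, which is not
 * shown in this excerpt): the driver_data value of the matching
 * qla1280_pci_tbl entry selects the corresponding board and firmware
 * image, roughly:
 *
 *	ha->devnum = id->driver_data;
 *	bdp = &ql1280_board_tbl[ha->devnum];
 *	fwname = qla1280_fw_tbl[bdp->fw_index].fwname;
 */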
590 static int qla1280_verbose = 1;
592 #if DEBUG_QLA1280
593 static int ql_debug_level = 1;
594 #define dprintk(level, format, a...) \
595 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
596 #define qla1280_dump_buffer(level, buf, size) \
597 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
598 #define qla1280_print_scsi_cmd(level, cmd) \
599 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
600 #else
601 #define ql_debug_level 0
602 #define dprintk(level, format, a...) do{}while(0)
603 #define qla1280_dump_buffer(a, b, c) do{}while(0)
604 #define qla1280_print_scsi_cmd(a, b) do{}while(0)
605 #endif
607 #define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
608 #define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
609 #define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
610 #define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
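/*
 * Illustrative usage sketch: with DEBUG_QLA1280 set to 1 above, a
 * message is emitted only when ql_debug_level is at least the level
 * passed as the first argument, e.g.:
 *
 *	dprintk(2, "scsi(%ld): request ring full\n", ha->host_no);
 *
 * In non-debug builds dprintk() expands to an empty statement, so
 * such calls cost nothing.
 */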
613 static int qla1280_read_nvram(struct scsi_qla_host *ha)
615 uint16_t *wptr;
616 uint8_t chksum;
617 int cnt, i;
618 struct nvram *nv;
620 ENTER("qla1280_read_nvram");
622 if (driver_setup.no_nvram)
623 return 1;
625 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
627 wptr = (uint16_t *)&ha->nvram;
628 nv = &ha->nvram;
629 chksum = 0;
630 for (cnt = 0; cnt < 3; cnt++) {
631 *wptr = qla1280_get_nvram_word(ha, cnt);
632 chksum += *wptr & 0xff;
633 chksum += (*wptr >> 8) & 0xff;
634 wptr++;
637 if (nv->id0 != 'I' || nv->id1 != 'S' ||
638 nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
639 dprintk(2, "Invalid nvram ID or version!\n");
640 chksum = 1;
641 } else {
642 for (; cnt < sizeof(struct nvram); cnt++) {
643 *wptr = qla1280_get_nvram_word(ha, cnt);
644 chksum += *wptr & 0xff;
645 chksum += (*wptr >> 8) & 0xff;
646 wptr++;
650 dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
651 " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
652 nv->version);
655 if (chksum) {
656 if (!driver_setup.no_nvram)
657 printk(KERN_WARNING "scsi(%ld): Unable to identify or "
658 "validate NVRAM checksum, using default "
659 "settings\n", ha->host_no);
660 ha->nvram_valid = 0;
661 } else
662 ha->nvram_valid = 1;
664 /* The firmware interface is, um, interesting, in that the
665 * actual firmware image on the chip is little endian, thus,
666 * the process of taking that image to the CPU would end up
667 * little endian. However, the firmware interface requires it
668 * to be read a word (two bytes) at a time.
670 * The net result of this would be that the word (and
671 * doubleword) quantities in the firmware would be correct, but
672 * the bytes would be pairwise reversed. Since most of the
673 * firmware quantities are, in fact, bytes, we do an extra
674 * le16_to_cpu() in the firmware read routine.
676 * The upshot of all this is that the bytes in the firmware
677 * are in the correct places, but the 16 and 32 bit quantities
678 * are still in little endian format. We fix that up below by
679 * doing extra reverses on them */
680 nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
681 nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
682 for(i = 0; i < MAX_BUSES; i++) {
683 nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
684 nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
686 dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
687 LEAVE("qla1280_read_nvram");
689 return chksum;
692 /**************************************************************************
693 * qla1280_info
694 * Return a string describing the driver.
695 **************************************************************************/
696 static const char *
697 qla1280_info(struct Scsi_Host *host)
699 static char qla1280_scsi_name_buffer[125];
700 char *bp;
701 struct scsi_qla_host *ha;
702 struct qla_boards *bdp;
704 bp = &qla1280_scsi_name_buffer[0];
705 ha = (struct scsi_qla_host *)host->hostdata;
706 bdp = &ql1280_board_tbl[ha->devnum];
707 memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
709 sprintf (bp,
710 "QLogic %s PCI to SCSI Host Adapter\n"
711 " Firmware version: %2d.%02d.%02d, Driver version %s",
712 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
713 QLA1280_VERSION);
714 return bp;
717 /**************************************************************************
718 * qla1280_queuecommand
719 * Queue a command to the controller.
721 * Note:
722 * The mid-level driver tries to ensure that queuecommand never gets invoked
723 * concurrently with itself or the interrupt handler (although the
724 * interrupt handler may call this routine as part of request-completion
725 * handling). Unfortunately, it sometimes calls the scheduler in interrupt
726 * context which is a big NO! NO!.
727 **************************************************************************/
728 static int
729 qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
731 struct Scsi_Host *host = cmd->device->host;
732 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
733 struct srb *sp = (struct srb *)CMD_SP(cmd);
734 int status;
736 cmd->scsi_done = fn;
737 sp->cmd = cmd;
738 sp->flags = 0;
739 sp->wait = NULL;
740 CMD_HANDLE(cmd) = (unsigned char *)NULL;
742 qla1280_print_scsi_cmd(5, cmd);
744 #ifdef QLA_64BIT_PTR
746 * Using 64 bit commands if the PCI bridge doesn't support it is a
747 * bit wasteful, however this should really only happen if one's
748 * PCI controller is completely broken, like the BCM1250. For
749 * sane hardware this is not an issue.
751 status = qla1280_64bit_start_scsi(ha, sp);
752 #else
753 status = qla1280_32bit_start_scsi(ha, sp);
754 #endif
755 return status;
758 static DEF_SCSI_QCMD(qla1280_queuecommand)
760 enum action {
761 ABORT_COMMAND,
762 DEVICE_RESET,
763 BUS_RESET,
764 ADAPTER_RESET,
768 static void qla1280_mailbox_timeout(unsigned long __data)
770 struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
771 struct device_reg __iomem *reg;
772 reg = ha->iobase;
774 ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
775 printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
776 "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
777 RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
778 complete(ha->mailbox_wait);
781 static int
782 _qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
783 struct completion *wait)
785 int status = FAILED;
786 struct scsi_cmnd *cmd = sp->cmd;
788 spin_unlock_irq(ha->host->host_lock);
789 wait_for_completion_timeout(wait, 4*HZ);
790 spin_lock_irq(ha->host->host_lock);
791 sp->wait = NULL;
792 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
793 status = SUCCESS;
794 (*cmd->scsi_done)(cmd);
796 return status;
799 static int
800 qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
802 DECLARE_COMPLETION_ONSTACK(wait);
804 sp->wait = &wait;
805 return _qla1280_wait_for_single_command(ha, sp, &wait);
808 static int
809 qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
811 int cnt;
812 int status;
813 struct srb *sp;
814 struct scsi_cmnd *cmd;
816 status = SUCCESS;
819 * Wait for all commands with the designated bus/target
820 * to be completed by the firmware
822 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
823 sp = ha->outstanding_cmds[cnt];
824 if (sp) {
825 cmd = sp->cmd;
827 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
828 continue;
829 if (target >= 0 && SCSI_TCN_32(cmd) != target)
830 continue;
832 status = qla1280_wait_for_single_command(ha, sp);
833 if (status == FAILED)
834 break;
837 return status;
840 /**************************************************************************
841 * qla1280_error_action
842 * The function will attempt to perform a specified error action and
843 * wait for the results (or time out).
845 * Input:
846 * cmd = Linux SCSI command packet of the command that cause the
847 * bus reset.
848 * action = error action to take (see action_t)
850 * Returns:
851 * SUCCESS or FAILED
853 **************************************************************************/
854 static int
855 qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
857 struct scsi_qla_host *ha;
858 int bus, target, lun;
859 struct srb *sp;
860 int i, found;
861 int result=FAILED;
862 int wait_for_bus=-1;
863 int wait_for_target = -1;
864 DECLARE_COMPLETION_ONSTACK(wait);
866 ENTER("qla1280_error_action");
868 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
869 sp = (struct srb *)CMD_SP(cmd);
870 bus = SCSI_BUS_32(cmd);
871 target = SCSI_TCN_32(cmd);
872 lun = SCSI_LUN_32(cmd);
874 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
875 RD_REG_WORD(&ha->iobase->istatus));
877 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
878 RD_REG_WORD(&ha->iobase->host_cmd),
879 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
881 if (qla1280_verbose)
882 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
883 "Handle=0x%p, action=0x%x\n",
884 ha->host_no, cmd, CMD_HANDLE(cmd), action);
887 * Check to see if we have the command in the outstanding_cmds[]
888 * array. If not then it must have completed before this error
889 * action was initiated. If the error_action isn't ABORT_COMMAND
890 * then the driver must proceed with the requested action.
892 found = -1;
893 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
894 if (sp == ha->outstanding_cmds[i]) {
895 found = i;
896 sp->wait = &wait; /* we'll wait for it to complete */
897 break;
901 if (found < 0) { /* driver doesn't have command */
902 result = SUCCESS;
903 if (qla1280_verbose) {
904 printk(KERN_INFO
905 "scsi(%ld:%d:%d:%d): specified command has "
906 "already completed.\n", ha->host_no, bus,
907 target, lun);
911 switch (action) {
913 case ABORT_COMMAND:
914 dprintk(1, "qla1280: RISC aborting command\n");
916 * The abort might fail due to race when the host_lock
917 * is released to issue the abort. As such, we
918 * don't bother to check the return status.
920 if (found >= 0)
921 qla1280_abort_command(ha, sp, found);
922 break;
924 case DEVICE_RESET:
925 if (qla1280_verbose)
926 printk(KERN_INFO
927 "scsi(%ld:%d:%d:%d): Queueing device reset "
928 "command.\n", ha->host_no, bus, target, lun);
929 if (qla1280_device_reset(ha, bus, target) == 0) {
930 /* issued device reset, set wait conditions */
931 wait_for_bus = bus;
932 wait_for_target = target;
934 break;
936 case BUS_RESET:
937 if (qla1280_verbose)
938 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
939 "reset.\n", ha->host_no, bus);
940 if (qla1280_bus_reset(ha, bus) == 0) {
941 /* issued bus reset, set wait conditions */
942 wait_for_bus = bus;
944 break;
946 case ADAPTER_RESET:
947 default:
948 if (qla1280_verbose) {
949 printk(KERN_INFO
950 "scsi(%ld): Issued ADAPTER RESET\n",
951 ha->host_no);
952 printk(KERN_INFO "scsi(%ld): I/O processing will "
953 "continue automatically\n", ha->host_no);
955 ha->flags.reset_active = 1;
957 if (qla1280_abort_isp(ha) != 0) { /* it's dead */
958 result = FAILED;
961 ha->flags.reset_active = 0;
965 * At this point, the host_lock has been released and retaken
966 * by the issuance of the mailbox command.
967 * Wait for the command passed in by the mid-layer if it
968 * was found by the driver. It might have been returned
969 * between eh recovery steps, hence the check of the "found"
970 * variable.
973 if (found >= 0)
974 result = _qla1280_wait_for_single_command(ha, sp, &wait);
976 if (action == ABORT_COMMAND && result != SUCCESS) {
977 printk(KERN_WARNING
978 "scsi(%li:%i:%i:%i): "
979 "Unable to abort command!\n",
980 ha->host_no, bus, target, lun);
984 * If the command passed in by the mid-layer has been
985 * returned by the board, then wait for any additional
986 * commands which are supposed to complete based upon
987 * the error action.
989 * All commands are unconditionally returned during a
990 * call to qla1280_abort_isp(), ADAPTER_RESET. No need
991 * to wait for them.
993 if (result == SUCCESS && wait_for_bus >= 0) {
994 result = qla1280_wait_for_pending_commands(ha,
995 wait_for_bus, wait_for_target);
998 dprintk(1, "RESET returning %d\n", result);
1000 LEAVE("qla1280_error_action");
1001 return result;
1004 /**************************************************************************
1005 * qla1280_abort
1006 * Abort the specified SCSI command(s).
1007 **************************************************************************/
1008 static int
1009 qla1280_eh_abort(struct scsi_cmnd * cmd)
1011 int rc;
1013 spin_lock_irq(cmd->device->host->host_lock);
1014 rc = qla1280_error_action(cmd, ABORT_COMMAND);
1015 spin_unlock_irq(cmd->device->host->host_lock);
1017 return rc;
1020 /**************************************************************************
1021 * qla1280_device_reset
1022 * Reset the specified SCSI device
1023 **************************************************************************/
1024 static int
1025 qla1280_eh_device_reset(struct scsi_cmnd *cmd)
1027 int rc;
1029 spin_lock_irq(cmd->device->host->host_lock);
1030 rc = qla1280_error_action(cmd, DEVICE_RESET);
1031 spin_unlock_irq(cmd->device->host->host_lock);
1033 return rc;
1036 /**************************************************************************
1037 * qla1280_bus_reset
1038 * Reset the specified bus.
1039 **************************************************************************/
1040 static int
1041 qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1043 int rc;
1045 spin_lock_irq(cmd->device->host->host_lock);
1046 rc = qla1280_error_action(cmd, BUS_RESET);
1047 spin_unlock_irq(cmd->device->host->host_lock);
1049 return rc;
1052 /**************************************************************************
1053 * qla1280_adapter_reset
1054 * Reset the specified adapter (both channels)
1055 **************************************************************************/
1056 static int
1057 qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1059 int rc;
1061 spin_lock_irq(cmd->device->host->host_lock);
1062 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1063 spin_unlock_irq(cmd->device->host->host_lock);
1065 return rc;
1068 static int
1069 qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1070 sector_t capacity, int geom[])
1072 int heads, sectors, cylinders;
1074 heads = 64;
1075 sectors = 32;
1076 cylinders = (unsigned long)capacity / (heads * sectors);
1077 if (cylinders > 1024) {
1078 heads = 255;
1079 sectors = 63;
1080 cylinders = (unsigned long)capacity / (heads * sectors);
1081 /* if (cylinders > 1023)
1082 cylinders = 1023; */
1085 geom[0] = heads;
1086 geom[1] = sectors;
1087 geom[2] = cylinders;
1089 return 0;
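/*
 * Worked example for the geometry heuristic above (numbers are
 * illustrative only): a disk of 8,388,608 sectors gives
 * 8388608 / (64 * 32) = 4096 cylinders, which exceeds 1024, so the
 * code retries with 255 heads and 63 sectors and reports
 * 8388608 / (255 * 63) = 522 cylinders.
 */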
1093 /* disable risc and host interrupts */
1094 static inline void
1095 qla1280_disable_intrs(struct scsi_qla_host *ha)
1097 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1098 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1101 /* enable risc and host interrupts */
1102 static inline void
1103 qla1280_enable_intrs(struct scsi_qla_host *ha)
1105 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1106 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1109 /**************************************************************************
1110 * qla1280_intr_handler
1111 * Handles the H/W interrupt
1112 **************************************************************************/
1113 static irqreturn_t
1114 qla1280_intr_handler(int irq, void *dev_id)
1116 struct scsi_qla_host *ha;
1117 struct device_reg __iomem *reg;
1118 u16 data;
1119 int handled = 0;
1121 ENTER_INTR ("qla1280_intr_handler");
1122 ha = (struct scsi_qla_host *)dev_id;
1124 spin_lock(ha->host->host_lock);
1126 ha->isr_count++;
1127 reg = ha->iobase;
1129 qla1280_disable_intrs(ha);
1131 data = qla1280_debounce_register(&reg->istatus);
1132 /* Check for pending interrupts. */
1133 if (data & RISC_INT) {
1134 qla1280_isr(ha, &ha->done_q);
1135 handled = 1;
1137 if (!list_empty(&ha->done_q))
1138 qla1280_done(ha);
1140 spin_unlock(ha->host->host_lock);
1142 qla1280_enable_intrs(ha);
1144 LEAVE_INTR("qla1280_intr_handler");
1145 return IRQ_RETVAL(handled);
1149 static int
1150 qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1152 uint8_t mr;
1153 uint16_t mb[MAILBOX_REGISTER_COUNT];
1154 struct nvram *nv;
1155 int status, lun;
1157 nv = &ha->nvram;
1159 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
1161 /* Set Target Parameters. */
1162 mb[0] = MBC_SET_TARGET_PARAMETERS;
1163 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1164 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1165 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1166 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1167 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1168 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1169 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1170 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1171 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
1173 if (IS_ISP1x160(ha)) {
1174 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1175 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1176 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1177 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1178 mr |= BIT_6;
1179 } else {
1180 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1182 mb[3] |= nv->bus[bus].target[target].sync_period;
1184 status = qla1280_mailbox_command(ha, mr, mb);
1186 /* Set Device Queue Parameters. */
1187 for (lun = 0; lun < MAX_LUNS; lun++) {
1188 mb[0] = MBC_SET_DEVICE_QUEUE;
1189 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1190 mb[1] |= lun;
1191 mb[2] = nv->bus[bus].max_queue_depth;
1192 mb[3] = nv->bus[bus].target[target].execution_throttle;
1193 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1196 if (status)
1197 printk(KERN_WARNING "scsi(%ld:%i:%i): "
1198 "qla1280_set_target_parameters() failed\n",
1199 ha->host_no, bus, target);
1200 return status;
1204 /**************************************************************************
1205 * qla1280_slave_configure
1207 * Description:
1208 * Determines the queue depth for a given device. If the device
1209 * supports tagged queueing and tagged queueing is enabled for the
1210 * target in the adapter's bus settings, the per-bus high-water
1211 * mark is used as the queue depth. Otherwise the device is run
1212 * untagged with a small default queue depth.
1214 **************************************************************************/
1215 static int
1216 qla1280_slave_configure(struct scsi_device *device)
1218 struct scsi_qla_host *ha;
1219 int default_depth = 3;
1220 int bus = device->channel;
1221 int target = device->id;
1222 int status = 0;
1223 struct nvram *nv;
1224 unsigned long flags;
1226 ha = (struct scsi_qla_host *)device->host->hostdata;
1227 nv = &ha->nvram;
1229 if (qla1280_check_for_dead_scsi_bus(ha, bus))
1230 return 1;
1232 if (device->tagged_supported &&
1233 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1234 scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
1235 ha->bus_settings[bus].hiwat);
1236 } else {
1237 scsi_adjust_queue_depth(device, 0, default_depth);
1240 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1241 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1242 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1244 if (driver_setup.no_sync ||
1245 (driver_setup.sync_mask &&
1246 (~driver_setup.sync_mask & (1 << target))))
1247 nv->bus[bus].target[target].parameter.enable_sync = 0;
1248 if (driver_setup.no_wide ||
1249 (driver_setup.wide_mask &&
1250 (~driver_setup.wide_mask & (1 << target))))
1251 nv->bus[bus].target[target].parameter.enable_wide = 0;
1252 if (IS_ISP1x160(ha)) {
1253 if (driver_setup.no_ppr ||
1254 (driver_setup.ppr_mask &&
1255 (~driver_setup.ppr_mask & (1 << target))))
1256 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1259 spin_lock_irqsave(ha->host->host_lock, flags);
1260 if (nv->bus[bus].target[target].parameter.enable_sync)
1261 status = qla1280_set_target_parameters(ha, bus, target);
1262 qla1280_get_target_parameters(ha, device);
1263 spin_unlock_irqrestore(ha->host->host_lock, flags);
1264 return status;
1269 * qla1280_done
1270 * Process completed commands.
1272 * Input:
1273 * ha = adapter block pointer.
1275 static void
1276 qla1280_done(struct scsi_qla_host *ha)
1278 struct srb *sp;
1279 struct list_head *done_q;
1280 int bus, target, lun;
1281 struct scsi_cmnd *cmd;
1283 ENTER("qla1280_done");
1285 done_q = &ha->done_q;
1287 while (!list_empty(done_q)) {
1288 sp = list_entry(done_q->next, struct srb, list);
1290 list_del(&sp->list);
1292 cmd = sp->cmd;
1293 bus = SCSI_BUS_32(cmd);
1294 target = SCSI_TCN_32(cmd);
1295 lun = SCSI_LUN_32(cmd);
1297 switch ((CMD_RESULT(cmd) >> 16)) {
1298 case DID_RESET:
1299 /* Issue marker command. */
1300 if (!ha->flags.abort_isp_active)
1301 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1302 break;
1303 case DID_ABORT:
1304 sp->flags &= ~SRB_ABORT_PENDING;
1305 sp->flags |= SRB_ABORTED;
1306 break;
1307 default:
1308 break;
1311 /* Release memory used for this I/O */
1312 scsi_dma_unmap(cmd);
1314 /* Call the mid-level driver interrupt handler */
1315 ha->actthreads--;
1317 if (sp->wait == NULL)
1318 (*(cmd)->scsi_done)(cmd);
1319 else
1320 complete(sp->wait);
1322 LEAVE("qla1280_done");
1326 * Translates an ISP error to a Linux SCSI error
1328 static int
1329 qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1331 int host_status = DID_ERROR;
1332 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1333 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1334 uint32_t residual_length = le32_to_cpu(sts->residual_length);
1335 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1336 #if DEBUG_QLA1280_INTR
1337 static char *reason[] = {
1338 "DID_OK",
1339 "DID_NO_CONNECT",
1340 "DID_BUS_BUSY",
1341 "DID_TIME_OUT",
1342 "DID_BAD_TARGET",
1343 "DID_ABORT",
1344 "DID_PARITY",
1345 "DID_ERROR",
1346 "DID_RESET",
1347 "DID_BAD_INTR"
1349 #endif /* DEBUG_QLA1280_INTR */
1351 ENTER("qla1280_return_status");
1353 #if DEBUG_QLA1280_INTR
1355 dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
1356 comp_status);
1358 #endif
1360 switch (comp_status) {
1361 case CS_COMPLETE:
1362 host_status = DID_OK;
1363 break;
1365 case CS_INCOMPLETE:
1366 if (!(state_flags & SF_GOT_BUS))
1367 host_status = DID_NO_CONNECT;
1368 else if (!(state_flags & SF_GOT_TARGET))
1369 host_status = DID_BAD_TARGET;
1370 else if (!(state_flags & SF_SENT_CDB))
1371 host_status = DID_ERROR;
1372 else if (!(state_flags & SF_TRANSFERRED_DATA))
1373 host_status = DID_ERROR;
1374 else if (!(state_flags & SF_GOT_STATUS))
1375 host_status = DID_ERROR;
1376 else if (!(state_flags & SF_GOT_SENSE))
1377 host_status = DID_ERROR;
1378 break;
1380 case CS_RESET:
1381 host_status = DID_RESET;
1382 break;
1384 case CS_ABORTED:
1385 host_status = DID_ABORT;
1386 break;
1388 case CS_TIMEOUT:
1389 host_status = DID_TIME_OUT;
1390 break;
1392 case CS_DATA_OVERRUN:
1393 dprintk(2, "Data overrun 0x%x\n", residual_length);
1394 dprintk(2, "qla1280_return_status: response packet data\n");
1395 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1396 host_status = DID_ERROR;
1397 break;
1399 case CS_DATA_UNDERRUN:
1400 if ((scsi_bufflen(cp) - residual_length) <
1401 cp->underflow) {
1402 printk(KERN_WARNING
1403 "scsi: Underflow detected - retrying "
1404 "command.\n");
1405 host_status = DID_ERROR;
1406 } else {
1407 scsi_set_resid(cp, residual_length);
1408 host_status = DID_OK;
1410 break;
1412 default:
1413 host_status = DID_ERROR;
1414 break;
1417 #if DEBUG_QLA1280_INTR
1418 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1419 reason[host_status], scsi_status);
1420 #endif
1422 LEAVE("qla1280_return_status");
1424 return (scsi_status & 0xff) | (host_status << 16);
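/*
 * Illustrative sketch of the value built above (an assumption matching
 * the usual Linux SCSI result encoding, where the low byte carries the
 * SCSI status and byte 2 the host status): a CHECK CONDITION on a
 * transfer the host considers successful would come back as
 *
 *	(SAM_STAT_CHECK_CONDITION & 0xff) | (DID_OK << 16)
 */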
1427 /****************************************************************************/
1428 /* QLogic ISP1280 Hardware Support Functions. */
1429 /****************************************************************************/
1432 * qla1280_initialize_adapter
1433 * Initialize board.
1435 * Input:
1436 * ha = adapter block pointer.
1438 * Returns:
1439 * 0 = success
1441 static int
1442 qla1280_initialize_adapter(struct scsi_qla_host *ha)
1444 struct device_reg __iomem *reg;
1445 int status;
1446 int bus;
1447 unsigned long flags;
1449 ENTER("qla1280_initialize_adapter");
1451 /* Clear adapter flags. */
1452 ha->flags.online = 0;
1453 ha->flags.disable_host_adapter = 0;
1454 ha->flags.reset_active = 0;
1455 ha->flags.abort_isp_active = 0;
1457 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
1458 if (ia64_platform_is("sn2")) {
1459 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
1460 "dual channel lockup workaround\n", ha->host_no);
1461 ha->flags.use_pci_vchannel = 1;
1462 driver_setup.no_nvram = 1;
1464 #endif
1466 /* TODO: implement support for the 1040 nvram format */
1467 if (IS_ISP1040(ha))
1468 driver_setup.no_nvram = 1;
1470 dprintk(1, "Configure PCI space for adapter...\n");
1472 reg = ha->iobase;
1474 /* Ensure mailbox registers are free. */
1475 WRT_REG_WORD(&reg->semaphore, 0);
1476 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
1477 WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
1478 RD_REG_WORD(&reg->host_cmd);
1480 if (qla1280_read_nvram(ha)) {
1481 dprintk(2, "qla1280_initialize_adapter: failed to read "
1482 "NVRAM\n");
1486 * It's necessary to grab the spinlock here as qla1280_mailbox_command
1487 * needs to be able to drop the lock unconditionally to wait
1488 * for completion.
1490 spin_lock_irqsave(ha->host->host_lock, flags);
1492 status = qla1280_load_firmware(ha);
1493 if (status) {
1494 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1495 ha->host_no);
1496 goto out;
1499 /* Setup adapter based on NVRAM parameters. */
1500 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1501 qla1280_nvram_config(ha);
1503 if (ha->flags.disable_host_adapter) {
1504 status = 1;
1505 goto out;
1508 status = qla1280_init_rings(ha);
1509 if (status)
1510 goto out;
1512 /* Issue SCSI reset, if we can't reset twice then bus is dead */
1513 for (bus = 0; bus < ha->ports; bus++) {
1514 if (!ha->bus_settings[bus].disable_scsi_reset &&
1515 qla1280_bus_reset(ha, bus) &&
1516 qla1280_bus_reset(ha, bus))
1517 ha->bus_settings[bus].scsi_bus_dead = 1;
1520 ha->flags.online = 1;
1521 out:
1522 spin_unlock_irqrestore(ha->host->host_lock, flags);
1524 if (status)
1525 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1527 LEAVE("qla1280_initialize_adapter");
1528 return status;
1532 * qla1280_request_firmware
1533 * Acquire firmware for chip. Retain in memory
1534 * for error recovery.
1536 * Input:
1537 * ha = adapter block pointer.
1539 * Returns:
1540 * Pointer to firmware image or an error code
1541 * cast to pointer via ERR_PTR().
1543 static const struct firmware *
1544 qla1280_request_firmware(struct scsi_qla_host *ha)
1546 const struct firmware *fw;
1547 int err;
1548 int index;
1549 char *fwname;
1551 spin_unlock_irq(ha->host->host_lock);
1552 mutex_lock(&qla1280_firmware_mutex);
1554 index = ql1280_board_tbl[ha->devnum].fw_index;
1555 fw = qla1280_fw_tbl[index].fw;
1556 if (fw)
1557 goto out;
1559 fwname = qla1280_fw_tbl[index].fwname;
1560 err = request_firmware(&fw, fwname, &ha->pdev->dev);
1562 if (err) {
1563 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1564 fwname, err);
1565 fw = ERR_PTR(err);
1566 goto unlock;
1568 if ((fw->size % 2) || (fw->size < 6)) {
1569 printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
1570 fw->size, fwname);
1571 release_firmware(fw);
1572 fw = ERR_PTR(-EINVAL);
1573 goto unlock;
1576 qla1280_fw_tbl[index].fw = fw;
1578 out:
1579 ha->fwver1 = fw->data[0];
1580 ha->fwver2 = fw->data[1];
1581 ha->fwver3 = fw->data[2];
1582 unlock:
1583 mutex_unlock(&qla1280_firmware_mutex);
1584 spin_lock_irq(ha->host->host_lock);
1585 return fw;
1589 * Chip diagnostics
1590 * Test chip for proper operation.
1592 * Input:
1593 * ha = adapter block pointer.
1595 * Returns:
1596 * 0 = success.
1598 static int
1599 qla1280_chip_diag(struct scsi_qla_host *ha)
1601 uint16_t mb[MAILBOX_REGISTER_COUNT];
1602 struct device_reg __iomem *reg = ha->iobase;
1603 int status = 0;
1604 int cnt;
1605 uint16_t data;
1606 dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
1608 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
1610 /* Soft reset chip and wait for it to finish. */
1611 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
1614 * We can't do a traditional PCI write flush here by reading
1615 * back the register. The card will not respond once the reset
1616 * is in action and we end up with a machine check exception
1617 * instead. Nothing to do but wait and hope for the best.
1618 * A portable pci_write_flush(pdev) call would be very useful here.
1620 udelay(20);
1621 data = qla1280_debounce_register(&reg->ictrl);
1623 * Yet another QLogic gem ;-(
1625 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
1626 udelay(5);
1627 data = RD_REG_WORD(&reg->ictrl);
1630 if (!cnt)
1631 goto fail;
1633 /* Reset register cleared by chip reset. */
1634 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
1636 WRT_REG_WORD(&reg->cfg_1, 0);
1638 /* Reset RISC and disable BIOS which
1639 allows RISC to execute out of RAM. */
1640 WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
1641 HC_RELEASE_RISC | HC_DISABLE_BIOS);
1643 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
1644 data = qla1280_debounce_register(&reg->mailbox0);
1647 * I *LOVE* this code!
1649 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
1650 udelay(5);
1651 data = RD_REG_WORD(&reg->mailbox0);
1654 if (!cnt)
1655 goto fail;
1657 /* Check product ID of chip */
1658 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
1660 if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
1661 (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
1662 RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
1663 RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
1664 RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
1665 printk(KERN_INFO "qla1280: Wrong product ID = "
1666 "0x%x,0x%x,0x%x,0x%x\n",
1667 RD_REG_WORD(&reg->mailbox1),
1668 RD_REG_WORD(&reg->mailbox2),
1669 RD_REG_WORD(&reg->mailbox3),
1670 RD_REG_WORD(&reg->mailbox4));
1671 goto fail;
1675 * Enable ints early!!!
1677 qla1280_enable_intrs(ha);
1679 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
1680 /* Wrap Incoming Mailboxes Test. */
1681 mb[0] = MBC_MAILBOX_REGISTER_TEST;
1682 mb[1] = 0xAAAA;
1683 mb[2] = 0x5555;
1684 mb[3] = 0xAA55;
1685 mb[4] = 0x55AA;
1686 mb[5] = 0xA5A5;
1687 mb[6] = 0x5A5A;
1688 mb[7] = 0x2525;
1690 status = qla1280_mailbox_command(ha, 0xff, mb);
1691 if (status)
1692 goto fail;
1694 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
1695 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
1696 mb[7] != 0x2525) {
1697 printk(KERN_INFO "qla1280: Failed mbox check\n");
1698 goto fail;
1701 dprintk(3, "qla1280_chip_diag: exiting normally\n");
1702 return 0;
1703 fail:
1704 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
1705 return status;
1708 static int
1709 qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1711 /* enter with host_lock acquired */
1713 const struct firmware *fw;
1714 const __le16 *fw_data;
1715 uint16_t risc_address, risc_code_size;
1716 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1717 int err = 0;
1719 fw = qla1280_request_firmware(ha);
1720 if (IS_ERR(fw))
1721 return PTR_ERR(fw);
1723 fw_data = (const __le16 *)&fw->data[0];
1724 ha->fwstart = __le16_to_cpu(fw_data[2]);
1726 /* Load RISC code. */
1727 risc_address = ha->fwstart;
1728 fw_data = (const __le16 *)&fw->data[6];
1729 risc_code_size = (fw->size - 6) / 2;
1731 for (i = 0; i < risc_code_size; i++) {
1732 mb[0] = MBC_WRITE_RAM_WORD;
1733 mb[1] = risc_address + i;
1734 mb[2] = __le16_to_cpu(fw_data[i]);
1736 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1737 if (err) {
1738 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1739 ha->host_no);
1740 break;
1744 return err;
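/*
 * Editor's note - illustrative sketch only, not part of the original
 * driver.  It spells out how the PIO loader above interprets the
 * firmware blob: a stream of little-endian 16-bit words where word 2
 * holds the RISC load/start address and the code itself begins at byte
 * offset 6.  The structure and function names are hypothetical.
 */
struct qla1280_example_fw_layout {
	u16 start_addr;			/* RISC load/start address */
	const __le16 *code;		/* first code word */
	size_t nwords;			/* number of code words */
};

static inline void
qla1280_example_parse_fw(const struct firmware *fw,
			 struct qla1280_example_fw_layout *out)
{
	const __le16 *words = (const __le16 *)fw->data;

	out->start_addr = __le16_to_cpu(words[2]);	/* ha->fwstart above */
	out->code = words + 3;				/* &fw->data[6] */
	out->nwords = (fw->size - 6) / 2;
}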
1747 #define DUMP_IT_BACK 0 /* for debug of RISC loading */
1748 static int
1749 qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1751 /* enter with host_lock acquired */
1752 const struct firmware *fw;
1753 const __le16 *fw_data;
1754 uint16_t risc_address, risc_code_size;
1755 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1756 int err = 0, num, i;
1757 #if DUMP_IT_BACK
1758 uint8_t *sp, *tbuf;
1759 dma_addr_t p_tbuf;
1761 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
1762 if (!tbuf)
1763 return -ENOMEM;
1764 #endif
1766 fw = qla1280_request_firmware(ha);
1767 if (IS_ERR(fw))
1768 return PTR_ERR(fw);
1770 fw_data = (const __le16 *)&fw->data[0];
1771 ha->fwstart = __le16_to_cpu(fw_data[2]);
1773 /* Load RISC code. */
1774 risc_address = ha->fwstart;
1775 fw_data = (const __le16 *)&fw->data[6];
1776 risc_code_size = (fw->size - 6) / 2;
1778 dprintk(1, "%s: DMA RISC code (%i) words\n",
1779 __func__, risc_code_size);
1781 num = 0;
1782 while (risc_code_size > 0) {
1783 int warn __attribute__((unused)) = 0;
1785 cnt = 2000 >> 1;
1787 if (cnt > risc_code_size)
1788 cnt = risc_code_size;
1790 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1791 "%d,%d(0x%x)\n",
1792 fw_data, cnt, num, risc_address);
1793 for(i = 0; i < cnt; i++)
1794 ((__le16 *)ha->request_ring)[i] = fw_data[i];
1796 mb[0] = MBC_LOAD_RAM;
1797 mb[1] = risc_address;
1798 mb[4] = cnt;
1799 mb[3] = ha->request_dma & 0xffff;
1800 mb[2] = (ha->request_dma >> 16) & 0xffff;
1801 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1802 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1803 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1804 __func__, mb[0],
1805 (void *)(long)ha->request_dma,
1806 mb[6], mb[7], mb[2], mb[3]);
1807 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1808 BIT_1 | BIT_0, mb);
1809 if (err) {
1810 printk(KERN_ERR "scsi(%li): Failed to load partial "
1811 "segment of f\n", ha->host_no);
1812 goto out;
1815 #if DUMP_IT_BACK
1816 mb[0] = MBC_DUMP_RAM;
1817 mb[1] = risc_address;
1818 mb[4] = cnt;
1819 mb[3] = p_tbuf & 0xffff;
1820 mb[2] = (p_tbuf >> 16) & 0xffff;
1821 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
1822 mb[6] = pci_dma_hi32(p_tbuf) >> 16;
1824 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1825 BIT_1 | BIT_0, mb);
1826 if (err) {
1827 printk(KERN_ERR
1828 "Failed to dump partial segment of f/w\n");
1829 goto out;
1831 sp = (uint8_t *)ha->request_ring;
1832 for (i = 0; i < (cnt << 1); i++) {
1833 if (tbuf[i] != sp[i] && warn++ < 10) {
1834 printk(KERN_ERR "%s: FW compare error @ "
1835 "byte(0x%x) loop#=%x\n",
1836 __func__, i, num);
1837 printk(KERN_ERR "%s: FWbyte=%x "
1838 "FWfromChip=%x\n",
1839 __func__, sp[i], tbuf[i]);
1840 /*break; */
1843 #endif
1844 risc_address += cnt;
1845 risc_code_size = risc_code_size - cnt;
1846 fw_data = fw_data + cnt;
1847 num++;
1850 out:
1851 #if DUMP_IT_BACK
1852 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
1853 #endif
1854 return err;
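/*
 * Editor's note - illustrative sketch only, not part of the original
 * driver.  Both the DMA loader above and qla1280_init_rings() below
 * scatter a DMA address across four 16-bit mailbox registers.  The
 * helper name is hypothetical; pci_dma_hi32() is the macro this file
 * already uses for the upper 32 bits of the address.
 */
static inline void
qla1280_example_split_dma_addr(dma_addr_t addr, uint16_t *mb)
{
	mb[3] = addr & 0xffff;			/* bits 15..0  */
	mb[2] = (addr >> 16) & 0xffff;		/* bits 31..16 */
	mb[7] = pci_dma_hi32(addr) & 0xffff;	/* bits 47..32 */
	mb[6] = pci_dma_hi32(addr) >> 16;	/* bits 63..48 */
}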
1857 static int
1858 qla1280_start_firmware(struct scsi_qla_host *ha)
1860 uint16_t mb[MAILBOX_REGISTER_COUNT];
1861 int err;
1863 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1864 __func__);
1866 /* Verify checksum of loaded RISC code. */
1867 mb[0] = MBC_VERIFY_CHECKSUM;
1868 /* mb[1] = ql12_risc_code_addr01; */
1869 mb[1] = ha->fwstart;
1870 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1871 if (err) {
1872 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1873 return err;
1876 /* Start firmware execution. */
1877 dprintk(1, "%s: start firmware running.\n", __func__);
1878 mb[0] = MBC_EXECUTE_FIRMWARE;
1879 mb[1] = ha->fwstart;
1880 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1881 if (err) {
1882 printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1883 ha->host_no);
1886 return err;
1889 static int
1890 qla1280_load_firmware(struct scsi_qla_host *ha)
1892 /* enter with host_lock taken */
1893 int err;
1895 err = qla1280_chip_diag(ha);
1896 if (err)
1897 goto out;
1898 if (IS_ISP1040(ha))
1899 err = qla1280_load_firmware_pio(ha);
1900 else
1901 err = qla1280_load_firmware_dma(ha);
1902 if (err)
1903 goto out;
1904 err = qla1280_start_firmware(ha);
1905 out:
1906 return err;
1910 * Initialize rings
1912 * Input:
1913 * ha = adapter block pointer.
1914 * ha->request_ring = request ring virtual address
1915 * ha->response_ring = response ring virtual address
1916 * ha->request_dma = request ring physical address
1917 * ha->response_dma = response ring physical address
1919 * Returns:
1920 * 0 = success.
1922 static int
1923 qla1280_init_rings(struct scsi_qla_host *ha)
1925 uint16_t mb[MAILBOX_REGISTER_COUNT];
1926 int status = 0;
1928 ENTER("qla1280_init_rings");
1930 /* Clear outstanding commands array. */
1931 memset(ha->outstanding_cmds, 0,
1932 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1934 /* Initialize request queue. */
1935 ha->request_ring_ptr = ha->request_ring;
1936 ha->req_ring_index = 0;
1937 ha->req_q_cnt = REQUEST_ENTRY_CNT;
1938 /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
1939 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1940 mb[1] = REQUEST_ENTRY_CNT;
1941 mb[3] = ha->request_dma & 0xffff;
1942 mb[2] = (ha->request_dma >> 16) & 0xffff;
1943 mb[4] = 0;
1944 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1945 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1946 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1947 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1948 &mb[0]))) {
1949 /* Initialize response queue. */
1950 ha->response_ring_ptr = ha->response_ring;
1951 ha->rsp_ring_index = 0;
1952 /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
1953 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1954 mb[1] = RESPONSE_ENTRY_CNT;
1955 mb[3] = ha->response_dma & 0xffff;
1956 mb[2] = (ha->response_dma >> 16) & 0xffff;
1957 mb[5] = 0;
1958 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
1959 mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
1960 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1961 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1962 &mb[0]);
1965 if (status)
1966 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1968 LEAVE("qla1280_init_rings");
1969 return status;
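/*
 * Editor's note - illustrative sketch only, not part of the original
 * driver.  Once the rings are initialized above, mailbox4 acts as the
 * request-queue index exchanged with the chip, and the submission
 * paths below repeat the same wrap-around free-slot arithmetic against
 * it.  'in' is ha->req_ring_index, 'out' is the index read back from
 * mailbox4; the helper name is hypothetical.
 */
static inline uint16_t
qla1280_example_req_ring_space(uint16_t in, uint16_t out)
{
	if (in < out)
		return out - in;
	return REQUEST_ENTRY_CNT - (in - out);
}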
1972 static void
1973 qla1280_print_settings(struct nvram *nv)
1975 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
1976 nv->bus[0].config_1.initiator_id);
1977 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
1978 nv->bus[1].config_1.initiator_id);
1980 dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
1981 nv->bus[0].bus_reset_delay);
1982 dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
1983 nv->bus[1].bus_reset_delay);
1985 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
1986 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
1987 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
1988 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
1990 dprintk(1, "qla1280 : async data setup time[0]=%d\n",
1991 nv->bus[0].config_2.async_data_setup_time);
1992 dprintk(1, "qla1280 : async data setup time[1]=%d\n",
1993 nv->bus[1].config_2.async_data_setup_time);
1995 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
1996 nv->bus[0].config_2.req_ack_active_negation);
1997 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
1998 nv->bus[1].config_2.req_ack_active_negation);
2000 dprintk(1, "qla1280 : data line active negation[0]=%d\n",
2001 nv->bus[0].config_2.data_line_active_negation);
2002 dprintk(1, "qla1280 : data line active negation[1]=%d\n",
2003 nv->bus[1].config_2.data_line_active_negation);
2005 dprintk(1, "qla1280 : disable loading risc code=%d\n",
2006 nv->cntr_flags_1.disable_loading_risc_code);
2008 dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
2009 nv->cntr_flags_1.enable_64bit_addressing);
2011 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
2012 nv->bus[0].selection_timeout);
2013 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
2014 nv->bus[1].selection_timeout);
2016 dprintk(1, "qla1280 : max queue depth[0]=%d\n",
2017 nv->bus[0].max_queue_depth);
2018 dprintk(1, "qla1280 : max queue depth[1]=%d\n",
2019 nv->bus[1].max_queue_depth);
2022 static void
2023 qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
2025 struct nvram *nv = &ha->nvram;
2027 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
2028 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
2029 nv->bus[bus].target[target].parameter.tag_queuing = 1;
2030 nv->bus[bus].target[target].parameter.enable_sync = 1;
2031 #if 1 /* Some SCSI Processors do not seem to like this */
2032 nv->bus[bus].target[target].parameter.enable_wide = 1;
2033 #endif
2034 nv->bus[bus].target[target].execution_throttle =
2035 nv->bus[bus].max_queue_depth - 1;
2036 nv->bus[bus].target[target].parameter.parity_checking = 1;
2037 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
2039 if (IS_ISP1x160(ha)) {
2040 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
2041 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
2042 nv->bus[bus].target[target].sync_period = 9;
2043 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
2044 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
2045 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
2046 } else {
2047 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
2048 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
2049 nv->bus[bus].target[target].sync_period = 10;
2053 static void
2054 qla1280_set_defaults(struct scsi_qla_host *ha)
2056 struct nvram *nv = &ha->nvram;
2057 int bus, target;
2059 dprintk(1, "Using defaults for NVRAM: \n");
2060 memset(nv, 0, sizeof(struct nvram));
2062 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
2063 nv->firmware_feature.f.enable_fast_posting = 1;
2064 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2065 nv->termination.scsi_bus_0_control = 3;
2066 nv->termination.scsi_bus_1_control = 3;
2067 nv->termination.auto_term_support = 1;
2070 * Set default FIFO magic - What appropriate values would be here
2071 * is unknown. This is what I have found testing with 12160s.
2073 * Now, I would love the magic decoder ring for this one, the
2074 * header file provided by QLogic seems to be bogus or incomplete
2075 * at best.
2077 nv->isp_config.burst_enable = 1;
2078 if (IS_ISP1040(ha))
2079 nv->isp_config.fifo_threshold |= 3;
2080 else
2081 nv->isp_config.fifo_threshold |= 4;
2083 if (IS_ISP1x160(ha))
2084 nv->isp_parameter = 0x01; /* fast memory enable */
2086 for (bus = 0; bus < MAX_BUSES; bus++) {
2087 nv->bus[bus].config_1.initiator_id = 7;
2088 nv->bus[bus].config_2.req_ack_active_negation = 1;
2089 nv->bus[bus].config_2.data_line_active_negation = 1;
2090 nv->bus[bus].selection_timeout = 250;
2091 nv->bus[bus].max_queue_depth = 32;
2093 if (IS_ISP1040(ha)) {
2094 nv->bus[bus].bus_reset_delay = 3;
2095 nv->bus[bus].config_2.async_data_setup_time = 6;
2096 nv->bus[bus].retry_delay = 1;
2097 } else {
2098 nv->bus[bus].bus_reset_delay = 5;
2099 nv->bus[bus].config_2.async_data_setup_time = 8;
2102 for (target = 0; target < MAX_TARGETS; target++)
2103 qla1280_set_target_defaults(ha, bus, target);
2107 static int
2108 qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2110 struct nvram *nv = &ha->nvram;
2111 uint16_t mb[MAILBOX_REGISTER_COUNT];
2112 int status, lun;
2113 uint16_t flag;
2115 /* Set Target Parameters. */
2116 mb[0] = MBC_SET_TARGET_PARAMETERS;
2117 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2120 * Do not enable sync and ppr for the initial INQUIRY run. We
2121 * enable this later if we determine the target actually
2122 * supports it.
2124 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2125 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2127 if (IS_ISP1x160(ha))
2128 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2129 else
2130 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2131 mb[3] |= nv->bus[bus].target[target].sync_period;
2132 status = qla1280_mailbox_command(ha, 0x0f, mb);
2134 /* Save Tag queuing enable flag. */
2135 flag = (BIT_0 << target);
2136 if (nv->bus[bus].target[target].parameter.tag_queuing)
2137 ha->bus_settings[bus].qtag_enables |= flag;
2139 /* Save Device enable flag. */
2140 if (IS_ISP1x160(ha)) {
2141 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2142 ha->bus_settings[bus].device_enables |= flag;
2143 ha->bus_settings[bus].lun_disables |= 0;
2144 } else {
2145 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2146 ha->bus_settings[bus].device_enables |= flag;
2147 /* Save LUN disable flag. */
2148 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2149 ha->bus_settings[bus].lun_disables |= flag;
2152 /* Set Device Queue Parameters. */
2153 for (lun = 0; lun < MAX_LUNS; lun++) {
2154 mb[0] = MBC_SET_DEVICE_QUEUE;
2155 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2156 mb[1] |= lun;
2157 mb[2] = nv->bus[bus].max_queue_depth;
2158 mb[3] = nv->bus[bus].target[target].execution_throttle;
2159 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2162 return status;
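/*
 * Editor's note - illustrative sketch only, not part of the original
 * driver.  Several mailbox commands in this file (set target
 * parameters, set device queue, abort target, abort command) address a
 * device by placing the target ID in the high byte of mb[1], setting
 * BIT_7 of that byte to select the second SCSI bus, and or-ing the LUN
 * into the low byte where the command takes one.  The helper name is
 * hypothetical.
 */
static inline uint16_t
qla1280_example_mb1_device(int bus, int target, int lun)
{
	uint16_t t = bus ? (target | BIT_7) : target;

	return (t << 8) | lun;
}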
2165 static int
2166 qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2168 struct nvram *nv = &ha->nvram;
2169 uint16_t mb[MAILBOX_REGISTER_COUNT];
2170 int target, status;
2172 /* SCSI Reset Disable. */
2173 ha->bus_settings[bus].disable_scsi_reset =
2174 nv->bus[bus].config_1.scsi_reset_disable;
2176 /* Initiator ID. */
2177 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2178 mb[0] = MBC_SET_INITIATOR_ID;
2179 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2180 ha->bus_settings[bus].id;
2181 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2183 /* Reset Delay. */
2184 ha->bus_settings[bus].bus_reset_delay =
2185 nv->bus[bus].bus_reset_delay;
2187 /* Command queue depth per device. */
2188 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2190 /* Set target parameters. */
2191 for (target = 0; target < MAX_TARGETS; target++)
2192 status |= qla1280_config_target(ha, bus, target);
2194 return status;
2197 static int
2198 qla1280_nvram_config(struct scsi_qla_host *ha)
2200 struct device_reg __iomem *reg = ha->iobase;
2201 struct nvram *nv = &ha->nvram;
2202 int bus, target, status = 0;
2203 uint16_t mb[MAILBOX_REGISTER_COUNT];
2205 ENTER("qla1280_nvram_config");
2207 if (ha->nvram_valid) {
2208 /* Always force AUTO sense for LINUX SCSI */
2209 for (bus = 0; bus < MAX_BUSES; bus++)
2210 for (target = 0; target < MAX_TARGETS; target++) {
2211 nv->bus[bus].target[target].parameter.
2212 auto_request_sense = 1;
2214 } else {
2215 qla1280_set_defaults(ha);
2218 qla1280_print_settings(nv);
2220 /* Disable RISC load of firmware. */
2221 ha->flags.disable_risc_code_load =
2222 nv->cntr_flags_1.disable_loading_risc_code;
2224 if (IS_ISP1040(ha)) {
2225 uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
2227 hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
2229 cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2230 cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
2231 ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
2233 /* Busted fifo, says mjacob. */
2234 if (hwrev != ISP_CFG0_1040A)
2235 cfg1 |= nv->isp_config.fifo_threshold << 4;
2237 cfg1 |= nv->isp_config.burst_enable << 2;
2238 WRT_REG_WORD(&reg->cfg_1, cfg1);
2240 WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2241 WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
2242 } else {
2243 uint16_t cfg1, term;
2245 /* Set ISP hardware DMA burst */
2246 cfg1 = nv->isp_config.fifo_threshold << 4;
2247 cfg1 |= nv->isp_config.burst_enable << 2;
2248 /* Enable DMA arbitration on dual channel controllers */
2249 if (ha->ports > 1)
2250 cfg1 |= BIT_13;
2251 WRT_REG_WORD(&reg->cfg_1, cfg1);
2253 /* Set SCSI termination. */
2254 WRT_REG_WORD(&reg->gpio_enable,
2255 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2256 term = nv->termination.scsi_bus_1_control;
2257 term |= nv->termination.scsi_bus_0_control << 2;
2258 term |= nv->termination.auto_term_support << 7;
2259 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2260 WRT_REG_WORD(&reg->gpio_data, term);
2262 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2264 /* ISP parameter word. */
2265 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2266 mb[1] = nv->isp_parameter;
2267 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2269 if (IS_ISP1x40(ha)) {
2270 /* clock rate - for qla1240 and older, only */
2271 mb[0] = MBC_SET_CLOCK_RATE;
2272 mb[1] = 40;
2273 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2276 /* Firmware feature word. */
2277 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2278 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2279 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2280 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2281 #if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2282 if (ia64_platform_is("sn2")) {
2283 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2284 "workaround\n", ha->host_no);
2285 mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
2287 #endif
2288 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2290 /* Retry count and delay. */
2291 mb[0] = MBC_SET_RETRY_COUNT;
2292 mb[1] = nv->bus[0].retry_count;
2293 mb[2] = nv->bus[0].retry_delay;
2294 mb[6] = nv->bus[1].retry_count;
2295 mb[7] = nv->bus[1].retry_delay;
2296 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2297 BIT_1 | BIT_0, &mb[0]);
2299 /* ASYNC data setup time. */
2300 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2301 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2302 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2303 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2305 /* Active negation states. */
2306 mb[0] = MBC_SET_ACTIVE_NEGATION;
2307 mb[1] = 0;
2308 if (nv->bus[0].config_2.req_ack_active_negation)
2309 mb[1] |= BIT_5;
2310 if (nv->bus[0].config_2.data_line_active_negation)
2311 mb[1] |= BIT_4;
2312 mb[2] = 0;
2313 if (nv->bus[1].config_2.req_ack_active_negation)
2314 mb[2] |= BIT_5;
2315 if (nv->bus[1].config_2.data_line_active_negation)
2316 mb[2] |= BIT_4;
2317 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2319 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2320 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
2321 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2323 /* PCI control: enable bursting on the data and command DMA channels. */
2324 mb[0] = MBC_SET_PCI_CONTROL;
2325 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
2326 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
2327 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2329 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2330 mb[1] = 8;
2331 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2333 /* Selection timeout. */
2334 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2335 mb[1] = nv->bus[0].selection_timeout;
2336 mb[2] = nv->bus[1].selection_timeout;
2337 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2339 for (bus = 0; bus < ha->ports; bus++)
2340 status |= qla1280_config_bus(ha, bus);
2342 if (status)
2343 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2345 LEAVE("qla1280_nvram_config");
2346 return status;
2350 * Get NVRAM data word
2351 * Calculates word position in NVRAM and calls request routine to
2352 * get the word from NVRAM.
2354 * Input:
2355 * ha = adapter block pointer.
2356 * address = NVRAM word address.
2358 * Returns:
2359 * data word.
2361 static uint16_t
2362 qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2364 uint32_t nv_cmd;
2365 uint16_t data;
2367 nv_cmd = address << 16;
2368 nv_cmd |= NV_READ_OP;
2370 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2372 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2373 "0x%x", data);
2375 return data;
2379 * NVRAM request
2380 * Sends read command to NVRAM and gets data from NVRAM.
2382 * Input:
2383 * ha = adapter block pointer.
2384 * nv_cmd = Bit 26 = start bit
2385 * Bit 25, 24 = opcode
2386 * Bit 23-16 = address
2387 * Bit 15-0 = write data
2389 * Returns:
2390 * data word.
2392 static uint16_t
2393 qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
2395 struct device_reg __iomem *reg = ha->iobase;
2396 int cnt;
2397 uint16_t data = 0;
2398 uint16_t reg_data;
2400 /* Send command to NVRAM. */
2402 nv_cmd <<= 5;
2403 for (cnt = 0; cnt < 11; cnt++) {
2404 if (nv_cmd & BIT_31)
2405 qla1280_nv_write(ha, NV_DATA_OUT);
2406 else
2407 qla1280_nv_write(ha, 0);
2408 nv_cmd <<= 1;
2411 /* Read data from NVRAM. */
2413 for (cnt = 0; cnt < 16; cnt++) {
2414 WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
2415 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2416 NVRAM_DELAY();
2417 data <<= 1;
2418 reg_data = RD_REG_WORD(&reg->nvram);
2419 if (reg_data & NV_DATA_IN)
2420 data |= BIT_0;
2421 WRT_REG_WORD(&reg->nvram, NV_SELECT);
2422 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2423 NVRAM_DELAY();
2426 /* Deselect chip. */
2428 WRT_REG_WORD(&reg->nvram, NV_DESELECT);
2429 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2430 NVRAM_DELAY();
2432 return data;
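/*
 * Editor's note - illustrative sketch only, not part of the original
 * driver.  It restates the NVRAM command layout documented above:
 * bit 26 is the start bit, bits 25-24 the opcode, bits 23-16 the
 * address and bits 15-0 the write data.  qla1280_nvram_request()
 * shifts the word left by 5 so the 11 command bits it clocks out begin
 * at bit 31.  NV_READ_OP is the opcode constant already used by
 * qla1280_get_nvram_word(); the helper name is hypothetical.
 */
static inline uint32_t qla1280_example_nv_read_cmd(uint8_t address)
{
	return ((uint32_t)address << 16) | NV_READ_OP;
}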
2435 static void
2436 qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
2438 struct device_reg __iomem *reg = ha->iobase;
2440 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2441 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2442 NVRAM_DELAY();
2443 WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
2444 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2445 NVRAM_DELAY();
2446 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2447 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2448 NVRAM_DELAY();
2452 * Mailbox Command
2453 * Issue mailbox command and waits for completion.
2455 * Input:
2456 * ha = adapter block pointer.
2457 * mr = mailbox registers to load.
2458 * mb = data pointer for mailbox registers.
2460 * Output:
2461 * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
2463 * Returns:
2464 * 0 = success
2466 static int
2467 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2469 struct device_reg __iomem *reg = ha->iobase;
2470 int status = 0;
2471 int cnt;
2472 uint16_t *optr, *iptr;
2473 uint16_t __iomem *mptr;
2474 uint16_t data;
2475 DECLARE_COMPLETION_ONSTACK(wait);
2476 struct timer_list timer;
2478 ENTER("qla1280_mailbox_command");
2480 if (ha->mailbox_wait) {
2481 printk(KERN_ERR "Warning mailbox wait already in use!\n");
2483 ha->mailbox_wait = &wait;
2486 * We really should start out by verifying that the mailbox is
2487 * available before we start sending the command data
2489 /* Load mailbox registers. */
2490 mptr = (uint16_t __iomem *) &reg->mailbox0;
2491 iptr = mb;
2492 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2493 if (mr & BIT_0) {
2494 WRT_REG_WORD(mptr, (*iptr));
2497 mr >>= 1;
2498 mptr++;
2499 iptr++;
2502 /* Issue set host interrupt command. */
2504 /* set up a timer just in case we're really jammed */
2505 init_timer(&timer);
2506 timer.expires = jiffies + 20*HZ;
2507 timer.data = (unsigned long)ha;
2508 timer.function = qla1280_mailbox_timeout;
2509 add_timer(&timer);
2511 spin_unlock_irq(ha->host->host_lock);
2512 WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
2513 data = qla1280_debounce_register(&reg->istatus);
2515 wait_for_completion(&wait);
2516 del_timer_sync(&timer);
2518 spin_lock_irq(ha->host->host_lock);
2520 ha->mailbox_wait = NULL;
2522 /* Check for mailbox command timeout. */
2523 if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2524 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2525 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2526 "0x%04x\n",
2527 mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
2528 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
2529 RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
2530 RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
2531 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
2532 RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
2533 RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
2534 status = 1;
2537 /* Load return mailbox registers. */
2538 optr = mb;
2539 iptr = (uint16_t *) &ha->mailbox_out[0];
2540 mr = MAILBOX_REGISTER_COUNT;
2541 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2543 if (ha->flags.reset_marker)
2544 qla1280_rst_aen(ha);
2546 if (status)
2547 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2548 "0x%x ****\n", mb[0]);
2550 LEAVE("qla1280_mailbox_command");
2551 return status;
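/*
 * Editor's note - illustrative sketch only, not part of the original
 * driver.  The 'mr' argument of qla1280_mailbox_command() is a bitmask
 * selecting which of mb[0..7] are actually written to the chip - bit N
 * enables mailbox register N, and the load loop above consumes one bit
 * per register.  The firmware loaders, for example, pass
 * BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0 (0x1f) to load mb[0]..mb[4].
 * The helper name is hypothetical.
 */
static inline uint8_t qla1280_example_mb_mask(int highest_reg)
{
	/* select mailbox registers 0..highest_reg (highest_reg < 8) */
	return (uint8_t)((1u << (highest_reg + 1)) - 1);
}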
2555 * qla1280_poll
2556 * Polls ISP for interrupts.
2558 * Input:
2559 * ha = adapter block pointer.
2561 static void
2562 qla1280_poll(struct scsi_qla_host *ha)
2564 struct device_reg __iomem *reg = ha->iobase;
2565 uint16_t data;
2566 LIST_HEAD(done_q);
2568 /* ENTER("qla1280_poll"); */
2570 /* Check for pending interrupts. */
2571 data = RD_REG_WORD(&reg->istatus);
2572 if (data & RISC_INT)
2573 qla1280_isr(ha, &done_q);
2575 if (!ha->mailbox_wait) {
2576 if (ha->flags.reset_marker)
2577 qla1280_rst_aen(ha);
2580 if (!list_empty(&done_q))
2581 qla1280_done(ha);
2583 /* LEAVE("qla1280_poll"); */
2587 * qla1280_bus_reset
2588 * Issue SCSI bus reset.
2590 * Input:
2591 * ha = adapter block pointer.
2592 * bus = SCSI bus number.
2594 * Returns:
2595 * 0 = success
2597 static int
2598 qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2600 uint16_t mb[MAILBOX_REGISTER_COUNT];
2601 uint16_t reset_delay;
2602 int status;
2604 dprintk(3, "qla1280_bus_reset: entered\n");
2606 if (qla1280_verbose)
2607 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2608 ha->host_no, bus);
2610 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2611 mb[0] = MBC_BUS_RESET;
2612 mb[1] = reset_delay;
2613 mb[2] = (uint16_t) bus;
2614 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2616 if (status) {
2617 if (ha->bus_settings[bus].failed_reset_count > 2)
2618 ha->bus_settings[bus].scsi_bus_dead = 1;
2619 ha->bus_settings[bus].failed_reset_count++;
2620 } else {
2621 spin_unlock_irq(ha->host->host_lock);
2622 ssleep(reset_delay);
2623 spin_lock_irq(ha->host->host_lock);
2625 ha->bus_settings[bus].scsi_bus_dead = 0;
2626 ha->bus_settings[bus].failed_reset_count = 0;
2627 ha->bus_settings[bus].reset_marker = 0;
2628 /* Issue marker command. */
2629 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2633 * We should probably call qla1280_set_target_parameters()
2634 * here as well for all devices on the bus.
2637 if (status)
2638 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2639 else
2640 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2642 return status;
2646 * qla1280_device_reset
2647 * Issue bus device reset message to the target.
2649 * Input:
2650 * ha = adapter block pointer.
2651 * bus = SCSI BUS number.
2652 * target = SCSI ID.
2654 * Returns:
2655 * 0 = success
2657 static int
2658 qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2660 uint16_t mb[MAILBOX_REGISTER_COUNT];
2661 int status;
2663 ENTER("qla1280_device_reset");
2665 mb[0] = MBC_ABORT_TARGET;
2666 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2667 mb[2] = 1;
2668 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2670 /* Issue marker command. */
2671 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2673 if (status)
2674 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2676 LEAVE("qla1280_device_reset");
2677 return status;
2681 * qla1280_abort_command
2682 * Abort command aborts a specified IOCB.
2684 * Input:
2685 * ha = adapter block pointer.
2686 * sp = SB structure pointer.
2688 * Returns:
2689 * 0 = success
2691 static int
2692 qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2694 uint16_t mb[MAILBOX_REGISTER_COUNT];
2695 unsigned int bus, target, lun;
2696 int status;
2698 ENTER("qla1280_abort_command");
2700 bus = SCSI_BUS_32(sp->cmd);
2701 target = SCSI_TCN_32(sp->cmd);
2702 lun = SCSI_LUN_32(sp->cmd);
2704 sp->flags |= SRB_ABORT_PENDING;
2706 mb[0] = MBC_ABORT_COMMAND;
2707 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2708 mb[2] = handle >> 16;
2709 mb[3] = handle & 0xffff;
2710 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2712 if (status) {
2713 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2714 sp->flags &= ~SRB_ABORT_PENDING;
2718 LEAVE("qla1280_abort_command");
2719 return status;
2723 * qla1280_reset_adapter
2724 * Reset adapter.
2726 * Input:
2727 * ha = adapter block pointer.
2729 static void
2730 qla1280_reset_adapter(struct scsi_qla_host *ha)
2732 struct device_reg __iomem *reg = ha->iobase;
2734 ENTER("qla1280_reset_adapter");
2736 /* Disable ISP chip */
2737 ha->flags.online = 0;
2738 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
2739 WRT_REG_WORD(&reg->host_cmd,
2740 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2741 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2743 LEAVE("qla1280_reset_adapter");
2747 * Issue marker command.
2748 * Function issues marker IOCB.
2750 * Input:
2751 * ha = adapter block pointer.
2752 * bus = SCSI BUS number
2753 * id = SCSI ID
2754 * lun = SCSI LUN
2755 * type = marker modifier
2757 static void
2758 qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2760 struct mrk_entry *pkt;
2762 ENTER("qla1280_marker");
2764 /* Get request packet. */
2765 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2766 pkt->entry_type = MARKER_TYPE;
2767 pkt->lun = (uint8_t) lun;
2768 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2769 pkt->modifier = type;
2770 pkt->entry_status = 0;
2772 /* Issue command to ISP */
2773 qla1280_isp_cmd(ha);
2776 LEAVE("qla1280_marker");
2781 * qla1280_64bit_start_scsi
2782 * The start SCSI is responsible for building request packets on
2783 * request ring and modifying ISP input pointer.
2785 * Input:
2786 * ha = adapter block pointer.
2787 * sp = SB structure pointer.
2789 * Returns:
2790 * 0 = success, was able to issue command.
2792 #ifdef QLA_64BIT_PTR
2793 static int
2794 qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2796 struct device_reg __iomem *reg = ha->iobase;
2797 struct scsi_cmnd *cmd = sp->cmd;
2798 cmd_a64_entry_t *pkt;
2799 __le32 *dword_ptr;
2800 dma_addr_t dma_handle;
2801 int status = 0;
2802 int cnt;
2803 int req_cnt;
2804 int seg_cnt;
2805 u8 dir;
2807 ENTER("qla1280_64bit_start_scsi:");
2809 /* Calculate number of entries and segments required. */
2810 req_cnt = 1;
2811 seg_cnt = scsi_dma_map(cmd);
2812 if (seg_cnt > 0) {
2813 if (seg_cnt > 2) {
2814 req_cnt += (seg_cnt - 2) / 5;
2815 if ((seg_cnt - 2) % 5)
2816 req_cnt++;
2818 } else if (seg_cnt < 0) {
2819 status = 1;
2820 goto out;
2823 if ((req_cnt + 2) >= ha->req_q_cnt) {
2824 /* Calculate number of free request entries. */
2825 cnt = RD_REG_WORD(&reg->mailbox4);
2826 if (ha->req_ring_index < cnt)
2827 ha->req_q_cnt = cnt - ha->req_ring_index;
2828 else
2829 ha->req_q_cnt =
2830 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2833 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2834 ha->req_q_cnt, seg_cnt);
2836 /* If room for request in request ring. */
2837 if ((req_cnt + 2) >= ha->req_q_cnt) {
2838 status = SCSI_MLQUEUE_HOST_BUSY;
2839 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2840 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2841 req_cnt);
2842 goto out;
2845 /* Check for room in outstanding command list. */
2846 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2847 ha->outstanding_cmds[cnt] != NULL; cnt++);
2849 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2850 status = SCSI_MLQUEUE_HOST_BUSY;
2851 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2852 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2853 goto out;
2856 ha->outstanding_cmds[cnt] = sp;
2857 ha->req_q_cnt -= req_cnt;
2858 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2860 dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
2861 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2862 dprintk(2, " bus %i, target %i, lun %i\n",
2863 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2864 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2867 * Build command packet.
2869 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2871 pkt->entry_type = COMMAND_A64_TYPE;
2872 pkt->entry_count = (uint8_t) req_cnt;
2873 pkt->sys_define = (uint8_t) ha->req_ring_index;
2874 pkt->entry_status = 0;
2875 pkt->handle = cpu_to_le32(cnt);
2877 /* Zero out remaining portion of packet. */
2878 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2880 /* Set ISP command timeout. */
2881 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2883 /* Set device target ID and LUN */
2884 pkt->lun = SCSI_LUN_32(cmd);
2885 pkt->target = SCSI_BUS_32(cmd) ?
2886 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2888 /* Enable simple tag queuing if device supports it. */
2889 if (cmd->device->simple_tags)
2890 pkt->control_flags |= cpu_to_le16(BIT_3);
2892 /* Load SCSI command packet. */
2893 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2894 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2895 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
2897 /* Set transfer direction. */
2898 dir = qla1280_data_direction(cmd);
2899 pkt->control_flags |= cpu_to_le16(dir);
2901 /* Set total data segment count. */
2902 pkt->dseg_count = cpu_to_le16(seg_cnt);
2905 * Load data segments.
2907 if (seg_cnt) { /* If data transfer. */
2908 struct scatterlist *sg, *s;
2909 int remseg = seg_cnt;
2911 sg = scsi_sglist(cmd);
2913 /* Setup packet address segment pointer. */
2914 dword_ptr = (u32 *)&pkt->dseg_0_address;
2916 /* Load command entry data segments. */
2917 for_each_sg(sg, s, seg_cnt, cnt) {
2918 if (cnt == 2)
2919 break;
2921 dma_handle = sg_dma_address(s);
2922 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2923 if (ha->flags.use_pci_vchannel)
2924 sn_pci_set_vchan(ha->pdev,
2925 (unsigned long *)&dma_handle,
2926 SCSI_BUS_32(cmd));
2927 #endif
2928 *dword_ptr++ =
2929 cpu_to_le32(pci_dma_lo32(dma_handle));
2930 *dword_ptr++ =
2931 cpu_to_le32(pci_dma_hi32(dma_handle));
2932 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2933 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2934 cpu_to_le32(pci_dma_hi32(dma_handle)),
2935 cpu_to_le32(pci_dma_lo32(dma_handle)),
2936 cpu_to_le32(sg_dma_len(s)));
2937 remseg--;
2939 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2940 "command packet data - b %i, t %i, l %i \n",
2941 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2942 SCSI_LUN_32(cmd));
2943 qla1280_dump_buffer(5, (char *)pkt,
2944 REQUEST_ENTRY_SIZE);
2947 * Build continuation packets.
2949 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2950 "remains\n", seg_cnt);
2952 while (remseg > 0) {
2953 /* Update sg start */
2954 sg = s;
2955 /* Adjust ring index. */
2956 ha->req_ring_index++;
2957 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2958 ha->req_ring_index = 0;
2959 ha->request_ring_ptr =
2960 ha->request_ring;
2961 } else
2962 ha->request_ring_ptr++;
2964 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2966 /* Zero out packet. */
2967 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2969 /* Load packet defaults. */
2970 ((struct cont_a64_entry *) pkt)->entry_type =
2971 CONTINUE_A64_TYPE;
2972 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2973 ((struct cont_a64_entry *) pkt)->sys_define =
2974 (uint8_t)ha->req_ring_index;
2975 /* Setup packet address segment pointer. */
2976 dword_ptr =
2977 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2979 /* Load continuation entry data segments. */
2980 for_each_sg(sg, s, remseg, cnt) {
2981 if (cnt == 5)
2982 break;
2983 dma_handle = sg_dma_address(s);
2984 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2985 if (ha->flags.use_pci_vchannel)
2986 sn_pci_set_vchan(ha->pdev,
2987 (unsigned long *)&dma_handle,
2988 SCSI_BUS_32(cmd));
2989 #endif
2990 *dword_ptr++ =
2991 cpu_to_le32(pci_dma_lo32(dma_handle));
2992 *dword_ptr++ =
2993 cpu_to_le32(pci_dma_hi32(dma_handle));
2994 *dword_ptr++ =
2995 cpu_to_le32(sg_dma_len(s));
2996 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2997 cpu_to_le32(pci_dma_hi32(dma_handle)),
2998 cpu_to_le32(pci_dma_lo32(dma_handle)),
2999 cpu_to_le32(sg_dma_len(s)));
3001 remseg -= cnt;
3002 dprintk(5, "qla1280_64bit_start_scsi: "
3003 "continuation packet data - b %i, t "
3004 "%i, l %i \n", SCSI_BUS_32(cmd),
3005 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3006 qla1280_dump_buffer(5, (char *)pkt,
3007 REQUEST_ENTRY_SIZE);
3009 } else { /* No data transfer */
3010 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
3011 "packet data - b %i, t %i, l %i \n",
3012 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3013 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3015 /* Adjust ring index. */
3016 ha->req_ring_index++;
3017 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3018 ha->req_ring_index = 0;
3019 ha->request_ring_ptr = ha->request_ring;
3020 } else
3021 ha->request_ring_ptr++;
3023 /* Set chip new ring index. */
3024 dprintk(2,
3025 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
3026 sp->flags |= SRB_SENT;
3027 ha->actthreads++;
3028 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3029 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3030 mmiowb();
3032 out:
3033 if (status)
3034 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
3035 else
3036 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
3038 return status;
3040 #else /* !QLA_64BIT_PTR */
3043 * qla1280_32bit_start_scsi
3044 * The start SCSI is responsible for building request packets on
3045 * request ring and modifying ISP input pointer.
3047 * The Qlogic firmware interface allows every queue slot to have a SCSI
3048 * command and up to 4 scatter/gather (SG) entries. If we need more
3049 * than 4 SG entries, then continuation entries are used that can
3050 * hold another 7 entries each. The start routine determines if there
3051 * are enough empty slots and then builds the combination of requests
3052 * needed to fulfill the OS request (see the illustrative sketch after this function).
3054 * Input:
3055 * ha = adapter block pointer.
3056 * sp = SCSI Request Block structure pointer.
3058 * Returns:
3059 * 0 = success, was able to issue command.
3061 static int
3062 qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3064 struct device_reg __iomem *reg = ha->iobase;
3065 struct scsi_cmnd *cmd = sp->cmd;
3066 struct cmd_entry *pkt;
3067 __le32 *dword_ptr;
3068 int status = 0;
3069 int cnt;
3070 int req_cnt;
3071 int seg_cnt;
3072 u8 dir;
3074 ENTER("qla1280_32bit_start_scsi");
3076 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3077 cmd->cmnd[0]);
3079 /* Calculate number of entries and segments required. */
3080 req_cnt = 1;
3081 seg_cnt = scsi_dma_map(cmd);
3082 if (seg_cnt) {
3084 * if greater than four sg entries then we need to allocate
3085 * continuation entries
3087 if (seg_cnt > 4) {
3088 req_cnt += (seg_cnt - 4) / 7;
3089 if ((seg_cnt - 4) % 7)
3090 req_cnt++;
3092 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3093 cmd, seg_cnt, req_cnt);
3094 } else if (seg_cnt < 0) {
3095 status = 1;
3096 goto out;
3099 if ((req_cnt + 2) >= ha->req_q_cnt) {
3100 /* Calculate number of free request entries. */
3101 cnt = RD_REG_WORD(&reg->mailbox4);
3102 if (ha->req_ring_index < cnt)
3103 ha->req_q_cnt = cnt - ha->req_ring_index;
3104 else
3105 ha->req_q_cnt =
3106 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3109 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3110 ha->req_q_cnt, seg_cnt);
3111 /* If room for request in request ring. */
3112 if ((req_cnt + 2) >= ha->req_q_cnt) {
3113 status = SCSI_MLQUEUE_HOST_BUSY;
3114 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3115 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3116 ha->req_q_cnt, req_cnt);
3117 goto out;
3120 /* Check for empty slot in outstanding command list. */
3121 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3122 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3124 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3125 status = SCSI_MLQUEUE_HOST_BUSY;
3126 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3127 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3128 goto out;
3131 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3132 ha->outstanding_cmds[cnt] = sp;
3133 ha->req_q_cnt -= req_cnt;
3136 * Build command packet.
3138 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3140 pkt->entry_type = COMMAND_TYPE;
3141 pkt->entry_count = (uint8_t) req_cnt;
3142 pkt->sys_define = (uint8_t) ha->req_ring_index;
3143 pkt->entry_status = 0;
3144 pkt->handle = cpu_to_le32(cnt);
3146 /* Zero out remaining portion of packet. */
3147 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3149 /* Set ISP command timeout. */
3150 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3152 /* Set device target ID and LUN */
3153 pkt->lun = SCSI_LUN_32(cmd);
3154 pkt->target = SCSI_BUS_32(cmd) ?
3155 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3157 /* Enable simple tag queuing if device supports it. */
3158 if (cmd->device->simple_tags)
3159 pkt->control_flags |= cpu_to_le16(BIT_3);
3161 /* Load SCSI command packet. */
3162 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3163 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3165 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
3166 /* Set transfer direction. */
3167 dir = qla1280_data_direction(cmd);
3168 pkt->control_flags |= cpu_to_le16(dir);
3170 /* Set total data segment count. */
3171 pkt->dseg_count = cpu_to_le16(seg_cnt);
3174 * Load data segments.
3176 if (seg_cnt) {
3177 struct scatterlist *sg, *s;
3178 int remseg = seg_cnt;
3180 sg = scsi_sglist(cmd);
3182 /* Setup packet address segment pointer. */
3183 dword_ptr = &pkt->dseg_0_address;
3185 dprintk(3, "Building S/G data segments..\n");
3186 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3188 /* Load command entry data segments. */
3189 for_each_sg(sg, s, seg_cnt, cnt) {
3190 if (cnt == 4)
3191 break;
3192 *dword_ptr++ =
3193 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3194 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3195 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3196 (pci_dma_lo32(sg_dma_address(s))),
3197 (sg_dma_len(s)));
3198 remseg--;
3201 * Build continuation packets.
3203 dprintk(3, "S/G Building Continuation"
3204 "...seg_cnt=0x%x remains\n", seg_cnt);
3205 while (remseg > 0) {
3206 /* Continue from end point */
3207 sg = s;
3208 /* Adjust ring index. */
3209 ha->req_ring_index++;
3210 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3211 ha->req_ring_index = 0;
3212 ha->request_ring_ptr =
3213 ha->request_ring;
3214 } else
3215 ha->request_ring_ptr++;
3217 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3219 /* Zero out packet. */
3220 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3222 /* Load packet defaults. */
3223 ((struct cont_entry *) pkt)->
3224 entry_type = CONTINUE_TYPE;
3225 ((struct cont_entry *) pkt)->entry_count = 1;
3227 ((struct cont_entry *) pkt)->sys_define =
3228 (uint8_t) ha->req_ring_index;
3230 /* Setup packet address segment pointer. */
3231 dword_ptr =
3232 &((struct cont_entry *) pkt)->dseg_0_address;
3234 /* Load continuation entry data segments. */
3235 for_each_sg(sg, s, remseg, cnt) {
3236 if (cnt == 7)
3237 break;
3238 *dword_ptr++ =
3239 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3240 *dword_ptr++ =
3241 cpu_to_le32(sg_dma_len(s));
3242 dprintk(1,
3243 "S/G Segment Cont. phys_addr=0x%x, "
3244 "len=0x%x\n",
3245 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3246 cpu_to_le32(sg_dma_len(s)));
3248 remseg -= cnt;
3249 dprintk(5, "qla1280_32bit_start_scsi: "
3250 "continuation packet data - "
3251 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3252 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3253 qla1280_dump_buffer(5, (char *)pkt,
3254 REQUEST_ENTRY_SIZE);
3256 } else { /* No data transfer at all */
3257 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3258 "packet data - \n");
3259 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3261 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3262 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3263 REQUEST_ENTRY_SIZE);
3265 /* Adjust ring index. */
3266 ha->req_ring_index++;
3267 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3268 ha->req_ring_index = 0;
3269 ha->request_ring_ptr = ha->request_ring;
3270 } else
3271 ha->request_ring_ptr++;
3273 /* Set chip new ring index. */
3274 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3275 "for pending command\n");
3276 sp->flags |= SRB_SENT;
3277 ha->actthreads++;
3278 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3279 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3280 mmiowb();
3282 out:
3283 if (status)
3284 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3286 LEAVE("qla1280_32bit_start_scsi");
3288 return status;
3290 #endif
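/*
 * Editor's note - illustrative sketch only, not part of the original
 * driver.  It condenses the request-entry bookkeeping described in the
 * comment above qla1280_32bit_start_scsi(): one command IOCB carries a
 * limited number of S/G descriptors (2 for the 64-bit entries, 4 for
 * the 32-bit ones) and each continuation entry carries a few more
 * (5 and 7 respectively).  The helper name is hypothetical.
 */
static inline int
qla1280_example_req_cnt(int seg_cnt, int segs_in_cmd, int segs_per_cont)
{
	int req_cnt = 1;			/* the command entry itself */

	if (seg_cnt > segs_in_cmd)
		req_cnt += DIV_ROUND_UP(seg_cnt - segs_in_cmd, segs_per_cont);
	return req_cnt;
}
/*
 * e.g. qla1280_example_req_cnt(seg_cnt, 2, 5) for the 64-bit path and
 * qla1280_example_req_cnt(seg_cnt, 4, 7) for the 32-bit path.
 */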
3293 * qla1280_req_pkt
3294 * Function is responsible for locking ring and
3295 * getting a zeroed out request packet.
3297 * Input:
3298 * ha = adapter block pointer.
3300 * Returns:
3301 * 0 = failed to get slot.
3303 static request_t *
3304 qla1280_req_pkt(struct scsi_qla_host *ha)
3306 struct device_reg __iomem *reg = ha->iobase;
3307 request_t *pkt = NULL;
3308 int cnt;
3309 uint32_t timer;
3311 ENTER("qla1280_req_pkt");
3314 * This can be called from interrupt context, damn it!!!
3316 /* Wait for 30 seconds for slot. */
3317 for (timer = 15000000; timer; timer--) {
3318 if (ha->req_q_cnt > 0) {
3319 /* Calculate number of free request entries. */
3320 cnt = RD_REG_WORD(&reg->mailbox4);
3321 if (ha->req_ring_index < cnt)
3322 ha->req_q_cnt = cnt - ha->req_ring_index;
3323 else
3324 ha->req_q_cnt =
3325 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3328 /* Found empty request ring slot? */
3329 if (ha->req_q_cnt > 0) {
3330 ha->req_q_cnt--;
3331 pkt = ha->request_ring_ptr;
3333 /* Zero out packet. */
3334 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3337 * How can this be right when we have a ring
3338 * size of 512???
3340 /* Set system defined field. */
3341 pkt->sys_define = (uint8_t) ha->req_ring_index;
3343 /* Set entry count. */
3344 pkt->entry_count = 1;
3346 break;
3349 udelay(2); /* 10 */
3351 /* Check for pending interrupts. */
3352 qla1280_poll(ha);
3355 if (!pkt)
3356 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3357 else
3358 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3360 return pkt;
3364 * qla1280_isp_cmd
3365 * Function is responsible for modifying ISP input pointer.
3366 * Releases ring lock.
3368 * Input:
3369 * ha = adapter block pointer.
3371 static void
3372 qla1280_isp_cmd(struct scsi_qla_host *ha)
3374 struct device_reg __iomem *reg = ha->iobase;
3376 ENTER("qla1280_isp_cmd");
3378 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3379 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3380 REQUEST_ENTRY_SIZE);
3382 /* Adjust ring index. */
3383 ha->req_ring_index++;
3384 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3385 ha->req_ring_index = 0;
3386 ha->request_ring_ptr = ha->request_ring;
3387 } else
3388 ha->request_ring_ptr++;
3391 * Update request index to mailbox4 (Request Queue In).
3392 * The mmiowb() ensures that this write is ordered with writes by other
3393 * CPUs. Without the mmiowb(), it is possible for the following:
3394 * CPUA posts write of index 5 to mailbox4
3395 * CPUA releases host lock
3396 * CPUB acquires host lock
3397 * CPUB posts write of index 6 to mailbox4
3398 * On PCI bus, order reverses and write of 6 posts, then index 5,
3399 * causing chip to issue full queue of stale commands
3400 * The mmiowb() prevents future writes from crossing the barrier.
3401 * See Documentation/DocBook/deviceiobook.tmpl for more information.
3403 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3404 mmiowb();
3406 LEAVE("qla1280_isp_cmd");
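/*
 * Editor's note - illustrative sketch only, not part of the original
 * driver.  The same "advance the ring index with wrap-around, tell the
 * chip via mailbox4, fence the MMIO write" sequence appears in
 * qla1280_64bit_start_scsi(), qla1280_32bit_start_scsi() and
 * qla1280_isp_cmd() above; the helper below folds it together to make
 * the ordering requirement from the comment above explicit.  The
 * helper name is hypothetical.
 */
static inline void
qla1280_example_post_request(struct scsi_qla_host *ha)
{
	/* caller holds ha->host->host_lock */
	ha->req_ring_index++;
	if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	WRT_REG_WORD(&ha->iobase->mailbox4, ha->req_ring_index);
	mmiowb();	/* order the index write before the lock is dropped */
}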
3409 /****************************************************************************/
3410 /* Interrupt Service Routine. */
3411 /****************************************************************************/
3413 /****************************************************************************
3414 * qla1280_isr
3415 * Calls I/O done on command completion.
3417 * Input:
3418 * ha = adapter block pointer.
3419 * done_q = done queue.
3420 ****************************************************************************/
3421 static void
3422 qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3424 struct device_reg __iomem *reg = ha->iobase;
3425 struct response *pkt;
3426 struct srb *sp = NULL;
3427 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3428 uint16_t *wptr;
3429 uint32_t index;
3430 u16 istatus;
3432 ENTER("qla1280_isr");
3434 istatus = RD_REG_WORD(&reg->istatus);
3435 if (!(istatus & (RISC_INT | PCI_INT)))
3436 return;
3438 /* Save mailbox register 5 */
3439 mailbox[5] = RD_REG_WORD(&reg->mailbox5);
3441 /* Check for mailbox interrupt. */
3443 mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
3445 if (mailbox[0] & BIT_0) {
3446 /* Get mailbox data. */
3447 /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */
3449 wptr = &mailbox[0];
3450 *wptr++ = RD_REG_WORD(&reg->mailbox0);
3451 *wptr++ = RD_REG_WORD(&reg->mailbox1);
3452 *wptr = RD_REG_WORD(&reg->mailbox2);
3453 if (mailbox[0] != MBA_SCSI_COMPLETION) {
3454 wptr++;
3455 *wptr++ = RD_REG_WORD(&reg->mailbox3);
3456 *wptr++ = RD_REG_WORD(&reg->mailbox4);
3457 wptr++;
3458 *wptr++ = RD_REG_WORD(&reg->mailbox6);
3459 *wptr = RD_REG_WORD(&reg->mailbox7);
3462 /* Release mailbox registers. */
3464 WRT_REG_WORD(&reg->semaphore, 0);
3465 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3467 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3468 mailbox[0]);
3470 /* Handle asynchronous event */
3471 switch (mailbox[0]) {
3472 case MBA_SCSI_COMPLETION: /* Response completion */
3473 dprintk(5, "qla1280_isr: mailbox SCSI response "
3474 "completion\n");
3476 if (ha->flags.online) {
3477 /* Get outstanding command index. */
3478 index = mailbox[2] << 16 | mailbox[1];
3480 /* Validate handle. */
3481 if (index < MAX_OUTSTANDING_COMMANDS)
3482 sp = ha->outstanding_cmds[index];
3483 else
3484 sp = NULL;
3486 if (sp) {
3487 /* Free outstanding command slot. */
3488 ha->outstanding_cmds[index] = NULL;
3490 /* Save ISP completion status */
3491 CMD_RESULT(sp->cmd) = 0;
3492 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3494 /* Place block on done queue */
3495 list_add_tail(&sp->list, done_q);
3496 } else {
3498 * If we get here we have a real problem!
3500 printk(KERN_WARNING
3501 "qla1280: ISP invalid handle\n");
3504 break;
3506 case MBA_BUS_RESET: /* SCSI Bus Reset */
3507 ha->flags.reset_marker = 1;
3508 index = mailbox[6] & BIT_0;
3509 ha->bus_settings[index].reset_marker = 1;
3511 printk(KERN_DEBUG "qla1280_isr(): index %i "
3512 "asynchronous BUS_RESET\n", index);
3513 break;
3515 case MBA_SYSTEM_ERR: /* System Error */
3516 printk(KERN_WARNING
3517 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3518 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3519 mailbox[3]);
3520 break;
3522 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
3523 printk(KERN_WARNING
3524 "qla1280: ISP Request Transfer Error\n");
3525 break;
3527 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
3528 printk(KERN_WARNING
3529 "qla1280: ISP Response Transfer Error\n");
3530 break;
3532 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
3533 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3534 break;
3536 case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */
3537 dprintk(2,
3538 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3539 break;
3541 case MBA_DEVICE_RESET: /* Bus Device Reset */
3542 printk(KERN_INFO "qla1280_isr(): asynchronous "
3543 "BUS_DEVICE_RESET\n");
3545 ha->flags.reset_marker = 1;
3546 index = mailbox[6] & BIT_0;
3547 ha->bus_settings[index].reset_marker = 1;
3548 break;
3550 case MBA_BUS_MODE_CHANGE:
3551 dprintk(2,
3552 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3553 break;
3555 default:
3556 /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
3557 if (mailbox[0] < MBA_ASYNC_EVENT) {
3558 wptr = &mailbox[0];
3559 memcpy((uint16_t *) ha->mailbox_out, wptr,
3560 MAILBOX_REGISTER_COUNT *
3561 sizeof(uint16_t));
3563 if(ha->mailbox_wait != NULL)
3564 complete(ha->mailbox_wait);
3566 break;
3568 } else {
3569 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3573 * We will receive interrupts during mailbox testing prior to
3574 * the card being marked online, hence the double check.
3576 if (!(ha->flags.online && !ha->mailbox_wait)) {
3577 dprintk(2, "qla1280_isr: Response pointer Error\n");
3578 goto out;
3581 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3582 goto out;
3584 while (ha->rsp_ring_index != mailbox[5]) {
3585 pkt = ha->response_ring_ptr;
3587 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3588 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3589 dprintk(5,"qla1280_isr: response packet data\n");
3590 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3592 if (pkt->entry_type == STATUS_TYPE) {
3593 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3594 || pkt->comp_status || pkt->entry_status) {
3595 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3596 "0x%x mailbox[5] = 0x%x, comp_status "
3597 "= 0x%x, scsi_status = 0x%x\n",
3598 ha->rsp_ring_index, mailbox[5],
3599 le16_to_cpu(pkt->comp_status),
3600 le16_to_cpu(pkt->scsi_status));
3602 } else {
3603 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3604 "0x%x, mailbox[5] = 0x%x\n",
3605 ha->rsp_ring_index, mailbox[5]);
3606 dprintk(2, "qla1280_isr: response packet data\n");
3607 qla1280_dump_buffer(2, (char *)pkt,
3608 RESPONSE_ENTRY_SIZE);
3611 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3612 dprintk(2, "status: Cmd %p, handle %i\n",
3613 ha->outstanding_cmds[pkt->handle]->cmd,
3614 pkt->handle);
3615 if (pkt->entry_type == STATUS_TYPE)
3616 qla1280_status_entry(ha, pkt, done_q);
3617 else
3618 qla1280_error_entry(ha, pkt, done_q);
3619 /* Adjust ring index. */
3620 ha->rsp_ring_index++;
3621 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3622 ha->rsp_ring_index = 0;
3623 ha->response_ring_ptr = ha->response_ring;
3624 } else
3625 ha->response_ring_ptr++;
3626 WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3630 out:
3631 LEAVE("qla1280_isr");
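/*
 * Editor's note - illustrative sketch only, not part of the original
 * driver.  For the MBA_SCSI_COMPLETION fast-posting event handled
 * above, mailbox registers 1 and 2 carry the low and high halves of
 * the 32-bit outstanding-command index; the helper name is
 * hypothetical.
 */
static inline uint32_t
qla1280_example_completion_index(uint16_t mbox1, uint16_t mbox2)
{
	return ((uint32_t)mbox2 << 16) | mbox1;
}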
3635 * qla1280_rst_aen
3636 * Processes asynchronous reset.
3638 * Input:
3639 * ha = adapter block pointer.
3641 static void
3642 qla1280_rst_aen(struct scsi_qla_host *ha)
3644 uint8_t bus;
3646 ENTER("qla1280_rst_aen");
3648 if (ha->flags.online && !ha->flags.reset_active &&
3649 !ha->flags.abort_isp_active) {
3650 ha->flags.reset_active = 1;
3651 while (ha->flags.reset_marker) {
3652 /* Issue marker command. */
3653 ha->flags.reset_marker = 0;
3654 for (bus = 0; bus < ha->ports &&
3655 !ha->flags.reset_marker; bus++) {
3656 if (ha->bus_settings[bus].reset_marker) {
3657 ha->bus_settings[bus].reset_marker = 0;
3658 qla1280_marker(ha, bus, 0, 0,
3659 MK_SYNC_ALL);
3665 LEAVE("qla1280_rst_aen");
3670 * qla1280_status_entry
3671 * Processes received ISP status entry.
3673 * Input:
3674 * ha = adapter block pointer.
3675 * pkt = entry pointer.
3676 * done_q = done queue.
3678 static void
3679 qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3680 struct list_head *done_q)
3682 unsigned int bus, target, lun;
3683 int sense_sz;
3684 struct srb *sp;
3685 struct scsi_cmnd *cmd;
3686 uint32_t handle = le32_to_cpu(pkt->handle);
3687 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3688 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3690 ENTER("qla1280_status_entry");
3692 /* Validate handle. */
3693 if (handle < MAX_OUTSTANDING_COMMANDS)
3694 sp = ha->outstanding_cmds[handle];
3695 else
3696 sp = NULL;
3698 if (!sp) {
3699 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3700 goto out;
3703 /* Free outstanding command slot. */
3704 ha->outstanding_cmds[handle] = NULL;
3706 cmd = sp->cmd;
3708 /* Generate LU queue on cntrl, target, LUN */
3709 bus = SCSI_BUS_32(cmd);
3710 target = SCSI_TCN_32(cmd);
3711 lun = SCSI_LUN_32(cmd);
3713 if (comp_status || scsi_status) {
3714 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3715 "0x%x, handle = 0x%x\n", comp_status,
3716 scsi_status, handle);
3719 /* Target busy or queue full */
3720 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3721 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3722 CMD_RESULT(cmd) = scsi_status & 0xff;
3723 } else {
3725 /* Save ISP completion status */
3726 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3728 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3729 if (comp_status != CS_ARS_FAILED) {
3730 uint16_t req_sense_length =
3731 le16_to_cpu(pkt->req_sense_length);
3732 if (req_sense_length < CMD_SNSLEN(cmd))
3733 sense_sz = req_sense_length;
3734 else
3735 /*
3736 * scsi_cmnd->sense_buffer is
3737 * 64 bytes, why only copy 63?
3738 * This looks wrong! /Jes
3739 */
3740 sense_sz = CMD_SNSLEN(cmd) - 1;
3742 memcpy(cmd->sense_buffer,
3743 &pkt->req_sense_data, sense_sz);
3744 } else
3745 sense_sz = 0;
3746 memset(cmd->sense_buffer + sense_sz, 0,
3747 SCSI_SENSE_BUFFERSIZE - sense_sz);
3749 dprintk(2, "qla1280_status_entry: Check "
3750 "condition Sense data, b %i, t %i, "
3751 "l %i\n", bus, target, lun);
3752 if (sense_sz)
3753 qla1280_dump_buffer(2,
3754 (char *)cmd->sense_buffer,
3755 sense_sz);
3756 }
3757 }
3758 }
3759 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3761 /* Place command on done queue. */
3762 list_add_tail(&sp->list, done_q);
3763 out:
3764 LEAVE("qla1280_status_entry");
3765 }
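/*
 * Note on the result encoding used above: cmd->result packs the SCSI
 * status byte into bits 0-7 and the host byte (DID_*) into bits 16-23,
 * so a clean completion is roughly
 *
 *	CMD_RESULT(cmd) = (DID_OK << 16) | SAM_STAT_GOOD;
 *
 * which is why BUSY/TASK SET FULL are passed through as the bare
 * status byte while transport problems are reported via DID_* codes.
 */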
3767 /*
3768 * qla1280_error_entry
3769 * Processes error entry.
3770 *
3771 * Input:
3772 * ha = adapter block pointer.
3773 * pkt = entry pointer.
3774 * done_q = done queue.
3775 */
3776 static void
3777 qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3778 struct list_head *done_q)
3779 {
3780 struct srb *sp;
3781 uint32_t handle = le32_to_cpu(pkt->handle);
3783 ENTER("qla1280_error_entry");
3785 if (pkt->entry_status & BIT_3)
3786 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3787 else if (pkt->entry_status & BIT_2)
3788 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3789 else if (pkt->entry_status & BIT_1)
3790 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3791 else
3792 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3794 /* Validate handle. */
3795 if (handle < MAX_OUTSTANDING_COMMANDS)
3796 sp = ha->outstanding_cmds[handle];
3797 else
3798 sp = NULL;
3800 if (sp) {
3801 /* Free outstanding command slot. */
3802 ha->outstanding_cmds[handle] = NULL;
3804 /* Bad payload or header */
3805 if (pkt->entry_status & (BIT_3 + BIT_2)) {
3806 /* Bad payload or header, set error status. */
3807 /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
3808 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3809 } else if (pkt->entry_status & BIT_1) { /* FULL flag */
3810 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3811 } else {
3812 /* Set error status. */
3813 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3814 }
3816 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3818 /* Place command on done queue. */
3819 list_add_tail(&sp->list, done_q);
3820 }
3821 #ifdef QLA_64BIT_PTR
3822 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3823 printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
3824 }
3825 #endif
3827 LEAVE("qla1280_error_entry");
3828 }
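/*
 * The entry_status bits decoded above are, as far as this driver is
 * concerned: BIT_3 = bad payload, BIT_2 = bad header, BIT_1 = ring
 * full.  Payload/header corruption is surfaced as DID_ERROR, while a
 * full ring becomes DID_BUS_BUSY so the midlayer can retry the
 * command.
 */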
3830 /*
3831 * qla1280_abort_isp
3832 * Resets ISP and aborts all outstanding commands.
3833 *
3834 * Input:
3835 * ha = adapter block pointer.
3836 *
3837 * Returns:
3838 * 0 = success
3839 */
3840 static int
3841 qla1280_abort_isp(struct scsi_qla_host *ha)
3842 {
3843 struct device_reg __iomem *reg = ha->iobase;
3844 struct srb *sp;
3845 int status = 0;
3846 int cnt;
3847 int bus;
3849 ENTER("qla1280_abort_isp");
3851 if (ha->flags.abort_isp_active || !ha->flags.online)
3852 goto out;
3854 ha->flags.abort_isp_active = 1;
3856 /* Disable ISP interrupts. */
3857 qla1280_disable_intrs(ha);
3858 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3859 RD_REG_WORD(&reg->id_l);
3861 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3862 ha->host_no);
3863 /* Dequeue all commands in outstanding command list. */
3864 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3865 struct scsi_cmnd *cmd;
3866 sp = ha->outstanding_cmds[cnt];
3867 if (sp) {
3868 cmd = sp->cmd;
3869 CMD_RESULT(cmd) = DID_RESET << 16;
3870 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3871 ha->outstanding_cmds[cnt] = NULL;
3872 list_add_tail(&sp->list, &ha->done_q);
3873 }
3874 }
3876 qla1280_done(ha);
3878 status = qla1280_load_firmware(ha);
3879 if (status)
3880 goto out;
3882 /* Setup adapter based on NVRAM parameters. */
3883 qla1280_nvram_config (ha);
3885 status = qla1280_init_rings(ha);
3886 if (status)
3887 goto out;
3889 /* Issue SCSI reset. */
3890 for (bus = 0; bus < ha->ports; bus++)
3891 qla1280_bus_reset(ha, bus);
3893 ha->flags.abort_isp_active = 0;
3894 out:
3895 if (status) {
3896 printk(KERN_WARNING
3897 "qla1280: ISP error recovery failed, board disabled");
3898 qla1280_reset_adapter(ha);
3899 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3900 }
3902 LEAVE("qla1280_abort_isp");
3903 return status;
3904 }
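/*
 * Recovery sequence used above, in order: pause the RISC, fail every
 * outstanding command back with DID_RESET, reload the firmware,
 * reapply the NVRAM configuration, reinitialize the request/response
 * rings and finally reset each SCSI bus.  Any failure along the way
 * leaves the board disabled via qla1280_reset_adapter().
 */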
3907 /*
3908 * qla1280_debounce_register
3909 * Debounce register.
3910 *
3911 * Input:
3912 * port = register address.
3913 *
3914 * Returns:
3915 * register value.
3916 */
3917 static u16
3918 qla1280_debounce_register(volatile u16 __iomem * addr)
3919 {
3920 volatile u16 ret;
3921 volatile u16 ret2;
3923 ret = RD_REG_WORD(addr);
3924 ret2 = RD_REG_WORD(addr);
3926 if (ret == ret2)
3927 return ret;
3929 do {
3930 cpu_relax();
3931 ret = RD_REG_WORD(addr);
3932 ret2 = RD_REG_WORD(addr);
3933 } while (ret != ret2);
3935 return ret;
3936 }
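/*
 * The "debounce" above is simply "read until two consecutive reads
 * agree", which guards against sampling a register while the chip is
 * updating it.  For any volatile 16-bit register it boils down to:
 *
 *	do {
 *		a = RD_REG_WORD(addr);
 *		b = RD_REG_WORD(addr);
 *	} while (a != b);
 *	return a;
 */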
3939 /************************************************************************
3940 * qla1280_check_for_dead_scsi_bus *
3942 * This routine checks for a dead SCSI bus *
3943 ************************************************************************/
3944 #define SET_SXP_BANK 0x0100
3945 #define SCSI_PHASE_INVALID 0x87FF
3946 static int
3947 qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3948 {
3949 uint16_t config_reg, scsi_control;
3950 struct device_reg __iomem *reg = ha->iobase;
3952 if (ha->bus_settings[bus].scsi_bus_dead) {
3953 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3954 config_reg = RD_REG_WORD(&reg->cfg_1);
3955 WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
3956 scsi_control = RD_REG_WORD(&reg->scsiControlPins);
3957 WRT_REG_WORD(&reg->cfg_1, config_reg);
3958 WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3960 if (scsi_control == SCSI_PHASE_INVALID) {
3961 ha->bus_settings[bus].scsi_bus_dead = 1;
3962 return 1; /* bus is dead */
3963 } else {
3964 ha->bus_settings[bus].scsi_bus_dead = 0;
3965 ha->bus_settings[bus].failed_reset_count = 0;
3966 }
3967 }
3968 return 0; /* bus is not dead */
3969 }
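/*
 * The probe above pauses the RISC, temporarily switches cfg_1 to the
 * SXP register bank, and samples the SCSI control pins.  A reading of
 * SCSI_PHASE_INVALID (0x87FF) is taken to mean nothing is driving the
 * bus, i.e. the bus is dead; anything else clears the dead flag and
 * the failed-reset counter.
 */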
3971 static void
3972 qla1280_get_target_parameters(struct scsi_qla_host *ha,
3973 struct scsi_device *device)
3974 {
3975 uint16_t mb[MAILBOX_REGISTER_COUNT];
3976 int bus, target, lun;
3978 bus = device->channel;
3979 target = device->id;
3980 lun = device->lun;
3983 mb[0] = MBC_GET_TARGET_PARAMETERS;
3984 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3985 mb[1] <<= 8;
3986 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3987 &mb[0]);
3989 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3991 if (mb[3] != 0) {
3992 printk(" Sync: period %d, offset %d",
3993 (mb[3] & 0xff), (mb[3] >> 8));
3994 if (mb[2] & BIT_13)
3995 printk(", Wide");
3996 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
3997 printk(", DT");
3998 } else
3999 printk(" Async");
4001 if (device->simple_tags)
4002 printk(", Tagged queuing: depth %d", device->queue_depth);
4003 printk("\n");
4004 }
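/*
 * Decoding of the MBC_GET_TARGET_PARAMETERS reply used above: mb[3]
 * carries the negotiated sync period (low byte) and offset (high
 * byte), BIT_13 in mb[2] indicates a wide (16-bit) connection, and
 * BIT_5 together with the high byte of mb[6] being at least 2 is
 * treated as dual-transition (DT) clocking.  mb[3] == 0 means the
 * target is running asynchronously.
 */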
4007 #if DEBUG_QLA1280
4008 static void
4009 __qla1280_dump_buffer(char *b, int size)
4010 {
4011 int cnt;
4012 u8 c;
4014 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
4015 "Bh Ch Dh Eh Fh\n");
4016 printk(KERN_DEBUG "---------------------------------------------"
4017 "------------------\n");
4019 for (cnt = 0; cnt < size;) {
4020 c = *b++;
4022 printk("0x%02x", c);
4023 cnt++;
4024 if (!(cnt % 16))
4025 printk("\n");
4026 else
4027 printk(" ");
4028 }
4029 if (cnt % 16)
4030 printk("\n");
4031 }
4033 /**************************************************************************
4034 * ql1280_print_scsi_cmd
4036 **************************************************************************/
4037 static void
4038 __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4039 {
4040 struct scsi_qla_host *ha;
4041 struct Scsi_Host *host = CMD_HOST(cmd);
4042 struct srb *sp;
4043 /* struct scatterlist *sg; */
4045 int i;
4046 ha = (struct scsi_qla_host *)host->hostdata;
4048 sp = (struct srb *)CMD_SP(cmd);
4049 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
4050 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
4051 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
4052 CMD_CDBLEN(cmd));
4053 printk(" CDB = ");
4054 for (i = 0; i < cmd->cmd_len; i++) {
4055 printk("0x%02x ", cmd->cmnd[i]);
4056 }
4057 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
4058 printk(" request buffer=0x%p, request buffer len=0x%x\n",
4059 scsi_sglist(cmd), scsi_bufflen(cmd));
4060 /* if (cmd->use_sg)
4062 sg = (struct scatterlist *) cmd->request_buffer;
4063 printk(" SG buffer: \n");
4064 qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
4065 } */
4066 printk(" tag=%d, transfersize=0x%x \n",
4067 cmd->tag, cmd->transfersize);
4068 printk(" SP=0x%p\n", CMD_SP(cmd));
4069 printk(" underflow size = 0x%x, direction=0x%x\n",
4070 cmd->underflow, cmd->sc_data_direction);
4071 }
4073 /**************************************************************************
4074 * ql1280_dump_device
4076 **************************************************************************/
4077 static void
4078 ql1280_dump_device(struct scsi_qla_host *ha)
4079 {
4081 struct scsi_cmnd *cp;
4082 struct srb *sp;
4083 int i;
4085 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4087 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4088 if ((sp = ha->outstanding_cmds[i]) == NULL)
4089 continue;
4090 if ((cp = sp->cmd) == NULL)
4091 continue;
4092 qla1280_print_scsi_cmd(1, cp);
4093 }
4094 }
4095 #endif
4098 enum tokens {
4099 TOKEN_NVRAM,
4100 TOKEN_SYNC,
4101 TOKEN_WIDE,
4102 TOKEN_PPR,
4103 TOKEN_VERBOSE,
4104 TOKEN_DEBUG,
4105 };
4107 struct setup_tokens {
4108 char *token;
4109 int val;
4110 };
4112 static struct setup_tokens setup_token[] __initdata =
4113 {
4114 { "nvram", TOKEN_NVRAM },
4115 { "sync", TOKEN_SYNC },
4116 { "wide", TOKEN_WIDE },
4117 { "ppr", TOKEN_PPR },
4118 { "verbose", TOKEN_VERBOSE },
4119 { "debug", TOKEN_DEBUG },
4120 };
4123 /**************************************************************************
4124 * qla1280_setup
4126 * Handle boot parameters. This really needs to be changed so one
4127 * can specify per adapter parameters.
4128 **************************************************************************/
4129 static int __init
4130 qla1280_setup(char *s)
4131 {
4132 char *cp, *ptr;
4133 unsigned long val;
4134 int toke;
4136 cp = s;
4138 while (cp && (ptr = strchr(cp, ':'))) {
4139 ptr++;
4140 if (!strcmp(ptr, "yes")) {
4141 val = 0x10000;
4142 ptr += 3;
4143 } else if (!strcmp(ptr, "no")) {
4144 val = 0;
4145 ptr += 2;
4146 } else
4147 val = simple_strtoul(ptr, &ptr, 0);
4149 switch ((toke = qla1280_get_token(cp))) {
4150 case TOKEN_NVRAM:
4151 if (!val)
4152 driver_setup.no_nvram = 1;
4153 break;
4154 case TOKEN_SYNC:
4155 if (!val)
4156 driver_setup.no_sync = 1;
4157 else if (val != 0x10000)
4158 driver_setup.sync_mask = val;
4159 break;
4160 case TOKEN_WIDE:
4161 if (!val)
4162 driver_setup.no_wide = 1;
4163 else if (val != 0x10000)
4164 driver_setup.wide_mask = val;
4165 break;
4166 case TOKEN_PPR:
4167 if (!val)
4168 driver_setup.no_ppr = 1;
4169 else if (val != 0x10000)
4170 driver_setup.ppr_mask = val;
4171 break;
4172 case TOKEN_VERBOSE:
4173 qla1280_verbose = val;
4174 break;
4175 default:
4176 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4177 cp);
4178 }
4180 cp = strchr(ptr, ';');
4181 if (cp)
4182 cp++;
4183 else {
4184 break;
4185 }
4186 }
4187 return 1;
4188 }
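/*
 * Example: booting with an option string such as
 *
 *	qla1280=verbose:1;sync:0;wide:0x7
 *
 * enables verbose logging, sets driver_setup.no_sync and stores 0x7 in
 * driver_setup.wide_mask.  Each "token:value" pair is separated by a
 * semicolon; values other than "yes"/"no" are parsed with
 * simple_strtoul().  Note that "yes" and "no" are only recognised by
 * the strcmp() above when they are the final characters of the string,
 * since the comparison covers the whole remainder.
 */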
4191 static int __init
4192 qla1280_get_token(char *str)
4193 {
4194 char *sep;
4195 long ret = -1;
4196 int i;
4198 sep = strchr(str, ':');
4200 if (sep) {
4201 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4202 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4203 ret = setup_token[i].val;
4204 break;
4205 }
4206 }
4207 }
4209 return ret;
4210 }
4213 static struct scsi_host_template qla1280_driver_template = {
4214 .module = THIS_MODULE,
4215 .proc_name = "qla1280",
4216 .name = "Qlogic ISP 1280/12160",
4217 .info = qla1280_info,
4218 .slave_configure = qla1280_slave_configure,
4219 .queuecommand = qla1280_queuecommand,
4220 .eh_abort_handler = qla1280_eh_abort,
4221 .eh_device_reset_handler= qla1280_eh_device_reset,
4222 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4223 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4224 .bios_param = qla1280_biosparam,
4225 .can_queue = 0xfffff,
4226 .this_id = -1,
4227 .sg_tablesize = SG_ALL,
4228 .cmd_per_lun = 1,
4229 .use_clustering = ENABLE_CLUSTERING,
4230 };
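/*
 * can_queue is effectively unlimited here; in practice the number of
 * concurrent commands is bounded by the MAX_OUTSTANDING_COMMANDS
 * handle table kept by the driver itself, so the midlayer is never
 * the one doing the throttling.
 */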
4233 static int
4234 qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4235 {
4236 int devnum = id->driver_data;
4237 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4238 struct Scsi_Host *host;
4239 struct scsi_qla_host *ha;
4240 int error = -ENODEV;
4242 /* Bypass all AMI SUBSYS VENDOR IDs */
4243 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4244 printk(KERN_INFO
4245 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4246 goto error;
4247 }
4249 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4250 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4252 if (pci_enable_device(pdev)) {
4253 printk(KERN_WARNING
4254 "qla1280: Failed to enabled pci device, aborting.\n");
4255 goto error;
4256 }
4258 pci_set_master(pdev);
4260 error = -ENOMEM;
4261 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4262 if (!host) {
4263 printk(KERN_WARNING
4264 "qla1280: Failed to register host, aborting.\n");
4265 goto error_disable_device;
4266 }
4268 ha = (struct scsi_qla_host *)host->hostdata;
4269 memset(ha, 0, sizeof(struct scsi_qla_host));
4271 ha->pdev = pdev;
4272 ha->devnum = devnum; /* specifies microcode load address */
4274 #ifdef QLA_64BIT_PTR
4275 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
4276 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4277 printk(KERN_WARNING "scsi(%li): Unable to set a "
4278 "suitable DMA mask - aborting\n", ha->host_no);
4279 error = -ENODEV;
4280 goto error_put_host;
4281 }
4282 } else
4283 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4284 ha->host_no);
4285 #else
4286 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4287 printk(KERN_WARNING "scsi(%li): Unable to set a "
4288 "suitable DMA mask - aborting\n", ha->host_no);
4289 error = -ENODEV;
4290 goto error_put_host;
4291 }
4292 #endif
4294 ha->request_ring = pci_alloc_consistent(ha->pdev,
4295 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4296 &ha->request_dma);
4297 if (!ha->request_ring) {
4298 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4299 goto error_put_host;
4300 }
4302 ha->response_ring = pci_alloc_consistent(ha->pdev,
4303 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4304 &ha->response_dma);
4305 if (!ha->response_ring) {
4306 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4307 goto error_free_request_ring;
4308 }
4310 ha->ports = bdp->numPorts;
4312 ha->host = host;
4313 ha->host_no = host->host_no;
4315 host->irq = pdev->irq;
4316 host->max_channel = bdp->numPorts - 1;
4317 host->max_lun = MAX_LUNS - 1;
4318 host->max_id = MAX_TARGETS;
4319 host->max_sectors = 1024;
4320 host->unique_id = host->host_no;
4322 error = -ENODEV;
4324 #if MEMORY_MAPPED_IO
4325 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4326 if (!ha->mmpbase) {
4327 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4328 goto error_free_response_ring;
4329 }
4331 host->base = (unsigned long)ha->mmpbase;
4332 ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
4333 #else
4334 host->io_port = pci_resource_start(ha->pdev, 0);
4335 if (!request_region(host->io_port, 0xff, "qla1280")) {
4336 printk(KERN_INFO "qla1280: Failed to reserve i/o region "
4337 "0x%04lx-0x%04lx - already in use\n",
4338 host->io_port, host->io_port + 0xff);
4339 goto error_free_response_ring;
4340 }
4342 ha->iobase = (struct device_reg *)host->io_port;
4343 #endif
4345 INIT_LIST_HEAD(&ha->done_q);
4347 /* Disable ISP interrupts. */
4348 qla1280_disable_intrs(ha);
4350 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4351 "qla1280", ha)) {
4352 printk("qla1280 : Failed to reserve interrupt %d already "
4353 "in use\n", pdev->irq);
4354 goto error_release_region;
4355 }
4357 /* load the F/W, read parameters, and init the H/W */
4358 if (qla1280_initialize_adapter(ha)) {
4359 printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
4360 goto error_free_irq;
4361 }
4363 /* set our host ID (need to do something about our two IDs) */
4364 host->this_id = ha->bus_settings[0].id;
4366 pci_set_drvdata(pdev, host);
4368 error = scsi_add_host(host, &pdev->dev);
4369 if (error)
4370 goto error_disable_adapter;
4371 scsi_scan_host(host);
4373 return 0;
4375 error_disable_adapter:
4376 qla1280_disable_intrs(ha);
4377 error_free_irq:
4378 free_irq(pdev->irq, ha);
4379 error_release_region:
4380 #if MEMORY_MAPPED_IO
4381 iounmap(ha->mmpbase);
4382 #else
4383 release_region(host->io_port, 0xff);
4384 #endif
4385 error_free_response_ring:
4386 pci_free_consistent(ha->pdev,
4387 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4388 ha->response_ring, ha->response_dma);
4389 error_free_request_ring:
4390 pci_free_consistent(ha->pdev,
4391 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4392 ha->request_ring, ha->request_dma);
4393 error_put_host:
4394 scsi_host_put(host);
4395 error_disable_device:
4396 pci_disable_device(pdev);
4397 error:
4398 return error;
4399 }
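/*
 * The error labels above unwind strictly in reverse order of setup:
 * interrupts, IRQ, MMIO mapping or I/O region, response ring, request
 * ring, host structure, PCI device.  Anything added to the probe path
 * needs a matching label in the right place in this chain.
 */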
4402 static void
4403 qla1280_remove_one(struct pci_dev *pdev)
4404 {
4405 struct Scsi_Host *host = pci_get_drvdata(pdev);
4406 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
4408 scsi_remove_host(host);
4410 qla1280_disable_intrs(ha);
4412 free_irq(pdev->irq, ha);
4414 #if MEMORY_MAPPED_IO
4415 iounmap(ha->mmpbase);
4416 #else
4417 release_region(host->io_port, 0xff);
4418 #endif
4420 pci_free_consistent(ha->pdev,
4421 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
4422 ha->request_ring, ha->request_dma);
4423 pci_free_consistent(ha->pdev,
4424 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
4425 ha->response_ring, ha->response_dma);
4427 pci_disable_device(pdev);
4429 scsi_host_put(host);
4430 }
4432 static struct pci_driver qla1280_pci_driver = {
4433 .name = "qla1280",
4434 .id_table = qla1280_pci_tbl,
4435 .probe = qla1280_probe_one,
4436 .remove = qla1280_remove_one,
4437 };
4439 static int __init
4440 qla1280_init(void)
4441 {
4442 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4443 printk(KERN_WARNING
4444 "qla1280: struct srb too big, aborting\n");
4445 return -EINVAL;
4446 }
4448 #ifdef MODULE
4449 /*
4450 * If we are called as a module, the qla1280 pointer may not be null
4451 * and it would point to our bootup string, just like on the lilo
4452 * command line. If not NULL, then process this config string with
4453 * qla1280_setup
4454 *
4455 * Boot time Options
4456 * To add options at boot time add a line to your lilo.conf file like:
4457 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
4458 * which will result in the first four devices on the first two
4459 * controllers being set to a tagged queue depth of 255.
4460 */
4461 if (qla1280)
4462 qla1280_setup(qla1280);
4463 #endif
4465 return pci_register_driver(&qla1280_pci_driver);
4466 }
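/*
 * The sizeof() check above exists because the driver stores its
 * per-command struct srb in the scsi_pointer (cmd->SCp) area of each
 * struct scsi_cmnd (see CMD_SP()); if srb ever outgrows scsi_pointer
 * the module refuses to load rather than corrupt memory.
 */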
4468 static void __exit
4469 qla1280_exit(void)
4470 {
4471 int i;
4473 pci_unregister_driver(&qla1280_pci_driver);
4474 /* release any allocated firmware images */
4475 for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4476 release_firmware(qla1280_fw_tbl[i].fw);
4477 qla1280_fw_tbl[i].fw = NULL;
4478 }
4479 }
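/*
 * qla1280_fw_tbl caches any firmware images requested while the module
 * was loaded; dropping the references only here on unload keeps the
 * images available for later reloads (e.g. during ISP recovery) for
 * the whole lifetime of the driver.
 */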
4481 module_init(qla1280_init);
4482 module_exit(qla1280_exit);
4484 MODULE_AUTHOR("Qlogic & Jes Sorensen");
4485 MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
4486 MODULE_LICENSE("GPL");
4487 MODULE_FIRMWARE("qlogic/1040.bin");
4488 MODULE_FIRMWARE("qlogic/1280.bin");
4489 MODULE_FIRMWARE("qlogic/12160.bin");
4490 MODULE_VERSION(QLA1280_VERSION);
4492 /*
4493 * Overrides for Emacs so that we almost follow Linus's tabbing style.
4494 * Emacs will notice this stuff at the end of the file and automatically
4495 * adjust the settings for this buffer only. This must remain at the end
4496 * of the file.
4497 * ---------------------------------------------------------------------------
4498 * Local variables:
4499 * c-basic-offset: 8
4500 * tab-width: 8
4501 * End:
4502 */