drivers/scsi/qla1280.c
1 /******************************************************************************
2 * QLOGIC LINUX SOFTWARE
4 * QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
5 * Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
6 * Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
7 * Copyright (C) 2003-2004 Christoph Hellwig
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
12 * later version.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 ******************************************************************************/
20 #define QLA1280_VERSION "3.27.1"
21 /*****************************************************************************
22 Revision History:
23 Rev 3.27.1, February 8, 2010, Michael Reed
24 - Retain firmware image for error recovery.
25 Rev 3.27, February 10, 2009, Michael Reed
26 - General code cleanup.
27 - Improve error recovery.
28 Rev 3.26, January 16, 2006 Jes Sorensen
29 - Ditch all < 2.6 support
30 Rev 3.25.1, February 10, 2005 Christoph Hellwig
31 - use pci_map_single to map non-S/G requests
32 - remove qla1280_proc_info
33 Rev 3.25, September 28, 2004, Christoph Hellwig
34 - add support for ISP1020/1040
35 - don't include "scsi.h" anymore for 2.6.x
36 Rev 3.24.4 June 7, 2004 Christoph Hellwig
37 - restructure firmware loading, cleanup initialization code
38 - prepare support for ISP1020/1040 chips
39 Rev 3.24.3 January 19, 2004, Jes Sorensen
40 - Handle PCI DMA mask settings correctly
41 - Correct order of error handling in probe_one, free_irq should not
42 be called if request_irq failed
43 Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
44 - Big endian fixes (James)
45 - Remove bogus IOCB content on zero data transfer commands (Andrew)
46 Rev 3.24.1 January 5, 2004, Jes Sorensen
47 - Initialize completion queue to avoid OOPS on probe
48 - Handle interrupts during mailbox testing
49 Rev 3.24 November 17, 2003, Christoph Hellwig
50 - use struct list_head for completion queue
51 - avoid old Scsi_FOO typedefs
52 - cleanup 2.4 compat glue a bit
53 - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
54 - make initialization for memory mapped vs port I/O more similar
55 - remove broken pci config space manipulation
56 - kill more cruft
57 - this is an almost perfect 2.6 scsi driver now! ;)
58 Rev 3.23.39 December 17, 2003, Jes Sorensen
59 - Delete completion queue from srb if mailbox command failed
60 to avoid qla1280_done completing qla1280_error_action's
61 obsolete context
62 - Reduce arguments for qla1280_done
63 Rev 3.23.38 October 18, 2003, Christoph Hellwig
64 - Convert to new-style hotplugable driver for 2.6
65 - Fix missing scsi_unregister/scsi_host_put on HBA removal
66 - Kill some more cruft
67 Rev 3.23.37 October 1, 2003, Jes Sorensen
68 - Make MMIO depend on CONFIG_X86_VISWS instead of yet another
69 random CONFIG option
70 - Clean up locking in probe path
71 Rev 3.23.36 October 1, 2003, Christoph Hellwig
72 - queuecommand only ever receives new commands - clear flags
73 - Reintegrate lost fixes from Linux 2.5
74 Rev 3.23.35 August 14, 2003, Jes Sorensen
75 - Build against 2.6
76 Rev 3.23.34 July 23, 2003, Jes Sorensen
77 - Remove pointless TRUE/FALSE macros
78 - Clean up vchan handling
79 Rev 3.23.33 July 3, 2003, Jes Sorensen
80 - Don't define register access macros before the define that determines MMIO.
81 This just happened to work out on ia64 but not elsewhere.
82 - Don't try and read from the card while it is in reset as
83 it won't respond and causes an MCA
84 Rev 3.23.32 June 23, 2003, Jes Sorensen
85 - Basic support for boot time arguments
86 Rev 3.23.31 June 8, 2003, Jes Sorensen
87 - Reduce boot time messages
88 Rev 3.23.30 June 6, 2003, Jes Sorensen
89 - Do not enable sync/wide/ppr before it has been determined
90 that the target device actually supports it
91 - Enable DMA arbitration for multi channel controllers
92 Rev 3.23.29 June 3, 2003, Jes Sorensen
93 - Port to 2.5.69
94 Rev 3.23.28 June 3, 2003, Jes Sorensen
95 - Eliminate duplicate marker commands on bus resets
96 - Handle outstanding commands appropriately on bus/device resets
97 Rev 3.23.27 May 28, 2003, Jes Sorensen
98 - Remove bogus input queue code, let the Linux SCSI layer do the work
99 - Clean up NVRAM handling, only read it once from the card
100 - Add a number of missing default nvram parameters
101 Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
102 - Use completion queue for mailbox commands instead of busy wait
103 Rev 3.23.25 Beta May 27, 2003, James Bottomley
104 - Migrate to use new error handling code
105 Rev 3.23.24 Beta May 21, 2003, James Bottomley
106 - Big endian support
107 - Cleanup data direction code
108 Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
109 - Switch to using MMIO instead of PIO
110 Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
111 - Fix PCI parity problem with 12160 during reset.
112 Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
113 - Use pci_map_page()/pci_unmap_page() instead of map_single version.
114 Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
115 - Remove < 2.4.x support
116 - Introduce HOST_LOCK to make the spin lock changes portable.
117 - Remove a bunch of idiotic and unnecessary typedef's
118 - Kill all leftovers of target-mode support which never worked anyway
119 Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
120 - Do qla1280_pci_config() before calling request_irq() and
121 request_region()
122 - Use pci_dma_hi32() to handle upper word of DMA addresses instead
123 of large shifts
124 - Hand correct arguments to free_irq() in case of failure
125 Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
126 - Run source through Lindent and clean up the output
127 Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
128 - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
129 Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
130 - Rely on mailbox commands generating interrupts - do not
131 run qla1280_isr() from ql1280_mailbox_command()
132 - Remove device_reg_t
133 - Integrate ql12160_set_target_parameters() with 1280 version
134 - Make qla1280_setup() non static
135 - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
136 sent to the card - this command pauses the firmware!!!
137 Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
138 - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
139 - Remove a pile of pointless and confusing (srb_t **) and
140 (scsi_lu_t *) typecasts
141 - Explicit mark that we do not use the new error handling (for now)
142 - Remove scsi_qla_host_t and use 'struct' instead
143 - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
144 pci_64bit_slot flags which weren't used for anything anyway
145 - Grab host->host_lock while calling qla1280_isr() from abort()
146 - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
147 do not need to save/restore flags in the interrupt handler
148 - Enable interrupts early (before any mailbox access) in preparation
149 for cleaning up the mailbox handling
150 Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
151 - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
152 it with proper use of dprintk().
153 - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
154 a debug level argument to determine if data is to be printed
155 - Add KERN_* info to printk()
156 Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
157 - Significant cosmetic cleanups
158 - Change debug code to use dprintk() and remove #if mess
159 Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
160 - More cosmetic cleanups, fix places treating return as function
161 - use cpu_relax() in qla1280_debounce_register()
162 Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
163 - Make it compile under 2.5.5
164 Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
165 - Do not typecast short * to long * in QL1280BoardTbl, this
166 broke miserably on big endian boxes
167 Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
168 - Remove pre 2.2 hack for checking for reentrance in interrupt handler
169 - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
170 unsigned int to match the types from struct scsi_cmnd
171 Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
172 - Remove bogus timer_t typedef from qla1280.h
173 - Remove obsolete pre 2.2 PCI setup code, use proper #define's
174 for PCI_ values, call pci_set_master()
175 - Fix memleak of qla1280_buffer on module unload
176 - Only compile module parsing code #ifdef MODULE - should be
177 changed to use individual MODULE_PARM's later
178 - Remove dummy_buffer that was never modified nor printed
179 - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
180 #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
181 - Remove \r from print statements, this is Linux, not DOS
182 - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
183 dummy macros
184 - Remove C++ compile hack in header file as Linux drivers are not
185 supposed to be compiled as C++
186 - Kill MS_64BITS macro to make the code more readable
187 - Remove unnecessary flags.in_interrupts bit
188 Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
189 - Don't check for set flags on q->q_flag one by one in qla1280_next()
190 - Check whether the interrupt was generated by the QLA1280 before
191 doing any processing
192 - qla1280_status_entry(): Only zero out part of sense_buffer that
193 is not being copied into
194 - Remove more superfluous typecasts
195 - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
196 Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
197 - Don't walk the entire list in qla1280_putq_t() just to directly
198 grab the pointer to the last element afterwards
199 Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
200 - Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
201 Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
202 - Set dev->max_sectors to 1024
203 Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
204 - Provide compat macros for pci_enable_device(), pci_find_subsys()
205 and scsi_set_pci_device()
206 - Call scsi_set_pci_device() for all devices
207 - Reduce size of kernel version dependent device probe code
208 - Move duplicate probe/init code to separate function
209 - Handle error if qla1280_mem_alloc() fails
210 - Kill OFFSET() macro and use Linux's PCI definitions instead
211 - Kill private structure defining PCI config space (struct config_reg)
212 - Only allocate I/O port region if not in MMIO mode
213 - Remove duplicate (unused) sanity check of size of srb_t
214 Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
215 - Change home-brew memset() implementations to use memset()
216 - Remove all references to COMTRACE() - accessing a PC's COM2 serial
217 port directly is not legal under Linux.
218 Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
219 - Remove pre 2.2 kernel support
220 - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
221 - Fix MMIO access to use readl/writel instead of directly
222 dereferencing pointers
223 - Nuke MSDOS debugging code
224 - Change true/false data types to int from uint8_t
225 - Use int for counters instead of uint8_t etc.
226 - Clean up size & byte order conversion macro usage
227 Rev 3.23 Beta January 11, 2001 BN Qlogic
228 - Added check of device_id when handling non
229 QLA12160s during detect().
230 Rev 3.22 Beta January 5, 2001 BN Qlogic
231 - Changed queue_task() to schedule_task()
232 for kernels 2.4.0 and higher.
233 Note: 2.4.0-testxx kernels released prior to
234 the actual 2.4.0 kernel release on January 2001
235 will get compile/link errors with schedule_task().
236 Please update your kernel to released 2.4.0 level,
237 or comment lines in this file flagged with 3.22
238 to resolve compile/link error of schedule_task().
239 - Added -DCONFIG_SMP in addition to -D__SMP__
240 in Makefile for 2.4.0 builds of driver as module.
241 Rev 3.21 Beta January 4, 2001 BN Qlogic
242 - Changed criteria of 64/32 Bit mode of HBA
243 operation according to BITS_PER_LONG rather
244 than HBA's NVRAM setting of >4Gig memory bit;
245 so that the HBA auto-configures without the need
246 to setup each system individually.
247 Rev 3.20 Beta December 5, 2000 BN Qlogic
248 - Added priority handling to IA-64 onboard SCSI
249 ISP12160 chip for kernels greater than 2.3.18.
250 - Added irqrestore for qla1280_intr_handler.
251 - Enabled /proc/scsi/qla1280 interface.
252 - Clear /proc/scsi/qla1280 counters in detect().
253 Rev 3.19 Beta October 13, 2000 BN Qlogic
254 - Declare driver_template for new kernel
255 (2.4.0 and greater) scsi initialization scheme.
256 - Update /proc/scsi entry for 2.3.18 kernels and
257 above as qla1280
258 Rev 3.18 Beta October 10, 2000 BN Qlogic
259 - Changed scan order of adapters to map
260 the QLA12160 followed by the QLA1280.
261 Rev 3.17 Beta September 18, 2000 BN Qlogic
262 - Removed warnings for 32 bit 2.4.x compiles
263 - Corrected declared size for request and response
264 DMA addresses that are kept in each ha
265 Rev. 3.16 Beta August 25, 2000 BN Qlogic
266 - Corrected 64 bit addressing issue on IA-64
267 where the upper 32 bits were not properly
268 passed to the RISC engine.
269 Rev. 3.15 Beta August 22, 2000 BN Qlogic
270 - Modified qla1280_setup_chip to properly load
271 ISP firmware for greater than 4 Gig memory on IA-64
272 Rev. 3.14 Beta August 16, 2000 BN Qlogic
273 - Added setting of dma_mask to full 64 bit
274 if flags.enable_64bit_addressing is set in NVRAM
275 Rev. 3.13 Beta August 16, 2000 BN Qlogic
276 - Use new PCI DMA mapping APIs for 2.4.x kernel
277 Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
278 - Added check of pci_enable_device to detect() for 2.3.x
279 - Use pci_resource_start() instead of
280 pdev->resource[0].start in detect() for 2.3.x
281 - Updated driver version
282 Rev. 3.11 July 14, 2000 BN Qlogic
283 - Updated SCSI Firmware to following versions:
284 qla1x80: 8.13.08
285 qla1x160: 10.04.08
286 - Updated driver version to 3.11
287 Rev. 3.10 June 23, 2000 BN Qlogic
288 - Added filtering of AMI SubSys Vendor ID devices
289 Rev. 3.9
290 - DEBUG_QLA1280 undefined and new version BN Qlogic
291 Rev. 3.08b May 9, 2000 MD Dell
292 - Added logic to check against AMI subsystem vendor ID
293 Rev. 3.08 May 4, 2000 DG Qlogic
294 - Added logic to check for PCI subsystem ID.
295 Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
296 - Updated SCSI Firmware to following versions:
297 qla12160: 10.01.19
298 qla1280: 8.09.00
299 Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
300 - Internal revision; not released
301 Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
302 - Edit correction for virt_to_bus and PROC.
303 Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
304 - Merge changes from ia64 port.
305 Rev. 3.03 Mar 28, 2000 BN Qlogic
306 - Increase version to reflect new code drop with compile fix
307 of issue with inclusion of linux/spinlock for 2.3 kernels
308 Rev. 3.02 Mar 15, 2000 BN Qlogic
309 - Merge qla1280_proc_info from 2.10 code base
310 Rev. 3.01 Feb 10, 2000 BN Qlogic
311 - Corrected code to compile on a 2.2.x kernel.
312 Rev. 3.00 Jan 17, 2000 DG Qlogic
313 - Added 64-bit support.
314 Rev. 2.07 Nov 9, 1999 DG Qlogic
315 - Added new routine to set target parameters for ISP12160.
316 Rev. 2.06 Sept 10, 1999 DG Qlogic
317 - Added support for ISP12160 Ultra 3 chip.
318 Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
319 - Modified code to remove errors generated when compiling with
320 Cygnus IA64 Compiler.
321 - Changed conversion of pointers to unsigned longs instead of integers.
322 - Changed type of I/O port variables from uint32_t to unsigned long.
323 - Modified OFFSET macro to work with 64-bit as well as 32-bit.
324 - Changed sprintf and printk format specifiers for pointers to %p.
325 - Changed some int to long type casts where needed in sprintf & printk.
326 - Added l modifiers to sprintf and printk format specifiers for longs.
327 - Removed unused local variables.
328 Rev. 1.20 June 8, 1999 DG, Qlogic
329 Changes to support RedHat release 6.0 (kernel 2.2.5).
330 - Added SCSI exclusive access lock (io_request_lock) when accessing
331 the adapter.
332 - Added changes for the new LINUX interface template. Some new error
333 handling routines have been added to the template, but for now we
334 will use the old ones.
335 - Initial Beta Release.
336 *****************************************************************************/
339 #include <linux/module.h>
341 #include <linux/types.h>
342 #include <linux/string.h>
343 #include <linux/errno.h>
344 #include <linux/kernel.h>
345 #include <linux/ioport.h>
346 #include <linux/delay.h>
347 #include <linux/timer.h>
348 #include <linux/pci.h>
349 #include <linux/proc_fs.h>
350 #include <linux/stat.h>
351 #include <linux/pci_ids.h>
352 #include <linux/interrupt.h>
353 #include <linux/init.h>
354 #include <linux/dma-mapping.h>
355 #include <linux/firmware.h>
357 #include <asm/io.h>
358 #include <asm/irq.h>
359 #include <asm/byteorder.h>
360 #include <asm/processor.h>
361 #include <asm/types.h>
363 #include <scsi/scsi.h>
364 #include <scsi/scsi_cmnd.h>
365 #include <scsi/scsi_device.h>
366 #include <scsi/scsi_host.h>
367 #include <scsi/scsi_tcq.h>
369 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
370 #include <asm/sn/io.h>
371 #endif
375 * Compile time Options:
376 * 0 - Disable and 1 - Enable
378 #define DEBUG_QLA1280_INTR 0
379 #define DEBUG_PRINT_NVRAM 0
380 #define DEBUG_QLA1280 0
382 #define MEMORY_MAPPED_IO 1
384 #include "qla1280.h"
386 #ifndef BITS_PER_LONG
387 #error "BITS_PER_LONG not defined!"
388 #endif
389 #if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
390 #define QLA_64BIT_PTR 1
391 #endif
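/*
 * The upper half of a DMA address is extracted below with two 16-bit
 * shifts rather than a single ">> 32", presumably so the expression
 * stays well defined even when dma_addr_t is only 32 bits wide (where
 * a full 32-bit shift would exceed the width of the type).
 */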
393 #ifdef QLA_64BIT_PTR
394 #define pci_dma_hi32(a) ((a >> 16) >> 16)
395 #else
396 #define pci_dma_hi32(a) 0
397 #endif
398 #define pci_dma_lo32(a) (a & 0xffffffff)
400 #define NVRAM_DELAY() udelay(500) /* 500 microseconds */
402 #if defined(__ia64__) && !defined(ia64_platform_is)
403 #define ia64_platform_is(foo) (!strcmp(foo, platform_name))
404 #endif
407 #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
408 #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
409 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
410 #define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
411 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
414 static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
415 static void qla1280_remove_one(struct pci_dev *);
418 * QLogic Driver Support Function Prototypes.
420 static void qla1280_done(struct scsi_qla_host *);
421 static int qla1280_get_token(char *);
422 static int qla1280_setup(char *s) __init;
425 * QLogic ISP1280 Hardware Support Function Prototypes.
427 static int qla1280_load_firmware(struct scsi_qla_host *);
428 static int qla1280_init_rings(struct scsi_qla_host *);
429 static int qla1280_nvram_config(struct scsi_qla_host *);
430 static int qla1280_mailbox_command(struct scsi_qla_host *,
431 uint8_t, uint16_t *);
432 static int qla1280_bus_reset(struct scsi_qla_host *, int);
433 static int qla1280_device_reset(struct scsi_qla_host *, int, int);
434 static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
435 static int qla1280_abort_isp(struct scsi_qla_host *);
436 #ifdef QLA_64BIT_PTR
437 static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
438 #else
439 static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
440 #endif
441 static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
442 static void qla1280_poll(struct scsi_qla_host *);
443 static void qla1280_reset_adapter(struct scsi_qla_host *);
444 static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
445 static void qla1280_isp_cmd(struct scsi_qla_host *);
446 static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
447 static void qla1280_rst_aen(struct scsi_qla_host *);
448 static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
449 struct list_head *);
450 static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
451 struct list_head *);
452 static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
453 static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
454 static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
455 static request_t *qla1280_req_pkt(struct scsi_qla_host *);
456 static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
457 unsigned int);
458 static void qla1280_get_target_parameters(struct scsi_qla_host *,
459 struct scsi_device *);
460 static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
463 static struct qla_driver_setup driver_setup;
466 * convert scsi data direction to request_t control flags
468 static inline uint16_t
469 qla1280_data_direction(struct scsi_cmnd *cmnd)
471 switch(cmnd->sc_data_direction) {
472 case DMA_FROM_DEVICE:
473 return BIT_5;
474 case DMA_TO_DEVICE:
475 return BIT_6;
476 case DMA_BIDIRECTIONAL:
477 return BIT_5 | BIT_6;
479 * We could BUG() on the default case here if one of the four cases isn't
480 * met, but then again if we receive something like that from the
481 * SCSI layer we have more serious problems. This shuts up GCC.
483 case DMA_NONE:
484 default:
485 return 0;
489 #if DEBUG_QLA1280
490 static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
491 static void __qla1280_dump_buffer(char *, int);
492 #endif
496 * insmod needs to find the variable and make it point to something
498 #ifdef MODULE
499 static char *qla1280;
501 /* insmod qla1280 qla1280=verbose */
502 module_param(qla1280, charp, 0);
503 #else
504 __setup("qla1280=", qla1280_setup);
505 #endif
509 * We use the scsi_pointer structure that's included with each scsi_command
510 * to overlay our struct srb over it. qla1280_init() checks that a srb is not
511 * bigger than a scsi_pointer.
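 * A typical use, as in qla1280_queuecommand_lck() below, is:
 *     struct srb *sp = (struct srb *)CMD_SP(cmd);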
514 #define CMD_SP(Cmnd) &Cmnd->SCp
515 #define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
516 #define CMD_CDBP(Cmnd) Cmnd->cmnd
517 #define CMD_SNSP(Cmnd) Cmnd->sense_buffer
518 #define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
519 #define CMD_RESULT(Cmnd) Cmnd->result
520 #define CMD_HANDLE(Cmnd) Cmnd->host_scribble
521 #define CMD_REQUEST(Cmnd) Cmnd->request->cmd
523 #define CMD_HOST(Cmnd) Cmnd->device->host
524 #define SCSI_BUS_32(Cmnd) Cmnd->device->channel
525 #define SCSI_TCN_32(Cmnd) Cmnd->device->id
526 #define SCSI_LUN_32(Cmnd) Cmnd->device->lun
529 /*****************************************/
530 /* ISP Boards supported by this driver */
531 /*****************************************/
533 struct qla_boards {
534 char *name; /* Board ID String */
535 int numPorts; /* Number of SCSI ports */
536 int fw_index; /* index into qla1280_fw_tbl for firmware */
539 /* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
540 static struct pci_device_id qla1280_pci_tbl[] = {
541 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
542 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
543 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
544 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
545 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
546 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
547 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
548 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
549 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
550 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
551 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
552 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
553 {0,}
555 MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
557 DEFINE_MUTEX(qla1280_firmware_mutex);
559 struct qla_fw {
560 char *fwname;
561 const struct firmware *fw;
564 #define QL_NUM_FW_IMAGES 3
566 struct qla_fw qla1280_fw_tbl[QL_NUM_FW_IMAGES] = {
567 {"qlogic/1040.bin", NULL}, /* image 0 */
568 {"qlogic/1280.bin", NULL}, /* image 1 */
569 {"qlogic/12160.bin", NULL}, /* image 2 */
572 /* NOTE: Order of boards in this table must match order in qla1280_pci_tbl */
573 static struct qla_boards ql1280_board_tbl[] = {
574 {.name = "QLA12160", .numPorts = 2, .fw_index = 2},
575 {.name = "QLA1040" , .numPorts = 1, .fw_index = 0},
576 {.name = "QLA1080" , .numPorts = 1, .fw_index = 1},
577 {.name = "QLA1240" , .numPorts = 2, .fw_index = 1},
578 {.name = "QLA1280" , .numPorts = 2, .fw_index = 1},
579 {.name = "QLA10160", .numPorts = 1, .fw_index = 2},
580 {.name = " ", .numPorts = 0, .fw_index = -1},
583 static int qla1280_verbose = 1;
585 #if DEBUG_QLA1280
586 static int ql_debug_level = 1;
587 #define dprintk(level, format, a...) \
588 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
589 #define qla1280_dump_buffer(level, buf, size) \
590 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
591 #define qla1280_print_scsi_cmd(level, cmd) \
592 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
593 #else
594 #define ql_debug_level 0
595 #define dprintk(level, format, a...) do{}while(0)
596 #define qla1280_dump_buffer(a, b, c) do{}while(0)
597 #define qla1280_print_scsi_cmd(a, b) do{}while(0)
598 #endif
600 #define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
601 #define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
602 #define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
603 #define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
606 static int qla1280_read_nvram(struct scsi_qla_host *ha)
608 uint16_t *wptr;
609 uint8_t chksum;
610 int cnt, i;
611 struct nvram *nv;
613 ENTER("qla1280_read_nvram");
615 if (driver_setup.no_nvram)
616 return 1;
618 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
620 wptr = (uint16_t *)&ha->nvram;
621 nv = &ha->nvram;
622 chksum = 0;
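/*
 * A byte-wise checksum is accumulated while the image is read; the
 * NVRAM contents are only treated as valid if the ID bytes spell
 * "ISP " and the sum of all bytes comes out to zero.
 */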
623 for (cnt = 0; cnt < 3; cnt++) {
624 *wptr = qla1280_get_nvram_word(ha, cnt);
625 chksum += *wptr & 0xff;
626 chksum += (*wptr >> 8) & 0xff;
627 wptr++;
630 if (nv->id0 != 'I' || nv->id1 != 'S' ||
631 nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
632 dprintk(2, "Invalid nvram ID or version!\n");
633 chksum = 1;
634 } else {
635 for (; cnt < sizeof(struct nvram); cnt++) {
636 *wptr = qla1280_get_nvram_word(ha, cnt);
637 chksum += *wptr & 0xff;
638 chksum += (*wptr >> 8) & 0xff;
639 wptr++;
643 dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
644 " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
645 nv->version);
648 if (chksum) {
649 if (!driver_setup.no_nvram)
650 printk(KERN_WARNING "scsi(%ld): Unable to identify or "
651 "validate NVRAM checksum, using default "
652 "settings\n", ha->host_no);
653 ha->nvram_valid = 0;
654 } else
655 ha->nvram_valid = 1;
657 /* The firmware interface is, um, interesting, in that the
658 * actual firmware image on the chip is little endian, thus,
659 * the process of taking that image to the CPU would end up
660 * little endian. However, the firmware interface requires it
661 * to be read a word (two bytes) at a time.
663 * The net result of this would be that the word (and
664 * doubleword) quantities in the firmware would be correct, but
665 * the bytes would be pairwise reversed. Since most of the
666 * firmware quantities are, in fact, bytes, we do an extra
667 * le16_to_cpu() in the firmware read routine.
669 * The upshot of all this is that the bytes in the firmware
670 * are in the correct places, but the 16 and 32 bit quantities
671 * are still in little endian format. We fix that up below by
672 * doing extra reverses on them */
673 nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
674 nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
675 for(i = 0; i < MAX_BUSES; i++) {
676 nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
677 nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
679 dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
680 LEAVE("qla1280_read_nvram");
682 return chksum;
685 /**************************************************************************
686 * qla1280_info
687 * Return a string describing the driver.
688 **************************************************************************/
689 static const char *
690 qla1280_info(struct Scsi_Host *host)
692 static char qla1280_scsi_name_buffer[125];
693 char *bp;
694 struct scsi_qla_host *ha;
695 struct qla_boards *bdp;
697 bp = &qla1280_scsi_name_buffer[0];
698 ha = (struct scsi_qla_host *)host->hostdata;
699 bdp = &ql1280_board_tbl[ha->devnum];
700 memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
702 sprintf (bp,
703 "QLogic %s PCI to SCSI Host Adapter\n"
704 " Firmware version: %2d.%02d.%02d, Driver version %s",
705 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
706 QLA1280_VERSION);
707 return bp;
710 /**************************************************************************
711 * qla1280_queuecommand
712 * Queue a command to the controller.
714 * Note:
715 * The mid-level driver tries to ensure that queuecommand never gets invoked
716 * concurrently with itself or the interrupt handler (although the
717 * interrupt handler may call this routine as part of request-completion
718 * handling). Unfortunately, it sometimes calls the scheduler in interrupt
719 * context which is a big NO! NO!.
720 **************************************************************************/
721 static int
722 qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
724 struct Scsi_Host *host = cmd->device->host;
725 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
726 struct srb *sp = (struct srb *)CMD_SP(cmd);
727 int status;
729 cmd->scsi_done = fn;
730 sp->cmd = cmd;
731 sp->flags = 0;
732 sp->wait = NULL;
733 CMD_HANDLE(cmd) = (unsigned char *)NULL;
735 qla1280_print_scsi_cmd(5, cmd);
737 #ifdef QLA_64BIT_PTR
739 * Using 64 bit commands if the PCI bridge doesn't support it is a
740 * bit wasteful; however, this should really only happen if one's
741 * PCI controller is completely broken, like the BCM1250. For
742 * sane hardware this is not an issue.
744 status = qla1280_64bit_start_scsi(ha, sp);
745 #else
746 status = qla1280_32bit_start_scsi(ha, sp);
747 #endif
748 return status;
751 static DEF_SCSI_QCMD(qla1280_queuecommand)
753 enum action {
754 ABORT_COMMAND,
755 DEVICE_RESET,
756 BUS_RESET,
757 ADAPTER_RESET,
761 static void qla1280_mailbox_timeout(unsigned long __data)
763 struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
764 struct device_reg __iomem *reg;
765 reg = ha->iobase;
767 ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
768 printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
769 "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
770 RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
771 complete(ha->mailbox_wait);
774 static int
775 _qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
776 struct completion *wait)
778 int status = FAILED;
779 struct scsi_cmnd *cmd = sp->cmd;
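/*
 * The host lock is dropped while sleeping: the interrupt handler takes
 * the same lock and is what ends up completing sp->wait (via
 * qla1280_done), so waiting with the lock held would deadlock.
 */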
781 spin_unlock_irq(ha->host->host_lock);
782 wait_for_completion_timeout(wait, 4*HZ);
783 spin_lock_irq(ha->host->host_lock);
784 sp->wait = NULL;
785 if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
786 status = SUCCESS;
787 (*cmd->scsi_done)(cmd);
789 return status;
792 static int
793 qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
795 DECLARE_COMPLETION_ONSTACK(wait);
797 sp->wait = &wait;
798 return _qla1280_wait_for_single_command(ha, sp, &wait);
801 static int
802 qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
804 int cnt;
805 int status;
806 struct srb *sp;
807 struct scsi_cmnd *cmd;
809 status = SUCCESS;
812 * Wait for all commands with the designated bus/target
813 * to be completed by the firmware
815 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
816 sp = ha->outstanding_cmds[cnt];
817 if (sp) {
818 cmd = sp->cmd;
820 if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
821 continue;
822 if (target >= 0 && SCSI_TCN_32(cmd) != target)
823 continue;
825 status = qla1280_wait_for_single_command(ha, sp);
826 if (status == FAILED)
827 break;
830 return status;
833 /**************************************************************************
834 * qla1280_error_action
835 * The function will attempt to perform a specified error action and
836 * wait for the results (or time out).
838 * Input:
839 * cmd = Linux SCSI command packet of the command that caused the
840 * bus reset.
841 * action = error action to take (see enum action)
843 * Returns:
844 * SUCCESS or FAILED
846 **************************************************************************/
847 static int
848 qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
850 struct scsi_qla_host *ha;
851 int bus, target, lun;
852 struct srb *sp;
853 int i, found;
854 int result=FAILED;
855 int wait_for_bus=-1;
856 int wait_for_target = -1;
857 DECLARE_COMPLETION_ONSTACK(wait);
859 ENTER("qla1280_error_action");
861 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
862 sp = (struct srb *)CMD_SP(cmd);
863 bus = SCSI_BUS_32(cmd);
864 target = SCSI_TCN_32(cmd);
865 lun = SCSI_LUN_32(cmd);
867 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
868 RD_REG_WORD(&ha->iobase->istatus));
870 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
871 RD_REG_WORD(&ha->iobase->host_cmd),
872 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
874 if (qla1280_verbose)
875 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
876 "Handle=0x%p, action=0x%x\n",
877 ha->host_no, cmd, CMD_HANDLE(cmd), action);
880 * Check to see if we have the command in the outstanding_cmds[]
881 * array. If not then it must have completed before this error
882 * action was initiated. If the error_action isn't ABORT_COMMAND
883 * then the driver must proceed with the requested action.
885 found = -1;
886 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
887 if (sp == ha->outstanding_cmds[i]) {
888 found = i;
889 sp->wait = &wait; /* we'll wait for it to complete */
890 break;
894 if (found < 0) { /* driver doesn't have command */
895 result = SUCCESS;
896 if (qla1280_verbose) {
897 printk(KERN_INFO
898 "scsi(%ld:%d:%d:%d): specified command has "
899 "already completed.\n", ha->host_no, bus,
900 target, lun);
904 switch (action) {
906 case ABORT_COMMAND:
907 dprintk(1, "qla1280: RISC aborting command\n");
909 * The abort might fail due to a race when the host_lock
910 * is released to issue the abort. As such, we
911 * don't bother to check the return status.
913 if (found >= 0)
914 qla1280_abort_command(ha, sp, found);
915 break;
917 case DEVICE_RESET:
918 if (qla1280_verbose)
919 printk(KERN_INFO
920 "scsi(%ld:%d:%d:%d): Queueing device reset "
921 "command.\n", ha->host_no, bus, target, lun);
922 if (qla1280_device_reset(ha, bus, target) == 0) {
923 /* issued device reset, set wait conditions */
924 wait_for_bus = bus;
925 wait_for_target = target;
927 break;
929 case BUS_RESET:
930 if (qla1280_verbose)
931 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
932 "reset.\n", ha->host_no, bus);
933 if (qla1280_bus_reset(ha, bus) == 0) {
934 /* issued bus reset, set wait conditions */
935 wait_for_bus = bus;
937 break;
939 case ADAPTER_RESET:
940 default:
941 if (qla1280_verbose) {
942 printk(KERN_INFO
943 "scsi(%ld): Issued ADAPTER RESET\n",
944 ha->host_no);
945 printk(KERN_INFO "scsi(%ld): I/O processing will "
946 "continue automatically\n", ha->host_no);
948 ha->flags.reset_active = 1;
950 if (qla1280_abort_isp(ha) != 0) { /* it's dead */
951 result = FAILED;
954 ha->flags.reset_active = 0;
958 * At this point, the host_lock has been released and retaken
959 * by the issuance of the mailbox command.
960 * Wait for the command passed in by the mid-layer if it
961 * was found by the driver. It might have been returned
962 * between eh recovery steps, hence the check of the "found"
963 * variable.
966 if (found >= 0)
967 result = _qla1280_wait_for_single_command(ha, sp, &wait);
969 if (action == ABORT_COMMAND && result != SUCCESS) {
970 printk(KERN_WARNING
971 "scsi(%li:%i:%i:%i): "
972 "Unable to abort command!\n",
973 ha->host_no, bus, target, lun);
977 * If the command passed in by the mid-layer has been
978 * returned by the board, then wait for any additional
979 * commands which are supposed to complete based upon
980 * the error action.
982 * All commands are unconditionally returned during a
983 * call to qla1280_abort_isp(), ADAPTER_RESET. No need
984 * to wait for them.
986 if (result == SUCCESS && wait_for_bus >= 0) {
987 result = qla1280_wait_for_pending_commands(ha,
988 wait_for_bus, wait_for_target);
991 dprintk(1, "RESET returning %d\n", result);
993 LEAVE("qla1280_error_action");
994 return result;
997 /**************************************************************************
998 * qla1280_abort
999 * Abort the specified SCSI command(s).
1000 **************************************************************************/
1001 static int
1002 qla1280_eh_abort(struct scsi_cmnd * cmd)
1004 int rc;
1006 spin_lock_irq(cmd->device->host->host_lock);
1007 rc = qla1280_error_action(cmd, ABORT_COMMAND);
1008 spin_unlock_irq(cmd->device->host->host_lock);
1010 return rc;
1013 /**************************************************************************
1014 * qla1280_device_reset
1015 * Reset the specified SCSI device
1016 **************************************************************************/
1017 static int
1018 qla1280_eh_device_reset(struct scsi_cmnd *cmd)
1020 int rc;
1022 spin_lock_irq(cmd->device->host->host_lock);
1023 rc = qla1280_error_action(cmd, DEVICE_RESET);
1024 spin_unlock_irq(cmd->device->host->host_lock);
1026 return rc;
1029 /**************************************************************************
1030 * qla1280_bus_reset
1031 * Reset the specified bus.
1032 **************************************************************************/
1033 static int
1034 qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1036 int rc;
1038 spin_lock_irq(cmd->device->host->host_lock);
1039 rc = qla1280_error_action(cmd, BUS_RESET);
1040 spin_unlock_irq(cmd->device->host->host_lock);
1042 return rc;
1045 /**************************************************************************
1046 * qla1280_adapter_reset
1047 * Reset the specified adapter (both channels)
1048 **************************************************************************/
1049 static int
1050 qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1052 int rc;
1054 spin_lock_irq(cmd->device->host->host_lock);
1055 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1056 spin_unlock_irq(cmd->device->host->host_lock);
1058 return rc;
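/*
 * A worked example of the geometry heuristic in qla1280_biosparam()
 * below: an 8 GB disk (16777216 512-byte sectors) gives
 * 16777216 / (64 * 32) = 8192 cylinders with the default 64/32 layout,
 * which exceeds 1024, so the translation falls back to 255 heads and
 * 63 sectors, yielding 16777216 / (255 * 63) = 1044 cylinders.
 */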
1061 static int
1062 qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1063 sector_t capacity, int geom[])
1065 int heads, sectors, cylinders;
1067 heads = 64;
1068 sectors = 32;
1069 cylinders = (unsigned long)capacity / (heads * sectors);
1070 if (cylinders > 1024) {
1071 heads = 255;
1072 sectors = 63;
1073 cylinders = (unsigned long)capacity / (heads * sectors);
1074 /* if (cylinders > 1023)
1075 cylinders = 1023; */
1078 geom[0] = heads;
1079 geom[1] = sectors;
1080 geom[2] = cylinders;
1082 return 0;
1086 /* disable risc and host interrupts */
1087 static inline void
1088 qla1280_disable_intrs(struct scsi_qla_host *ha)
1090 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1091 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1094 /* enable risc and host interrupts */
1095 static inline void
1096 qla1280_enable_intrs(struct scsi_qla_host *ha)
1098 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1099 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1102 /**************************************************************************
1103 * qla1280_intr_handler
1104 * Handles the H/W interrupt
1105 **************************************************************************/
1106 static irqreturn_t
1107 qla1280_intr_handler(int irq, void *dev_id)
1109 struct scsi_qla_host *ha;
1110 struct device_reg __iomem *reg;
1111 u16 data;
1112 int handled = 0;
1114 ENTER_INTR ("qla1280_intr_handler");
1115 ha = (struct scsi_qla_host *)dev_id;
1117 spin_lock(ha->host->host_lock);
1119 ha->isr_count++;
1120 reg = ha->iobase;
1122 qla1280_disable_intrs(ha);
1124 data = qla1280_debounce_register(&reg->istatus);
1125 /* Check for pending interrupts. */
1126 if (data & RISC_INT) {
1127 qla1280_isr(ha, &ha->done_q);
1128 handled = 1;
1130 if (!list_empty(&ha->done_q))
1131 qla1280_done(ha);
1133 spin_unlock(ha->host->host_lock);
1135 qla1280_enable_intrs(ha);
1137 LEAVE_INTR("qla1280_intr_handler");
1138 return IRQ_RETVAL(handled);
1142 static int
1143 qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1145 uint8_t mr;
1146 uint16_t mb[MAILBOX_REGISTER_COUNT];
1147 struct nvram *nv;
1148 int status, lun;
1150 nv = &ha->nvram;
1152 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
1154 /* Set Target Parameters. */
1155 mb[0] = MBC_SET_TARGET_PARAMETERS;
1156 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1157 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1158 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1159 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1160 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1161 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1162 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1163 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1164 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
1166 if (IS_ISP1x160(ha)) {
1167 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1168 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1169 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1170 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1171 mr |= BIT_6;
1172 } else {
1173 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1175 mb[3] |= nv->bus[bus].target[target].sync_period;
1177 status = qla1280_mailbox_command(ha, mr, mb);
1179 /* Set Device Queue Parameters. */
1180 for (lun = 0; lun < MAX_LUNS; lun++) {
1181 mb[0] = MBC_SET_DEVICE_QUEUE;
1182 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1183 mb[1] |= lun;
1184 mb[2] = nv->bus[bus].max_queue_depth;
1185 mb[3] = nv->bus[bus].target[target].execution_throttle;
1186 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1189 if (status)
1190 printk(KERN_WARNING "scsi(%ld:%i:%i): "
1191 "qla1280_set_target_parameters() failed\n",
1192 ha->host_no, bus, target);
1193 return status;
1197 /**************************************************************************
1198 * qla1280_slave_configure
1200 * Description:
1201 * Determines the queue depth for a given device. If the device
1202 * supports tagged queueing and tagged queueing is enabled for its
1203 * bus, the queue depth is set to the adapter's per-bus high-water
1204 * mark (bus_settings[bus].hiwat); otherwise a small default depth
1205 * is used. The routine also applies the driver's sync/wide/ppr
1206 * overrides and updates the target parameters.
1207 **************************************************************************/
1208 static int
1209 qla1280_slave_configure(struct scsi_device *device)
1211 struct scsi_qla_host *ha;
1212 int default_depth = 3;
1213 int bus = device->channel;
1214 int target = device->id;
1215 int status = 0;
1216 struct nvram *nv;
1217 unsigned long flags;
1219 ha = (struct scsi_qla_host *)device->host->hostdata;
1220 nv = &ha->nvram;
1222 if (qla1280_check_for_dead_scsi_bus(ha, bus))
1223 return 1;
1225 if (device->tagged_supported &&
1226 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1227 scsi_change_queue_depth(device, ha->bus_settings[bus].hiwat);
1228 } else {
1229 scsi_change_queue_depth(device, default_depth);
1232 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1233 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1234 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1236 if (driver_setup.no_sync ||
1237 (driver_setup.sync_mask &&
1238 (~driver_setup.sync_mask & (1 << target))))
1239 nv->bus[bus].target[target].parameter.enable_sync = 0;
1240 if (driver_setup.no_wide ||
1241 (driver_setup.wide_mask &&
1242 (~driver_setup.wide_mask & (1 << target))))
1243 nv->bus[bus].target[target].parameter.enable_wide = 0;
1244 if (IS_ISP1x160(ha)) {
1245 if (driver_setup.no_ppr ||
1246 (driver_setup.ppr_mask &&
1247 (~driver_setup.ppr_mask & (1 << target))))
1248 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1251 spin_lock_irqsave(ha->host->host_lock, flags);
1252 if (nv->bus[bus].target[target].parameter.enable_sync)
1253 status = qla1280_set_target_parameters(ha, bus, target);
1254 qla1280_get_target_parameters(ha, device);
1255 spin_unlock_irqrestore(ha->host->host_lock, flags);
1256 return status;
1261 * qla1280_done
1262 * Process completed commands.
1264 * Input:
1265 * ha = adapter block pointer.
1267 static void
1268 qla1280_done(struct scsi_qla_host *ha)
1270 struct srb *sp;
1271 struct list_head *done_q;
1272 int bus, target, lun;
1273 struct scsi_cmnd *cmd;
1275 ENTER("qla1280_done");
1277 done_q = &ha->done_q;
1279 while (!list_empty(done_q)) {
1280 sp = list_entry(done_q->next, struct srb, list);
1282 list_del(&sp->list);
1284 cmd = sp->cmd;
1285 bus = SCSI_BUS_32(cmd);
1286 target = SCSI_TCN_32(cmd);
1287 lun = SCSI_LUN_32(cmd);
1289 switch ((CMD_RESULT(cmd) >> 16)) {
1290 case DID_RESET:
1291 /* Issue marker command. */
1292 if (!ha->flags.abort_isp_active)
1293 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1294 break;
1295 case DID_ABORT:
1296 sp->flags &= ~SRB_ABORT_PENDING;
1297 sp->flags |= SRB_ABORTED;
1298 break;
1299 default:
1300 break;
1303 /* Release memory used for this I/O */
1304 scsi_dma_unmap(cmd);
1306 /* Call the mid-level driver interrupt handler */
1307 ha->actthreads--;
1309 if (sp->wait == NULL)
1310 (*(cmd)->scsi_done)(cmd);
1311 else
1312 complete(sp->wait);
1314 LEAVE("qla1280_done");
1318 * Translates an ISP error to a Linux SCSI error
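 *
 * The return value is laid out like the midlayer result word: the SCSI
 * status in the low byte and the host byte shifted left by 16.  For
 * example, a CS_COMPLETE entry carrying a CHECK CONDITION scsi status
 * yields 0x0002, while a command that fails with DID_ERROR alone yields
 * 0x00070000.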
1320 static int
1321 qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1323 int host_status = DID_ERROR;
1324 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1325 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1326 uint32_t residual_length = le32_to_cpu(sts->residual_length);
1327 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1328 #if DEBUG_QLA1280_INTR
1329 static char *reason[] = {
1330 "DID_OK",
1331 "DID_NO_CONNECT",
1332 "DID_BUS_BUSY",
1333 "DID_TIME_OUT",
1334 "DID_BAD_TARGET",
1335 "DID_ABORT",
1336 "DID_PARITY",
1337 "DID_ERROR",
1338 "DID_RESET",
1339 "DID_BAD_INTR"
1341 #endif /* DEBUG_QLA1280_INTR */
1343 ENTER("qla1280_return_status");
1345 #if DEBUG_QLA1280_INTR
1347 dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
1348 comp_status);
1350 #endif
1352 switch (comp_status) {
1353 case CS_COMPLETE:
1354 host_status = DID_OK;
1355 break;
1357 case CS_INCOMPLETE:
1358 if (!(state_flags & SF_GOT_BUS))
1359 host_status = DID_NO_CONNECT;
1360 else if (!(state_flags & SF_GOT_TARGET))
1361 host_status = DID_BAD_TARGET;
1362 else if (!(state_flags & SF_SENT_CDB))
1363 host_status = DID_ERROR;
1364 else if (!(state_flags & SF_TRANSFERRED_DATA))
1365 host_status = DID_ERROR;
1366 else if (!(state_flags & SF_GOT_STATUS))
1367 host_status = DID_ERROR;
1368 else if (!(state_flags & SF_GOT_SENSE))
1369 host_status = DID_ERROR;
1370 break;
1372 case CS_RESET:
1373 host_status = DID_RESET;
1374 break;
1376 case CS_ABORTED:
1377 host_status = DID_ABORT;
1378 break;
1380 case CS_TIMEOUT:
1381 host_status = DID_TIME_OUT;
1382 break;
1384 case CS_DATA_OVERRUN:
1385 dprintk(2, "Data overrun 0x%x\n", residual_length);
1386 dprintk(2, "qla1280_return_status: response packet data\n");
1387 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1388 host_status = DID_ERROR;
1389 break;
1391 case CS_DATA_UNDERRUN:
1392 if ((scsi_bufflen(cp) - residual_length) <
1393 cp->underflow) {
1394 printk(KERN_WARNING
1395 "scsi: Underflow detected - retrying "
1396 "command.\n");
1397 host_status = DID_ERROR;
1398 } else {
1399 scsi_set_resid(cp, residual_length);
1400 host_status = DID_OK;
1402 break;
1404 default:
1405 host_status = DID_ERROR;
1406 break;
1409 #if DEBUG_QLA1280_INTR
1410 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1411 reason[host_status], scsi_status);
1412 #endif
1414 LEAVE("qla1280_return_status");
1416 return (scsi_status & 0xff) | (host_status << 16);
1419 /****************************************************************************/
1420 /* QLogic ISP1280 Hardware Support Functions. */
1421 /****************************************************************************/
1424 * qla1280_initialize_adapter
1425 * Initialize board.
1427 * Input:
1428 * ha = adapter block pointer.
1430 * Returns:
1431 * 0 = success
1433 static int
1434 qla1280_initialize_adapter(struct scsi_qla_host *ha)
1436 struct device_reg __iomem *reg;
1437 int status;
1438 int bus;
1439 unsigned long flags;
1441 ENTER("qla1280_initialize_adapter");
1443 /* Clear adapter flags. */
1444 ha->flags.online = 0;
1445 ha->flags.disable_host_adapter = 0;
1446 ha->flags.reset_active = 0;
1447 ha->flags.abort_isp_active = 0;
1449 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
1450 if (ia64_platform_is("sn2")) {
1451 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
1452 "dual channel lockup workaround\n", ha->host_no);
1453 ha->flags.use_pci_vchannel = 1;
1454 driver_setup.no_nvram = 1;
1456 #endif
1458 /* TODO: implement support for the 1040 nvram format */
1459 if (IS_ISP1040(ha))
1460 driver_setup.no_nvram = 1;
1462 dprintk(1, "Configure PCI space for adapter...\n");
1464 reg = ha->iobase;
1466 /* Ensure mailbox registers are free. */
1467 WRT_REG_WORD(&reg->semaphore, 0);
1468 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
1469 WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
1470 RD_REG_WORD(&reg->host_cmd);
1472 if (qla1280_read_nvram(ha)) {
1473 dprintk(2, "qla1280_initialize_adapter: failed to read "
1474 "NVRAM\n");
1478 * It's necessary to grab the spin lock here as qla1280_mailbox_command
1479 * needs to be able to drop the lock unconditionally to wait
1480 * for completion.
1482 spin_lock_irqsave(ha->host->host_lock, flags);
1484 status = qla1280_load_firmware(ha);
1485 if (status) {
1486 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1487 ha->host_no);
1488 goto out;
1491 /* Setup adapter based on NVRAM parameters. */
1492 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1493 qla1280_nvram_config(ha);
1495 if (ha->flags.disable_host_adapter) {
1496 status = 1;
1497 goto out;
1500 status = qla1280_init_rings(ha);
1501 if (status)
1502 goto out;
1504 /* Issue a SCSI bus reset; if we can't reset it twice the bus is dead */
1505 for (bus = 0; bus < ha->ports; bus++) {
1506 if (!ha->bus_settings[bus].disable_scsi_reset &&
1507 qla1280_bus_reset(ha, bus) &&
1508 qla1280_bus_reset(ha, bus))
1509 ha->bus_settings[bus].scsi_bus_dead = 1;
1512 ha->flags.online = 1;
1513 out:
1514 spin_unlock_irqrestore(ha->host->host_lock, flags);
1516 if (status)
1517 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1519 LEAVE("qla1280_initialize_adapter");
1520 return status;
1524 * qla1280_request_firmware
1525 * Acquire firmware for chip. Retain in memory
1526 * for error recovery.
1528 * Input:
1529 * ha = adapter block pointer.
1531 * Returns:
1532 * Pointer to firmware image or an error code
1533 * cast to pointer via ERR_PTR().
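 * Callers check the result with IS_ERR()/PTR_ERR(), as in
 * qla1280_load_firmware_pio() and qla1280_load_firmware_dma().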
1535 static const struct firmware *
1536 qla1280_request_firmware(struct scsi_qla_host *ha)
1538 const struct firmware *fw;
1539 int err;
1540 int index;
1541 char *fwname;
1543 spin_unlock_irq(ha->host->host_lock);
1544 mutex_lock(&qla1280_firmware_mutex);
1546 index = ql1280_board_tbl[ha->devnum].fw_index;
1547 fw = qla1280_fw_tbl[index].fw;
1548 if (fw)
1549 goto out;
1551 fwname = qla1280_fw_tbl[index].fwname;
1552 err = request_firmware(&fw, fwname, &ha->pdev->dev);
1554 if (err) {
1555 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1556 fwname, err);
1557 fw = ERR_PTR(err);
1558 goto unlock;
1560 if ((fw->size % 2) || (fw->size < 6)) {
1561 printk(KERN_ERR "Invalid firmware length %zu in image \"%s\"\n",
1562 fw->size, fwname);
1563 release_firmware(fw);
1564 fw = ERR_PTR(-EINVAL);
1565 goto unlock;
1568 qla1280_fw_tbl[index].fw = fw;
1570 out:
1571 ha->fwver1 = fw->data[0];
1572 ha->fwver2 = fw->data[1];
1573 ha->fwver3 = fw->data[2];
1574 unlock:
1575 mutex_unlock(&qla1280_firmware_mutex);
1576 spin_lock_irq(ha->host->host_lock);
1577 return fw;
1581 * Chip diagnostics
1582 * Test chip for proper operation.
1584 * Input:
1585 * ha = adapter block pointer.
1587 * Returns:
1588 * 0 = success.
1590 static int
1591 qla1280_chip_diag(struct scsi_qla_host *ha)
1593 uint16_t mb[MAILBOX_REGISTER_COUNT];
1594 struct device_reg __iomem *reg = ha->iobase;
1595 int status = 0;
1596 int cnt;
1597 uint16_t data;
1598 dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
1600 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
1602 /* Soft reset chip and wait for it to finish. */
1603 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
1606 * We can't do a traditional PCI write flush here by reading
1607 * back the register. The card will not respond once the reset
1608 * is in action and we end up with a machine check exception
1609 * instead. Nothing to do but wait and hope for the best.
1610 * A portable pci_write_flush(pdev) call would be very useful here.
1612 udelay(20);
1613 data = qla1280_debounce_register(&reg->ictrl);
1615 * Yet another QLogic gem ;-(
1617 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
1618 udelay(5);
1619 data = RD_REG_WORD(&reg->ictrl);
1622 if (!cnt)
1623 goto fail;
1625 /* Reset register cleared by chip reset. */
1626 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
1628 WRT_REG_WORD(&reg->cfg_1, 0);
1630 /* Reset RISC and disable BIOS which
1631 allows RISC to execute out of RAM. */
1632 WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
1633 HC_RELEASE_RISC | HC_DISABLE_BIOS);
1635 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
1636 data = qla1280_debounce_register(&reg->mailbox0);
1639 * I *LOVE* this code!
1641 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
1642 udelay(5);
1643 data = RD_REG_WORD(&reg->mailbox0);
1646 if (!cnt)
1647 goto fail;
1649 /* Check product ID of chip */
1650 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
1652 if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
1653 (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
1654 RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
1655 RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
1656 RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
1657 printk(KERN_INFO "qla1280: Wrong product ID = "
1658 "0x%x,0x%x,0x%x,0x%x\n",
1659 RD_REG_WORD(&reg->mailbox1),
1660 RD_REG_WORD(&reg->mailbox2),
1661 RD_REG_WORD(&reg->mailbox3),
1662 RD_REG_WORD(&reg->mailbox4));
1663 goto fail;
1667 * Enable ints early!!!
1669 qla1280_enable_intrs(ha);
1671 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
1672 /* Wrap Incoming Mailboxes Test. */
1673 mb[0] = MBC_MAILBOX_REGISTER_TEST;
1674 mb[1] = 0xAAAA;
1675 mb[2] = 0x5555;
1676 mb[3] = 0xAA55;
1677 mb[4] = 0x55AA;
1678 mb[5] = 0xA5A5;
1679 mb[6] = 0x5A5A;
1680 mb[7] = 0x2525;
1682 status = qla1280_mailbox_command(ha, 0xff, mb);
1683 if (status)
1684 goto fail;
1686 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
1687 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
1688 mb[7] != 0x2525) {
1689 printk(KERN_INFO "qla1280: Failed mbox check\n");
1690 goto fail;
1693 dprintk(3, "qla1280_chip_diag: exiting normally\n");
1694 return 0;
1695 fail:
1696 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
1697 return status;
1700 static int
1701 qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1703 /* enter with host_lock acquired */
1705 const struct firmware *fw;
1706 const __le16 *fw_data;
1707 uint16_t risc_address, risc_code_size;
1708 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1709 int err = 0;
1711 fw = qla1280_request_firmware(ha);
1712 if (IS_ERR(fw))
1713 return PTR_ERR(fw);
1715 fw_data = (const __le16 *)&fw->data[0];
1716 ha->fwstart = __le16_to_cpu(fw_data[2]);
1718 /* Load RISC code. */
1719 risc_address = ha->fwstart;
1720 fw_data = (const __le16 *)&fw->data[6];
1721 risc_code_size = (fw->size - 6) / 2;
1723 for (i = 0; i < risc_code_size; i++) {
1724 mb[0] = MBC_WRITE_RAM_WORD;
1725 mb[1] = risc_address + i;
1726 mb[2] = __le16_to_cpu(fw_data[i]);
1728 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1729 if (err) {
1730 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1731 ha->host_no);
1732 break;
1736 return err;
1739 #define DUMP_IT_BACK 0 /* for debug of RISC loading */
1740 static int
1741 qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1743 /* enter with host_lock acquired */
1744 const struct firmware *fw;
1745 const __le16 *fw_data;
1746 uint16_t risc_address, risc_code_size;
1747 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1748 int err = 0, num, i;
1749 #if DUMP_IT_BACK
1750 uint8_t *sp, *tbuf;
1751 dma_addr_t p_tbuf;
1753 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
1754 if (!tbuf)
1755 return -ENOMEM;
1756 #endif
1758 fw = qla1280_request_firmware(ha);
1759 if (IS_ERR(fw))
1760 return PTR_ERR(fw);
1762 fw_data = (const __le16 *)&fw->data[0];
1763 ha->fwstart = __le16_to_cpu(fw_data[2]);
1765 /* Load RISC code. */
1766 risc_address = ha->fwstart;
1767 fw_data = (const __le16 *)&fw->data[6];
1768 risc_code_size = (fw->size - 6) / 2;
1770 dprintk(1, "%s: DMA RISC code (%i) words\n",
1771 __func__, risc_code_size);
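/*
 * The image is transferred in chunks of at most 1000 words (2000 bytes):
 * each chunk is staged in the request ring buffer and MBC_LOAD_RAM then
 * DMAs it into RISC RAM at the current risc_address.
 */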
1773 num = 0;
1774 while (risc_code_size > 0) {
1775 int warn __attribute__((unused)) = 0;
1777 cnt = 2000 >> 1;
1779 if (cnt > risc_code_size)
1780 cnt = risc_code_size;
1782 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1783 "%d,%d(0x%x)\n",
1784 fw_data, cnt, num, risc_address);
1785 for(i = 0; i < cnt; i++)
1786 ((__le16 *)ha->request_ring)[i] = fw_data[i];
1788 mb[0] = MBC_LOAD_RAM;
1789 mb[1] = risc_address;
1790 mb[4] = cnt;
1791 mb[3] = ha->request_dma & 0xffff;
1792 mb[2] = (ha->request_dma >> 16) & 0xffff;
1793 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1794 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1795 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1796 __func__, mb[0],
1797 (void *)(long)ha->request_dma,
1798 mb[6], mb[7], mb[2], mb[3]);
1799 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1800 BIT_1 | BIT_0, mb);
1801 if (err) {
1802 printk(KERN_ERR "scsi(%li): Failed to load partial "
1803 "segment of f\n", ha->host_no);
1804 goto out;
1807 #if DUMP_IT_BACK
1808 mb[0] = MBC_DUMP_RAM;
1809 mb[1] = risc_address;
1810 mb[4] = cnt;
1811 mb[3] = p_tbuf & 0xffff;
1812 mb[2] = (p_tbuf >> 16) & 0xffff;
1813 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
1814 mb[6] = pci_dma_hi32(p_tbuf) >> 16;
1816 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1817 BIT_1 | BIT_0, mb);
1818 if (err) {
1819 printk(KERN_ERR
1820 "Failed to dump partial segment of f/w\n");
1821 goto out;
1823 sp = (uint8_t *)ha->request_ring;
1824 for (i = 0; i < (cnt << 1); i++) {
1825 if (tbuf[i] != sp[i] && warn++ < 10) {
1826 printk(KERN_ERR "%s: FW compare error @ "
1827 "byte(0x%x) loop#=%x\n",
1828 __func__, i, num);
1829 printk(KERN_ERR "%s: FWbyte=%x "
1830 "FWfromChip=%x\n",
1831 __func__, sp[i], tbuf[i]);
1832 /*break; */
1835 #endif
1836 risc_address += cnt;
1837 risc_code_size = risc_code_size - cnt;
1838 fw_data = fw_data + cnt;
1839 num++;
1842 out:
1843 #if DUMP_IT_BACK
1844 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
1845 #endif
1846 return err;
1849 static int
1850 qla1280_start_firmware(struct scsi_qla_host *ha)
1852 uint16_t mb[MAILBOX_REGISTER_COUNT];
1853 int err;
1855 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1856 __func__);
1858 /* Verify checksum of loaded RISC code. */
1859 mb[0] = MBC_VERIFY_CHECKSUM;
1860 /* mb[1] = ql12_risc_code_addr01; */
1861 mb[1] = ha->fwstart;
1862 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1863 if (err) {
1864 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1865 return err;
1868 /* Start firmware execution. */
1869 dprintk(1, "%s: start firmware running.\n", __func__);
1870 mb[0] = MBC_EXECUTE_FIRMWARE;
1871 mb[1] = ha->fwstart;
1872 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1873 if (err) {
1874 printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1875 ha->host_no);
1878 return err;
1881 static int
1882 qla1280_load_firmware(struct scsi_qla_host *ha)
1884 /* enter with host_lock taken */
1885 int err;
1887 err = qla1280_chip_diag(ha);
1888 if (err)
1889 goto out;
1890 if (IS_ISP1040(ha))
1891 err = qla1280_load_firmware_pio(ha);
1892 else
1893 err = qla1280_load_firmware_dma(ha);
1894 if (err)
1895 goto out;
1896 err = qla1280_start_firmware(ha);
1897 out:
1898 return err;
1902 * Initialize rings
1904 * Input:
1905 * ha = adapter block pointer.
1906 * ha->request_ring = request ring virtual address
1907 * ha->response_ring = response ring virtual address
1908 * ha->request_dma = request ring physical address
1909 * ha->response_dma = response ring physical address
1911 * Returns:
1912 * 0 = success.
1914 static int
1915 qla1280_init_rings(struct scsi_qla_host *ha)
1917 uint16_t mb[MAILBOX_REGISTER_COUNT];
1918 int status = 0;
1920 ENTER("qla1280_init_rings");
1922 /* Clear outstanding commands array. */
1923 memset(ha->outstanding_cmds, 0,
1924 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1926 /* Initialize request queue. */
1927 ha->request_ring_ptr = ha->request_ring;
1928 ha->req_ring_index = 0;
1929 ha->req_q_cnt = REQUEST_ENTRY_CNT;
1930 /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
1931 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1932 mb[1] = REQUEST_ENTRY_CNT;
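/*
 * The 64-bit ring base address is split across four mailboxes:
 * mb[3]/mb[2] carry the low 32 bits (low word first), while
 * mb[7]/mb[6] carry the upper 32 bits via pci_dma_hi32().
 */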
1933 mb[3] = ha->request_dma & 0xffff;
1934 mb[2] = (ha->request_dma >> 16) & 0xffff;
1935 mb[4] = 0;
1936 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1937 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1938 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1939 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1940 &mb[0]))) {
1941 /* Initialize response queue. */
1942 ha->response_ring_ptr = ha->response_ring;
1943 ha->rsp_ring_index = 0;
1944 /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
1945 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1946 mb[1] = RESPONSE_ENTRY_CNT;
1947 mb[3] = ha->response_dma & 0xffff;
1948 mb[2] = (ha->response_dma >> 16) & 0xffff;
1949 mb[5] = 0;
1950 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
1951 mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
1952 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1953 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1954 &mb[0]);
1957 if (status)
1958 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1960 LEAVE("qla1280_init_rings");
1961 return status;
1964 static void
1965 qla1280_print_settings(struct nvram *nv)
1967 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
1968 nv->bus[0].config_1.initiator_id);
1969 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
1970 nv->bus[1].config_1.initiator_id);
1972 dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
1973 nv->bus[0].bus_reset_delay);
1974 dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
1975 nv->bus[1].bus_reset_delay);
1977 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
1978 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
1979 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
1980 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
1982 dprintk(1, "qla1280 : async data setup time[0]=%d\n",
1983 nv->bus[0].config_2.async_data_setup_time);
1984 dprintk(1, "qla1280 : async data setup time[1]=%d\n",
1985 nv->bus[1].config_2.async_data_setup_time);
1987 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
1988 nv->bus[0].config_2.req_ack_active_negation);
1989 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
1990 nv->bus[1].config_2.req_ack_active_negation);
1992 dprintk(1, "qla1280 : data line active negation[0]=%d\n",
1993 nv->bus[0].config_2.data_line_active_negation);
1994 dprintk(1, "qla1280 : data line active negation[1]=%d\n",
1995 nv->bus[1].config_2.data_line_active_negation);
1997 dprintk(1, "qla1280 : disable loading risc code=%d\n",
1998 nv->cntr_flags_1.disable_loading_risc_code);
2000 dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
2001 nv->cntr_flags_1.enable_64bit_addressing);
2003 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
2004 nv->bus[0].selection_timeout);
2005 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
2006 nv->bus[1].selection_timeout);
2008 dprintk(1, "qla1280 : max queue depth[0]=%d\n",
2009 nv->bus[0].max_queue_depth);
2010 dprintk(1, "qla1280 : max queue depth[1]=%d\n",
2011 nv->bus[1].max_queue_depth);
2014 static void
2015 qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
2017 struct nvram *nv = &ha->nvram;
2019 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
2020 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
2021 nv->bus[bus].target[target].parameter.tag_queuing = 1;
2022 nv->bus[bus].target[target].parameter.enable_sync = 1;
2023 #if 1 /* Some SCSI Processors do not seem to like this */
2024 nv->bus[bus].target[target].parameter.enable_wide = 1;
2025 #endif
2026 nv->bus[bus].target[target].execution_throttle =
2027 nv->bus[bus].max_queue_depth - 1;
2028 nv->bus[bus].target[target].parameter.parity_checking = 1;
2029 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
2031 if (IS_ISP1x160(ha)) {
2032 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
2033 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
2034 nv->bus[bus].target[target].sync_period = 9;
2035 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
2036 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
2037 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
2038 } else {
2039 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
2040 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
2041 nv->bus[bus].target[target].sync_period = 10;
2045 static void
2046 qla1280_set_defaults(struct scsi_qla_host *ha)
2048 struct nvram *nv = &ha->nvram;
2049 int bus, target;
2051 dprintk(1, "Using defaults for NVRAM: \n");
2052 memset(nv, 0, sizeof(struct nvram));
2054 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
2055 nv->firmware_feature.f.enable_fast_posting = 1;
2056 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2057 nv->termination.scsi_bus_0_control = 3;
2058 nv->termination.scsi_bus_1_control = 3;
2059 nv->termination.auto_term_support = 1;
2062 * Set default FIFO magic - What appropriate values would be here
2063 * is unknown. This is what I have found testing with 12160s.
2065 * Now, I would love the magic decoder ring for this one, the
2066 * header file provided by QLogic seems to be bogus or incomplete
2067 * at best.
2069 nv->isp_config.burst_enable = 1;
2070 if (IS_ISP1040(ha))
2071 nv->isp_config.fifo_threshold |= 3;
2072 else
2073 nv->isp_config.fifo_threshold |= 4;
2075 if (IS_ISP1x160(ha))
2076 nv->isp_parameter = 0x01; /* fast memory enable */
2078 for (bus = 0; bus < MAX_BUSES; bus++) {
2079 nv->bus[bus].config_1.initiator_id = 7;
2080 nv->bus[bus].config_2.req_ack_active_negation = 1;
2081 nv->bus[bus].config_2.data_line_active_negation = 1;
2082 nv->bus[bus].selection_timeout = 250;
2083 nv->bus[bus].max_queue_depth = 32;
2085 if (IS_ISP1040(ha)) {
2086 nv->bus[bus].bus_reset_delay = 3;
2087 nv->bus[bus].config_2.async_data_setup_time = 6;
2088 nv->bus[bus].retry_delay = 1;
2089 } else {
2090 nv->bus[bus].bus_reset_delay = 5;
2091 nv->bus[bus].config_2.async_data_setup_time = 8;
2094 for (target = 0; target < MAX_TARGETS; target++)
2095 qla1280_set_target_defaults(ha, bus, target);
2099 static int
2100 qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2102 struct nvram *nv = &ha->nvram;
2103 uint16_t mb[MAILBOX_REGISTER_COUNT];
2104 int status, lun;
2105 uint16_t flag;
2107 /* Set Target Parameters. */
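/*
 * The target ID is encoded in the high byte of mb[1]; targets on
 * bus 1 are flagged by setting BIT_7 of that byte.
 */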
2108 mb[0] = MBC_SET_TARGET_PARAMETERS;
2109 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2112 * Do not enable sync and ppr for the initial INQUIRY run. We
2113 * enable this later if we determine the target actually
2114 * supports it.
2116 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2117 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2119 if (IS_ISP1x160(ha))
2120 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2121 else
2122 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2123 mb[3] |= nv->bus[bus].target[target].sync_period;
2124 status = qla1280_mailbox_command(ha, 0x0f, mb);
2126 /* Save Tag queuing enable flag. */
2127 flag = (BIT_0 << target);
2128 if (nv->bus[bus].target[target].parameter.tag_queuing)
2129 ha->bus_settings[bus].qtag_enables |= flag;
2131 /* Save Device enable flag. */
2132 if (IS_ISP1x160(ha)) {
2133 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2134 ha->bus_settings[bus].device_enables |= flag;
2135 ha->bus_settings[bus].lun_disables |= 0;
2136 } else {
2137 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2138 ha->bus_settings[bus].device_enables |= flag;
2139 /* Save LUN disable flag. */
2140 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2141 ha->bus_settings[bus].lun_disables |= flag;
2144 /* Set Device Queue Parameters. */
2145 for (lun = 0; lun < MAX_LUNS; lun++) {
2146 mb[0] = MBC_SET_DEVICE_QUEUE;
2147 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2148 mb[1] |= lun;
2149 mb[2] = nv->bus[bus].max_queue_depth;
2150 mb[3] = nv->bus[bus].target[target].execution_throttle;
2151 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2154 return status;
2157 static int
2158 qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2160 struct nvram *nv = &ha->nvram;
2161 uint16_t mb[MAILBOX_REGISTER_COUNT];
2162 int target, status;
2164 /* SCSI Reset Disable. */
2165 ha->bus_settings[bus].disable_scsi_reset =
2166 nv->bus[bus].config_1.scsi_reset_disable;
2168 /* Initiator ID. */
2169 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2170 mb[0] = MBC_SET_INITIATOR_ID;
2171 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2172 ha->bus_settings[bus].id;
2173 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2175 /* Reset Delay. */
2176 ha->bus_settings[bus].bus_reset_delay =
2177 nv->bus[bus].bus_reset_delay;
2179 /* Command queue depth per device. */
2180 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2182 /* Set target parameters. */
2183 for (target = 0; target < MAX_TARGETS; target++)
2184 status |= qla1280_config_target(ha, bus, target);
2186 return status;
2189 static int
2190 qla1280_nvram_config(struct scsi_qla_host *ha)
2192 struct device_reg __iomem *reg = ha->iobase;
2193 struct nvram *nv = &ha->nvram;
2194 int bus, target, status = 0;
2195 uint16_t mb[MAILBOX_REGISTER_COUNT];
2197 ENTER("qla1280_nvram_config");
2199 if (ha->nvram_valid) {
2200 /* Always force AUTO sense for LINUX SCSI */
2201 for (bus = 0; bus < MAX_BUSES; bus++)
2202 for (target = 0; target < MAX_TARGETS; target++) {
2203 nv->bus[bus].target[target].parameter.
2204 auto_request_sense = 1;
2206 } else {
2207 qla1280_set_defaults(ha);
2210 qla1280_print_settings(nv);
2212 /* Disable RISC load of firmware. */
2213 ha->flags.disable_risc_code_load =
2214 nv->cntr_flags_1.disable_loading_risc_code;
2216 if (IS_ISP1040(ha)) {
2217 uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
2219 hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
2221 cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2222 cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
2223 ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
2225 /* Busted fifo, says mjacob. */
2226 if (hwrev != ISP_CFG0_1040A)
2227 cfg1 |= nv->isp_config.fifo_threshold << 4;
2229 cfg1 |= nv->isp_config.burst_enable << 2;
2230 WRT_REG_WORD(&reg->cfg_1, cfg1);
2232 WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
 2233                 WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
2234 } else {
2235 uint16_t cfg1, term;
2237 /* Set ISP hardware DMA burst */
2238 cfg1 = nv->isp_config.fifo_threshold << 4;
2239 cfg1 |= nv->isp_config.burst_enable << 2;
2240 /* Enable DMA arbitration on dual channel controllers */
2241 if (ha->ports > 1)
2242 cfg1 |= BIT_13;
2243 WRT_REG_WORD(&reg->cfg_1, cfg1);
2245 /* Set SCSI termination. */
2246 WRT_REG_WORD(&reg->gpio_enable,
2247 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2248 term = nv->termination.scsi_bus_1_control;
2249 term |= nv->termination.scsi_bus_0_control << 2;
2250 term |= nv->termination.auto_term_support << 7;
2251 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2252 WRT_REG_WORD(&reg->gpio_data, term);
2254 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2256 /* ISP parameter word. */
2257 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2258 mb[1] = nv->isp_parameter;
2259 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2261 if (IS_ISP1x40(ha)) {
2262 /* clock rate - for qla1240 and older, only */
2263 mb[0] = MBC_SET_CLOCK_RATE;
2264 mb[1] = 40;
2265 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2268 /* Firmware feature word. */
2269 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2270 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2271 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2272 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2273 #if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2274 if (ia64_platform_is("sn2")) {
2275 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2276 "workaround\n", ha->host_no);
2277 mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
2279 #endif
2280 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2282 /* Retry count and delay. */
2283 mb[0] = MBC_SET_RETRY_COUNT;
2284 mb[1] = nv->bus[0].retry_count;
2285 mb[2] = nv->bus[0].retry_delay;
2286 mb[6] = nv->bus[1].retry_count;
2287 mb[7] = nv->bus[1].retry_delay;
2288 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2289 BIT_1 | BIT_0, &mb[0]);
2291 /* ASYNC data setup time. */
2292 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2293 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2294 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2295 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2297 /* Active negation states. */
2298 mb[0] = MBC_SET_ACTIVE_NEGATION;
2299 mb[1] = 0;
2300 if (nv->bus[0].config_2.req_ack_active_negation)
2301 mb[1] |= BIT_5;
2302 if (nv->bus[0].config_2.data_line_active_negation)
2303 mb[1] |= BIT_4;
2304 mb[2] = 0;
2305 if (nv->bus[1].config_2.req_ack_active_negation)
2306 mb[2] |= BIT_5;
2307 if (nv->bus[1].config_2.data_line_active_negation)
2308 mb[2] |= BIT_4;
2309 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2311 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2312 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
2313 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
 2315         /* Set PCI control parameters (DMA channel burst enables). */
2316 mb[0] = MBC_SET_PCI_CONTROL;
2317 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
2318 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
2319 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2321 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2322 mb[1] = 8;
2323 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2325 /* Selection timeout. */
2326 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2327 mb[1] = nv->bus[0].selection_timeout;
2328 mb[2] = nv->bus[1].selection_timeout;
2329 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2331 for (bus = 0; bus < ha->ports; bus++)
2332 status |= qla1280_config_bus(ha, bus);
2334 if (status)
2335 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2337 LEAVE("qla1280_nvram_config");
2338 return status;
2342 * Get NVRAM data word
2343 * Calculates word position in NVRAM and calls request routine to
2344 * get the word from NVRAM.
2346 * Input:
2347 * ha = adapter block pointer.
2348 * address = NVRAM word address.
2350 * Returns:
2351 * data word.
2353 static uint16_t
2354 qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2356 uint32_t nv_cmd;
2357 uint16_t data;
2359 nv_cmd = address << 16;
2360 nv_cmd |= NV_READ_OP;
2362 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2364 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2365 "0x%x", data);
2367 return data;
2371 * NVRAM request
2372 * Sends read command to NVRAM and gets data from NVRAM.
2374 * Input:
2375 * ha = adapter block pointer.
2376 * nv_cmd = Bit 26 = start bit
2377 * Bit 25, 24 = opcode
2378 * Bit 23-16 = address
2379 * Bit 15-0 = write data
2381 * Returns:
2382 * data word.
2384 static uint16_t
2385 qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
2387 struct device_reg __iomem *reg = ha->iobase;
2388 int cnt;
2389 uint16_t data = 0;
2390 uint16_t reg_data;
2392 /* Send command to NVRAM. */
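/*
 * Shifting the command left by 5 aligns bit 26 (the start bit) with
 * bit 31, so the 11 iterations below clock out the start bit, the two
 * opcode bits and the eight address bits, MSB first.
 */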
2394 nv_cmd <<= 5;
2395 for (cnt = 0; cnt < 11; cnt++) {
2396 if (nv_cmd & BIT_31)
2397 qla1280_nv_write(ha, NV_DATA_OUT);
2398 else
2399 qla1280_nv_write(ha, 0);
2400 nv_cmd <<= 1;
2403 /* Read data from NVRAM. */
2405 for (cnt = 0; cnt < 16; cnt++) {
2406 WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
2407 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2408 NVRAM_DELAY();
2409 data <<= 1;
2410 reg_data = RD_REG_WORD(&reg->nvram);
2411 if (reg_data & NV_DATA_IN)
2412 data |= BIT_0;
2413 WRT_REG_WORD(&reg->nvram, NV_SELECT);
2414 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2415 NVRAM_DELAY();
2418 /* Deselect chip. */
2420 WRT_REG_WORD(&reg->nvram, NV_DESELECT);
2421 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2422 NVRAM_DELAY();
2424 return data;
2427 static void
2428 qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
2430 struct device_reg __iomem *reg = ha->iobase;
2432 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2433 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2434 NVRAM_DELAY();
2435 WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
2436 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2437 NVRAM_DELAY();
2438 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2439 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2440 NVRAM_DELAY();
2444 * Mailbox Command
 2445  *      Issues a mailbox command and waits for completion.
2447 * Input:
2448 * ha = adapter block pointer.
2449 * mr = mailbox registers to load.
2450 * mb = data pointer for mailbox registers.
2452 * Output:
2453 * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
2455 * Returns:
2456 * 0 = success
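/*
 * Typical usage, as seen elsewhere in this driver (illustrative sketch):
 * load the mailbox registers selected by the 'mr' bitmask, then issue
 * the command, e.g. to verify the loaded RISC firmware checksum:
 *
 *     mb[0] = MBC_VERIFY_CHECKSUM;
 *     mb[1] = ha->fwstart;
 *     err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
 *
 * On return, mb[] holds the values the ISP wrote back, with mb[0]
 * reporting the completion status.
 */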
2458 static int
2459 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2461 struct device_reg __iomem *reg = ha->iobase;
2462 int status = 0;
2463 int cnt;
2464 uint16_t *optr, *iptr;
2465 uint16_t __iomem *mptr;
2466 uint16_t data;
2467 DECLARE_COMPLETION_ONSTACK(wait);
2468 struct timer_list timer;
2470 ENTER("qla1280_mailbox_command");
2472 if (ha->mailbox_wait) {
2473 printk(KERN_ERR "Warning mailbox wait already in use!\n");
2475 ha->mailbox_wait = &wait;
2478 * We really should start out by verifying that the mailbox is
 2479          * available before we start sending the command data.
2481 /* Load mailbox registers. */
2482 mptr = (uint16_t __iomem *) &reg->mailbox0;
2483 iptr = mb;
2484 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2485 if (mr & BIT_0) {
2486 WRT_REG_WORD(mptr, (*iptr));
2489 mr >>= 1;
2490 mptr++;
2491 iptr++;
2494 /* Issue set host interrupt command. */
2496 /* set up a timer just in case we're really jammed */
2497 init_timer_on_stack(&timer);
2498 timer.expires = jiffies + 20*HZ;
2499 timer.data = (unsigned long)ha;
2500 timer.function = qla1280_mailbox_timeout;
2501 add_timer(&timer);
2503 spin_unlock_irq(ha->host->host_lock);
2504 WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
2505 data = qla1280_debounce_register(&reg->istatus);
2507 wait_for_completion(&wait);
2508 del_timer_sync(&timer);
2510 spin_lock_irq(ha->host->host_lock);
2512 ha->mailbox_wait = NULL;
2514 /* Check for mailbox command timeout. */
2515 if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2516 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2517 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2518 "0x%04x\n",
2519 mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
2520 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
2521 RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
2522 RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
2523 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
2524 RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
2525 RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
2526 status = 1;
2529 /* Load return mailbox registers. */
2530 optr = mb;
2531 iptr = (uint16_t *) &ha->mailbox_out[0];
2532 mr = MAILBOX_REGISTER_COUNT;
2533 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2535 if (ha->flags.reset_marker)
2536 qla1280_rst_aen(ha);
2538 if (status)
2539 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2540 "0x%x ****\n", mb[0]);
2542 LEAVE("qla1280_mailbox_command");
2543 return status;
2547 * qla1280_poll
2548 * Polls ISP for interrupts.
2550 * Input:
2551 * ha = adapter block pointer.
2553 static void
2554 qla1280_poll(struct scsi_qla_host *ha)
2556 struct device_reg __iomem *reg = ha->iobase;
2557 uint16_t data;
2558 LIST_HEAD(done_q);
2560 /* ENTER("qla1280_poll"); */
2562 /* Check for pending interrupts. */
2563 data = RD_REG_WORD(&reg->istatus);
2564 if (data & RISC_INT)
2565 qla1280_isr(ha, &done_q);
2567 if (!ha->mailbox_wait) {
2568 if (ha->flags.reset_marker)
2569 qla1280_rst_aen(ha);
2572 if (!list_empty(&done_q))
2573 qla1280_done(ha);
2575 /* LEAVE("qla1280_poll"); */
2579 * qla1280_bus_reset
2580 * Issue SCSI bus reset.
2582 * Input:
2583 * ha = adapter block pointer.
2584 * bus = SCSI bus number.
2586 * Returns:
2587 * 0 = success
2589 static int
2590 qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2592 uint16_t mb[MAILBOX_REGISTER_COUNT];
2593 uint16_t reset_delay;
2594 int status;
2596 dprintk(3, "qla1280_bus_reset: entered\n");
2598 if (qla1280_verbose)
2599 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2600 ha->host_no, bus);
2602 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2603 mb[0] = MBC_BUS_RESET;
2604 mb[1] = reset_delay;
2605 mb[2] = (uint16_t) bus;
2606 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2608 if (status) {
2609 if (ha->bus_settings[bus].failed_reset_count > 2)
2610 ha->bus_settings[bus].scsi_bus_dead = 1;
2611 ha->bus_settings[bus].failed_reset_count++;
2612 } else {
2613 spin_unlock_irq(ha->host->host_lock);
2614 ssleep(reset_delay);
2615 spin_lock_irq(ha->host->host_lock);
2617 ha->bus_settings[bus].scsi_bus_dead = 0;
2618 ha->bus_settings[bus].failed_reset_count = 0;
2619 ha->bus_settings[bus].reset_marker = 0;
2620 /* Issue marker command. */
2621 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2625 * We should probably call qla1280_set_target_parameters()
2626 * here as well for all devices on the bus.
2629 if (status)
2630 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2631 else
2632 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2634 return status;
2638 * qla1280_device_reset
2639 * Issue bus device reset message to the target.
2641 * Input:
2642 * ha = adapter block pointer.
2643 * bus = SCSI BUS number.
2644 * target = SCSI ID.
2646 * Returns:
2647 * 0 = success
2649 static int
2650 qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2652 uint16_t mb[MAILBOX_REGISTER_COUNT];
2653 int status;
2655 ENTER("qla1280_device_reset");
2657 mb[0] = MBC_ABORT_TARGET;
2658 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2659 mb[2] = 1;
2660 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2662 /* Issue marker command. */
2663 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2665 if (status)
2666 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2668 LEAVE("qla1280_device_reset");
2669 return status;
2673 * qla1280_abort_command
 2674  *     Aborts the specified outstanding IOCB.
2676 * Input:
2677 * ha = adapter block pointer.
2678 * sp = SB structure pointer.
2680 * Returns:
2681 * 0 = success
2683 static int
2684 qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2686 uint16_t mb[MAILBOX_REGISTER_COUNT];
2687 unsigned int bus, target, lun;
2688 int status;
2690 ENTER("qla1280_abort_command");
2692 bus = SCSI_BUS_32(sp->cmd);
2693 target = SCSI_TCN_32(sp->cmd);
2694 lun = SCSI_LUN_32(sp->cmd);
2696 sp->flags |= SRB_ABORT_PENDING;
2698 mb[0] = MBC_ABORT_COMMAND;
2699 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2700 mb[2] = handle >> 16;
2701 mb[3] = handle & 0xffff;
2702 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2704 if (status) {
2705 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2706 sp->flags &= ~SRB_ABORT_PENDING;
2710 LEAVE("qla1280_abort_command");
2711 return status;
2715 * qla1280_reset_adapter
2716 * Reset adapter.
2718 * Input:
2719 * ha = adapter block pointer.
2721 static void
2722 qla1280_reset_adapter(struct scsi_qla_host *ha)
2724 struct device_reg __iomem *reg = ha->iobase;
2726 ENTER("qla1280_reset_adapter");
2728 /* Disable ISP chip */
2729 ha->flags.online = 0;
2730 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
2731 WRT_REG_WORD(&reg->host_cmd,
2732 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2733 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2735 LEAVE("qla1280_reset_adapter");
2739 * Issue marker command.
2740 * Function issues marker IOCB.
2742 * Input:
2743 * ha = adapter block pointer.
2744 * bus = SCSI BUS number
2745 * id = SCSI ID
2746 * lun = SCSI LUN
2747 * type = marker modifier
2749 static void
2750 qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2752 struct mrk_entry *pkt;
2754 ENTER("qla1280_marker");
2756 /* Get request packet. */
2757 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2758 pkt->entry_type = MARKER_TYPE;
2759 pkt->lun = (uint8_t) lun;
2760 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2761 pkt->modifier = type;
2762 pkt->entry_status = 0;
2764 /* Issue command to ISP */
2765 qla1280_isp_cmd(ha);
2768 LEAVE("qla1280_marker");
2773 * qla1280_64bit_start_scsi
 2774  *     The start SCSI routine is responsible for building request packets
 2775  *     on the request ring and for updating the ISP input pointer.
2777 * Input:
2778 * ha = adapter block pointer.
2779 * sp = SB structure pointer.
2781 * Returns:
2782 * 0 = success, was able to issue command.
2784 #ifdef QLA_64BIT_PTR
2785 static int
2786 qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2788 struct device_reg __iomem *reg = ha->iobase;
2789 struct scsi_cmnd *cmd = sp->cmd;
2790 cmd_a64_entry_t *pkt;
2791 __le32 *dword_ptr;
2792 dma_addr_t dma_handle;
2793 int status = 0;
2794 int cnt;
2795 int req_cnt;
2796 int seg_cnt;
2797 u8 dir;
2799 ENTER("qla1280_64bit_start_scsi:");
2801 /* Calculate number of entries and segments required. */
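/*
 * A command entry holds up to 2 data segments and each continuation
 * entry holds up to 5 more, hence the arithmetic below.
 */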
2802 req_cnt = 1;
2803 seg_cnt = scsi_dma_map(cmd);
2804 if (seg_cnt > 0) {
2805 if (seg_cnt > 2) {
2806 req_cnt += (seg_cnt - 2) / 5;
2807 if ((seg_cnt - 2) % 5)
2808 req_cnt++;
2810 } else if (seg_cnt < 0) {
2811 status = 1;
2812 goto out;
2815 if ((req_cnt + 2) >= ha->req_q_cnt) {
2816 /* Calculate number of free request entries. */
2817 cnt = RD_REG_WORD(&reg->mailbox4);
2818 if (ha->req_ring_index < cnt)
2819 ha->req_q_cnt = cnt - ha->req_ring_index;
2820 else
2821 ha->req_q_cnt =
2822 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2825 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2826 ha->req_q_cnt, seg_cnt);
2828 /* If room for request in request ring. */
2829 if ((req_cnt + 2) >= ha->req_q_cnt) {
2830 status = SCSI_MLQUEUE_HOST_BUSY;
2831 dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt="
2832 "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2833 req_cnt);
2834 goto out;
2837 /* Check for room in outstanding command list. */
2838 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2839 ha->outstanding_cmds[cnt] != NULL; cnt++);
2841 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2842 status = SCSI_MLQUEUE_HOST_BUSY;
2843 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2844 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2845 goto out;
2848 ha->outstanding_cmds[cnt] = sp;
2849 ha->req_q_cnt -= req_cnt;
2850 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2852 dprintk(2, "start: cmd=%p sp=%p CDB=%xm, handle %lx\n", cmd, sp,
2853 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2854 dprintk(2, " bus %i, target %i, lun %i\n",
2855 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2856 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2859 * Build command packet.
2861 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2863 pkt->entry_type = COMMAND_A64_TYPE;
2864 pkt->entry_count = (uint8_t) req_cnt;
2865 pkt->sys_define = (uint8_t) ha->req_ring_index;
2866 pkt->entry_status = 0;
2867 pkt->handle = cpu_to_le32(cnt);
2869 /* Zero out remaining portion of packet. */
2870 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2872 /* Set ISP command timeout. */
2873 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2875 /* Set device target ID and LUN */
2876 pkt->lun = SCSI_LUN_32(cmd);
2877 pkt->target = SCSI_BUS_32(cmd) ?
2878 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2880 /* Enable simple tag queuing if device supports it. */
2881 if (cmd->device->simple_tags)
2882 pkt->control_flags |= cpu_to_le16(BIT_3);
2884 /* Load SCSI command packet. */
2885 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2886 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2887 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
2889 /* Set transfer direction. */
2890 dir = qla1280_data_direction(cmd);
2891 pkt->control_flags |= cpu_to_le16(dir);
2893 /* Set total data segment count. */
2894 pkt->dseg_count = cpu_to_le16(seg_cnt);
2897 * Load data segments.
2899 if (seg_cnt) { /* If data transfer. */
2900 struct scatterlist *sg, *s;
2901 int remseg = seg_cnt;
2903 sg = scsi_sglist(cmd);
2905 /* Setup packet address segment pointer. */
2906 dword_ptr = (u32 *)&pkt->dseg_0_address;
2908 /* Load command entry data segments. */
2909 for_each_sg(sg, s, seg_cnt, cnt) {
2910 if (cnt == 2)
2911 break;
2913 dma_handle = sg_dma_address(s);
2914 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2915 if (ha->flags.use_pci_vchannel)
2916 sn_pci_set_vchan(ha->pdev,
2917 (unsigned long *)&dma_handle,
2918 SCSI_BUS_32(cmd));
2919 #endif
2920 *dword_ptr++ =
2921 cpu_to_le32(pci_dma_lo32(dma_handle));
2922 *dword_ptr++ =
2923 cpu_to_le32(pci_dma_hi32(dma_handle));
2924 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2925 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2926 cpu_to_le32(pci_dma_hi32(dma_handle)),
2927 cpu_to_le32(pci_dma_lo32(dma_handle)),
 2928                                 cpu_to_le32(sg_dma_len(s)));
2929 remseg--;
2931 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2932 "command packet data - b %i, t %i, l %i \n",
2933 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2934 SCSI_LUN_32(cmd));
2935 qla1280_dump_buffer(5, (char *)pkt,
2936 REQUEST_ENTRY_SIZE);
2939 * Build continuation packets.
2941 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2942 "remains\n", seg_cnt);
2944 while (remseg > 0) {
2945 /* Update sg start */
2946 sg = s;
2947 /* Adjust ring index. */
2948 ha->req_ring_index++;
2949 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2950 ha->req_ring_index = 0;
2951 ha->request_ring_ptr =
2952 ha->request_ring;
2953 } else
2954 ha->request_ring_ptr++;
2956 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2958 /* Zero out packet. */
2959 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2961 /* Load packet defaults. */
2962 ((struct cont_a64_entry *) pkt)->entry_type =
2963 CONTINUE_A64_TYPE;
2964 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2965 ((struct cont_a64_entry *) pkt)->sys_define =
2966 (uint8_t)ha->req_ring_index;
2967 /* Setup packet address segment pointer. */
2968 dword_ptr =
2969 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2971 /* Load continuation entry data segments. */
2972 for_each_sg(sg, s, remseg, cnt) {
2973 if (cnt == 5)
2974 break;
2975 dma_handle = sg_dma_address(s);
2976 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2977 if (ha->flags.use_pci_vchannel)
2978 sn_pci_set_vchan(ha->pdev,
2979 (unsigned long *)&dma_handle,
2980 SCSI_BUS_32(cmd));
2981 #endif
2982 *dword_ptr++ =
2983 cpu_to_le32(pci_dma_lo32(dma_handle));
2984 *dword_ptr++ =
2985 cpu_to_le32(pci_dma_hi32(dma_handle));
2986 *dword_ptr++ =
2987 cpu_to_le32(sg_dma_len(s));
2988 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2989 cpu_to_le32(pci_dma_hi32(dma_handle)),
2990 cpu_to_le32(pci_dma_lo32(dma_handle)),
2991 cpu_to_le32(sg_dma_len(s)));
2993 remseg -= cnt;
2994 dprintk(5, "qla1280_64bit_start_scsi: "
2995 "continuation packet data - b %i, t "
2996 "%i, l %i \n", SCSI_BUS_32(cmd),
2997 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2998 qla1280_dump_buffer(5, (char *)pkt,
2999 REQUEST_ENTRY_SIZE);
3001 } else { /* No data transfer */
3002 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
3003 "packet data - b %i, t %i, l %i \n",
3004 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3005 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3007 /* Adjust ring index. */
3008 ha->req_ring_index++;
3009 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3010 ha->req_ring_index = 0;
3011 ha->request_ring_ptr = ha->request_ring;
3012 } else
3013 ha->request_ring_ptr++;
3015 /* Set chip new ring index. */
3016 dprintk(2,
3017 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
3018 sp->flags |= SRB_SENT;
3019 ha->actthreads++;
3020 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3021 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3022 mmiowb();
3024 out:
3025 if (status)
3026 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
3027 else
3028 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
3030 return status;
3032 #else /* !QLA_64BIT_PTR */
3035 * qla1280_32bit_start_scsi
 3036  *     The start SCSI routine is responsible for building request packets
 3037  *     on the request ring and for updating the ISP input pointer.
3039 * The Qlogic firmware interface allows every queue slot to have a SCSI
3040 * command and up to 4 scatter/gather (SG) entries. If we need more
3041 * than 4 SG entries, then continuation entries are used that can
3042 * hold another 7 entries each. The start routine determines if there
 3043  * are enough empty slots and then builds the combination of requests
 3044  * needed to fulfill the OS request.
3046 * Input:
3047 * ha = adapter block pointer.
3048 * sp = SCSI Request Block structure pointer.
3050 * Returns:
3051 * 0 = success, was able to issue command.
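/*
 * Worked example (illustrative only): a command mapped into 13 S/G
 * segments places 4 segments in the command entry and the remaining 9
 * in continuation entries of 7 segments each, so it needs
 * 1 + DIV_ROUND_UP(9, 7) = 3 request-queue slots, matching the
 * req_cnt calculation below.
 */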
3053 static int
3054 qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3056 struct device_reg __iomem *reg = ha->iobase;
3057 struct scsi_cmnd *cmd = sp->cmd;
3058 struct cmd_entry *pkt;
3059 __le32 *dword_ptr;
3060 int status = 0;
3061 int cnt;
3062 int req_cnt;
3063 int seg_cnt;
3064 u8 dir;
3066 ENTER("qla1280_32bit_start_scsi");
3068 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3069 cmd->cmnd[0]);
3071 /* Calculate number of entries and segments required. */
3072 req_cnt = 1;
3073 seg_cnt = scsi_dma_map(cmd);
3074 if (seg_cnt) {
3076 * if greater than four sg entries then we need to allocate
3077 * continuation entries
3079 if (seg_cnt > 4) {
3080 req_cnt += (seg_cnt - 4) / 7;
3081 if ((seg_cnt - 4) % 7)
3082 req_cnt++;
3084 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3085 cmd, seg_cnt, req_cnt);
3086 } else if (seg_cnt < 0) {
3087 status = 1;
3088 goto out;
3091 if ((req_cnt + 2) >= ha->req_q_cnt) {
3092 /* Calculate number of free request entries. */
3093 cnt = RD_REG_WORD(&reg->mailbox4);
3094 if (ha->req_ring_index < cnt)
3095 ha->req_q_cnt = cnt - ha->req_ring_index;
3096 else
3097 ha->req_q_cnt =
3098 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3101 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3102 ha->req_q_cnt, seg_cnt);
3103 /* If room for request in request ring. */
3104 if ((req_cnt + 2) >= ha->req_q_cnt) {
3105 status = SCSI_MLQUEUE_HOST_BUSY;
3106 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3107 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3108 ha->req_q_cnt, req_cnt);
3109 goto out;
3112 /* Check for empty slot in outstanding command list. */
3113 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3114 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3116 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3117 status = SCSI_MLQUEUE_HOST_BUSY;
3118 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3119 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3120 goto out;
3123 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3124 ha->outstanding_cmds[cnt] = sp;
3125 ha->req_q_cnt -= req_cnt;
3128 * Build command packet.
3130 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3132 pkt->entry_type = COMMAND_TYPE;
3133 pkt->entry_count = (uint8_t) req_cnt;
3134 pkt->sys_define = (uint8_t) ha->req_ring_index;
3135 pkt->entry_status = 0;
3136 pkt->handle = cpu_to_le32(cnt);
3138 /* Zero out remaining portion of packet. */
3139 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3141 /* Set ISP command timeout. */
3142 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3144 /* Set device target ID and LUN */
3145 pkt->lun = SCSI_LUN_32(cmd);
3146 pkt->target = SCSI_BUS_32(cmd) ?
3147 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3149 /* Enable simple tag queuing if device supports it. */
3150 if (cmd->device->simple_tags)
3151 pkt->control_flags |= cpu_to_le16(BIT_3);
3153 /* Load SCSI command packet. */
3154 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3155 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3157 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
3158 /* Set transfer direction. */
3159 dir = qla1280_data_direction(cmd);
3160 pkt->control_flags |= cpu_to_le16(dir);
3162 /* Set total data segment count. */
3163 pkt->dseg_count = cpu_to_le16(seg_cnt);
3166 * Load data segments.
3168 if (seg_cnt) {
3169 struct scatterlist *sg, *s;
3170 int remseg = seg_cnt;
3172 sg = scsi_sglist(cmd);
3174 /* Setup packet address segment pointer. */
3175 dword_ptr = &pkt->dseg_0_address;
3177 dprintk(3, "Building S/G data segments..\n");
3178 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3180 /* Load command entry data segments. */
3181 for_each_sg(sg, s, seg_cnt, cnt) {
3182 if (cnt == 4)
3183 break;
3184 *dword_ptr++ =
3185 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3186 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3187 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3188 (pci_dma_lo32(sg_dma_address(s))),
3189 (sg_dma_len(s)));
3190 remseg--;
3193 * Build continuation packets.
3195 dprintk(3, "S/G Building Continuation"
3196 "...seg_cnt=0x%x remains\n", seg_cnt);
3197 while (remseg > 0) {
3198 /* Continue from end point */
3199 sg = s;
3200 /* Adjust ring index. */
3201 ha->req_ring_index++;
3202 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3203 ha->req_ring_index = 0;
3204 ha->request_ring_ptr =
3205 ha->request_ring;
3206 } else
3207 ha->request_ring_ptr++;
3209 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3211 /* Zero out packet. */
3212 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3214 /* Load packet defaults. */
3215 ((struct cont_entry *) pkt)->
3216 entry_type = CONTINUE_TYPE;
3217 ((struct cont_entry *) pkt)->entry_count = 1;
3219 ((struct cont_entry *) pkt)->sys_define =
3220 (uint8_t) ha->req_ring_index;
3222 /* Setup packet address segment pointer. */
3223 dword_ptr =
3224 &((struct cont_entry *) pkt)->dseg_0_address;
3226 /* Load continuation entry data segments. */
3227 for_each_sg(sg, s, remseg, cnt) {
3228 if (cnt == 7)
3229 break;
3230 *dword_ptr++ =
3231 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3232 *dword_ptr++ =
3233 cpu_to_le32(sg_dma_len(s));
3234 dprintk(1,
3235 "S/G Segment Cont. phys_addr=0x%x, "
3236 "len=0x%x\n",
3237 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3238 cpu_to_le32(sg_dma_len(s)));
3240 remseg -= cnt;
3241 dprintk(5, "qla1280_32bit_start_scsi: "
3242 "continuation packet data - "
3243 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3244 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3245 qla1280_dump_buffer(5, (char *)pkt,
3246 REQUEST_ENTRY_SIZE);
3248 } else { /* No data transfer at all */
3249 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3250 "packet data - \n");
3251 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3253 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3254 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3255 REQUEST_ENTRY_SIZE);
3257 /* Adjust ring index. */
3258 ha->req_ring_index++;
3259 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3260 ha->req_ring_index = 0;
3261 ha->request_ring_ptr = ha->request_ring;
3262 } else
3263 ha->request_ring_ptr++;
3265 /* Set chip new ring index. */
3266 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3267 "for pending command\n");
3268 sp->flags |= SRB_SENT;
3269 ha->actthreads++;
3270 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3271 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3272 mmiowb();
3274 out:
3275 if (status)
3276 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3278 LEAVE("qla1280_32bit_start_scsi");
3280 return status;
3282 #endif
3285 * qla1280_req_pkt
3286 * Function is responsible for locking ring and
3287 * getting a zeroed out request packet.
3289 * Input:
3290 * ha = adapter block pointer.
3292 * Returns:
3293 * 0 = failed to get slot.
3295 static request_t *
3296 qla1280_req_pkt(struct scsi_qla_host *ha)
3298 struct device_reg __iomem *reg = ha->iobase;
3299 request_t *pkt = NULL;
3300 int cnt;
3301 uint32_t timer;
3303 ENTER("qla1280_req_pkt");
3306 * This can be called from interrupt context, damn it!!!
3308 /* Wait for 30 seconds for slot. */
3309 for (timer = 15000000; timer; timer--) {
3310 if (ha->req_q_cnt > 0) {
3311 /* Calculate number of free request entries. */
3312 cnt = RD_REG_WORD(&reg->mailbox4);
3313 if (ha->req_ring_index < cnt)
3314 ha->req_q_cnt = cnt - ha->req_ring_index;
3315 else
3316 ha->req_q_cnt =
3317 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3320 /* Found empty request ring slot? */
3321 if (ha->req_q_cnt > 0) {
3322 ha->req_q_cnt--;
3323 pkt = ha->request_ring_ptr;
3325 /* Zero out packet. */
3326 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3329 * How can this be right when we have a ring
3330 * size of 512???
3332 /* Set system defined field. */
3333 pkt->sys_define = (uint8_t) ha->req_ring_index;
3335 /* Set entry count. */
3336 pkt->entry_count = 1;
3338 break;
3341 udelay(2); /* 10 */
3343 /* Check for pending interrupts. */
3344 qla1280_poll(ha);
3347 if (!pkt)
3348 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3349 else
3350 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3352 return pkt;
3356 * qla1280_isp_cmd
3357 * Function is responsible for modifying ISP input pointer.
3358 * Releases ring lock.
3360 * Input:
3361 * ha = adapter block pointer.
3363 static void
3364 qla1280_isp_cmd(struct scsi_qla_host *ha)
3366 struct device_reg __iomem *reg = ha->iobase;
3368 ENTER("qla1280_isp_cmd");
3370 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3371 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3372 REQUEST_ENTRY_SIZE);
3374 /* Adjust ring index. */
3375 ha->req_ring_index++;
3376 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3377 ha->req_ring_index = 0;
3378 ha->request_ring_ptr = ha->request_ring;
3379 } else
3380 ha->request_ring_ptr++;
3383 * Update request index to mailbox4 (Request Queue In).
3384 * The mmiowb() ensures that this write is ordered with writes by other
3385 * CPUs. Without the mmiowb(), it is possible for the following:
3386 * CPUA posts write of index 5 to mailbox4
3387 * CPUA releases host lock
3388 * CPUB acquires host lock
3389 * CPUB posts write of index 6 to mailbox4
3390 * On PCI bus, order reverses and write of 6 posts, then index 5,
3391 * causing chip to issue full queue of stale commands
3392 * The mmiowb() prevents future writes from crossing the barrier.
3393 * See Documentation/DocBook/deviceiobook.tmpl for more information.
3395 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3396 mmiowb();
3398 LEAVE("qla1280_isp_cmd");
3401 /****************************************************************************/
3402 /* Interrupt Service Routine. */
3403 /****************************************************************************/
3405 /****************************************************************************
3406 * qla1280_isr
3407 * Calls I/O done on command completion.
3409 * Input:
3410 * ha = adapter block pointer.
3411 * done_q = done queue.
3412 ****************************************************************************/
3413 static void
3414 qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3416 struct device_reg __iomem *reg = ha->iobase;
3417 struct response *pkt;
3418 struct srb *sp = NULL;
3419 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3420 uint16_t *wptr;
3421 uint32_t index;
3422 u16 istatus;
3424 ENTER("qla1280_isr");
3426 istatus = RD_REG_WORD(&reg->istatus);
3427 if (!(istatus & (RISC_INT | PCI_INT)))
3428 return;
3430 /* Save mailbox register 5 */
3431 mailbox[5] = RD_REG_WORD(&reg->mailbox5);
3433 /* Check for mailbox interrupt. */
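/*
 * BIT_0 set in the semaphore register means the RISC has posted
 * mailbox/asynchronous-event data; otherwise the interrupt only
 * signals response-queue activity, so the RISC interrupt is simply
 * cleared before the response ring is scanned below.
 */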
3435 mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
3437 if (mailbox[0] & BIT_0) {
3438 /* Get mailbox data. */
3439 /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */
3441 wptr = &mailbox[0];
3442 *wptr++ = RD_REG_WORD(&reg->mailbox0);
3443 *wptr++ = RD_REG_WORD(&reg->mailbox1);
3444 *wptr = RD_REG_WORD(&reg->mailbox2);
3445 if (mailbox[0] != MBA_SCSI_COMPLETION) {
3446 wptr++;
3447 *wptr++ = RD_REG_WORD(&reg->mailbox3);
3448 *wptr++ = RD_REG_WORD(&reg->mailbox4);
3449 wptr++;
3450 *wptr++ = RD_REG_WORD(&reg->mailbox6);
3451 *wptr = RD_REG_WORD(&reg->mailbox7);
3454 /* Release mailbox registers. */
3456 WRT_REG_WORD(&reg->semaphore, 0);
3457 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3459 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3460 mailbox[0]);
3462 /* Handle asynchronous event */
3463 switch (mailbox[0]) {
3464 case MBA_SCSI_COMPLETION: /* Response completion */
3465 dprintk(5, "qla1280_isr: mailbox SCSI response "
3466 "completion\n");
3468 if (ha->flags.online) {
3469 /* Get outstanding command index. */
3470 index = mailbox[2] << 16 | mailbox[1];
3472 /* Validate handle. */
3473 if (index < MAX_OUTSTANDING_COMMANDS)
3474 sp = ha->outstanding_cmds[index];
3475 else
3476 sp = NULL;
3478 if (sp) {
3479 /* Free outstanding command slot. */
3480 ha->outstanding_cmds[index] = NULL;
3482 /* Save ISP completion status */
3483 CMD_RESULT(sp->cmd) = 0;
3484 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3486 /* Place block on done queue */
3487 list_add_tail(&sp->list, done_q);
3488 } else {
3490 * If we get here we have a real problem!
3492 printk(KERN_WARNING
3493 "qla1280: ISP invalid handle\n");
3496 break;
3498 case MBA_BUS_RESET: /* SCSI Bus Reset */
3499 ha->flags.reset_marker = 1;
3500 index = mailbox[6] & BIT_0;
3501 ha->bus_settings[index].reset_marker = 1;
3503 printk(KERN_DEBUG "qla1280_isr(): index %i "
3504 "asynchronous BUS_RESET\n", index);
3505 break;
3507 case MBA_SYSTEM_ERR: /* System Error */
3508 printk(KERN_WARNING
3509 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3510 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3511 mailbox[3]);
3512 break;
3514 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
3515 printk(KERN_WARNING
3516 "qla1280: ISP Request Transfer Error\n");
3517 break;
3519 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
3520 printk(KERN_WARNING
3521 "qla1280: ISP Response Transfer Error\n");
3522 break;
3524 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
3525 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3526 break;
3528 case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */
3529 dprintk(2,
3530 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3531 break;
3533 case MBA_DEVICE_RESET: /* Bus Device Reset */
3534 printk(KERN_INFO "qla1280_isr(): asynchronous "
3535 "BUS_DEVICE_RESET\n");
3537 ha->flags.reset_marker = 1;
3538 index = mailbox[6] & BIT_0;
3539 ha->bus_settings[index].reset_marker = 1;
3540 break;
3542 case MBA_BUS_MODE_CHANGE:
3543 dprintk(2,
3544 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3545 break;
3547 default:
3548 /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
3549 if (mailbox[0] < MBA_ASYNC_EVENT) {
3550 wptr = &mailbox[0];
3551 memcpy((uint16_t *) ha->mailbox_out, wptr,
3552 MAILBOX_REGISTER_COUNT *
3553 sizeof(uint16_t));
3555 if(ha->mailbox_wait != NULL)
3556 complete(ha->mailbox_wait);
3558 break;
3560 } else {
3561 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3565 * We will receive interrupts during mailbox testing prior to
3566 * the card being marked online, hence the double check.
3568 if (!(ha->flags.online && !ha->mailbox_wait)) {
3569 dprintk(2, "qla1280_isr: Response pointer Error\n");
3570 goto out;
3573 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3574 goto out;
3576 while (ha->rsp_ring_index != mailbox[5]) {
3577 pkt = ha->response_ring_ptr;
3579 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3580 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3581 dprintk(5,"qla1280_isr: response packet data\n");
3582 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3584 if (pkt->entry_type == STATUS_TYPE) {
3585 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3586 || pkt->comp_status || pkt->entry_status) {
3587 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3588 "0x%x mailbox[5] = 0x%x, comp_status "
3589 "= 0x%x, scsi_status = 0x%x\n",
3590 ha->rsp_ring_index, mailbox[5],
3591 le16_to_cpu(pkt->comp_status),
3592 le16_to_cpu(pkt->scsi_status));
3594 } else {
3595 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3596 "0x%x, mailbox[5] = 0x%x\n",
3597 ha->rsp_ring_index, mailbox[5]);
3598 dprintk(2, "qla1280_isr: response packet data\n");
3599 qla1280_dump_buffer(2, (char *)pkt,
3600 RESPONSE_ENTRY_SIZE);
3603 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3604 dprintk(2, "status: Cmd %p, handle %i\n",
3605 ha->outstanding_cmds[pkt->handle]->cmd,
3606 pkt->handle);
3607 if (pkt->entry_type == STATUS_TYPE)
3608 qla1280_status_entry(ha, pkt, done_q);
3609 else
3610 qla1280_error_entry(ha, pkt, done_q);
3611 /* Adjust ring index. */
3612 ha->rsp_ring_index++;
3613 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3614 ha->rsp_ring_index = 0;
3615 ha->response_ring_ptr = ha->response_ring;
3616 } else
3617 ha->response_ring_ptr++;
3618 WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3622 out:
3623 LEAVE("qla1280_isr");
3627 * qla1280_rst_aen
3628 * Processes asynchronous reset.
3630 * Input:
3631 * ha = adapter block pointer.
3633 static void
3634 qla1280_rst_aen(struct scsi_qla_host *ha)
3636 uint8_t bus;
3638 ENTER("qla1280_rst_aen");
3640 if (ha->flags.online && !ha->flags.reset_active &&
3641 !ha->flags.abort_isp_active) {
3642 ha->flags.reset_active = 1;
3643 while (ha->flags.reset_marker) {
3644 /* Issue marker command. */
3645 ha->flags.reset_marker = 0;
3646 for (bus = 0; bus < ha->ports &&
3647 !ha->flags.reset_marker; bus++) {
3648 if (ha->bus_settings[bus].reset_marker) {
3649 ha->bus_settings[bus].reset_marker = 0;
3650 qla1280_marker(ha, bus, 0, 0,
3651 MK_SYNC_ALL);
3657 LEAVE("qla1280_rst_aen");
3662 * qla1280_status_entry
3663 * Processes received ISP status entry.
3665 * Input:
3666 * ha = adapter block pointer.
3667 * pkt = entry pointer.
3668 * done_q = done queue.
3670 static void
3671 qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3672 struct list_head *done_q)
3674 unsigned int bus, target, lun;
3675 int sense_sz;
3676 struct srb *sp;
3677 struct scsi_cmnd *cmd;
3678 uint32_t handle = le32_to_cpu(pkt->handle);
3679 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3680 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3682 ENTER("qla1280_status_entry");
3684 /* Validate handle. */
3685 if (handle < MAX_OUTSTANDING_COMMANDS)
3686 sp = ha->outstanding_cmds[handle];
3687 else
3688 sp = NULL;
3690 if (!sp) {
3691 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3692 goto out;
3695 /* Free outstanding command slot. */
3696 ha->outstanding_cmds[handle] = NULL;
3698 cmd = sp->cmd;
3700 /* Generate LU queue on cntrl, target, LUN */
3701 bus = SCSI_BUS_32(cmd);
3702 target = SCSI_TCN_32(cmd);
3703 lun = SCSI_LUN_32(cmd);
3705 if (comp_status || scsi_status) {
3706 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3707 "0x%x, handle = 0x%x\n", comp_status,
3708 scsi_status, handle);
3711 /* Target busy or queue full */
3712 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3713 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3714 CMD_RESULT(cmd) = scsi_status & 0xff;
3715 } else {
3717 /* Save ISP completion status */
3718 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3720 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3721 if (comp_status != CS_ARS_FAILED) {
3722 uint16_t req_sense_length =
3723 le16_to_cpu(pkt->req_sense_length);
3724 if (req_sense_length < CMD_SNSLEN(cmd))
3725 sense_sz = req_sense_length;
3726 else
3727 /*
3728 * scsi_cmnd->sense_buffer is
3729 * 64 bytes, why only copy 63?
3730 * This looks wrong! /Jes
3731 */
3732 sense_sz = CMD_SNSLEN(cmd) - 1;
3734 memcpy(cmd->sense_buffer,
3735 &pkt->req_sense_data, sense_sz);
3736 } else
3737 sense_sz = 0;
3738 memset(cmd->sense_buffer + sense_sz, 0,
3739 SCSI_SENSE_BUFFERSIZE - sense_sz);
3741 dprintk(2, "qla1280_status_entry: Check "
3742 "condition Sense data, b %i, t %i, "
3743 "l %i\n", bus, target, lun);
3744 if (sense_sz)
3745 qla1280_dump_buffer(2,
3746 (char *)cmd->sense_buffer,
3747 sense_sz);
3751 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3753 /* Place command on done queue. */
3754 list_add_tail(&sp->list, done_q);
3755 out:
3756 LEAVE("qla1280_status_entry");
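/*
 * Sense handling above: on a CHECK CONDITION with successful auto request
 * sense (comp_status != CS_ARS_FAILED), the returned sense bytes are
 * copied into cmd->sense_buffer, truncated to the smaller of the
 * firmware-reported length and the midlayer buffer size, and the rest of
 * the buffer is zeroed so no stale sense data survives.
 */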
3759 /*
3760 * qla1280_error_entry
3761 * Processes error entry.
3762 *
3763 * Input:
3764 * ha = adapter block pointer.
3765 * pkt = entry pointer.
3766 * done_q = done queue.
3767 */
3768 static void
3769 qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3770 struct list_head *done_q)
3772 struct srb *sp;
3773 uint32_t handle = le32_to_cpu(pkt->handle);
3775 ENTER("qla1280_error_entry");
3777 if (pkt->entry_status & BIT_3)
3778 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3779 else if (pkt->entry_status & BIT_2)
3780 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3781 else if (pkt->entry_status & BIT_1)
3782 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3783 else
3784 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3786 /* Validate handle. */
3787 if (handle < MAX_OUTSTANDING_COMMANDS)
3788 sp = ha->outstanding_cmds[handle];
3789 else
3790 sp = NULL;
3792 if (sp) {
3793 /* Free outstanding command slot. */
3794 ha->outstanding_cmds[handle] = NULL;
3796 /* Bad payload or header */
3797 if (pkt->entry_status & (BIT_3 | BIT_2)) {
3798 /* Bad payload or header, set error status. */
3799 /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
3800 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3801 } else if (pkt->entry_status & BIT_1) { /* FULL flag */
3802 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3803 } else {
3804 /* Set error status. */
3805 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3808 CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
3810 /* Place command on done queue. */
3811 list_add_tail(&sp->list, done_q);
3813 #ifdef QLA_64BIT_PTR
3814 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3815 printk(KERN_WARNING "qla1280: Error Entry invalid handle\n");
3817 #endif
3819 LEAVE("qla1280_error_entry");
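/*
 * Result mapping used above: BAD PAYLOAD/BAD HEADER entries (BIT_3/BIT_2)
 * complete the command with DID_ERROR, a FULL entry (BIT_1) with
 * DID_BUS_BUSY so the midlayer retries it, and any other error entry falls
 * back to DID_ERROR.
 */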
3822 /*
3823 * qla1280_abort_isp
3824 * Resets ISP and aborts all outstanding commands.
3825 *
3826 * Input:
3827 * ha = adapter block pointer.
3828 *
3829 * Returns:
3830 * 0 = success
3831 */
3832 static int
3833 qla1280_abort_isp(struct scsi_qla_host *ha)
3835 struct device_reg __iomem *reg = ha->iobase;
3836 struct srb *sp;
3837 int status = 0;
3838 int cnt;
3839 int bus;
3841 ENTER("qla1280_abort_isp");
3843 if (ha->flags.abort_isp_active || !ha->flags.online)
3844 goto out;
3846 ha->flags.abort_isp_active = 1;
3848 /* Disable ISP interrupts. */
3849 qla1280_disable_intrs(ha);
3850 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3851 RD_REG_WORD(&reg->id_l);
3853 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3854 ha->host_no);
3855 /* Dequeue all commands in outstanding command list. */
3856 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3857 struct scsi_cmnd *cmd;
3858 sp = ha->outstanding_cmds[cnt];
3859 if (sp) {
3860 cmd = sp->cmd;
3861 CMD_RESULT(cmd) = DID_RESET << 16;
3862 CMD_HANDLE(cmd) = COMPLETED_HANDLE;
3863 ha->outstanding_cmds[cnt] = NULL;
3864 list_add_tail(&sp->list, &ha->done_q);
3868 qla1280_done(ha);
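/*
 * Every command flushed above completes with DID_RESET, so the SCSI
 * midlayer will retry it once the ISP has been brought back by the steps
 * below: reload the firmware, reapply the NVRAM-derived configuration,
 * re-initialize the request/response rings and reset each SCSI bus.
 */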
3870 status = qla1280_load_firmware(ha);
3871 if (status)
3872 goto out;
3874 /* Setup adapter based on NVRAM parameters. */
3875 qla1280_nvram_config (ha);
3877 status = qla1280_init_rings(ha);
3878 if (status)
3879 goto out;
3881 /* Issue SCSI reset. */
3882 for (bus = 0; bus < ha->ports; bus++)
3883 qla1280_bus_reset(ha, bus);
3885 ha->flags.abort_isp_active = 0;
3886 out:
3887 if (status) {
3888 printk(KERN_WARNING
3889 "qla1280: ISP error recovery failed, board disabled");
3890 qla1280_reset_adapter(ha);
3891 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3894 LEAVE("qla1280_abort_isp");
3895 return status;
3899 /*
3900 * qla1280_debounce_register
3901 * Debounce register.
3902 *
3903 * Input:
3904 * port = register address.
3905 *
3906 * Returns:
3907 * register value.
3908 */
3909 static u16
3910 qla1280_debounce_register(volatile u16 __iomem * addr)
3912 volatile u16 ret;
3913 volatile u16 ret2;
3915 ret = RD_REG_WORD(addr);
3916 ret2 = RD_REG_WORD(addr);
3918 if (ret == ret2)
3919 return ret;
3921 do {
3922 cpu_relax();
3923 ret = RD_REG_WORD(addr);
3924 ret2 = RD_REG_WORD(addr);
3925 } while (ret != ret2);
3927 return ret;
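/*
 * Debouncing as implemented above: the register is read twice and the
 * value is accepted only once two consecutive reads agree, which avoids
 * acting on a register sampled while the ISP is still updating it.
 */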
3931 /************************************************************************
3932 * qla1280_check_for_dead_scsi_bus *
3934 * This routine checks for a dead SCSI bus *
3935 ************************************************************************/
3936 #define SET_SXP_BANK 0x0100
3937 #define SCSI_PHASE_INVALID 0x87FF
3938 static int
3939 qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3941 uint16_t config_reg, scsi_control;
3942 struct device_reg __iomem *reg = ha->iobase;
3944 if (ha->bus_settings[bus].scsi_bus_dead) {
3945 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3946 config_reg = RD_REG_WORD(&reg->cfg_1);
3947 WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
3948 scsi_control = RD_REG_WORD(&reg->scsiControlPins);
3949 WRT_REG_WORD(&reg->cfg_1, config_reg);
3950 WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3952 if (scsi_control == SCSI_PHASE_INVALID) {
3953 ha->bus_settings[bus].scsi_bus_dead = 1;
3954 return 1; /* bus is dead */
3955 } else {
3956 ha->bus_settings[bus].scsi_bus_dead = 0;
3957 ha->bus_settings[bus].failed_reset_count = 0;
3960 return 0; /* bus is not dead */
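/*
 * The dead-bus probe above pauses the RISC, temporarily selects the SXP
 * register bank through cfg_1 so scsiControlPins can be sampled, then
 * restores cfg_1 and releases the RISC.  A reading of SCSI_PHASE_INVALID
 * (0x87FF) marks the bus dead; anything else clears the dead flag and the
 * failed-reset counter.
 */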
3963 static void
3964 qla1280_get_target_parameters(struct scsi_qla_host *ha,
3965 struct scsi_device *device)
3967 uint16_t mb[MAILBOX_REGISTER_COUNT];
3968 int bus, target, lun;
3970 bus = device->channel;
3971 target = device->id;
3972 lun = device->lun;
3975 mb[0] = MBC_GET_TARGET_PARAMETERS;
3976 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3977 mb[1] <<= 8;
3978 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3979 &mb[0]);
3981 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3983 if (mb[3] != 0) {
3984 printk(" Sync: period %d, offset %d",
3985 (mb[3] & 0xff), (mb[3] >> 8));
3986 if (mb[2] & BIT_13)
3987 printk(", Wide");
3988 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
3989 printk(", DT");
3990 } else
3991 printk(" Async");
3993 if (device->simple_tags)
3994 printk(", Tagged queuing: depth %d", device->queue_depth);
3995 printk("\n");
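/*
 * MBC_GET_TARGET_PARAMETERS usage above: mb[1] carries the target id in
 * its high byte, with BIT_7 set to address the second SCSI bus.  On
 * return, mb[3] holds the negotiated sync period (low byte) and offset
 * (high byte), BIT_13 of mb[2] indicates a wide transfer agreement, and
 * BIT_5 of mb[2] together with mb[6] is used to report DT transfers,
 * which is what the printk above decodes.
 */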
3999 #if DEBUG_QLA1280
4000 static void
4001 __qla1280_dump_buffer(char *b, int size)
4003 int cnt;
4004 u8 c;
4006 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
4007 "Bh Ch Dh Eh Fh\n");
4008 printk(KERN_DEBUG "---------------------------------------------"
4009 "------------------\n");
4011 for (cnt = 0; cnt < size;) {
4012 c = *b++;
4014 printk("0x%02x", c);
4015 cnt++;
4016 if (!(cnt % 16))
4017 printk("\n");
4018 else
4019 printk(" ");
4021 if (cnt % 16)
4022 printk("\n");
4025 /**************************************************************************
4026 * qla1280_print_scsi_cmd
4028 **************************************************************************/
4029 static void
4030 __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4032 struct scsi_qla_host *ha;
4033 struct Scsi_Host *host = CMD_HOST(cmd);
4034 struct srb *sp;
4035 /* struct scatterlist *sg; */
4037 int i;
4038 ha = (struct scsi_qla_host *)host->hostdata;
4040 sp = (struct srb *)CMD_SP(cmd);
4041 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
4042 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
4043 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
4044 CMD_CDBLEN(cmd));
4045 printk(" CDB = ");
4046 for (i = 0; i < cmd->cmd_len; i++) {
4047 printk("0x%02x ", cmd->cmnd[i]);
4049 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
4050 printk(" request buffer=0x%p, request buffer len=0x%x\n",
4051 scsi_sglist(cmd), scsi_bufflen(cmd));
4052 /* if (cmd->use_sg)
4054 sg = (struct scatterlist *) cmd->request_buffer;
4055 printk(" SG buffer: \n");
4056 qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
4057 } */
4058 printk(" tag=%d, transfersize=0x%x \n",
4059 cmd->tag, cmd->transfersize);
4060 printk(" SP=0x%p\n", CMD_SP(cmd));
4061 printk(" underflow size = 0x%x, direction=0x%x\n",
4062 cmd->underflow, cmd->sc_data_direction);
4065 /**************************************************************************
4066 * ql1280_dump_device
4068 **************************************************************************/
4069 static void
4070 ql1280_dump_device(struct scsi_qla_host *ha)
4073 struct scsi_cmnd *cp;
4074 struct srb *sp;
4075 int i;
4077 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4079 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4080 if ((sp = ha->outstanding_cmds[i]) == NULL)
4081 continue;
4082 if ((cp = sp->cmd) == NULL)
4083 continue;
4084 qla1280_print_scsi_cmd(1, cp);
4087 #endif
4090 enum tokens {
4091 TOKEN_NVRAM,
4092 TOKEN_SYNC,
4093 TOKEN_WIDE,
4094 TOKEN_PPR,
4095 TOKEN_VERBOSE,
4096 TOKEN_DEBUG,
4099 struct setup_tokens {
4100 char *token;
4101 int val;
4104 static struct setup_tokens setup_token[] __initdata =
4106 { "nvram", TOKEN_NVRAM },
4107 { "sync", TOKEN_SYNC },
4108 { "wide", TOKEN_WIDE },
4109 { "ppr", TOKEN_PPR },
4110 { "verbose", TOKEN_VERBOSE },
4111 { "debug", TOKEN_DEBUG },
4115 /**************************************************************************
4116 * qla1280_setup
4118 * Handle boot parameters. This really needs to be changed so one
4119 * can specify per adapter parameters.
4120 **************************************************************************/
4121 static int __init
4122 qla1280_setup(char *s)
4124 char *cp, *ptr;
4125 unsigned long val;
4126 int toke;
4128 cp = s;
4130 while (cp && (ptr = strchr(cp, ':'))) {
4131 ptr++;
4132 if (!strncmp(ptr, "yes", 3)) {
4133 val = 0x10000;
4134 ptr += 3;
4135 } else if (!strncmp(ptr, "no", 2)) {
4136 val = 0;
4137 ptr += 2;
4138 } else
4139 val = simple_strtoul(ptr, &ptr, 0);
4141 switch ((toke = qla1280_get_token(cp))) {
4142 case TOKEN_NVRAM:
4143 if (!val)
4144 driver_setup.no_nvram = 1;
4145 break;
4146 case TOKEN_SYNC:
4147 if (!val)
4148 driver_setup.no_sync = 1;
4149 else if (val != 0x10000)
4150 driver_setup.sync_mask = val;
4151 break;
4152 case TOKEN_WIDE:
4153 if (!val)
4154 driver_setup.no_wide = 1;
4155 else if (val != 0x10000)
4156 driver_setup.wide_mask = val;
4157 break;
4158 case TOKEN_PPR:
4159 if (!val)
4160 driver_setup.no_ppr = 1;
4161 else if (val != 0x10000)
4162 driver_setup.ppr_mask = val;
4163 break;
4164 case TOKEN_VERBOSE:
4165 qla1280_verbose = val;
4166 break;
4167 default:
4168 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4169 cp);
4172 cp = strchr(ptr, ';');
4173 if (cp)
4174 cp++;
4175 else {
4176 break;
4179 return 1;
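/*
 * The parser above takes "token:value" pairs separated by ';', where the
 * value may be "yes", "no" or a number.  A purely illustrative example
 * (not taken from any shipped documentation):
 *
 *	qla1280=verbose:yes;wide:no;sync:0x00ff
 *
 * would set qla1280_verbose, set driver_setup.no_wide and set
 * driver_setup.sync_mask to 0x00ff.
 */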
4183 static int __init
4184 qla1280_get_token(char *str)
4186 char *sep;
4187 long ret = -1;
4188 int i;
4190 sep = strchr(str, ':');
4192 if (sep) {
4193 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4194 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4195 ret = setup_token[i].val;
4196 break;
4201 return ret;
4205 static struct scsi_host_template qla1280_driver_template = {
4206 .module = THIS_MODULE,
4207 .proc_name = "qla1280",
4208 .name = "Qlogic ISP 1280/12160",
4209 .info = qla1280_info,
4210 .slave_configure = qla1280_slave_configure,
4211 .queuecommand = qla1280_queuecommand,
4212 .eh_abort_handler = qla1280_eh_abort,
4213 .eh_device_reset_handler= qla1280_eh_device_reset,
4214 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4215 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4216 .bios_param = qla1280_biosparam,
4217 .can_queue = MAX_OUTSTANDING_COMMANDS,
4218 .this_id = -1,
4219 .sg_tablesize = SG_ALL,
4220 .use_clustering = ENABLE_CLUSTERING,
4224 static int
4225 qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4227 int devnum = id->driver_data;
4228 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4229 struct Scsi_Host *host;
4230 struct scsi_qla_host *ha;
4231 int error = -ENODEV;
4233 /* Bypass all AMI SUBSYS VENDOR IDs */
4234 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4235 printk(KERN_INFO
4236 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4237 goto error;
4240 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4241 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4243 if (pci_enable_device(pdev)) {
4244 printk(KERN_WARNING
4245 "qla1280: Failed to enabled pci device, aborting.\n");
4246 goto error;
4249 pci_set_master(pdev);
4251 error = -ENOMEM;
4252 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4253 if (!host) {
4254 printk(KERN_WARNING
4255 "qla1280: Failed to register host, aborting.\n");
4256 goto error_disable_device;
4259 ha = (struct scsi_qla_host *)host->hostdata;
4260 memset(ha, 0, sizeof(struct scsi_qla_host));
4262 ha->pdev = pdev;
4263 ha->devnum = devnum; /* specifies microcode load address */
4265 #ifdef QLA_64BIT_PTR
4266 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
4267 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4268 printk(KERN_WARNING "scsi(%li): Unable to set a "
4269 "suitable DMA mask - aborting\n", ha->host_no);
4270 error = -ENODEV;
4271 goto error_put_host;
4273 } else
4274 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4275 ha->host_no);
4276 #else
4277 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4278 printk(KERN_WARNING "scsi(%li): Unable to set a "
4279 "suitable DMA mask - aborting\n", ha->host_no);
4280 error = -ENODEV;
4281 goto error_put_host;
4283 #endif
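/*
 * DMA mask selection above: a QLA_64BIT_PTR build first tries a 64-bit
 * mask and falls back to 32 bits; a plain build only attempts the 32-bit
 * mask.  Failure to set any usable mask aborts the probe.
 */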
4285 ha->request_ring = pci_alloc_consistent(ha->pdev,
4286 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4287 &ha->request_dma);
4288 if (!ha->request_ring) {
4289 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4290 goto error_put_host;
4293 ha->response_ring = pci_alloc_consistent(ha->pdev,
4294 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4295 &ha->response_dma);
4296 if (!ha->response_ring) {
4297 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4298 goto error_free_request_ring;
4301 ha->ports = bdp->numPorts;
4303 ha->host = host;
4304 ha->host_no = host->host_no;
4306 host->irq = pdev->irq;
4307 host->max_channel = bdp->numPorts - 1;
4308 host->max_lun = MAX_LUNS - 1;
4309 host->max_id = MAX_TARGETS;
4310 host->max_sectors = 1024;
4311 host->unique_id = host->host_no;
4313 error = -ENODEV;
4315 #if MEMORY_MAPPED_IO
4316 ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
4317 if (!ha->mmpbase) {
4318 printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
4319 goto error_free_response_ring;
4322 host->base = (unsigned long)ha->mmpbase;
4323 ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
4324 #else
4325 host->io_port = pci_resource_start(ha->pdev, 0);
4326 if (!request_region(host->io_port, 0xff, "qla1280")) {
4327 printk(KERN_INFO "qla1280: Failed to reserve i/o region "
4328 "0x%04lx-0x%04lx - already in use\n",
4329 host->io_port, host->io_port + 0xff);
4330 goto error_free_response_ring;
4333 ha->iobase = (struct device_reg *)host->io_port;
4334 #endif
4336 INIT_LIST_HEAD(&ha->done_q);
4338 /* Disable ISP interrupts. */
4339 qla1280_disable_intrs(ha);
4341 if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
4342 "qla1280", ha)) {
4343 printk("qla1280 : Failed to reserve interrupt %d already "
4344 "in use\n", pdev->irq);
4345 goto error_release_region;
4348 /* load the F/W, read parameters, and init the H/W */
4349 if (qla1280_initialize_adapter(ha)) {
4350 printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
4351 goto error_free_irq;
4354 /* set our host ID (need to do something about our two IDs) */
4355 host->this_id = ha->bus_settings[0].id;
4357 pci_set_drvdata(pdev, host);
4359 error = scsi_add_host(host, &pdev->dev);
4360 if (error)
4361 goto error_disable_adapter;
4362 scsi_scan_host(host);
4364 return 0;
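/*
 * The error labels below unwind in reverse order of acquisition: IRQ,
 * I/O mapping or port region, response ring, request ring, Scsi_Host and
 * finally the PCI device, so each failure path releases exactly what has
 * been set up so far.
 */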
4366 error_disable_adapter:
4367 qla1280_disable_intrs(ha);
4368 error_free_irq:
4369 free_irq(pdev->irq, ha);
4370 error_release_region:
4371 #if MEMORY_MAPPED_IO
4372 iounmap(ha->mmpbase);
4373 #else
4374 release_region(host->io_port, 0xff);
4375 #endif
4376 error_free_response_ring:
4377 pci_free_consistent(ha->pdev,
4378 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4379 ha->response_ring, ha->response_dma);
4380 error_free_request_ring:
4381 pci_free_consistent(ha->pdev,
4382 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4383 ha->request_ring, ha->request_dma);
4384 error_put_host:
4385 scsi_host_put(host);
4386 error_disable_device:
4387 pci_disable_device(pdev);
4388 error:
4389 return error;
4393 static void
4394 qla1280_remove_one(struct pci_dev *pdev)
4396 struct Scsi_Host *host = pci_get_drvdata(pdev);
4397 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
4399 scsi_remove_host(host);
4401 qla1280_disable_intrs(ha);
4403 free_irq(pdev->irq, ha);
4405 #if MEMORY_MAPPED_IO
4406 iounmap(ha->mmpbase);
4407 #else
4408 release_region(host->io_port, 0xff);
4409 #endif
4411 pci_free_consistent(ha->pdev,
4412 ((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
4413 ha->request_ring, ha->request_dma);
4414 pci_free_consistent(ha->pdev,
4415 ((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
4416 ha->response_ring, ha->response_dma);
4418 pci_disable_device(pdev);
4420 scsi_host_put(host);
4423 static struct pci_driver qla1280_pci_driver = {
4424 .name = "qla1280",
4425 .id_table = qla1280_pci_tbl,
4426 .probe = qla1280_probe_one,
4427 .remove = qla1280_remove_one,
4430 static int __init
4431 qla1280_init(void)
4433 if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
4434 printk(KERN_WARNING
4435 "qla1280: struct srb too big, aborting\n");
4436 return -EINVAL;
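/*
 * This size check suggests the driver keeps its per-command struct srb in
 * the scsi_cmnd's scsi_pointer (SCp) area (compare the CMD_SP() cast in
 * __qla1280_print_scsi_cmd), so struct srb must never outgrow
 * struct scsi_pointer.
 */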
4439 #ifdef MODULE
4440 /*
4441 * If we are called as a module, the qla1280 pointer may not be null
4442 * and it would point to our bootup string, just like on the lilo
4443 * command line. If not NULL, then process this config string with
4444 * qla1280_setup.
4445 *
4446 * Boot time options:
4447 * To add options at boot time, add a line to your lilo.conf file like:
4448 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
4449 * which would set the first four devices on the first two
4450 * controllers to a tagged queue depth of 255.
4451 */
4452 if (qla1280)
4453 qla1280_setup(qla1280);
4454 #endif
4456 return pci_register_driver(&qla1280_pci_driver);
4459 static void __exit
4460 qla1280_exit(void)
4462 int i;
4464 pci_unregister_driver(&qla1280_pci_driver);
4465 /* release any allocated firmware images */
4466 for (i = 0; i < QL_NUM_FW_IMAGES; i++) {
4467 release_firmware(qla1280_fw_tbl[i].fw);
4468 qla1280_fw_tbl[i].fw = NULL;
4472 module_init(qla1280_init);
4473 module_exit(qla1280_exit);
4475 MODULE_AUTHOR("Qlogic & Jes Sorensen");
4476 MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
4477 MODULE_LICENSE("GPL");
4478 MODULE_FIRMWARE("qlogic/1040.bin");
4479 MODULE_FIRMWARE("qlogic/1280.bin");
4480 MODULE_FIRMWARE("qlogic/12160.bin");
4481 MODULE_VERSION(QLA1280_VERSION);
4484 * Overrides for Emacs so that we almost follow Linus's tabbing style.
4485 * Emacs will notice this stuff at the end of the file and automatically
4486 * adjust the settings for this buffer only. This must remain at the end
4487 * of the file.
4488 * ---------------------------------------------------------------------------
4489 * Local variables:
4490 * c-basic-offset: 8
4491 * tab-width: 8
4492 * End: