Linux 2.6.34-rc3: drivers/scsi/ipr.c (pohmelfs.git)
1 /*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 * Notes:
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
43 * - Hot spare
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
47 * by adding disks
49 * Driver Features:
50 * - Tagged command queuing
51 * - Adapter microcode download
52 * - PCI hot plug
53 * - SCSI device hot plug
57 #include <linux/fs.h>
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/ioport.h>
63 #include <linux/delay.h>
64 #include <linux/pci.h>
65 #include <linux/wait.h>
66 #include <linux/spinlock.h>
67 #include <linux/sched.h>
68 #include <linux/interrupt.h>
69 #include <linux/blkdev.h>
70 #include <linux/firmware.h>
71 #include <linux/module.h>
72 #include <linux/moduleparam.h>
73 #include <linux/libata.h>
74 #include <linux/hdreg.h>
75 #include <linux/reboot.h>
76 #include <linux/stringify.h>
77 #include <asm/io.h>
78 #include <asm/irq.h>
79 #include <asm/processor.h>
80 #include <scsi/scsi.h>
81 #include <scsi/scsi_host.h>
82 #include <scsi/scsi_tcq.h>
83 #include <scsi/scsi_eh.h>
84 #include <scsi/scsi_cmnd.h>
85 #include "ipr.h"
88 * Global Data
90 static LIST_HEAD(ipr_ioa_head);
91 static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
92 static unsigned int ipr_max_speed = 1;
93 static int ipr_testmode = 0;
94 static unsigned int ipr_fastfail = 0;
95 static unsigned int ipr_transop_timeout = 0;
96 static unsigned int ipr_debug = 0;
97 static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
98 static unsigned int ipr_dual_ioa_raid = 1;
99 static DEFINE_SPINLOCK(ipr_driver_lock);
101 /* This table describes the differences between DMA controller chips */
102 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
103 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
104 .mailbox = 0x0042C,
105 .cache_line_size = 0x20,
107 .set_interrupt_mask_reg = 0x0022C,
108 .clr_interrupt_mask_reg = 0x00230,
109 .clr_interrupt_mask_reg32 = 0x00230,
110 .sense_interrupt_mask_reg = 0x0022C,
111 .sense_interrupt_mask_reg32 = 0x0022C,
112 .clr_interrupt_reg = 0x00228,
113 .clr_interrupt_reg32 = 0x00228,
114 .sense_interrupt_reg = 0x00224,
115 .sense_interrupt_reg32 = 0x00224,
116 .ioarrin_reg = 0x00404,
117 .sense_uproc_interrupt_reg = 0x00214,
118 .sense_uproc_interrupt_reg32 = 0x00214,
119 .set_uproc_interrupt_reg = 0x00214,
120 .set_uproc_interrupt_reg32 = 0x00214,
121 .clr_uproc_interrupt_reg = 0x00218,
122 .clr_uproc_interrupt_reg32 = 0x00218
125 { /* Snipe and Scamp */
126 .mailbox = 0x0052C,
127 .cache_line_size = 0x20,
129 .set_interrupt_mask_reg = 0x00288,
130 .clr_interrupt_mask_reg = 0x0028C,
131 .clr_interrupt_mask_reg32 = 0x0028C,
132 .sense_interrupt_mask_reg = 0x00288,
133 .sense_interrupt_mask_reg32 = 0x00288,
134 .clr_interrupt_reg = 0x00284,
135 .clr_interrupt_reg32 = 0x00284,
136 .sense_interrupt_reg = 0x00280,
137 .sense_interrupt_reg32 = 0x00280,
138 .ioarrin_reg = 0x00504,
139 .sense_uproc_interrupt_reg = 0x00290,
140 .sense_uproc_interrupt_reg32 = 0x00290,
141 .set_uproc_interrupt_reg = 0x00290,
142 .set_uproc_interrupt_reg32 = 0x00290,
143 .clr_uproc_interrupt_reg = 0x00294,
144 .clr_uproc_interrupt_reg32 = 0x00294
147 { /* CRoC */
148 .mailbox = 0x00040,
149 .cache_line_size = 0x20,
151 .set_interrupt_mask_reg = 0x00010,
152 .clr_interrupt_mask_reg = 0x00018,
153 .clr_interrupt_mask_reg32 = 0x0001C,
154 .sense_interrupt_mask_reg = 0x00010,
155 .sense_interrupt_mask_reg32 = 0x00014,
156 .clr_interrupt_reg = 0x00008,
157 .clr_interrupt_reg32 = 0x0000C,
158 .sense_interrupt_reg = 0x00000,
159 .sense_interrupt_reg32 = 0x00004,
160 .ioarrin_reg = 0x00070,
161 .sense_uproc_interrupt_reg = 0x00020,
162 .sense_uproc_interrupt_reg32 = 0x00024,
163 .set_uproc_interrupt_reg = 0x00020,
164 .set_uproc_interrupt_reg32 = 0x00024,
165 .clr_uproc_interrupt_reg = 0x00028,
166 .clr_uproc_interrupt_reg32 = 0x0002C,
167 .init_feedback_reg = 0x0005C,
168 .dump_addr_reg = 0x00064,
169 .dump_data_reg = 0x00068
174 static const struct ipr_chip_t ipr_chip[] = {
175 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
176 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
177 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
178 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
179 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
180 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
181 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
182 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
183 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
186 static int ipr_max_bus_speeds [] = {
187 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
190 MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
191 MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
192 module_param_named(max_speed, ipr_max_speed, uint, 0);
193 MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
194 module_param_named(log_level, ipr_log_level, uint, 0);
195 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
196 module_param_named(testmode, ipr_testmode, int, 0);
197 MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
198 module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
199 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
200 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
201 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
202 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
203 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
204 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
205 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
206 module_param_named(max_devs, ipr_max_devs, int, 0);
207 MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
208 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
209 MODULE_LICENSE("GPL");
210 MODULE_VERSION(IPR_DRIVER_VERSION);
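/*
 * Illustrative use of the module parameters declared above (example values
 * only, not recommendations):
 *
 *   modprobe ipr max_speed=2 log_level=2 fastfail=1 transop_timeout=360
 */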
212 /* A constant array of IOASCs/URCs/Error Messages */
213 static const
214 struct ipr_error_table_t ipr_error_table[] = {
215 {0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
216 "8155: An unknown error was received"},
217 {0x00330000, 0, 0,
218 "Soft underlength error"},
219 {0x005A0000, 0, 0,
220 "Command to be cancelled not found"},
221 {0x00808000, 0, 0,
222 "Qualified success"},
223 {0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
224 "FFFE: Soft device bus error recovered by the IOA"},
225 {0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
226 "4101: Soft device bus fabric error"},
227 {0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
228 "FFFC: Logical block guard error recovered by the device"},
229 {0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
230 "FFFC: Logical block reference tag error recovered by the device"},
231 {0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
232 "4171: Recovered scatter list tag / sequence number error"},
233 {0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
234 "FF3D: Recovered logical block CRC error on IOA to Host transfer"},
235 {0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
236 "4171: Recovered logical block sequence number error on IOA to Host transfer"},
237 {0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
238 "FFFD: Recovered logical block reference tag error detected by the IOA"},
239 {0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
240 "FFFD: Logical block guard error recovered by the IOA"},
241 {0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
242 "FFF9: Device sector reassign successful"},
243 {0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
244 "FFF7: Media error recovered by device rewrite procedures"},
245 {0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
246 "7001: IOA sector reassignment successful"},
247 {0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
248 "FFF9: Soft media error. Sector reassignment recommended"},
249 {0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
250 "FFF7: Media error recovered by IOA rewrite procedures"},
251 {0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
252 "FF3D: Soft PCI bus error recovered by the IOA"},
253 {0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
254 "FFF6: Device hardware error recovered by the IOA"},
255 {0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
256 "FFF6: Device hardware error recovered by the device"},
257 {0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
258 "FF3D: Soft IOA error recovered by the IOA"},
259 {0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
260 "FFFA: Undefined device response recovered by the IOA"},
261 {0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
262 "FFF6: Device bus error, message or command phase"},
263 {0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
264 "FFFE: Task Management Function failed"},
265 {0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
266 "FFF6: Failure prediction threshold exceeded"},
267 {0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
268 "8009: Impending cache battery pack failure"},
269 {0x02040400, 0, 0,
270 "34FF: Disk device format in progress"},
271 {0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
272 "9070: IOA requested reset"},
273 {0x023F0000, 0, 0,
274 "Synchronization required"},
275 {0x024E0000, 0, 0,
276 "No ready, IOA shutdown"},
277 {0x025A0000, 0, 0,
278 "Not ready, IOA has been shutdown"},
279 {0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
280 "3020: Storage subsystem configuration error"},
281 {0x03110B00, 0, 0,
282 "FFF5: Medium error, data unreadable, recommend reassign"},
283 {0x03110C00, 0, 0,
284 "7000: Medium error, data unreadable, do not reassign"},
285 {0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
286 "FFF3: Disk media format bad"},
287 {0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
288 "3002: Addressed device failed to respond to selection"},
289 {0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
290 "3100: Device bus error"},
291 {0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
292 "3109: IOA timed out a device command"},
293 {0x04088000, 0, 0,
294 "3120: SCSI bus is not operational"},
295 {0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
296 "4100: Hard device bus fabric error"},
297 {0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
298 "310C: Logical block guard error detected by the device"},
299 {0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
300 "310C: Logical block reference tag error detected by the device"},
301 {0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
302 "4170: Scatter list tag / sequence number error"},
303 {0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
304 "8150: Logical block CRC error on IOA to Host transfer"},
305 {0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
306 "4170: Logical block sequence number error on IOA to Host transfer"},
307 {0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
308 "310D: Logical block reference tag error detected by the IOA"},
309 {0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
310 "310D: Logical block guard error detected by the IOA"},
311 {0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
312 "9000: IOA reserved area data check"},
313 {0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
314 "9001: IOA reserved area invalid data pattern"},
315 {0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
316 "9002: IOA reserved area LRC error"},
317 {0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
318 "Hardware Error, IOA metadata access error"},
319 {0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
320 "102E: Out of alternate sectors for disk storage"},
321 {0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
322 "FFF4: Data transfer underlength error"},
323 {0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
324 "FFF4: Data transfer overlength error"},
325 {0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
326 "3400: Logical unit failure"},
327 {0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
328 "FFF4: Device microcode is corrupt"},
329 {0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
330 "8150: PCI bus error"},
331 {0x04430000, 1, 0,
332 "Unsupported device bus message received"},
333 {0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
334 "FFF4: Disk device problem"},
335 {0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
336 "8150: Permanent IOA failure"},
337 {0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
338 "3010: Disk device returned wrong response to IOA"},
339 {0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
340 "8151: IOA microcode error"},
341 {0x04448500, 0, 0,
342 "Device bus status error"},
343 {0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
344 "8157: IOA error requiring IOA reset to recover"},
345 {0x04448700, 0, 0,
346 "ATA device status error"},
347 {0x04490000, 0, 0,
348 "Message reject received from the device"},
349 {0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
350 "8008: A permanent cache battery pack failure occurred"},
351 {0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
352 "9090: Disk unit has been modified after the last known status"},
353 {0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
354 "9081: IOA detected device error"},
355 {0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
356 "9082: IOA detected device error"},
357 {0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
358 "3110: Device bus error, message or command phase"},
359 {0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
360 "3110: SAS Command / Task Management Function failed"},
361 {0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
362 "9091: Incorrect hardware configuration change has been detected"},
363 {0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
364 "9073: Invalid multi-adapter configuration"},
365 {0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
366 "4010: Incorrect connection between cascaded expanders"},
367 {0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
368 "4020: Connections exceed IOA design limits"},
369 {0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
370 "4030: Incorrect multipath connection"},
371 {0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
372 "4110: Unsupported enclosure function"},
373 {0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
374 "FFF4: Command to logical unit failed"},
375 {0x05240000, 1, 0,
376 "Illegal request, invalid request type or request packet"},
377 {0x05250000, 0, 0,
378 "Illegal request, invalid resource handle"},
379 {0x05258000, 0, 0,
380 "Illegal request, commands not allowed to this device"},
381 {0x05258100, 0, 0,
382 "Illegal request, command not allowed to a secondary adapter"},
383 {0x05258200, 0, 0,
384 "Illegal request, command not allowed to a non-optimized resource"},
385 {0x05260000, 0, 0,
386 "Illegal request, invalid field in parameter list"},
387 {0x05260100, 0, 0,
388 "Illegal request, parameter not supported"},
389 {0x05260200, 0, 0,
390 "Illegal request, parameter value invalid"},
391 {0x052C0000, 0, 0,
392 "Illegal request, command sequence error"},
393 {0x052C8000, 1, 0,
394 "Illegal request, dual adapter support not enabled"},
395 {0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
396 "9031: Array protection temporarily suspended, protection resuming"},
397 {0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
398 "9040: Array protection temporarily suspended, protection resuming"},
399 {0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
400 "3140: Device bus not ready to ready transition"},
401 {0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
402 "FFFB: SCSI bus was reset"},
403 {0x06290500, 0, 0,
404 "FFFE: SCSI bus transition to single ended"},
405 {0x06290600, 0, 0,
406 "FFFE: SCSI bus transition to LVD"},
407 {0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
408 "FFFB: SCSI bus was reset by another initiator"},
409 {0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
410 "3029: A device replacement has occurred"},
411 {0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
412 "9051: IOA cache data exists for a missing or failed device"},
413 {0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
414 "9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
415 {0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
416 "9025: Disk unit is not supported at its physical location"},
417 {0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
418 "3020: IOA detected a SCSI bus configuration error"},
419 {0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
420 "3150: SCSI bus configuration error"},
421 {0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
422 "9074: Asymmetric advanced function disk configuration"},
423 {0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
424 "4040: Incomplete multipath connection between IOA and enclosure"},
425 {0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
426 "4041: Incomplete multipath connection between enclosure and device"},
427 {0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
428 "9075: Incomplete multipath connection between IOA and remote IOA"},
429 {0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
430 "9076: Configuration error, missing remote IOA"},
431 {0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
432 "4050: Enclosure does not support a required multipath function"},
433 {0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
434 "4070: Logically bad block written on device"},
435 {0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
436 "9041: Array protection temporarily suspended"},
437 {0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
438 "9042: Corrupt array parity detected on specified device"},
439 {0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
440 "9030: Array no longer protected due to missing or failed disk unit"},
441 {0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
442 "9071: Link operational transition"},
443 {0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
444 "9072: Link not operational transition"},
445 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
446 "9032: Array exposed but still protected"},
447 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
448 "70DD: Device forced failed by disrupt device command"},
449 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
450 "4061: Multipath redundancy level got better"},
451 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
452 "4060: Multipath redundancy level got worse"},
453 {0x07270000, 0, 0,
454 "Failure due to other device"},
455 {0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
456 "9008: IOA does not support functions expected by devices"},
457 {0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
458 "9010: Cache data associated with attached devices cannot be found"},
459 {0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
460 "9011: Cache data belongs to devices other than those attached"},
461 {0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
462 "9020: Array missing 2 or more devices with only 1 device present"},
463 {0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
464 "9021: Array missing 2 or more devices with 2 or more devices present"},
465 {0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
466 "9022: Exposed array is missing a required device"},
467 {0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
468 "9023: Array member(s) not at required physical locations"},
469 {0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
470 "9024: Array not functional due to present hardware configuration"},
471 {0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
472 "9026: Array not functional due to present hardware configuration"},
473 {0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
474 "9027: Array is missing a device and parity is out of sync"},
475 {0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
476 "9028: Maximum number of arrays already exist"},
477 {0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
478 "9050: Required cache data cannot be located for a disk unit"},
479 {0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
480 "9052: Cache data exists for a device that has been modified"},
481 {0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
482 "9054: IOA resources not available due to previous problems"},
483 {0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
484 "9092: Disk unit requires initialization before use"},
485 {0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
486 "9029: Incorrect hardware configuration change has been detected"},
487 {0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
488 "9060: One or more disk pairs are missing from an array"},
489 {0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
490 "9061: One or more disks are missing from an array"},
491 {0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
492 "9062: One or more disks are missing from an array"},
493 {0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
494 "9063: Maximum number of functional arrays has been exceeded"},
495 {0x0B260000, 0, 0,
496 "Aborted command, invalid descriptor"},
497 {0x0B5A0000, 0, 0,
498 "Command terminated by host"}
501 static const struct ipr_ses_table_entry ipr_ses_table[] = {
502 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
503 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
504 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
505 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
506 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
507 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
508 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
509 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
510 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
511 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
512 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
513 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
514 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
518 * Function Prototypes
520 static int ipr_reset_alert(struct ipr_cmnd *);
521 static void ipr_process_ccn(struct ipr_cmnd *);
522 static void ipr_process_error(struct ipr_cmnd *);
523 static void ipr_reset_ioa_job(struct ipr_cmnd *);
524 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
525 enum ipr_shutdown_type);
527 #ifdef CONFIG_SCSI_IPR_TRACE
529 * ipr_trc_hook - Add a trace entry to the driver trace
530 * @ipr_cmd: ipr command struct
531 * @type: trace type
532 * @add_data: additional data
534 * Return value:
535 * none
537 static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
538 u8 type, u32 add_data)
540 struct ipr_trace_entry *trace_entry;
541 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
543 trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
544 trace_entry->time = jiffies;
545 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
546 trace_entry->type = type;
547 if (ipr_cmd->ioa_cfg->sis64)
548 trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
549 else
550 trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
551 trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
552 trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
553 trace_entry->u.add_data = add_data;
555 #else
556 #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
557 #endif
560 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
561 * @ipr_cmd: ipr command struct
563 * Return value:
564 * none
566 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
568 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
569 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
570 dma_addr_t dma_addr = ipr_cmd->dma_addr;
572 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
573 ioarcb->data_transfer_length = 0;
574 ioarcb->read_data_transfer_length = 0;
575 ioarcb->ioadl_len = 0;
576 ioarcb->read_ioadl_len = 0;
578 if (ipr_cmd->ioa_cfg->sis64)
579 ioarcb->u.sis64_addr_data.data_ioadl_addr =
580 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
581 else {
582 ioarcb->write_ioadl_addr =
583 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
584 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
587 ioasa->ioasc = 0;
588 ioasa->residual_data_len = 0;
589 ioasa->u.gata.status = 0;
591 ipr_cmd->scsi_cmd = NULL;
592 ipr_cmd->qc = NULL;
593 ipr_cmd->sense_buffer[0] = 0;
594 ipr_cmd->dma_use_sg = 0;
598 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
599 * @ipr_cmd: ipr command struct
601 * Return value:
602 * none
604 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
606 ipr_reinit_ipr_cmnd(ipr_cmd);
607 ipr_cmd->u.scratch = 0;
608 ipr_cmd->sibling = NULL;
609 init_timer(&ipr_cmd->timer);
613 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
614 * @ioa_cfg: ioa config struct
616 * Return value:
617 * pointer to ipr command struct
619 static
620 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
622 struct ipr_cmnd *ipr_cmd;
624 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
625 list_del(&ipr_cmd->queue);
626 ipr_init_ipr_cmnd(ipr_cmd);
628 return ipr_cmd;
632 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
633 * @ioa_cfg: ioa config struct
634 * @clr_ints: interrupts to clear
636 * This function masks all interrupts on the adapter, then clears the
637 * interrupts specified in the mask
639 * Return value:
640 * none
642 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
643 u32 clr_ints)
645 volatile u32 int_reg;
647 /* Stop new interrupts */
648 ioa_cfg->allow_interrupts = 0;
650 /* Set interrupt mask to stop all new interrupts */
651 if (ioa_cfg->sis64)
652 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
653 else
654 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
656 /* Clear any pending interrupts */
657 if (ioa_cfg->sis64)
658 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
659 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
660 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
664 * ipr_save_pcix_cmd_reg - Save PCI-X command register
665 * @ioa_cfg: ioa config struct
667 * Return value:
668 * 0 on success / -EIO on failure
670 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
672 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
674 if (pcix_cmd_reg == 0)
675 return 0;
677 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
678 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
679 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
680 return -EIO;
683 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
684 return 0;
688 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
689 * @ioa_cfg: ioa config struct
691 * Return value:
692 * 0 on success / -EIO on failure
694 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
696 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
698 if (pcix_cmd_reg) {
699 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
700 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
701 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
702 return -EIO;
706 return 0;
710 * ipr_sata_eh_done - done function for aborted SATA commands
711 * @ipr_cmd: ipr command struct
713 * This function is invoked for ops generated to SATA
714 * devices which are being aborted.
716 * Return value:
717 * none
719 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
721 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
722 struct ata_queued_cmd *qc = ipr_cmd->qc;
723 struct ipr_sata_port *sata_port = qc->ap->private_data;
725 qc->err_mask |= AC_ERR_OTHER;
726 sata_port->ioasa.status |= ATA_BUSY;
727 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
728 ata_qc_complete(qc);
732 * ipr_scsi_eh_done - mid-layer done function for aborted ops
733 * @ipr_cmd: ipr command struct
735 * This function is invoked by the interrupt handler for
736 * ops generated by the SCSI mid-layer which are being aborted.
738 * Return value:
739 * none
741 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
743 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
744 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
746 scsi_cmd->result |= (DID_ERROR << 16);
748 scsi_dma_unmap(ipr_cmd->scsi_cmd);
749 scsi_cmd->scsi_done(scsi_cmd);
750 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
754 * ipr_fail_all_ops - Fails all outstanding ops.
755 * @ioa_cfg: ioa config struct
757 * This function fails all outstanding ops.
759 * Return value:
760 * none
762 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
764 struct ipr_cmnd *ipr_cmd, *temp;
766 ENTER;
767 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
768 list_del(&ipr_cmd->queue);
770 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
771 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
773 if (ipr_cmd->scsi_cmd)
774 ipr_cmd->done = ipr_scsi_eh_done;
775 else if (ipr_cmd->qc)
776 ipr_cmd->done = ipr_sata_eh_done;
778 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
779 del_timer(&ipr_cmd->timer);
780 ipr_cmd->done(ipr_cmd);
783 LEAVE;
787 * ipr_send_command - Send driver initiated requests.
788 * @ipr_cmd: ipr command struct
790 * This function sends a command to the adapter using the correct write call.
 791  * In the case of sis64, the required ioarcb size is calculated and the
 792  * appropriate size bits are ORed into the IOARCB address before it is written.
794 * Return value:
795 * none
797 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
799 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
800 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
802 if (ioa_cfg->sis64) {
803 /* The default size is 256 bytes */
804 send_dma_addr |= 0x1;
806 /* If the number of ioadls * size of ioadl > 128 bytes,
807 then use a 512 byte ioarcb */
808 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
809 send_dma_addr |= 0x4;
810 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
811 } else
812 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
816 * ipr_do_req - Send driver initiated requests.
817 * @ipr_cmd: ipr command struct
818 * @done: done function
819 * @timeout_func: timeout function
820 * @timeout: timeout value
822 * This function sends the specified command to the adapter with the
823 * timeout given. The done function is invoked on command completion.
825 * Return value:
826 * none
828 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
829 void (*done) (struct ipr_cmnd *),
830 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
832 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
834 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
836 ipr_cmd->done = done;
838 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
839 ipr_cmd->timer.expires = jiffies + timeout;
840 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
842 add_timer(&ipr_cmd->timer);
844 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
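/* The barrier below ensures all prior writes to the IOARCB are complete
   before the command is handed to the adapter in ipr_send_command(). */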
846 mb();
848 ipr_send_command(ipr_cmd);
852 * ipr_internal_cmd_done - Op done function for an internally generated op.
853 * @ipr_cmd: ipr command struct
855 * This function is the op done function for an internally generated,
856 * blocking op. It simply wakes the sleeping thread.
858 * Return value:
859 * none
861 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
863 if (ipr_cmd->sibling)
864 ipr_cmd->sibling = NULL;
865 else
866 complete(&ipr_cmd->completion);
870 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
871 * @ipr_cmd: ipr command struct
872 * @dma_addr: dma address
873 * @len: transfer length
874 * @flags: ioadl flag value
876 * This function initializes an ioadl in the case where there is only a single
877 * descriptor.
879 * Return value:
880 * nothing
882 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
883 u32 len, int flags)
885 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
886 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
888 ipr_cmd->dma_use_sg = 1;
890 if (ipr_cmd->ioa_cfg->sis64) {
891 ioadl64->flags = cpu_to_be32(flags);
892 ioadl64->data_len = cpu_to_be32(len);
893 ioadl64->address = cpu_to_be64(dma_addr);
895 ipr_cmd->ioarcb.ioadl_len =
896 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
897 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
898 } else {
899 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
900 ioadl->address = cpu_to_be32(dma_addr);
902 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
903 ipr_cmd->ioarcb.read_ioadl_len =
904 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
905 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
906 } else {
907 ipr_cmd->ioarcb.ioadl_len =
908 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
909 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
915 * ipr_send_blocking_cmd - Send command and sleep on its completion.
916 * @ipr_cmd: ipr command struct
917 * @timeout_func: function to invoke if command times out
918 * @timeout: timeout
920 * Return value:
921 * none
923 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
924 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
925 u32 timeout)
927 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
929 init_completion(&ipr_cmd->completion);
930 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
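/* Caller holds host->host_lock: drop it while sleeping on the
   completion below and re-acquire it before returning. */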
932 spin_unlock_irq(ioa_cfg->host->host_lock);
933 wait_for_completion(&ipr_cmd->completion);
934 spin_lock_irq(ioa_cfg->host->host_lock);
938 * ipr_send_hcam - Send an HCAM to the adapter.
939 * @ioa_cfg: ioa config struct
940 * @type: HCAM type
941 * @hostrcb: hostrcb struct
943 * This function will send a Host Controlled Async command to the adapter.
944 * If HCAMs are currently not allowed to be issued to the adapter, it will
945 * place the hostrcb on the free queue.
947 * Return value:
948 * none
950 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
951 struct ipr_hostrcb *hostrcb)
953 struct ipr_cmnd *ipr_cmd;
954 struct ipr_ioarcb *ioarcb;
956 if (ioa_cfg->allow_cmds) {
957 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
958 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
959 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
961 ipr_cmd->u.hostrcb = hostrcb;
962 ioarcb = &ipr_cmd->ioarcb;
964 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
965 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
966 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
967 ioarcb->cmd_pkt.cdb[1] = type;
968 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
969 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
971 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
972 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
974 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
975 ipr_cmd->done = ipr_process_ccn;
976 else
977 ipr_cmd->done = ipr_process_error;
979 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
981 mb();
983 ipr_send_command(ipr_cmd);
984 } else {
985 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
990 * ipr_update_ata_class - Update the ata class in the resource entry
991 * @res: resource entry struct
992 * @proto: cfgte device bus protocol value
994 * Return value:
995 * none
997 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
999 switch(proto) {
1000 case IPR_PROTO_SATA:
1001 case IPR_PROTO_SAS_STP:
1002 res->ata_class = ATA_DEV_ATA;
1003 break;
1004 case IPR_PROTO_SATA_ATAPI:
1005 case IPR_PROTO_SAS_STP_ATAPI:
1006 res->ata_class = ATA_DEV_ATAPI;
1007 break;
1008 default:
1009 res->ata_class = ATA_DEV_UNKNOWN;
1010 break;
1015 * ipr_init_res_entry - Initialize a resource entry struct.
1016 * @res: resource entry struct
1017 * @cfgtew: config table entry wrapper struct
1019 * Return value:
1020 * none
1022 static void ipr_init_res_entry(struct ipr_resource_entry *res,
1023 struct ipr_config_table_entry_wrapper *cfgtew)
1025 int found = 0;
1026 unsigned int proto;
1027 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1028 struct ipr_resource_entry *gscsi_res = NULL;
1030 res->needs_sync_complete = 0;
1031 res->in_erp = 0;
1032 res->add_to_ml = 0;
1033 res->del_from_ml = 0;
1034 res->resetting_device = 0;
1035 res->sdev = NULL;
1036 res->sata_port = NULL;
1038 if (ioa_cfg->sis64) {
1039 proto = cfgtew->u.cfgte64->proto;
1040 res->res_flags = cfgtew->u.cfgte64->res_flags;
1041 res->qmodel = IPR_QUEUEING_MODEL64(res);
1042 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1044 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1045 sizeof(res->res_path));
1047 res->bus = 0;
1048 res->lun = scsilun_to_int(&res->dev_lun);
1050 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1051 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1052 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1053 found = 1;
1054 res->target = gscsi_res->target;
1055 break;
1058 if (!found) {
1059 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1060 ioa_cfg->max_devs_supported);
1061 set_bit(res->target, ioa_cfg->target_ids);
1064 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1065 sizeof(res->dev_lun.scsi_lun));
1066 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1067 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1068 res->target = 0;
1069 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1070 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1071 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1072 ioa_cfg->max_devs_supported);
1073 set_bit(res->target, ioa_cfg->array_ids);
1074 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1075 res->bus = IPR_VSET_VIRTUAL_BUS;
1076 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1077 ioa_cfg->max_devs_supported);
1078 set_bit(res->target, ioa_cfg->vset_ids);
1079 } else {
1080 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1081 ioa_cfg->max_devs_supported);
1082 set_bit(res->target, ioa_cfg->target_ids);
1084 } else {
1085 proto = cfgtew->u.cfgte->proto;
1086 res->qmodel = IPR_QUEUEING_MODEL(res);
1087 res->flags = cfgtew->u.cfgte->flags;
1088 if (res->flags & IPR_IS_IOA_RESOURCE)
1089 res->type = IPR_RES_TYPE_IOAFP;
1090 else
1091 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1093 res->bus = cfgtew->u.cfgte->res_addr.bus;
1094 res->target = cfgtew->u.cfgte->res_addr.target;
1095 res->lun = cfgtew->u.cfgte->res_addr.lun;
1098 ipr_update_ata_class(res, proto);
1102 * ipr_is_same_device - Determine if two devices are the same.
1103 * @res: resource entry struct
1104 * @cfgtew: config table entry wrapper struct
1106 * Return value:
1107 * 1 if the devices are the same / 0 otherwise
1109 static int ipr_is_same_device(struct ipr_resource_entry *res,
1110 struct ipr_config_table_entry_wrapper *cfgtew)
1112 if (res->ioa_cfg->sis64) {
1113 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1114 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1115 !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1116 sizeof(cfgtew->u.cfgte64->lun))) {
1117 return 1;
1119 } else {
1120 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1121 res->target == cfgtew->u.cfgte->res_addr.target &&
1122 res->lun == cfgtew->u.cfgte->res_addr.lun)
1123 return 1;
1126 return 0;
1130 * ipr_format_resource_path - Format the resource path for printing.
1131 * @res_path: resource path
1132  * @buffer:	buffer
1134 * Return value:
1135 * pointer to buffer
1137 static char *ipr_format_resource_path(u8 *res_path, char *buffer)
1139 	int i, len;
1141 	len = sprintf(buffer, "%02X", res_path[0]);
1142 	for (i = 1; res_path[i] != 0xff; i++)
1143 		len += sprintf(buffer + len, "-%02X", res_path[i]);
1145 return buffer;
1149 * ipr_update_res_entry - Update the resource entry.
1150 * @res: resource entry struct
1151 * @cfgtew: config table entry wrapper struct
1153 * Return value:
1154 * none
1156 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1157 struct ipr_config_table_entry_wrapper *cfgtew)
1159 char buffer[IPR_MAX_RES_PATH_LENGTH];
1160 unsigned int proto;
1161 int new_path = 0;
1163 if (res->ioa_cfg->sis64) {
1164 res->flags = cfgtew->u.cfgte64->flags;
1165 res->res_flags = cfgtew->u.cfgte64->res_flags;
1166 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1168 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1169 sizeof(struct ipr_std_inq_data));
1171 res->qmodel = IPR_QUEUEING_MODEL64(res);
1172 proto = cfgtew->u.cfgte64->proto;
1173 res->res_handle = cfgtew->u.cfgte64->res_handle;
1174 res->dev_id = cfgtew->u.cfgte64->dev_id;
1176 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1177 sizeof(res->dev_lun.scsi_lun));
1179 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1180 sizeof(res->res_path))) {
1181 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1182 sizeof(res->res_path));
1183 new_path = 1;
1186 if (res->sdev && new_path)
1187 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1188 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
1189 } else {
1190 res->flags = cfgtew->u.cfgte->flags;
1191 if (res->flags & IPR_IS_IOA_RESOURCE)
1192 res->type = IPR_RES_TYPE_IOAFP;
1193 else
1194 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1196 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1197 sizeof(struct ipr_std_inq_data));
1199 res->qmodel = IPR_QUEUEING_MODEL(res);
1200 proto = cfgtew->u.cfgte->proto;
1201 res->res_handle = cfgtew->u.cfgte->res_handle;
1204 ipr_update_ata_class(res, proto);
1208 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1209 * for the resource.
1210 * @res: resource entry struct
1213 * Return value:
1214 * none
1216 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1218 struct ipr_resource_entry *gscsi_res = NULL;
1219 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1221 if (!ioa_cfg->sis64)
1222 return;
1224 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1225 clear_bit(res->target, ioa_cfg->array_ids);
1226 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1227 clear_bit(res->target, ioa_cfg->vset_ids);
1228 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1229 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1230 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1231 return;
1232 clear_bit(res->target, ioa_cfg->target_ids);
1234 } else if (res->bus == 0)
1235 clear_bit(res->target, ioa_cfg->target_ids);
1239 * ipr_handle_config_change - Handle a config change from the adapter
1240 * @ioa_cfg: ioa config struct
1241 * @hostrcb: hostrcb
1243 * Return value:
1244 * none
1246 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1247 struct ipr_hostrcb *hostrcb)
1249 struct ipr_resource_entry *res = NULL;
1250 struct ipr_config_table_entry_wrapper cfgtew;
1251 __be32 cc_res_handle;
1253 u32 is_ndn = 1;
1255 if (ioa_cfg->sis64) {
1256 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1257 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1258 } else {
1259 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1260 cc_res_handle = cfgtew.u.cfgte->res_handle;
1263 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1264 if (res->res_handle == cc_res_handle) {
1265 is_ndn = 0;
1266 break;
1270 if (is_ndn) {
1271 if (list_empty(&ioa_cfg->free_res_q)) {
1272 ipr_send_hcam(ioa_cfg,
1273 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1274 hostrcb);
1275 return;
1278 res = list_entry(ioa_cfg->free_res_q.next,
1279 struct ipr_resource_entry, queue);
1281 list_del(&res->queue);
1282 ipr_init_res_entry(res, &cfgtew);
1283 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1286 ipr_update_res_entry(res, &cfgtew);
1288 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1289 if (res->sdev) {
1290 res->del_from_ml = 1;
1291 res->res_handle = IPR_INVALID_RES_HANDLE;
1292 if (ioa_cfg->allow_ml_add_del)
1293 schedule_work(&ioa_cfg->work_q);
1294 } else {
1295 ipr_clear_res_target(res);
1296 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1298 } else if (!res->sdev) {
1299 res->add_to_ml = 1;
1300 if (ioa_cfg->allow_ml_add_del)
1301 schedule_work(&ioa_cfg->work_q);
1304 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1308 * ipr_process_ccn - Op done function for a CCN.
1309 * @ipr_cmd: ipr command struct
1311 * This function is the op done function for a configuration
1312 * change notification host controlled async from the adapter.
1314 * Return value:
1315 * none
1317 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1319 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1320 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1321 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1323 list_del(&hostrcb->queue);
1324 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1326 if (ioasc) {
1327 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1328 dev_err(&ioa_cfg->pdev->dev,
1329 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1331 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1332 } else {
1333 ipr_handle_config_change(ioa_cfg, hostrcb);
1338 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1339 * @i: index into buffer
1340 * @buf: string to modify
1342 * This function will strip all trailing whitespace, pad the end
1343 * of the string with a single space, and NULL terminate the string.
1345 * Return value:
1346 * new length of string
1348 static int strip_and_pad_whitespace(int i, char *buf)
1350 while (i && buf[i] == ' ')
1351 i--;
1352 buf[i+1] = ' ';
1353 buf[i+2] = '\0';
1354 return i + 2;
1358 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1359 * @prefix: string to print at start of printk
1360 * @hostrcb: hostrcb pointer
1361 * @vpd: vendor/product id/sn struct
1363 * Return value:
1364 * none
1366 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1367 struct ipr_vpd *vpd)
1369 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1370 int i = 0;
1372 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1373 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1375 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1376 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1378 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1379 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1381 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1385 * ipr_log_vpd - Log the passed VPD to the error log.
1386 * @vpd: vendor/product id/sn struct
1388 * Return value:
1389 * none
1391 static void ipr_log_vpd(struct ipr_vpd *vpd)
1393 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1394 + IPR_SERIAL_NUM_LEN];
1396 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1397 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1398 IPR_PROD_ID_LEN);
1399 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1400 ipr_err("Vendor/Product ID: %s\n", buffer);
1402 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1403 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1404 ipr_err(" Serial Number: %s\n", buffer);
1408 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1409 * @prefix: string to print at start of printk
1410 * @hostrcb: hostrcb pointer
1411 * @vpd: vendor/product id/sn/wwn struct
1413 * Return value:
1414 * none
1416 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1417 struct ipr_ext_vpd *vpd)
1419 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1420 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1421 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1425 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1426 * @vpd: vendor/product id/sn/wwn struct
1428 * Return value:
1429 * none
1431 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1433 ipr_log_vpd(&vpd->vpd);
1434 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1435 be32_to_cpu(vpd->wwid[1]));
1439 * ipr_log_enhanced_cache_error - Log a cache error.
1440 * @ioa_cfg: ioa config struct
1441 * @hostrcb: hostrcb struct
1443 * Return value:
1444 * none
1446 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1447 struct ipr_hostrcb *hostrcb)
1449 struct ipr_hostrcb_type_12_error *error;
1451 if (ioa_cfg->sis64)
1452 error = &hostrcb->hcam.u.error64.u.type_12_error;
1453 else
1454 error = &hostrcb->hcam.u.error.u.type_12_error;
1456 ipr_err("-----Current Configuration-----\n");
1457 ipr_err("Cache Directory Card Information:\n");
1458 ipr_log_ext_vpd(&error->ioa_vpd);
1459 ipr_err("Adapter Card Information:\n");
1460 ipr_log_ext_vpd(&error->cfc_vpd);
1462 ipr_err("-----Expected Configuration-----\n");
1463 ipr_err("Cache Directory Card Information:\n");
1464 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1465 ipr_err("Adapter Card Information:\n");
1466 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1468 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1469 be32_to_cpu(error->ioa_data[0]),
1470 be32_to_cpu(error->ioa_data[1]),
1471 be32_to_cpu(error->ioa_data[2]));
1475 * ipr_log_cache_error - Log a cache error.
1476 * @ioa_cfg: ioa config struct
1477 * @hostrcb: hostrcb struct
1479 * Return value:
1480 * none
1482 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1483 struct ipr_hostrcb *hostrcb)
1485 struct ipr_hostrcb_type_02_error *error =
1486 &hostrcb->hcam.u.error.u.type_02_error;
1488 ipr_err("-----Current Configuration-----\n");
1489 ipr_err("Cache Directory Card Information:\n");
1490 ipr_log_vpd(&error->ioa_vpd);
1491 ipr_err("Adapter Card Information:\n");
1492 ipr_log_vpd(&error->cfc_vpd);
1494 ipr_err("-----Expected Configuration-----\n");
1495 ipr_err("Cache Directory Card Information:\n");
1496 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1497 ipr_err("Adapter Card Information:\n");
1498 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1500 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1501 be32_to_cpu(error->ioa_data[0]),
1502 be32_to_cpu(error->ioa_data[1]),
1503 be32_to_cpu(error->ioa_data[2]));
1507 * ipr_log_enhanced_config_error - Log a configuration error.
1508 * @ioa_cfg: ioa config struct
1509 * @hostrcb: hostrcb struct
1511 * Return value:
1512 * none
1514 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1515 struct ipr_hostrcb *hostrcb)
1517 int errors_logged, i;
1518 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1519 struct ipr_hostrcb_type_13_error *error;
1521 error = &hostrcb->hcam.u.error.u.type_13_error;
1522 errors_logged = be32_to_cpu(error->errors_logged);
1524 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1525 be32_to_cpu(error->errors_detected), errors_logged);
1527 dev_entry = error->dev;
1529 for (i = 0; i < errors_logged; i++, dev_entry++) {
1530 ipr_err_separator;
1532 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1533 ipr_log_ext_vpd(&dev_entry->vpd);
1535 ipr_err("-----New Device Information-----\n");
1536 ipr_log_ext_vpd(&dev_entry->new_vpd);
1538 ipr_err("Cache Directory Card Information:\n");
1539 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1541 ipr_err("Adapter Card Information:\n");
1542 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1547 * ipr_log_sis64_config_error - Log a device error.
1548 * @ioa_cfg: ioa config struct
1549 * @hostrcb: hostrcb struct
1551 * Return value:
1552 * none
1554 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1555 struct ipr_hostrcb *hostrcb)
1557 int errors_logged, i;
1558 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1559 struct ipr_hostrcb_type_23_error *error;
1560 char buffer[IPR_MAX_RES_PATH_LENGTH];
1562 error = &hostrcb->hcam.u.error64.u.type_23_error;
1563 errors_logged = be32_to_cpu(error->errors_logged);
1565 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1566 be32_to_cpu(error->errors_detected), errors_logged);
1568 dev_entry = error->dev;
1570 for (i = 0; i < errors_logged; i++, dev_entry++) {
1571 ipr_err_separator;
1573 ipr_err("Device %d : %s", i + 1,
1574 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1575 ipr_log_ext_vpd(&dev_entry->vpd);
1577 ipr_err("-----New Device Information-----\n");
1578 ipr_log_ext_vpd(&dev_entry->new_vpd);
1580 ipr_err("Cache Directory Card Information:\n");
1581 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1583 ipr_err("Adapter Card Information:\n");
1584 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1589 * ipr_log_config_error - Log a configuration error.
1590 * @ioa_cfg: ioa config struct
1591 * @hostrcb: hostrcb struct
1593 * Return value:
1594 * none
1596 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1597 struct ipr_hostrcb *hostrcb)
1599 int errors_logged, i;
1600 struct ipr_hostrcb_device_data_entry *dev_entry;
1601 struct ipr_hostrcb_type_03_error *error;
1603 error = &hostrcb->hcam.u.error.u.type_03_error;
1604 errors_logged = be32_to_cpu(error->errors_logged);
1606 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1607 be32_to_cpu(error->errors_detected), errors_logged);
1609 dev_entry = error->dev;
1611 for (i = 0; i < errors_logged; i++, dev_entry++) {
1612 ipr_err_separator;
1614 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1615 ipr_log_vpd(&dev_entry->vpd);
1617 ipr_err("-----New Device Information-----\n");
1618 ipr_log_vpd(&dev_entry->new_vpd);
1620 ipr_err("Cache Directory Card Information:\n");
1621 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1623 ipr_err("Adapter Card Information:\n");
1624 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1626 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1627 be32_to_cpu(dev_entry->ioa_data[0]),
1628 be32_to_cpu(dev_entry->ioa_data[1]),
1629 be32_to_cpu(dev_entry->ioa_data[2]),
1630 be32_to_cpu(dev_entry->ioa_data[3]),
1631 be32_to_cpu(dev_entry->ioa_data[4]));
1636 * ipr_log_enhanced_array_error - Log an array configuration error.
1637 * @ioa_cfg: ioa config struct
1638 * @hostrcb: hostrcb struct
1640 * Return value:
1641 * none
1643 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1644 struct ipr_hostrcb *hostrcb)
1646 int i, num_entries;
1647 struct ipr_hostrcb_type_14_error *error;
1648 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1649 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1651 error = &hostrcb->hcam.u.error.u.type_14_error;
1653 ipr_err_separator;
1655 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1656 error->protection_level,
1657 ioa_cfg->host->host_no,
1658 error->last_func_vset_res_addr.bus,
1659 error->last_func_vset_res_addr.target,
1660 error->last_func_vset_res_addr.lun);
1662 ipr_err_separator;
1664 array_entry = error->array_member;
1665 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1666 sizeof(error->array_member));
1668 for (i = 0; i < num_entries; i++, array_entry++) {
1669 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1670 continue;
1672 if (be32_to_cpu(error->exposed_mode_adn) == i)
1673 ipr_err("Exposed Array Member %d:\n", i);
1674 else
1675 ipr_err("Array Member %d:\n", i);
1677 ipr_log_ext_vpd(&array_entry->vpd);
1678 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1679 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1680 "Expected Location");
1682 ipr_err_separator;
1687 * ipr_log_array_error - Log an array configuration error.
1688 * @ioa_cfg: ioa config struct
1689 * @hostrcb: hostrcb struct
1691 * Return value:
1692 * none
1694 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1695 struct ipr_hostrcb *hostrcb)
1697 int i;
1698 struct ipr_hostrcb_type_04_error *error;
1699 struct ipr_hostrcb_array_data_entry *array_entry;
1700 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1702 error = &hostrcb->hcam.u.error.u.type_04_error;
1704 ipr_err_separator;
1706 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1707 error->protection_level,
1708 ioa_cfg->host->host_no,
1709 error->last_func_vset_res_addr.bus,
1710 error->last_func_vset_res_addr.target,
1711 error->last_func_vset_res_addr.lun);
1713 ipr_err_separator;
1715 array_entry = error->array_member;
1717 for (i = 0; i < 18; i++) {
1718 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1719 continue;
1721 if (be32_to_cpu(error->exposed_mode_adn) == i)
1722 ipr_err("Exposed Array Member %d:\n", i);
1723 else
1724 ipr_err("Array Member %d:\n", i);
1726 ipr_log_vpd(&array_entry->vpd);
1728 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1729 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1730 "Expected Location");
1732 ipr_err_separator;
1734 if (i == 9)
1735 array_entry = error->array_member2;
1736 else
1737 array_entry++;
1742 * ipr_log_hex_data - Log additional hex IOA error data.
1743 * @ioa_cfg: ioa config struct
1744 * @data: IOA error data
1745 * @len: data length
1747 * Return value:
1748 * none
1750 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1752 int i;
1754 if (len == 0)
1755 return;
1757 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1758 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
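/*
 * len is in bytes; each pass below prints one line of four big-endian
 * 32-bit words prefixed with the byte offset into the data.
 */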
1760 for (i = 0; i < len / 4; i += 4) {
1761 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1762 be32_to_cpu(data[i]),
1763 be32_to_cpu(data[i+1]),
1764 be32_to_cpu(data[i+2]),
1765 be32_to_cpu(data[i+3]));
1770 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1771 * @ioa_cfg: ioa config struct
1772 * @hostrcb: hostrcb struct
1774 * Return value:
1775 * none
1777 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1778 struct ipr_hostrcb *hostrcb)
1780 struct ipr_hostrcb_type_17_error *error;
1782 if (ioa_cfg->sis64)
1783 error = &hostrcb->hcam.u.error64.u.type_17_error;
1784 else
1785 error = &hostrcb->hcam.u.error.u.type_17_error;
1787 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1788 strim(error->failure_reason);
1790 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1791 be32_to_cpu(hostrcb->hcam.u.error.prc));
1792 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
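/*
 * The trailing hex data is whatever remains of the HCAM after the fixed
 * error header and the type 17 fields that precede "data".
 */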
1793 ipr_log_hex_data(ioa_cfg, error->data,
1794 be32_to_cpu(hostrcb->hcam.length) -
1795 (offsetof(struct ipr_hostrcb_error, u) +
1796 offsetof(struct ipr_hostrcb_type_17_error, data)));
1800 * ipr_log_dual_ioa_error - Log a dual adapter error.
1801 * @ioa_cfg: ioa config struct
1802 * @hostrcb: hostrcb struct
1804 * Return value:
1805 * none
1807 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1808 struct ipr_hostrcb *hostrcb)
1810 struct ipr_hostrcb_type_07_error *error;
1812 error = &hostrcb->hcam.u.error.u.type_07_error;
1813 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1814 strim(error->failure_reason);
1816 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1817 be32_to_cpu(hostrcb->hcam.u.error.prc));
1818 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1819 ipr_log_hex_data(ioa_cfg, error->data,
1820 be32_to_cpu(hostrcb->hcam.length) -
1821 (offsetof(struct ipr_hostrcb_error, u) +
1822 offsetof(struct ipr_hostrcb_type_07_error, data)));
1825 static const struct {
1826 u8 active;
1827 char *desc;
1828 } path_active_desc[] = {
1829 { IPR_PATH_NO_INFO, "Path" },
1830 { IPR_PATH_ACTIVE, "Active path" },
1831 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1834 static const struct {
1835 u8 state;
1836 char *desc;
1837 } path_state_desc[] = {
1838 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1839 { IPR_PATH_HEALTHY, "is healthy" },
1840 { IPR_PATH_DEGRADED, "is degraded" },
1841 { IPR_PATH_FAILED, "is failed" }
1845 * ipr_log_fabric_path - Log a fabric path error
1846 * @hostrcb: hostrcb struct
1847 * @fabric: fabric descriptor
1849 * Return value:
1850 * none
1852 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1853 struct ipr_hostrcb_fabric_desc *fabric)
1855 int i, j;
1856 u8 path_state = fabric->path_state;
1857 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1858 u8 state = path_state & IPR_PATH_STATE_MASK;
1860 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1861 if (path_active_desc[i].active != active)
1862 continue;
1864 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1865 if (path_state_desc[j].state != state)
1866 continue;
1868 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1869 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1870 path_active_desc[i].desc, path_state_desc[j].desc,
1871 fabric->ioa_port);
1872 } else if (fabric->cascaded_expander == 0xff) {
1873 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1874 path_active_desc[i].desc, path_state_desc[j].desc,
1875 fabric->ioa_port, fabric->phy);
1876 } else if (fabric->phy == 0xff) {
1877 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1878 path_active_desc[i].desc, path_state_desc[j].desc,
1879 fabric->ioa_port, fabric->cascaded_expander);
1880 } else {
1881 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1882 path_active_desc[i].desc, path_state_desc[j].desc,
1883 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1885 return;
1889 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1890 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1894 * ipr_log64_fabric_path - Log a fabric path error
1895 * @hostrcb: hostrcb struct
1896 * @fabric: fabric descriptor
1898 * Return value:
1899 * none
1901 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1902 struct ipr_hostrcb64_fabric_desc *fabric)
1904 int i, j;
1905 u8 path_state = fabric->path_state;
1906 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1907 u8 state = path_state & IPR_PATH_STATE_MASK;
1908 char buffer[IPR_MAX_RES_PATH_LENGTH];
1910 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1911 if (path_active_desc[i].active != active)
1912 continue;
1914 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1915 if (path_state_desc[j].state != state)
1916 continue;
1918 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1919 path_active_desc[i].desc, path_state_desc[j].desc,
1920 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1921 return;
1925 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1926 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1929 static const struct {
1930 u8 type;
1931 char *desc;
1932 } path_type_desc[] = {
1933 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1934 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1935 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1936 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1939 static const struct {
1940 u8 status;
1941 char *desc;
1942 } path_status_desc[] = {
1943 { IPR_PATH_CFG_NO_PROB, "Functional" },
1944 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1945 { IPR_PATH_CFG_FAILED, "Failed" },
1946 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1947 { IPR_PATH_NOT_DETECTED, "Missing" },
1948 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
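/*
 * link_rate[] below is indexed by (link_rate & IPR_PHY_LINK_RATE_MASK);
 * entries 8 and 9 correspond to the 1.5Gbps and 3.0Gbps SAS rates, the
 * remaining values are special states or reserved.
 */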
1951 static const char *link_rate[] = {
1952 "unknown",
1953 "disabled",
1954 "phy reset problem",
1955 "spinup hold",
1956 "port selector",
1957 "unknown",
1958 "unknown",
1959 "unknown",
1960 "1.5Gbps",
1961 "3.0Gbps",
1962 "unknown",
1963 "unknown",
1964 "unknown",
1965 "unknown",
1966 "unknown",
1967 "unknown"
1971 * ipr_log_path_elem - Log a fabric path element.
1972 * @hostrcb: hostrcb struct
1973 * @cfg: fabric path element struct
1975 * Return value:
1976 * none
1978 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1979 struct ipr_hostrcb_config_element *cfg)
1981 int i, j;
1982 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1983 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1985 if (type == IPR_PATH_CFG_NOT_EXIST)
1986 return;
1988 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1989 if (path_type_desc[i].type != type)
1990 continue;
1992 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1993 if (path_status_desc[j].status != status)
1994 continue;
1996 if (type == IPR_PATH_CFG_IOA_PORT) {
1997 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1998 path_status_desc[j].desc, path_type_desc[i].desc,
1999 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2000 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2001 } else {
2002 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2003 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2004 path_status_desc[j].desc, path_type_desc[i].desc,
2005 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2006 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2007 } else if (cfg->cascaded_expander == 0xff) {
2008 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2009 "WWN=%08X%08X\n", path_status_desc[j].desc,
2010 path_type_desc[i].desc, cfg->phy,
2011 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2012 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2013 } else if (cfg->phy == 0xff) {
2014 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2015 "WWN=%08X%08X\n", path_status_desc[j].desc,
2016 path_type_desc[i].desc, cfg->cascaded_expander,
2017 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2018 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2019 } else {
2020 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2021 "WWN=%08X%08X\n", path_status_desc[j].desc,
2022 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2023 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2024 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2027 return;
2031 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2032 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2033 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2034 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2038 * ipr_log64_path_elem - Log a fabric path element.
2039 * @hostrcb: hostrcb struct
2040 * @cfg: fabric path element struct
2042 * Return value:
2043 * none
2045 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2046 struct ipr_hostrcb64_config_element *cfg)
2048 int i, j;
2049 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2050 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2051 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2052 char buffer[IPR_MAX_RES_PATH_LENGTH];
2054 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2055 return;
2057 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2058 if (path_type_desc[i].type != type)
2059 continue;
2061 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2062 if (path_status_desc[j].status != status)
2063 continue;
2065 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2066 path_status_desc[j].desc, path_type_desc[i].desc,
2067 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2068 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2069 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2070 return;
2073 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2074 "WWN=%08X%08X\n", cfg->type_status,
2075 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2076 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2077 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2081 * ipr_log_fabric_error - Log a fabric error.
2082 * @ioa_cfg: ioa config struct
2083 * @hostrcb: hostrcb struct
2085 * Return value:
2086 * none
2088 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2089 struct ipr_hostrcb *hostrcb)
2091 struct ipr_hostrcb_type_20_error *error;
2092 struct ipr_hostrcb_fabric_desc *fabric;
2093 struct ipr_hostrcb_config_element *cfg;
2094 int i, add_len;
2096 error = &hostrcb->hcam.u.error.u.type_20_error;
2097 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2098 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2100 add_len = be32_to_cpu(hostrcb->hcam.length) -
2101 (offsetof(struct ipr_hostrcb_error, u) +
2102 offsetof(struct ipr_hostrcb_type_20_error, desc));
2104 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2105 ipr_log_fabric_path(hostrcb, fabric);
2106 for_each_fabric_cfg(fabric, cfg)
2107 ipr_log_path_elem(hostrcb, cfg);
2109 add_len -= be16_to_cpu(fabric->length);
2110 fabric = (struct ipr_hostrcb_fabric_desc *)
2111 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2114 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2118 * ipr_log_sis64_array_error - Log a sis64 array error.
2119 * @ioa_cfg: ioa config struct
2120 * @hostrcb: hostrcb struct
2122 * Return value:
2123 * none
2125 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2126 struct ipr_hostrcb *hostrcb)
2128 int i, num_entries;
2129 struct ipr_hostrcb_type_24_error *error;
2130 struct ipr_hostrcb64_array_data_entry *array_entry;
2131 char buffer[IPR_MAX_RES_PATH_LENGTH];
2132 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2134 error = &hostrcb->hcam.u.error64.u.type_24_error;
2136 ipr_err_separator;
2138 ipr_err("RAID %s Array Configuration: %s\n",
2139 error->protection_level,
2140 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2142 ipr_err_separator;
2144 array_entry = error->array_member;
2145 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2146 ARRAY_SIZE(error->array_member));
2148 for (i = 0; i < num_entries; i++, array_entry++) {
2150 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2151 continue;
2153 if (error->exposed_mode_adn == i)
2154 ipr_err("Exposed Array Member %d:\n", i);
2155 else
2156 ipr_err("Array Member %d:\n", i);
2159 ipr_log_ext_vpd(&array_entry->vpd);
2160 ipr_err("Current Location: %s",
2161 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2162 ipr_err("Expected Location: %s",
2163 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2165 ipr_err_separator;
2170 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2171 * @ioa_cfg: ioa config struct
2172 * @hostrcb: hostrcb struct
2174 * Return value:
2175 * none
2177 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2178 struct ipr_hostrcb *hostrcb)
2180 struct ipr_hostrcb_type_30_error *error;
2181 struct ipr_hostrcb64_fabric_desc *fabric;
2182 struct ipr_hostrcb64_config_element *cfg;
2183 int i, add_len;
2185 error = &hostrcb->hcam.u.error64.u.type_30_error;
2187 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2188 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2190 add_len = be32_to_cpu(hostrcb->hcam.length) -
2191 (offsetof(struct ipr_hostrcb64_error, u) +
2192 offsetof(struct ipr_hostrcb_type_30_error, desc));
2194 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2195 ipr_log64_fabric_path(hostrcb, fabric);
2196 for_each_fabric_cfg(fabric, cfg)
2197 ipr_log64_path_elem(hostrcb, cfg);
2199 add_len -= be16_to_cpu(fabric->length);
2200 fabric = (struct ipr_hostrcb64_fabric_desc *)
2201 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2204 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2208 * ipr_log_generic_error - Log an adapter error.
2209 * @ioa_cfg: ioa config struct
2210 * @hostrcb: hostrcb struct
2212 * Return value:
2213 * none
2215 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2216 struct ipr_hostrcb *hostrcb)
2218 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2219 be32_to_cpu(hostrcb->hcam.length));
2223 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2224 * @ioasc: IOASC
2226 * This function will return the index into the ipr_error_table
2227 * for the specified IOASC. If the IOASC is not in the table,
2228 * 0 will be returned, which points to the entry used for unknown errors.
2230 * Return value:
2231 * index into the ipr_error_table
2233 static u32 ipr_get_error(u32 ioasc)
2235 int i;
2237 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2238 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2239 return i;
2241 return 0;
2245 * ipr_handle_log_data - Log an adapter error.
2246 * @ioa_cfg: ioa config struct
2247 * @hostrcb: hostrcb struct
2249 * This function logs an adapter error to the system.
2251 * Return value:
2252 * none
2254 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2255 struct ipr_hostrcb *hostrcb)
2257 u32 ioasc;
2258 int error_index;
2260 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2261 return;
2263 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2264 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2266 if (ioa_cfg->sis64)
2267 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2268 else
2269 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2271 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2272 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2273 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2274 scsi_report_bus_reset(ioa_cfg->host,
2275 hostrcb->hcam.u.error.fd_res_addr.bus);
2278 error_index = ipr_get_error(ioasc);
2280 if (!ipr_error_table[error_index].log_hcam)
2281 return;
2283 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2285 /* Set indication we have logged an error */
2286 ioa_cfg->errors_logged++;
2288 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2289 return;
2290 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2291 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2293 switch (hostrcb->hcam.overlay_id) {
2294 case IPR_HOST_RCB_OVERLAY_ID_2:
2295 ipr_log_cache_error(ioa_cfg, hostrcb);
2296 break;
2297 case IPR_HOST_RCB_OVERLAY_ID_3:
2298 ipr_log_config_error(ioa_cfg, hostrcb);
2299 break;
2300 case IPR_HOST_RCB_OVERLAY_ID_4:
2301 case IPR_HOST_RCB_OVERLAY_ID_6:
2302 ipr_log_array_error(ioa_cfg, hostrcb);
2303 break;
2304 case IPR_HOST_RCB_OVERLAY_ID_7:
2305 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2306 break;
2307 case IPR_HOST_RCB_OVERLAY_ID_12:
2308 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2309 break;
2310 case IPR_HOST_RCB_OVERLAY_ID_13:
2311 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2312 break;
2313 case IPR_HOST_RCB_OVERLAY_ID_14:
2314 case IPR_HOST_RCB_OVERLAY_ID_16:
2315 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2316 break;
2317 case IPR_HOST_RCB_OVERLAY_ID_17:
2318 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2319 break;
2320 case IPR_HOST_RCB_OVERLAY_ID_20:
2321 ipr_log_fabric_error(ioa_cfg, hostrcb);
2322 break;
2323 case IPR_HOST_RCB_OVERLAY_ID_23:
2324 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2325 break;
2326 case IPR_HOST_RCB_OVERLAY_ID_24:
2327 case IPR_HOST_RCB_OVERLAY_ID_26:
2328 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2329 break;
2330 case IPR_HOST_RCB_OVERLAY_ID_30:
2331 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2332 break;
2333 case IPR_HOST_RCB_OVERLAY_ID_1:
2334 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2335 default:
2336 ipr_log_generic_error(ioa_cfg, hostrcb);
2337 break;
2342 * ipr_process_error - Op done function for an adapter error log.
2343 * @ipr_cmd: ipr command struct
2345 * This function is the op done function for an error log host
2346 * controlled async message (HCAM) from the adapter. It will log the error and
2347 * send the HCAM back to the adapter.
2349 * Return value:
2350 * none
2352 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2354 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2355 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2356 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
2357 u32 fd_ioasc;
2359 if (ioa_cfg->sis64)
2360 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2361 else
2362 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2364 list_del(&hostrcb->queue);
2365 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2367 if (!ioasc) {
2368 ipr_handle_log_data(ioa_cfg, hostrcb);
2369 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2370 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2371 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2372 dev_err(&ioa_cfg->pdev->dev,
2373 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2376 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2380 * ipr_timeout - An internally generated op has timed out.
2381 * @ipr_cmd: ipr command struct
2383 * This function blocks host requests and initiates an
2384 * adapter reset.
2386 * Return value:
2387 * none
2389 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2391 unsigned long lock_flags = 0;
2392 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2394 ENTER;
2395 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2397 ioa_cfg->errors_logged++;
2398 dev_err(&ioa_cfg->pdev->dev,
2399 "Adapter being reset due to command timeout.\n");
2401 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2402 ioa_cfg->sdt_state = GET_DUMP;
2404 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2405 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2407 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2408 LEAVE;
2412 * ipr_oper_timeout - Adapter timed out transitioning to operational
2413 * @ipr_cmd: ipr command struct
2415 * This function blocks host requests and initiates an
2416 * adapter reset.
2418 * Return value:
2419 * none
2421 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2423 unsigned long lock_flags = 0;
2424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2426 ENTER;
2427 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2429 ioa_cfg->errors_logged++;
2430 dev_err(&ioa_cfg->pdev->dev,
2431 "Adapter timed out transitioning to operational.\n");
2433 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2434 ioa_cfg->sdt_state = GET_DUMP;
2436 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2437 if (ipr_fastfail)
2438 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2439 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2442 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2443 LEAVE;
2447 * ipr_reset_reload - Reset/Reload the IOA
2448 * @ioa_cfg: ioa config struct
2449 * @shutdown_type: shutdown type
2451 * This function resets the adapter and re-initializes it.
2452 * This function assumes that all new host commands have been stopped.
2453 * Return value:
2454 * SUCCESS / FAILED
2456 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2457 enum ipr_shutdown_type shutdown_type)
2459 if (!ioa_cfg->in_reset_reload)
2460 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2462 spin_unlock_irq(ioa_cfg->host->host_lock);
2463 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2464 spin_lock_irq(ioa_cfg->host->host_lock);
2466 /* If we got hit with a host reset while we were already resetting
2467 the adapter for some reason, and that reset failed, the adapter is dead. */
2468 if (ioa_cfg->ioa_is_dead) {
2469 ipr_trace;
2470 return FAILED;
2473 return SUCCESS;
2477 * ipr_find_ses_entry - Find matching SES in SES table
2478 * @res: resource entry struct of SES
2480 * Return value:
2481 * pointer to SES table entry / NULL on failure
2483 static const struct ipr_ses_table_entry *
2484 ipr_find_ses_entry(struct ipr_resource_entry *res)
2486 int i, j, matches;
2487 struct ipr_std_inq_vpids *vpids;
2488 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2490 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2491 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2492 if (ste->compare_product_id_byte[j] == 'X') {
2493 vpids = &res->std_inq_data.vpids;
2494 if (vpids->product_id[j] == ste->product_id[j])
2495 matches++;
2496 else
2497 break;
2498 } else
2499 matches++;
2502 if (matches == IPR_PROD_ID_LEN)
2503 return ste;
2506 return NULL;
2510 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2511 * @ioa_cfg: ioa config struct
2512 * @bus: SCSI bus
2513 * @bus_width: bus width
2515 * Return value:
2516 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2517 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2518 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2519 * max 160MHz = max 320MB/sec).
2521 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2523 struct ipr_resource_entry *res;
2524 const struct ipr_ses_table_entry *ste;
2525 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2527 /* Loop through each config table entry in the config table buffer */
2528 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2529 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2530 continue;
2532 if (bus != res->bus)
2533 continue;
2535 if (!(ste = ipr_find_ses_entry(res)))
2536 continue;
2538 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2541 return max_xfer_rate;
2545 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2546 * @ioa_cfg: ioa config struct
2547 * @max_delay: max delay in micro-seconds to wait
2549 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2551 * Return value:
2552 * 0 on success / other on failure
2554 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2556 volatile u32 pcii_reg;
2557 int delay = 1;
2559 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2560 while (delay < max_delay) {
2561 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2563 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2564 return 0;
2566 /* udelay cannot be used if delay is more than a few milliseconds */
2567 if ((delay / 1000) > MAX_UDELAY_MS)
2568 mdelay(delay / 1000);
2569 else
2570 udelay(delay);
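/* Back off exponentially until max_delay is reached. */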
2572 delay += delay;
2574 return -EIO;
2578 * ipr_get_sis64_dump_data_section - Dump IOA memory
2579 * @ioa_cfg: ioa config struct
2580 * @start_addr: adapter address to dump
2581 * @dest: destination kernel buffer
2582 * @length_in_words: length to dump in 4 byte words
2584 * Return value:
2585 * 0 on success
2587 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2588 u32 start_addr,
2589 __be32 *dest, u32 length_in_words)
2591 int i;
2593 for (i = 0; i < length_in_words; i++) {
2594 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2595 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2596 dest++;
2599 return 0;
2603 * ipr_get_ldump_data_section - Dump IOA memory
2604 * @ioa_cfg: ioa config struct
2605 * @start_addr: adapter address to dump
2606 * @dest: destination kernel buffer
2607 * @length_in_words: length to dump in 4 byte words
2609 * Return value:
2610 * 0 on success / -EIO on failure
2612 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2613 u32 start_addr,
2614 __be32 *dest, u32 length_in_words)
2616 volatile u32 temp_pcii_reg;
2617 int i, delay = 0;
2619 if (ioa_cfg->sis64)
2620 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2621 dest, length_in_words);
2623 /* Write IOA interrupt reg starting LDUMP state */
2624 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2625 ioa_cfg->regs.set_uproc_interrupt_reg32);
2627 /* Wait for IO debug acknowledge */
2628 if (ipr_wait_iodbg_ack(ioa_cfg,
2629 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2630 dev_err(&ioa_cfg->pdev->dev,
2631 "IOA dump long data transfer timeout\n");
2632 return -EIO;
2635 /* Signal LDUMP interlocked - clear IO debug ack */
2636 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2637 ioa_cfg->regs.clr_interrupt_reg);
2639 /* Write Mailbox with starting address */
2640 writel(start_addr, ioa_cfg->ioa_mailbox);
2642 /* Signal address valid - clear IOA Reset alert */
2643 writel(IPR_UPROCI_RESET_ALERT,
2644 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2646 for (i = 0; i < length_in_words; i++) {
2647 /* Wait for IO debug acknowledge */
2648 if (ipr_wait_iodbg_ack(ioa_cfg,
2649 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2650 dev_err(&ioa_cfg->pdev->dev,
2651 "IOA dump short data transfer timeout\n");
2652 return -EIO;
2655 /* Read data from mailbox and increment destination pointer */
2656 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2657 dest++;
2659 /* For all but the last word of data, signal data received */
2660 if (i < (length_in_words - 1)) {
2661 /* Signal dump data received - Clear IO debug Ack */
2662 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2663 ioa_cfg->regs.clr_interrupt_reg);
2667 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2668 writel(IPR_UPROCI_RESET_ALERT,
2669 ioa_cfg->regs.set_uproc_interrupt_reg32);
2671 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2672 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2674 /* Signal dump data received - Clear IO debug Ack */
2675 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2676 ioa_cfg->regs.clr_interrupt_reg);
2678 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2679 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2680 temp_pcii_reg =
2681 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2683 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2684 return 0;
2686 udelay(10);
2687 delay += 10;
2690 return 0;
2693 #ifdef CONFIG_SCSI_IPR_DUMP
2695 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2696 * @ioa_cfg: ioa config struct
2697 * @pci_address: adapter address
2698 * @length: length of data to copy
2700 * Copy data from PCI adapter to kernel buffer.
2701 * Note: length MUST be a 4 byte multiple
2702 * Return value:
2703 * 0 on success / other on failure
2705 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2706 unsigned long pci_address, u32 length)
2708 int bytes_copied = 0;
2709 int cur_len, rc, rem_len, rem_page_len;
2710 __be32 *page;
2711 unsigned long lock_flags = 0;
2712 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
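/*
 * Copy the dump in page-sized pieces, allocating a fresh page whenever
 * the current one fills, and stop once IPR_MAX_IOA_DUMP_SIZE is reached.
 */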
2714 while (bytes_copied < length &&
2715 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2716 if (ioa_dump->page_offset >= PAGE_SIZE ||
2717 ioa_dump->page_offset == 0) {
2718 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2720 if (!page) {
2721 ipr_trace;
2722 return bytes_copied;
2725 ioa_dump->page_offset = 0;
2726 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2727 ioa_dump->next_page_index++;
2728 } else
2729 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2731 rem_len = length - bytes_copied;
2732 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2733 cur_len = min(rem_len, rem_page_len);
2735 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2736 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2737 rc = -EIO;
2738 } else {
2739 rc = ipr_get_ldump_data_section(ioa_cfg,
2740 pci_address + bytes_copied,
2741 &page[ioa_dump->page_offset / 4],
2742 (cur_len / sizeof(u32)));
2744 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2746 if (!rc) {
2747 ioa_dump->page_offset += cur_len;
2748 bytes_copied += cur_len;
2749 } else {
2750 ipr_trace;
2751 break;
2753 schedule();
2756 return bytes_copied;
2760 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2761 * @hdr: dump entry header struct
2763 * Return value:
2764 * nothing
2766 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2768 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2769 hdr->num_elems = 1;
2770 hdr->offset = sizeof(*hdr);
2771 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2775 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2776 * @ioa_cfg: ioa config struct
2777 * @driver_dump: driver dump struct
2779 * Return value:
2780 * nothing
2782 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2783 struct ipr_driver_dump *driver_dump)
2785 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2787 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2788 driver_dump->ioa_type_entry.hdr.len =
2789 sizeof(struct ipr_dump_ioa_type_entry) -
2790 sizeof(struct ipr_dump_entry_header);
2791 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2792 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2793 driver_dump->ioa_type_entry.type = ioa_cfg->type;
2794 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2795 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2796 ucode_vpd->minor_release[1];
2797 driver_dump->hdr.num_entries++;
2801 * ipr_dump_version_data - Fill in the driver version in the dump.
2802 * @ioa_cfg: ioa config struct
2803 * @driver_dump: driver dump struct
2805 * Return value:
2806 * nothing
2808 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2809 struct ipr_driver_dump *driver_dump)
2811 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2812 driver_dump->version_entry.hdr.len =
2813 sizeof(struct ipr_dump_version_entry) -
2814 sizeof(struct ipr_dump_entry_header);
2815 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2816 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2817 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2818 driver_dump->hdr.num_entries++;
2822 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2823 * @ioa_cfg: ioa config struct
2824 * @driver_dump: driver dump struct
2826 * Return value:
2827 * nothing
2829 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2830 struct ipr_driver_dump *driver_dump)
2832 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2833 driver_dump->trace_entry.hdr.len =
2834 sizeof(struct ipr_dump_trace_entry) -
2835 sizeof(struct ipr_dump_entry_header);
2836 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2837 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2838 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2839 driver_dump->hdr.num_entries++;
2843 * ipr_dump_location_data - Fill in the IOA location in the dump.
2844 * @ioa_cfg: ioa config struct
2845 * @driver_dump: driver dump struct
2847 * Return value:
2848 * nothing
2850 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2851 struct ipr_driver_dump *driver_dump)
2853 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2854 driver_dump->location_entry.hdr.len =
2855 sizeof(struct ipr_dump_location_entry) -
2856 sizeof(struct ipr_dump_entry_header);
2857 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2858 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2859 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2860 driver_dump->hdr.num_entries++;
2864 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2865 * @ioa_cfg: ioa config struct
2866 * @dump: dump struct
2868 * Return value:
2869 * nothing
2871 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2873 unsigned long start_addr, sdt_word;
2874 unsigned long lock_flags = 0;
2875 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2876 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2877 u32 num_entries, start_off, end_off;
2878 u32 bytes_to_copy, bytes_copied, rc;
2879 struct ipr_sdt *sdt;
2880 int valid = 1;
2881 int i;
2883 ENTER;
2885 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2887 if (ioa_cfg->sdt_state != GET_DUMP) {
2888 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2889 return;
2892 start_addr = readl(ioa_cfg->ioa_mailbox);
2894 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2895 dev_err(&ioa_cfg->pdev->dev,
2896 "Invalid dump table format: %lx\n", start_addr);
2897 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2898 return;
2901 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2903 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2905 /* Initialize the overall dump header */
2906 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2907 driver_dump->hdr.num_entries = 1;
2908 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2909 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2910 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2911 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2913 ipr_dump_version_data(ioa_cfg, driver_dump);
2914 ipr_dump_location_data(ioa_cfg, driver_dump);
2915 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2916 ipr_dump_trace_data(ioa_cfg, driver_dump);
2918 /* Update dump_header */
2919 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2921 /* IOA Dump entry */
2922 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2923 ioa_dump->hdr.len = 0;
2924 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2925 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2927 /* First entries in sdt are actually a list of dump addresses and
2928 lengths to gather the real dump data. sdt represents the pointer
2929 to the IOA-generated dump table. Dump data will be extracted based
2930 on entries in this table. */
2931 sdt = &ioa_dump->sdt;
2933 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2934 sizeof(struct ipr_sdt) / sizeof(__be32));
2936 /* Smart Dump table is ready to use and the first entry is valid */
2937 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2938 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2939 dev_err(&ioa_cfg->pdev->dev,
2940 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2941 rc, be32_to_cpu(sdt->hdr.state));
2942 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2943 ioa_cfg->sdt_state = DUMP_OBTAINED;
2944 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2945 return;
2948 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2950 if (num_entries > IPR_NUM_SDT_ENTRIES)
2951 num_entries = IPR_NUM_SDT_ENTRIES;
2953 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2955 for (i = 0; i < num_entries; i++) {
2956 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2957 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2958 break;
2961 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2962 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2963 if (ioa_cfg->sis64)
2964 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2965 else {
2966 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2967 end_off = be32_to_cpu(sdt->entry[i].end_token);
2969 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2970 bytes_to_copy = end_off - start_off;
2971 else
2972 valid = 0;
2974 if (valid) {
2975 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2976 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2977 continue;
2980 /* Copy data from adapter to driver buffers */
2981 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2982 bytes_to_copy);
2984 ioa_dump->hdr.len += bytes_copied;
2986 if (bytes_copied != bytes_to_copy) {
2987 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2988 break;
2994 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2996 /* Update dump_header */
2997 driver_dump->hdr.len += ioa_dump->hdr.len;
2998 wmb();
2999 ioa_cfg->sdt_state = DUMP_OBTAINED;
3000 LEAVE;
3003 #else
3004 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3005 #endif
3008 * ipr_release_dump - Free adapter dump memory
3009 * @kref: kref struct
3011 * Return value:
3012 * nothing
3014 static void ipr_release_dump(struct kref *kref)
3016 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3017 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3018 unsigned long lock_flags = 0;
3019 int i;
3021 ENTER;
3022 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3023 ioa_cfg->dump = NULL;
3024 ioa_cfg->sdt_state = INACTIVE;
3025 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3027 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3028 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3030 kfree(dump);
3031 LEAVE;
3035 * ipr_worker_thread - Worker thread
3036 * @work: ioa config struct
3038 * Called at task level from a work thread. This function takes care
3039 * of adding and removing devices from the mid-layer as configuration
3040 * changes are detected by the adapter.
3042 * Return value:
3043 * nothing
3045 static void ipr_worker_thread(struct work_struct *work)
3047 unsigned long lock_flags;
3048 struct ipr_resource_entry *res;
3049 struct scsi_device *sdev;
3050 struct ipr_dump *dump;
3051 struct ipr_ioa_cfg *ioa_cfg =
3052 container_of(work, struct ipr_ioa_cfg, work_q);
3053 u8 bus, target, lun;
3054 int did_work;
3056 ENTER;
3057 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3059 if (ioa_cfg->sdt_state == GET_DUMP) {
3060 dump = ioa_cfg->dump;
3061 if (!dump) {
3062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3063 return;
3065 kref_get(&dump->kref);
3066 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3067 ipr_get_ioa_dump(ioa_cfg, dump);
3068 kref_put(&dump->kref, ipr_release_dump);
3070 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3071 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3072 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3074 return;
3077 restart:
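/*
 * The host lock must be dropped around the midlayer add/remove calls,
 * so restart the resource list walk each time the lock is reacquired.
 */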
3078 do {
3079 did_work = 0;
3080 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3081 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3082 return;
3085 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3086 if (res->del_from_ml && res->sdev) {
3087 did_work = 1;
3088 sdev = res->sdev;
3089 if (!scsi_device_get(sdev)) {
3090 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3091 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3092 scsi_remove_device(sdev);
3093 scsi_device_put(sdev);
3094 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3096 break;
3099 } while (did_work);
3101 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3102 if (res->add_to_ml) {
3103 bus = res->bus;
3104 target = res->target;
3105 lun = res->lun;
3106 res->add_to_ml = 0;
3107 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3108 scsi_add_device(ioa_cfg->host, bus, target, lun);
3109 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3110 goto restart;
3114 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3115 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3116 LEAVE;
3119 #ifdef CONFIG_SCSI_IPR_TRACE
3121 * ipr_read_trace - Dump the adapter trace
3122 * @kobj: kobject struct
3123 * @bin_attr: bin_attribute struct
3124 * @buf: buffer
3125 * @off: offset
3126 * @count: buffer size
3128 * Return value:
3129 * number of bytes printed to buffer
3131 static ssize_t ipr_read_trace(struct kobject *kobj,
3132 struct bin_attribute *bin_attr,
3133 char *buf, loff_t off, size_t count)
3135 struct device *dev = container_of(kobj, struct device, kobj);
3136 struct Scsi_Host *shost = class_to_shost(dev);
3137 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3138 unsigned long lock_flags = 0;
3139 ssize_t ret;
3141 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3142 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3143 IPR_TRACE_SIZE);
3144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3146 return ret;
3149 static struct bin_attribute ipr_trace_attr = {
3150 .attr = {
3151 .name = "trace",
3152 .mode = S_IRUGO,
3154 .size = 0,
3155 .read = ipr_read_trace,
3157 #endif
3160 * ipr_show_fw_version - Show the firmware version
3161 * @dev: class device struct
3162 * @buf: buffer
3164 * Return value:
3165 * number of bytes printed to buffer
3167 static ssize_t ipr_show_fw_version(struct device *dev,
3168 struct device_attribute *attr, char *buf)
3170 struct Scsi_Host *shost = class_to_shost(dev);
3171 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3172 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3173 unsigned long lock_flags = 0;
3174 int len;
3176 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3177 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3178 ucode_vpd->major_release, ucode_vpd->card_type,
3179 ucode_vpd->minor_release[0],
3180 ucode_vpd->minor_release[1]);
3181 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3182 return len;
3185 static struct device_attribute ipr_fw_version_attr = {
3186 .attr = {
3187 .name = "fw_version",
3188 .mode = S_IRUGO,
3190 .show = ipr_show_fw_version,
3194 * ipr_show_log_level - Show the adapter's error logging level
3195 * @dev: class device struct
3196 * @buf: buffer
3198 * Return value:
3199 * number of bytes printed to buffer
3201 static ssize_t ipr_show_log_level(struct device *dev,
3202 struct device_attribute *attr, char *buf)
3204 struct Scsi_Host *shost = class_to_shost(dev);
3205 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3206 unsigned long lock_flags = 0;
3207 int len;
3209 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3210 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3211 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3212 return len;
3216 * ipr_store_log_level - Change the adapter's error logging level
3217 * @dev: class device struct
3218 * @buf: buffer
3220 * Return value:
3221 * number of bytes printed to buffer
3223 static ssize_t ipr_store_log_level(struct device *dev,
3224 struct device_attribute *attr,
3225 const char *buf, size_t count)
3227 struct Scsi_Host *shost = class_to_shost(dev);
3228 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3229 unsigned long lock_flags = 0;
3231 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3232 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3233 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3234 return strlen(buf);
3237 static struct device_attribute ipr_log_level_attr = {
3238 .attr = {
3239 .name = "log_level",
3240 .mode = S_IRUGO | S_IWUSR,
3242 .show = ipr_show_log_level,
3243 .store = ipr_store_log_level
3247 * ipr_store_diagnostics - IOA Diagnostics interface
3248 * @dev: device struct
3249 * @buf: buffer
3250 * @count: buffer size
3252 * This function will reset the adapter and wait a reasonable
3253 * amount of time for any errors that the adapter might log.
3255 * Return value:
3256 * count on success / other on failure
3258 static ssize_t ipr_store_diagnostics(struct device *dev,
3259 struct device_attribute *attr,
3260 const char *buf, size_t count)
3262 struct Scsi_Host *shost = class_to_shost(dev);
3263 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3264 unsigned long lock_flags = 0;
3265 int rc = count;
3267 if (!capable(CAP_SYS_ADMIN))
3268 return -EACCES;
3270 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3271 while (ioa_cfg->in_reset_reload) {
3272 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3273 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3274 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
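/*
 * Run diagnostics by forcing a normal shutdown and reset; any error the
 * adapter logs while coming back up is reported as a failure.
 */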
3277 ioa_cfg->errors_logged = 0;
3278 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3280 if (ioa_cfg->in_reset_reload) {
3281 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3282 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3284 /* Wait for a second for any errors to be logged */
3285 msleep(1000);
3286 } else {
3287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3288 return -EIO;
3291 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3292 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3293 rc = -EIO;
3294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3296 return rc;
3299 static struct device_attribute ipr_diagnostics_attr = {
3300 .attr = {
3301 .name = "run_diagnostics",
3302 .mode = S_IWUSR,
3304 .store = ipr_store_diagnostics
3308 * ipr_show_adapter_state - Show the adapter's state
3309 * @dev: device struct
3310 * @buf: buffer
3312 * Return value:
3313 * number of bytes printed to buffer
3315 static ssize_t ipr_show_adapter_state(struct device *dev,
3316 struct device_attribute *attr, char *buf)
3318 struct Scsi_Host *shost = class_to_shost(dev);
3319 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3320 unsigned long lock_flags = 0;
3321 int len;
3323 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3324 if (ioa_cfg->ioa_is_dead)
3325 len = snprintf(buf, PAGE_SIZE, "offline\n");
3326 else
3327 len = snprintf(buf, PAGE_SIZE, "online\n");
3328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329 return len;
3333 * ipr_store_adapter_state - Change adapter state
3334 * @dev: device struct
3335 * @buf: buffer
3336 * @count: buffer size
3338 * This function will change the adapter's state.
3340 * Return value:
3341 * count on success / other on failure
3343 static ssize_t ipr_store_adapter_state(struct device *dev,
3344 struct device_attribute *attr,
3345 const char *buf, size_t count)
3347 struct Scsi_Host *shost = class_to_shost(dev);
3348 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3349 unsigned long lock_flags;
3350 int result = count;
3352 if (!capable(CAP_SYS_ADMIN))
3353 return -EACCES;
3355 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3356 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3357 ioa_cfg->ioa_is_dead = 0;
3358 ioa_cfg->reset_retries = 0;
3359 ioa_cfg->in_ioa_bringdown = 0;
3360 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3362 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3363 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3365 return result;
3368 static struct device_attribute ipr_ioa_state_attr = {
3369 .attr = {
3370 .name = "online_state",
3371 .mode = S_IRUGO | S_IWUSR,
3373 .show = ipr_show_adapter_state,
3374 .store = ipr_store_adapter_state
3378 * ipr_store_reset_adapter - Reset the adapter
3379 * @dev: device struct
3380 * @buf: buffer
3381 * @count: buffer size
3383 * This function will reset the adapter.
3385 * Return value:
3386 * count on success / other on failure
3388 static ssize_t ipr_store_reset_adapter(struct device *dev,
3389 struct device_attribute *attr,
3390 const char *buf, size_t count)
3392 struct Scsi_Host *shost = class_to_shost(dev);
3393 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3394 unsigned long lock_flags;
3395 int result = count;
3397 if (!capable(CAP_SYS_ADMIN))
3398 return -EACCES;
3400 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3401 if (!ioa_cfg->in_reset_reload)
3402 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3403 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3404 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3406 return result;
3409 static struct device_attribute ipr_ioa_reset_attr = {
3410 .attr = {
3411 .name = "reset_host",
3412 .mode = S_IWUSR,
3414 .store = ipr_store_reset_adapter
3418 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3419 * @buf_len: buffer length
3421 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3422 * list to use for microcode download
3424 * Return value:
3425 * pointer to sglist / NULL on failure
3427 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3429 int sg_size, order, bsize_elem, num_elem, i, j;
3430 struct ipr_sglist *sglist;
3431 struct scatterlist *scatterlist;
3432 struct page *page;
3434 /* Get the minimum size per scatter/gather element */
3435 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3437 /* Get the actual size per element */
3438 order = get_order(sg_size);
3440 /* Determine the actual number of bytes per element */
3441 bsize_elem = PAGE_SIZE * (1 << order);
3443 /* Determine the actual number of sg entries needed */
3444 if (buf_len % bsize_elem)
3445 num_elem = (buf_len / bsize_elem) + 1;
3446 else
3447 num_elem = buf_len / bsize_elem;
3449 /* Allocate a scatter/gather list for the DMA */
3450 sglist = kzalloc(sizeof(struct ipr_sglist) +
3451 (sizeof(struct scatterlist) * (num_elem - 1)),
3452 GFP_KERNEL);
3454 if (sglist == NULL) {
3455 ipr_trace;
3456 return NULL;
3459 scatterlist = sglist->scatterlist;
3460 sg_init_table(scatterlist, num_elem);
3462 sglist->order = order;
3463 sglist->num_sg = num_elem;
3465 /* Allocate a bunch of sg elements */
3466 for (i = 0; i < num_elem; i++) {
3467 page = alloc_pages(GFP_KERNEL, order);
3468 if (!page) {
3469 ipr_trace;
3471 /* Free up what we already allocated */
3472 for (j = i - 1; j >= 0; j--)
3473 __free_pages(sg_page(&scatterlist[j]), order);
3474 kfree(sglist);
3475 return NULL;
3478 sg_set_page(&scatterlist[i], page, 0, 0);
3481 return sglist;
3485 * ipr_free_ucode_buffer - Frees a microcode download buffer
3486 * @sglist: scatter/gather list pointer
3488 * Free a DMA'able ucode download buffer previously allocated with
3489 * ipr_alloc_ucode_buffer
3491 * Return value:
3492 * nothing
3494 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3496 int i;
3498 for (i = 0; i < sglist->num_sg; i++)
3499 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3501 kfree(sglist);
3505 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3506 * @sglist: scatter/gather list pointer
3507 * @buffer: buffer pointer
3508 * @len: buffer length
3510 * Copy a microcode image from a user buffer into a buffer allocated by
3511 * ipr_alloc_ucode_buffer
3513 * Return value:
3514 * 0 on success / other on failure
3516 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3517 u8 *buffer, u32 len)
3519 int bsize_elem, i, result = 0;
3520 struct scatterlist *scatterlist;
3521 void *kaddr;
3523 /* Determine the actual number of bytes per element */
3524 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3526 scatterlist = sglist->scatterlist;
3528 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3529 struct page *page = sg_page(&scatterlist[i]);
3531 kaddr = kmap(page);
3532 memcpy(kaddr, buffer, bsize_elem);
3533 kunmap(page);
3535 scatterlist[i].length = bsize_elem;
3537 if (result != 0) {
3538 ipr_trace;
3539 return result;
3543 if (len % bsize_elem) {
3544 struct page *page = sg_page(&scatterlist[i]);
3546 kaddr = kmap(page);
3547 memcpy(kaddr, buffer, len % bsize_elem);
3548 kunmap(page);
3550 scatterlist[i].length = len % bsize_elem;
3553 sglist->buffer_len = len;
3554 return result;
3558 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3559 * @ipr_cmd: ipr command struct
3560 * @sglist: scatter/gather list
3562 * Builds a microcode download IOA data list (IOADL).
3565 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3566 struct ipr_sglist *sglist)
3568 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3569 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3570 struct scatterlist *scatterlist = sglist->scatterlist;
3571 int i;
3573 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3574 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3575 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3577 ioarcb->ioadl_len =
3578 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3579 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3580 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3581 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3582 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
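/* Flag the final descriptor so the adapter knows where the IOADL ends. */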
3585 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3589 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3590 * @ipr_cmd: ipr command struct
3591 * @sglist: scatter/gather list
3593 * Builds a microcode download IOA data list (IOADL).
3596 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3597 struct ipr_sglist *sglist)
3599 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3600 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3601 struct scatterlist *scatterlist = sglist->scatterlist;
3602 int i;
3604 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3605 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3606 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3608 ioarcb->ioadl_len =
3609 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3611 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3612 ioadl[i].flags_and_data_len =
3613 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3614 ioadl[i].address =
3615 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3618 ioadl[i-1].flags_and_data_len |=
3619 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3623 * ipr_update_ioa_ucode - Update IOA's microcode
3624 * @ioa_cfg: ioa config struct
3625 * @sglist: scatter/gather list
3627 * Initiate an adapter reset to update the IOA's microcode
3629 * Return value:
3630 * 0 on success / -EIO on failure
3632 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3633 struct ipr_sglist *sglist)
3635 unsigned long lock_flags;
3637 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3638 while (ioa_cfg->in_reset_reload) {
3639 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3640 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3641 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3644 if (ioa_cfg->ucode_sglist) {
3645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3646 dev_err(&ioa_cfg->pdev->dev,
3647 "Microcode download already in progress\n");
3648 return -EIO;
3651 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3652 sglist->num_sg, DMA_TO_DEVICE);
3654 if (!sglist->num_dma_sg) {
3655 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3656 dev_err(&ioa_cfg->pdev->dev,
3657 "Failed to map microcode download buffer!\n");
3658 return -EIO;
3661 ioa_cfg->ucode_sglist = sglist;
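/*
 * The download itself is performed by the reset job initiated below,
 * which picks up ucode_sglist while bringing the adapter back up.
 */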
3662 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3663 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3664 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3666 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3667 ioa_cfg->ucode_sglist = NULL;
3668 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3669 return 0;
3673 * ipr_store_update_fw - Update the firmware on the adapter
3674 * @dev: device struct
3675 * @buf: buffer
3676 * @count: buffer size
3678 * This function will update the firmware on the adapter.
3680 * Return value:
3681 * count on success / other on failure
3683 static ssize_t ipr_store_update_fw(struct device *dev,
3684 struct device_attribute *attr,
3685 const char *buf, size_t count)
3687 struct Scsi_Host *shost = class_to_shost(dev);
3688 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3689 struct ipr_ucode_image_header *image_hdr;
3690 const struct firmware *fw_entry;
3691 struct ipr_sglist *sglist;
3692 char fname[100];
3693 char *src;
3694 int len, result, dnld_size;
3696 if (!capable(CAP_SYS_ADMIN))
3697 return -EACCES;
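/*
 * The sysfs write normally ends with a newline; copy the name and drop
 * the final character so request_firmware() is given a clean file name.
 */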
3699 len = snprintf(fname, 99, "%s", buf);
3700 fname[len-1] = '\0';
3702         if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3703 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3704 return -EIO;
3707 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3709 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3710 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3711 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3712 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3713 release_firmware(fw_entry);
3714 return -EINVAL;
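/*
 * The firmware image is a card-type-specific header followed by the raw
 * microcode; only the data after the header is copied into the DMA-able
 * scatter/gather buffer and downloaded to the adapter.
 */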
3717 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3718 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3719 sglist = ipr_alloc_ucode_buffer(dnld_size);
3721 if (!sglist) {
3722 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3723 release_firmware(fw_entry);
3724 return -ENOMEM;
3727 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3729 if (result) {
3730 dev_err(&ioa_cfg->pdev->dev,
3731 "Microcode buffer copy to DMA buffer failed\n");
3732 goto out;
3735 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3737 if (!result)
3738 result = count;
3739 out:
3740 ipr_free_ucode_buffer(sglist);
3741 release_firmware(fw_entry);
3742 return result;
3745 static struct device_attribute ipr_update_fw_attr = {
3746 .attr = {
3747 .name = "update_fw",
3748 .mode = S_IWUSR,
3750 .store = ipr_store_update_fw
3753 static struct device_attribute *ipr_ioa_attrs[] = {
3754 &ipr_fw_version_attr,
3755 &ipr_log_level_attr,
3756 &ipr_diagnostics_attr,
3757 &ipr_ioa_state_attr,
3758 &ipr_ioa_reset_attr,
3759 &ipr_update_fw_attr,
3760 NULL,
3763 #ifdef CONFIG_SCSI_IPR_DUMP
3765 * ipr_read_dump - Dump the adapter
3766 * @kobj: kobject struct
3767 * @bin_attr: bin_attribute struct
3768 * @buf: buffer
3769 * @off: offset
3770 * @count: buffer size
3772 * Return value:
3773 * number of bytes printed to buffer
3775 static ssize_t ipr_read_dump(struct kobject *kobj,
3776 struct bin_attribute *bin_attr,
3777 char *buf, loff_t off, size_t count)
3779 struct device *cdev = container_of(kobj, struct device, kobj);
3780 struct Scsi_Host *shost = class_to_shost(cdev);
3781 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3782 struct ipr_dump *dump;
3783 unsigned long lock_flags = 0;
3784 char *src;
3785 int len;
3786 size_t rc = count;
3788 if (!capable(CAP_SYS_ADMIN))
3789 return -EACCES;
3791 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3792 dump = ioa_cfg->dump;
3794 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3795 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3796 return 0;
3798 kref_get(&dump->kref);
3799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3801 if (off > dump->driver_dump.hdr.len) {
3802 kref_put(&dump->kref, ipr_release_dump);
3803 return 0;
3806 if (off + count > dump->driver_dump.hdr.len) {
3807 count = dump->driver_dump.hdr.len - off;
3808 rc = count;
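/*
 * The dump is returned in three pieces laid out back to back: the
 * driver dump, the IOA dump header, and finally the IOA data itself,
 * which lives in an array of pages and is copied out one page at a time.
 */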
3811 if (count && off < sizeof(dump->driver_dump)) {
3812 if (off + count > sizeof(dump->driver_dump))
3813 len = sizeof(dump->driver_dump) - off;
3814 else
3815 len = count;
3816 src = (u8 *)&dump->driver_dump + off;
3817 memcpy(buf, src, len);
3818 buf += len;
3819 off += len;
3820 count -= len;
3823 off -= sizeof(dump->driver_dump);
3825 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3826 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3827 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3828 else
3829 len = count;
3830 src = (u8 *)&dump->ioa_dump + off;
3831 memcpy(buf, src, len);
3832 buf += len;
3833 off += len;
3834 count -= len;
3837 off -= offsetof(struct ipr_ioa_dump, ioa_data);
3839 while (count) {
3840 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3841 len = PAGE_ALIGN(off) - off;
3842 else
3843 len = count;
3844 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3845 src += off & ~PAGE_MASK;
3846 memcpy(buf, src, len);
3847 buf += len;
3848 off += len;
3849 count -= len;
3852 kref_put(&dump->kref, ipr_release_dump);
3853 return rc;
3857 * ipr_alloc_dump - Prepare for adapter dump
3858 * @ioa_cfg: ioa config struct
3860 * Return value:
3861 * 0 on success / other on failure
3863 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3865 struct ipr_dump *dump;
3866 unsigned long lock_flags = 0;
3868 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3870 if (!dump) {
3871 ipr_err("Dump memory allocation failed\n");
3872 return -ENOMEM;
3875 kref_init(&dump->kref);
3876 dump->ioa_cfg = ioa_cfg;
3878 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3880 if (INACTIVE != ioa_cfg->sdt_state) {
3881 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3882 kfree(dump);
3883 return 0;
3886 ioa_cfg->dump = dump;
3887 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3888 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3889 ioa_cfg->dump_taken = 1;
3890 schedule_work(&ioa_cfg->work_q);
3892 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3894 return 0;
3898 * ipr_free_dump - Free adapter dump memory
3899 * @ioa_cfg: ioa config struct
3901 * Return value:
3902 * 0 on success / other on failure
3904 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3906 struct ipr_dump *dump;
3907 unsigned long lock_flags = 0;
3909 ENTER;
3911 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3912 dump = ioa_cfg->dump;
3913 if (!dump) {
3914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3915 return 0;
3918 ioa_cfg->dump = NULL;
3919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3921 kref_put(&dump->kref, ipr_release_dump);
3923 LEAVE;
3924 return 0;
3928 * ipr_write_dump - Setup dump state of adapter
3929 * @kobj: kobject struct
3930 * @bin_attr: bin_attribute struct
3931 * @buf: buffer
3932 * @off: offset
3933 * @count: buffer size
3935 * Return value:
3936  *      count on success / other on failure
3938 static ssize_t ipr_write_dump(struct kobject *kobj,
3939 struct bin_attribute *bin_attr,
3940 char *buf, loff_t off, size_t count)
3942 struct device *cdev = container_of(kobj, struct device, kobj);
3943 struct Scsi_Host *shost = class_to_shost(cdev);
3944 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3945 int rc;
3947 if (!capable(CAP_SYS_ADMIN))
3948 return -EACCES;
3950 if (buf[0] == '1')
3951 rc = ipr_alloc_dump(ioa_cfg);
3952 else if (buf[0] == '0')
3953 rc = ipr_free_dump(ioa_cfg);
3954 else
3955 return -EINVAL;
3957 if (rc)
3958 return rc;
3959 else
3960 return count;
3963 static struct bin_attribute ipr_dump_attr = {
3964 .attr = {
3965 .name = "dump",
3966 .mode = S_IRUSR | S_IWUSR,
3968 .size = 0,
3969 .read = ipr_read_dump,
3970 .write = ipr_write_dump
3972 #else
3973 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
3974 #endif
3977 * ipr_change_queue_depth - Change the device's queue depth
3978 * @sdev: scsi device struct
3979 * @qdepth: depth to set
3980 * @reason: calling context
3982 * Return value:
3983 * actual depth set
3985 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3986 int reason)
3988 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3989 struct ipr_resource_entry *res;
3990 unsigned long lock_flags = 0;
3992 if (reason != SCSI_QDEPTH_DEFAULT)
3993 return -EOPNOTSUPP;
3995 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3996 res = (struct ipr_resource_entry *)sdev->hostdata;
3998 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3999 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4000 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4002 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4003 return sdev->queue_depth;
4007 * ipr_change_queue_type - Change the device's queue type
4008  * @sdev: scsi device struct
4009 * @tag_type: type of tags to use
4011 * Return value:
4012 * actual queue type set
4014 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4016 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4017 struct ipr_resource_entry *res;
4018 unsigned long lock_flags = 0;
4020 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4021 res = (struct ipr_resource_entry *)sdev->hostdata;
4023 if (res) {
4024 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4026 * We don't bother quiescing the device here since the
4027 * adapter firmware does it for us.
4029 scsi_set_tag_type(sdev, tag_type);
4031 if (tag_type)
4032 scsi_activate_tcq(sdev, sdev->queue_depth);
4033 else
4034 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4035 } else
4036 tag_type = 0;
4037 } else
4038 tag_type = 0;
4040 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4041 return tag_type;
4045 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4046 * @dev: device struct
4047 * @buf: buffer
4049 * Return value:
4050 * number of bytes printed to buffer
4052 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4054 struct scsi_device *sdev = to_scsi_device(dev);
4055 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4056 struct ipr_resource_entry *res;
4057 unsigned long lock_flags = 0;
4058 ssize_t len = -ENXIO;
4060 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4061 res = (struct ipr_resource_entry *)sdev->hostdata;
4062 if (res)
4063 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4065 return len;
4068 static struct device_attribute ipr_adapter_handle_attr = {
4069 .attr = {
4070 .name = "adapter_handle",
4071 .mode = S_IRUSR,
4073 .show = ipr_show_adapter_handle
4077 * ipr_show_resource_path - Show the resource path for this device.
4078 * @dev: device struct
4079 * @buf: buffer
4081 * Return value:
4082 * number of bytes printed to buffer
4084 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4086 struct scsi_device *sdev = to_scsi_device(dev);
4087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4088 struct ipr_resource_entry *res;
4089 unsigned long lock_flags = 0;
4090 ssize_t len = -ENXIO;
4091 char buffer[IPR_MAX_RES_PATH_LENGTH];
4093 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4094 res = (struct ipr_resource_entry *)sdev->hostdata;
4095 if (res)
4096 len = snprintf(buf, PAGE_SIZE, "%s\n",
4097 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4098 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4099 return len;
4102 static struct device_attribute ipr_resource_path_attr = {
4103 .attr = {
4104 .name = "resource_path",
4105 .mode = S_IRUSR,
4107 .show = ipr_show_resource_path
4110 static struct device_attribute *ipr_dev_attrs[] = {
4111 &ipr_adapter_handle_attr,
4112 &ipr_resource_path_attr,
4113 NULL,
4117 * ipr_biosparam - Return the HSC mapping
4118 * @sdev: scsi device struct
4119 * @block_device: block device pointer
4120 * @capacity: capacity of the device
4121 * @parm: Array containing returned HSC values.
4123 * This function generates the HSC parms that fdisk uses.
4124 * We want to make sure we return something that places partitions
4125 * on 4k boundaries for best performance with the IOA.
4127 * Return value:
4128 * 0 on success
4130 static int ipr_biosparam(struct scsi_device *sdev,
4131 struct block_device *block_device,
4132 sector_t capacity, int *parm)
4134 int heads, sectors;
4135 sector_t cylinders;
4137 heads = 128;
4138 sectors = 32;
4140 cylinders = capacity;
4141 sector_div(cylinders, (128 * 32));
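/*
 * 128 heads * 32 sectors/track = 4096 sectors per cylinder, and a track
 * is 32 * 512 bytes = 16KB, so partitions that fdisk places on track or
 * cylinder boundaries start on 4k-aligned LBAs.
 */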
4143 /* return result */
4144 parm[0] = heads;
4145 parm[1] = sectors;
4146 parm[2] = cylinders;
4148 return 0;
4152 * ipr_find_starget - Find target based on bus/target.
4153 * @starget: scsi target struct
4155 * Return value:
4156 * resource entry pointer if found / NULL if not found
4158 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4160 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4161 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4162 struct ipr_resource_entry *res;
4164 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4165 if ((res->bus == starget->channel) &&
4166 (res->target == starget->id) &&
4167 (res->lun == 0)) {
4168 return res;
4172 return NULL;
4175 static struct ata_port_info sata_port_info;
4178 * ipr_target_alloc - Prepare for commands to a SCSI target
4179 * @starget: scsi target struct
4181 * If the device is a SATA device, this function allocates an
4182 * ATA port with libata, else it does nothing.
4184 * Return value:
4185 * 0 on success / non-0 on failure
4187 static int ipr_target_alloc(struct scsi_target *starget)
4189 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4190 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4191 struct ipr_sata_port *sata_port;
4192 struct ata_port *ap;
4193 struct ipr_resource_entry *res;
4194 unsigned long lock_flags;
4196 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4197 res = ipr_find_starget(starget);
4198 starget->hostdata = NULL;
4200 if (res && ipr_is_gata(res)) {
4201 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4202 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4203 if (!sata_port)
4204 return -ENOMEM;
4206 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4207 if (ap) {
4208 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4209 sata_port->ioa_cfg = ioa_cfg;
4210 sata_port->ap = ap;
4211 sata_port->res = res;
4213 res->sata_port = sata_port;
4214 ap->private_data = sata_port;
4215 starget->hostdata = sata_port;
4216 } else {
4217 kfree(sata_port);
4218 return -ENOMEM;
4221 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4223 return 0;
4227 * ipr_target_destroy - Destroy a SCSI target
4228 * @starget: scsi target struct
4230 * If the device was a SATA device, this function frees the libata
4231 * ATA port, else it does nothing.
4234 static void ipr_target_destroy(struct scsi_target *starget)
4236 struct ipr_sata_port *sata_port = starget->hostdata;
4237 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4238 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4240 if (ioa_cfg->sis64) {
4241 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4242 clear_bit(starget->id, ioa_cfg->array_ids);
4243 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4244 clear_bit(starget->id, ioa_cfg->vset_ids);
4245 else if (starget->channel == 0)
4246 clear_bit(starget->id, ioa_cfg->target_ids);
4249 if (sata_port) {
4250 starget->hostdata = NULL;
4251 ata_sas_port_destroy(sata_port->ap);
4252 kfree(sata_port);
4257 * ipr_find_sdev - Find device based on bus/target/lun.
4258 * @sdev: scsi device struct
4260 * Return value:
4261 * resource entry pointer if found / NULL if not found
4263 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4265 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4266 struct ipr_resource_entry *res;
4268 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4269 if ((res->bus == sdev->channel) &&
4270 (res->target == sdev->id) &&
4271 (res->lun == sdev->lun))
4272 return res;
4275 return NULL;
4279 * ipr_slave_destroy - Unconfigure a SCSI device
4280 * @sdev: scsi device struct
4282 * Return value:
4283 * nothing
4285 static void ipr_slave_destroy(struct scsi_device *sdev)
4287 struct ipr_resource_entry *res;
4288 struct ipr_ioa_cfg *ioa_cfg;
4289 unsigned long lock_flags = 0;
4291 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4293 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4294 res = (struct ipr_resource_entry *) sdev->hostdata;
4295 if (res) {
4296 if (res->sata_port)
4297 ata_port_disable(res->sata_port->ap);
4298 sdev->hostdata = NULL;
4299 res->sdev = NULL;
4300 res->sata_port = NULL;
4302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4306 * ipr_slave_configure - Configure a SCSI device
4307 * @sdev: scsi device struct
4309 * This function configures the specified scsi device.
4311 * Return value:
4312 * 0 on success
4314 static int ipr_slave_configure(struct scsi_device *sdev)
4316 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4317 struct ipr_resource_entry *res;
4318 struct ata_port *ap = NULL;
4319 unsigned long lock_flags = 0;
4320 char buffer[IPR_MAX_RES_PATH_LENGTH];
4322 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4323 res = sdev->hostdata;
4324 if (res) {
4325 if (ipr_is_af_dasd_device(res))
4326 sdev->type = TYPE_RAID;
4327 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4328 sdev->scsi_level = 4;
4329 sdev->no_uld_attach = 1;
4331 if (ipr_is_vset_device(res)) {
4332 blk_queue_rq_timeout(sdev->request_queue,
4333 IPR_VSET_RW_TIMEOUT);
4334 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4336 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
4337 sdev->allow_restart = 1;
4338 if (ipr_is_gata(res) && res->sata_port)
4339 ap = res->sata_port->ap;
4340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4342 if (ap) {
4343 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4344 ata_sas_slave_configure(sdev, ap);
4345 } else
4346 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4347 if (ioa_cfg->sis64)
4348 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4349 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4350 return 0;
4352 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4353 return 0;
4357 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4358 * @sdev: scsi device struct
4360 * This function initializes an ATA port so that future commands
4361 * sent through queuecommand will work.
4363 * Return value:
4364 * 0 on success
4366 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4368 struct ipr_sata_port *sata_port = NULL;
4369 int rc = -ENXIO;
4371 ENTER;
4372 if (sdev->sdev_target)
4373 sata_port = sdev->sdev_target->hostdata;
4374 if (sata_port)
4375 rc = ata_sas_port_init(sata_port->ap);
4376 if (rc)
4377 ipr_slave_destroy(sdev);
4379 LEAVE;
4380 return rc;
4384 * ipr_slave_alloc - Prepare for commands to a device.
4385 * @sdev: scsi device struct
4387 * This function saves a pointer to the resource entry
4388 * in the scsi device struct if the device exists. We
4389 * can then use this pointer in ipr_queuecommand when
4390 * handling new commands.
4392 * Return value:
4393 * 0 on success / -ENXIO if device does not exist
4395 static int ipr_slave_alloc(struct scsi_device *sdev)
4397 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4398 struct ipr_resource_entry *res;
4399 unsigned long lock_flags;
4400 int rc = -ENXIO;
4402 sdev->hostdata = NULL;
4404 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4406 res = ipr_find_sdev(sdev);
4407 if (res) {
4408 res->sdev = sdev;
4409 res->add_to_ml = 0;
4410 res->in_erp = 0;
4411 sdev->hostdata = res;
4412 if (!ipr_is_naca_model(res))
4413 res->needs_sync_complete = 1;
4414 rc = 0;
4415 if (ipr_is_gata(res)) {
4416 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4417 return ipr_ata_slave_alloc(sdev);
4421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4423 return rc;
4427  * __ipr_eh_host_reset - Reset the host adapter
4428 * @scsi_cmd: scsi command struct
4430 * Return value:
4431 * SUCCESS / FAILED
4433 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
4435 struct ipr_ioa_cfg *ioa_cfg;
4436 int rc;
4438 ENTER;
4439 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4441 dev_err(&ioa_cfg->pdev->dev,
4442 "Adapter being reset as a result of error recovery.\n");
4444 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4445 ioa_cfg->sdt_state = GET_DUMP;
4447 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4449 LEAVE;
4450 return rc;
4453 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4455 int rc;
4457 spin_lock_irq(cmd->device->host->host_lock);
4458 rc = __ipr_eh_host_reset(cmd);
4459 spin_unlock_irq(cmd->device->host->host_lock);
4461 return rc;
4465 * ipr_device_reset - Reset the device
4466 * @ioa_cfg: ioa config struct
4467 * @res: resource entry struct
4469 * This function issues a device reset to the affected device.
4470 * If the device is a SCSI device, a LUN reset will be sent
4471 * to the device first. If that does not work, a target reset
4472 * will be sent. If the device is a SATA device, a PHY reset will
4473 * be sent.
4475 * Return value:
4476 * 0 on success / non-zero on failure
4478 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4479 struct ipr_resource_entry *res)
4481 struct ipr_cmnd *ipr_cmd;
4482 struct ipr_ioarcb *ioarcb;
4483 struct ipr_cmd_pkt *cmd_pkt;
4484 struct ipr_ioarcb_ata_regs *regs;
4485 u32 ioasc;
4487 ENTER;
4488 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4489 ioarcb = &ipr_cmd->ioarcb;
4490 cmd_pkt = &ioarcb->cmd_pkt;
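/*
 * On SIS-64 adapters the ATA register block lives in the command's
 * ioadl union, immediately following the IOARCB, and its offset is
 * passed in add_cmd_parms_offset; older adapters carry it in add_data.
 */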
4492 if (ipr_cmd->ioa_cfg->sis64) {
4493 regs = &ipr_cmd->i.ata_ioadl.regs;
4494 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4495 } else
4496 regs = &ioarcb->u.add_data.u.regs;
4498 ioarcb->res_handle = res->res_handle;
4499 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4500 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4501 if (ipr_is_gata(res)) {
4502 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4503 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4504 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4507 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4508 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4509 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4510 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
4511 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
4512 sizeof(struct ipr_ioasa_gata));
4514 LEAVE;
4515 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4519 * ipr_sata_reset - Reset the SATA port
4520 * @link: SATA link to reset
4521 * @classes: class of the attached device
4523 * This function issues a SATA phy reset to the affected ATA link.
4525 * Return value:
4526 * 0 on success / non-zero on failure
4528 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4529 unsigned long deadline)
4531 struct ipr_sata_port *sata_port = link->ap->private_data;
4532 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4533 struct ipr_resource_entry *res;
4534 unsigned long lock_flags = 0;
4535 int rc = -ENXIO;
4537 ENTER;
4538 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4539         while (ioa_cfg->in_reset_reload) {
4540 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4541 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4542 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4545 res = sata_port->res;
4546 if (res) {
4547 rc = ipr_device_reset(ioa_cfg, res);
4548 *classes = res->ata_class;
4551 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4552 LEAVE;
4553 return rc;
4557 * ipr_eh_dev_reset - Reset the device
4558 * @scsi_cmd: scsi command struct
4560 * This function issues a device reset to the affected device.
4561 * A LUN reset will be sent to the device first. If that does
4562 * not work, a target reset will be sent.
4564 * Return value:
4565 * SUCCESS / FAILED
4567 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4569 struct ipr_cmnd *ipr_cmd;
4570 struct ipr_ioa_cfg *ioa_cfg;
4571 struct ipr_resource_entry *res;
4572 struct ata_port *ap;
4573 int rc = 0;
4575 ENTER;
4576 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4577 res = scsi_cmd->device->hostdata;
4579 if (!res)
4580 return FAILED;
4583 * If we are currently going through reset/reload, return failed. This will force the
4584 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4585 * reset to complete
4587 if (ioa_cfg->in_reset_reload)
4588 return FAILED;
4589 if (ioa_cfg->ioa_is_dead)
4590 return FAILED;
4592 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4593 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4594 if (ipr_cmd->scsi_cmd)
4595 ipr_cmd->done = ipr_scsi_eh_done;
4596 if (ipr_cmd->qc)
4597 ipr_cmd->done = ipr_sata_eh_done;
4598 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4599 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4600 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4605 res->resetting_device = 1;
4606 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4608 if (ipr_is_gata(res) && res->sata_port) {
4609 ap = res->sata_port->ap;
4610 spin_unlock_irq(scsi_cmd->device->host->host_lock);
4611 ata_std_error_handler(ap);
4612 spin_lock_irq(scsi_cmd->device->host->host_lock);
4614 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4615 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4616 rc = -EIO;
4617 break;
4620 } else
4621 rc = ipr_device_reset(ioa_cfg, res);
4622 res->resetting_device = 0;
4624 LEAVE;
4625 return (rc ? FAILED : SUCCESS);
4628 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4630 int rc;
4632 spin_lock_irq(cmd->device->host->host_lock);
4633 rc = __ipr_eh_dev_reset(cmd);
4634 spin_unlock_irq(cmd->device->host->host_lock);
4636 return rc;
4640 * ipr_bus_reset_done - Op done function for bus reset.
4641 * @ipr_cmd: ipr command struct
4643 * This function is the op done function for a bus reset
4645 * Return value:
4646 * none
4648 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4650 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4651 struct ipr_resource_entry *res;
4653 ENTER;
4654 if (!ioa_cfg->sis64)
4655 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4656 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4657 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4658 break;
4663 * If abort has not completed, indicate the reset has, else call the
4664 * abort's done function to wake the sleeping eh thread
4666 if (ipr_cmd->sibling->sibling)
4667 ipr_cmd->sibling->sibling = NULL;
4668 else
4669 ipr_cmd->sibling->done(ipr_cmd->sibling);
4671 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4672 LEAVE;
4676 * ipr_abort_timeout - An abort task has timed out
4677 * @ipr_cmd: ipr command struct
4679 * This function handles when an abort task times out. If this
4680 * happens we issue a bus reset since we have resources tied
4681 * up that must be freed before returning to the midlayer.
4683 * Return value:
4684 * none
4686 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4688 struct ipr_cmnd *reset_cmd;
4689 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4690 struct ipr_cmd_pkt *cmd_pkt;
4691 unsigned long lock_flags = 0;
4693 ENTER;
4694 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4695 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4696 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4697 return;
4700 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4701 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4702 ipr_cmd->sibling = reset_cmd;
4703 reset_cmd->sibling = ipr_cmd;
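/*
 * The timed-out abort and the bus reset are cross-linked through
 * ->sibling so that ipr_bus_reset_done() can tell whether the abort
 * completed while the reset was still outstanding.
 */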
4704 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4705 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4706 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4707 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4708 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4710 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4711 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4712 LEAVE;
4716 * ipr_cancel_op - Cancel specified op
4717 * @scsi_cmd: scsi command struct
4719  * This function cancels the specified op.
4721 * Return value:
4722 * SUCCESS / FAILED
4724 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4726 struct ipr_cmnd *ipr_cmd;
4727 struct ipr_ioa_cfg *ioa_cfg;
4728 struct ipr_resource_entry *res;
4729 struct ipr_cmd_pkt *cmd_pkt;
4730 u32 ioasc;
4731 int op_found = 0;
4733 ENTER;
4734 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4735 res = scsi_cmd->device->hostdata;
4737 /* If we are currently going through reset/reload, return failed.
4738 * This will force the mid-layer to call ipr_eh_host_reset,
4739 * which will then go to sleep and wait for the reset to complete
4741 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4742 return FAILED;
4743 if (!res || !ipr_is_gscsi(res))
4744 return FAILED;
4746 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4747 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4748 ipr_cmd->done = ipr_scsi_eh_done;
4749 op_found = 1;
4750 break;
4754 if (!op_found)
4755 return SUCCESS;
4757 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4758 ipr_cmd->ioarcb.res_handle = res->res_handle;
4759 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4760 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4761 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4762 ipr_cmd->u.sdev = scsi_cmd->device;
4764 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4765 scsi_cmd->cmnd[0]);
4766 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4767 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4770 * If the abort task timed out and we sent a bus reset, we will get
4771          * one of the following responses to the abort
4773 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4774 ioasc = 0;
4775 ipr_trace;
4778 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4779 if (!ipr_is_naca_model(res))
4780 res->needs_sync_complete = 1;
4782 LEAVE;
4783 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4787 * ipr_eh_abort - Abort a single op
4788 * @scsi_cmd: scsi command struct
4790 * Return value:
4791 * SUCCESS / FAILED
4793 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4795 unsigned long flags;
4796 int rc;
4798 ENTER;
4800 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4801 rc = ipr_cancel_op(scsi_cmd);
4802 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4804 LEAVE;
4805 return rc;
4809 * ipr_handle_other_interrupt - Handle "other" interrupts
4810 * @ioa_cfg: ioa config struct
4811 * @int_reg: interrupt register
4813 * Return value:
4814 * IRQ_NONE / IRQ_HANDLED
4816 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4817 volatile u32 int_reg)
4819 irqreturn_t rc = IRQ_HANDLED;
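/*
 * Two cases are handled here: a "transition to operational" interrupt
 * simply resumes the in-progress reset job; anything else is an adapter
 * error (unit check or permanent failure) and is answered by masking
 * interrupts and initiating an IOA reset.
 */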
4821 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4822 /* Mask the interrupt */
4823 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4825 /* Clear the interrupt */
4826 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4827 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4829 list_del(&ioa_cfg->reset_cmd->queue);
4830 del_timer(&ioa_cfg->reset_cmd->timer);
4831 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4832 } else {
4833 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4834 ioa_cfg->ioa_unit_checked = 1;
4835 else
4836 dev_err(&ioa_cfg->pdev->dev,
4837 "Permanent IOA failure. 0x%08X\n", int_reg);
4839 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4840 ioa_cfg->sdt_state = GET_DUMP;
4842 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4843 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4846 return rc;
4850 * ipr_isr_eh - Interrupt service routine error handler
4851 * @ioa_cfg: ioa config struct
4852 * @msg: message to log
4854 * Return value:
4855 * none
4857 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4859 ioa_cfg->errors_logged++;
4860 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4862 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4863 ioa_cfg->sdt_state = GET_DUMP;
4865 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4869 * ipr_isr - Interrupt service routine
4870 * @irq: irq number
4871 * @devp: pointer to ioa config struct
4873 * Return value:
4874 * IRQ_NONE / IRQ_HANDLED
4876 static irqreturn_t ipr_isr(int irq, void *devp)
4878 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4879 unsigned long lock_flags = 0;
4880 volatile u32 int_reg, int_mask_reg;
4881 u32 ioasc;
4882 u16 cmd_index;
4883 int num_hrrq = 0;
4884 struct ipr_cmnd *ipr_cmd;
4885 irqreturn_t rc = IRQ_NONE;
4887 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4889 /* If interrupts are disabled, ignore the interrupt */
4890 if (!ioa_cfg->allow_interrupts) {
4891 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4892 return IRQ_NONE;
4895 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4896 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4898         /* If an interrupt on the adapter did not occur, ignore it; in the
4899          * case of SIS 64, check whether it was a stage change interrupt.
4901 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4902 if (ioa_cfg->sis64) {
4903 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4904 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4905 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4907 /* clear stage change */
4908 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4909 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4910 list_del(&ioa_cfg->reset_cmd->queue);
4911 del_timer(&ioa_cfg->reset_cmd->timer);
4912 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4913 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4914 return IRQ_HANDLED;
4918 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4919 return IRQ_NONE;
4922 while (1) {
4923 ipr_cmd = NULL;
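/*
 * The host request/response queue (HRRQ) is circular: an entry is
 * valid only while its toggle bit matches ioa_cfg->toggle_bit, which
 * the driver flips each time it wraps past hrrq_end.
 */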
4925 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4926 ioa_cfg->toggle_bit) {
4928 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4929 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4931 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4932 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
4933 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4934 return IRQ_HANDLED;
4937 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4939 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4941 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4943 list_del(&ipr_cmd->queue);
4944 del_timer(&ipr_cmd->timer);
4945 ipr_cmd->done(ipr_cmd);
4947 rc = IRQ_HANDLED;
4949 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4950 ioa_cfg->hrrq_curr++;
4951 } else {
4952 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4953 ioa_cfg->toggle_bit ^= 1u;
4957 if (ipr_cmd != NULL) {
4958 /* Clear the PCI interrupt */
4959 do {
4960 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
4961 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4962 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4963 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4965 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4966 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4967 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4968 return IRQ_HANDLED;
4971 } else
4972 break;
4975 if (unlikely(rc == IRQ_NONE))
4976 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4978 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4979 return rc;
4983 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
4984 * @ioa_cfg: ioa config struct
4985 * @ipr_cmd: ipr command struct
4987 * Return value:
4988 * 0 on success / -1 on failure
4990 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
4991 struct ipr_cmnd *ipr_cmd)
4993 int i, nseg;
4994 struct scatterlist *sg;
4995 u32 length;
4996 u32 ioadl_flags = 0;
4997 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4998 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4999 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5001 length = scsi_bufflen(scsi_cmd);
5002 if (!length)
5003 return 0;
5005 nseg = scsi_dma_map(scsi_cmd);
5006 if (nseg < 0) {
5007 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5008 return -1;
5011 ipr_cmd->dma_use_sg = nseg;
5013 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5014 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5015 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5016 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5017 ioadl_flags = IPR_IOADL_FLAGS_READ;
5019 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5020 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5021 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5022 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5025 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5026 return 0;
5030 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5031 * @ioa_cfg: ioa config struct
5032 * @ipr_cmd: ipr command struct
5034 * Return value:
5035 * 0 on success / -1 on failure
5037 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5038 struct ipr_cmnd *ipr_cmd)
5040 int i, nseg;
5041 struct scatterlist *sg;
5042 u32 length;
5043 u32 ioadl_flags = 0;
5044 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5045 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5046 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5048 length = scsi_bufflen(scsi_cmd);
5049 if (!length)
5050 return 0;
5052 nseg = scsi_dma_map(scsi_cmd);
5053 if (nseg < 0) {
5054 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5055 return -1;
5058 ipr_cmd->dma_use_sg = nseg;
5060 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5061 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5062 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5063 ioarcb->data_transfer_length = cpu_to_be32(length);
5064 ioarcb->ioadl_len =
5065 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5066 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5067 ioadl_flags = IPR_IOADL_FLAGS_READ;
5068 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5069 ioarcb->read_ioadl_len =
5070 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
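/*
 * If the list is short enough, place the IOADL in the add_data area of
 * the IOARCB itself so the adapter does not need a separate DMA fetch
 * for the descriptor list.
 */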
5073 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5074 ioadl = ioarcb->u.add_data.u.ioadl;
5075 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5076 offsetof(struct ipr_ioarcb, u.add_data));
5077 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5080 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5081 ioadl[i].flags_and_data_len =
5082 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5083 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5086 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5087 return 0;
5091 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5092 * @scsi_cmd: scsi command struct
5094 * Return value:
5095 * task attributes
5097 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5099 u8 tag[2];
5100 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5102 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5103 switch (tag[0]) {
5104 case MSG_SIMPLE_TAG:
5105 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5106 break;
5107 case MSG_HEAD_TAG:
5108 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5109 break;
5110 case MSG_ORDERED_TAG:
5111 rc = IPR_FLAGS_LO_ORDERED_TASK;
5112 break;
5116 return rc;
5120 * ipr_erp_done - Process completion of ERP for a device
5121 * @ipr_cmd: ipr command struct
5123 * This function copies the sense buffer into the scsi_cmd
5124  * struct and calls the scsi_done function.
5126 * Return value:
5127 * nothing
5129 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5131 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5132 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5133 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5134 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5136 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5137 scsi_cmd->result |= (DID_ERROR << 16);
5138 scmd_printk(KERN_ERR, scsi_cmd,
5139 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5140 } else {
5141 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5142 SCSI_SENSE_BUFFERSIZE);
5145 if (res) {
5146 if (!ipr_is_naca_model(res))
5147 res->needs_sync_complete = 1;
5148 res->in_erp = 0;
5150 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5151 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5152 scsi_cmd->scsi_done(scsi_cmd);
5156 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5157 * @ipr_cmd: ipr command struct
5159 * Return value:
5160 * none
5162 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5164 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5165 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5166 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5168 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5169 ioarcb->data_transfer_length = 0;
5170 ioarcb->read_data_transfer_length = 0;
5171 ioarcb->ioadl_len = 0;
5172 ioarcb->read_ioadl_len = 0;
5173 ioasa->ioasc = 0;
5174 ioasa->residual_data_len = 0;
5176 if (ipr_cmd->ioa_cfg->sis64)
5177 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5178 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5179 else {
5180 ioarcb->write_ioadl_addr =
5181 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5182 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5187 * ipr_erp_request_sense - Send request sense to a device
5188 * @ipr_cmd: ipr command struct
5190 * This function sends a request sense to a device as a result
5191 * of a check condition.
5193 * Return value:
5194 * nothing
5196 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5198 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5199 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5201 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5202 ipr_erp_done(ipr_cmd);
5203 return;
5206 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5208 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5209 cmd_pkt->cdb[0] = REQUEST_SENSE;
5210 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5211 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5212 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5213 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5215 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5216 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5218 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5219 IPR_REQUEST_SENSE_TIMEOUT * 2);
5223 * ipr_erp_cancel_all - Send cancel all to a device
5224 * @ipr_cmd: ipr command struct
5226 * This function sends a cancel all to a device to clear the
5227 * queue. If we are running TCQ on the device, QERR is set to 1,
5228 * which means all outstanding ops have been dropped on the floor.
5229 * Cancel all will return them to us.
5231 * Return value:
5232 * nothing
5234 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5236 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5237 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5238 struct ipr_cmd_pkt *cmd_pkt;
5240 res->in_erp = 1;
5242 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5244 if (!scsi_get_tag_type(scsi_cmd->device)) {
5245 ipr_erp_request_sense(ipr_cmd);
5246 return;
5249 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5250 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5251 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5253 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5254 IPR_CANCEL_ALL_TIMEOUT);
5258 * ipr_dump_ioasa - Dump contents of IOASA
5259 * @ioa_cfg: ioa config struct
5260 * @ipr_cmd: ipr command struct
5261 * @res: resource entry struct
5263 * This function is invoked by the interrupt handler when ops
5264 * fail. It will log the IOASA if appropriate. Only called
5265 * for GPDD ops.
5267 * Return value:
5268 * none
5270 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5271 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5273 int i;
5274 u16 data_len;
5275 u32 ioasc, fd_ioasc;
5276 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5277 __be32 *ioasa_data = (__be32 *)ioasa;
5278 int error_index;
5280 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
5281 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
5283 if (0 == ioasc)
5284 return;
5286 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5287 return;
5289 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5290 error_index = ipr_get_error(fd_ioasc);
5291 else
5292 error_index = ipr_get_error(ioasc);
5294 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5295 /* Don't log an error if the IOA already logged one */
5296 if (ioasa->ilid != 0)
5297 return;
5299 if (!ipr_is_gscsi(res))
5300 return;
5302 if (ipr_error_table[error_index].log_ioasa == 0)
5303 return;
5306 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5308 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
5309 data_len = sizeof(struct ipr_ioasa);
5310 else
5311 data_len = be16_to_cpu(ioasa->ret_stat_len);
5313 ipr_err("IOASA Dump:\n");
5315 for (i = 0; i < data_len / 4; i += 4) {
5316 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5317 be32_to_cpu(ioasa_data[i]),
5318 be32_to_cpu(ioasa_data[i+1]),
5319 be32_to_cpu(ioasa_data[i+2]),
5320 be32_to_cpu(ioasa_data[i+3]));
5325 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5326  * @ipr_cmd:    ipr command struct
5329 * Return value:
5330 * none
5332 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5334 u32 failing_lba;
5335 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5336 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5337 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5338 u32 ioasc = be32_to_cpu(ioasa->ioasc);
5340 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5342 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5343 return;
5345 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5347 if (ipr_is_vset_device(res) &&
5348 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5349 ioasa->u.vset.failing_lba_hi != 0) {
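/*
 * A VSET failing LBA can exceed 32 bits, so build descriptor-format
 * sense data (response code 0x72) with an information descriptor
 * carrying the full 64-bit LBA.
 */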
5350 sense_buf[0] = 0x72;
5351 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5352 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5353 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5355 sense_buf[7] = 12;
5356 sense_buf[8] = 0;
5357 sense_buf[9] = 0x0A;
5358 sense_buf[10] = 0x80;
5360 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5362 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5363 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5364 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5365 sense_buf[15] = failing_lba & 0x000000ff;
5367 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5369 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5370 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5371 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5372 sense_buf[19] = failing_lba & 0x000000ff;
5373 } else {
5374 sense_buf[0] = 0x70;
5375 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5376 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5377 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5379 /* Illegal request */
5380 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5381 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5382 sense_buf[7] = 10; /* additional length */
5384 /* IOARCB was in error */
5385 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5386 sense_buf[15] = 0xC0;
5387 else /* Parameter data was invalid */
5388 sense_buf[15] = 0x80;
5390 sense_buf[16] =
5391 ((IPR_FIELD_POINTER_MASK &
5392 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
5393 sense_buf[17] =
5394 (IPR_FIELD_POINTER_MASK &
5395 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
5396 } else {
5397 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5398 if (ipr_is_vset_device(res))
5399 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5400 else
5401 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5403 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5404 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5405 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5406 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5407 sense_buf[6] = failing_lba & 0x000000ff;
5410 sense_buf[7] = 6; /* additional length */
5416 * ipr_get_autosense - Copy autosense data to sense buffer
5417 * @ipr_cmd: ipr command struct
5419 * This function copies the autosense buffer to the buffer
5420 * in the scsi_cmd, if there is autosense available.
5422 * Return value:
5423 * 1 if autosense was available / 0 if not
5425 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5427 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5429 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5430 return 0;
5432 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5433 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5434 SCSI_SENSE_BUFFERSIZE));
5435 return 1;
5439 * ipr_erp_start - Process an error response for a SCSI op
5440 * @ioa_cfg: ioa config struct
5441 * @ipr_cmd: ipr command struct
5443 * This function determines whether or not to initiate ERP
5444 * on the affected device.
5446 * Return value:
5447 * nothing
5449 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5450 struct ipr_cmnd *ipr_cmd)
5452 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5453 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5454 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5455 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5457 if (!res) {
5458 ipr_scsi_eh_done(ipr_cmd);
5459 return;
5462 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5463 ipr_gen_sense(ipr_cmd);
5465 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5467 switch (masked_ioasc) {
5468 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5469 if (ipr_is_naca_model(res))
5470 scsi_cmd->result |= (DID_ABORT << 16);
5471 else
5472 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5473 break;
5474 case IPR_IOASC_IR_RESOURCE_HANDLE:
5475 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5476 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5477 break;
5478 case IPR_IOASC_HW_SEL_TIMEOUT:
5479 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5480 if (!ipr_is_naca_model(res))
5481 res->needs_sync_complete = 1;
5482 break;
5483 case IPR_IOASC_SYNC_REQUIRED:
5484 if (!res->in_erp)
5485 res->needs_sync_complete = 1;
5486 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5487 break;
5488 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5489 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5490 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5491 break;
5492 case IPR_IOASC_BUS_WAS_RESET:
5493 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5495 * Report the bus reset and ask for a retry. The device
5496          * will give CC/UA on the next command.
5498 if (!res->resetting_device)
5499 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5500 scsi_cmd->result |= (DID_ERROR << 16);
5501 if (!ipr_is_naca_model(res))
5502 res->needs_sync_complete = 1;
5503 break;
5504 case IPR_IOASC_HW_DEV_BUS_STATUS:
5505 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5506 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5507 if (!ipr_get_autosense(ipr_cmd)) {
5508 if (!ipr_is_naca_model(res)) {
5509 ipr_erp_cancel_all(ipr_cmd);
5510 return;
5514 if (!ipr_is_naca_model(res))
5515 res->needs_sync_complete = 1;
5516 break;
5517 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5518 break;
5519 default:
5520 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5521 scsi_cmd->result |= (DID_ERROR << 16);
5522 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5523 res->needs_sync_complete = 1;
5524 break;
5527 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5528 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5529 scsi_cmd->scsi_done(scsi_cmd);
5533 * ipr_scsi_done - mid-layer done function
5534 * @ipr_cmd: ipr command struct
5536 * This function is invoked by the interrupt handler for
5537 * ops generated by the SCSI mid-layer
5539 * Return value:
5540 * none
5542 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5544 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5545 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5546 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5548 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
5550 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5551 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5552 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5553 scsi_cmd->scsi_done(scsi_cmd);
5554 } else
5555 ipr_erp_start(ioa_cfg, ipr_cmd);
5559 * ipr_queuecommand - Queue a mid-layer request
5560 * @scsi_cmd: scsi command struct
5561 * @done: done function
5563 * This function queues a request generated by the mid-layer.
5565 * Return value:
5566 * 0 on success
5567 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5568 * SCSI_MLQUEUE_HOST_BUSY if host is busy
5570 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5571 void (*done) (struct scsi_cmnd *))
5573 struct ipr_ioa_cfg *ioa_cfg;
5574 struct ipr_resource_entry *res;
5575 struct ipr_ioarcb *ioarcb;
5576 struct ipr_cmnd *ipr_cmd;
5577 int rc = 0;
5579 scsi_cmd->scsi_done = done;
5580 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5581 res = scsi_cmd->device->hostdata;
5582 scsi_cmd->result = (DID_OK << 16);
5585 * We are currently blocking all devices due to a host reset
5586 * We have told the host to stop giving us new requests, but
5587 * ERP ops don't count. FIXME
5589 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5590 return SCSI_MLQUEUE_HOST_BUSY;
5593 * FIXME - Create scsi_set_host_offline interface
5594 * and the ioa_is_dead check can be removed
5596 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5597 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5598 scsi_cmd->result = (DID_NO_CONNECT << 16);
5599 scsi_cmd->scsi_done(scsi_cmd);
5600 return 0;
5603 if (ipr_is_gata(res) && res->sata_port)
5604 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5606 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5607 ioarcb = &ipr_cmd->ioarcb;
5608 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5610 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5611 ipr_cmd->scsi_cmd = scsi_cmd;
5612 ioarcb->res_handle = res->res_handle;
5613 ipr_cmd->done = ipr_scsi_done;
5614 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5616 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5617 if (scsi_cmd->underflow == 0)
5618 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5620 if (res->needs_sync_complete) {
5621 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5622 res->needs_sync_complete = 0;
5625 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5626 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5627 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5628 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
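/*
 * Vendor-specific CDB opcodes (0xC0 and above) directed at non-GSCSI
 * resources, and IPR_QUERY_RSRC_STATE, are commands for the IOA itself
 * rather than pass-through commands for the device.
 */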
5631 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5632 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5633 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5635 if (likely(rc == 0)) {
5636 if (ioa_cfg->sis64)
5637 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5638 else
5639 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5642 if (likely(rc == 0)) {
5643 mb();
5644 ipr_send_command(ipr_cmd);
5645 } else {
5646 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5647 return SCSI_MLQUEUE_HOST_BUSY;
5650 return 0;
5654 * ipr_ioctl - IOCTL handler
5655 * @sdev: scsi device struct
5656 * @cmd: IOCTL cmd
5657 * @arg: IOCTL arg
5659 * Return value:
5660 * 0 on success / other on failure
5662 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5664 struct ipr_resource_entry *res;
5666 res = (struct ipr_resource_entry *)sdev->hostdata;
5667 if (res && ipr_is_gata(res)) {
5668 if (cmd == HDIO_GET_IDENTITY)
5669 return -ENOTTY;
5670 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5673 return -EINVAL;
5677  * ipr_ioa_info - Get information about the card/driver
5678 * @scsi_host: scsi host struct
5680 * Return value:
5681 * pointer to buffer with description string
5683 static const char * ipr_ioa_info(struct Scsi_Host *host)
5685 static char buffer[512];
5686 struct ipr_ioa_cfg *ioa_cfg;
5687 unsigned long lock_flags = 0;
5689 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5691 spin_lock_irqsave(host->host_lock, lock_flags);
5692 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5693 spin_unlock_irqrestore(host->host_lock, lock_flags);
5695 return buffer;
5698 static struct scsi_host_template driver_template = {
5699 .module = THIS_MODULE,
5700 .name = "IPR",
5701 .info = ipr_ioa_info,
5702 .ioctl = ipr_ioctl,
5703 .queuecommand = ipr_queuecommand,
5704 .eh_abort_handler = ipr_eh_abort,
5705 .eh_device_reset_handler = ipr_eh_dev_reset,
5706 .eh_host_reset_handler = ipr_eh_host_reset,
5707 .slave_alloc = ipr_slave_alloc,
5708 .slave_configure = ipr_slave_configure,
5709 .slave_destroy = ipr_slave_destroy,
5710 .target_alloc = ipr_target_alloc,
5711 .target_destroy = ipr_target_destroy,
5712 .change_queue_depth = ipr_change_queue_depth,
5713 .change_queue_type = ipr_change_queue_type,
5714 .bios_param = ipr_biosparam,
5715 .can_queue = IPR_MAX_COMMANDS,
5716 .this_id = -1,
5717 .sg_tablesize = IPR_MAX_SGLIST,
5718 .max_sectors = IPR_IOA_MAX_SECTORS,
5719 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5720 .use_clustering = ENABLE_CLUSTERING,
5721 .shost_attrs = ipr_ioa_attrs,
5722 .sdev_attrs = ipr_dev_attrs,
5723 .proc_name = IPR_NAME
5727 * ipr_ata_phy_reset - libata phy_reset handler
5728 * @ap: ata port to reset
5731 static void ipr_ata_phy_reset(struct ata_port *ap)
5733 unsigned long flags;
5734 struct ipr_sata_port *sata_port = ap->private_data;
5735 struct ipr_resource_entry *res = sata_port->res;
5736 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5737 int rc;
5739 ENTER;
5740 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5741 while(ioa_cfg->in_reset_reload) {
5742 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5743 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5744 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5747 if (!ioa_cfg->allow_cmds)
5748 goto out_unlock;
5750 rc = ipr_device_reset(ioa_cfg, res);
5752 if (rc) {
5753 ata_port_disable(ap);
5754 goto out_unlock;
5757 ap->link.device[0].class = res->ata_class;
5758 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5759 ata_port_disable(ap);
5761 out_unlock:
5762 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5763 LEAVE;
5767 * ipr_ata_post_internal - Cleanup after an internal command
5768 * @qc: ATA queued command
5770 * Return value:
5771 * none
5773 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5775 struct ipr_sata_port *sata_port = qc->ap->private_data;
5776 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5777 struct ipr_cmnd *ipr_cmd;
5778 unsigned long flags;
5780 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5781 while(ioa_cfg->in_reset_reload) {
5782 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5783 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5784 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5787 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5788 if (ipr_cmd->qc == qc) {
5789 ipr_device_reset(ioa_cfg, sata_port->res);
5790 break;
5793 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5797 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5798 * @regs: destination
5799 * @tf: source ATA taskfile
5801 * Return value:
5802 * none
5804 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5805 struct ata_taskfile *tf)
5807 regs->feature = tf->feature;
5808 regs->nsect = tf->nsect;
5809 regs->lbal = tf->lbal;
5810 regs->lbam = tf->lbam;
5811 regs->lbah = tf->lbah;
5812 regs->device = tf->device;
5813 regs->command = tf->command;
5814 regs->hob_feature = tf->hob_feature;
5815 regs->hob_nsect = tf->hob_nsect;
5816 regs->hob_lbal = tf->hob_lbal;
5817 regs->hob_lbam = tf->hob_lbam;
5818 regs->hob_lbah = tf->hob_lbah;
5819 regs->ctl = tf->ctl;
5823 * ipr_sata_done - done function for SATA commands
5824 * @ipr_cmd: ipr command struct
5826 * This function is invoked by the interrupt handler for
5827 * ops generated by the SCSI mid-layer to SATA devices
5829 * Return value:
5830 * none
5832 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5834 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5835 struct ata_queued_cmd *qc = ipr_cmd->qc;
5836 struct ipr_sata_port *sata_port = qc->ap->private_data;
5837 struct ipr_resource_entry *res = sata_port->res;
5838 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5840 memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
5841 sizeof(struct ipr_ioasa_gata));
5842 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5844 if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5845 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
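/*
 * Map the IOASA result to a libata error mask.  For anything worse
 * than a recovered error use __ac_err_mask(), which forces a non-zero
 * error mask (at least AC_ERR_OTHER) even if the cached ATA status
 * byte itself looks clean; otherwise derive the mask from the status
 * bits alone with ac_err_mask().
 */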
5847 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5848 qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5849 else
5850 qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
5851 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5852 ata_qc_complete(qc);
5856 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5857 * @ipr_cmd: ipr command struct
5858 * @qc: ATA queued command
5861 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5862 struct ata_queued_cmd *qc)
5864 u32 ioadl_flags = 0;
5865 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5866 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5867 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5868 int len = qc->nbytes;
5869 struct scatterlist *sg;
5870 unsigned int si;
5871 dma_addr_t dma_addr = ipr_cmd->dma_addr;
5873 if (len == 0)
5874 return;
5876 if (qc->dma_dir == DMA_TO_DEVICE) {
5877 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5878 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5879 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5880 ioadl_flags = IPR_IOADL_FLAGS_READ;
5882 ioarcb->data_transfer_length = cpu_to_be32(len);
5883 ioarcb->ioadl_len =
5884 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5885 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5886 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
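/*
 * On SIS-64 adapters the ATA IOADL lives inside the command block
 * itself, so point the IOARCB at it and emit one 64-bit descriptor
 * per scatter/gather element; the final descriptor is flagged
 * IPR_IOADL_FLAGS_LAST below.
 */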
5888 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5889 ioadl64->flags = cpu_to_be32(ioadl_flags);
5890 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5891 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5893 last_ioadl64 = ioadl64;
5894 ioadl64++;
5897 if (likely(last_ioadl64))
5898 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5902 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5903 * @ipr_cmd: ipr command struct
5904 * @qc: ATA queued command
5907 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5908 struct ata_queued_cmd *qc)
5910 u32 ioadl_flags = 0;
5911 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5912 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5913 struct ipr_ioadl_desc *last_ioadl = NULL;
5914 int len = qc->nbytes;
5915 struct scatterlist *sg;
5916 unsigned int si;
5918 if (len == 0)
5919 return;
5921 if (qc->dma_dir == DMA_TO_DEVICE) {
5922 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5923 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5924 ioarcb->data_transfer_length = cpu_to_be32(len);
5925 ioarcb->ioadl_len =
5926 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5927 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5928 ioadl_flags = IPR_IOADL_FLAGS_READ;
5929 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5930 ioarcb->read_ioadl_len =
5931 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5934 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5935 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5936 ioadl->address = cpu_to_be32(sg_dma_address(sg));
5938 last_ioadl = ioadl;
5939 ioadl++;
5942 if (likely(last_ioadl))
5943 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5947 * ipr_qc_issue - Issue a SATA qc to a device
5948 * @qc: queued command
5950 * Return value:
5951 * 0 if success
5953 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5955 struct ata_port *ap = qc->ap;
5956 struct ipr_sata_port *sata_port = ap->private_data;
5957 struct ipr_resource_entry *res = sata_port->res;
5958 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5959 struct ipr_cmnd *ipr_cmd;
5960 struct ipr_ioarcb *ioarcb;
5961 struct ipr_ioarcb_ata_regs *regs;
5963 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5964 return AC_ERR_SYSTEM;
5966 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5967 ioarcb = &ipr_cmd->ioarcb;
5969 if (ioa_cfg->sis64) {
5970 regs = &ipr_cmd->i.ata_ioadl.regs;
5971 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5972 } else
5973 regs = &ioarcb->u.add_data.u.regs;
5975 memset(regs, 0, sizeof(*regs));
5976 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
5978 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5979 ipr_cmd->qc = qc;
5980 ipr_cmd->done = ipr_sata_done;
5981 ipr_cmd->ioarcb.res_handle = res->res_handle;
5982 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5983 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5984 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5985 ipr_cmd->dma_use_sg = qc->n_elem;
5987 if (ioa_cfg->sis64)
5988 ipr_build_ata_ioadl64(ipr_cmd, qc);
5989 else
5990 ipr_build_ata_ioadl(ipr_cmd, qc);
5992 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5993 ipr_copy_sata_tf(regs, &qc->tf);
5994 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5995 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
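/*
 * Translate the libata taskfile protocol into IPR ATA flags: DMA
 * protocols set the DMA transfer type and ATAPI protocols mark the
 * request as a packet command.
 */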
5997 switch (qc->tf.protocol) {
5998 case ATA_PROT_NODATA:
5999 case ATA_PROT_PIO:
6000 break;
6002 case ATA_PROT_DMA:
6003 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6004 break;
6006 case ATAPI_PROT_PIO:
6007 case ATAPI_PROT_NODATA:
6008 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6009 break;
6011 case ATAPI_PROT_DMA:
6012 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6013 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6014 break;
6016 default:
6017 WARN_ON(1);
6018 return AC_ERR_INVALID;
6021 mb();
6023 ipr_send_command(ipr_cmd);
6025 return 0;
6029 * ipr_qc_fill_rtf - Read result TF
6030 * @qc: ATA queued command
6032 * Return value:
6033 * true
6035 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6037 struct ipr_sata_port *sata_port = qc->ap->private_data;
6038 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6039 struct ata_taskfile *tf = &qc->result_tf;
6041 tf->feature = g->error;
6042 tf->nsect = g->nsect;
6043 tf->lbal = g->lbal;
6044 tf->lbam = g->lbam;
6045 tf->lbah = g->lbah;
6046 tf->device = g->device;
6047 tf->command = g->status;
6048 tf->hob_nsect = g->hob_nsect;
6049 tf->hob_lbal = g->hob_lbal;
6050 tf->hob_lbam = g->hob_lbam;
6051 tf->hob_lbah = g->hob_lbah;
6052 tf->ctl = g->alt_status;
6054 return true;
6057 static struct ata_port_operations ipr_sata_ops = {
6058 .phy_reset = ipr_ata_phy_reset,
6059 .hardreset = ipr_sata_reset,
6060 .post_internal_cmd = ipr_ata_post_internal,
6061 .qc_prep = ata_noop_qc_prep,
6062 .qc_issue = ipr_qc_issue,
6063 .qc_fill_rtf = ipr_qc_fill_rtf,
6064 .port_start = ata_sas_port_start,
6065 .port_stop = ata_sas_port_stop
6068 static struct ata_port_info sata_port_info = {
6069 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6070 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6071 .pio_mask = 0x10, /* pio4 */
6072 .mwdma_mask = 0x07,
6073 .udma_mask = 0x7f, /* udma0-6 */
6074 .port_ops = &ipr_sata_ops
6077 #ifdef CONFIG_PPC_PSERIES
6078 static const u16 ipr_blocked_processors[] = {
6079 PV_NORTHSTAR,
6080 PV_PULSAR,
6081 PV_POWER4,
6082 PV_ICESTAR,
6083 PV_SSTAR,
6084 PV_POWER4p,
6085 PV_630,
6086 PV_630p
6090 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6091 * @ioa_cfg: ioa cfg struct
6093 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6094 * certain pSeries hardware. This function determines if the given
6095 * adapter is in one of these configurations or not.
6097 * Return value:
6098 * 1 if adapter is not supported / 0 if adapter is supported
6100 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6102 int i;
6104 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6105 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6106 if (__is_processor(ipr_blocked_processors[i]))
6107 return 1;
6110 return 0;
6112 #else
6113 #define ipr_invalid_adapter(ioa_cfg) 0
6114 #endif
6117 * ipr_ioa_bringdown_done - IOA bring down completion.
6118 * @ipr_cmd: ipr command struct
6120 * This function processes the completion of an adapter bring down.
6121 * It wakes any reset sleepers.
6123 * Return value:
6124 * IPR_RC_JOB_RETURN
6126 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6128 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6130 ENTER;
6131 ioa_cfg->in_reset_reload = 0;
6132 ioa_cfg->reset_retries = 0;
6133 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6134 wake_up_all(&ioa_cfg->reset_wait_q);
6136 spin_unlock_irq(ioa_cfg->host->host_lock);
6137 scsi_unblock_requests(ioa_cfg->host);
6138 spin_lock_irq(ioa_cfg->host->host_lock);
6139 LEAVE;
6141 return IPR_RC_JOB_RETURN;
6145 * ipr_ioa_reset_done - IOA reset completion.
6146 * @ipr_cmd: ipr command struct
6148 * This function processes the completion of an adapter reset.
6149 * It schedules any necessary mid-layer add/removes and
6150 * wakes any reset sleepers.
6152 * Return value:
6153 * IPR_RC_JOB_RETURN
6155 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6157 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6158 struct ipr_resource_entry *res;
6159 struct ipr_hostrcb *hostrcb, *temp;
6160 int i = 0;
6162 ENTER;
6163 ioa_cfg->in_reset_reload = 0;
6164 ioa_cfg->allow_cmds = 1;
6165 ioa_cfg->reset_cmd = NULL;
6166 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6168 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6169 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6170 ipr_trace;
6171 break;
6174 schedule_work(&ioa_cfg->work_q);
6176 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6177 list_del(&hostrcb->queue);
6178 if (i++ < IPR_NUM_LOG_HCAMS)
6179 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6180 else
6181 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6184 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6185 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6187 ioa_cfg->reset_retries = 0;
6188 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6189 wake_up_all(&ioa_cfg->reset_wait_q);
6191 spin_unlock(ioa_cfg->host->host_lock);
6192 scsi_unblock_requests(ioa_cfg->host);
6193 spin_lock(ioa_cfg->host->host_lock);
6195 if (!ioa_cfg->allow_cmds)
6196 scsi_block_requests(ioa_cfg->host);
6198 LEAVE;
6199 return IPR_RC_JOB_RETURN;
6203 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6204 * @supported_dev: supported device struct
6205 * @vpids: vendor product id struct
6207 * Return value:
6208 * none
6210 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6211 struct ipr_std_inq_vpids *vpids)
6213 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6214 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6215 supported_dev->num_records = 1;
6216 supported_dev->data_length =
6217 cpu_to_be16(sizeof(struct ipr_supported_device));
6218 supported_dev->reserved = 0;
6222 * ipr_set_supported_devs - Send Set Supported Devices for a device
6223 * @ipr_cmd: ipr command struct
6225 * This function sends a Set Supported Devices to the adapter
6227 * Return value:
6228 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6230 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6232 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6233 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6234 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6235 struct ipr_resource_entry *res = ipr_cmd->u.res;
6237 ipr_cmd->job_step = ipr_ioa_reset_done;
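/*
 * Default to finishing the reset; the loop below issues one Set
 * Supported Devices per remaining SCSI disk and, on non-SIS64
 * adapters, re-arms job_step to this function so the next
 * completion continues with the following disk.
 */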
6239 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6240 if (!ipr_is_scsi_disk(res))
6241 continue;
6243 ipr_cmd->u.res = res;
6244 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6246 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6247 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6248 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6250 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6251 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6252 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6253 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6255 ipr_init_ioadl(ipr_cmd,
6256 ioa_cfg->vpd_cbs_dma +
6257 offsetof(struct ipr_misc_cbs, supp_dev),
6258 sizeof(struct ipr_supported_device),
6259 IPR_IOADL_FLAGS_WRITE_LAST);
6261 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6262 IPR_SET_SUP_DEVICE_TIMEOUT);
6264 if (!ioa_cfg->sis64)
6265 ipr_cmd->job_step = ipr_set_supported_devs;
6266 return IPR_RC_JOB_RETURN;
6269 return IPR_RC_JOB_CONTINUE;
6273 * ipr_get_mode_page - Locate specified mode page
6274 * @mode_pages: mode page buffer
6275 * @page_code: page code to find
6276 * @len: minimum required length for mode page
6278 * Return value:
6279 * pointer to mode page / NULL on failure
6281 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6282 u32 page_code, u32 len)
6284 struct ipr_mode_page_hdr *mode_hdr;
6285 u32 page_length;
6286 u32 length;
6288 if (!mode_pages || (mode_pages->hdr.length == 0))
6289 return NULL;
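/*
 * hdr.length excludes the length byte itself, so hdr.length + 1 is
 * the full mode data size; subtracting the 4-byte mode parameter
 * header and any block descriptors leaves the length of the mode
 * page area walked below.
 */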
6291 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6292 mode_hdr = (struct ipr_mode_page_hdr *)
6293 (mode_pages->data + mode_pages->hdr.block_desc_len);
6295 while (length) {
6296 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6297 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6298 return mode_hdr;
6299 break;
6300 } else {
6301 page_length = (sizeof(struct ipr_mode_page_hdr) +
6302 mode_hdr->page_length);
6303 length -= page_length;
6304 mode_hdr = (struct ipr_mode_page_hdr *)
6305 ((unsigned long)mode_hdr + page_length);
6308 return NULL;
6312 * ipr_check_term_power - Check for term power errors
6313 * @ioa_cfg: ioa config struct
6314 * @mode_pages: IOAFP mode pages buffer
6316 * Check the IOAFP's mode page 28 for term power errors
6318 * Return value:
6319 * nothing
6321 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6322 struct ipr_mode_pages *mode_pages)
6324 int i;
6325 int entry_length;
6326 struct ipr_dev_bus_entry *bus;
6327 struct ipr_mode_page28 *mode_page;
6329 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6330 sizeof(struct ipr_mode_page28));
6332 entry_length = mode_page->entry_length;
6334 bus = mode_page->bus;
6336 for (i = 0; i < mode_page->num_entries; i++) {
6337 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6338 dev_err(&ioa_cfg->pdev->dev,
6339 "Term power is absent on scsi bus %d\n",
6340 bus->res_addr.bus);
6343 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6348 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6349 * @ioa_cfg: ioa config struct
6351 * Looks through the config table checking for SES devices. If
6352 * the SES device is in the SES table indicating a maximum SCSI
6353 * bus speed, the speed is limited for the bus.
6355 * Return value:
6356 * none
6358 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6360 u32 max_xfer_rate;
6361 int i;
6363 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6364 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6365 ioa_cfg->bus_attr[i].bus_width);
6367 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6368 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6373 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6374 * @ioa_cfg: ioa config struct
6375 * @mode_pages: mode page 28 buffer
6377 * Updates mode page 28 based on driver configuration
6379 * Return value:
6380 * none
6382 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6383 struct ipr_mode_pages *mode_pages)
6385 int i, entry_length;
6386 struct ipr_dev_bus_entry *bus;
6387 struct ipr_bus_attributes *bus_attr;
6388 struct ipr_mode_page28 *mode_page;
6390 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6391 sizeof(struct ipr_mode_page28));
6393 entry_length = mode_page->entry_length;
6395 /* Loop for each device bus entry */
6396 for (i = 0, bus = mode_page->bus;
6397 i < mode_page->num_entries;
6398 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6399 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6400 dev_err(&ioa_cfg->pdev->dev,
6401 "Invalid resource address reported: 0x%08X\n",
6402 IPR_GET_PHYS_LOC(bus->res_addr));
6403 continue;
6406 bus_attr = &ioa_cfg->bus_attr[i];
6407 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6408 bus->bus_width = bus_attr->bus_width;
6409 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6410 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6411 if (bus_attr->qas_enabled)
6412 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6413 else
6414 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6419 * ipr_build_mode_select - Build a mode select command
6420 * @ipr_cmd: ipr command struct
6421 * @res_handle: resource handle to send command to
6422 * @parm: Byte 2 of Mode Sense command
6423 * @dma_addr: DMA buffer address
6424 * @xfer_len: data transfer length
6426 * Return value:
6427 * none
6429 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6430 __be32 res_handle, u8 parm,
6431 dma_addr_t dma_addr, u8 xfer_len)
6433 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6435 ioarcb->res_handle = res_handle;
6436 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6437 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6438 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6439 ioarcb->cmd_pkt.cdb[1] = parm;
6440 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6442 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6446 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6447 * @ipr_cmd: ipr command struct
6449 * This function sets up the SCSI bus attributes and sends
6450 * a Mode Select for Page 28 to activate them.
6452 * Return value:
6453 * IPR_RC_JOB_RETURN
6455 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6457 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6458 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6459 int length;
6461 ENTER;
6462 ipr_scsi_bus_speed_limit(ioa_cfg);
6463 ipr_check_term_power(ioa_cfg, mode_pages);
6464 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
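/*
 * Save the full length for the MODE SELECT transfer, then clear the
 * mode data length field, which is reserved (zero) in MODE SELECT
 * parameter data.
 */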
6465 length = mode_pages->hdr.length + 1;
6466 mode_pages->hdr.length = 0;
6468 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6469 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6470 length);
6472 ipr_cmd->job_step = ipr_set_supported_devs;
6473 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6474 struct ipr_resource_entry, queue);
6475 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6477 LEAVE;
6478 return IPR_RC_JOB_RETURN;
6482 * ipr_build_mode_sense - Builds a mode sense command
6483 * @ipr_cmd: ipr command struct
6484 * @res_handle: resource handle to send command to
6485 * @parm: Byte 2 of mode sense command
6486 * @dma_addr: DMA address of mode sense buffer
6487 * @xfer_len: Size of DMA buffer
6489 * Return value:
6490 * none
6492 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6493 __be32 res_handle,
6494 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6496 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6498 ioarcb->res_handle = res_handle;
6499 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6500 ioarcb->cmd_pkt.cdb[2] = parm;
6501 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6502 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6504 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6508 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6509 * @ipr_cmd: ipr command struct
6511 * This function handles the failure of an IOA bringup command.
6513 * Return value:
6514 * IPR_RC_JOB_RETURN
6516 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6518 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6519 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6521 dev_err(&ioa_cfg->pdev->dev,
6522 "0x%02X failed with IOASC: 0x%08X\n",
6523 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6525 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6526 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6527 return IPR_RC_JOB_RETURN;
6531 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6532 * @ipr_cmd: ipr command struct
6534 * This function handles the failure of a Mode Sense to the IOAFP.
6535 * Some adapters do not handle all mode pages.
6537 * Return value:
6538 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6540 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6542 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6543 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6545 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6546 ipr_cmd->job_step = ipr_set_supported_devs;
6547 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6548 struct ipr_resource_entry, queue);
6549 return IPR_RC_JOB_CONTINUE;
6552 return ipr_reset_cmd_failed(ipr_cmd);
6556 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6557 * @ipr_cmd: ipr command struct
6559 * This function sends a Page 28 mode sense to the IOA to
6560 * retrieve SCSI bus attributes.
6562 * Return value:
6563 * IPR_RC_JOB_RETURN
6565 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6567 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6569 ENTER;
6570 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6571 0x28, ioa_cfg->vpd_cbs_dma +
6572 offsetof(struct ipr_misc_cbs, mode_pages),
6573 sizeof(struct ipr_mode_pages));
6575 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6576 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6578 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6580 LEAVE;
6581 return IPR_RC_JOB_RETURN;
6585 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6586 * @ipr_cmd: ipr command struct
6588 * This function enables dual IOA RAID support if possible.
6590 * Return value:
6591 * IPR_RC_JOB_RETURN
6593 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6595 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6596 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6597 struct ipr_mode_page24 *mode_page;
6598 int length;
6600 ENTER;
6601 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6602 sizeof(struct ipr_mode_page24));
6604 if (mode_page)
6605 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6607 length = mode_pages->hdr.length + 1;
6608 mode_pages->hdr.length = 0;
6610 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6611 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6612 length);
6614 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6615 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6617 LEAVE;
6618 return IPR_RC_JOB_RETURN;
6622 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6623 * @ipr_cmd: ipr command struct
6625 * This function handles the failure of a Mode Sense to the IOAFP.
6626 * Some adapters do not handle all mode pages.
6628 * Return value:
6629 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6631 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6633 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6635 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6636 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6637 return IPR_RC_JOB_CONTINUE;
6640 return ipr_reset_cmd_failed(ipr_cmd);
6644 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6645 * @ipr_cmd: ipr command struct
6647 * This function sends a mode sense to the IOA to retrieve
6648 * the IOA Advanced Function Control mode page.
6650 * Return value:
6651 * IPR_RC_JOB_RETURN
6653 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6655 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6657 ENTER;
6658 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6659 0x24, ioa_cfg->vpd_cbs_dma +
6660 offsetof(struct ipr_misc_cbs, mode_pages),
6661 sizeof(struct ipr_mode_pages));
6663 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6664 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6666 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6668 LEAVE;
6669 return IPR_RC_JOB_RETURN;
6673 * ipr_init_res_table - Initialize the resource table
6674 * @ipr_cmd: ipr command struct
6676 * This function looks through the existing resource table, comparing
6677 * it with the config table. This function will take care of old/new
6678 * devices and schedule adding/removing them from the mid-layer
6679 * as appropriate.
6681 * Return value:
6682 * IPR_RC_JOB_CONTINUE
6684 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6686 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6687 struct ipr_resource_entry *res, *temp;
6688 struct ipr_config_table_entry_wrapper cfgtew;
6689 int entries, found, flag, i;
6690 LIST_HEAD(old_res);
6692 ENTER;
6693 if (ioa_cfg->sis64)
6694 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6695 else
6696 flag = ioa_cfg->u.cfg_table->hdr.flags;
6698 if (flag & IPR_UCODE_DOWNLOAD_REQ)
6699 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6701 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6702 list_move_tail(&res->queue, &old_res);
6704 if (ioa_cfg->sis64)
6705 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
6706 else
6707 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6709 for (i = 0; i < entries; i++) {
6710 if (ioa_cfg->sis64)
6711 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6712 else
6713 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6714 found = 0;
6716 list_for_each_entry_safe(res, temp, &old_res, queue) {
6717 if (ipr_is_same_device(res, &cfgtew)) {
6718 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6719 found = 1;
6720 break;
6724 if (!found) {
6725 if (list_empty(&ioa_cfg->free_res_q)) {
6726 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6727 break;
6730 found = 1;
6731 res = list_entry(ioa_cfg->free_res_q.next,
6732 struct ipr_resource_entry, queue);
6733 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6734 ipr_init_res_entry(res, &cfgtew);
6735 res->add_to_ml = 1;
6738 if (found)
6739 ipr_update_res_entry(res, &cfgtew);
6742 list_for_each_entry_safe(res, temp, &old_res, queue) {
6743 if (res->sdev) {
6744 res->del_from_ml = 1;
6745 res->res_handle = IPR_INVALID_RES_HANDLE;
6746 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6750 list_for_each_entry_safe(res, temp, &old_res, queue) {
6751 ipr_clear_res_target(res);
6752 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6755 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6756 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6757 else
6758 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6760 LEAVE;
6761 return IPR_RC_JOB_CONTINUE;
6765 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6766 * @ipr_cmd: ipr command struct
6768 * This function sends a Query IOA Configuration command
6769 * to the adapter to retrieve the IOA configuration table.
6771 * Return value:
6772 * IPR_RC_JOB_RETURN
6774 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6776 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6777 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6778 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6779 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6781 ENTER;
6782 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6783 ioa_cfg->dual_raid = 1;
6784 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6785 ucode_vpd->major_release, ucode_vpd->card_type,
6786 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6787 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6788 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6790 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6791 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6792 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6794 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6795 IPR_IOADL_FLAGS_READ_LAST);
6797 ipr_cmd->job_step = ipr_init_res_table;
6799 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6801 LEAVE;
6802 return IPR_RC_JOB_RETURN;
6806 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6807 * @ipr_cmd: ipr command struct
6809 * This utility function sends an inquiry to the adapter.
6811 * Return value:
6812 * none
6814 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6815 dma_addr_t dma_addr, u8 xfer_len)
6817 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6819 ENTER;
6820 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6821 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6823 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6824 ioarcb->cmd_pkt.cdb[1] = flags;
6825 ioarcb->cmd_pkt.cdb[2] = page;
6826 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6828 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6830 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6831 LEAVE;
6835 * ipr_inquiry_page_supported - Is the given inquiry page supported
6836 * @page0: inquiry page 0 buffer
6837 * @page: page code.
6839 * This function determines if the specified inquiry page is supported.
6841 * Return value:
6842 * 1 if page is supported / 0 if not
6844 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6846 int i;
6848 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6849 if (page0->page[i] == page)
6850 return 1;
6852 return 0;
6856 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6857 * @ipr_cmd: ipr command struct
6859 * This function sends a Page 0xD0 inquiry to the adapter
6860 * to retrieve adapter capabilities.
6862 * Return value:
6863 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6865 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6867 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6868 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6869 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6871 ENTER;
6872 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6873 memset(cap, 0, sizeof(*cap));
6875 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6876 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6877 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6878 sizeof(struct ipr_inquiry_cap));
6879 return IPR_RC_JOB_RETURN;
6882 LEAVE;
6883 return IPR_RC_JOB_CONTINUE;
6887 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6888 * @ipr_cmd: ipr command struct
6890 * This function sends a Page 3 inquiry to the adapter
6891 * to retrieve software VPD information.
6893 * Return value:
6894 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6896 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6898 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6900 ENTER;
6902 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6904 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6905 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6906 sizeof(struct ipr_inquiry_page3));
6908 LEAVE;
6909 return IPR_RC_JOB_RETURN;
6913 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6914 * @ipr_cmd: ipr command struct
6916 * This function sends a Page 0 inquiry to the adapter
6917 * to retrieve supported inquiry pages.
6919 * Return value:
6920 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6922 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6924 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6925 char type[5];
6927 ENTER;
6929 /* Grab the type out of the VPD and store it away */
6930 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6931 type[4] = '\0';
6932 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6934 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6936 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6937 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6938 sizeof(struct ipr_inquiry_page0));
6940 LEAVE;
6941 return IPR_RC_JOB_RETURN;
6945 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6946 * @ipr_cmd: ipr command struct
6948 * This function sends a standard inquiry to the adapter.
6950 * Return value:
6951 * IPR_RC_JOB_RETURN
6953 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6955 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6957 ENTER;
6958 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6960 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6961 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6962 sizeof(struct ipr_ioa_vpd));
6964 LEAVE;
6965 return IPR_RC_JOB_RETURN;
6969 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6970 * @ipr_cmd: ipr command struct
6972 * This function sends an Identify Host Request Response Queue
6973 * command to establish the HRRQ with the adapter.
6975 * Return value:
6976 * IPR_RC_JOB_RETURN
6978 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
6980 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6981 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6983 ENTER;
6984 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6986 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6987 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6989 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6990 if (ioa_cfg->sis64)
6991 ioarcb->cmd_pkt.cdb[1] = 0x1;
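/*
 * CDB bytes 2-5 carry the low 32 bits of the host RRQ DMA address
 * (most significant byte first), bytes 7-8 the queue size in bytes,
 * and on SIS-64 adapters bytes 10-13 carry the upper 32 bits of the
 * address.
 */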
6992 ioarcb->cmd_pkt.cdb[2] =
6993 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6994 ioarcb->cmd_pkt.cdb[3] =
6995 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6996 ioarcb->cmd_pkt.cdb[4] =
6997 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6998 ioarcb->cmd_pkt.cdb[5] =
6999 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
7000 ioarcb->cmd_pkt.cdb[7] =
7001 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7002 ioarcb->cmd_pkt.cdb[8] =
7003 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7005 if (ioa_cfg->sis64) {
7006 ioarcb->cmd_pkt.cdb[10] =
7007 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7008 ioarcb->cmd_pkt.cdb[11] =
7009 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7010 ioarcb->cmd_pkt.cdb[12] =
7011 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7012 ioarcb->cmd_pkt.cdb[13] =
7013 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7016 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7018 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7020 LEAVE;
7021 return IPR_RC_JOB_RETURN;
7025 * ipr_reset_timer_done - Adapter reset timer function
7026 * @ipr_cmd: ipr command struct
7028 * Description: This function is used in adapter reset processing
7029 * for timing events. If the reset_cmd pointer in the IOA
7030 * config struct is not this command, we are doing nested
7031 * resets and fail_all_ops will take care of freeing the
7032 * command block.
7034 * Return value:
7035 * none
7037 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7039 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7040 unsigned long lock_flags = 0;
7042 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7044 if (ioa_cfg->reset_cmd == ipr_cmd) {
7045 list_del(&ipr_cmd->queue);
7046 ipr_cmd->done(ipr_cmd);
7049 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7053 * ipr_reset_start_timer - Start a timer for adapter reset job
7054 * @ipr_cmd: ipr command struct
7055 * @timeout: timeout value
7057 * Description: This function is used in adapter reset processing
7058 * for timing events. If the reset_cmd pointer in the IOA
7059 * config struct is not this command, we are doing nested
7060 * resets and fail_all_ops will take care of freeing the
7061 * command block.
7063 * Return value:
7064 * none
7066 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7067 unsigned long timeout)
7069 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7070 ipr_cmd->done = ipr_reset_ioa_job;
7072 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7073 ipr_cmd->timer.expires = jiffies + timeout;
7074 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7075 add_timer(&ipr_cmd->timer);
7079 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7080 * @ioa_cfg: ioa cfg struct
7082 * Return value:
7083 * nothing
7085 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7087 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7089 /* Initialize Host RRQ pointers */
7090 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7091 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7092 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7093 ioa_cfg->toggle_bit = 1;
7095 /* Zero out config table */
7096 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7100 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7101 * @ipr_cmd: ipr command struct
7103 * Return value:
7104 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7106 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7108 unsigned long stage, stage_time;
7109 u32 feedback;
7110 volatile u32 int_reg;
7111 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7112 u64 maskval = 0;
7114 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7115 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7116 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7118 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7120 /* sanity check the stage_time value */
7121 if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7122 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7123 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7124 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7126 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7127 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7128 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7129 stage_time = ioa_cfg->transop_timeout;
7130 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7131 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7132 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7133 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7134 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7135 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7136 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7137 return IPR_RC_JOB_CONTINUE;
7140 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7141 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7142 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7143 ipr_cmd->done = ipr_reset_ioa_job;
7144 add_timer(&ipr_cmd->timer);
7145 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7147 return IPR_RC_JOB_RETURN;
7151 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7152 * @ipr_cmd: ipr command struct
7154 * This function reinitializes some control blocks and
7155 * enables destructive diagnostics on the adapter.
7157 * Return value:
7158 * IPR_RC_JOB_RETURN
7160 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7162 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7163 volatile u32 int_reg;
7165 ENTER;
7166 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7167 ipr_init_ioa_mem(ioa_cfg);
7169 ioa_cfg->allow_interrupts = 1;
7170 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7172 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7173 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7174 ioa_cfg->regs.clr_interrupt_mask_reg32);
7175 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7176 return IPR_RC_JOB_CONTINUE;
7179 /* Enable destructive diagnostics on IOA */
7180 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7182 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7183 if (ioa_cfg->sis64)
7184 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
7186 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7188 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7190 if (ioa_cfg->sis64) {
7191 ipr_cmd->job_step = ipr_reset_next_stage;
7192 return IPR_RC_JOB_CONTINUE;
7195 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7196 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7197 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7198 ipr_cmd->done = ipr_reset_ioa_job;
7199 add_timer(&ipr_cmd->timer);
7200 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7202 LEAVE;
7203 return IPR_RC_JOB_RETURN;
7207 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7208 * @ipr_cmd: ipr command struct
7210 * This function is invoked when an adapter dump has run out
7211 * of processing time.
7213 * Return value:
7214 * IPR_RC_JOB_CONTINUE
7216 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7218 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7220 if (ioa_cfg->sdt_state == GET_DUMP)
7221 ioa_cfg->sdt_state = ABORT_DUMP;
7223 ipr_cmd->job_step = ipr_reset_alert;
7225 return IPR_RC_JOB_CONTINUE;
7229 * ipr_unit_check_no_data - Log a unit check/no data error log
7230 * @ioa_cfg: ioa config struct
7232 * Logs an error indicating the adapter unit checked, but for some
7233 * reason, we were unable to fetch the unit check buffer.
7235 * Return value:
7236 * nothing
7238 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7240 ioa_cfg->errors_logged++;
7241 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7245 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7246 * @ioa_cfg: ioa config struct
7248 * Fetches the unit check buffer from the adapter by clocking the data
7249 * through the mailbox register.
7251 * Return value:
7252 * nothing
7254 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7256 unsigned long mailbox;
7257 struct ipr_hostrcb *hostrcb;
7258 struct ipr_uc_sdt sdt;
7259 int rc, length;
7260 u32 ioasc;
7262 mailbox = readl(ioa_cfg->ioa_mailbox);
7264 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7265 ipr_unit_check_no_data(ioa_cfg);
7266 return;
7269 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7270 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7271 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7273 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7274 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7275 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7276 ipr_unit_check_no_data(ioa_cfg);
7277 return;
7280 /* Find length of the first sdt entry (UC buffer) */
7281 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7282 length = be32_to_cpu(sdt.entry[0].end_token);
7283 else
7284 length = (be32_to_cpu(sdt.entry[0].end_token) -
7285 be32_to_cpu(sdt.entry[0].start_token)) &
7286 IPR_FMT2_MBX_ADDR_MASK;
7288 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7289 struct ipr_hostrcb, queue);
7290 list_del(&hostrcb->queue);
7291 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7293 rc = ipr_get_ldump_data_section(ioa_cfg,
7294 be32_to_cpu(sdt.entry[0].start_token),
7295 (__be32 *)&hostrcb->hcam,
7296 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7298 if (!rc) {
7299 ipr_handle_log_data(ioa_cfg, hostrcb);
7300 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7301 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7302 ioa_cfg->sdt_state == GET_DUMP)
7303 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7304 } else
7305 ipr_unit_check_no_data(ioa_cfg);
7307 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7311 * ipr_reset_restore_cfg_space - Restore PCI config space.
7312 * @ipr_cmd: ipr command struct
7314 * Description: This function restores the saved PCI config space of
7315 * the adapter, fails all outstanding ops back to the callers, and
7316 * fetches the dump/unit check if applicable to this reset.
7318 * Return value:
7319 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7321 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7323 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7324 int rc;
7326 ENTER;
7327 ioa_cfg->pdev->state_saved = true;
7328 rc = pci_restore_state(ioa_cfg->pdev);
7330 if (rc != PCIBIOS_SUCCESSFUL) {
7331 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7332 return IPR_RC_JOB_CONTINUE;
7335 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7336 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7337 return IPR_RC_JOB_CONTINUE;
7340 ipr_fail_all_ops(ioa_cfg);
7342 if (ioa_cfg->ioa_unit_checked) {
7343 ioa_cfg->ioa_unit_checked = 0;
7344 ipr_get_unit_check_buffer(ioa_cfg);
7345 ipr_cmd->job_step = ipr_reset_alert;
7346 ipr_reset_start_timer(ipr_cmd, 0);
7347 return IPR_RC_JOB_RETURN;
7350 if (ioa_cfg->in_ioa_bringdown) {
7351 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7352 } else {
7353 ipr_cmd->job_step = ipr_reset_enable_ioa;
7355 if (GET_DUMP == ioa_cfg->sdt_state) {
7356 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7357 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7358 schedule_work(&ioa_cfg->work_q);
7359 return IPR_RC_JOB_RETURN;
7363 LEAVE;
7364 return IPR_RC_JOB_CONTINUE;
7368 * ipr_reset_bist_done - BIST has completed on the adapter.
7369 * @ipr_cmd: ipr command struct
7371 * Description: Unblock config space and resume the reset process.
7373 * Return value:
7374 * IPR_RC_JOB_CONTINUE
7376 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7378 ENTER;
7379 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7380 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7381 LEAVE;
7382 return IPR_RC_JOB_CONTINUE;
7386 * ipr_reset_start_bist - Run BIST on the adapter.
7387 * @ipr_cmd: ipr command struct
7389 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7391 * Return value:
7392 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7394 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7396 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7397 int rc;
7399 ENTER;
7400 pci_block_user_cfg_access(ioa_cfg->pdev);
7401 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7403 if (rc != PCIBIOS_SUCCESSFUL) {
7404 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7405 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7406 rc = IPR_RC_JOB_CONTINUE;
7407 } else {
7408 ipr_cmd->job_step = ipr_reset_bist_done;
7409 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7410 rc = IPR_RC_JOB_RETURN;
7413 LEAVE;
7414 return rc;
7418 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7419 * @ipr_cmd: ipr command struct
7421 * Description: This clears PCI reset to the adapter and delays two seconds.
7423 * Return value:
7424 * IPR_RC_JOB_RETURN
7426 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7428 ENTER;
7429 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7430 ipr_cmd->job_step = ipr_reset_bist_done;
7431 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7432 LEAVE;
7433 return IPR_RC_JOB_RETURN;
7437 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7438 * @ipr_cmd: ipr command struct
7440 * Description: This asserts PCI reset to the adapter.
7442 * Return value:
7443 * IPR_RC_JOB_RETURN
7445 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7447 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7448 struct pci_dev *pdev = ioa_cfg->pdev;
7450 ENTER;
7451 pci_block_user_cfg_access(pdev);
7452 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7453 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7454 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7455 LEAVE;
7456 return IPR_RC_JOB_RETURN;
7460 * ipr_reset_allowed - Query whether or not IOA can be reset
7461 * @ioa_cfg: ioa config struct
7463 * Return value:
7464 * 0 if reset not allowed / non-zero if reset is allowed
7466 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7468 volatile u32 temp_reg;
7470 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7471 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7475 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7476 * @ipr_cmd: ipr command struct
7478 * Description: This function waits for adapter permission to run BIST,
7479 * then runs BIST. If the adapter does not give permission after a
7480 * reasonable time, we will reset the adapter anyway. The impact of
7481 * resetting the adapter without warning the adapter is the risk of
7482 * losing the persistent error log on the adapter. If the adapter is
7483 * reset while it is writing to the flash on the adapter, the flash
7484 * segment will have bad ECC and be zeroed.
7486 * Return value:
7487 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7489 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7491 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7492 int rc = IPR_RC_JOB_RETURN;
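/*
 * Poll every IPR_CHECK_FOR_RESET_TIMEOUT until the adapter drops
 * IPR_PCII_CRITICAL_OPERATION or the wait budget is exhausted, then
 * fall through to the adapter's reset method (ioa_cfg->reset).
 */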
7494 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7495 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7496 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7497 } else {
7498 ipr_cmd->job_step = ioa_cfg->reset;
7499 rc = IPR_RC_JOB_CONTINUE;
7502 return rc;
7506 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
7507 * @ipr_cmd: ipr command struct
7509 * Description: This function alerts the adapter that it will be reset.
7510 * If memory space is not currently enabled, proceed directly
7511 * to running BIST on the adapter. The timer must always be started
7512 * so we guarantee we do not run BIST from ipr_isr.
7514 * Return value:
7515 * IPR_RC_JOB_RETURN
7517 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7519 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7520 u16 cmd_reg;
7521 int rc;
7523 ENTER;
7524 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7526 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7527 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7528 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7529 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7530 } else {
7531 ipr_cmd->job_step = ioa_cfg->reset;
7534 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7535 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7537 LEAVE;
7538 return IPR_RC_JOB_RETURN;
7542 * ipr_reset_ucode_download_done - Microcode download completion
7543 * @ipr_cmd: ipr command struct
7545 * Description: This function unmaps the microcode download buffer.
7547 * Return value:
7548 * IPR_RC_JOB_CONTINUE
7550 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7552 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7553 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7555 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7556 sglist->num_sg, DMA_TO_DEVICE);
7558 ipr_cmd->job_step = ipr_reset_alert;
7559 return IPR_RC_JOB_CONTINUE;
7563 * ipr_reset_ucode_download - Download microcode to the adapter
7564 * @ipr_cmd: ipr command struct
7566 * Description: This function checks to see if there is microcode
7567 * to download to the adapter. If there is, a download is performed.
7569 * Return value:
7570 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7572 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7574 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7575 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7577 ENTER;
7578 ipr_cmd->job_step = ipr_reset_alert;
7580 if (!sglist)
7581 return IPR_RC_JOB_CONTINUE;
7583 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7584 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7585 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7586 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7587 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7588 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7589 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7591 if (ioa_cfg->sis64)
7592 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7593 else
7594 ipr_build_ucode_ioadl(ipr_cmd, sglist);
7595 ipr_cmd->job_step = ipr_reset_ucode_download_done;
7597 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7598 IPR_WRITE_BUFFER_TIMEOUT);
7600 LEAVE;
7601 return IPR_RC_JOB_RETURN;
7605 * ipr_reset_shutdown_ioa - Shutdown the adapter
7606 * @ipr_cmd: ipr command struct
7608 * Description: This function issues an adapter shutdown of the
7609 * specified type to the specified adapter as part of the
7610 * adapter reset job.
7612 * Return value:
7613 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7615 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7617 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7618 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7619 unsigned long timeout;
7620 int rc = IPR_RC_JOB_CONTINUE;
7622 ENTER;
7623 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7624 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7625 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7626 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7627 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7629 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7630 timeout = IPR_SHUTDOWN_TIMEOUT;
7631 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7632 timeout = IPR_INTERNAL_TIMEOUT;
7633 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7634 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7635 else
7636 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7638 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7640 rc = IPR_RC_JOB_RETURN;
7641 ipr_cmd->job_step = ipr_reset_ucode_download;
7642 } else
7643 ipr_cmd->job_step = ipr_reset_alert;
7645 LEAVE;
7646 return rc;
7650 * ipr_reset_ioa_job - Adapter reset job
7651 * @ipr_cmd: ipr command struct
7653 * Description: This function is the job router for the adapter reset job.
7655 * Return value:
7656 * none
7658 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7660 u32 rc, ioasc;
7661 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
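	/* Execute job steps back to back until one of them issues an
	 * asynchronous command and returns IPR_RC_JOB_RETURN. */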
7663 do {
7664 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
7666 if (ioa_cfg->reset_cmd != ipr_cmd) {
7667 			/*
7668 			 * We are doing nested adapter resets and this is
7669 			 * not the current reset job.
7670 			 */
7671 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7672 return;
7675 if (IPR_IOASC_SENSE_KEY(ioasc)) {
7676 rc = ipr_cmd->job_step_failed(ipr_cmd);
7677 if (rc == IPR_RC_JOB_RETURN)
7678 return;
7681 ipr_reinit_ipr_cmnd(ipr_cmd);
7682 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7683 rc = ipr_cmd->job_step(ipr_cmd);
7684 } while(rc == IPR_RC_JOB_CONTINUE);
7688 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7689 * @ioa_cfg: ioa config struct
7690 * @job_step: first job step of reset job
7691 * @shutdown_type: shutdown type
7693 * Description: This function will initiate the reset of the given adapter
7694 * starting at the selected job step.
7695 * If the caller needs to wait on the completion of the reset,
7696 * the caller must sleep on the reset_wait_q.
7698 * Return value:
7699 * none
7701 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7702 int (*job_step) (struct ipr_cmnd *),
7703 enum ipr_shutdown_type shutdown_type)
7705 struct ipr_cmnd *ipr_cmd;
7707 ioa_cfg->in_reset_reload = 1;
7708 ioa_cfg->allow_cmds = 0;
7709 scsi_block_requests(ioa_cfg->host);
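	/* With new commands blocked, drive the reset from a dedicated
	 * command block starting at the requested job step. */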
7711 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7712 ioa_cfg->reset_cmd = ipr_cmd;
7713 ipr_cmd->job_step = job_step;
7714 ipr_cmd->u.shutdown_type = shutdown_type;
7716 ipr_reset_ioa_job(ipr_cmd);
7720 * ipr_initiate_ioa_reset - Initiate an adapter reset
7721 * @ioa_cfg: ioa config struct
7722 * @shutdown_type: shutdown type
7724 * Description: This function will initiate the reset of the given adapter.
7725 * If the caller needs to wait on the completion of the reset,
7726 * the caller must sleep on the reset_wait_q.
7728 * Return value:
7729 * none
7731 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7732 enum ipr_shutdown_type shutdown_type)
7734 if (ioa_cfg->ioa_is_dead)
7735 return;
7737 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7738 ioa_cfg->sdt_state = ABORT_DUMP;
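	/* Take the adapter offline once the reset retry budget is
	 * exhausted. */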
7740 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7741 dev_err(&ioa_cfg->pdev->dev,
7742 "IOA taken offline - error recovery failed\n");
7744 ioa_cfg->reset_retries = 0;
7745 ioa_cfg->ioa_is_dead = 1;
7747 if (ioa_cfg->in_ioa_bringdown) {
7748 ioa_cfg->reset_cmd = NULL;
7749 ioa_cfg->in_reset_reload = 0;
7750 ipr_fail_all_ops(ioa_cfg);
7751 wake_up_all(&ioa_cfg->reset_wait_q);
7753 spin_unlock_irq(ioa_cfg->host->host_lock);
7754 scsi_unblock_requests(ioa_cfg->host);
7755 spin_lock_irq(ioa_cfg->host->host_lock);
7756 return;
7757 } else {
7758 ioa_cfg->in_ioa_bringdown = 1;
7759 shutdown_type = IPR_SHUTDOWN_NONE;
7763 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7764 shutdown_type);
7768 * ipr_reset_freeze - Hold off all I/O activity
7769 * @ipr_cmd: ipr command struct
7771 * Description: If the PCI slot is frozen, hold off all I/O
7772 * activity; then, as soon as the slot is available again,
7773 * initiate an adapter reset.
7775 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7777 /* Disallow new interrupts, avoid loop */
7778 ipr_cmd->ioa_cfg->allow_interrupts = 0;
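	/* Park the reset command on the pending queue with
	 * ipr_reset_ioa_job() as its done routine, so the reset state
	 * machine resumes once the command completes (e.g. after the slot
	 * is available again). */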
7779 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7780 ipr_cmd->done = ipr_reset_ioa_job;
7781 return IPR_RC_JOB_RETURN;
7785 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7786 * @pdev: PCI device struct
7788 * Description: This routine is called to tell us that the PCI bus
7789 * is down. Can't do anything here, except put the device driver
7790 * into a holding pattern, waiting for the PCI bus to come back.
7792 static void ipr_pci_frozen(struct pci_dev *pdev)
7794 unsigned long flags = 0;
7795 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7797 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7798 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7803 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7804 * @pdev: PCI device struct
7806 * Description: This routine is called by the pci error recovery
7807 * code after the PCI slot has been reset, just before we
7808 * should resume normal operations.
7810 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7812 unsigned long flags = 0;
7813 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7815 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7816 if (ioa_cfg->needs_warm_reset)
7817 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7818 else
7819 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7820 IPR_SHUTDOWN_NONE);
7821 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7822 return PCI_ERS_RESULT_RECOVERED;
7826 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7827 * @pdev: PCI device struct
7829 * Description: This routine is called when the PCI bus has
7830 * permanently failed.
7832 static void ipr_pci_perm_failure(struct pci_dev *pdev)
7834 unsigned long flags = 0;
7835 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7837 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7838 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7839 ioa_cfg->sdt_state = ABORT_DUMP;
7840 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7841 ioa_cfg->in_ioa_bringdown = 1;
7842 ioa_cfg->allow_cmds = 0;
7843 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7844 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7848 * ipr_pci_error_detected - Called when a PCI error is detected.
7849 * @pdev: PCI device struct
7850 * @state: PCI channel state
7852 * Description: Called when a PCI error is detected.
7854 * Return value:
7855 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7857 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7858 pci_channel_state_t state)
7860 switch (state) {
7861 case pci_channel_io_frozen:
7862 ipr_pci_frozen(pdev);
7863 return PCI_ERS_RESULT_NEED_RESET;
7864 case pci_channel_io_perm_failure:
7865 ipr_pci_perm_failure(pdev);
7866 return PCI_ERS_RESULT_DISCONNECT;
7867 break;
7868 default:
7869 break;
7871 return PCI_ERS_RESULT_NEED_RESET;
7875 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7876 * @ioa_cfg: ioa cfg struct
7878 * Description: This is the second phase of adapter initialization.
7879 * This function takes care of initializing the adapter to the point
7880 * where it can accept new commands.
7882 * Return value:
7883 * 0 on success / -EIO on failure
7885 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7887 int rc = 0;
7888 unsigned long host_lock_flags = 0;
7890 ENTER;
7891 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7892 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
7893 if (ioa_cfg->needs_hard_reset) {
7894 ioa_cfg->needs_hard_reset = 0;
7895 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7896 } else
7897 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7898 IPR_SHUTDOWN_NONE);
7900 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7901 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7902 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7904 if (ioa_cfg->ioa_is_dead) {
7905 rc = -EIO;
7906 } else if (ipr_invalid_adapter(ioa_cfg)) {
7907 if (!ipr_testmode)
7908 rc = -EIO;
7910 dev_err(&ioa_cfg->pdev->dev,
7911 "Adapter not supported in this hardware configuration.\n");
7914 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7916 LEAVE;
7917 return rc;
7921 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7922 * @ioa_cfg: ioa config struct
7924 * Return value:
7925 * none
7927 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7929 int i;
7931 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7932 if (ioa_cfg->ipr_cmnd_list[i])
7933 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7934 ioa_cfg->ipr_cmnd_list[i],
7935 ioa_cfg->ipr_cmnd_list_dma[i]);
7937 ioa_cfg->ipr_cmnd_list[i] = NULL;
7940 if (ioa_cfg->ipr_cmd_pool)
7941 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7943 ioa_cfg->ipr_cmd_pool = NULL;
7947 * ipr_free_mem - Frees memory allocated for an adapter
7948 * @ioa_cfg: ioa cfg struct
7950 * Return value:
7951 * nothing
7953 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7955 int i;
7957 kfree(ioa_cfg->res_entries);
7958 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7959 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7960 ipr_free_cmd_blks(ioa_cfg);
7961 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7962 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7963 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7964 ioa_cfg->u.cfg_table,
7965 ioa_cfg->cfg_table_dma);
7967 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7968 pci_free_consistent(ioa_cfg->pdev,
7969 sizeof(struct ipr_hostrcb),
7970 ioa_cfg->hostrcb[i],
7971 ioa_cfg->hostrcb_dma[i]);
7974 ipr_free_dump(ioa_cfg);
7975 kfree(ioa_cfg->trace);
7979 * ipr_free_all_resources - Free all allocated resources for an adapter.
7980 * @ioa_cfg: ioa config struct
7982 * This function frees all allocated resources for the
7983 * specified adapter.
7985 * Return value:
7986 * none
7988 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7990 struct pci_dev *pdev = ioa_cfg->pdev;
7992 ENTER;
7993 free_irq(pdev->irq, ioa_cfg);
7994 pci_disable_msi(pdev);
7995 iounmap(ioa_cfg->hdw_dma_regs);
7996 pci_release_regions(pdev);
7997 ipr_free_mem(ioa_cfg);
7998 scsi_host_put(ioa_cfg->host);
7999 pci_disable_device(pdev);
8000 LEAVE;
8004 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8005 * @ioa_cfg: ioa config struct
8007 * Return value:
8008 * 0 on success / -ENOMEM on allocation failure
8010 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8012 struct ipr_cmnd *ipr_cmd;
8013 struct ipr_ioarcb *ioarcb;
8014 dma_addr_t dma_addr;
8015 int i;
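	/* All command blocks come from a single DMA-able pool; each block's
	 * bus address is programmed into its IOARCB below so the adapter
	 * can reference it directly. */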
8017 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
8018 sizeof(struct ipr_cmnd), 16, 0);
8020 if (!ioa_cfg->ipr_cmd_pool)
8021 return -ENOMEM;
8023 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8024 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8026 if (!ipr_cmd) {
8027 ipr_free_cmd_blks(ioa_cfg);
8028 return -ENOMEM;
8031 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8032 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8033 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8035 ioarcb = &ipr_cmd->ioarcb;
8036 ipr_cmd->dma_addr = dma_addr;
8037 if (ioa_cfg->sis64)
8038 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8039 else
8040 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8042 ioarcb->host_response_handle = cpu_to_be32(i << 2);
8043 if (ioa_cfg->sis64) {
8044 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8045 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8046 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8047 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8048 } else {
8049 ioarcb->write_ioadl_addr =
8050 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8051 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8052 ioarcb->ioasa_host_pci_addr =
8053 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8055 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8056 ipr_cmd->cmd_index = i;
8057 ipr_cmd->ioa_cfg = ioa_cfg;
8058 ipr_cmd->sense_buffer_dma = dma_addr +
8059 offsetof(struct ipr_cmnd, sense_buffer);
8061 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8064 return 0;
8068 * ipr_alloc_mem - Allocate memory for an adapter
8069 * @ioa_cfg: ioa config struct
8071 * Return value:
8072 * 0 on success / non-zero for error
8074 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8076 struct pci_dev *pdev = ioa_cfg->pdev;
8077 int i, rc = -ENOMEM;
8079 ENTER;
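	/* Each allocation below unwinds through the labels at the end of
	 * the function on failure, releasing everything acquired so far. */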
8080 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8081 ioa_cfg->max_devs_supported, GFP_KERNEL);
8083 if (!ioa_cfg->res_entries)
8084 goto out;
8086 if (ioa_cfg->sis64) {
8087 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8088 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8089 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8090 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8091 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8092 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8095 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8096 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8097 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8100 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8101 sizeof(struct ipr_misc_cbs),
8102 &ioa_cfg->vpd_cbs_dma);
8104 if (!ioa_cfg->vpd_cbs)
8105 goto out_free_res_entries;
8107 if (ipr_alloc_cmd_blks(ioa_cfg))
8108 goto out_free_vpd_cbs;
8110 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8111 sizeof(u32) * IPR_NUM_CMD_BLKS,
8112 &ioa_cfg->host_rrq_dma);
8114 if (!ioa_cfg->host_rrq)
8115 goto out_ipr_free_cmd_blocks;
8117 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8118 ioa_cfg->cfg_table_size,
8119 &ioa_cfg->cfg_table_dma);
8121 if (!ioa_cfg->u.cfg_table)
8122 goto out_free_host_rrq;
8124 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8125 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8126 sizeof(struct ipr_hostrcb),
8127 &ioa_cfg->hostrcb_dma[i]);
8129 if (!ioa_cfg->hostrcb[i])
8130 goto out_free_hostrcb_dma;
8132 ioa_cfg->hostrcb[i]->hostrcb_dma =
8133 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8134 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8135 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8138 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8139 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8141 if (!ioa_cfg->trace)
8142 goto out_free_hostrcb_dma;
8144 rc = 0;
8145 out:
8146 LEAVE;
8147 return rc;
8149 out_free_hostrcb_dma:
8150 while (i-- > 0) {
8151 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8152 ioa_cfg->hostrcb[i],
8153 ioa_cfg->hostrcb_dma[i]);
8155 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8156 ioa_cfg->u.cfg_table,
8157 ioa_cfg->cfg_table_dma);
8158 out_free_host_rrq:
8159 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8160 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8161 out_ipr_free_cmd_blocks:
8162 ipr_free_cmd_blks(ioa_cfg);
8163 out_free_vpd_cbs:
8164 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8165 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8166 out_free_res_entries:
8167 kfree(ioa_cfg->res_entries);
8168 goto out;
8172 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8173 * @ioa_cfg: ioa config struct
8175 * Return value:
8176 * none
8178 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8180 int i;
8182 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8183 ioa_cfg->bus_attr[i].bus = i;
8184 ioa_cfg->bus_attr[i].qas_enabled = 0;
8185 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8186 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8187 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8188 else
8189 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8194 * ipr_init_ioa_cfg - Initialize IOA config struct
8195 * @ioa_cfg: ioa config struct
8196 * @host: scsi host struct
8197 * @pdev: PCI dev struct
8199 * Return value:
8200 * none
8202 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8203 struct Scsi_Host *host, struct pci_dev *pdev)
8205 const struct ipr_interrupt_offsets *p;
8206 struct ipr_interrupts *t;
8207 void __iomem *base;
8209 ioa_cfg->host = host;
8210 ioa_cfg->pdev = pdev;
8211 ioa_cfg->log_level = ipr_log_level;
8212 ioa_cfg->doorbell = IPR_DOORBELL;
8213 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8214 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8215 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8216 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8217 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8218 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8219 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8220 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8222 INIT_LIST_HEAD(&ioa_cfg->free_q);
8223 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8224 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8225 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8226 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8227 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8228 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8229 init_waitqueue_head(&ioa_cfg->reset_wait_q);
8230 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8231 ioa_cfg->sdt_state = INACTIVE;
8233 ipr_initialize_bus_attr(ioa_cfg);
8234 ioa_cfg->max_devs_supported = ipr_max_devs;
8236 if (ioa_cfg->sis64) {
8237 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8238 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8239 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8240 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8241 } else {
8242 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8243 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8244 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8245 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8247 host->max_channel = IPR_MAX_BUS_TO_SCAN;
8248 host->unique_id = host->host_no;
8249 host->max_cmd_len = IPR_MAX_CDB_LEN;
8250 pci_set_drvdata(pdev, ioa_cfg);
8252 p = &ioa_cfg->chip_cfg->regs;
8253 t = &ioa_cfg->regs;
8254 base = ioa_cfg->hdw_dma_regs;
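	/* Translate the chip-specific register offsets into ioremapped
	 * addresses used throughout the driver. */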
8256 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8257 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8258 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8259 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8260 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8261 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8262 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8263 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8264 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8265 t->ioarrin_reg = base + p->ioarrin_reg;
8266 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8267 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8268 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8269 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8270 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8271 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8273 if (ioa_cfg->sis64) {
8274 t->init_feedback_reg = base + p->init_feedback_reg;
8275 t->dump_addr_reg = base + p->dump_addr_reg;
8276 t->dump_data_reg = base + p->dump_data_reg;
8281 * ipr_get_chip_info - Find adapter chip information
8282 * @dev_id: PCI device id struct
8284 * Return value:
8285 * ptr to chip information on success / NULL on failure
8287 static const struct ipr_chip_t * __devinit
8288 ipr_get_chip_info(const struct pci_device_id *dev_id)
8290 int i;
8292 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8293 if (ipr_chip[i].vendor == dev_id->vendor &&
8294 ipr_chip[i].device == dev_id->device)
8295 return &ipr_chip[i];
8296 return NULL;
8300 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8301 * @irq: interrupt number
8302 * @devp: ioa config struct
8303 * Description: Simply set the msi_received flag to 1 indicating that
8304 * Message Signaled Interrupts are supported.
8306 * Return value:
8307 * IRQ_HANDLED
8309 static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8311 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8312 unsigned long lock_flags = 0;
8313 irqreturn_t rc = IRQ_HANDLED;
8315 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8317 ioa_cfg->msi_received = 1;
8318 wake_up(&ioa_cfg->msi_wait_q);
8320 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8321 return rc;
8325 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8326 * @ioa_cfg: ioa config struct
8327 * @pdev: PCI device struct
8328 * Description: The return value from pci_enable_msi() cannot always be
8329 * trusted. This routine sets up and initiates a test interrupt to determine
8330 * if the interrupt is received via the ipr_test_intr() service routine.
8331 * If the test fails, the driver will fall back to LSI.
8333 * Return value:
8334 * 0 on success / non-zero on failure
8336 static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8337 struct pci_dev *pdev)
8339 int rc;
8340 volatile u32 int_reg;
8341 unsigned long lock_flags = 0;
8343 ENTER;
8345 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8346 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8347 ioa_cfg->msi_received = 0;
8348 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8349 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8350 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8351 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8353 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8354 if (rc) {
8355 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8356 return rc;
8357 } else if (ipr_debug)
8358 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
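	/* Raise the IO debug acknowledge interrupt, then wait up to one
	 * second for ipr_test_intr() to observe it and set msi_received. */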
8360 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8361 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8362 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8363 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8365 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8366 if (!ioa_cfg->msi_received) {
8367 /* MSI test failed */
8368 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
8369 rc = -EOPNOTSUPP;
8370 } else if (ipr_debug)
8371 dev_info(&pdev->dev, "MSI test succeeded.\n");
8373 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8375 free_irq(pdev->irq, ioa_cfg);
8377 LEAVE;
8379 return rc;
8383 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8384 * @pdev: PCI device struct
8385 * @dev_id: PCI device id struct
8387 * Return value:
8388 * 0 on success / non-zero on failure
8390 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8391 const struct pci_device_id *dev_id)
8393 struct ipr_ioa_cfg *ioa_cfg;
8394 struct Scsi_Host *host;
8395 unsigned long ipr_regs_pci;
8396 void __iomem *ipr_regs;
8397 int rc = PCIBIOS_SUCCESSFUL;
8398 volatile u32 mask, uproc, interrupts;
8400 ENTER;
8402 if ((rc = pci_enable_device(pdev))) {
8403 dev_err(&pdev->dev, "Cannot enable adapter\n");
8404 goto out;
8407 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8409 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8411 if (!host) {
8412 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8413 rc = -ENOMEM;
8414 goto out_disable;
8417 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8418 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8419 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8420 sata_port_info.flags, &ipr_sata_ops);
8422 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8424 if (!ioa_cfg->ipr_chip) {
8425 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8426 dev_id->vendor, dev_id->device);
8427 goto out_scsi_host_put;
8430 /* set SIS 32 or SIS 64 */
8431 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8432 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8434 if (ipr_transop_timeout)
8435 ioa_cfg->transop_timeout = ipr_transop_timeout;
8436 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8437 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8438 else
8439 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8441 ioa_cfg->revid = pdev->revision;
8443 ipr_regs_pci = pci_resource_start(pdev, 0);
8445 rc = pci_request_regions(pdev, IPR_NAME);
8446 if (rc < 0) {
8447 dev_err(&pdev->dev,
8448 "Couldn't register memory range of registers\n");
8449 goto out_scsi_host_put;
8452 ipr_regs = pci_ioremap_bar(pdev, 0);
8454 if (!ipr_regs) {
8455 dev_err(&pdev->dev,
8456 "Couldn't map memory range of registers\n");
8457 rc = -ENOMEM;
8458 goto out_release_regions;
8461 ioa_cfg->hdw_dma_regs = ipr_regs;
8462 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8463 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8465 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8467 pci_set_master(pdev);
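	/* SIS-64 adapters can use a 64-bit DMA mask; fall back to 32 bits
	 * if the platform cannot provide it. */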
8469 if (ioa_cfg->sis64) {
8470 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8471 if (rc < 0) {
8472 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8473 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8476 } else
8477 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8479 if (rc < 0) {
8480 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8481 goto cleanup_nomem;
8484 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8485 ioa_cfg->chip_cfg->cache_line_size);
8487 if (rc != PCIBIOS_SUCCESSFUL) {
8488 dev_err(&pdev->dev, "Write of cache line size failed\n");
8489 rc = -EIO;
8490 goto cleanup_nomem;
8493 /* Enable MSI style interrupts if they are supported. */
8494 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8495 rc = ipr_test_msi(ioa_cfg, pdev);
8496 if (rc == -EOPNOTSUPP)
8497 pci_disable_msi(pdev);
8498 else if (rc)
8499 goto out_msi_disable;
8500 else
8501 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8502 } else if (ipr_debug)
8503 dev_info(&pdev->dev, "Cannot enable MSI.\n");
8505 /* Save away PCI config space for use following IOA reset */
8506 rc = pci_save_state(pdev);
8508 if (rc != PCIBIOS_SUCCESSFUL) {
8509 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8510 rc = -EIO;
8511 goto cleanup_nomem;
8514 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8515 goto cleanup_nomem;
8517 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8518 goto cleanup_nomem;
8520 if (ioa_cfg->sis64)
8521 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8522 + ((sizeof(struct ipr_config_table_entry64)
8523 * ioa_cfg->max_devs_supported)));
8524 else
8525 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8526 + ((sizeof(struct ipr_config_table_entry)
8527 * ioa_cfg->max_devs_supported)));
8529 rc = ipr_alloc_mem(ioa_cfg);
8530 if (rc < 0) {
8531 dev_err(&pdev->dev,
8532 "Couldn't allocate enough memory for device driver!\n");
8533 goto cleanup_nomem;
8537 * If HRRQ updated interrupt is not masked, or reset alert is set,
8538 * the card is in an unknown state and needs a hard reset
8540 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8541 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8542 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8543 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8544 ioa_cfg->needs_hard_reset = 1;
8545 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8546 ioa_cfg->needs_hard_reset = 1;
8547 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8548 ioa_cfg->ioa_unit_checked = 1;
8550 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8551 rc = request_irq(pdev->irq, ipr_isr,
8552 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8553 IPR_NAME, ioa_cfg);
8555 if (rc) {
8556 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8557 pdev->irq, rc);
8558 goto cleanup_nolog;
8561 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8562 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8563 ioa_cfg->needs_warm_reset = 1;
8564 ioa_cfg->reset = ipr_reset_slot_reset;
8565 } else
8566 ioa_cfg->reset = ipr_reset_start_bist;
8568 spin_lock(&ipr_driver_lock);
8569 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8570 spin_unlock(&ipr_driver_lock);
8572 LEAVE;
8573 out:
8574 return rc;
8576 cleanup_nolog:
8577 ipr_free_mem(ioa_cfg);
8578 cleanup_nomem:
8579 iounmap(ipr_regs);
8580 out_msi_disable:
8581 pci_disable_msi(pdev);
8582 out_release_regions:
8583 pci_release_regions(pdev);
8584 out_scsi_host_put:
8585 scsi_host_put(host);
8586 out_disable:
8587 pci_disable_device(pdev);
8588 goto out;
8592 * ipr_scan_vsets - Scans for VSET devices
8593 * @ioa_cfg: ioa config struct
8595 * Description: Since the VSET resources do not follow SAM (we can have
8596 * sparse LUNs with no LUN 0), we have to scan for these devices ourselves.
8598 * Return value:
8599 * none
8601 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8603 int target, lun;
8605 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8606 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8607 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8611 * ipr_initiate_ioa_bringdown - Bring down an adapter
8612 * @ioa_cfg: ioa config struct
8613 * @shutdown_type: shutdown type
8615 * Description: This function will initiate bringing down the adapter.
8616 * This consists of issuing an IOA shutdown to the adapter
8617 * to flush the cache, and running BIST.
8618 * If the caller needs to wait on the completion of the reset,
8619 * the caller must sleep on the reset_wait_q.
8621 * Return value:
8622 * none
8624 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8625 enum ipr_shutdown_type shutdown_type)
8627 ENTER;
8628 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8629 ioa_cfg->sdt_state = ABORT_DUMP;
8630 ioa_cfg->reset_retries = 0;
8631 ioa_cfg->in_ioa_bringdown = 1;
8632 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8633 LEAVE;
8637 * __ipr_remove - Remove a single adapter
8638 * @pdev: pci device struct
8640 * Adapter hot plug remove entry point.
8642 * Return value:
8643 * none
8645 static void __ipr_remove(struct pci_dev *pdev)
8647 unsigned long host_lock_flags = 0;
8648 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8649 ENTER;
8651 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8652 while(ioa_cfg->in_reset_reload) {
8653 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8654 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8655 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8658 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8660 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8661 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8662 flush_scheduled_work();
8663 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8665 spin_lock(&ipr_driver_lock);
8666 list_del(&ioa_cfg->queue);
8667 spin_unlock(&ipr_driver_lock);
8669 if (ioa_cfg->sdt_state == ABORT_DUMP)
8670 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8671 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8673 ipr_free_all_resources(ioa_cfg);
8675 LEAVE;
8679 * ipr_remove - IOA hot plug remove entry point
8680 * @pdev: pci device struct
8682 * Adapter hot plug remove entry point.
8684 * Return value:
8685 * none
8687 static void __devexit ipr_remove(struct pci_dev *pdev)
8689 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8691 ENTER;
8693 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8694 &ipr_trace_attr);
8695 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8696 &ipr_dump_attr);
8697 scsi_remove_host(ioa_cfg->host);
8699 __ipr_remove(pdev);
8701 LEAVE;
8705 * ipr_probe - Adapter hot plug add entry point
8707 * Return value:
8708 * 0 on success / non-zero on failure
8710 static int __devinit ipr_probe(struct pci_dev *pdev,
8711 const struct pci_device_id *dev_id)
8713 struct ipr_ioa_cfg *ioa_cfg;
8714 int rc;
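	/* Probe runs in two stages: ipr_probe_ioa() maps and allocates
	 * adapter resources, ipr_probe_ioa_part2() resets the IOA to an
	 * operational state, and only then is the SCSI host registered
	 * and scanned. */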
8716 rc = ipr_probe_ioa(pdev, dev_id);
8718 if (rc)
8719 return rc;
8721 ioa_cfg = pci_get_drvdata(pdev);
8722 rc = ipr_probe_ioa_part2(ioa_cfg);
8724 if (rc) {
8725 __ipr_remove(pdev);
8726 return rc;
8729 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8731 if (rc) {
8732 __ipr_remove(pdev);
8733 return rc;
8736 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8737 &ipr_trace_attr);
8739 if (rc) {
8740 scsi_remove_host(ioa_cfg->host);
8741 __ipr_remove(pdev);
8742 return rc;
8745 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8746 &ipr_dump_attr);
8748 if (rc) {
8749 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8750 &ipr_trace_attr);
8751 scsi_remove_host(ioa_cfg->host);
8752 __ipr_remove(pdev);
8753 return rc;
8756 scsi_scan_host(ioa_cfg->host);
8757 ipr_scan_vsets(ioa_cfg);
8758 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8759 ioa_cfg->allow_ml_add_del = 1;
8760 ioa_cfg->host->max_channel = IPR_VSET_BUS;
8761 schedule_work(&ioa_cfg->work_q);
8762 return 0;
8766 * ipr_shutdown - Shutdown handler.
8767 * @pdev: pci device struct
8769 * This function is invoked upon system shutdown/reboot. It will issue
8770 * an adapter shutdown to the adapter to flush the write cache.
8772 * Return value:
8773 * none
8775 static void ipr_shutdown(struct pci_dev *pdev)
8777 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8778 unsigned long lock_flags = 0;
8780 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8781 while(ioa_cfg->in_reset_reload) {
8782 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8783 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8784 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8787 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8788 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8789 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8792 static struct pci_device_id ipr_pci_table[] __devinitdata = {
8793 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8794 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
8795 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8796 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
8797 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8798 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
8799 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8800 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
8801 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8802 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
8803 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8804 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
8805 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8806 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
8807 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8808 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
8809 IPR_USE_LONG_TRANSOP_TIMEOUT },
8810 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8811 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8812 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8813 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8814 IPR_USE_LONG_TRANSOP_TIMEOUT },
8815 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8816 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8817 IPR_USE_LONG_TRANSOP_TIMEOUT },
8818 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8819 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8820 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8821 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8822 IPR_USE_LONG_TRANSOP_TIMEOUT},
8823 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8824 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8825 IPR_USE_LONG_TRANSOP_TIMEOUT },
8826 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8827 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
8828 IPR_USE_LONG_TRANSOP_TIMEOUT },
8829 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8830 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
8831 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8832 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
8833 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
8834 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
8835 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
8836 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8837 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
8838 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8839 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
8840 IPR_USE_LONG_TRANSOP_TIMEOUT },
8841 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8842 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
8843 IPR_USE_LONG_TRANSOP_TIMEOUT },
8844 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8845 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
8846 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8847 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
8848 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8849 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
8850 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8851 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
8852 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8853 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
8854 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8855 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
8856 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8857 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
8858 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8859 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
8862 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
8864 static struct pci_error_handlers ipr_err_handler = {
8865 .error_detected = ipr_pci_error_detected,
8866 .slot_reset = ipr_pci_slot_reset,
8869 static struct pci_driver ipr_driver = {
8870 .name = IPR_NAME,
8871 .id_table = ipr_pci_table,
8872 .probe = ipr_probe,
8873 .remove = __devexit_p(ipr_remove),
8874 .shutdown = ipr_shutdown,
8875 .err_handler = &ipr_err_handler,
8879 * ipr_halt_done - Shutdown prepare completion
8881 * Return value:
8882 * none
8884 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
8886 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8888 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8892 * ipr_halt - Issue shutdown prepare to all adapters
8894 * Return value:
8895 * NOTIFY_OK on success / NOTIFY_DONE on failure
8897 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
8899 struct ipr_cmnd *ipr_cmd;
8900 struct ipr_ioa_cfg *ioa_cfg;
8901 unsigned long flags = 0;
8903 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
8904 return NOTIFY_DONE;
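	/* Issue a "prepare for normal shutdown" to every adapter that is
	 * still accepting commands so each IOA can begin quiescing before
	 * the system goes down. */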
8906 spin_lock(&ipr_driver_lock);
8908 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
8909 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8910 if (!ioa_cfg->allow_cmds) {
8911 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8912 continue;
8915 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8916 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8917 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8918 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8919 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
8921 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
8922 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8924 spin_unlock(&ipr_driver_lock);
8926 return NOTIFY_OK;
8929 static struct notifier_block ipr_notifier = {
8930 ipr_halt, NULL, 0
8934 * ipr_init - Module entry point
8936 * Return value:
8937 * 0 on success / negative value on failure
8939 static int __init ipr_init(void)
8941 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8942 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8944 register_reboot_notifier(&ipr_notifier);
8945 return pci_register_driver(&ipr_driver);
8949 * ipr_exit - Module unload
8951 * Module unload entry point.
8953 * Return value:
8954 * none
8956 static void __exit ipr_exit(void)
8958 unregister_reboot_notifier(&ipr_notifier);
8959 pci_unregister_driver(&ipr_driver);
8962 module_init(ipr_init);
8963 module_exit(ipr_exit);