/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- SCSI device hot plug
 *
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.cache_line_size = 0x20,
		.regs = {
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.cache_line_size = 0x20,
		.regs = {
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{
		.cache_line_size = 0x20,
		.regs = {
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	}
};
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
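
/*
 * Illustration only: the parameter names declared above can be supplied at
 * module load time, for example
 *
 *     modprobe ipr max_speed=1 log_level=2 number_of_msix=8
 *
 * or as ipr.<parameter>=<value> on the kernel command line when the driver
 * is built in.
 */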
/*  A constant array of IOASCs/URCs/Error Messages */
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	"Logical Unit in process of becoming ready"},
	"Initializing command required"},
	"34FF: Disk device format in progress"},
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	"Synchronization required"},
	"IOA microcode download required"},
	"Device bus connection is prohibited by host"},
	"No ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, command not allowed to a non-optimized resource"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	"Illegal request, another cable connector was physically disabled"},
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	"Data protect, other volume set problem"},
	"Aborted command, invalid descriptor"},
	"Target operating conditions have changed, dual adapter takeover"},
	"Aborted command, medium removal prevented"},
	"Command terminated by host"},
	"Aborted command, command terminated by host"}
};
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
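
/*
 * The driver trace above is a fixed-size ring: atomic_add_return() hands out
 * monotonically increasing slot numbers and IPR_TRACE_INDEX_MASK wraps them
 * into ioa_cfg->trace[], so concurrent callers each get their own slot until
 * the ring wraps around.
 */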
/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}
/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}
/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}
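
/*
 * Note: __ipr_get_free_ipr_cmnd() returns NULL when the HRRQ free queue is
 * empty, so callers must be prepared to handle allocation failure.
 */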
/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}
/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
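
/*
 * The readl() of sense_interrupt_reg at the end of
 * ipr_mask_and_clear_interrupts() forces the preceding posted MMIO writes to
 * reach the adapter before the caller continues.
 */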
/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}
/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}
/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
}
/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}
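
/*
 * For SIS-64 adapters the low-order bits of the address written to the
 * IOARRIN register encode the IOARCB size: bit 0 selects the default
 * 256-byte IOARCB and bit 2 requests the larger 512-byte format when the
 * scatter list would not fit in 128 bytes.
 */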
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:		done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}
/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}
/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned int hrrq;

	if (ioa_cfg->hrrq_num == 1)
		hrrq = 0;
	else {
		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
	}
	return hrrq;
}
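
/*
 * HRRQ 0 is used for internal/initialization commands (IPR_INIT_HRRQ), so
 * ipr_get_hrrq_index() distributes normal I/O round-robin across the
 * remaining queues 1..hrrq_num-1.
 */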
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
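
/*
 * HCAM CDB layout used above: cdb[0] carries the Host Controlled Async
 * opcode, cdb[1] selects the HCAM type (configuration-change vs. error
 * notification), and cdb[7..8] hold the hostrcb buffer length in big-endian
 * order.
 */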
/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch (proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	}
}
/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->reset_occurred = 0;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}

			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		    !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			    sizeof(cfgtew->u.cfgte64->lun)))
			return 1;
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}
/**
 * __ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @len:	length of buffer provided
 **/
static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}
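
/*
 * __ipr_format_res_path() renders a resource path as dash-separated hex
 * bytes, e.g. a three-element path prints as "00-0A-01" (illustrative
 * values), stopping at the 0xff terminator.
 */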
/**
 * ipr_format_res_path - Format the resource path for printing.
 * @ioa_cfg:	ioa config struct
 * @res_path:	resource path
 * @len:	length of buffer provided
 **/
static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
				 u8 *res_path, char *buffer, int len)
{
	char *p = buffer;

	p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
	__ipr_format_res_path(res_path, p, len - (buffer - p));

	return buffer;
}
/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}
/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}
/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}
/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}
/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:	string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 *	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}
/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn struct
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}
/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:	vendor/product id/sn struct
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}
/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:	string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:	vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:	vendor/product id/sn/wwn struct
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}
/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		be32_to_cpu(error->ioa_data[0]),
		be32_to_cpu(error->ioa_data[1]),
		be32_to_cpu(error->ioa_data[2]));
}
/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err("Device %d : %s", i + 1,
			__ipr_format_res_path(dev_entry->res_path,
					      buffer, sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}
/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}
/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_14_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;
	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");
	}
}
/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);
		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}
/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @ioa_cfg:	ioa config struct
 * @data:	IOA error data
 * @len:	data length
 **/
static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
{
	int i;

	if (len == 0)
		return;

	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}
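/*
 * Illustrative output of the hex dump loop above (the data values are
 * made up; only the format string is taken from the code):
 *
 *   00000000: DEADBEEF 00000001 00000002 00000003
 *   00000010: 00000004 00000005 00000006 00000007
 *
 * Each line prints a byte offset followed by four big-endian words
 * converted to host order.
 */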
/**
 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
					    struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_17_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_17_error;
	else
		error = &hostrcb->hcam.u.error.u.type_17_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_17_error, data)));
}
/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	strim(error->failure_reason);

	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
		     be32_to_cpu(hostrcb->hcam.u.error.prc));
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}
static const struct {
	u8 active;
	char *desc;
} path_active_desc[] = {
	{ IPR_PATH_NO_INFO, "Path" },
	{ IPR_PATH_ACTIVE, "Active path" },
	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
};

static const struct {
	u8 state;
	char *desc;
} path_state_desc[] = {
	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
	{ IPR_PATH_HEALTHY, "is healthy" },
	{ IPR_PATH_DEGRADED, "is degraded" },
	{ IPR_PATH_FAILED, "is failed" }
};
/**
 * ipr_log_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 **/
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port);
			} else if (fabric->cascaded_expander == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->phy);
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander);
			} else {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
			}
			return;
		}
	}

	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
}
/**
 * ipr_log64_fabric_path - Log a fabric path error
 * @hostrcb:	hostrcb struct
 * @fabric:	fabric descriptor
 **/
static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
				  struct ipr_hostrcb64_fabric_desc *fabric)
{
	int i, j;
	u8 path_state = fabric->path_state;
	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
	u8 state = path_state & IPR_PATH_STATE_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
						fabric->res_path,
						buffer, sizeof(buffer)));
			return;
		}
	}

	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
				    buffer, sizeof(buffer)));
}
static const struct {
	u8 type;
	char *desc;
} path_type_desc[] = {
	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
};

static const struct {
	u8 status;
	char *desc;
} path_status_desc[] = {
	{ IPR_PATH_CFG_NO_PROB, "Functional" },
	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
	{ IPR_PATH_CFG_FAILED, "Failed" },
	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
	{ IPR_PATH_NOT_DETECTED, "Missing" },
	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
};

static const char *link_rate[] = {
	"unknown",
	"disabled",
	"phy reset problem",
	"spinup hold",
	"port selector",
	"unknown",
	"unknown",
	"unknown",
	"1.5Gbps",
	"3.0Gbps",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown",
	"unknown"
};
/**
 * ipr_log_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 **/
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
			      struct ipr_hostrcb_config_element *cfg)
{
	int i, j;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;

	if (type == IPR_PATH_CFG_NOT_EXIST)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			if (type == IPR_PATH_CFG_IOA_PORT) {
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
			} else {
				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
						     path_status_desc[j].desc, path_type_desc[i].desc,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->cascaded_expander == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else if (cfg->phy == 0xff) {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				} else {
					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
						     "WWN=%08X%08X\n", path_status_desc[j].desc,
						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
				}
			}
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log64_path_elem - Log a fabric path element.
 * @hostrcb:	hostrcb struct
 * @cfg:	fabric path element struct
 **/
static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
				struct ipr_hostrcb64_config_element *cfg)
{
	int i, j;
	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
		return;

	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
			continue;

		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				continue;

			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
				     path_status_desc[j].desc, path_type_desc[i].desc,
				     ipr_format_res_path(hostrcb->ioa_cfg,
					cfg->res_path, buffer, sizeof(buffer)),
				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
				     be32_to_cpu(cfg->wwid[0]),
				     be32_to_cpu(cfg->wwid[1]));
			return;
		}
	}

	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     "WWN=%08X%08X\n", cfg->type_status,
		     ipr_format_res_path(hostrcb->ioa_cfg,
			cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
 * ipr_log_fabric_error - Log a fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_20_error *error;
	struct ipr_hostrcb_fabric_desc *fabric;
	struct ipr_hostrcb_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error.u.type_20_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb_error, u) +
		 offsetof(struct ipr_hostrcb_type_20_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}
/**
 * ipr_log_sis64_array_error - Log a sis64 array error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
				      struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_24_error *error;
	struct ipr_hostrcb64_array_data_entry *array_entry;
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error64.u.type_24_error;

	ipr_err("RAID %s Array Configuration: %s\n",
		error->protection_level,
		ipr_format_res_path(ioa_cfg, error->last_res_path,
			buffer, sizeof(buffer)));

	array_entry = error->array_member;
	num_entries = min_t(u32, error->num_entries,
			    ARRAY_SIZE(error->array_member));

	for (i = 0; i < num_entries; i++, array_entry++) {
		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (error->exposed_mode_adn == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_err("Array Member %d:\n", i);
		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(ioa_cfg, array_entry->res_path,
				buffer, sizeof(buffer)));
		ipr_err("Expected Location: %s\n",
			ipr_format_res_path(ioa_cfg,
				array_entry->expected_res_path,
				buffer, sizeof(buffer)));
	}
}
/**
 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_30_error *error;
	struct ipr_hostrcb64_fabric_desc *fabric;
	struct ipr_hostrcb64_config_element *cfg;
	int i, add_len;

	error = &hostrcb->hcam.u.error64.u.type_30_error;

	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);

	add_len = be32_to_cpu(hostrcb->hcam.length) -
		(offsetof(struct ipr_hostrcb64_error, u) +
		 offsetof(struct ipr_hostrcb_type_30_error, desc));

	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
		for_each_fabric_cfg(fabric, cfg)
			ipr_log64_path_elem(hostrcb, cfg);

		add_len -= be16_to_cpu(fabric->length);
		fabric = (struct ipr_hostrcb64_fabric_desc *)
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	}

	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}
/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}
/**
 * ipr_log_sis64_device_error - Log a sis64 device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 **/
static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_21_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_21_error;

	ipr_err("-----Failing Device Information-----\n");
	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
		be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
	ipr_err("Device Resource Path: %s\n",
		__ipr_format_res_path(error->res_path,
				      buffer, sizeof(buffer)));
	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
	ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
	ipr_err("SCSI Sense Data:\n");
	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
	ipr_err("SCSI Command Descriptor Block:\n");
	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));

	ipr_err("Additional IOA Data:\n");
	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
}
/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
			return i;

	return 0;
}
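/*
 * The index returned above is used to consult ipr_error_table, as done in
 * ipr_handle_log_data() below, e.g.:
 *
 *	error_index = ipr_get_error(ioasc);
 *	if (!ipr_error_table[error_index].log_hcam)
 *		return;
 *	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 *
 * Entry 0 is the catch-all used when the IOASC is not recognized.
 */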
/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;
	struct ipr_hostrcb_type_21_error *error;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	if (ioa_cfg->sis64)
		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.fd_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
		error = &hostrcb->hcam.u.error64.u.type_21_error;

		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
		    ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
			return;
	}

	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_12:
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_13:
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_14:
	case IPR_HOST_RCB_OVERLAY_ID_16:
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_17:
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_20:
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_21:
		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_23:
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_24:
	case IPR_HOST_RCB_OVERLAY_ID_26:
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_30:
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}
static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
{
	struct ipr_hostrcb *hostrcb;

	hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
					struct ipr_hostrcb, queue);

	if (unlikely(!hostrcb)) {
		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
		hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
						struct ipr_hostrcb, queue);
	}

	list_del_init(&hostrcb->queue);
	return hostrcb;
}
/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 fd_ioasc;

	if (ioa_cfg->sis64)
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
	else
		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);

	list_del_init(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
		   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
	schedule_work(&ioa_cfg->work_q);
	hostrcb = ipr_get_free_hostrcb(ioa_cfg);

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}
/**
 * ipr_timeout - An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 **/
static void ipr_timeout(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_oper_timeout - Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 **/
static void ipr_oper_timeout(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}
/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	struct ipr_std_inq_vpids *vpids;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				vpids = &res->std_inq_data.vpids;
				if (vpids->product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}
/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:	SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
			continue;

		if (bus != res->bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}
/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:	ioa config struct
 * @max_delay:	max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

		delay += delay;
	}
	return -EIO;
}
/**
 * ipr_get_sis64_dump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success
 **/
static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
					   u32 start_addr,
					   __be32 *dest, u32 length_in_words)
{
	int i;

	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
		dest++;
	}

	return 0;
}
/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:			destination kernel buffer
 * @length_in_words:		length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	if (ioa_cfg->sis64)
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);

	/* Write IOA interrupt reg starting LDUMP state */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg32);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}
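/*
 * Reading aid only: summary of the (non-SIS64) LDUMP handshake implemented
 * above, restated from the code and its comments:
 *
 *   1. Raise RESET_ALERT | IO_DEBUG_ALERT to put the IOA into LDUMP state.
 *   2. Wait for IO_DEBUG_ACKNOWLEDGE, write the start address to the
 *      mailbox, then clear the reset alert to mark the address valid.
 *   3. For each word: wait for the acknowledge, read the mailbox, and
 *      (except on the last word) clear the acknowledge to request more.
 *   4. Signal end of transfer and poll until the IOA drops RESET_ALERT.
 */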
#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:		length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	if (ioa_cfg->sis64)
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	else
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page)
				return bytes_copied;

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else
			break;

		schedule();
	}

	return bytes_copied;
}
/**
 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
 * @hdr:	dump entry header struct
 **/
static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
{
	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
	hdr->num_elems = 1;
	hdr->offset = sizeof(*hdr);
	hdr->status = IPR_DUMP_STATUS_SUCCESS;
}
/**
 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 **/
static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;

	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
	driver_dump->ioa_type_entry.hdr.len =
		sizeof(struct ipr_dump_ioa_type_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
	driver_dump->ioa_type_entry.type = ioa_cfg->type;
	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
		ucode_vpd->minor_release[1];
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_version_data - Fill in the driver version in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 **/
static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
	driver_dump->version_entry.hdr.len =
		sizeof(struct ipr_dump_version_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 **/
static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->trace_entry.hdr.len =
		sizeof(struct ipr_dump_trace_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
	driver_dump->hdr.num_entries++;
}
/**
 * ipr_dump_location_data - Fill in the IOA location in the dump.
 * @ioa_cfg:	ioa config struct
 * @driver_dump:	driver dump struct
 **/
static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_driver_dump *driver_dump)
{
	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
	driver_dump->location_entry.hdr.len =
		sizeof(struct ipr_dump_location_entry) -
		sizeof(struct ipr_dump_entry_header);
	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
	driver_dump->hdr.num_entries++;
}
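/*
 * The four helpers above each append one fixed driver-side entry
 * (version, location, adapter type, trace) to the dump header.
 * ipr_get_ioa_dump() below calls them in sequence before copying the
 * adapter's own Smart Dump Table data.
 */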
/**
 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
 * @ioa_cfg:	ioa config struct
 * @dump:	dump struct
 **/
static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
{
	unsigned long start_addr, sdt_word;
	unsigned long lock_flags = 0;
	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
	u32 num_entries, max_num_entries, start_off, end_off;
	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
	struct ipr_sdt *sdt;
	int valid = 1;
	int i;

	ENTER;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state != READ_DUMP) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->sis64) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ssleep(IPR_DUMP_DELAY_SECONDS);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	start_addr = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");

	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;

	/* Initialize the overall dump header */
	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
	driver_dump->hdr.num_entries = 1;
	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;

	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);

	/* Update dump_header */
	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);

	/* IOA Dump entry */
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;

	/* First entries in sdt are actually a list of dump addresses and
	   lengths to gather the real dump data. sdt represents the pointer
	   to the ioa generated dump table. Dump data will be extracted based
	   on entries in this table */
	sdt = &ioa_dump->sdt;

	if (ioa_cfg->sis64) {
		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
	} else {
		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
	}

	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
			(max_num_entries * sizeof(struct ipr_sdt_entry));
	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					bytes_to_copy / sizeof(__be32));

	/* Smart Dump table is ready to use and the first entry is valid */
	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		dev_err(&ioa_cfg->pdev->dev,
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
			rc, be32_to_cpu(sdt->hdr.state));
		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
		ioa_cfg->sdt_state = DUMP_OBTAINED;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);

	if (num_entries > max_num_entries)
		num_entries = max_num_entries;

	/* Update dump length to the actual data to be copied */
	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
	if (ioa_cfg->sis64)
		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
	else
		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < num_entries; i++) {
		if (ioa_dump->hdr.len > max_dump_size) {
			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
			break;
		}

		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
			if (ioa_cfg->sis64)
				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
			else {
				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
				end_off = be32_to_cpu(sdt->entry[i].end_token);

				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
					bytes_to_copy = end_off - start_off;
				else
					valid = 0;
			}
			if (valid) {
				if (bytes_to_copy > max_dump_size) {
					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
					continue;
				}

				/* Copy data from adapter to driver buffers */
				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
							    bytes_to_copy);

				ioa_dump->hdr.len += bytes_copied;

				if (bytes_copied != bytes_to_copy) {
					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
					break;
				}
			}
		}
	}

	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");

	/* Update dump_header */
	driver_dump->hdr.len += ioa_dump->hdr.len;
	ioa_cfg->sdt_state = DUMP_OBTAINED;
	LEAVE;
}

#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
#endif
/**
 * ipr_release_dump - Free adapter dump memory
 * @kref:	kref struct
 **/
static void ipr_release_dump(struct kref *kref)
{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
	unsigned long lock_flags = 0;
	int i;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->dump = NULL;
	ioa_cfg->sdt_state = INACTIVE;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);

	vfree(dump->ioa_dump.ioa_data);
	kfree(dump);
	LEAVE;
}
/**
 * ipr_worker_thread - Worker thread
 * @work:	ioa config struct
 *
 * Called at task level from a work thread. This function takes care
 * of adding and removing device from the mid-layer as configuration
 * changes are detected by the adapter.
 **/
static void ipr_worker_thread(struct work_struct *work)
{
	unsigned long lock_flags;
	struct ipr_resource_entry *res;
	struct scsi_device *sdev;
	struct ipr_dump *dump;
	struct ipr_ioa_cfg *ioa_cfg =
		container_of(work, struct ipr_ioa_cfg, work_q);
	u8 bus, target, lun;
	int did_work;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->sdt_state == READ_DUMP) {
		dump = ioa_cfg->dump;
		if (!dump) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}
		kref_get(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_get_ioa_dump(ioa_cfg, dump);
		kref_put(&dump->kref, ipr_release_dump);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	if (ioa_cfg->scsi_unblock) {
		ioa_cfg->scsi_unblock = 0;
		ioa_cfg->scsi_blocked = 0;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		scsi_unblock_requests(ioa_cfg->host);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->scsi_blocked)
			scsi_block_requests(ioa_cfg->host);
	}

	if (!ioa_cfg->scan_enabled) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

restart:
	do {
		did_work = 0;
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return;
		}

		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->del_from_ml && res->sdev) {
				did_work = 1;
				sdev = res->sdev;
				if (!scsi_device_get(sdev)) {
					if (!res->add_to_ml)
						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
					else
						res->del_from_ml = 0;
					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
				}
				break;
			}
		}
	} while (did_work);

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->add_to_ml) {
			bus = res->bus;
			target = res->target;
			lun = res->lun;
			res->add_to_ml = 0;
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			scsi_add_device(ioa_cfg->host, bus, target, lun);
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			goto restart;
		}
	}

	ioa_cfg->scan_done = 1;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
	LEAVE;
}
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_read_trace - Dump the adapter trace
 * @filp:	open sysfs file
 * @kobj:	kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:	buffer
 * @off:	offset
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
				IPR_TRACE_SIZE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return ret;
}

static struct bin_attribute ipr_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ipr_read_trace,
};
#endif
/**
 * ipr_show_fw_version - Show the firmware version
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
		       ucode_vpd->major_release, ucode_vpd->card_type,
		       ucode_vpd->minor_release[0],
		       ucode_vpd->minor_release[1]);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_fw_version_attr = {
	.attr = {
		.name =		"fw_version",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_version,
};
/**
 * ipr_show_log_level - Show the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_log_level(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

/**
 * ipr_store_log_level - Change the adapter's error logging level
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_store_log_level(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return strlen(buf);
}

static struct device_attribute ipr_log_level_attr = {
	.attr = {
		.name =		"log_level",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
};
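/*
 * Illustrative sysfs usage for the attribute above. The path shown is the
 * typical location for SCSI host class device attributes and the values
 * are examples only; adjust the host number for the system at hand:
 *
 *   # cat /sys/class/scsi_host/host0/log_level
 *   2
 *   # echo 4 > /sys/class/scsi_host/host0/log_level
 */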
/**
 * ipr_store_diagnostics - IOA Diagnostics interface
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter and wait a reasonable
 * amount of time for any errors that the adapter might log.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_diagnostics(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	ioa_cfg->errors_logged = 0;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	if (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

		/* Wait for a second for any errors to be logged */
		msleep(1000);
	} else {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return -EIO;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
		rc = -EIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}

static struct device_attribute ipr_diagnostics_attr = {
	.attr = {
		.name =		"run_diagnostics",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_diagnostics
};
/**
 * ipr_show_adapter_state - Show the adapter's state
 * @dev:	device struct
 * @buf:	buffer
 *
 * Return value:
 * 	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_state(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		len = snprintf(buf, PAGE_SIZE, "offline\n");
	else
		len = snprintf(buf, PAGE_SIZE, "online\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}
/**
 * ipr_store_adapter_state - Change adapter state
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will change the adapter's state.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_adapter_state(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count, i;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
	    !strncmp(buf, "online", 6)) {
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		ioa_cfg->reset_retries = 0;
		ioa_cfg->in_ioa_bringdown = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_state_attr = {
	.attr = {
		.name =		"online_state",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state
};
/**
 * ipr_store_reset_adapter - Reset the adapter
 * @dev:	device struct
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will reset the adapter.
 *
 * Return value:
 * 	count on success / other on failure
 **/
static ssize_t ipr_store_reset_adapter(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags;
	int result = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	return result;
}

static struct device_attribute ipr_ioa_reset_attr = {
	.attr = {
		.name =		"reset_host",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_reset_adapter
};

static int ipr_iopoll(struct irq_poll *iop, int budget);
/**
 * ipr_show_iopoll_weight - Show ipr polling mode
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_iopoll_weight(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return len;
}
/**
 * ipr_store_iopoll_weight - Change the adapter's polling mode
 * @dev:	class device struct
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_store_iopoll_weight(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long user_iopoll_weight;
	unsigned long lock_flags = 0;
	int i;

	if (!ioa_cfg->sis64) {
		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
		return -EINVAL;
	}
	if (kstrtoul(buf, 10, &user_iopoll_weight))
		return -EINVAL;

	if (user_iopoll_weight > 256) {
		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
		return -EINVAL;
	}

	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
		dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
		return strlen(buf);
	}

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	spin_lock_irqsave(shost->host_lock, lock_flags);
	ioa_cfg->iopoll_weight = user_iopoll_weight;
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}
	spin_unlock_irqrestore(shost->host_lock, lock_flags);

	return strlen(buf);
}

static struct device_attribute ipr_iopoll_weight_attr = {
	.attr = {
		.name =		"iopoll_weight",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_iopoll_weight,
	.store = ipr_store_iopoll_weight
};
/**
 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
 * @buf_len:	buffer length
 *
 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
 * list to use for microcode download
 *
 * Return value:
 * 	pointer to sglist / NULL on failure
 **/
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
{
	int sg_size, order;
	struct ipr_sglist *sglist;

	/* Get the minimum size per scatter/gather element */
	sg_size = buf_len / (IPR_MAX_SGLIST - 1);

	/* Get the actual size per element */
	order = get_order(sg_size);

	/* Allocate a scatter/gather list for the DMA */
	sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
	if (sglist == NULL)
		return NULL;

	sglist->order = order;
	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
					      &sglist->num_sg);
	if (!sglist->scatterlist) {
		kfree(sglist);
		return NULL;
	}

	return sglist;
}

/**
 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:	scatter/gather list pointer
 *
 * Free a DMA'able ucode download buffer previously allocated with
 * ipr_alloc_ucode_buffer
 **/
static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
{
	sgl_free_order(sglist->scatterlist, sglist->order);
	kfree(sglist);
}
/**
 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
 * @sglist:		scatter/gather list pointer
 * @buffer:		buffer pointer
 * @len:		buffer length
 *
 * Copy a microcode image from a user buffer into a buffer allocated by
 * ipr_alloc_ucode_buffer
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
{
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	void *kaddr;

	/* Determine the actual number of bytes per element */
	bsize_elem = PAGE_SIZE * (1 << sglist->order);

	scatterlist = sglist->scatterlist;

	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, bsize_elem);
		kunmap(page);

		scatterlist[i].length = bsize_elem;
	}

	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);

		kaddr = kmap(page);
		memcpy(kaddr, buffer, len % bsize_elem);
		kunmap(page);

		scatterlist[i].length = len % bsize_elem;
	}

	sglist->buffer_len = len;
	return result;
}
/**
 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
				    struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_build_ucode_ioadl - Build a microcode download IOADL
 * @ipr_cmd:	ipr command struct
 * @sglist:	scatter/gather list
 *
 * Builds a microcode download IOA data list (IOADL).
 *
 **/
static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
				  struct ipr_sglist *sglist)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct scatterlist *scatterlist = sglist->scatterlist;
	int i;

	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);

	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);

	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
		ioadl[i].address =
			cpu_to_be32(sg_dma_address(&scatterlist[i]));
	}

	ioadl[i-1].flags_and_data_len |=
		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
}
/**
 * ipr_update_ioa_ucode - Update IOA's microcode
 * @ioa_cfg:	ioa config struct
 * @sglist:	scatter/gather list
 *
 * Initiate an adapter reset to update the IOA's microcode
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_sglist *sglist)
{
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ioa_cfg->ucode_sglist) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode download already in progress\n");
		return -EIO;
	}

	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
					sglist->scatterlist, sglist->num_sg,
					DMA_TO_DEVICE);

	if (!sglist->num_dma_sg) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to map microcode download buffer!\n");
		return -EIO;
	}

	ioa_cfg->ucode_sglist = sglist;
	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ioa_cfg->ucode_sglist = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
/**
 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev:	device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * This function will update the firmware on the adapter.
 *
 * Return value:
 *	count on success / other on failure
 **/
static ssize_t ipr_store_update_fw(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_ucode_image_header *image_hdr;
	const struct firmware *fw_entry;
	struct ipr_sglist *sglist;
	char fname[100];
	char *endline;
	u8 *src;
	int result, dnld_size;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	snprintf(fname, sizeof(fname), "%s", buf);

	endline = strchr(fname, '\n');
	if (endline)
		*endline = '\0';

	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
		return -EIO;
	}

	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;

	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
	sglist = ipr_alloc_ucode_buffer(dnld_size);

	if (!sglist) {
		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
		release_firmware(fw_entry);
		return -ENOMEM;
	}

	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);

	if (result) {
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer copy to DMA buffer failed\n");
		goto out;
	}

	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");

	result = ipr_update_ioa_ucode(ioa_cfg, sglist);

	if (!result)
		result = count;
out:
	ipr_free_ucode_buffer(sglist);
	release_firmware(fw_entry);
	return result;
}

static struct device_attribute ipr_update_fw_attr = {
	.attr = {
		.name =		"update_fw",
		.mode =		S_IWUSR,
	},
	.store = ipr_store_update_fw
};
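/*
 * Usage sketch (illustrative only): the image is fetched with
 * request_firmware(), so it is normally placed under the firmware search
 * path (e.g. /lib/firmware) and the download is then triggered by writing
 * the file name to the "update_fw" attribute:
 *
 *	echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * The file name and host number above are assumptions made for the example.
 */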
/**
 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_fw_type(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	unsigned long lock_flags = 0;
	int len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_ioa_fw_type_attr = {
	.attr = {
		.name =		"fw_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_fw_type
};
static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;
	ssize_t ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					struct ipr_hostrcb, queue);
	if (!hostrcb) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
				sizeof(hostrcb->hcam));
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return ret;
}

static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
				struct bin_attribute *bin_attr, char *buf,
				loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_hostrcb *hostrcb;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
					struct ipr_hostrcb, queue);
	if (!hostrcb) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return count;
	}

	/* Reclaim hostrcb before exit */
	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return count;
}

static struct bin_attribute ipr_ioa_async_err_log = {
	.attr = {
		.name =		"async_err_log",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.read = ipr_read_async_err_log,
	.write = ipr_next_async_err_log
};
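/*
 * Usage sketch (illustrative only): "async_err_log" is a binary sysfs
 * attribute. Reading it returns the oldest queued HCAM error buffer, and
 * writing anything to it pops that entry off hostrcb_report_q so that the
 * next read returns the following entry.
 */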
static struct device_attribute *ipr_ioa_attrs[] = {
	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,
	&ipr_iopoll_weight_attr,
	NULL,
};

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_read_dump - Dump the adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
			     struct bin_attribute *bin_attr,
			     char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;
	u8 *src;
	int len, sdt_end;
	size_t rc = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;

	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (off > dump->driver_dump.hdr.len) {
		kref_put(&dump->kref, ipr_release_dump);
		return 0;
	}

	if (off + count > dump->driver_dump.hdr.len) {
		count = dump->driver_dump.hdr.len - off;
		rc = count;
	}

	if (count && off < sizeof(dump->driver_dump)) {
		if (off + count > sizeof(dump->driver_dump))
			len = sizeof(dump->driver_dump) - off;
		else
			len = count;
		src = (u8 *)&dump->driver_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sizeof(dump->driver_dump);

	if (ioa_cfg->sis64)
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
			   sizeof(struct ipr_sdt_entry));
	else
		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));

	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		else
			len = count;
		src = (u8 *)&dump->ioa_dump + off;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	off -= sdt_end;

	while (count) {
		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
			len = PAGE_ALIGN(off) - off;
		else
			len = count;
		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
		src += off & ~PAGE_MASK;
		memcpy(buf, src, len);
		buf += len;
		off += len;
		count -= len;
	}

	kref_put(&dump->kref, ipr_release_dump);
	return rc;
}
/**
 * ipr_alloc_dump - Prepare for adapter dump
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	__be32 **ioa_data;
	unsigned long lock_flags = 0;

	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);

	if (!dump) {
		ipr_err("Dump memory allocation failed\n");
		return -ENOMEM;
	}

	if (ioa_cfg->sis64)
		ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
					      sizeof(__be32 *)));
	else
		ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
					      sizeof(__be32 *)));

	if (!ioa_data) {
		ipr_err("Dump memory allocation failed\n");
		kfree(dump);
		return -ENOMEM;
	}

	dump->ioa_dump.ioa_data = ioa_data;

	kref_init(&dump->kref);
	dump->ioa_cfg = ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (INACTIVE != ioa_cfg->sdt_state) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		vfree(dump->ioa_dump.ioa_data);
		kfree(dump);
		return 0;
	}

	ioa_cfg->dump = dump;
	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
		ioa_cfg->dump_taken = 1;
		schedule_work(&ioa_cfg->work_q);
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_free_dump - Free adapter dump memory
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_dump *dump;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	dump = ioa_cfg->dump;
	if (!dump) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return 0;
	}

	ioa_cfg->dump = NULL;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	kref_put(&dump->kref, ipr_release_dump);

	return 0;
}
/**
 * ipr_write_dump - Setup dump state of adapter
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:		bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *cdev = container_of(kobj, struct device, kobj);
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (buf[0] == '1')
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	else
		return -EINVAL;

	if (rc)
		return rc;
	else
		return count;
}

static struct bin_attribute ipr_dump_attr = {
	.attr =	{
		.name = "dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.read = ipr_read_dump,
	.write = ipr_write_dump
};
#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
#endif
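/*
 * Usage sketch (illustrative only, requires CONFIG_SCSI_IPR_DUMP): the
 * "dump" binary attribute drives the adapter dump facility defined above,
 * relative to the host's sysfs directory:
 *
 *	echo 1 > dump			# ipr_alloc_dump(): prepare dump buffers
 *	cat dump > /tmp/ioa.dump	# ipr_read_dump(): copy the dump out
 *	echo 0 > dump			# ipr_free_dump(): release the memory
 *
 * The destination path is an assumption made for the example.
 */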
/**
 * ipr_change_queue_depth - Change the device's queue depth
 * @sdev:	scsi device struct
 * @qdepth:	depth to set
 *
 * Return value:
 *	actual depth set
 **/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}
/**
 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_adapter_handle_attr = {
	.attr = {
		.name = 	"adapter_handle",
		.mode =		S_IRUSR,
	},
	.show = ipr_show_adapter_handle
};
/**
 * ipr_show_resource_path - Show the resource path or the resource address for
 *			    this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "%s\n",
			       __ipr_format_res_path(res->res_path, buffer,
						     sizeof(buffer)));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
			       res->bus, res->target, res->lun);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_path_attr = {
	.attr = {
		.name = 	"resource_path",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_path
};
/**
 * ipr_show_device_id - Show the device_id for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ioa_cfg->sis64)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
	else if (res)
		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_device_id_attr = {
	.attr = {
		.name =		"device_id",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_device_id
};
/**
 * ipr_show_resource_type - Show the resource type for this device.
 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len = -ENXIO;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;

	if (res)
		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_resource_type_attr = {
	.attr = {
		.name =		"resource_type",
		.mode =		S_IRUGO,
	},
	.show = ipr_show_resource_type
};
/**
 * ipr_show_raw_mode - Show the adapter's raw mode
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_show_raw_mode(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res)
		len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
	else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}
/**
 * ipr_store_raw_mode - Change the adapter's raw mode
 * @dev:	class device struct
 * @attr:	device attribute (unused)
 * @buf:	buffer
 * @count:	buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ipr_store_raw_mode(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	ssize_t len;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res)) {
			res->raw_mode = simple_strtoul(buf, NULL, 10);
			len = count;
			if (res->sdev)
				sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
					    res->raw_mode ? "enabled" : "disabled");
		} else
			len = -EINVAL;
	} else
		len = -ENXIO;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return len;
}

static struct device_attribute ipr_raw_mode_attr = {
	.attr = {
		.name =		"raw_mode",
		.mode =		S_IRUGO | S_IWUSR,
	},
	.show = ipr_show_raw_mode,
	.store = ipr_store_raw_mode
};

static struct device_attribute *ipr_dev_attrs[] = {
	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
	&ipr_raw_mode_attr,
	NULL,
};
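/*
 * Usage sketch (illustrative only): the per-device attributes above appear
 * in the scsi_device sysfs directory, e.g. for a device at 0:0:1:0:
 *
 *	cat /sys/class/scsi_device/0:0:1:0/device/resource_path
 *	echo 1 > /sys/class/scsi_device/0:0:1:0/device/raw_mode
 *
 * The H:C:T:L address is an assumption made for the example.
 */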
/**
 * ipr_biosparam - Return the HSC mapping
 * @sdev:		scsi device struct
 * @block_device:	block device pointer
 * @capacity:		capacity of the device
 * @parm:		Array containing returned HSC values.
 *
 * This function generates the HSC parms that fdisk uses.
 * We want to make sure we return something that places partitions
 * on 4k boundaries for best performance with the IOA.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_biosparam(struct scsi_device *sdev,
			 struct block_device *block_device,
			 sector_t capacity, int *parm)
{
	int heads, sectors;
	sector_t cylinders;

	heads = 128;
	sectors = 32;

	cylinders = capacity;
	sector_div(cylinders, (128 * 32));

	/* return result */
	parm[0] = heads;
	parm[1] = sectors;
	parm[2] = cylinders;

	return 0;
}
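/*
 * Worked example (illustrative only): with the fixed 128-head, 32-sector
 * geometry reported above, a disk of 143,374,744 sectors is presented as
 * 143374744 / (128 * 32) = 35003 cylinders (integer division), keeping
 * partition boundaries aligned on 4k multiples.
 */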
/**
 * ipr_find_starget - Find target based on bus/target.
 * @starget:	scsi target struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == starget->channel) &&
		    (res->target == starget->id)) {
			return res;
		}
	}

	return NULL;
}

static struct ata_port_info sata_port_info;
/**
 * ipr_target_alloc - Prepare for commands to a SCSI target
 * @starget:	scsi target struct
 *
 * If the device is a SATA device, this function allocates an
 * ATA port with libata, else it does nothing.
 *
 * Return value:
 *	0 on success / non-0 on failure
 **/
static int ipr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	struct ipr_sata_port *sata_port;
	struct ata_port *ap;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = ipr_find_starget(starget);
	starget->hostdata = NULL;

	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
		if (!sata_port)
			return -ENOMEM;

		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
		if (ap) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->ap = ap;
			sata_port->res = res;

			res->sata_port = sata_port;
			ap->private_data = sata_port;
			starget->hostdata = sata_port;
		} else {
			kfree(sata_port);
			return -ENOMEM;
		}
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return 0;
}
/**
 * ipr_target_destroy - Destroy a SCSI target
 * @starget:	scsi target struct
 *
 * If the device was a SATA device, this function frees the libata
 * ATA port, else it does nothing.
 *
 **/
static void ipr_target_destroy(struct scsi_target *starget)
{
	struct ipr_sata_port *sata_port = starget->hostdata;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;

	if (ioa_cfg->sis64) {
		if (!ipr_find_starget(starget)) {
			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->array_ids);
			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
				clear_bit(starget->id, ioa_cfg->vset_ids);
			else if (starget->channel == 0)
				clear_bit(starget->id, ioa_cfg->target_ids);
		}
	}

	if (sata_port) {
		starget->hostdata = NULL;
		ata_sas_port_destroy(sata_port->ap);
		kfree(sata_port);
	}
}
/**
 * ipr_find_sdev - Find device based on bus/target/lun.
 * @sdev:	scsi device struct
 *
 * Return value:
 *	resource entry pointer if found / NULL if not found
 **/
static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if ((res->bus == sdev->channel) &&
		    (res->target == sdev->id) &&
		    (res->lun == sdev->lun))
			return res;
	}

	return NULL;
}
/**
 * ipr_slave_destroy - Unconfigure a SCSI device
 * @sdev:	scsi device struct
 *
 * Return value:
 *	nothing
 **/
static void ipr_slave_destroy(struct scsi_device *sdev)
{
	struct ipr_resource_entry *res;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = (struct ipr_resource_entry *) sdev->hostdata;
	if (res) {
		if (res->sata_port)
			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
		sdev->hostdata = NULL;
		res->sdev = NULL;
		res->sata_port = NULL;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_slave_configure - Configure a SCSI device
 * @sdev:	scsi device struct
 *
 * This function configures the specified scsi device.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_slave_configure(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	struct ata_port *ap = NULL;
	unsigned long lock_flags = 0;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	res = sdev->hostdata;
	if (res) {
		if (ipr_is_af_dasd_device(res))
			sdev->type = TYPE_RAID;
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
			sdev->scsi_level = 4;
			sdev->no_uld_attach = 1;
		}
		if (ipr_is_vset_device(res)) {
			sdev->scsi_level = SCSI_SPC_3;
			sdev->no_report_opcodes = 1;
			blk_queue_rq_timeout(sdev->request_queue,
					     IPR_VSET_RW_TIMEOUT);
			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
		}
		if (ipr_is_gata(res) && res->sata_port)
			ap = res->sata_port->ap;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (ap) {
			scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
			ata_sas_slave_configure(sdev, ap);
		}

		if (ioa_cfg->sis64)
			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
				    ipr_format_res_path(ioa_cfg,
					res->res_path, buffer, sizeof(buffer)));
		return 0;
	}
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return 0;
}
/**
 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
 * @sdev:	scsi device struct
 *
 * This function initializes an ATA port so that future commands
 * sent through queuecommand will work.
 *
 * Return value:
 *	0 on success
 **/
static int ipr_ata_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_sata_port *sata_port = NULL;
	int rc = -ENXIO;

	if (sdev->sdev_target)
		sata_port = sdev->sdev_target->hostdata;
	if (sata_port) {
		rc = ata_sas_port_init(sata_port->ap);
		if (rc == 0)
			rc = ata_sas_sync_probe(sata_port->ap);
	}

	if (rc)
		ipr_slave_destroy(sdev);

	return rc;
}
/**
 * ipr_slave_alloc - Prepare for commands to a device.
 * @sdev:	scsi device struct
 *
 * This function saves a pointer to the resource entry
 * in the scsi device struct if the device exists. We
 * can then use this pointer in ipr_queuecommand when
 * handling new commands.
 *
 * Return value:
 *	0 on success / -ENXIO if device does not exist
 **/
static int ipr_slave_alloc(struct scsi_device *sdev)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
	struct ipr_resource_entry *res;
	unsigned long lock_flags;
	int rc = -ENXIO;

	sdev->hostdata = NULL;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	res = ipr_find_sdev(sdev);
	if (res) {
		res->sdev = sdev;
		sdev->hostdata = res;
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		rc = 0;
		if (ipr_is_gata(res)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			return ipr_ata_slave_alloc(sdev);
		}
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
/**
 * ipr_match_lun - Match function for specified LUN
 * @ipr_cmd:	ipr command struct
 * @device:	device to match (sdev)
 *
 * Returns:
 *	1 if command matches sdev / 0 if command does not match sdev
 **/
static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
{
	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
		return 1;
	return 0;
}

/**
 * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd:	ipr command struct
 *
 * Returns:
 *	true / false
 **/
static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmnd *loop_cmd;

	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
		if (loop_cmd == ipr_cmd)
			return true;
	}

	return false;
}

/**
 * ipr_match_res - Match function for specified resource entry
 * @ipr_cmd:	ipr command struct
 * @resource:	resource entry to match
 *
 * Returns:
 *	1 if command matches sdev / 0 if command does not match sdev
 **/
static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
{
	struct ipr_resource_entry *res = resource;

	if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
		return 1;
	return 0;
}
/**
 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:	ioa config struct
 * @device:	device to match (sdev)
 * @match:	match function to use
 *
 * Returns:
 *	SUCCESS / FAILED
 **/
static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
			    int (*match)(struct ipr_cmnd *, void *))
{
	struct ipr_cmnd *ipr_cmd;
	int wait, i;
	unsigned long flags;
	struct ipr_hrr_queue *hrrq;
	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
	DECLARE_COMPLETION_ONSTACK(comp);

	do {
		wait = 0;

		for_each_hrrq(hrrq, ioa_cfg) {
			spin_lock_irqsave(hrrq->lock, flags);
			for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
				ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
				if (!ipr_cmnd_is_free(ipr_cmd)) {
					if (match(ipr_cmd, device)) {
						ipr_cmd->eh_comp = &comp;
						wait++;
					}
				}
			}
			spin_unlock_irqrestore(hrrq->lock, flags);
		}

		if (wait) {
			timeout = wait_for_completion_timeout(&comp, timeout);

			if (!timeout) {
				wait = 0;

				for_each_hrrq(hrrq, ioa_cfg) {
					spin_lock_irqsave(hrrq->lock, flags);
					for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
						ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
						if (!ipr_cmnd_is_free(ipr_cmd)) {
							if (match(ipr_cmd, device)) {
								ipr_cmd->eh_comp = NULL;
								wait++;
							}
						}
					}
					spin_unlock_irqrestore(hrrq->lock, flags);
				}

				if (wait)
					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
				return wait ? FAILED : SUCCESS;
			}
		}
	} while (wait);

	return SUCCESS;
}
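/*
 * Note: the error-handling entry points below (abort, device reset, host
 * reset) issue their cancel/reset and then use ipr_wait_for_ops() with a
 * match function (ipr_match_lun or ipr_match_res), so they do not return to
 * the midlayer until every outstanding command for the affected device has
 * actually completed back through the HRRQs.
 */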
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;
	int rc = SUCCESS;

	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter being reset as a result of error recovery.\n");

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason, and the reset failed. */
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		rc = FAILED;

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_device_reset - Reset the device
 * @ioa_cfg:	ioa config struct
 * @res:	resource entry struct
 *
 * This function issues a device reset to the affected device.
 * If the device is a SCSI device, a LUN reset will be sent
 * to the device first. If that does not work, a target reset
 * will be sent. If the device is a SATA device, a PHY reset will
 * be sent.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
			    struct ipr_resource_entry *res)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_ioarcb_ata_regs *regs;
	u32 ioasc;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	cmd_pkt = &ioarcb->cmd_pkt;

	if (ipr_cmd->ioa_cfg->sis64) {
		regs = &ipr_cmd->i.ata_ioadl.regs;
		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
	} else
		regs = &ioarcb->u.add_data.u.regs;

	ioarcb->res_handle = res->res_handle;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	if (ipr_is_gata(res)) {
		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
	}

	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
		if (ipr_cmd->ioa_cfg->sis64)
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
			       sizeof(struct ipr_ioasa_gata));
		else
			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
			       sizeof(struct ipr_ioasa_gata));
	}

	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
}
/**
 * ipr_sata_reset - Reset the SATA port
 * @link:	SATA link to reset
 * @classes:	class of the attached device
 * @deadline:	unused
 *
 * This function issues a SATA phy reset to the affected ATA link.
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
{
	struct ipr_sata_port *sata_port = link->ap->private_data;
	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
	struct ipr_resource_entry *res;
	unsigned long lock_flags = 0;
	int rc = -ENXIO, ret;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	res = sata_port->res;
	if (res) {
		rc = ipr_device_reset(ioa_cfg, res);
		*classes = res->ata_class;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
		if (ret != SUCCESS) {
			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

			wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		}
	} else
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	return rc;
}
/**
 * __ipr_eh_dev_reset - Reset the device
 * @scsi_cmd:	scsi command struct
 *
 * This function issues a device reset to the affected device.
 * A LUN reset will be sent to the device first. If that does
 * not work, a target reset will be sent.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ata_port *ap;
	int rc = 0, i;
	struct ipr_hrr_queue *hrrq;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/*
	 * If we are currently going through reset/reload, return failed. This will force the
	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
	 * reset to complete
	 */
	if (ioa_cfg->in_reset_reload)
		return FAILED;
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
			ipr_cmd = ioa_cfg->ipr_cmnd_list[i];

			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
				if (!ipr_cmd->qc)
					continue;
				if (ipr_cmnd_is_free(ipr_cmd))
					continue;

				ipr_cmd->done = ipr_sata_eh_done;
				if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}
	res->resetting_device = 1;
	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");

	if (ipr_is_gata(res) && res->sata_port) {
		ap = res->sata_port->ap;
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		ata_std_error_handler(ap);
		spin_lock_irq(scsi_cmd->device->host->host_lock);
	} else
		rc = ipr_device_reset(ioa_cfg, res);
	res->resetting_device = 0;
	res->reset_occurred = 1;

	return rc ? FAILED : SUCCESS;
}
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;

	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
	res = cmd->device->hostdata;

	if (!res)
		return FAILED;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	if (rc == SUCCESS) {
		if (ipr_is_gata(res) && res->sata_port)
			rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
		else
			rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
	}

	return rc;
}
/**
 * ipr_bus_reset_done - Op done function for bus reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a bus reset
 *
 * Return value:
 *	none
 **/
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_resource_entry *res;

	if (!ioa_cfg->sis64)
		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
				scsi_report_bus_reset(ioa_cfg->host, res->bus);
				break;
			}
		}

	/*
	 * If abort has not completed, indicate the reset has, else call the
	 * abort's done function to wake the sleeping eh thread
	 */
	if (ipr_cmd->sibling->sibling)
		ipr_cmd->sibling->sibling = NULL;
	else
		ipr_cmd->sibling->done(ipr_cmd->sibling);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_abort_timeout - An abort task has timed out
 * @t:	Timer context used to fetch ipr command struct
 *
 * This function handles when an abort task times out. If this
 * happens we issue a bus reset since we have resources tied
 * up that must be freed before returning to the midlayer.
 *
 * Return value:
 *	none
 **/
static void ipr_abort_timeout(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	struct ipr_cmnd *reset_cmd;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmd_pkt *cmd_pkt;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return;
	}

	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->sibling = reset_cmd;
	reset_cmd->sibling = ipr_cmd;
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;

	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
/**
 * ipr_cancel_op - Cancel specified op
 * @scsi_cmd:	scsi command struct
 *
 * This function cancels specified op.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_cmd_pkt *cmd_pkt;
	u32 ioasc, int_reg;
	int i, op_found = 0;
	struct ipr_hrr_queue *hrrq;

	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
	res = scsi_cmd->device->hostdata;

	/* If we are currently going through reset/reload, return failed.
	 * This will force the mid-layer to call ipr_eh_host_reset,
	 * which will then go to sleep and wait for the reset to complete
	 */
	if (ioa_cfg->in_reset_reload ||
	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return FAILED;
	if (!res)
		return FAILED;

	/*
	 * If we are aborting a timed out op, chances are that the timeout was caused
	 * by a still not detected EEH error. In such cases, reading a register will
	 * trigger the EEH recovery infrastructure.
	 */
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

	if (!ipr_is_gscsi(res))
		return FAILED;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
			if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
				if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
					op_found = 1;
					break;
				}
			}
		}
		spin_unlock(&hrrq->_lock);
	}

	if (!op_found)
		return SUCCESS;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_cmd->ioarcb.res_handle = res->res_handle;
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
	ipr_cmd->u.sdev = scsi_cmd->device;

	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
		    scsi_cmd->cmnd[0]);
	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	/*
	 * If the abort task timed out and we sent a bus reset, we will get
	 * one the following responses to the abort
	 */
	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED)
		ioasc = 0;

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	if (!ipr_is_naca_model(res))
		res->needs_sync_complete = 1;

	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
}
/**
 * ipr_scan_finished - Report whether scan is done
 * @shost:		scsi host struct
 * @elapsed_time:	elapsed time of the scan in jiffies
 *
 * Return value:
 *	0 if scan in progress / 1 if scan is complete
 **/
static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
	int rc = 0;

	spin_lock_irqsave(shost->host_lock, lock_flags);
	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
		rc = 1;
	if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
		rc = 1;
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	return rc;
}
/**
 * ipr_eh_abort - Abort a single op
 * @scsi_cmd:	scsi command struct
 *
 * Return value:
 *	SUCCESS / FAILED
 **/
static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
	unsigned long flags;
	int rc;
	struct ipr_ioa_cfg *ioa_cfg;

	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;

	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);

	if (rc == SUCCESS)
		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
	return rc;
}
/**
 * ipr_handle_other_interrupt - Handle "other" interrupts
 * @ioa_cfg:	ioa config struct
 * @int_reg:	interrupt register
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
					      u32 int_reg)
{
	irqreturn_t rc = IRQ_HANDLED;
	u32 int_mask_reg;

	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;

	/* If an interrupt on the adapter did not occur, ignore it.
	 * Or in the case of SIS 64, check for a stage change interrupt.
	 */
	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
		if (ioa_cfg->sis64) {
			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {

				/* clear stage change */
				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
				list_del(&ioa_cfg->reset_cmd->queue);
				del_timer(&ioa_cfg->reset_cmd->timer);
				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
				return IRQ_HANDLED;
			}
		}

		return IRQ_NONE;
	}

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* Mask the interrupt */
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		list_del(&ioa_cfg->reset_cmd->queue);
		del_timer(&ioa_cfg->reset_cmd->timer);
		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
		if (ioa_cfg->clear_isr) {
			if (ipr_debug && printk_ratelimit())
				dev_err(&ioa_cfg->pdev->dev,
					"Spurious interrupt detected. 0x%08X\n", int_reg);
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			return IRQ_NONE;
		}
	} else {
		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
			ioa_cfg->ioa_unit_checked = 1;
		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
			dev_err(&ioa_cfg->pdev->dev,
				"No Host RRQ. 0x%08X\n", int_reg);
		else
			dev_err(&ioa_cfg->pdev->dev,
				"Permanent IOA failure. 0x%08X\n", int_reg);

		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
			ioa_cfg->sdt_state = GET_DUMP;

		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	return rc;
}
/**
 * ipr_isr_eh - Interrupt service routine error handler
 * @ioa_cfg:	ioa config struct
 * @msg:	message to log
 * @number:	various meanings depending on the caller/message
 *
 * Return value:
 *	none
 **/
static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
			    struct list_head *doneq)
{
	u32 ioasc;
	u16 cmd_index;
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
	int num_hrrq = 0;

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrr_queue->allow_interrupts)
		return 0;

	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
	       hrr_queue->toggle_bit) {

		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;

		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
			     cmd_index < hrr_queue->min_cmd_id)) {
			ipr_isr_eh(ioa_cfg,
				   "Invalid response handle from IOA: ",
				   cmd_index);
			break;
		}

		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);

		list_move_tail(&ipr_cmd->queue, doneq);

		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
			hrr_queue->hrrq_curr++;
		} else {
			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
			hrr_queue->toggle_bit ^= 1u;
		}
		num_hrrq++;
		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	return num_hrrq;
}
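/*
 * Note on the budget argument: ipr_process_hrrq() is called with a positive
 * budget from the irq_poll path (ipr_iopoll below) so it stops after at most
 * "budget" completions, and with -1 from the hard interrupt handlers so it
 * drains the response queue completely.
 */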
static int ipr_iopoll(struct irq_poll *iop, int budget)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_hrr_queue *hrrq;
	struct ipr_cmnd *ipr_cmd, *temp;
	unsigned long hrrq_flags;
	int completed_ops;
	LIST_HEAD(doneq);

	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
	ioa_cfg = hrrq->ioa_cfg;

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);

	if (completed_ops < budget)
		irq_poll_complete(iop);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}

	return completed_ops;
}
/**
 * ipr_isr - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	u32 int_reg = 0;
	int num_hrrq = 0;
	int irq_none = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	while (1) {
		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
			rc = IRQ_HANDLED;

			if (!ioa_cfg->clear_isr)
				break;

			/* Clear the PCI interrupt */
			num_hrrq = 0;
			do {
				writel(IPR_PCII_HRRQ_UPDATED,
				       ioa_cfg->regs.clr_interrupt_reg32);
				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
				 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);

		} else if (rc == IRQ_NONE && irq_none == 0) {
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			irq_none++;
		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
			   int_reg & IPR_PCII_HRRQ_UPDATED) {
			ipr_isr_eh(ioa_cfg,
				   "Error clearing HRRQ: ", num_hrrq);
			rc = IRQ_HANDLED;
			break;
		} else
			break;
	}

	if (unlikely(rc == IRQ_NONE))
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}
/**
 * ipr_isr_mhrrq - Interrupt service routine
 * @irq:	irq number
 * @devp:	pointer to ioa config struct
 *
 * Return value:
 *	IRQ_NONE / IRQ_HANDLED
 **/
static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
{
	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
	unsigned long hrrq_flags = 0;
	struct ipr_cmnd *ipr_cmd, *temp;
	irqreturn_t rc = IRQ_NONE;
	LIST_HEAD(doneq);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);

	/* If interrupts are disabled, ignore the interrupt */
	if (!hrrq->allow_interrupts) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return IRQ_NONE;
	}

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit) {
			irq_poll_sched(&hrrq->iopoll);
			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
			return IRQ_HANDLED;
		}
	} else {
		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
		    hrrq->toggle_bit)

			if (ipr_process_hrrq(hrrq, -1, &doneq))
				rc = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
		list_del(&ipr_cmd->queue);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->fast_done(ipr_cmd);
	}
	return rc;
}
/**
 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	ioarcb->data_transfer_length = cpu_to_be32(length);
	ioarcb->ioadl_len =
		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
		ioadl_flags = IPR_IOADL_FLAGS_READ;

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
	}

	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
/**
 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	0 on success / -1 on failure
 **/
static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd)
{
	int i, nseg;
	struct scatterlist *sg;
	u32 length;
	u32 ioadl_flags = 0;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;

	length = scsi_bufflen(scsi_cmd);
	if (!length)
		return 0;

	nseg = scsi_dma_map(scsi_cmd);
	if (nseg < 0) {
		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
		return -1;
	}

	ipr_cmd->dma_use_sg = nseg;

	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
		ioarcb->data_transfer_length = cpu_to_be32(length);
		ioarcb->ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		ioadl_flags = IPR_IOADL_FLAGS_READ;
		ioarcb->read_data_transfer_length = cpu_to_be32(length);
		ioarcb->read_ioadl_len =
			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
	}

	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
		ioadl = ioarcb->u.add_data.u.ioadl;
		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
				    offsetof(struct ipr_ioarcb, u.add_data));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}

	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
		ioadl[i].flags_and_data_len =
			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
	}

	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
	return 0;
}
/**
 * __ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		scsi_cmd->result |= (DID_ERROR << 16);
		scmd_printk(KERN_ERR, scsi_cmd,
			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
	} else {
		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
	}

	if (res) {
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		res->in_erp = 0;
	}
	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_erp_done - Process completion of ERP for a device
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the sense buffer into the scsi_cmd
 * struct and pushes the scsi_done function.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

	if (ipr_cmd->ioa_cfg->sis64)
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
	else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
	}
}
/**
 * __ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
		__ipr_erp_done(ipr_cmd);
		return;
	}

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
	cmd_pkt->cdb[0] = REQUEST_SENSE;
	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);

	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
		   IPR_REQUEST_SENSE_TIMEOUT * 2);
}
/**
 * ipr_erp_request_sense - Send request sense to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a request sense to a device as a result
 * of a check condition.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_erp_request_sense(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}
/**
 * ipr_erp_cancel_all - Send cancel all to a device
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a cancel all to a device to clear the
 * queue. If we are running TCQ on the device, QERR is set to 1,
 * which means all outstanding ops have been dropped on the floor.
 * Cancel all will return them to us.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	struct ipr_cmd_pkt *cmd_pkt;

	res->in_erp = 1;

	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);

	if (!scsi_cmd->device->simple_tags) {
		__ipr_erp_request_sense(ipr_cmd);
		return;
	}

	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;

	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
		   IPR_CANCEL_ALL_TIMEOUT);
}
/**
 * ipr_dump_ioasa - Dump contents of IOASA
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 * @res:		resource entry struct
 *
 * This function is invoked by the interrupt handler when ops
 * fail. It will log the IOASA if appropriate. Only called
 * for GPDD ops.
 *
 * Return value:
 *	none
 **/
static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
{
	int i;
	u16 data_len;
	u32 ioasc, fd_ioasc;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	__be32 *ioasa_data = (__be32 *)ioasa;
	int error_index;

	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;

	if (0 == ioasc)
		return;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;

	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
		error_index = ipr_get_error(fd_ioasc);
	else
		error_index = ipr_get_error(ioasc);

	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
		/* Don't log an error if the IOA already logged one */
		if (ioasa->hdr.ilid != 0)
			return;

		if (!ipr_is_gscsi(res))
			return;

		if (ipr_error_table[error_index].log_ioasa == 0)
			return;
	}

	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);

	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
		data_len = sizeof(struct ipr_ioasa64);
	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
		data_len = sizeof(struct ipr_ioasa);

	ipr_err("IOASA Dump:\n");

	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(ioasa_data[i]),
			be32_to_cpu(ioasa_data[i+1]),
			be32_to_cpu(ioasa_data[i+2]),
			be32_to_cpu(ioasa_data[i+3]));
	}
}
/**
 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 * @ipr_cmd:	ipr command struct
 * @sense_buf:	sense data buffer
 *
 * Return value:
 *	none
 **/
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
{
	u32 failing_lba;
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
		return;

	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;

	if (ipr_is_vset_device(res) &&
	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);

		sense_buf[7] = 12;
		sense_buf[8] = 0;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);

		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;

		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);

		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
	} else {
		sense_buf[0] = 0x70;
		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);

		/* Illegal request */
		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
			sense_buf[7] = 10;	/* additional length */

			/* IOARCB was in error */
			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
				sense_buf[15] = 0xC0;
			else	/* Parameter data was invalid */
				sense_buf[15] = 0x80;

			sense_buf[16] =
			    ((IPR_FIELD_POINTER_MASK &
			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
			sense_buf[17] =
			    (IPR_FIELD_POINTER_MASK &
			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
		} else {
			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
				if (ipr_is_vset_device(res))
					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
				else
					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);

				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
				sense_buf[6] = failing_lba & 0x000000ff;
			}

			sense_buf[7] = 6;	/* additional length */
		}
	}
}
/**
 * ipr_get_autosense - Copy autosense data to sense buffer
 * @ipr_cmd:	ipr command struct
 *
 * This function copies the autosense buffer to the buffer
 * in the scsi_cmd, if there is autosense available.
 *
 * Return value:
 *	1 if autosense was available / 0 if not
 **/
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;

	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
		return 0;

	if (ipr_cmd->ioa_cfg->sis64)
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
			     SCSI_SENSE_BUFFERSIZE));
	else
		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
			     SCSI_SENSE_BUFFERSIZE));
	return 1;
}
/**
 * ipr_erp_start - Process an error response for a SCSI op
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * This function determines whether or not to initiate ERP
 * on the affected device.
 *
 * Return value:
 *	nothing
 **/
static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
			  struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;

	if (!res) {
		__ipr_scsi_eh_done(ipr_cmd);
		return;
	}

	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
		ipr_gen_sense(ipr_cmd);

	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

	switch (masked_ioasc) {
	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
		if (ipr_is_naca_model(res))
			scsi_cmd->result |= (DID_ABORT << 16);
		else
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_IR_RESOURCE_HANDLE:
	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		break;
	case IPR_IOASC_HW_SEL_TIMEOUT:
		scsi_cmd->result |= (DID_NO_CONNECT << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_SYNC_REQUIRED:
		if (!res->in_erp)
			res->needs_sync_complete = 1;
		scsi_cmd->result |= (DID_IMM_RETRY << 16);
		break;
	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
		/*
		 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
		 * so SCSI mid-layer and upper layers handle it accordingly.
		 */
		if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
			scsi_cmd->result |= (DID_PASSTHROUGH << 16);
		break;
	case IPR_IOASC_BUS_WAS_RESET:
	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
		/*
		 * Report the bus reset and ask for a retry. The device
		 * will give CC/UA the next command.
		 */
		if (!res->resetting_device)
			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
		scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_HW_DEV_BUS_STATUS:
		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
			if (!ipr_get_autosense(ipr_cmd)) {
				if (!ipr_is_naca_model(res)) {
					ipr_erp_cancel_all(ipr_cmd);
					return;
				}
			}
		}
		if (!ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
		break;
	case IPR_IOASC_IR_NON_OPTIMIZED:
		if (res->raw_mode) {
			res->raw_mode = 0;
			scsi_cmd->result |= (DID_IMM_RETRY << 16);
		} else
			scsi_cmd->result |= (DID_ERROR << 16);
		break;
	default:
		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
			scsi_cmd->result |= (DID_ERROR << 16);
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
			res->needs_sync_complete = 1;
		break;
	}

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_scsi_done - mid-layer done function
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
	unsigned long lock_flags;

	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));

	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
		scsi_dma_unmap(scsi_cmd);

		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
		scsi_cmd->scsi_done(scsi_cmd);
		if (ipr_cmd->eh_comp)
			complete(ipr_cmd->eh_comp);
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
	} else {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		spin_lock(&ipr_cmd->hrrq->_lock);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock(&ipr_cmd->hrrq->_lock);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	}
}
/**
 * ipr_queuecommand - Queue a mid-layer request
 * @shost:		scsi host struct
 * @scsi_cmd:	scsi command struct
 *
 * This function queues a request generated by the mid-layer.
 *
 * Return value:
 *	0 on success
 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
 **/
static int ipr_queuecommand(struct Scsi_Host *shost,
			    struct scsi_cmnd *scsi_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct ipr_resource_entry *res;
	struct ipr_ioarcb *ioarcb;
	struct ipr_cmnd *ipr_cmd;
	unsigned long hrrq_flags, lock_flags;
	int rc;
	struct ipr_hrr_queue *hrrq;
	int hrrq_id;

	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;

	scsi_cmd->result = (DID_OK << 16);
	res = scsi_cmd->device->hostdata;

	if (ipr_is_gata(res) && res->sata_port) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		return rc;
	}

	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
	hrrq = &ioa_cfg->hrrq[hrrq_id];

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	/*
	 * We are currently blocking all devices due to a host reset
	 * We have told the host to stop giving us new requests, but
	 * ERP ops don't count. FIXME
	 */
	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/*
	 * FIXME - Create scsi_set_host_offline interface
	 *  and the ioa_is_dead check can be removed
	 */
	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		goto err_nodev;
	}

	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
	if (ipr_cmd == NULL) {
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);

	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
	ioarcb = &ipr_cmd->ioarcb;

	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
	ipr_cmd->scsi_cmd = scsi_cmd;
	ipr_cmd->done = ipr_scsi_eh_done;

	if (ipr_is_gscsi(res)) {
		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;

		if (res->reset_occurred) {
			res->reset_occurred = 0;
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
		}
	}

	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;

		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
		if (scsi_cmd->flags & SCMD_TAGGED)
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
		else
			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
	}

	if (scsi_cmd->cmnd[0] >= 0xC0 &&
	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
	}
	if (res->raw_mode && ipr_is_af_dasd_device(res)) {
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;

		if (scsi_cmd->underflow == 0)
			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
	}

	if (ioa_cfg->sis64)
		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
	else
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);

	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		if (!rc)
			scsi_dma_unmap(scsi_cmd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	if (unlikely(hrrq->ioa_is_dead)) {
		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
		scsi_dma_unmap(scsi_cmd);
		goto err_nodev;
	}

	ioarcb->res_handle = res->res_handle;
	if (res->needs_sync_complete) {
		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
		res->needs_sync_complete = 0;
	}
	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
	ipr_send_command(ipr_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;

err_nodev:
	spin_lock_irqsave(hrrq->lock, hrrq_flags);
	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	scsi_cmd->result = (DID_NO_CONNECT << 16);
	scsi_cmd->scsi_done(scsi_cmd);
	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
	return 0;
}
/**
 * ipr_ioctl - IOCTL handler
 * @sdev:	scsi device struct
 * @cmd:	IOCTL cmd
 * @arg:	IOCTL arg
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	struct ipr_resource_entry *res;

	res = (struct ipr_resource_entry *)sdev->hostdata;
	if (res && ipr_is_gata(res)) {
		if (cmd == HDIO_GET_IDENTITY)
			return -ENOTTY;
		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
	}

	return -EINVAL;
}

/**
 * ipr_info - Get information about the card/driver
 * @scsi_host:	scsi host struct
 *
 * Return value:
 *	pointer to buffer with description string
 **/
static const char *ipr_ioa_info(struct Scsi_Host *host)
{
	static char buffer[512];
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long lock_flags = 0;

	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;

	spin_lock_irqsave(host->host_lock, lock_flags);
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	return buffer;
}
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IPR",
	.info = ipr_ioa_info,
	.ioctl = ipr_ioctl,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.scan_finished = ipr_scan_finished,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.bios_param = ipr_biosparam,
	.can_queue = IPR_MAX_COMMANDS,
	.this_id = -1,
	.sg_tablesize = IPR_MAX_SGLIST,
	.max_sectors = IPR_IOA_MAX_SECTORS,
	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
	.proc_name = IPR_NAME,
};
6724 * ipr_ata_phy_reset - libata phy_reset handler
6725 * @ap: ata port to reset
6728 static void ipr_ata_phy_reset(struct ata_port
*ap
)
6730 unsigned long flags
;
6731 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6732 struct ipr_resource_entry
*res
= sata_port
->res
;
6733 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6737 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6738 while (ioa_cfg
->in_reset_reload
) {
6739 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6740 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6741 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6744 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].allow_cmds
)
6747 rc
= ipr_device_reset(ioa_cfg
, res
);
6750 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6754 ap
->link
.device
[0].class = res
->ata_class
;
6755 if (ap
->link
.device
[0].class == ATA_DEV_UNKNOWN
)
6756 ap
->link
.device
[0].class = ATA_DEV_NONE
;
6759 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6764 * ipr_ata_post_internal - Cleanup after an internal command
6765 * @qc: ATA queued command
6770 static void ipr_ata_post_internal(struct ata_queued_cmd
*qc
)
6772 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6773 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6774 struct ipr_cmnd
*ipr_cmd
;
6775 struct ipr_hrr_queue
*hrrq
;
6776 unsigned long flags
;
6778 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6779 while (ioa_cfg
->in_reset_reload
) {
6780 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
6781 wait_event(ioa_cfg
->reset_wait_q
, !ioa_cfg
->in_reset_reload
);
6782 spin_lock_irqsave(ioa_cfg
->host
->host_lock
, flags
);
6785 for_each_hrrq(hrrq
, ioa_cfg
) {
6786 spin_lock(&hrrq
->_lock
);
6787 list_for_each_entry(ipr_cmd
, &hrrq
->hrrq_pending_q
, queue
) {
6788 if (ipr_cmd
->qc
== qc
) {
6789 ipr_device_reset(ioa_cfg
, sata_port
->res
);
6793 spin_unlock(&hrrq
->_lock
);
6795 spin_unlock_irqrestore(ioa_cfg
->host
->host_lock
, flags
);
/**
 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
 * @regs:	destination
 * @tf:	source ATA taskfile
 *
 * Return value:
 *	none
 **/
static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
			     struct ata_taskfile *tf)
{
	regs->feature = tf->feature;
	regs->nsect = tf->nsect;
	regs->lbal = tf->lbal;
	regs->lbam = tf->lbam;
	regs->lbah = tf->lbah;
	regs->device = tf->device;
	regs->command = tf->command;
	regs->hob_feature = tf->hob_feature;
	regs->hob_nsect = tf->hob_nsect;
	regs->hob_lbal = tf->hob_lbal;
	regs->hob_lbam = tf->hob_lbam;
	regs->hob_lbah = tf->hob_lbah;
	regs->ctl = tf->ctl;
}
6825 * ipr_sata_done - done function for SATA commands
6826 * @ipr_cmd: ipr command struct
6828 * This function is invoked by the interrupt handler for
6829 * ops generated by the SCSI mid-layer to SATA devices
6834 static void ipr_sata_done(struct ipr_cmnd
*ipr_cmd
)
6836 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
6837 struct ata_queued_cmd
*qc
= ipr_cmd
->qc
;
6838 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
6839 struct ipr_resource_entry
*res
= sata_port
->res
;
6840 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
6842 spin_lock(&ipr_cmd
->hrrq
->_lock
);
6843 if (ipr_cmd
->ioa_cfg
->sis64
)
6844 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa64
.u
.gata
,
6845 sizeof(struct ipr_ioasa_gata
));
6847 memcpy(&sata_port
->ioasa
, &ipr_cmd
->s
.ioasa
.u
.gata
,
6848 sizeof(struct ipr_ioasa_gata
));
6849 ipr_dump_ioasa(ioa_cfg
, ipr_cmd
, res
);
6851 if (be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc_specific
) & IPR_ATA_DEVICE_WAS_RESET
)
6852 scsi_report_device_reset(ioa_cfg
->host
, res
->bus
, res
->target
);
6854 if (IPR_IOASC_SENSE_KEY(ioasc
) > RECOVERED_ERROR
)
6855 qc
->err_mask
|= __ac_err_mask(sata_port
->ioasa
.status
);
6857 qc
->err_mask
|= ac_err_mask(sata_port
->ioasa
.status
);
6858 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
6859 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
6860 ata_qc_complete(qc
);
6864 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6865 * @ipr_cmd: ipr command struct
6866 * @qc: ATA queued command
6869 static void ipr_build_ata_ioadl64(struct ipr_cmnd
*ipr_cmd
,
6870 struct ata_queued_cmd
*qc
)
6872 u32 ioadl_flags
= 0;
6873 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6874 struct ipr_ioadl64_desc
*ioadl64
= ipr_cmd
->i
.ata_ioadl
.ioadl64
;
6875 struct ipr_ioadl64_desc
*last_ioadl64
= NULL
;
6876 int len
= qc
->nbytes
;
6877 struct scatterlist
*sg
;
6879 dma_addr_t dma_addr
= ipr_cmd
->dma_addr
;
6884 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6885 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6886 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6887 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
)
6888 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6890 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6892 cpu_to_be32(sizeof(struct ipr_ioadl64_desc
) * ipr_cmd
->dma_use_sg
);
6893 ioarcb
->u
.sis64_addr_data
.data_ioadl_addr
=
6894 cpu_to_be64(dma_addr
+ offsetof(struct ipr_cmnd
, i
.ata_ioadl
.ioadl64
));
6896 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6897 ioadl64
->flags
= cpu_to_be32(ioadl_flags
);
6898 ioadl64
->data_len
= cpu_to_be32(sg_dma_len(sg
));
6899 ioadl64
->address
= cpu_to_be64(sg_dma_address(sg
));
6901 last_ioadl64
= ioadl64
;
6905 if (likely(last_ioadl64
))
6906 last_ioadl64
->flags
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6910 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6911 * @ipr_cmd: ipr command struct
6912 * @qc: ATA queued command
6915 static void ipr_build_ata_ioadl(struct ipr_cmnd
*ipr_cmd
,
6916 struct ata_queued_cmd
*qc
)
6918 u32 ioadl_flags
= 0;
6919 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
6920 struct ipr_ioadl_desc
*ioadl
= ipr_cmd
->i
.ioadl
;
6921 struct ipr_ioadl_desc
*last_ioadl
= NULL
;
6922 int len
= qc
->nbytes
;
6923 struct scatterlist
*sg
;
6929 if (qc
->dma_dir
== DMA_TO_DEVICE
) {
6930 ioadl_flags
= IPR_IOADL_FLAGS_WRITE
;
6931 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
6932 ioarcb
->data_transfer_length
= cpu_to_be32(len
);
6934 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6935 } else if (qc
->dma_dir
== DMA_FROM_DEVICE
) {
6936 ioadl_flags
= IPR_IOADL_FLAGS_READ
;
6937 ioarcb
->read_data_transfer_length
= cpu_to_be32(len
);
6938 ioarcb
->read_ioadl_len
=
6939 cpu_to_be32(sizeof(struct ipr_ioadl_desc
) * ipr_cmd
->dma_use_sg
);
6942 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
6943 ioadl
->flags_and_data_len
= cpu_to_be32(ioadl_flags
| sg_dma_len(sg
));
6944 ioadl
->address
= cpu_to_be32(sg_dma_address(sg
));
6950 if (likely(last_ioadl
))
6951 last_ioadl
->flags_and_data_len
|= cpu_to_be32(IPR_IOADL_FLAGS_LAST
);
6955 * ipr_qc_defer - Get a free ipr_cmd
6956 * @qc: queued command
6961 static int ipr_qc_defer(struct ata_queued_cmd
*qc
)
6963 struct ata_port
*ap
= qc
->ap
;
6964 struct ipr_sata_port
*sata_port
= ap
->private_data
;
6965 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
6966 struct ipr_cmnd
*ipr_cmd
;
6967 struct ipr_hrr_queue
*hrrq
;
6970 hrrq_id
= ipr_get_hrrq_index(ioa_cfg
);
6971 hrrq
= &ioa_cfg
->hrrq
[hrrq_id
];
6973 qc
->lldd_task
= NULL
;
6974 spin_lock(&hrrq
->_lock
);
6975 if (unlikely(hrrq
->ioa_is_dead
)) {
6976 spin_unlock(&hrrq
->_lock
);
6980 if (unlikely(!hrrq
->allow_cmds
)) {
6981 spin_unlock(&hrrq
->_lock
);
6982 return ATA_DEFER_LINK
;
6985 ipr_cmd
= __ipr_get_free_ipr_cmnd(hrrq
);
6986 if (ipr_cmd
== NULL
) {
6987 spin_unlock(&hrrq
->_lock
);
6988 return ATA_DEFER_LINK
;
6991 qc
->lldd_task
= ipr_cmd
;
6992 spin_unlock(&hrrq
->_lock
);
6997 * ipr_qc_issue - Issue a SATA qc to a device
6998 * @qc: queued command
7003 static unsigned int ipr_qc_issue(struct ata_queued_cmd
*qc
)
7005 struct ata_port
*ap
= qc
->ap
;
7006 struct ipr_sata_port
*sata_port
= ap
->private_data
;
7007 struct ipr_resource_entry
*res
= sata_port
->res
;
7008 struct ipr_ioa_cfg
*ioa_cfg
= sata_port
->ioa_cfg
;
7009 struct ipr_cmnd
*ipr_cmd
;
7010 struct ipr_ioarcb
*ioarcb
;
7011 struct ipr_ioarcb_ata_regs
*regs
;
7013 if (qc
->lldd_task
== NULL
)
7016 ipr_cmd
= qc
->lldd_task
;
7017 if (ipr_cmd
== NULL
)
7018 return AC_ERR_SYSTEM
;
7020 qc
->lldd_task
= NULL
;
7021 spin_lock(&ipr_cmd
->hrrq
->_lock
);
7022 if (unlikely(!ipr_cmd
->hrrq
->allow_cmds
||
7023 ipr_cmd
->hrrq
->ioa_is_dead
)) {
7024 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7025 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
7026 return AC_ERR_SYSTEM
;
7029 ipr_init_ipr_cmnd(ipr_cmd
, ipr_lock_and_done
);
7030 ioarcb
= &ipr_cmd
->ioarcb
;
7032 if (ioa_cfg
->sis64
) {
7033 regs
= &ipr_cmd
->i
.ata_ioadl
.regs
;
7034 ioarcb
->add_cmd_parms_offset
= cpu_to_be16(sizeof(*ioarcb
));
7036 regs
= &ioarcb
->u
.add_data
.u
.regs
;
7038 memset(regs
, 0, sizeof(*regs
));
7039 ioarcb
->add_cmd_parms_len
= cpu_to_be16(sizeof(*regs
));
7041 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_pending_q
);
7043 ipr_cmd
->done
= ipr_sata_done
;
7044 ipr_cmd
->ioarcb
.res_handle
= res
->res_handle
;
7045 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_ATA_PASSTHRU
;
7046 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_LINK_DESC
;
7047 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_NO_ULEN_CHK
;
7048 ipr_cmd
->dma_use_sg
= qc
->n_elem
;
7051 ipr_build_ata_ioadl64(ipr_cmd
, qc
);
7053 ipr_build_ata_ioadl(ipr_cmd
, qc
);
7055 regs
->flags
|= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION
;
7056 ipr_copy_sata_tf(regs
, &qc
->tf
);
7057 memcpy(ioarcb
->cmd_pkt
.cdb
, qc
->cdb
, IPR_MAX_CDB_LEN
);
7058 ipr_trc_hook(ipr_cmd
, IPR_TRACE_START
, IPR_GET_RES_PHYS_LOC(res
));
7060 switch (qc
->tf
.protocol
) {
7061 case ATA_PROT_NODATA
:
7066 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
7069 case ATAPI_PROT_PIO
:
7070 case ATAPI_PROT_NODATA
:
7071 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
7074 case ATAPI_PROT_DMA
:
7075 regs
->flags
|= IPR_ATA_FLAG_PACKET_CMD
;
7076 regs
->flags
|= IPR_ATA_FLAG_XFER_TYPE_DMA
;
7081 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
7082 return AC_ERR_INVALID
;
7085 ipr_send_command(ipr_cmd
);
7086 spin_unlock(&ipr_cmd
->hrrq
->_lock
);
7092 * ipr_qc_fill_rtf - Read result TF
7093 * @qc: ATA queued command
7098 static bool ipr_qc_fill_rtf(struct ata_queued_cmd
*qc
)
7100 struct ipr_sata_port
*sata_port
= qc
->ap
->private_data
;
7101 struct ipr_ioasa_gata
*g
= &sata_port
->ioasa
;
7102 struct ata_taskfile
*tf
= &qc
->result_tf
;
7104 tf
->feature
= g
->error
;
7105 tf
->nsect
= g
->nsect
;
7109 tf
->device
= g
->device
;
7110 tf
->command
= g
->status
;
7111 tf
->hob_nsect
= g
->hob_nsect
;
7112 tf
->hob_lbal
= g
->hob_lbal
;
7113 tf
->hob_lbam
= g
->hob_lbam
;
7114 tf
->hob_lbah
= g
->hob_lbah
;
static struct ata_port_operations ipr_sata_ops = {
	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_prep = ata_noop_qc_prep,
	.qc_defer = ipr_qc_defer,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_start = ata_sas_port_start,
	.port_stop = ata_sas_port_stop
};

static struct ata_port_info sata_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
			  ATA_FLAG_SAS_HOST,
	.pio_mask	= ATA_PIO4_ONLY,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &ipr_sata_ops
};
#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
	/* PVR values elided in this extract */
};

/**
 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
 * @ioa_cfg:	ioa cfg struct
 *
 * Adapters that use Gemstone revision < 3.1 do not work reliably on
 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
 *
 * Return value:
 *	1 if adapter is not supported / 0 if adapter is supported
 **/
static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
			if (pvr_version_is(ipr_blocked_processors[i]))
				return 1;
		}
	}
	return 0;
}
#else
#define ipr_invalid_adapter(ioa_cfg) 0
#endif
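/*
 * Descriptive note (added): ipr_blocked_processors[] (its entries are elided
 * in this extract) holds the PVR values of the pSeries processors on which
 * early Gemstone adapters (type 0x5702, PCI revision < 4) are unreliable;
 * pvr_version_is() compares each entry against the running CPU.  On
 * non-pSeries builds the check compiles away to 0 via the #define above.
 */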
7180 * ipr_ioa_bringdown_done - IOA bring down completion.
7181 * @ipr_cmd: ipr command struct
7183 * This function processes the completion of an adapter bring down.
7184 * It wakes any reset sleepers.
7189 static int ipr_ioa_bringdown_done(struct ipr_cmnd
*ipr_cmd
)
7191 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7195 if (!ioa_cfg
->hrrq
[IPR_INIT_HRRQ
].removing_ioa
) {
7197 ioa_cfg
->scsi_unblock
= 1;
7198 schedule_work(&ioa_cfg
->work_q
);
7201 ioa_cfg
->in_reset_reload
= 0;
7202 ioa_cfg
->reset_retries
= 0;
7203 for (i
= 0; i
< ioa_cfg
->hrrq_num
; i
++) {
7204 spin_lock(&ioa_cfg
->hrrq
[i
]._lock
);
7205 ioa_cfg
->hrrq
[i
].ioa_is_dead
= 1;
7206 spin_unlock(&ioa_cfg
->hrrq
[i
]._lock
);
7210 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7211 wake_up_all(&ioa_cfg
->reset_wait_q
);
7214 return IPR_RC_JOB_RETURN
;
7218 * ipr_ioa_reset_done - IOA reset completion.
7219 * @ipr_cmd: ipr command struct
7221 * This function processes the completion of an adapter reset.
7222 * It schedules any necessary mid-layer add/removes and
7223 * wakes any reset sleepers.
7228 static int ipr_ioa_reset_done(struct ipr_cmnd
*ipr_cmd
)
7230 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7231 struct ipr_resource_entry
*res
;
7235 ioa_cfg
->in_reset_reload
= 0;
7236 for (j
= 0; j
< ioa_cfg
->hrrq_num
; j
++) {
7237 spin_lock(&ioa_cfg
->hrrq
[j
]._lock
);
7238 ioa_cfg
->hrrq
[j
].allow_cmds
= 1;
7239 spin_unlock(&ioa_cfg
->hrrq
[j
]._lock
);
7242 ioa_cfg
->reset_cmd
= NULL
;
7243 ioa_cfg
->doorbell
|= IPR_RUNTIME_RESET
;
7245 list_for_each_entry(res
, &ioa_cfg
->used_res_q
, queue
) {
7246 if (res
->add_to_ml
|| res
->del_from_ml
) {
7251 schedule_work(&ioa_cfg
->work_q
);
7253 for (j
= 0; j
< IPR_NUM_HCAMS
; j
++) {
7254 list_del_init(&ioa_cfg
->hostrcb
[j
]->queue
);
7255 if (j
< IPR_NUM_LOG_HCAMS
)
7256 ipr_send_hcam(ioa_cfg
,
7257 IPR_HCAM_CDB_OP_CODE_LOG_DATA
,
7258 ioa_cfg
->hostrcb
[j
]);
7260 ipr_send_hcam(ioa_cfg
,
7261 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE
,
7262 ioa_cfg
->hostrcb
[j
]);
7265 scsi_report_bus_reset(ioa_cfg
->host
, IPR_VSET_BUS
);
7266 dev_info(&ioa_cfg
->pdev
->dev
, "IOA initialized.\n");
7268 ioa_cfg
->reset_retries
= 0;
7269 list_add_tail(&ipr_cmd
->queue
, &ipr_cmd
->hrrq
->hrrq_free_q
);
7270 wake_up_all(&ioa_cfg
->reset_wait_q
);
7272 ioa_cfg
->scsi_unblock
= 1;
7273 schedule_work(&ioa_cfg
->work_q
);
7275 return IPR_RC_JOB_RETURN
;
/**
 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
 * @supported_dev:	supported device struct
 * @vpids:	vendor product id struct
 *
 * Return value:
 *	none
 **/
static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
				 struct ipr_std_inq_vpids *vpids)
{
	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
	supported_dev->num_records = 1;
	supported_dev->data_length =
		cpu_to_be16(sizeof(struct ipr_supported_device));
	supported_dev->reserved = 0;
}
7298 * ipr_set_supported_devs - Send Set Supported Devices for a device
7299 * @ipr_cmd: ipr command struct
7301 * This function sends a Set Supported Devices to the adapter
7304 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7306 static int ipr_set_supported_devs(struct ipr_cmnd
*ipr_cmd
)
7308 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7309 struct ipr_supported_device
*supp_dev
= &ioa_cfg
->vpd_cbs
->supp_dev
;
7310 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7311 struct ipr_resource_entry
*res
= ipr_cmd
->u
.res
;
7313 ipr_cmd
->job_step
= ipr_ioa_reset_done
;
7315 list_for_each_entry_continue(res
, &ioa_cfg
->used_res_q
, queue
) {
7316 if (!ipr_is_scsi_disk(res
))
7319 ipr_cmd
->u
.res
= res
;
7320 ipr_set_sup_dev_dflt(supp_dev
, &res
->std_inq_data
.vpids
);
7322 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7323 ioarcb
->cmd_pkt
.flags_hi
|= IPR_FLAGS_HI_WRITE_NOT_READ
;
7324 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7326 ioarcb
->cmd_pkt
.cdb
[0] = IPR_SET_SUPPORTED_DEVICES
;
7327 ioarcb
->cmd_pkt
.cdb
[1] = IPR_SET_ALL_SUPPORTED_DEVICES
;
7328 ioarcb
->cmd_pkt
.cdb
[7] = (sizeof(struct ipr_supported_device
) >> 8) & 0xff;
7329 ioarcb
->cmd_pkt
.cdb
[8] = sizeof(struct ipr_supported_device
) & 0xff;
7331 ipr_init_ioadl(ipr_cmd
,
7332 ioa_cfg
->vpd_cbs_dma
+
7333 offsetof(struct ipr_misc_cbs
, supp_dev
),
7334 sizeof(struct ipr_supported_device
),
7335 IPR_IOADL_FLAGS_WRITE_LAST
);
7337 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7338 IPR_SET_SUP_DEVICE_TIMEOUT
);
7340 if (!ioa_cfg
->sis64
)
7341 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7343 return IPR_RC_JOB_RETURN
;
7347 return IPR_RC_JOB_CONTINUE
;
7351 * ipr_get_mode_page - Locate specified mode page
7352 * @mode_pages: mode page buffer
7353 * @page_code: page code to find
7354 * @len: minimum required length for mode page
7357 * pointer to mode page / NULL on failure
7359 static void *ipr_get_mode_page(struct ipr_mode_pages
*mode_pages
,
7360 u32 page_code
, u32 len
)
7362 struct ipr_mode_page_hdr
*mode_hdr
;
7366 if (!mode_pages
|| (mode_pages
->hdr
.length
== 0))
7369 length
= (mode_pages
->hdr
.length
+ 1) - 4 - mode_pages
->hdr
.block_desc_len
;
7370 mode_hdr
= (struct ipr_mode_page_hdr
*)
7371 (mode_pages
->data
+ mode_pages
->hdr
.block_desc_len
);
7374 if (IPR_GET_MODE_PAGE_CODE(mode_hdr
) == page_code
) {
7375 if (mode_hdr
->page_length
>= (len
- sizeof(struct ipr_mode_page_hdr
)))
7379 page_length
= (sizeof(struct ipr_mode_page_hdr
) +
7380 mode_hdr
->page_length
);
7381 length
-= page_length
;
7382 mode_hdr
= (struct ipr_mode_page_hdr
*)
7383 ((unsigned long)mode_hdr
+ page_length
);
7390 * ipr_check_term_power - Check for term power errors
7391 * @ioa_cfg: ioa config struct
7392 * @mode_pages: IOAFP mode pages buffer
7394 * Check the IOAFP's mode page 28 for term power errors
7399 static void ipr_check_term_power(struct ipr_ioa_cfg
*ioa_cfg
,
7400 struct ipr_mode_pages
*mode_pages
)
7404 struct ipr_dev_bus_entry
*bus
;
7405 struct ipr_mode_page28
*mode_page
;
7407 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7408 sizeof(struct ipr_mode_page28
));
7410 entry_length
= mode_page
->entry_length
;
7412 bus
= mode_page
->bus
;
7414 for (i
= 0; i
< mode_page
->num_entries
; i
++) {
7415 if (bus
->flags
& IPR_SCSI_ATTR_NO_TERM_PWR
) {
7416 dev_err(&ioa_cfg
->pdev
->dev
,
7417 "Term power is absent on scsi bus %d\n",
7421 bus
= (struct ipr_dev_bus_entry
*)((char *)bus
+ entry_length
);
7426 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7427 * @ioa_cfg: ioa config struct
7429 * Looks through the config table checking for SES devices. If
7430 * the SES device is in the SES table indicating a maximum SCSI
7431 * bus speed, the speed is limited for the bus.
7436 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg
*ioa_cfg
)
7441 for (i
= 0; i
< IPR_MAX_NUM_BUSES
; i
++) {
7442 max_xfer_rate
= ipr_get_max_scsi_speed(ioa_cfg
, i
,
7443 ioa_cfg
->bus_attr
[i
].bus_width
);
7445 if (max_xfer_rate
< ioa_cfg
->bus_attr
[i
].max_xfer_rate
)
7446 ioa_cfg
->bus_attr
[i
].max_xfer_rate
= max_xfer_rate
;
7451 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7452 * @ioa_cfg: ioa config struct
7453 * @mode_pages: mode page 28 buffer
7455 * Updates mode page 28 based on driver configuration
7460 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg
*ioa_cfg
,
7461 struct ipr_mode_pages
*mode_pages
)
7463 int i
, entry_length
;
7464 struct ipr_dev_bus_entry
*bus
;
7465 struct ipr_bus_attributes
*bus_attr
;
7466 struct ipr_mode_page28
*mode_page
;
7468 mode_page
= ipr_get_mode_page(mode_pages
, 0x28,
7469 sizeof(struct ipr_mode_page28
));
7471 entry_length
= mode_page
->entry_length
;
7473 /* Loop for each device bus entry */
7474 for (i
= 0, bus
= mode_page
->bus
;
7475 i
< mode_page
->num_entries
;
7476 i
++, bus
= (struct ipr_dev_bus_entry
*)((u8
*)bus
+ entry_length
)) {
7477 if (bus
->res_addr
.bus
> IPR_MAX_NUM_BUSES
) {
7478 dev_err(&ioa_cfg
->pdev
->dev
,
7479 "Invalid resource address reported: 0x%08X\n",
7480 IPR_GET_PHYS_LOC(bus
->res_addr
));
7484 bus_attr
= &ioa_cfg
->bus_attr
[i
];
7485 bus
->extended_reset_delay
= IPR_EXTENDED_RESET_DELAY
;
7486 bus
->bus_width
= bus_attr
->bus_width
;
7487 bus
->max_xfer_rate
= cpu_to_be32(bus_attr
->max_xfer_rate
);
7488 bus
->flags
&= ~IPR_SCSI_ATTR_QAS_MASK
;
7489 if (bus_attr
->qas_enabled
)
7490 bus
->flags
|= IPR_SCSI_ATTR_ENABLE_QAS
;
7492 bus
->flags
|= IPR_SCSI_ATTR_DISABLE_QAS
;
/**
 * ipr_build_mode_select - Build a mode select command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send command to
 * @parm:		Byte 2 of Mode Sense command
 * @dma_addr:	DMA buffer address
 * @xfer_len:	data transfer length
 *
 * Return value:
 *	none
 **/
static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
				  __be32 res_handle, u8 parm,
				  dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->res_handle = res_handle;
	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
	ioarcb->cmd_pkt.cdb[1] = parm;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
}
7524 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7525 * @ipr_cmd: ipr command struct
7527 * This function sets up the SCSI bus attributes and sends
7528 * a Mode Select for Page 28 to activate them.
7533 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd
*ipr_cmd
)
7535 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7536 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7540 ipr_scsi_bus_speed_limit(ioa_cfg
);
7541 ipr_check_term_power(ioa_cfg
, mode_pages
);
7542 ipr_modify_ioafp_mode_page_28(ioa_cfg
, mode_pages
);
7543 length
= mode_pages
->hdr
.length
+ 1;
7544 mode_pages
->hdr
.length
= 0;
7546 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7547 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7550 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7551 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7552 struct ipr_resource_entry
, queue
);
7553 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7556 return IPR_RC_JOB_RETURN
;
7560 * ipr_build_mode_sense - Builds a mode sense command
7561 * @ipr_cmd: ipr command struct
7562 * @res: resource entry struct
7563 * @parm: Byte 2 of mode sense command
7564 * @dma_addr: DMA address of mode sense buffer
7565 * @xfer_len: Size of DMA buffer
7570 static void ipr_build_mode_sense(struct ipr_cmnd
*ipr_cmd
,
7572 u8 parm
, dma_addr_t dma_addr
, u8 xfer_len
)
7574 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7576 ioarcb
->res_handle
= res_handle
;
7577 ioarcb
->cmd_pkt
.cdb
[0] = MODE_SENSE
;
7578 ioarcb
->cmd_pkt
.cdb
[2] = parm
;
7579 ioarcb
->cmd_pkt
.cdb
[4] = xfer_len
;
7580 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_SCSICDB
;
7582 ipr_init_ioadl(ipr_cmd
, dma_addr
, xfer_len
, IPR_IOADL_FLAGS_READ_LAST
);
/**
 * ipr_reset_cmd_failed - Handle failure of IOA reset command
 * @ipr_cmd:	ipr command struct
 *
 * This function handles the failure of an IOA bringup command.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	dev_err(&ioa_cfg->pdev->dev,
		"0x%02X failed with IOASC: 0x%08X\n",
		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);

	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
	return IPR_RC_JOB_RETURN;
}
7609 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7610 * @ipr_cmd: ipr command struct
7612 * This function handles the failure of a Mode Sense to the IOAFP.
7613 * Some adapters do not handle all mode pages.
7616 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7618 static int ipr_reset_mode_sense_failed(struct ipr_cmnd
*ipr_cmd
)
7620 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7621 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7623 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7624 ipr_cmd
->job_step
= ipr_set_supported_devs
;
7625 ipr_cmd
->u
.res
= list_entry(ioa_cfg
->used_res_q
.next
,
7626 struct ipr_resource_entry
, queue
);
7627 return IPR_RC_JOB_CONTINUE
;
7630 return ipr_reset_cmd_failed(ipr_cmd
);
7634 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7635 * @ipr_cmd: ipr command struct
7637 * This function send a Page 28 mode sense to the IOA to
7638 * retrieve SCSI bus attributes.
7643 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd
*ipr_cmd
)
7645 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7648 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7649 0x28, ioa_cfg
->vpd_cbs_dma
+
7650 offsetof(struct ipr_misc_cbs
, mode_pages
),
7651 sizeof(struct ipr_mode_pages
));
7653 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page28
;
7654 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_failed
;
7656 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7659 return IPR_RC_JOB_RETURN
;
7663 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7664 * @ipr_cmd: ipr command struct
7666 * This function enables dual IOA RAID support if possible.
7671 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd
*ipr_cmd
)
7673 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7674 struct ipr_mode_pages
*mode_pages
= &ioa_cfg
->vpd_cbs
->mode_pages
;
7675 struct ipr_mode_page24
*mode_page
;
7679 mode_page
= ipr_get_mode_page(mode_pages
, 0x24,
7680 sizeof(struct ipr_mode_page24
));
7683 mode_page
->flags
|= IPR_ENABLE_DUAL_IOA_AF
;
7685 length
= mode_pages
->hdr
.length
+ 1;
7686 mode_pages
->hdr
.length
= 0;
7688 ipr_build_mode_select(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
), 0x11,
7689 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, mode_pages
),
7692 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7693 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7696 return IPR_RC_JOB_RETURN
;
7700 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7701 * @ipr_cmd: ipr command struct
7703 * This function handles the failure of a Mode Sense to the IOAFP.
7704 * Some adapters do not handle all mode pages.
7707 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7709 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd
*ipr_cmd
)
7711 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7713 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
) {
7714 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7715 return IPR_RC_JOB_CONTINUE
;
7718 return ipr_reset_cmd_failed(ipr_cmd
);
7722 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7723 * @ipr_cmd: ipr command struct
7725 * This function send a mode sense to the IOA to retrieve
7726 * the IOA Advanced Function Control mode page.
7731 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd
*ipr_cmd
)
7733 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7736 ipr_build_mode_sense(ipr_cmd
, cpu_to_be32(IPR_IOA_RES_HANDLE
),
7737 0x24, ioa_cfg
->vpd_cbs_dma
+
7738 offsetof(struct ipr_misc_cbs
, mode_pages
),
7739 sizeof(struct ipr_mode_pages
));
7741 ipr_cmd
->job_step
= ipr_ioafp_mode_select_page24
;
7742 ipr_cmd
->job_step_failed
= ipr_reset_mode_sense_page24_failed
;
7744 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7747 return IPR_RC_JOB_RETURN
;
7751 * ipr_init_res_table - Initialize the resource table
7752 * @ipr_cmd: ipr command struct
7754 * This function looks through the existing resource table, comparing
7755 * it with the config table. This function will take care of old/new
7756 * devices and schedule adding/removing them from the mid-layer
7760 * IPR_RC_JOB_CONTINUE
7762 static int ipr_init_res_table(struct ipr_cmnd
*ipr_cmd
)
7764 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7765 struct ipr_resource_entry
*res
, *temp
;
7766 struct ipr_config_table_entry_wrapper cfgtew
;
7767 int entries
, found
, flag
, i
;
7772 flag
= ioa_cfg
->u
.cfg_table64
->hdr64
.flags
;
7774 flag
= ioa_cfg
->u
.cfg_table
->hdr
.flags
;
7776 if (flag
& IPR_UCODE_DOWNLOAD_REQ
)
7777 dev_err(&ioa_cfg
->pdev
->dev
, "Microcode download required\n");
7779 list_for_each_entry_safe(res
, temp
, &ioa_cfg
->used_res_q
, queue
)
7780 list_move_tail(&res
->queue
, &old_res
);
7783 entries
= be16_to_cpu(ioa_cfg
->u
.cfg_table64
->hdr64
.num_entries
);
7785 entries
= ioa_cfg
->u
.cfg_table
->hdr
.num_entries
;
7787 for (i
= 0; i
< entries
; i
++) {
7789 cfgtew
.u
.cfgte64
= &ioa_cfg
->u
.cfg_table64
->dev
[i
];
7791 cfgtew
.u
.cfgte
= &ioa_cfg
->u
.cfg_table
->dev
[i
];
7794 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7795 if (ipr_is_same_device(res
, &cfgtew
)) {
7796 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7803 if (list_empty(&ioa_cfg
->free_res_q
)) {
7804 dev_err(&ioa_cfg
->pdev
->dev
, "Too many devices attached\n");
7809 res
= list_entry(ioa_cfg
->free_res_q
.next
,
7810 struct ipr_resource_entry
, queue
);
7811 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7812 ipr_init_res_entry(res
, &cfgtew
);
7814 } else if (res
->sdev
&& (ipr_is_vset_device(res
) || ipr_is_scsi_disk(res
)))
7815 res
->sdev
->allow_restart
= 1;
7818 ipr_update_res_entry(res
, &cfgtew
);
7821 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7823 res
->del_from_ml
= 1;
7824 res
->res_handle
= IPR_INVALID_RES_HANDLE
;
7825 list_move_tail(&res
->queue
, &ioa_cfg
->used_res_q
);
7829 list_for_each_entry_safe(res
, temp
, &old_res
, queue
) {
7830 ipr_clear_res_target(res
);
7831 list_move_tail(&res
->queue
, &ioa_cfg
->free_res_q
);
7834 if (ioa_cfg
->dual_raid
&& ipr_dual_ioa_raid
)
7835 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page24
;
7837 ipr_cmd
->job_step
= ipr_ioafp_mode_sense_page28
;
7840 return IPR_RC_JOB_CONTINUE
;
7844 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7845 * @ipr_cmd: ipr command struct
7847 * This function sends a Query IOA Configuration command
7848 * to the adapter to retrieve the IOA configuration table.
7853 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd
*ipr_cmd
)
7855 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7856 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7857 struct ipr_inquiry_page3
*ucode_vpd
= &ioa_cfg
->vpd_cbs
->page3_data
;
7858 struct ipr_inquiry_cap
*cap
= &ioa_cfg
->vpd_cbs
->cap
;
7861 if (cap
->cap
& IPR_CAP_DUAL_IOA_RAID
)
7862 ioa_cfg
->dual_raid
= 1;
7863 dev_info(&ioa_cfg
->pdev
->dev
, "Adapter firmware version: %02X%02X%02X%02X\n",
7864 ucode_vpd
->major_release
, ucode_vpd
->card_type
,
7865 ucode_vpd
->minor_release
[0], ucode_vpd
->minor_release
[1]);
7866 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7867 ioarcb
->res_handle
= cpu_to_be32(IPR_IOA_RES_HANDLE
);
7869 ioarcb
->cmd_pkt
.cdb
[0] = IPR_QUERY_IOA_CONFIG
;
7870 ioarcb
->cmd_pkt
.cdb
[6] = (ioa_cfg
->cfg_table_size
>> 16) & 0xff;
7871 ioarcb
->cmd_pkt
.cdb
[7] = (ioa_cfg
->cfg_table_size
>> 8) & 0xff;
7872 ioarcb
->cmd_pkt
.cdb
[8] = ioa_cfg
->cfg_table_size
& 0xff;
7874 ipr_init_ioadl(ipr_cmd
, ioa_cfg
->cfg_table_dma
, ioa_cfg
->cfg_table_size
,
7875 IPR_IOADL_FLAGS_READ_LAST
);
7877 ipr_cmd
->job_step
= ipr_init_res_table
;
7879 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
, IPR_INTERNAL_TIMEOUT
);
7882 return IPR_RC_JOB_RETURN
;
7885 static int ipr_ioa_service_action_failed(struct ipr_cmnd
*ipr_cmd
)
7887 u32 ioasc
= be32_to_cpu(ipr_cmd
->s
.ioasa
.hdr
.ioasc
);
7889 if (ioasc
== IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT
)
7890 return IPR_RC_JOB_CONTINUE
;
7892 return ipr_reset_cmd_failed(ipr_cmd
);
7895 static void ipr_build_ioa_service_action(struct ipr_cmnd
*ipr_cmd
,
7896 __be32 res_handle
, u8 sa_code
)
7898 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7900 ioarcb
->res_handle
= res_handle
;
7901 ioarcb
->cmd_pkt
.cdb
[0] = IPR_IOA_SERVICE_ACTION
;
7902 ioarcb
->cmd_pkt
.cdb
[1] = sa_code
;
7903 ioarcb
->cmd_pkt
.request_type
= IPR_RQTYPE_IOACMD
;
7907 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7913 static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd
*ipr_cmd
)
7915 struct ipr_ioarcb
*ioarcb
= &ipr_cmd
->ioarcb
;
7916 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
7917 struct ipr_inquiry_pageC4
*pageC4
= &ioa_cfg
->vpd_cbs
->pageC4_data
;
7921 ipr_cmd
->job_step
= ipr_ioafp_query_ioa_cfg
;
7923 if (pageC4
->cache_cap
[0] & IPR_CAP_SYNC_CACHE
) {
7924 ipr_build_ioa_service_action(ipr_cmd
,
7925 cpu_to_be32(IPR_IOA_RES_HANDLE
),
7926 IPR_IOA_SA_CHANGE_CACHE_PARAMS
);
7928 ioarcb
->cmd_pkt
.cdb
[2] = 0x40;
7930 ipr_cmd
->job_step_failed
= ipr_ioa_service_action_failed
;
7931 ipr_do_req(ipr_cmd
, ipr_reset_ioa_job
, ipr_timeout
,
7932 IPR_SET_SUP_DEVICE_TIMEOUT
);
7935 return IPR_RC_JOB_RETURN
;
7939 return IPR_RC_JOB_CONTINUE
;
/**
 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This utility function sends an inquiry to the adapter.
 *
 * Return value:
 *	none
 **/
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      dma_addr_t dma_addr, u8 xfer_len)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;

	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
	ioarcb->cmd_pkt.cdb[1] = flags;
	ioarcb->cmd_pkt.cdb[2] = page;
	ioarcb->cmd_pkt.cdb[4] = xfer_len;

	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
}
/**
 * ipr_inquiry_page_supported - Is the given inquiry page supported
 * @page0:		inquiry page 0 buffer
 * @page:		page code.
 *
 * This function determines if the specified inquiry page is supported.
 *
 * Return value:
 *	1 if page is supported / 0 if not
 **/
static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
		if (page0->page[i] == page)
			return 1;

	return 0;
}
7993 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7994 * @ipr_cmd: ipr command struct
7996 * This function sends a Page 0xC4 inquiry to the adapter
7997 * to retrieve software VPD information.
8000 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8002 static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd
*ipr_cmd
)
8004 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8005 struct ipr_inquiry_page0
*page0
= &ioa_cfg
->vpd_cbs
->page0_data
;
8006 struct ipr_inquiry_pageC4
*pageC4
= &ioa_cfg
->vpd_cbs
->pageC4_data
;
8009 ipr_cmd
->job_step
= ipr_ioafp_set_caching_parameters
;
8010 memset(pageC4
, 0, sizeof(*pageC4
));
8012 if (ipr_inquiry_page_supported(page0
, 0xC4)) {
8013 ipr_ioafp_inquiry(ipr_cmd
, 1, 0xC4,
8014 (ioa_cfg
->vpd_cbs_dma
8015 + offsetof(struct ipr_misc_cbs
,
8017 sizeof(struct ipr_inquiry_pageC4
));
8018 return IPR_RC_JOB_RETURN
;
8022 return IPR_RC_JOB_CONTINUE
;
8026 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8027 * @ipr_cmd: ipr command struct
8029 * This function sends a Page 0xD0 inquiry to the adapter
8030 * to retrieve adapter capabilities.
8033 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8035 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd
*ipr_cmd
)
8037 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8038 struct ipr_inquiry_page0
*page0
= &ioa_cfg
->vpd_cbs
->page0_data
;
8039 struct ipr_inquiry_cap
*cap
= &ioa_cfg
->vpd_cbs
->cap
;
8042 ipr_cmd
->job_step
= ipr_ioafp_pageC4_inquiry
;
8043 memset(cap
, 0, sizeof(*cap
));
8045 if (ipr_inquiry_page_supported(page0
, 0xD0)) {
8046 ipr_ioafp_inquiry(ipr_cmd
, 1, 0xD0,
8047 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, cap
),
8048 sizeof(struct ipr_inquiry_cap
));
8049 return IPR_RC_JOB_RETURN
;
8053 return IPR_RC_JOB_CONTINUE
;
8057 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8058 * @ipr_cmd: ipr command struct
8060 * This function sends a Page 3 inquiry to the adapter
8061 * to retrieve software VPD information.
8064 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8066 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd
*ipr_cmd
)
8068 struct ipr_ioa_cfg
*ioa_cfg
= ipr_cmd
->ioa_cfg
;
8072 ipr_cmd
->job_step
= ipr_ioafp_cap_inquiry
;
8074 ipr_ioafp_inquiry(ipr_cmd
, 1, 3,
8075 ioa_cfg
->vpd_cbs_dma
+ offsetof(struct ipr_misc_cbs
, page3_data
),
8076 sizeof(struct ipr_inquiry_page3
));
8079 return IPR_RC_JOB_RETURN
;
/**
 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a Page 0 inquiry to the adapter
 * to retrieve supported inquiry pages.
 *
 * Return value: IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	char type[5];

	/* Grab the type out of the VPD and store it away */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	type[4] = '\0';
	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);

	if (ipr_invalid_adapter(ioa_cfg)) {
		dev_err(&ioa_cfg->pdev->dev,
			"Adapter not supported in this hardware configuration.\n");

		if (!ipr_testmode) {
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue,
				      &ioa_cfg->hrrq->hrrq_free_q);
			return IPR_RC_JOB_RETURN;
		}
	}

	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
			  sizeof(struct ipr_inquiry_page0));

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a standard inquiry to the adapter.
 **/
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
			  sizeof(struct ipr_ioa_vpd));

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends an Identify Host Request Response Queue
 * command to establish the HRRQ with the adapter.
 **/
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_hrr_queue *hrrq;

	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
	if (ioa_cfg->identify_hrrq_index == 0)
		dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");

	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];

		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);

		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		if (ioa_cfg->sis64)
			ioarcb->cmd_pkt.cdb[1] = 0x1;

		if (ioa_cfg->nvectors == 1)
			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
		else
			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;

		ioarcb->cmd_pkt.cdb[2] = ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
		ioarcb->cmd_pkt.cdb[3] = ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
		ioarcb->cmd_pkt.cdb[4] = ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[5] = ((u64) hrrq->host_rrq_dma) & 0xff;
		ioarcb->cmd_pkt.cdb[7] = ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = (sizeof(u32) * hrrq->size) & 0xff;

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[9] = ioa_cfg->identify_hrrq_index;

		if (ioa_cfg->sis64) {
			ioarcb->cmd_pkt.cdb[10] = ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
			ioarcb->cmd_pkt.cdb[11] = ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
			ioarcb->cmd_pkt.cdb[12] = ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
			ioarcb->cmd_pkt.cdb[13] = ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
		}

		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
			ioarcb->cmd_pkt.cdb[14] = ioa_cfg->identify_hrrq_index;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
			   IPR_INTERNAL_TIMEOUT);

		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;

		return IPR_RC_JOB_RETURN;
	}

	return IPR_RC_JOB_CONTINUE;
}
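
/*
 * Identify Host RRQ CDB layout, as built above: bytes 2-5 carry the low
 * 32 bits of the queue DMA address (most significant byte first), bytes
 * 10-13 the high 32 bits on SIS64 adapters, bytes 7-8 the queue length in
 * bytes, and byte 9 (byte 14 on SIS64) selects the HRRQ index when multiple
 * queues are enabled.
 */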
/**
 * ipr_reset_timer_done - Adapter reset timer function
 * @t:	timer context used to fetch the ipr command struct
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 **/
static void ipr_reset_timer_done(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned long lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	if (ioa_cfg->reset_cmd == ipr_cmd) {
		list_del(&ipr_cmd->queue);
		ipr_cmd->done(ipr_cmd);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_start_timer - Start a timer for adapter reset job
 * @ipr_cmd:	ipr command struct
 * @timeout:	timeout value
 *
 * Description: This function is used in adapter reset processing
 * for timing events. If the reset_cmd pointer in the IOA
 * config struct is not this adapter's we are doing nested
 * resets and fail_all_ops will take care of freeing the
 * command block.
 **/
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = ipr_reset_timer_done;
	add_timer(&ipr_cmd->timer);
}

/**
 * ipr_init_ioa_mem - Initialize ioa_cfg control block
 * @ioa_cfg:	ioa cfg struct
 **/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);

		/* Initialize Host RRQ pointers */
		hrrq->hrrq_start = hrrq->host_rrq;
		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
		hrrq->hrrq_curr = hrrq->hrrq_start;
		hrrq->toggle_bit = 1;
		spin_unlock(&hrrq->_lock);
	}

	ioa_cfg->identify_hrrq_index = 0;
	if (ioa_cfg->hrrq_num == 1)
		atomic_set(&ioa_cfg->hrrq_index, 0);
	else
		atomic_set(&ioa_cfg->hrrq_index, 1);

	/* Zero out config table */
	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
}

/**
 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
 * @ipr_cmd:	ipr command struct
 *
 * Return value: IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
{
	unsigned long stage, stage_time;
	u32 feedback;
	volatile u32 int_reg;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u64 maskval = 0;

	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);

	/* sanity check the stage_time value */
	if (stage_time == 0)
		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;

	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		stage_time = ioa_cfg->transop_timeout;
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
			maskval = IPR_PCII_IPL_STAGE_CHANGE;
			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
			return IPR_RC_JOB_CONTINUE;
		}
	}

	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
	ipr_cmd->timer.function = ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);

	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
}
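
/*
 * The init feedback register read above encodes both the current IPL stage
 * and a per-stage timeout; the timeout is clamped between
 * IPR_IPL_INIT_MIN_STAGE_TIME and IPR_LONG_OPERATIONAL_TIMEOUT before it is
 * used to arm the operational timer.
 */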
/**
 * ipr_reset_enable_ioa - Enable the IOA following a reset.
 * @ipr_cmd:	ipr command struct
 *
 * This function reinitializes some control blocks and
 * enables destructive diagnostics on the adapter.
 **/
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	volatile u32 int_reg;
	volatile u64 maskval;
	int i;

	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
		       ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		return IPR_RC_JOB_CONTINUE;
	}

	/* Enable destructive diagnostics on IOA */
	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);

	if (ioa_cfg->sis64) {
		maskval = IPR_PCII_IPL_STAGE_CHANGE;
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
	} else
		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);

	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");

	if (ioa_cfg->sis64) {
		ipr_cmd->job_step = ipr_reset_next_stage;
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
	ipr_cmd->timer.function = ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	add_timer(&ipr_cmd->timer);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked when an adapter dump has run out
 * of processing time.
 *
 * Return value: IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state == GET_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	else if (ioa_cfg->sdt_state == READ_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;

	ioa_cfg->dump_timeout = 1;
	ipr_cmd->job_step = ipr_reset_alert;

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_unit_check_no_data - Log a unit check/no data error log
 * @ioa_cfg:	ioa config struct
 *
 * Logs an error indicating the adapter unit checked, but for some
 * reason, we were unable to fetch the unit check buffer.
 **/
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
}

/**
 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
 * @ioa_cfg:	ioa config struct
 *
 * Fetches the unit check buffer from the adapter by clocking the data
 * through the mailbox register.
 **/
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	unsigned long mailbox;
	struct ipr_hostrcb *hostrcb;
	struct ipr_uc_sdt sdt;
	int rc, length;
	u32 ioasc;

	mailbox = readl(ioa_cfg->ioa_mailbox);

	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));

	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
		ipr_unit_check_no_data(ioa_cfg);
		return;
	}

	/* Find length of the first sdt entry (UC buffer) */
	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
		length = be32_to_cpu(sdt.entry[0].end_token);
	else
		length = (be32_to_cpu(sdt.entry[0].end_token) -
			  be32_to_cpu(sdt.entry[0].start_token)) &
			  IPR_FMT2_MBX_ADDR_MASK;

	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
			     struct ipr_hostrcb, queue);
	list_del_init(&hostrcb->queue);
	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));

	rc = ipr_get_ldump_data_section(ioa_cfg,
					be32_to_cpu(sdt.entry[0].start_token),
					(__be32 *)&hostrcb->hcam,
					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));

	if (!rc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
		    ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	} else
		ipr_unit_check_no_data(ioa_cfg);

	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
}
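
/*
 * For format 3 SDTs the first entry's end_token is already a byte length;
 * for format 2 the length is derived from the start/end token difference
 * masked with IPR_FMT2_MBX_ADDR_MASK, as done above.
 */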
/**
 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function will call to get the unit check buffer.
 **/
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ioa_cfg->ioa_unit_checked = 0;
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	ipr_reset_start_timer(ipr_cmd, 0);

	return IPR_RC_JOB_RETURN;
}

static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->sdt_state != GET_DUMP)
		return IPR_RC_JOB_RETURN;

	if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
	    (readl(ioa_cfg->regs.sense_interrupt_reg) &
	     IPR_PCII_MAILBOX_STABLE)) {

		if (!ipr_cmd->u.time_left)
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting for Mailbox register.\n");

		ioa_cfg->sdt_state = READ_DUMP;
		ioa_cfg->dump_timeout = 0;
		if (ioa_cfg->sis64)
			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
		else
			ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
		ipr_cmd->job_step = ipr_reset_wait_for_dump;
		schedule_work(&ioa_cfg->work_q);
	} else {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd,
				      IPR_CHECK_FOR_RESET_TIMEOUT);
	}

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_restore_cfg_space - Restore PCI config space.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function restores the saved PCI config space of
 * the adapter, fails all outstanding ops back to the callers, and
 * fetches the dump/unit check if applicable to this reset.
 *
 * Return value: IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u32 int_reg;

	ioa_cfg->pdev->state_saved = true;
	pci_restore_state(ioa_cfg->pdev);

	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		return IPR_RC_JOB_CONTINUE;
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* Set the adapter to the correct endian mode. */
		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	if (ioa_cfg->ioa_unit_checked) {
		if (ioa_cfg->sis64) {
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
			return IPR_RC_JOB_RETURN;
		} else {
			ioa_cfg->ioa_unit_checked = 0;
			ipr_get_unit_check_buffer(ioa_cfg);
			ipr_cmd->job_step = ipr_reset_alert;
			ipr_reset_start_timer(ipr_cmd, 0);
			return IPR_RC_JOB_RETURN;
		}
	}

	if (ioa_cfg->in_ioa_bringdown) {
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	} else if (ioa_cfg->sdt_state == GET_DUMP) {
		ipr_cmd->job_step = ipr_dump_mailbox_wait;
		ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
	} else {
		ipr_cmd->job_step = ipr_reset_enable_ioa;
	}

	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_bist_done - BIST has completed on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: Unblock config space and resume the reset process.
 *
 * Return value: IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	if (ioa_cfg->cfg_locked)
		pci_cfg_access_unlock(ioa_cfg->pdev);
	ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_start_bist - Run BIST on the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function runs BIST on the adapter, then delays 2 seconds.
 *
 * Return value: IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = PCIBIOS_SUCCESSFUL;

	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
		writel(IPR_UPROCI_SIS64_START_BIST,
		       ioa_cfg->regs.set_uproc_interrupt_reg32);
	else
		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);

	if (rc == PCIBIOS_SUCCESSFUL) {
		ipr_cmd->job_step = ipr_reset_bist_done;
		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
		rc = IPR_RC_JOB_RETURN;
	} else {
		if (ioa_cfg->cfg_locked)
			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
		ioa_cfg->cfg_locked = 0;
		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}
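
/*
 * BIST is started either by an MMIO doorbell write (IPR_MMIO chips) or by
 * writing PCI_BIST_START to the standard PCI_BIST config register; in both
 * cases the job waits IPR_WAIT_FOR_BIST_TIMEOUT before resuming in
 * ipr_reset_bist_done().
 */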
/**
 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This clears PCI reset to the adapter and delays two seconds.
 **/
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->job_step = ipr_reset_bist_done;
	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
 * @work:	work struct
 *
 * Description: This pulses warm reset to a slot.
 **/
static void ipr_reset_reset_work(struct work_struct *work)
{
	struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct pci_dev *pdev = ioa_cfg->pdev;
	unsigned long lock_flags = 0;

	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
	msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->reset_cmd == ipr_cmd)
		ipr_reset_ioa_job(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This asserts PCI reset to the adapter.
 **/
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
	queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_block_config_access_wait - Wait for permission to block config access
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value: IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;

	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
		ioa_cfg->cfg_locked = 1;
		ipr_cmd->job_step = ioa_cfg->reset;
	} else {
		if (ipr_cmd->u.time_left) {
			rc = IPR_RC_JOB_RETURN;
			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
			ipr_reset_start_timer(ipr_cmd,
					      IPR_CHECK_FOR_RESET_TIMEOUT);
		} else {
			ipr_cmd->job_step = ioa_cfg->reset;
			dev_err(&ioa_cfg->pdev->dev,
				"Timed out waiting to lock config access. Resetting anyway.\n");
		}
	}

	return rc;
}

/**
 * ipr_reset_block_config_access - Block config access to the IOA
 * @ipr_cmd:	ipr command struct
 *
 * Description: This attempts to block config access to the IOA.
 *
 * Return value: IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_allowed - Query whether or not IOA can be reset
 * @ioa_cfg:	ioa config struct
 *
 * Return value: 0 if reset not allowed / non-zero if reset is allowed
 **/
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	volatile u32 temp_reg;

	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
}

/**
 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function waits for adapter permission to run BIST,
 * then runs BIST. If the adapter does not give permission after a
 * reasonable time, we will reset the adapter anyway. The impact of
 * resetting the adapter without warning the adapter is the risk of
 * losing the persistent error log on the adapter. If the adapter is
 * reset while it is writing to the flash on the adapter, the flash
 * segment will have bad ECC and be zeroed.
 *
 * Return value: IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_RETURN;

	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		rc = IPR_RC_JOB_CONTINUE;
	}

	return rc;
}

/**
 * ipr_reset_alert - Alert the adapter of a pending reset
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function alerts the adapter that it will be reset.
 * If memory space is not currently enabled, proceed directly
 * to running BIST on the adapter. The timer must always be started
 * so we guarantee we do not run BIST from ipr_isr.
 **/
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	u16 cmd_reg;
	int rc;

	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);

	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
	}

	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_quiesce_done - Complete IOA disconnect
 * @ipr_cmd:	ipr command struct
 *
 * Description: Freeze the adapter to complete quiesce processing
 *
 * Return value: IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ipr_cmd->job_step = ipr_ioa_bringdown_done;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_cancel_hcam_done - Check for outstanding commands
 * @ipr_cmd:	ipr command struct
 *
 * Description: Ensure nothing is outstanding to the IOA and
 * proceed with IOA disconnect. Otherwise reset the IOA.
 *
 * Return value: IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_cmnd *loop_cmd;
	struct ipr_hrr_queue *hrrq;
	int rc = IPR_RC_JOB_CONTINUE;
	int count = 0;

	ipr_cmd->job_step = ipr_reset_quiesce_done;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
			count++;
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
			rc = IPR_RC_JOB_RETURN;
			break;
		}
		spin_unlock(&hrrq->_lock);

		if (count)
			break;
	}

	return rc;
}

/**
 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
 * @ipr_cmd:	ipr command struct
 *
 * Description: Cancel any outstanding HCAMs to the IOA.
 *
 * Return value: IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int rc = IPR_RC_JOB_CONTINUE;
	struct ipr_cmd_pkt *cmd_pkt;
	struct ipr_cmnd *hcam_cmd;
	struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];

	ipr_cmd->job_step = ipr_reset_cancel_hcam_done;

	if (!hrrq->ioa_is_dead) {
		if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
			list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
				if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
					continue;

				ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
				ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
				cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
				cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
				cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
				cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
				cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
				cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
				cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
				cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
				cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
				cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
				cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;

				ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
					   IPR_CANCEL_TIMEOUT);

				rc = IPR_RC_JOB_RETURN;
				ipr_cmd->job_step = ipr_reset_cancel_hcam;
				break;
			}
		}
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	return rc;
}
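
/*
 * The cancel request built above addresses the HCAM by its 64-bit IOARCB
 * DMA address: bytes 2-5 of the CDB hold the low 32 bits and bytes 10-13
 * the high 32 bits, matching the IPR_CANCEL_64BIT_IOARCB modifier.
 */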
/**
 * ipr_reset_ucode_download_done - Microcode download completion
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function unmaps the microcode download buffer.
 *
 * Return value: IPR_RC_JOB_CONTINUE
 **/
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
		     sglist->num_sg, DMA_TO_DEVICE);

	ipr_cmd->job_step = ipr_reset_alert;
	return IPR_RC_JOB_CONTINUE;
}

/**
 * ipr_reset_ucode_download - Download microcode to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function checks to see if there is microcode
 * to download to the adapter. If there is, a download is performed.
 *
 * Return value: IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;

	ipr_cmd->job_step = ipr_reset_alert;

	if (!sglist)
		return IPR_RC_JOB_CONTINUE;

	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;

	if (ioa_cfg->sis64)
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	else
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		   IPR_WRITE_BUFFER_TIMEOUT);

	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_reset_shutdown_ioa - Shutdown the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function issues an adapter shutdown of the
 * specified type to the specified adapter as part of the
 * adapter reset job.
 *
 * Return value: IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
	unsigned long timeout;
	int rc = IPR_RC_JOB_CONTINUE;

	if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
		ipr_cmd->job_step = ipr_reset_cancel_hcam;
	else if (shutdown_type != IPR_SHUTDOWN_NONE &&
			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;

		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
			timeout = IPR_SHUTDOWN_TIMEOUT;
		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
			timeout = IPR_INTERNAL_TIMEOUT;
		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
		else
			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;

		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);

		rc = IPR_RC_JOB_RETURN;
		ipr_cmd->job_step = ipr_reset_ucode_download;
	} else
		ipr_cmd->job_step = ipr_reset_alert;

	return rc;
}

/**
 * ipr_reset_ioa_job - Adapter reset job
 * @ipr_cmd:	ipr command struct
 *
 * Description: This function is the job router for the adapter reset job.
 **/
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	u32 rc, ioasc;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	do {
		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

		if (ioa_cfg->reset_cmd != ipr_cmd) {
			/*
			 * We are doing nested adapter resets and this is
			 * not the current reset job.
			 */
			list_add_tail(&ipr_cmd->queue,
				      &ipr_cmd->hrrq->hrrq_free_q);
			return;
		}

		if (IPR_IOASC_SENSE_KEY(ioasc)) {
			rc = ipr_cmd->job_step_failed(ipr_cmd);
			if (rc == IPR_RC_JOB_RETURN)
				return;
		}

		ipr_reinit_ipr_cmnd(ipr_cmd);
		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);
}
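
/*
 * ipr_reset_ioa_job() drives the reset state machine: a job step that
 * returns IPR_RC_JOB_CONTINUE is followed immediately by the next step in
 * the same loop iteration, while IPR_RC_JOB_RETURN means the step has queued
 * an adapter command or timer that will re-enter the job later.
 */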
/**
 * _ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @job_step:		first job step of reset job
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter
 * starting at the selected job step.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 **/
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step) (struct ipr_cmnd *),
				    enum ipr_shutdown_type shutdown_type)
{
	struct ipr_cmnd *ipr_cmd;
	int i;

	ioa_cfg->in_reset_reload = 1;
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_cmds = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
		ioa_cfg->scsi_unblock = 0;
		ioa_cfg->scsi_blocked = 1;
		scsi_block_requests(ioa_cfg->host);
	}

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioa_cfg->reset_cmd = ipr_cmd;
	ipr_cmd->job_step = job_step;
	ipr_cmd->u.shutdown_type = shutdown_type;

	ipr_reset_ioa_job(ipr_cmd);
}

/**
 * ipr_initiate_ioa_reset - Initiate an adapter reset
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate the reset of the given adapter.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 **/
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   enum ipr_shutdown_type shutdown_type)
{
	int i;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
		return;

	if (ioa_cfg->in_reset_reload) {
		if (ioa_cfg->sdt_state == GET_DUMP)
			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
		else if (ioa_cfg->sdt_state == READ_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
	}

	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA taken offline - error recovery failed\n");

		ioa_cfg->reset_retries = 0;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].ioa_is_dead = 1;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}

		if (ioa_cfg->in_ioa_bringdown) {
			ioa_cfg->reset_cmd = NULL;
			ioa_cfg->in_reset_reload = 0;
			ipr_fail_all_ops(ioa_cfg);
			wake_up_all(&ioa_cfg->reset_wait_q);

			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
				ioa_cfg->scsi_unblock = 1;
				schedule_work(&ioa_cfg->work_q);
			}
			return;
		} else {
			ioa_cfg->in_ioa_bringdown = 1;
			shutdown_type = IPR_SHUTDOWN_NONE;
		}
	}

	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
				shutdown_type);
}
/**
 * ipr_reset_freeze - Hold off all I/O activity
 * @ipr_cmd:	ipr command struct
 *
 * Description: If the PCI slot is frozen, hold off all I/O
 * activity; then, as soon as the slot is available again,
 * initiate an adapter reset.
 **/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	int i;

	/* Disallow new interrupts, avoid loop */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
	ipr_cmd->done = ipr_reset_ioa_job;
	return IPR_RC_JOB_RETURN;
}

/**
 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the MMIO
 * access to the IOA has been restored
 **/
static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (!ioa_cfg->probe_done)
		pci_save_state(pdev);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called to tell us that the PCI bus
 * is down. Can't do anything here, except put the device driver
 * into a holding pattern, waiting for the PCI bus to come back.
 **/
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done)
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 **/
static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->needs_warm_reset)
			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
		else
			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
						IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
 * @pdev:	PCI device struct
 *
 * Description: This routine is called when the PCI bus has
 * permanently failed.
 **/
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	if (ioa_cfg->probe_done) {
		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
			ioa_cfg->sdt_state = ABORT_DUMP;
		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
		ioa_cfg->in_ioa_bringdown = 1;
		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
			spin_lock(&ioa_cfg->hrrq[i]._lock);
			ioa_cfg->hrrq[i].allow_cmds = 0;
			spin_unlock(&ioa_cfg->hrrq[i]._lock);
		}
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		wake_up_all(&ioa_cfg->eeh_wait_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}

/**
 * ipr_pci_error_detected - Called when a PCI error is detected.
 * @pdev:	PCI device struct
 * @state:	PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 **/
static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
 * @ioa_cfg:	ioa cfg struct
 *
 * Description: This is the second phase of adapter initialization.
 * This function takes care of initializing the adapter to the point
 * where it can accept new commands.
 *
 * Return value: 0 on success / -EIO on failure
 **/
static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
	int rc = 0;
	unsigned long host_lock_flags = 0;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	ioa_cfg->probe_done = 1;
	if (ioa_cfg->needs_hard_reset) {
		ioa_cfg->needs_hard_reset = 0;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	} else
		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
					IPR_SHUTDOWN_NONE);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	return rc;
}

/**
 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	if (ioa_cfg->ipr_cmnd_list) {
		for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
			if (ioa_cfg->ipr_cmnd_list[i])
				dma_pool_free(ioa_cfg->ipr_cmd_pool,
					      ioa_cfg->ipr_cmnd_list[i],
					      ioa_cfg->ipr_cmnd_list_dma[i]);

			ioa_cfg->ipr_cmnd_list[i] = NULL;
		}
	}

	if (ioa_cfg->ipr_cmd_pool)
		dma_pool_destroy(ioa_cfg->ipr_cmd_pool);

	kfree(ioa_cfg->ipr_cmnd_list);
	kfree(ioa_cfg->ipr_cmnd_list_dma);
	ioa_cfg->ipr_cmnd_list = NULL;
	ioa_cfg->ipr_cmnd_list_dma = NULL;
	ioa_cfg->ipr_cmd_pool = NULL;
}

/**
 * ipr_free_mem - Frees memory allocated for an adapter
 * @ioa_cfg:	ioa cfg struct
 **/
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	kfree(ioa_cfg->res_entries);
	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
	ipr_free_cmd_blks(ioa_cfg);

	for (i = 0; i < ioa_cfg->hrrq_num; i++)
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);

	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);

	for (i = 0; i < IPR_MAX_HCAMS; i++) {
		dma_free_coherent(&ioa_cfg->pdev->dev,
				  sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}

	ipr_free_dump(ioa_cfg);
	kfree(ioa_cfg->trace);
}

/**
 * ipr_free_irqs - Free all allocated IRQs for the adapter.
 * @ioa_cfg:	ipr cfg struct
 *
 * This function frees all allocated IRQs for the
 * specified adapter.
 **/
static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i;

	for (i = 0; i < ioa_cfg->nvectors; i++)
		free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
	pci_free_irq_vectors(pdev);
}

/**
 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg:	ioa config struct
 *
 * This function frees all allocated resources for the
 * specified adapter.
 **/
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	ipr_free_irqs(ioa_cfg);
	if (ioa_cfg->reset_work_q)
		destroy_workqueue(ioa_cfg->reset_work_q);
	iounmap(ioa_cfg->hdw_dma_regs);
	pci_release_regions(pdev);
	ipr_free_mem(ioa_cfg);
	scsi_host_put(ioa_cfg->host);
	pci_disable_device(pdev);
}
/**
 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value: 0 on success / -ENOMEM on allocation failure
 **/
static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;
	dma_addr_t dma_addr;
	int i, entries_each_hrrq, hrrq_id = 0;

	ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
						sizeof(struct ipr_cmnd), 512, 0);

	if (!ioa_cfg->ipr_cmd_pool)
		return -ENOMEM;

	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);

	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
		ipr_free_cmd_blks(ioa_cfg);
		return -ENOMEM;
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		if (ioa_cfg->hrrq_num > 1) {
			if (i == 0) {
				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
				ioa_cfg->hrrq[i].min_cmd_id = 0;
				ioa_cfg->hrrq[i].max_cmd_id =
					(entries_each_hrrq - 1);
			} else {
				entries_each_hrrq =
					IPR_NUM_BASE_CMD_BLKS /
					(ioa_cfg->hrrq_num - 1);
				ioa_cfg->hrrq[i].min_cmd_id =
					IPR_NUM_INTERNAL_CMD_BLKS +
					(i - 1) * entries_each_hrrq;
				ioa_cfg->hrrq[i].max_cmd_id =
					(IPR_NUM_INTERNAL_CMD_BLKS +
					i * entries_each_hrrq - 1);
			}
		} else {
			entries_each_hrrq = IPR_NUM_CMD_BLKS;
			ioa_cfg->hrrq[i].min_cmd_id = 0;
			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
		}
		ioa_cfg->hrrq[i].size = entries_each_hrrq;
	}

	BUG_ON(ioa_cfg->hrrq_num == 0);

	i = IPR_NUM_CMD_BLKS -
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
	if (i > 0) {
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
	}

	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
		ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
					  GFP_KERNEL, &dma_addr);

		if (!ipr_cmd) {
			ipr_free_cmd_blks(ioa_cfg);
			return -ENOMEM;
		}

		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;

		ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->dma_addr = dma_addr;
		if (ioa_cfg->sis64)
			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
		else
			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);

		ioarcb->host_response_handle = cpu_to_be32(i << 2);
		if (ioa_cfg->sis64) {
			ioarcb->u.sis64_addr_data.data_ioadl_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
		} else {
			ioarcb->write_ioadl_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
			ioarcb->ioasa_host_pci_addr =
				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
		}
		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
		ipr_cmd->cmd_index = i;
		ipr_cmd->ioa_cfg = ioa_cfg;
		ipr_cmd->sense_buffer_dma = dma_addr +
			offsetof(struct ipr_cmnd, sense_buffer);

		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
			hrrq_id++;
	}

	return 0;
}
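
/*
 * With more than one HRRQ, queue 0 is reserved for IPR_NUM_INTERNAL_CMD_BLKS
 * internal commands and the remaining IPR_NUM_BASE_CMD_BLKS blocks are split
 * evenly across the other queues; any blocks left over by the division are
 * added to the last queue above.
 */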
/**
 * ipr_alloc_mem - Allocate memory for an adapter
 * @ioa_cfg:	ioa config struct
 *
 * Return value: 0 on success / non-zero for error
 **/
static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;
	int i, rc = -ENOMEM;

	ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
				       sizeof(struct ipr_resource_entry),
				       GFP_KERNEL);

	if (!ioa_cfg->res_entries)
		goto out;

	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
					      sizeof(struct ipr_misc_cbs),
					      &ioa_cfg->vpd_cbs_dma,
					      GFP_KERNEL);

	if (!ioa_cfg->vpd_cbs)
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					&ioa_cfg->hrrq[i].host_rrq_dma,
					GFP_KERNEL);

		if (!ioa_cfg->hrrq[i].host_rrq)  {
			while (--i >= 0)
				dma_free_coherent(&pdev->dev,
					sizeof(u32) * ioa_cfg->hrrq[i].size,
					ioa_cfg->hrrq[i].host_rrq,
					ioa_cfg->hrrq[i].host_rrq_dma);
			goto out_ipr_free_cmd_blocks;
		}
		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
	}

	ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
						  ioa_cfg->cfg_table_size,
						  &ioa_cfg->cfg_table_dma,
						  GFP_KERNEL);

	if (!ioa_cfg->u.cfg_table)
		goto out_free_host_rrq;

	for (i = 0; i < IPR_MAX_HCAMS; i++) {
		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
							 sizeof(struct ipr_hostrcb),
							 &ioa_cfg->hostrcb_dma[i],
							 GFP_KERNEL);

		if (!ioa_cfg->hostrcb[i])
			goto out_free_hostrcb_dma;

		ioa_cfg->hostrcb[i]->hostrcb_dma =
			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
	}

	ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
				 sizeof(struct ipr_trace_entry),
				 GFP_KERNEL);

	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;

	rc = 0;
out:
	return rc;

out_free_hostrcb_dma:
	while (i-- > 0) {
		dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
				  ioa_cfg->hostrcb[i],
				  ioa_cfg->hostrcb_dma[i]);
	}
	dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
out_free_host_rrq:
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		dma_free_coherent(&pdev->dev,
				  sizeof(u32) * ioa_cfg->hrrq[i].size,
				  ioa_cfg->hrrq[i].host_rrq,
				  ioa_cfg->hrrq[i].host_rrq_dma);
	}
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
	dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
	kfree(ioa_cfg->res_entries);
	goto out;
}
/**
 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
	int i;

	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
		ioa_cfg->bus_attr[i].bus = i;
		ioa_cfg->bus_attr[i].qas_enabled = 0;
		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
		else
			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
	}
}

/**
 * ipr_init_regs - Initialize IOA registers
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
{
	const struct ipr_interrupt_offsets *p;
	struct ipr_interrupts *t;
	void __iomem *base;

	p = &ioa_cfg->chip_cfg->regs;
	t = &ioa_cfg->regs;
	base = ioa_cfg->hdw_dma_regs;

	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
	t->ioarrin_reg = base + p->ioarrin_reg;
	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;

	if (ioa_cfg->sis64) {
		t->init_feedback_reg = base + p->init_feedback_reg;
		t->dump_addr_reg = base + p->dump_addr_reg;
		t->dump_data_reg = base + p->dump_data_reg;
		t->endian_swap_reg = base + p->endian_swap_reg;
	}
}

/**
 * ipr_init_ioa_cfg - Initialize IOA config struct
 * @ioa_cfg:	ioa config struct
 * @host:	scsi host struct
 * @pdev:	PCI dev struct
 **/
static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
			     struct Scsi_Host *host, struct pci_dev *pdev)
{
	int i;

	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	ioa_cfg->log_level = ipr_log_level;
	ioa_cfg->doorbell = IPR_DOORBELL;
	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);

	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
	init_waitqueue_head(&ioa_cfg->reset_wait_q);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
	ioa_cfg->sdt_state = INACTIVE;

	ipr_initialize_bus_attr(ioa_cfg);
	ioa_cfg->max_devs_supported = ipr_max_devs;

	if (ioa_cfg->sis64) {
		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
					   + ((sizeof(struct ipr_config_table_entry64)
					       * ioa_cfg->max_devs_supported)));
	} else {
		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
					   + ((sizeof(struct ipr_config_table_entry)
					       * ioa_cfg->max_devs_supported)));
	}

	host->max_channel = IPR_VSET_BUS;
	host->unique_id = host->host_no;
	host->max_cmd_len = IPR_MAX_CDB_LEN;
	host->can_queue = ioa_cfg->max_cmds;
	pci_set_drvdata(pdev, ioa_cfg);

	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
		if (i == 0)
			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
		else
			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
	}
}
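
/*
 * Note the lock setup above: HRRQ 0 shares the SCSI host lock, while each
 * additional queue uses its own private _lock.
 */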
/**
 * ipr_get_chip_info - Find adapter chip information
 * @dev_id:	PCI device id struct
 *
 * Return value: ptr to chip information on success / NULL on failure
 **/
static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
		if (ipr_chip[i].vendor == dev_id->vendor &&
		    ipr_chip[i].device == dev_id->device)
			return &ipr_chip[i];
	return NULL;
}

/**
 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
 * @ioa_cfg:	ioa config struct
 **/
static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
{
	struct pci_dev *pdev = ioa_cfg->pdev;

	if (pci_channel_offline(pdev)) {
		wait_event_timeout(ioa_cfg->eeh_wait_q,
				   !pci_channel_offline(pdev),
				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
		pci_restore_state(pdev);
	}
}

static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
{
	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;

	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
		ioa_cfg->vectors_info[vec_idx].
			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
	}
}

static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
				      struct pci_dev *pdev)
{
	int i, rc;

	for (i = 1; i < ioa_cfg->nvectors; i++) {
		rc = request_irq(pci_irq_vector(pdev, i),
				 ipr_isr_mhrrq,
				 0,
				 ioa_cfg->vectors_info[i].desc,
				 &ioa_cfg->hrrq[i]);
		if (rc) {
			while (--i > 0)
				free_irq(pci_irq_vector(pdev, i),
					 &ioa_cfg->hrrq[i]);
			return rc;
		}
	}
	return 0;
}

/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	PCI device struct
 *
 * Description: Simply set the msi_received flag to 1 indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value: 0 on success / non-zero on failure
 **/
static irqreturn_t ipr_test_intr(int irq, void *devp)
{
	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
	unsigned long lock_flags = 0;
	irqreturn_t rc = IRQ_HANDLED;

	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->msi_received = 1;
	wake_up(&ioa_cfg->msi_wait_q);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	return rc;
}

/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the tests fails, the driver will fall back to LSI.
 *
 * Return value: 0 on success / non-zero on failure
 **/
static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
	int rc;
	volatile u32 int_reg;
	unsigned long lock_flags = 0;
	int irq = pci_irq_vector(pdev, 0);

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	init_waitqueue_head(&ioa_cfg->msi_wait_q);
	ioa_cfg->msi_received = 0;
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
	if (rc) {
		dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
		return rc;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);

	if (!ioa_cfg->msi_received) {
		/* MSI test failed */
		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	} else if (ipr_debug)
		dev_info(&pdev->dev, "MSI test succeeded.\n");

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	free_irq(irq, ioa_cfg);

	return rc;
}
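
/*
 * The MSI test above raises IPR_PCII_IO_DEBUG_ACKNOWLEDGE and waits up to
 * one second (HZ) for ipr_test_intr() to set msi_received; if the interrupt
 * never arrives the driver falls back to legacy (LSI) interrupts.
 */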
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe_ioa(struct pci_dev *pdev,
			 const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	struct Scsi_Host *host;
	unsigned long ipr_regs_pci;
	void __iomem *ipr_regs;
	int rc = PCIBIOS_SUCCESSFUL;
	volatile u32 mask, uproc, interrupts;
	unsigned long lock_flags, driver_lock_flags;
	unsigned int irq_flag;

	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));

	if (!host) {
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
		rc = -ENOMEM;
		goto out;
	}

	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);

	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);

	if (!ioa_cfg->ipr_chip) {
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			dev_id->vendor, dev_id->device);
		goto out_scsi_host_put;
	}

	/* set SIS 32 or SIS 64 */
	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;

	if (ipr_transop_timeout)
		ioa_cfg->transop_timeout = ipr_transop_timeout;
	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
	else
		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;

	ioa_cfg->revid = pdev->revision;

	ipr_init_ioa_cfg(ioa_cfg, host, pdev);

	ipr_regs_pci = pci_resource_start(pdev, 0);

	rc = pci_request_regions(pdev, IPR_NAME);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	}

	rc = pci_enable_device(pdev);

	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc || pci_channel_offline(pdev)) {
			dev_err(&pdev->dev, "Cannot enable adapter\n");
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			goto out_release_regions;
		}
	}

	ipr_regs = pci_ioremap_bar(pdev, 0);

	if (!ipr_regs) {
		dev_err(&pdev->dev,
			"Couldn't map memory range of registers\n");
		rc = -ENOMEM;
		goto out_disable;
	}

	ioa_cfg->hdw_dma_regs = ipr_regs;
	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;

	ipr_init_regs(ioa_cfg);

	if (ioa_cfg->sis64) {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		if (rc < 0) {
			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
		}
	} else
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (rc < 0) {
		dev_err(&pdev->dev, "Failed to set DMA mask\n");
		goto cleanup_nomem;
	}

	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
				   ioa_cfg->chip_cfg->cache_line_size);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Write of cache line size failed\n");
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		rc = -EIO;
		goto cleanup_nomem;
	}

	/* Issue MMIO read to ensure card is not in EEH */
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
	ipr_wait_for_pci_err_recovery(ioa_cfg);

	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
			IPR_MAX_MSIX_VECTORS);
		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
	}

	irq_flag = PCI_IRQ_LEGACY;
	if (ioa_cfg->ipr_chip->has_msi)
		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
	if (rc < 0) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		goto cleanup_nomem;
	}
	ioa_cfg->nvectors = rc;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		ioa_cfg->clear_isr = 1;

	pci_set_master(pdev);

	if (pci_channel_offline(pdev)) {
		ipr_wait_for_pci_err_recovery(ioa_cfg);
		pci_set_master(pdev);
		if (pci_channel_offline(pdev)) {
			rc = -EIO;
			goto out_msi_disable;
		}
	}

	if (pdev->msi_enabled || pdev->msix_enabled) {
		rc = ipr_test_msi(ioa_cfg, pdev);
		switch (rc) {
		case 0:
			dev_info(&pdev->dev,
				"Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
				pdev->msix_enabled ? "-X" : "");
			break;
		case -EOPNOTSUPP:
			ipr_wait_for_pci_err_recovery(ioa_cfg);
			pci_free_irq_vectors(pdev);

			ioa_cfg->nvectors = 1;
			ioa_cfg->clear_isr = 1;
			break;
		default:
			goto out_msi_disable;
		}
	}

	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
				(unsigned int)num_online_cpus(),
				(unsigned int)IPR_MAX_HRRQ_NUM);

	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	rc = ipr_alloc_mem(ioa_cfg);
	if (rc < 0) {
		dev_err(&pdev->dev,
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	}

	/* Save away PCI config space for use following IOA reset */
	rc = pci_save_state(pdev);

	if (rc != PCIBIOS_SUCCESSFUL) {
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		rc = -EIO;
		goto cleanup_nolog;
	}

	/*
	 * If HRRQ updated interrupt is not masked, or reset alert is set,
	 * the card is in an unknown state and needs a hard reset
	 */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
		ioa_cfg->needs_hard_reset = 1;
	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
		ioa_cfg->needs_hard_reset = 1;
	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
		ioa_cfg->ioa_unit_checked = 1;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	if (pdev->msi_enabled || pdev->msix_enabled) {
		name_msi_vectors(ioa_cfg);
		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
			ioa_cfg->vectors_info[0].desc,
			&ioa_cfg->hrrq[0]);
		if (!rc)
			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
	} else {
		rc = request_irq(pdev->irq, ipr_isr,
			 IRQF_SHARED,
			 IPR_NAME, &ioa_cfg->hrrq[0]);
	}
	if (rc) {
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			pdev->irq, rc);
		goto cleanup_nolog;
	}

	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
		ioa_cfg->needs_warm_reset = 1;
		ioa_cfg->reset = ipr_reset_slot_reset;

		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
								WQ_MEM_RECLAIM, host->host_no);

		if (!ioa_cfg->reset_work_q) {
			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
			rc = -ENOMEM;
			goto out_free_irq;
		}
	} else
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

out:
	return rc;

out_free_irq:
	ipr_free_irqs(ioa_cfg);
cleanup_nolog:
	ipr_free_mem(ioa_cfg);
out_msi_disable:
	ipr_wait_for_pci_err_recovery(ioa_cfg);
	pci_free_irq_vectors(pdev);
cleanup_nomem:
	iounmap(ipr_regs);
out_disable:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
	goto out;
}
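/*
 * Note (illustrative, not part of the original driver): the error path of
 * ipr_probe_ioa() above uses the usual kernel "goto unwind" idiom - each
 * label undoes only the setup steps that had already completed, in reverse
 * order of acquisition.  A minimal sketch of the idiom, with hypothetical
 * resources a and b:
 *
 *	rc = acquire_a();
 *	if (rc)
 *		goto out;
 *	rc = acquire_b();
 *	if (rc)
 *		goto out_release_a;
 *	return 0;
 *   out_release_a:
 *	release_a();
 *   out:
 *	return rc;
 */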
/**
 * ipr_initiate_ioa_bringdown - Bring down an adapter
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * Description: This function will initiate bringing down the adapter.
 * This consists of issuing an IOA shutdown to the adapter
 * to flush the cache, and running BIST.
 * If the caller needs to wait on the completion of the reset,
 * the caller must sleep on the reset_wait_q.
 *
 * Return value:
 *	none
 **/
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       enum ipr_shutdown_type shutdown_type)
{
	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
		ioa_cfg->sdt_state = ABORT_DUMP;
	ioa_cfg->reset_retries = 0;
	ioa_cfg->in_ioa_bringdown = 1;
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
}
/**
 * __ipr_remove - Remove a single adapter
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	int i;
	unsigned long driver_lock_flags;

	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
	}

	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].removing_ioa = 1;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	flush_work(&ioa_cfg->work_q);
	if (ioa_cfg->reset_work_q)
		flush_workqueue(ioa_cfg->reset_work_q);
	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
	list_del(&ioa_cfg->queue);
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	if (ioa_cfg->sdt_state == ABORT_DUMP)
		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);
}
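/*
 * Note (illustrative, not part of the original driver): __ipr_remove() above
 * cannot sleep while holding the host lock (taken with spin_lock_irqsave()),
 * so every "wait until in_reset_reload clears" loop in this driver has the
 * same unlock / wait / relock shape:
 *
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 */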
/**
 * ipr_remove - IOA hot plug remove entry point
 * @pdev:	pci device struct
 *
 * Adapter hot plug remove entry point.
 *
 * Return value:
 *	none
 **/
static void ipr_remove(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
			      &ipr_trace_attr);
	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
			     &ipr_dump_attr);
	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
			&ipr_ioa_async_err_log);
	scsi_remove_host(ioa_cfg->host);

	__ipr_remove(pdev);
}
/**
 * ipr_probe - Adapter hot plug add entry point
 * @pdev:	pci device struct
 * @dev_id:	pci device id struct
 *
 * Return value:
 *	0 on success / non-zero on failure
 **/
static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags;
	int rc, i;

	rc = ipr_probe_ioa(pdev, dev_id);

	if (rc)
		return rc;

	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);

	if (rc) {
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_trace_attr);

	if (rc) {
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
			&ipr_ioa_async_err_log);

	if (rc) {
		ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
				&ipr_dump_attr);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				&ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
				   &ipr_dump_attr);

	if (rc) {
		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_ioa_async_err_log);
		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
				      &ipr_trace_attr);
		scsi_remove_host(ioa_cfg->host);
		__ipr_remove(pdev);
		return rc;
	}

	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
	ioa_cfg->scan_enabled = 1;
	schedule_work(&ioa_cfg->work_q);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;

	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
					ioa_cfg->iopoll_weight, ipr_iopoll);
		}
	}

	scsi_scan_host(ioa_cfg->host);

	return 0;
}
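/*
 * Note (illustrative, not part of the original driver): ipr_probe() only arms
 * irq_poll on the secondary HRRQs (index 1 and up) when the adapter is SIS64
 * and more than one interrupt vector was allocated; hrrq[0] is always serviced
 * directly from the interrupt handler.  The matching teardown is in
 * ipr_shutdown() below, which clears iopoll_weight and calls
 * irq_poll_disable() on the same set of queues before bringing the IOA down.
 */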
/**
 * ipr_shutdown - Shutdown handler.
 * @pdev:	pci device struct
 *
 * This function is invoked upon system shutdown/reboot. It will issue
 * an adapter shutdown to the adapter to flush the write cache.
 *
 * Return value:
 *	none
 **/
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;
	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
	int i;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
		ioa_cfg->iopoll_weight = 0;
		for (i = 1; i < ioa_cfg->hrrq_num; i++)
			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
	}

	while (ioa_cfg->in_reset_reload) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	}

	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
		shutdown_type = IPR_SHUTDOWN_QUIESCE;

	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
		ipr_free_irqs(ioa_cfg);
		pci_disable_device(ioa_cfg->pdev);
	}
}
static struct pci_device_id ipr_pci_table[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
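/*
 * Note (illustrative, not part of the original driver): each entry above is a
 * struct pci_device_id in positional form - { vendor, device, subvendor,
 * subdevice, class, class_mask, driver_data }.  The PCI core matches a newly
 * discovered function against this table and hands the matching entry to
 * ipr_probe() as dev_id, so driver_data carries per-board quirk flags such as
 * IPR_USE_LONG_TRANSOP_TIMEOUT.  Written with the named-initializer helper,
 * one entry would look roughly like:
 *
 *	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
 *			 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4),
 *	  .driver_data = 0 },
 */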
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
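/*
 * Note (illustrative, not part of the original driver): the err_handler hooks
 * wired up above implement the PCI error-recovery (EEH) sequence: the core
 * calls .error_detected() when a fault is reported, .mmio_enabled() once MMIO
 * access is restored so the driver can inspect the hardware, and .slot_reset()
 * after the slot has been reset so the driver can reinitialize the adapter.
 */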
/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	notifier event
 * @buf:	notifier data (unused)
 *
 * Return value:
 *	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
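/*
 * Note (illustrative, not part of the original driver): ipr_halt() runs from
 * the reboot notifier chain registered in ipr_init() below, so it is invoked
 * once per halt/restart/power-off event and walks every adapter on
 * ipr_ioa_head, sending a "shutdown prepare" IOA command so the write cache
 * is flushed before power is removed.
 */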
static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};
/**
 * ipr_init - Module entry point
 *
 * Return value:
 *	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}
/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 *	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);
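/*
 * Note (illustrative, not part of the original driver): as with any PCI
 * driver, "modprobe ipr" runs ipr_init(), and pci_register_driver() then
 * calls ipr_probe() for every adapter matching ipr_pci_table; "rmmod ipr"
 * runs ipr_exit(), whose pci_unregister_driver() call invokes ipr_remove()
 * for each bound adapter after the reboot notifier has been unregistered.
 */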